repo_name
string
path
string
copies
string
size
string
content
string
license
string
C-Aniruddh/Axiom_totoro
arch/arm/mach-sa1100/leds-badge4.c
4916
2774
/*
 * linux/arch/arm/mach-sa1100/leds-badge4.c
 *
 * Author: Christopher Hoover <ch@hpl.hp.com>
 * Copyright (C) 2002 Hewlett-Packard Company
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/init.h>

#include <mach/hardware.h>
#include <asm/leds.h>
#include <asm/system.h>

#include "leds.h"

/* led_state flag bits: driver active / LEDs claimed by an external owner. */
#define LED_STATE_ENABLED	1
#define LED_STATE_CLAIMED	2

static unsigned int led_state;
static unsigned int hw_led_state;	/* cached GPIO level bits for both LEDs */

/* GPIO lines wired to the two on-board LEDs. */
#define LED_RED		GPIO_GPIO(7)
#define LED_GREEN	GPIO_GPIO(9)
#define LED_MASK	(LED_RED|LED_GREEN)

#define LED_IDLE	LED_GREEN	/* green reflects CPU idle state */
#define LED_TIMER	LED_RED		/* red toggles on each timer event */

/*
 * Event handler for the generic ARM LED framework.
 *
 * Each event updates the cached hw_led_state; while the driver is in the
 * ENABLED state the cache is then pushed to the GPIO set/clear registers.
 * Runs with local interrupts disabled because it can be invoked from both
 * task and interrupt context.
 */
void badge4_leds_event(led_event_t evt)
{
	unsigned long flags;

	local_irq_save(flags);

	switch (evt) {
	case led_start:
		/* Make both LED pins outputs and enable register updates. */
		GPDR |= LED_MASK;
		hw_led_state = LED_MASK;
		led_state = LED_STATE_ENABLED;
		break;

	case led_stop:
		led_state &= ~LED_STATE_ENABLED;
		break;

	case led_claim:
		led_state |= LED_STATE_CLAIMED;
		hw_led_state = LED_MASK;
		break;

	case led_release:
		led_state &= ~LED_STATE_CLAIMED;
		hw_led_state = LED_MASK;
		break;

#ifdef CONFIG_LEDS_TIMER
	case led_timer:
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state ^= LED_TIMER;
		break;
#endif

#ifdef CONFIG_LEDS_CPU
	case led_idle_start:
		/* LED off when system is idle */
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state &= ~LED_IDLE;
		break;

	case led_idle_end:
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state |= LED_IDLE;
		break;
#endif

	/*
	 * NOTE(review): "on" clears the pin bit and "off" sets it, which
	 * suggests the LEDs are wired active-low — confirm against the
	 * Badge4 schematic before relying on the polarity.
	 */
	case led_red_on:
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state &= ~LED_RED;
		break;

	case led_red_off:
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state |= LED_RED;
		break;

	case led_green_on:
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state &= ~LED_GREEN;
		break;

	case led_green_off:
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state |= LED_GREEN;
		break;

	default:
		break;
	}

	if (led_state & LED_STATE_ENABLED) {
		/* Drive set bits high via GPSR, their complement low via GPCR. */
		GPSR = hw_led_state;
		GPCR = hw_led_state ^ LED_MASK;
	}

	local_irq_restore(flags);
}
gpl-2.0
baran0119/kernel_samsung_baffinlitexx
drivers/usb/storage/usual-tables.c
5428
3473
/* Driver for USB Mass Storage devices * Usual Tables File for usb-storage and libusual * * Copyright (C) 2009 Alan Stern (stern@rowland.harvard.edu) * * Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more * information about this driver. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/usb_usual.h> /* * The table of devices */ #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags)|(USB_US_TYPE_STOR<<24) } #define COMPLIANT_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } #define USUAL_DEV(useProto, useTrans, useType) \ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans), \ .driver_info = ((useType)<<24) } struct usb_device_id usb_storage_usb_ids[] = { # include "unusual_devs.h" { } /* Terminating entry */ }; EXPORT_SYMBOL_GPL(usb_storage_usb_ids); MODULE_DEVICE_TABLE(usb, usb_storage_usb_ids); #undef UNUSUAL_DEV #undef COMPLIANT_DEV #undef USUAL_DEV /* * The table of 
devices to ignore */ struct ignore_entry { u16 vid, pid, bcdmin, bcdmax; }; #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ { \ .vid = id_vendor, \ .pid = id_product, \ .bcdmin = bcdDeviceMin, \ .bcdmax = bcdDeviceMax, \ } static struct ignore_entry ignore_ids[] = { # include "unusual_alauda.h" # include "unusual_cypress.h" # include "unusual_datafab.h" # include "unusual_ene_ub6250.h" # include "unusual_freecom.h" # include "unusual_isd200.h" # include "unusual_jumpshot.h" # include "unusual_karma.h" # include "unusual_onetouch.h" # include "unusual_realtek.h" # include "unusual_sddr09.h" # include "unusual_sddr55.h" # include "unusual_usbat.h" { } /* Terminating entry */ }; #undef UNUSUAL_DEV /* Return an error if a device is in the ignore_ids list */ int usb_usual_ignore_device(struct usb_interface *intf) { struct usb_device *udev; unsigned vid, pid, bcd; struct ignore_entry *p; udev = interface_to_usbdev(intf); vid = le16_to_cpu(udev->descriptor.idVendor); pid = le16_to_cpu(udev->descriptor.idProduct); bcd = le16_to_cpu(udev->descriptor.bcdDevice); for (p = ignore_ids; p->vid; ++p) { if (p->vid == vid && p->pid == pid && p->bcdmin <= bcd && p->bcdmax >= bcd) return -ENXIO; } return 0; } EXPORT_SYMBOL_GPL(usb_usual_ignore_device);
gpl-2.0
MassStash/htc_m8_kernel_GPE_6.0
net/activity_stats.c
7988
2833
/* net/activity_stats.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 */

#include <linux/proc_fs.h>
#include <linux/suspend.h>
#include <net/net_namespace.h>

/*
 * Track transmission rates in buckets (power of 2).
 * 1,2,4,8...512 seconds.
 *
 * Buckets represent the count of network transmissions at least
 * N seconds apart, where N is 1 << bucket index.
 */
#define BUCKET_MAX 10

/* Track network activity frequency */
static unsigned long activity_stats[BUCKET_MAX];
static ktime_t last_transmit;	/* time of the last counted transmission */
static ktime_t suspend_time;	/* scratch: suspend entry time / duration */
static DEFINE_SPINLOCK(activity_lock);

/*
 * Record a network transmission: credit the largest bucket whose span
 * fits inside the gap since the last transmission, then restart the gap.
 * Safe from any context — state is guarded by an irq-saving spinlock.
 */
void activity_stats_update(void)
{
	int i;
	unsigned long flags;
	ktime_t now;
	s64 delta;

	spin_lock_irqsave(&activity_lock, flags);
	now = ktime_get();
	delta = ktime_to_ns(ktime_sub(now, last_transmit));

	/* Scan from the widest bucket down; stop at the first that fits. */
	for (i = BUCKET_MAX - 1; i >= 0; i--) {
		/*
		 * Check if the time delta between network activity is within the
		 * minimum bucket range.
		 */
		if (delta < (1000000000ULL << i))
			continue;

		activity_stats[i]++;
		last_transmit = now;
		break;
	}
	spin_unlock_irqrestore(&activity_lock, flags);
}

/*
 * Legacy /proc read callback: emit one "bucket-width count" row per bucket.
 * Only whole-buffer reads at offset 0 are supported; partial reads are
 * rejected up front with -ENOMEM (the pre-sized worst-case estimate).
 */
static int activity_stats_read_proc(char *page, char **start, off_t off,
					int count, int *eof, void *data)
{
	int i;
	int len;
	char *p = page;

	/* Only print if offset is 0, or we have enough buffer space */
	if (off || count < (30 * BUCKET_MAX + 22))
		return -ENOMEM;

	len = snprintf(p, count, "Min Bucket(sec) Count\n");
	count -= len;
	p += len;

	for (i = 0; i < BUCKET_MAX; i++) {
		len = snprintf(p, count, "%15d %lu\n", 1 << i, activity_stats[i]);
		count -= len;
		p += len;
	}
	*eof = 1;

	return p - page;
}

/*
 * PM notifier: subtract the time spent suspended from last_transmit so
 * suspend duration does not inflate the measured inter-transmit gap.
 */
static int activity_stats_notifier(struct notifier_block *nb,
					unsigned long event, void *dummy)
{
	switch (event) {
		case PM_SUSPEND_PREPARE:
			suspend_time = ktime_get_real();
			break;

		case PM_POST_SUSPEND:
			suspend_time = ktime_sub(ktime_get_real(), suspend_time);
			last_transmit = ktime_sub(last_transmit, suspend_time);
	}

	return 0;
}

static struct notifier_block activity_stats_notifier_block = {
	.notifier_call = activity_stats_notifier,
};

/* Register /proc/net/stat/activity and hook suspend/resume notifications. */
static int  __init activity_stats_init(void)
{
	create_proc_read_entry("activity", S_IRUGO,
			init_net.proc_net_stat, activity_stats_read_proc, NULL);
	return register_pm_notifier(&activity_stats_notifier_block);
}

subsys_initcall(activity_stats_init);
gpl-2.0
OptiPop/kernel_asus_grouper
drivers/net/sni_82596.c
9268
4754
/*
 * sni_82596.c -- driver for intel 82596 ethernet controller, as
 * used in older SNI RM machines
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/irq.h>

#define SNI_82596_DRIVER_VERSION "SNI RM 82596 driver - Revision: 0.01"

static const char sni_82596_string[] = "snirm_82596";

/* This bus is cache-coherent for DMA, so writeback/invalidate are no-ops. */
#define DMA_ALLOC                      dma_alloc_coherent
#define DMA_FREE                       dma_free_coherent
#define DMA_WBACK(priv, addr, len)     do { } while (0)
#define DMA_INV(priv, addr, len)       do { } while (0)
#define DMA_WBACK_INV(priv, addr, len) do { } while (0)

#define SYSBUS      0x00004400

/* big endian CPU, 82596 little endian */
#define SWAP32(x)   cpu_to_le32((u32)(x))
#define SWAP16(x)   cpu_to_le16((u16)(x))

#define OPT_MPU_16BIT    0x01

/* Shared i82596 core; the macros above parameterize its DMA handling. */
#include "lib82596.c"

MODULE_AUTHOR("Thomas Bogendoerfer");
MODULE_DESCRIPTION("i82596 driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:snirm_82596");
module_param(i596_debug, int, 0);
MODULE_PARM_DESC(i596_debug, "82596 debug mask");

/* Channel attention: any write to the CA register kicks the 82596. */
static inline void ca(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);

	writel(0, lp->ca);
}

/*
 * Write a command + address word to the MPU port.  On 16-bit-attached
 * chips the 32-bit value must go out as two halfword writes, low half
 * first; otherwise the same 32-bit value is written twice, with barriers
 * and a short delay to order/settle the accesses.
 */
static void mpu_port(struct net_device *dev, int c, dma_addr_t x)
{
	struct i596_private *lp = netdev_priv(dev);

	u32 v = (u32) (c) | (u32) (x);

	if (lp->options & OPT_MPU_16BIT) {
		writew(v & 0xffff, lp->mpu_port);
		wmb();	/* order writes to MPU port */
		udelay(1);
		writew(v >> 16, lp->mpu_port);
	} else {
		writel(v, lp->mpu_port);
		wmb();	/* order writes to MPU port */
		udelay(1);
		writel(v, lp->mpu_port);
	}
}

/*
 * Probe one platform device: map the MPU and CA register windows, read
 * the MAC address out of the ID PROM, then hand off to the shared
 * i82596_probe().  Resources are unwound in reverse order on failure.
 */
static int __devinit sni_82596_probe(struct platform_device *dev)
{
	struct	net_device *netdevice;
	struct i596_private *lp;
	struct	resource *res, *ca, *idprom, *options;
	int	retval = -ENOMEM;
	void __iomem *mpu_addr;
	void __iomem *ca_addr;
	u8 __iomem *eth_addr;

	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
	ca = platform_get_resource(dev, IORESOURCE_MEM, 1);
	/* resource type 0: board-specific option flags in IORESOURCE_BITS */
	options = platform_get_resource(dev, 0, 0);
	idprom = platform_get_resource(dev, IORESOURCE_MEM, 2);
	if (!res || !ca || !options || !idprom)
		return -ENODEV;
	mpu_addr = ioremap_nocache(res->start, 4);
	if (!mpu_addr)
		return -ENOMEM;
	ca_addr = ioremap_nocache(ca->start, 4);
	if (!ca_addr)
		goto probe_failed_free_mpu;

	/*
	 * NOTE(review): %x with res->start assumes a 32-bit resource_size_t;
	 * would warn on configs where it is 64-bit — verify or use %pR.
	 */
	printk(KERN_INFO "Found i82596 at 0x%x\n", res->start);

	netdevice = alloc_etherdev(sizeof(struct i596_private));
	if (!netdevice)
		goto probe_failed_free_ca;

	SET_NETDEV_DEV(netdevice, &dev->dev);
	platform_set_drvdata (dev, netdevice);

	netdevice->base_addr = res->start;
	netdevice->irq = platform_get_irq(dev, 0);

	eth_addr = ioremap_nocache(idprom->start, 0x10);
	if (!eth_addr)
		goto probe_failed;

	/* someone seems to like messed up stuff */
	netdevice->dev_addr[0] = readb(eth_addr + 0x0b);
	netdevice->dev_addr[1] = readb(eth_addr + 0x0a);
	netdevice->dev_addr[2] = readb(eth_addr + 0x09);
	netdevice->dev_addr[3] = readb(eth_addr + 0x08);
	netdevice->dev_addr[4] = readb(eth_addr + 0x07);
	netdevice->dev_addr[5] = readb(eth_addr + 0x06);
	iounmap(eth_addr);

	if (!netdevice->irq) {
		printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
			__FILE__, netdevice->base_addr);
		goto probe_failed;
	}

	lp = netdev_priv(netdevice);
	lp->options = options->flags & IORESOURCE_BITS;
	lp->ca = ca_addr;
	lp->mpu_port = mpu_addr;

	retval = i82596_probe(netdevice);
	if (retval == 0)
		return 0;

probe_failed:
	free_netdev(netdevice);
probe_failed_free_ca:
	iounmap(ca_addr);
probe_failed_free_mpu:
	iounmap(mpu_addr);
	return retval;
}

/* Undo sni_82596_probe(): unregister, free DMA area and mappings. */
static int __devexit sni_82596_driver_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct i596_private *lp = netdev_priv(dev);

	unregister_netdev(dev);
	DMA_FREE(dev->dev.parent, sizeof(struct i596_private),
		 lp->dma, lp->dma_addr);
	iounmap(lp->ca);
	iounmap(lp->mpu_port);
	free_netdev (dev);
	return 0;
}

static struct platform_driver sni_82596_driver = {
	.probe	= sni_82596_probe,
	.remove	= __devexit_p(sni_82596_driver_remove),
	.driver	= {
		.name	= sni_82596_string,
		.owner	= THIS_MODULE,
	},
};

static int __devinit sni_82596_init(void)
{
	printk(KERN_INFO SNI_82596_DRIVER_VERSION "\n");
	return platform_driver_register(&sni_82596_driver);
}


static void __exit sni_82596_exit(void)
{
	platform_driver_unregister(&sni_82596_driver);
}

module_init(sni_82596_init);
module_exit(sni_82596_exit);
gpl-2.0
HashBang173/kernel_common
drivers/media/dvb/frontends/tua6100.c
9524
5366
/**
 * Driver for Infineon tua6100 pll.
 *
 * (c) 2006 Andrew de Quincey
 *
 * Based on code found in budget-av.c, which has the following:
 * Compiled from various sources by Michael Hunold <michael@mihu.de>
 *
 * CI interface support (c) 2004 Olivier Gournet <ogournet@anevia.com> &
 *                               Andrew de Quincey <adq_dvb@lidskialf.net>
 *
 * Copyright (C) 2002 Ralph Metzler <rjkm@metzlerbros.de>
 *
 * Copyright (C) 1999-2002 Ralph  Metzler
 *                       & Marcus Metzler for convergence integrated media GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dvb/frontend.h>
#include <asm/types.h>

#include "tua6100.h"

struct tua6100_priv {
	/* i2c details */
	int i2c_address;
	struct i2c_adapter *i2c;
	u32 frequency;		/* last tuned frequency, kHz */
};

/* Free the private data attached by tua6100_attach(). */
static int tua6100_release(struct dvb_frontend *fe)
{
	kfree(fe->tuner_priv);
	fe->tuner_priv = NULL;
	return 0;
}

/*
 * Put the PLL to sleep by zeroing register 0.  The i2c gate is opened
 * around the transfer when the demod provides gate control.
 */
static int tua6100_sleep(struct dvb_frontend *fe)
{
	struct tua6100_priv *priv = fe->tuner_priv;
	int ret;
	u8 reg0[] = { 0x00, 0x00 };
	struct i2c_msg msg = { .addr = priv->i2c_address, .flags = 0, .buf = reg0, .len = 2 };

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);
	if ((ret = i2c_transfer (priv->i2c, &msg, 1)) != 1) {
		printk("%s: i2c error\n", __func__);
	}
	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 0);

	/* i2c_transfer() returns the number of messages on success. */
	return (ret == 1) ? 0 : ret;
}

/*
 * Program the PLL for the frequency in the property cache.  Registers
 * 0/1/2 are computed from frequency-band thresholds (values in kHz),
 * then written in the order reg0, reg2, reg1.
 */
static int tua6100_set_params(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
	struct tua6100_priv *priv = fe->tuner_priv;
	u32 div;
	u32 prediv;
	u8 reg0[] = { 0x00, 0x00 };
	u8 reg1[] = { 0x01, 0x00, 0x00, 0x00 };
	u8 reg2[] = { 0x02, 0x00, 0x00 };
	struct i2c_msg msg0 = { .addr = priv->i2c_address, .flags = 0, .buf = reg0, .len = 2 };
	struct i2c_msg msg1 = { .addr = priv->i2c_address, .flags = 0, .buf = reg1, .len = 4 };
	struct i2c_msg msg2 = { .addr = priv->i2c_address, .flags = 0, .buf = reg2, .len = 3 };

/* reference divider, prescaler and reference frequency (Hz) */
#define _R 4
#define _P 32
#define _ri 4000000

	// setup register 0
	if (c->frequency < 2000000)
		reg0[1] = 0x03;
	else
		reg0[1] = 0x07;

	// setup register 1
	if (c->frequency < 1630000)
		reg1[1] = 0x2c;
	else
		reg1[1] = 0x0c;

	if (_P == 64)
		reg1[1] |= 0x40;
	if (c->frequency >= 1525000)
		reg1[1] |= 0x80;

	// register 2
	reg2[1] = (_R >> 8) & 0x03;
	reg2[2] = _R;
	if (c->frequency < 1455000)
		reg2[1] |= 0x1c;
	else if (c->frequency < 1630000)
		reg2[1] |= 0x0c;
	else
		reg2[1] |= 0x1c;

	/*
	 * The N divisor ratio (note: c->frequency is in kHz, but we
	 * need it in Hz)
	 */
	prediv = (c->frequency * _R) / (_ri / 1000);
	div = prediv / _P;
	reg1[1] |= (div >> 9) & 0x03;
	reg1[2] = div >> 1;
	reg1[3] = (div << 7);
	priv->frequency = ((div * _P) * (_ri / 1000)) / _R;

	// Finally, calculate and store the value for A
	reg1[3] |= (prediv - (div*_P)) & 0x7f;

#undef _R
#undef _P
#undef _ri

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);
	if (i2c_transfer(priv->i2c, &msg0, 1) != 1)
		return -EIO;

	/* NOTE(review): the gate is re-opened (never closed) between the
	 * three writes, and left open if a transfer fails — matches the
	 * original code, but confirm this is what the demod expects. */
	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);
	if (i2c_transfer(priv->i2c, &msg2, 1) != 1)
		return -EIO;

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);
	if (i2c_transfer(priv->i2c, &msg1, 1) != 1)
		return -EIO;

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 0);

	return 0;
}

/* Report the frequency most recently programmed by set_params(). */
static int tua6100_get_frequency(struct dvb_frontend *fe, u32 *frequency)
{
	struct tua6100_priv *priv = fe->tuner_priv;
	*frequency = priv->frequency;
	return 0;
}

static struct dvb_tuner_ops tua6100_tuner_ops = {
	.info = {
		.name = "Infineon TUA6100",
		.frequency_min = 950000,
		.frequency_max = 2150000,
		.frequency_step = 1000,
	},
	.release = tua6100_release,
	.sleep = tua6100_sleep,
	.set_params = tua6100_set_params,
	.get_frequency = tua6100_get_frequency,
};

/*
 * Attach the tuner: probe the chip with a one-byte write + one-byte read
 * at the given address, then install tua6100_tuner_ops on the frontend.
 * Returns fe on success, NULL if the probe transfer or allocation fails.
 */
struct dvb_frontend *tua6100_attach(struct dvb_frontend *fe, int addr,
				    struct i2c_adapter *i2c)
{
	struct tua6100_priv *priv = NULL;
	u8 b1 [] = { 0x80 };
	u8 b2 [] = { 0x00 };
	struct i2c_msg msg [] = {
		{ .addr = addr, .flags = 0, .buf = b1, .len = 1 },
		{ .addr = addr, .flags = I2C_M_RD, .buf = b2, .len = 1 }
	};
	int ret;

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);
	ret = i2c_transfer (i2c, msg, 2);
	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 0);
	if (ret != 2)
		return NULL;

	priv = kzalloc(sizeof(struct tua6100_priv), GFP_KERNEL);
	if (priv == NULL)
		return NULL;

	priv->i2c_address = addr;
	priv->i2c = i2c;

	memcpy(&fe->ops.tuner_ops, &tua6100_tuner_ops,
	       sizeof(struct dvb_tuner_ops));
	fe->tuner_priv = priv;
	return fe;
}
EXPORT_SYMBOL(tua6100_attach);

MODULE_DESCRIPTION("DVB tua6100 driver");
MODULE_AUTHOR("Andrew de Quincey");
MODULE_LICENSE("GPL");
gpl-2.0
dasty/kernel-openwrt-tmp
mm/quicklist.c
9524
2454
/* * Quicklist support. * * Quicklists are light weight lists of pages that have a defined state * on alloc and free. Pages must be in the quicklist specific defined state * (zero by default) when the page is freed. It seems that the initial idea * for such lists first came from Dave Miller and then various other people * improved on it. * * Copyright (C) 2007 SGI, * Christoph Lameter <clameter@sgi.com> * Generalized, added support for multiple lists and * constructors / destructors. */ #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/mm.h> #include <linux/mmzone.h> #include <linux/quicklist.h> DEFINE_PER_CPU(struct quicklist [CONFIG_NR_QUICK], quicklist); #define FRACTION_OF_NODE_MEM 16 static unsigned long max_pages(unsigned long min_pages) { unsigned long node_free_pages, max; int node = numa_node_id(); struct zone *zones = NODE_DATA(node)->node_zones; int num_cpus_on_node; node_free_pages = #ifdef CONFIG_ZONE_DMA zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) + #endif #ifdef CONFIG_ZONE_DMA32 zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) + #endif zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES); max = node_free_pages / FRACTION_OF_NODE_MEM; num_cpus_on_node = cpumask_weight(cpumask_of_node(node)); max /= num_cpus_on_node; return max(max, min_pages); } static long min_pages_to_free(struct quicklist *q, unsigned long min_pages, long max_free) { long pages_to_free; pages_to_free = q->nr_pages - max_pages(min_pages); return min(pages_to_free, max_free); } /* * Trim down the number of pages in the quicklist */ void quicklist_trim(int nr, void (*dtor)(void *), unsigned long min_pages, unsigned long max_free) { long pages_to_free; struct quicklist *q; q = &get_cpu_var(quicklist)[nr]; if (q->nr_pages > min_pages) { pages_to_free = min_pages_to_free(q, min_pages, max_free); while (pages_to_free > 0) { /* * We pass a gfp_t of 0 to quicklist_alloc here * because we will never call into the page allocator. 
*/ void *p = quicklist_alloc(nr, 0, NULL); if (dtor) dtor(p); free_page((unsigned long)p); pages_to_free--; } } put_cpu_var(quicklist); } unsigned long quicklist_total_size(void) { unsigned long count = 0; int cpu; struct quicklist *ql, *q; for_each_online_cpu(cpu) { ql = per_cpu(quicklist, cpu); for (q = ql; q < ql + CONFIG_NR_QUICK; q++) count += q->nr_pages; } return count; }
gpl-2.0
SlimRoms/kernel_lge_hammerhead
arch/sh/kernel/irq_32.c
12340
1147
/*
 * SHcompact irqflags support
 *
 * Copyright (C) 2006 - 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/irqflags.h>
#include <linux/module.h>

/*
 * Restore the interrupt state saved by arch_local_save_flags():
 * either re-disable interrupts (set SR.IMASK to 0xf) or re-enable them
 * by clearing the IMASK bits.  notrace: used by the tracer itself.
 */
void notrace arch_local_irq_restore(unsigned long flags)
{
	unsigned long __dummy0, __dummy1;

	if (flags == ARCH_IRQ_DISABLED) {
		/* Read SR, set all four IMASK bits, write it back. */
		__asm__ __volatile__ (
			"stc	sr, %0\n\t"
			"or	#0xf0, %0\n\t"
			"ldc	%0, sr\n\t"
			: "=&z" (__dummy0)
			: /* no inputs */
			: "memory"
		);
	} else {
		/*
		 * Clear the IMASK bits (mask = ~ARCH_IRQ_DISABLED fed in
		 * through %1); with SR.RB banking, also merge the r6_bank
		 * register contents back into SR.
		 */
		__asm__ __volatile__ (
			"stc	sr, %0\n\t"
			"and	%1, %0\n\t"
#ifdef CONFIG_CPU_HAS_SR_RB
			"stc	r6_bank, %1\n\t"
			"or	%1, %0\n\t"
#endif
			"ldc	%0, sr\n\t"
			: "=&r" (__dummy0), "=r" (__dummy1)
			: "1" (~ARCH_IRQ_DISABLED)
			: "memory"
		);
	}
}
EXPORT_SYMBOL(arch_local_irq_restore);

/*
 * Return the current interrupt state: the IMASK field of SR, isolated
 * with an immediate AND (hence the "z" / r0 constraint).
 */
unsigned long notrace arch_local_save_flags(void)
{
	unsigned long flags;

	__asm__ __volatile__ (
		"stc	sr, %0\n\t"
		"and	#0xf0, %0\n\t"
		: "=&z" (flags)
		: /* no inputs */
		: "memory"
	);

	return flags;
}
EXPORT_SYMBOL(arch_local_save_flags);
gpl-2.0
AOSP-TEAM/android_kernel_samsung_i9100g
sound/soc/sh/siu_dai.c
53
20838
/* * siu_dai.c - ALSA SoC driver for Renesas SH7343, SH7722 SIU peripheral. * * Copyright (C) 2009-2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> * Copyright (C) 2006 Carlos Munoz <carlos@kenati.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/delay.h> #include <linux/firmware.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <asm/clock.h> #include <asm/siu.h> #include <sound/control.h> #include <sound/soc-dai.h> #include "siu.h" /* Board specifics */ #if defined(CONFIG_CPU_SUBTYPE_SH7722) # define SIU_MAX_VOLUME 0x1000 #else # define SIU_MAX_VOLUME 0x7fff #endif #define PRAM_SIZE 0x2000 #define XRAM_SIZE 0x800 #define YRAM_SIZE 0x800 #define XRAM_OFFSET 0x4000 #define YRAM_OFFSET 0x6000 #define REG_OFFSET 0xc000 #define PLAYBACK_ENABLED 1 #define CAPTURE_ENABLED 2 #define VOLUME_CAPTURE 0 #define VOLUME_PLAYBACK 1 #define DFLT_VOLUME_LEVEL 0x08000800 /* * SPDIF is only available on port A and on some SIU implementations it is only * available for input. 
Due to the lack of hardware to test it, SPDIF is left * disabled in this driver version */ struct format_flag { u32 i2s; u32 pcm; u32 spdif; u32 mask; }; struct port_flag { struct format_flag playback; struct format_flag capture; }; struct siu_info *siu_i2s_data = NULL; EXPORT_SYMBOL_GPL(siu_i2s_data); static struct port_flag siu_flags[SIU_PORT_NUM] = { [SIU_PORT_A] = { .playback = { .i2s = 0x50000000, .pcm = 0x40000000, .spdif = 0x80000000, /* not on all SIU versions */ .mask = 0xd0000000, }, .capture = { .i2s = 0x05000000, .pcm = 0x04000000, .spdif = 0x08000000, .mask = 0x0d000000, }, }, [SIU_PORT_B] = { .playback = { .i2s = 0x00500000, .pcm = 0x00400000, .spdif = 0, /* impossible - turn off */ .mask = 0x00500000, }, .capture = { .i2s = 0x00050000, .pcm = 0x00040000, .spdif = 0, /* impossible - turn off */ .mask = 0x00050000, }, }, }; static void siu_dai_start(struct siu_port *port_info) { struct siu_info *info = siu_i2s_data; u32 __iomem *base = info->reg; dev_dbg(port_info->pcm->card->dev, "%s\n", __func__); /* Turn on SIU clock */ pm_runtime_get_sync(port_info->pcm->card->dev); /* Issue software reset to siu */ siu_write32(base + SIU_SRCTL, 0); /* Wait for the reset to take effect */ udelay(1); port_info->stfifo = 0; port_info->trdat = 0; /* portA, portB, SIU operate */ siu_write32(base + SIU_SRCTL, 0x301); /* portA=256fs, portB=256fs */ siu_write32(base + SIU_CKCTL, 0x40400000); /* portA's BRG does not divide SIUCKA */ siu_write32(base + SIU_BRGASEL, 0); siu_write32(base + SIU_BRRA, 0); /* portB's BRG divides SIUCKB by half */ siu_write32(base + SIU_BRGBSEL, 1); siu_write32(base + SIU_BRRB, 0); siu_write32(base + SIU_IFCTL, 0x44440000); /* portA: 32 bit/fs, master; portB: 32 bit/fs, master */ siu_write32(base + SIU_SFORM, 0x0c0c0000); /* * Volume levels: looks like the DSP firmware implements volume controls * differently from what's described in the datasheet */ siu_write32(base + SIU_SBDVCA, port_info->playback.volume); siu_write32(base + SIU_SBDVCB, 
port_info->capture.volume); } static void siu_dai_stop(struct siu_port *port_info) { struct siu_info *info = siu_i2s_data; u32 __iomem *base = info->reg; /* SIU software reset */ siu_write32(base + SIU_SRCTL, 0); /* Turn off SIU clock */ pm_runtime_put_sync(port_info->pcm->card->dev); } static void siu_dai_spbAselect(struct siu_port *port_info) { struct siu_info *info = siu_i2s_data; struct siu_firmware *fw = &info->fw; u32 *ydef = fw->yram0; u32 idx; /* path A use */ if (!info->port_id) idx = 1; /* portA */ else idx = 2; /* portB */ ydef[0] = (fw->spbpar[idx].ab1a << 16) | (fw->spbpar[idx].ab0a << 8) | (fw->spbpar[idx].dir << 7) | 3; ydef[1] = fw->yram0[1]; /* 0x03000300 */ ydef[2] = (16 / 2) << 24; ydef[3] = fw->yram0[3]; /* 0 */ ydef[4] = fw->yram0[4]; /* 0 */ ydef[7] = fw->spbpar[idx].event; port_info->stfifo |= fw->spbpar[idx].stfifo; port_info->trdat |= fw->spbpar[idx].trdat; } static void siu_dai_spbBselect(struct siu_port *port_info) { struct siu_info *info = siu_i2s_data; struct siu_firmware *fw = &info->fw; u32 *ydef = fw->yram0; u32 idx; /* path B use */ if (!info->port_id) idx = 7; /* portA */ else idx = 8; /* portB */ ydef[5] = (fw->spbpar[idx].ab1a << 16) | (fw->spbpar[idx].ab0a << 8) | 1; ydef[6] = fw->spbpar[idx].event; port_info->stfifo |= fw->spbpar[idx].stfifo; port_info->trdat |= fw->spbpar[idx].trdat; } static void siu_dai_open(struct siu_stream *siu_stream) { struct siu_info *info = siu_i2s_data; u32 __iomem *base = info->reg; u32 srctl, ifctl; srctl = siu_read32(base + SIU_SRCTL); ifctl = siu_read32(base + SIU_IFCTL); switch (info->port_id) { case SIU_PORT_A: /* portA operates */ srctl |= 0x200; ifctl &= ~0xc2; break; case SIU_PORT_B: /* portB operates */ srctl |= 0x100; ifctl &= ~0x31; break; } siu_write32(base + SIU_SRCTL, srctl); /* Unmute and configure portA */ siu_write32(base + SIU_IFCTL, ifctl); } /* * At the moment only fixed Left-upper, Left-lower, Right-upper, Right-lower * packing is supported */ static void 
siu_dai_pcmdatapack(struct siu_stream *siu_stream) { struct siu_info *info = siu_i2s_data; u32 __iomem *base = info->reg; u32 dpak; dpak = siu_read32(base + SIU_DPAK); switch (info->port_id) { case SIU_PORT_A: dpak &= ~0xc0000000; break; case SIU_PORT_B: dpak &= ~0x00c00000; break; } siu_write32(base + SIU_DPAK, dpak); } static int siu_dai_spbstart(struct siu_port *port_info) { struct siu_info *info = siu_i2s_data; u32 __iomem *base = info->reg; struct siu_firmware *fw = &info->fw; u32 *ydef = fw->yram0; int cnt; u32 __iomem *add; u32 *ptr; /* Load SPB Program in PRAM */ ptr = fw->pram0; add = info->pram; for (cnt = 0; cnt < PRAM0_SIZE; cnt++, add++, ptr++) siu_write32(add, *ptr); ptr = fw->pram1; add = info->pram + (0x0100 / sizeof(u32)); for (cnt = 0; cnt < PRAM1_SIZE; cnt++, add++, ptr++) siu_write32(add, *ptr); /* XRAM initialization */ add = info->xram; for (cnt = 0; cnt < XRAM0_SIZE + XRAM1_SIZE + XRAM2_SIZE; cnt++, add++) siu_write32(add, 0); /* YRAM variable area initialization */ add = info->yram; for (cnt = 0; cnt < YRAM_DEF_SIZE; cnt++, add++) siu_write32(add, ydef[cnt]); /* YRAM FIR coefficient area initialization */ add = info->yram + (0x0200 / sizeof(u32)); for (cnt = 0; cnt < YRAM_FIR_SIZE; cnt++, add++) siu_write32(add, fw->yram_fir_coeff[cnt]); /* YRAM IIR coefficient area initialization */ add = info->yram + (0x0600 / sizeof(u32)); for (cnt = 0; cnt < YRAM_IIR_SIZE; cnt++, add++) siu_write32(add, 0); siu_write32(base + SIU_TRDAT, port_info->trdat); port_info->trdat = 0x0; /* SPB start condition: software */ siu_write32(base + SIU_SBACTIV, 0); /* Start SPB */ siu_write32(base + SIU_SBCTL, 0xc0000000); /* Wait for program to halt */ cnt = 0x10000; while (--cnt && siu_read32(base + SIU_SBCTL) != 0x80000000) cpu_relax(); if (!cnt) return -EBUSY; /* SPB program start address setting */ siu_write32(base + SIU_SBPSET, 0x00400000); /* SPB hardware start(FIFOCTL source) */ siu_write32(base + SIU_SBACTIV, 0xc0000000); return 0; } static void 
siu_dai_spbstop(struct siu_port *port_info) { struct siu_info *info = siu_i2s_data; u32 __iomem *base = info->reg; siu_write32(base + SIU_SBACTIV, 0); /* SPB stop */ siu_write32(base + SIU_SBCTL, 0); port_info->stfifo = 0; } /* API functions */ /* Playback and capture hardware properties are identical */ static struct snd_pcm_hardware siu_dai_pcm_hw = { .info = SNDRV_PCM_INFO_INTERLEAVED, .formats = SNDRV_PCM_FMTBIT_S16, .rates = SNDRV_PCM_RATE_8000_48000, .rate_min = 8000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = SIU_BUFFER_BYTES_MAX, .period_bytes_min = SIU_PERIOD_BYTES_MIN, .period_bytes_max = SIU_PERIOD_BYTES_MAX, .periods_min = SIU_PERIODS_MIN, .periods_max = SIU_PERIODS_MAX, }; static int siu_dai_info_volume(struct snd_kcontrol *kctrl, struct snd_ctl_elem_info *uinfo) { struct siu_port *port_info = snd_kcontrol_chip(kctrl); dev_dbg(port_info->pcm->card->dev, "%s\n", __func__); uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = SIU_MAX_VOLUME; return 0; } static int siu_dai_get_volume(struct snd_kcontrol *kctrl, struct snd_ctl_elem_value *ucontrol) { struct siu_port *port_info = snd_kcontrol_chip(kctrl); struct device *dev = port_info->pcm->card->dev; u32 vol; dev_dbg(dev, "%s\n", __func__); switch (kctrl->private_value) { case VOLUME_PLAYBACK: /* Playback is always on port 0 */ vol = port_info->playback.volume; ucontrol->value.integer.value[0] = vol & 0xffff; ucontrol->value.integer.value[1] = vol >> 16 & 0xffff; break; case VOLUME_CAPTURE: /* Capture is always on port 1 */ vol = port_info->capture.volume; ucontrol->value.integer.value[0] = vol & 0xffff; ucontrol->value.integer.value[1] = vol >> 16 & 0xffff; break; default: dev_err(dev, "%s() invalid private_value=%ld\n", __func__, kctrl->private_value); return -EINVAL; } return 0; } static int siu_dai_put_volume(struct snd_kcontrol *kctrl, struct snd_ctl_elem_value *ucontrol) { struct siu_port *port_info = 
snd_kcontrol_chip(kctrl); struct device *dev = port_info->pcm->card->dev; struct siu_info *info = siu_i2s_data; u32 __iomem *base = info->reg; u32 new_vol; u32 cur_vol; dev_dbg(dev, "%s\n", __func__); if (ucontrol->value.integer.value[0] < 0 || ucontrol->value.integer.value[0] > SIU_MAX_VOLUME || ucontrol->value.integer.value[1] < 0 || ucontrol->value.integer.value[1] > SIU_MAX_VOLUME) return -EINVAL; new_vol = ucontrol->value.integer.value[0] | ucontrol->value.integer.value[1] << 16; /* See comment above - DSP firmware implementation */ switch (kctrl->private_value) { case VOLUME_PLAYBACK: /* Playback is always on port 0 */ cur_vol = port_info->playback.volume; siu_write32(base + SIU_SBDVCA, new_vol); port_info->playback.volume = new_vol; break; case VOLUME_CAPTURE: /* Capture is always on port 1 */ cur_vol = port_info->capture.volume; siu_write32(base + SIU_SBDVCB, new_vol); port_info->capture.volume = new_vol; break; default: dev_err(dev, "%s() invalid private_value=%ld\n", __func__, kctrl->private_value); return -EINVAL; } if (cur_vol != new_vol) return 1; return 0; } static struct snd_kcontrol_new playback_controls = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "PCM Playback Volume", .index = 0, .info = siu_dai_info_volume, .get = siu_dai_get_volume, .put = siu_dai_put_volume, .private_value = VOLUME_PLAYBACK, }; static struct snd_kcontrol_new capture_controls = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "PCM Capture Volume", .index = 0, .info = siu_dai_info_volume, .get = siu_dai_get_volume, .put = siu_dai_put_volume, .private_value = VOLUME_CAPTURE, }; int siu_init_port(int port, struct siu_port **port_info, struct snd_card *card) { struct device *dev = card->dev; struct snd_kcontrol *kctrl; int ret; *port_info = kzalloc(sizeof(**port_info), GFP_KERNEL); if (!*port_info) return -ENOMEM; dev_dbg(dev, "%s: port #%d@%p\n", __func__, port, *port_info); (*port_info)->playback.volume = DFLT_VOLUME_LEVEL; (*port_info)->capture.volume = DFLT_VOLUME_LEVEL; /* * 
Add mixer support. The SPB is used to change the volume. Both * ports use the same SPB. Therefore, we only register one * control instance since it will be used by both channels. * In error case we continue without controls. */ kctrl = snd_ctl_new1(&playback_controls, *port_info); ret = snd_ctl_add(card, kctrl); if (ret < 0) dev_err(dev, "failed to add playback controls %p port=%d err=%d\n", kctrl, port, ret); kctrl = snd_ctl_new1(&capture_controls, *port_info); ret = snd_ctl_add(card, kctrl); if (ret < 0) dev_err(dev, "failed to add capture controls %p port=%d err=%d\n", kctrl, port, ret); return 0; } void siu_free_port(struct siu_port *port_info) { kfree(port_info); } static int siu_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct siu_info *info = snd_soc_dai_get_drvdata(dai); struct snd_pcm_runtime *rt = substream->runtime; struct siu_port *port_info = siu_port_info(substream); int ret; dev_dbg(substream->pcm->card->dev, "%s: port=%d@%p\n", __func__, info->port_id, port_info); snd_soc_set_runtime_hwparams(substream, &siu_dai_pcm_hw); ret = snd_pcm_hw_constraint_integer(rt, SNDRV_PCM_HW_PARAM_PERIODS); if (unlikely(ret < 0)) return ret; siu_dai_start(port_info); return 0; } static void siu_dai_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct siu_info *info = snd_soc_dai_get_drvdata(dai); struct siu_port *port_info = siu_port_info(substream); dev_dbg(substream->pcm->card->dev, "%s: port=%d@%p\n", __func__, info->port_id, port_info); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) port_info->play_cap &= ~PLAYBACK_ENABLED; else port_info->play_cap &= ~CAPTURE_ENABLED; /* Stop the siu if the other stream is not using it */ if (!port_info->play_cap) { /* during stmread or stmwrite ? 
*/ BUG_ON(port_info->playback.rw_flg || port_info->capture.rw_flg); siu_dai_spbstop(port_info); siu_dai_stop(port_info); } } /* PCM part of siu_dai_playback_prepare() / siu_dai_capture_prepare() */ static int siu_dai_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct siu_info *info = snd_soc_dai_get_drvdata(dai); struct snd_pcm_runtime *rt = substream->runtime; struct siu_port *port_info = siu_port_info(substream); struct siu_stream *siu_stream; int self, ret; dev_dbg(substream->pcm->card->dev, "%s: port %d, active streams %lx, %d channels\n", __func__, info->port_id, port_info->play_cap, rt->channels); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { self = PLAYBACK_ENABLED; siu_stream = &port_info->playback; } else { self = CAPTURE_ENABLED; siu_stream = &port_info->capture; } /* Set up the siu if not already done */ if (!port_info->play_cap) { siu_stream->rw_flg = 0; /* stream-data transfer flag */ siu_dai_spbAselect(port_info); siu_dai_spbBselect(port_info); siu_dai_open(siu_stream); siu_dai_pcmdatapack(siu_stream); ret = siu_dai_spbstart(port_info); if (ret < 0) goto fail; } else { ret = 0; } port_info->play_cap |= self; fail: return ret; } /* * SIU can set bus format to I2S / PCM / SPDIF independently for playback and * capture, however, the current API sets the bus format globally for a DAI. 
*/ static int siu_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct siu_info *info = snd_soc_dai_get_drvdata(dai); u32 __iomem *base = info->reg; u32 ifctl; dev_dbg(dai->dev, "%s: fmt 0x%x on port %d\n", __func__, fmt, info->port_id); if (info->port_id < 0) return -ENODEV; /* Here select between I2S / PCM / SPDIF */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: ifctl = siu_flags[info->port_id].playback.i2s | siu_flags[info->port_id].capture.i2s; break; case SND_SOC_DAIFMT_LEFT_J: ifctl = siu_flags[info->port_id].playback.pcm | siu_flags[info->port_id].capture.pcm; break; /* SPDIF disabled - see comment at the top */ default: return -EINVAL; } ifctl |= ~(siu_flags[info->port_id].playback.mask | siu_flags[info->port_id].capture.mask) & siu_read32(base + SIU_IFCTL); siu_write32(base + SIU_IFCTL, ifctl); return 0; } static int siu_dai_set_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir) { struct clk *siu_clk, *parent_clk; char *siu_name, *parent_name; int ret; if (dir != SND_SOC_CLOCK_IN) return -EINVAL; dev_dbg(dai->dev, "%s: using clock %d\n", __func__, clk_id); switch (clk_id) { case SIU_CLKA_PLL: siu_name = "siua_clk"; parent_name = "pll_clk"; break; case SIU_CLKA_EXT: siu_name = "siua_clk"; parent_name = "siumcka_clk"; break; case SIU_CLKB_PLL: siu_name = "siub_clk"; parent_name = "pll_clk"; break; case SIU_CLKB_EXT: siu_name = "siub_clk"; parent_name = "siumckb_clk"; break; default: return -EINVAL; } siu_clk = clk_get(dai->dev, siu_name); if (IS_ERR(siu_clk)) return PTR_ERR(siu_clk); parent_clk = clk_get(dai->dev, parent_name); if (!IS_ERR(parent_clk)) { ret = clk_set_parent(siu_clk, parent_clk); if (!ret) clk_set_rate(siu_clk, freq); clk_put(parent_clk); } clk_put(siu_clk); return 0; } static struct snd_soc_dai_ops siu_dai_ops = { .startup = siu_dai_startup, .shutdown = siu_dai_shutdown, .prepare = siu_dai_prepare, .set_sysclk = siu_dai_set_sysclk, .set_fmt = siu_dai_set_fmt, }; static struct 
snd_soc_dai_driver siu_i2s_dai = { .name = "sui-i2s-dai", .playback = { .channels_min = 2, .channels_max = 2, .formats = SNDRV_PCM_FMTBIT_S16, .rates = SNDRV_PCM_RATE_8000_48000, }, .capture = { .channels_min = 2, .channels_max = 2, .formats = SNDRV_PCM_FMTBIT_S16, .rates = SNDRV_PCM_RATE_8000_48000, }, .ops = &siu_dai_ops, }; static int __devinit siu_probe(struct platform_device *pdev) { const struct firmware *fw_entry; struct resource *res, *region; struct siu_info *info; int ret; info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; siu_i2s_data = info; ret = request_firmware(&fw_entry, "siu_spb.bin", &pdev->dev); if (ret) goto ereqfw; /* * Loaded firmware is "const" - read only, but we have to modify it in * snd_siu_sh7343_spbAselect() and snd_siu_sh7343_spbBselect() */ memcpy(&info->fw, fw_entry->data, fw_entry->size); release_firmware(fw_entry); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { ret = -ENODEV; goto egetres; } region = request_mem_region(res->start, resource_size(res), pdev->name); if (!region) { dev_err(&pdev->dev, "SIU region already claimed\n"); ret = -EBUSY; goto ereqmemreg; } ret = -ENOMEM; info->pram = ioremap(res->start, PRAM_SIZE); if (!info->pram) goto emappram; info->xram = ioremap(res->start + XRAM_OFFSET, XRAM_SIZE); if (!info->xram) goto emapxram; info->yram = ioremap(res->start + YRAM_OFFSET, YRAM_SIZE); if (!info->yram) goto emapyram; info->reg = ioremap(res->start + REG_OFFSET, resource_size(res) - REG_OFFSET); if (!info->reg) goto emapreg; dev_set_drvdata(&pdev->dev, info); /* register using ARRAY version so we can keep dai name */ ret = snd_soc_register_dais(&pdev->dev, &siu_i2s_dai, 1); if (ret < 0) goto edaiinit; ret = snd_soc_register_platform(&pdev->dev, &siu_platform); if (ret < 0) goto esocregp; pm_runtime_enable(&pdev->dev); return ret; esocregp: snd_soc_unregister_dai(&pdev->dev); edaiinit: iounmap(info->reg); emapreg: iounmap(info->yram); emapyram: iounmap(info->xram); emapxram: 
iounmap(info->pram); emappram: release_mem_region(res->start, resource_size(res)); ereqmemreg: egetres: ereqfw: kfree(info); return ret; } static int __devexit siu_remove(struct platform_device *pdev) { struct siu_info *info = dev_get_drvdata(&pdev->dev); struct resource *res; pm_runtime_disable(&pdev->dev); snd_soc_unregister_platform(&pdev->dev); snd_soc_unregister_dai(&pdev->dev); iounmap(info->reg); iounmap(info->yram); iounmap(info->xram); iounmap(info->pram); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res) release_mem_region(res->start, resource_size(res)); kfree(info); return 0; } static struct platform_driver siu_driver = { .driver = { .name = "siu-pcm-audio", }, .probe = siu_probe, .remove = __devexit_p(siu_remove), }; static int __init siu_init(void) { return platform_driver_register(&siu_driver); } static void __exit siu_exit(void) { platform_driver_unregister(&siu_driver); } module_init(siu_init) module_exit(siu_exit) MODULE_AUTHOR("Carlos Munoz <carlos@kenati.com>"); MODULE_DESCRIPTION("ALSA SoC SH7722 SIU driver"); MODULE_LICENSE("GPL");
gpl-2.0
knone1/android_kernel_asus_moorefield
drivers/external_drivers/camera/drivers/media/pci/atomisp2/css2300/sh_css_rx.c
53
13499
/* * Support for Medfield PNW Camera Imaging ISP subsystem. * * Copyright (c) 2010 Intel Corporation. All Rights Reserved. * * Copyright (c) 2010 Silicon Hive www.siliconhive.com. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * */ #define __INLINE_INPUT_SYSTEM__ #include "input_system.h" #include "sh_css.h" #include "sh_css_rx.h" #include "sh_css_internal.h" void sh_css_rx_enable_all_interrupts(void) { hrt_data bits = receiver_port_reg_load(RX0_ID, MIPI_PORT1_ID, _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX); bits |= (1U << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT) | #if defined(HAS_RX_VERSION_2) (1U << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT) | #endif (1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT) | (1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT) | (1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT) | (1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT) | (1U << _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT) | (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT) | (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT) | /* (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_NO_CORRECTION_BIT) | */ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT) | (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT) | (1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT) | (1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT) | (1U << _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT) | (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT); /* (1U << _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT); 
*/ receiver_port_reg_store(RX0_ID, MIPI_PORT1_ID, _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX, bits); return; } unsigned int sh_css_rx_get_interrupt_reg(void) { return receiver_port_reg_load(RX0_ID, MIPI_PORT1_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX); } void sh_css_rx_get_interrupt_info(unsigned int *irq_infos) { unsigned long infos = 0; hrt_data bits = receiver_port_reg_load(RX0_ID, MIPI_PORT1_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX); if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT)) infos |= SH_CSS_RX_IRQ_INFO_BUFFER_OVERRUN; #if defined(HAS_RX_VERSION_2) if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT)) infos |= SH_CSS_RX_IRQ_INFO_INIT_TIMEOUT; #endif if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT)) infos |= SH_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE; if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT)) infos |= SH_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE; if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT)) infos |= SH_CSS_RX_IRQ_INFO_ECC_CORRECTED; if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT)) infos |= SH_CSS_RX_IRQ_INFO_ERR_SOT; if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT)) infos |= SH_CSS_RX_IRQ_INFO_ERR_SOT_SYNC; if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT)) infos |= SH_CSS_RX_IRQ_INFO_ERR_CONTROL; if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT)) infos |= SH_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE; if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT)) infos |= SH_CSS_RX_IRQ_INFO_ERR_CRC; if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT)) infos |= SH_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID; if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT)) infos |= SH_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC; if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT)) infos |= SH_CSS_RX_IRQ_INFO_ERR_FRAME_DATA; if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT)) infos |= SH_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT; if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT)) infos |= 
SH_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC; if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT)) infos |= SH_CSS_RX_IRQ_INFO_ERR_LINE_SYNC; *irq_infos = infos; } void sh_css_rx_clear_interrupt_info(unsigned int irq_infos) { hrt_data bits = receiver_port_reg_load(RX0_ID, MIPI_PORT1_ID, _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX); /* MW: Why do we remap the receiver bitmap */ if (irq_infos & SH_CSS_RX_IRQ_INFO_BUFFER_OVERRUN) bits |= 1U << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT; #if defined(HAS_RX_VERSION_2) if (irq_infos & SH_CSS_RX_IRQ_INFO_INIT_TIMEOUT) bits |= 1U << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT; #endif if (irq_infos & SH_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE) bits |= 1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT; if (irq_infos & SH_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE) bits |= 1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT; if (irq_infos & SH_CSS_RX_IRQ_INFO_ECC_CORRECTED) bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT; if (irq_infos & SH_CSS_RX_IRQ_INFO_ERR_SOT) bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT; if (irq_infos & SH_CSS_RX_IRQ_INFO_ERR_SOT_SYNC) bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT; if (irq_infos & SH_CSS_RX_IRQ_INFO_ERR_CONTROL) bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT; if (irq_infos & SH_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE) bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT; if (irq_infos & SH_CSS_RX_IRQ_INFO_ERR_CRC) bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT; if (irq_infos & SH_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID) bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT; if (irq_infos & SH_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC) bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT; if (irq_infos & SH_CSS_RX_IRQ_INFO_ERR_FRAME_DATA) bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT; if (irq_infos & SH_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT) bits |= 1U << _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT; if (irq_infos & SH_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC) bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT; if (irq_infos & 
SH_CSS_RX_IRQ_INFO_ERR_LINE_SYNC) bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT; receiver_port_reg_store(RX0_ID, MIPI_PORT1_ID, _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX, bits); return; } enum sh_css_err sh_css_input_format_type( enum sh_css_input_format input_format, mipi_predictor_t compression, unsigned int *fmt_type) { /* * Custom (user defined) modes. Used for compressed * MIPI transfers * * Checkpatch thinks the indent before "if" is suspect * I think the only suspect part is the missing "else" * because of the return. */ if (compression != MIPI_PREDICTOR_NONE) { switch (input_format) { case SH_CSS_INPUT_FORMAT_RAW_6: *fmt_type = 6; break; case SH_CSS_INPUT_FORMAT_RAW_7: *fmt_type = 7; break; case SH_CSS_INPUT_FORMAT_RAW_8: *fmt_type = 8; break; case SH_CSS_INPUT_FORMAT_RAW_10: *fmt_type = 10; break; case SH_CSS_INPUT_FORMAT_RAW_12: *fmt_type = 12; break; case SH_CSS_INPUT_FORMAT_RAW_14: *fmt_type = 14; break; case SH_CSS_INPUT_FORMAT_RAW_16: *fmt_type = 16; break; default: return sh_css_err_internal_error; } return sh_css_success; } /* * This mapping comes from the Arasan CSS function spec * (CSS_func_spec1.08_ahb_sep29_08.pdf). 
* * MW: For some reason the mapping is not 1-to-1 */ switch (input_format) { case SH_CSS_INPUT_FORMAT_RGB_888: *fmt_type = MIPI_FORMAT_RGB888; break; case SH_CSS_INPUT_FORMAT_RGB_555: *fmt_type = MIPI_FORMAT_RGB555; break; case SH_CSS_INPUT_FORMAT_RGB_444: *fmt_type = MIPI_FORMAT_RGB444; break; case SH_CSS_INPUT_FORMAT_RGB_565: *fmt_type = MIPI_FORMAT_RGB565; break; case SH_CSS_INPUT_FORMAT_RGB_666: *fmt_type = MIPI_FORMAT_RGB666; break; case SH_CSS_INPUT_FORMAT_RAW_8: *fmt_type = MIPI_FORMAT_RAW8; break; case SH_CSS_INPUT_FORMAT_RAW_10: *fmt_type = MIPI_FORMAT_RAW10; break; case SH_CSS_INPUT_FORMAT_RAW_6: *fmt_type = MIPI_FORMAT_RAW6; break; case SH_CSS_INPUT_FORMAT_RAW_7: *fmt_type = MIPI_FORMAT_RAW7; break; case SH_CSS_INPUT_FORMAT_RAW_12: *fmt_type = MIPI_FORMAT_RAW12; break; case SH_CSS_INPUT_FORMAT_RAW_14: *fmt_type = MIPI_FORMAT_RAW14; break; case SH_CSS_INPUT_FORMAT_YUV420_8: *fmt_type = MIPI_FORMAT_YUV420_8; break; case SH_CSS_INPUT_FORMAT_YUV420_10: *fmt_type = MIPI_FORMAT_YUV420_10; break; case SH_CSS_INPUT_FORMAT_YUV422_8: *fmt_type = MIPI_FORMAT_YUV422_8; break; case SH_CSS_INPUT_FORMAT_YUV422_10: *fmt_type = MIPI_FORMAT_YUV422_10; break; case SH_CSS_INPUT_FORMAT_BINARY_8: *fmt_type = MIPI_FORMAT_BINARY_8; break; case SH_CSS_INPUT_FORMAT_YUV420_8_LEGACY: *fmt_type = MIPI_FORMAT_YUV420_8_LEGACY; break; case SH_CSS_INPUT_FORMAT_RAW_16: /* This is not specified by Arasan, so we use * 17 for now. 
*/ *fmt_type = MIPI_FORMAT_RAW16; break; #if defined(HAS_RX_VERSION_2) default: if (input_format > (enum sh_css_input_format)N_MIPI_FORMAT) return sh_css_err_internal_error; *fmt_type = input_format; break; #else default: return sh_css_err_internal_error; #endif } return sh_css_success; } #if defined(HAS_RX_VERSION_1) /* This is a device function, shouldn't be here */ static void sh_css_rx_set_bits( const mipi_port_ID_t port, const unsigned int reg, const unsigned int lsb, const unsigned int bits, const unsigned int val) { hrt_data data = receiver_port_reg_load(RX0_ID, port, reg); /* prevent writing out of range */ hrt_data tmp = val & ((1U << bits) - 1); /* shift into place */ data |= (tmp << lsb); receiver_port_reg_store(RX0_ID, port, reg, data); return; } static void sh_css_rx_set_num_lanes( const mipi_port_ID_t port, const unsigned int lanes) { sh_css_rx_set_bits(port, _HRT_CSS_RECEIVER_FUNC_PROG_REG_IDX, _HRT_CSS_RECEIVER_AHB_CSI2_NUM_DATA_LANES_IDX, _HRT_CSS_RECEIVER_AHB_CSI2_NUM_DATA_LANES_BITS, lanes); return; } static void sh_css_rx_set_timeout( const mipi_port_ID_t port, const unsigned int timeout) { sh_css_rx_set_bits(port, _HRT_CSS_RECEIVER_FUNC_PROG_REG_IDX, _HRT_CSS_RECEIVER_DATA_TIMEOUT_IDX, _HRT_CSS_RECEIVER_DATA_TIMEOUT_BITS, timeout); return; } static void sh_css_rx_set_compression( const mipi_port_ID_t port, const mipi_predictor_t comp) { unsigned int reg = _HRT_CSS_RECEIVER_COMP_PREDICT_REG_IDX; assert(comp < N_MIPI_PREDICTOR_TYPES); receiver_port_reg_store(RX0_ID, port, reg, comp); return; } static void sh_css_rx_set_uncomp_size( const mipi_port_ID_t port, const unsigned int size) { sh_css_rx_set_bits(port, _HRT_CSS_RECEIVER_AHB_COMP_FORMAT_REG_IDX, _HRT_CSS_RECEIVER_AHB_COMP_NUM_BITS_IDX, _HRT_CSS_RECEIVER_AHB_COMP_NUM_BITS_BITS, size); return; } static void sh_css_rx_set_comp_size( const mipi_port_ID_t port, const unsigned int size) { sh_css_rx_set_bits(port, _HRT_CSS_RECEIVER_AHB_COMP_FORMAT_REG_IDX, _HRT_CSS_RECEIVER_AHB_COMP_RAW_BITS_IDX, 
_HRT_CSS_RECEIVER_AHB_COMP_RAW_BITS_BITS, size); return; } #endif /* defined(HAS_RX_VERSION_1) */ void sh_css_rx_configure( const rx_cfg_t *config) { mipi_port_ID_t port = config->port; /* turn off all ports just in case */ sh_css_rx_disable(); #if defined(HAS_RX_VERSION_2) if (MIPI_PORT_LANES[config->mode][port] != MIPI_0LANE_CFG) { receiver_port_reg_store(RX0_ID, port, _HRT_CSS_RECEIVER_FUNC_PROG_REG_IDX, config->timeout); receiver_port_reg_store(RX0_ID, port, _HRT_CSS_RECEIVER_2400_INIT_COUNT_REG_IDX, config->initcount); receiver_port_reg_store(RX0_ID, port, _HRT_CSS_RECEIVER_2400_SYNC_COUNT_REG_IDX, config->synccount); receiver_port_reg_store(RX0_ID, port, _HRT_CSS_RECEIVER_2400_RX_COUNT_REG_IDX, config->rxcount); /* * MW: A bit of a hack, straight wiring of the capture units, * assuming they are linearly enumerated */ input_system_sub_system_reg_store(INPUT_SYSTEM0_ID, GPREGS_UNIT0_ID, HIVE_ISYS_GPREG_MULTICAST_A_IDX + (unsigned int)port, INPUT_SYSTEM_CSI_BACKEND); input_system_sub_system_reg_store(INPUT_SYSTEM0_ID, GPREGS_UNIT0_ID, HIVE_ISYS_GPREG_MUX_IDX, (input_system_multiplex_t)port); } /* * signal input * receiver_reg_store(RX0_ID, _HRT_CSS_RECEIVER_TWO_PIXEL_EN_REG_IDX, config->mode); */ receiver_reg_store(RX0_ID, _HRT_CSS_RECEIVER_TWO_PIXEL_EN_REG_IDX, config->is_two_ppc); /* enable the selected port(s) */ for (port = (mipi_port_ID_t)0; port < N_MIPI_PORT_ID; port++) { if (MIPI_PORT_LANES[config->mode][port] != MIPI_0LANE_CFG) receiver_port_reg_store(RX0_ID, port, _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX, true); } #elif defined(HAS_RX_VERSION_1) /* All settings are per port */ sh_css_rx_set_timeout(port, config->timeout); /* configure the selected port */ sh_css_rx_set_num_lanes(port, config->num_lanes); sh_css_rx_set_compression(port, config->comp); sh_css_rx_set_uncomp_size(port, config->uncomp_bpp); sh_css_rx_set_comp_size(port, config->comp_bpp); receiver_port_reg_store(RX0_ID, port, _HRT_CSS_RECEIVER_TWO_PIXEL_EN_REG_IDX, config->is_two_ppc); /* 
enable the selected port */ receiver_port_reg_store(RX0_ID, port, _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX, true); #else #error "sh_css_rx.c: RX version must be one of {RX_VERSION_1, RX_VERSION_2}" #endif return; } void sh_css_rx_disable(void) { mipi_port_ID_t port; for (port = (mipi_port_ID_t)0; port < N_MIPI_PORT_ID; port++) { receiver_port_reg_store(RX0_ID, port, _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX, false); } return; }
gpl-2.0
kozmikkick/eternityprj-kernel-endeavoru-128
drivers/isdn/mISDN/socket.c
309
18242
/* * * Author Karsten Keil <kkeil@novell.com> * * Copyright 2008 by Karsten Keil <kkeil@novell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/mISDNif.h> #include <linux/slab.h> #include "core.h" static u_int *debug; static struct proto mISDN_proto = { .name = "misdn", .owner = THIS_MODULE, .obj_size = sizeof(struct mISDN_sock) }; #define _pms(sk) ((struct mISDN_sock *)sk) static struct mISDN_sock_list data_sockets = { .lock = __RW_LOCK_UNLOCKED(data_sockets.lock) }; static struct mISDN_sock_list base_sockets = { .lock = __RW_LOCK_UNLOCKED(base_sockets.lock) }; #define L2_HEADER_LEN 4 static inline struct sk_buff * _l2_alloc_skb(unsigned int len, gfp_t gfp_mask) { struct sk_buff *skb; skb = alloc_skb(len + L2_HEADER_LEN, gfp_mask); if (likely(skb)) skb_reserve(skb, L2_HEADER_LEN); return skb; } static void mISDN_sock_link(struct mISDN_sock_list *l, struct sock *sk) { write_lock_bh(&l->lock); sk_add_node(sk, &l->head); write_unlock_bh(&l->lock); } static void mISDN_sock_unlink(struct mISDN_sock_list *l, struct sock *sk) { write_lock_bh(&l->lock); sk_del_node_init(sk); write_unlock_bh(&l->lock); } static int mISDN_send(struct mISDNchannel *ch, struct sk_buff *skb) { struct mISDN_sock *msk; int err; msk = container_of(ch, struct mISDN_sock, ch); if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s len %d %p\n", __func__, skb->len, skb); if (msk->sk.sk_state == MISDN_CLOSED) return -EUNATCH; __net_timestamp(skb); err = sock_queue_rcv_skb(&msk->sk, skb); if (err) printk(KERN_WARNING "%s: error %d\n", __func__, err); return err; } static int mISDN_ctrl(struct mISDNchannel 
*ch, u_int cmd, void *arg) { struct mISDN_sock *msk; msk = container_of(ch, struct mISDN_sock, ch); if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s(%p, %x, %p)\n", __func__, ch, cmd, arg); switch (cmd) { case CLOSE_CHANNEL: msk->sk.sk_state = MISDN_CLOSED; break; } return 0; } static inline void mISDN_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) { struct timeval tv; if (_pms(sk)->cmask & MISDN_TIME_STAMP) { skb_get_timestamp(skb, &tv); put_cmsg(msg, SOL_MISDN, MISDN_TIME_STAMP, sizeof(tv), &tv); } } static int mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sk_buff *skb; struct sock *sk = sock->sk; struct sockaddr_mISDN *maddr; int copied, err; if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s: len %d, flags %x ch.nr %d, proto %x\n", __func__, (int)len, flags, _pms(sk)->ch.nr, sk->sk_protocol); if (flags & (MSG_OOB)) return -EOPNOTSUPP; if (sk->sk_state == MISDN_CLOSED) return 0; skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); if (!skb) return err; if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) { msg->msg_namelen = sizeof(struct sockaddr_mISDN); maddr = (struct sockaddr_mISDN *)msg->msg_name; maddr->family = AF_ISDN; maddr->dev = _pms(sk)->dev->id; if ((sk->sk_protocol == ISDN_P_LAPD_TE) || (sk->sk_protocol == ISDN_P_LAPD_NT)) { maddr->channel = (mISDN_HEAD_ID(skb) >> 16) & 0xff; maddr->tei = (mISDN_HEAD_ID(skb) >> 8) & 0xff; maddr->sapi = mISDN_HEAD_ID(skb) & 0xff; } else { maddr->channel = _pms(sk)->ch.nr; maddr->sapi = _pms(sk)->ch.addr & 0xFF; maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF; } } else { if (msg->msg_namelen) printk(KERN_WARNING "%s: too small namelen %d\n", __func__, msg->msg_namelen); msg->msg_namelen = 0; } copied = skb->len + MISDN_HEADER_LEN; if (len < copied) { if (flags & MSG_PEEK) atomic_dec(&skb->users); else skb_queue_head(&sk->sk_receive_queue, skb); return -ENOSPC; } memcpy(skb_push(skb, MISDN_HEADER_LEN), mISDN_HEAD_P(skb), 
MISDN_HEADER_LEN); err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); mISDN_sock_cmsg(sk, msg, skb); skb_free_datagram(sk, skb); return err ? : copied; } static int mISDN_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct sk_buff *skb; int err = -ENOMEM; struct sockaddr_mISDN *maddr; if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s: len %d flags %x ch %d proto %x\n", __func__, (int)len, msg->msg_flags, _pms(sk)->ch.nr, sk->sk_protocol); if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE)) return -EINVAL; if (len < MISDN_HEADER_LEN) return -EINVAL; if (sk->sk_state != MISDN_BOUND) return -EBADFD; lock_sock(sk); skb = _l2_alloc_skb(len, GFP_KERNEL); if (!skb) goto done; if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { err = -EFAULT; goto done; } memcpy(mISDN_HEAD_P(skb), skb->data, MISDN_HEADER_LEN); skb_pull(skb, MISDN_HEADER_LEN); if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) { /* if we have a address, we use it */ maddr = (struct sockaddr_mISDN *)msg->msg_name; mISDN_HEAD_ID(skb) = maddr->channel; } else { /* use default for L2 messages */ if ((sk->sk_protocol == ISDN_P_LAPD_TE) || (sk->sk_protocol == ISDN_P_LAPD_NT)) mISDN_HEAD_ID(skb) = _pms(sk)->ch.nr; } if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s: ID:%x\n", __func__, mISDN_HEAD_ID(skb)); err = -ENODEV; if (!_pms(sk)->ch.peer) goto done; err = _pms(sk)->ch.recv(_pms(sk)->ch.peer, skb); if (err) goto done; else { skb = NULL; err = len; } done: if (skb) kfree_skb(skb); release_sock(sk); return err; } static int data_sock_release(struct socket *sock) { struct sock *sk = sock->sk; if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk); if (!sk) return 0; switch (sk->sk_protocol) { case ISDN_P_TE_S0: case ISDN_P_NT_S0: case ISDN_P_TE_E1: case ISDN_P_NT_E1: if (sk->sk_state == MISDN_BOUND) delete_channel(&_pms(sk)->ch); 
else mISDN_sock_unlink(&data_sockets, sk); break; case ISDN_P_LAPD_TE: case ISDN_P_LAPD_NT: case ISDN_P_B_RAW: case ISDN_P_B_HDLC: case ISDN_P_B_X75SLP: case ISDN_P_B_L2DTMF: case ISDN_P_B_L2DSP: case ISDN_P_B_L2DSPHDLC: delete_channel(&_pms(sk)->ch); mISDN_sock_unlink(&data_sockets, sk); break; } lock_sock(sk); sock_orphan(sk); skb_queue_purge(&sk->sk_receive_queue); release_sock(sk); sock_put(sk); return 0; } static int data_sock_ioctl_bound(struct sock *sk, unsigned int cmd, void __user *p) { struct mISDN_ctrl_req cq; int err = -EINVAL, val[2]; struct mISDNchannel *bchan, *next; lock_sock(sk); if (!_pms(sk)->dev) { err = -ENODEV; goto done; } switch (cmd) { case IMCTRLREQ: if (copy_from_user(&cq, p, sizeof(cq))) { err = -EFAULT; break; } if ((sk->sk_protocol & ~ISDN_P_B_MASK) == ISDN_P_B_START) { list_for_each_entry_safe(bchan, next, &_pms(sk)->dev->bchannels, list) { if (bchan->nr == cq.channel) { err = bchan->ctrl(bchan, CONTROL_CHANNEL, &cq); break; } } } else err = _pms(sk)->dev->D.ctrl(&_pms(sk)->dev->D, CONTROL_CHANNEL, &cq); if (err) break; if (copy_to_user(p, &cq, sizeof(cq))) err = -EFAULT; break; case IMCLEAR_L2: if (sk->sk_protocol != ISDN_P_LAPD_NT) { err = -EINVAL; break; } val[0] = cmd; if (get_user(val[1], (int __user *)p)) { err = -EFAULT; break; } err = _pms(sk)->dev->teimgr->ctrl(_pms(sk)->dev->teimgr, CONTROL_CHANNEL, val); break; case IMHOLD_L1: if (sk->sk_protocol != ISDN_P_LAPD_NT && sk->sk_protocol != ISDN_P_LAPD_TE) { err = -EINVAL; break; } val[0] = cmd; if (get_user(val[1], (int __user *)p)) { err = -EFAULT; break; } err = _pms(sk)->dev->teimgr->ctrl(_pms(sk)->dev->teimgr, CONTROL_CHANNEL, val); break; default: err = -EINVAL; break; } done: release_sock(sk); return err; } static int data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { int err = 0, id; struct sock *sk = sock->sk; struct mISDNdevice *dev; struct mISDNversion ver; switch (cmd) { case IMGETVERSION: ver.major = MISDN_MAJOR_VERSION; ver.minor = 
MISDN_MINOR_VERSION; ver.release = MISDN_RELEASE; if (copy_to_user((void __user *)arg, &ver, sizeof(ver))) err = -EFAULT; break; case IMGETCOUNT: id = get_mdevice_count(); if (put_user(id, (int __user *)arg)) err = -EFAULT; break; case IMGETDEVINFO: if (get_user(id, (int __user *)arg)) { err = -EFAULT; break; } dev = get_mdevice(id); if (dev) { struct mISDN_devinfo di; memset(&di, 0, sizeof(di)); di.id = dev->id; di.Dprotocols = dev->Dprotocols; di.Bprotocols = dev->Bprotocols | get_all_Bprotocols(); di.protocol = dev->D.protocol; memcpy(di.channelmap, dev->channelmap, sizeof(di.channelmap)); di.nrbchan = dev->nrbchan; strcpy(di.name, dev_name(&dev->dev)); if (copy_to_user((void __user *)arg, &di, sizeof(di))) err = -EFAULT; } else err = -ENODEV; break; default: if (sk->sk_state == MISDN_BOUND) err = data_sock_ioctl_bound(sk, cmd, (void __user *)arg); else err = -ENOTCONN; } return err; } static int data_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len) { struct sock *sk = sock->sk; int err = 0, opt = 0; if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s(%p, %d, %x, %p, %d)\n", __func__, sock, level, optname, optval, len); lock_sock(sk); switch (optname) { case MISDN_TIME_STAMP: if (get_user(opt, (int __user *)optval)) { err = -EFAULT; break; } if (opt) _pms(sk)->cmask |= MISDN_TIME_STAMP; else _pms(sk)->cmask &= ~MISDN_TIME_STAMP; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int data_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; int len, opt; if (get_user(len, optlen)) return -EFAULT; switch (optname) { case MISDN_TIME_STAMP: if (_pms(sk)->cmask & MISDN_TIME_STAMP) opt = 1; else opt = 0; if (put_user(opt, optval)) return -EFAULT; break; default: return -ENOPROTOOPT; } return 0; } static int data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sockaddr_mISDN *maddr = 
(struct sockaddr_mISDN *) addr; struct sock *sk = sock->sk; struct hlist_node *node; struct sock *csk; int err = 0; if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk); if (addr_len != sizeof(struct sockaddr_mISDN)) return -EINVAL; if (!maddr || maddr->family != AF_ISDN) return -EINVAL; lock_sock(sk); if (_pms(sk)->dev) { err = -EALREADY; goto done; } _pms(sk)->dev = get_mdevice(maddr->dev); if (!_pms(sk)->dev) { err = -ENODEV; goto done; } if (sk->sk_protocol < ISDN_P_B_START) { read_lock_bh(&data_sockets.lock); sk_for_each(csk, node, &data_sockets.head) { if (sk == csk) continue; if (_pms(csk)->dev != _pms(sk)->dev) continue; if (csk->sk_protocol >= ISDN_P_B_START) continue; if (IS_ISDN_P_TE(csk->sk_protocol) == IS_ISDN_P_TE(sk->sk_protocol)) continue; read_unlock_bh(&data_sockets.lock); err = -EBUSY; goto done; } read_unlock_bh(&data_sockets.lock); } _pms(sk)->ch.send = mISDN_send; _pms(sk)->ch.ctrl = mISDN_ctrl; switch (sk->sk_protocol) { case ISDN_P_TE_S0: case ISDN_P_NT_S0: case ISDN_P_TE_E1: case ISDN_P_NT_E1: mISDN_sock_unlink(&data_sockets, sk); err = connect_layer1(_pms(sk)->dev, &_pms(sk)->ch, sk->sk_protocol, maddr); if (err) mISDN_sock_link(&data_sockets, sk); break; case ISDN_P_LAPD_TE: case ISDN_P_LAPD_NT: err = create_l2entity(_pms(sk)->dev, &_pms(sk)->ch, sk->sk_protocol, maddr); break; case ISDN_P_B_RAW: case ISDN_P_B_HDLC: case ISDN_P_B_X75SLP: case ISDN_P_B_L2DTMF: case ISDN_P_B_L2DSP: case ISDN_P_B_L2DSPHDLC: err = connect_Bstack(_pms(sk)->dev, &_pms(sk)->ch, sk->sk_protocol, maddr); break; default: err = -EPROTONOSUPPORT; } if (err) goto done; sk->sk_state = MISDN_BOUND; _pms(sk)->ch.protocol = sk->sk_protocol; done: release_sock(sk); return err; } static int data_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer) { struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr; struct sock *sk = sock->sk; if (!_pms(sk)->dev) return -EBADFD; lock_sock(sk); *addr_len = sizeof(*maddr); 
maddr->dev = _pms(sk)->dev->id; maddr->channel = _pms(sk)->ch.nr; maddr->sapi = _pms(sk)->ch.addr & 0xff; maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xff; release_sock(sk); return 0; } static const struct proto_ops data_sock_ops = { .family = PF_ISDN, .owner = THIS_MODULE, .release = data_sock_release, .ioctl = data_sock_ioctl, .bind = data_sock_bind, .getname = data_sock_getname, .sendmsg = mISDN_sock_sendmsg, .recvmsg = mISDN_sock_recvmsg, .poll = datagram_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = data_sock_setsockopt, .getsockopt = data_sock_getsockopt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .mmap = sock_no_mmap }; static int data_sock_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; if (sock->type != SOCK_DGRAM) return -ESOCKTNOSUPPORT; sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sock->ops = &data_sock_ops; sock->state = SS_UNCONNECTED; sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = protocol; sk->sk_state = MISDN_OPEN; mISDN_sock_link(&data_sockets, sk); return 0; } static int base_sock_release(struct socket *sock) { struct sock *sk = sock->sk; printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk); if (!sk) return 0; mISDN_sock_unlink(&base_sockets, sk); sock_orphan(sk); sock_put(sk); return 0; } static int base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { int err = 0, id; struct mISDNdevice *dev; struct mISDNversion ver; switch (cmd) { case IMGETVERSION: ver.major = MISDN_MAJOR_VERSION; ver.minor = MISDN_MINOR_VERSION; ver.release = MISDN_RELEASE; if (copy_to_user((void __user *)arg, &ver, sizeof(ver))) err = -EFAULT; break; case IMGETCOUNT: id = get_mdevice_count(); if (put_user(id, (int __user *)arg)) err = -EFAULT; break; case IMGETDEVINFO: if (get_user(id, (int __user *)arg)) { err = -EFAULT; break; } dev = get_mdevice(id); if (dev) { struct mISDN_devinfo 
di; memset(&di, 0, sizeof(di)); di.id = dev->id; di.Dprotocols = dev->Dprotocols; di.Bprotocols = dev->Bprotocols | get_all_Bprotocols(); di.protocol = dev->D.protocol; memcpy(di.channelmap, dev->channelmap, sizeof(di.channelmap)); di.nrbchan = dev->nrbchan; strcpy(di.name, dev_name(&dev->dev)); if (copy_to_user((void __user *)arg, &di, sizeof(di))) err = -EFAULT; } else err = -ENODEV; break; case IMSETDEVNAME: { struct mISDN_devrename dn; if (copy_from_user(&dn, (void __user *)arg, sizeof(dn))) { err = -EFAULT; break; } dev = get_mdevice(dn.id); if (dev) err = device_rename(&dev->dev, dn.name); else err = -ENODEV; } break; default: err = -EINVAL; } return err; } static int base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr; struct sock *sk = sock->sk; int err = 0; if (!maddr || maddr->family != AF_ISDN) return -EINVAL; lock_sock(sk); if (_pms(sk)->dev) { err = -EALREADY; goto done; } _pms(sk)->dev = get_mdevice(maddr->dev); if (!_pms(sk)->dev) { err = -ENODEV; goto done; } sk->sk_state = MISDN_BOUND; done: release_sock(sk); return err; } static const struct proto_ops base_sock_ops = { .family = PF_ISDN, .owner = THIS_MODULE, .release = base_sock_release, .ioctl = base_sock_ioctl, .bind = base_sock_bind, .getname = sock_no_getname, .sendmsg = sock_no_sendmsg, .recvmsg = sock_no_recvmsg, .poll = sock_no_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .mmap = sock_no_mmap }; static int base_sock_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; if (sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sock->ops = &base_sock_ops; sock->state = SS_UNCONNECTED; sock_reset_flag(sk, 
SOCK_ZAPPED); sk->sk_protocol = protocol; sk->sk_state = MISDN_OPEN; mISDN_sock_link(&base_sockets, sk); return 0; } static int mISDN_sock_create(struct net *net, struct socket *sock, int proto, int kern) { int err = -EPROTONOSUPPORT; switch (proto) { case ISDN_P_BASE: err = base_sock_create(net, sock, proto); break; case ISDN_P_TE_S0: case ISDN_P_NT_S0: case ISDN_P_TE_E1: case ISDN_P_NT_E1: case ISDN_P_LAPD_TE: case ISDN_P_LAPD_NT: case ISDN_P_B_RAW: case ISDN_P_B_HDLC: case ISDN_P_B_X75SLP: case ISDN_P_B_L2DTMF: case ISDN_P_B_L2DSP: case ISDN_P_B_L2DSPHDLC: err = data_sock_create(net, sock, proto); break; default: return err; } return err; } static const struct net_proto_family mISDN_sock_family_ops = { .owner = THIS_MODULE, .family = PF_ISDN, .create = mISDN_sock_create, }; int misdn_sock_init(u_int *deb) { int err; debug = deb; err = sock_register(&mISDN_sock_family_ops); if (err) printk(KERN_ERR "%s: error(%d)\n", __func__, err); return err; } void misdn_sock_cleanup(void) { sock_unregister(PF_ISDN); }
gpl-2.0
johnzz/fastsocket
kernel/lib/inflate.c
565
39493
#define DEBG(x) #define DEBG1(x) /* inflate.c -- Not copyrighted 1992 by Mark Adler version c10p1, 10 January 1993 */ /* * Adapted for booting Linux by Hannu Savolainen 1993 * based on gzip-1.0.3 * * Nicolas Pitre <nico@fluxnic.net>, 1999/04/14 : * Little mods for all variable to reside either into rodata or bss segments * by marking constant variables with 'const' and initializing all the others * at run-time only. This allows for the kernel uncompressor to run * directly from Flash or ROM memory on embedded systems. */ /* Inflate deflated (PKZIP's method 8 compressed) data. The compression method searches for as much of the current string of bytes (up to a length of 258) in the previous 32 K bytes. If it doesn't find any matches (of at least length 3), it codes the next byte. Otherwise, it codes the length of the matched string and its distance backwards from the current position. There is a single Huffman code that codes both single bytes (called "literals") and match lengths. A second Huffman code codes the distance information, which follows a length code. Each length or distance code actually represents a base value and a number of "extra" (sometimes zero) bits to get to add to the base value. At the end of each deflated block is a special end-of-block (EOB) literal/ length code. The decoding process is basically: get a literal/length code; if EOB then done; if a literal, emit the decoded byte; if a length then get the distance and emit the referred-to bytes from the sliding window of previously emitted data. There are (currently) three kinds of inflate blocks: stored, fixed, and dynamic. The compressor deals with some chunk of data at a time, and decides which method to use on a chunk-by-chunk basis. A chunk might typically be 32 K or 64 K. If the chunk is incompressible, then the "stored" method is used. In this case, the bytes are simply stored as is, eight bits per byte, with none of the above coding. 
The bytes are preceded by a count, since there is no longer an EOB code. If the data is compressible, then either the fixed or dynamic methods are used. In the dynamic method, the compressed data is preceded by an encoding of the literal/length and distance Huffman codes that are to be used to decode this block. The representation is itself Huffman coded, and so is preceded by a description of that code. These code descriptions take up a little space, and so for small blocks, there is a predefined set of codes, called the fixed codes. The fixed method is used if the block codes up smaller that way (usually for quite small chunks), otherwise the dynamic method is used. In the latter case, the codes are customized to the probabilities in the current block, and so can code it much better than the pre-determined fixed codes. The Huffman codes themselves are decoded using a multi-level table lookup, in order to maximize the speed of decoding plus the speed of building the decoding tables. See the comments below that precede the lbits and dbits tuning parameters. */ /* Notes beyond the 1.93a appnote.txt: 1. Distance pointers never point before the beginning of the output stream. 2. Distance pointers can point back across blocks, up to 32k away. 3. There is an implied maximum of 7 bits for the bit length table and 15 bits for the actual data. 4. If only one code exists, then it is encoded using one bit. (Zero would be more efficient, but perhaps a little confusing.) If two codes exist, they are coded using one bit each (0 and 1). 5. There is no way of sending zero distance codes--a dummy must be sent if there are none. (History: a pre 2.0 version of PKZIP would store blocks with no distance codes, but this was discovered to be too harsh a criterion.) Valid only for 1.93a. 2.04c does allow zero distance codes, which is sent as one code of zero bits in length. 6. There are up to 286 literal/length codes. Code 256 represents the end-of-block. 
Note however that the static length tree defines 288 codes just to fill out the Huffman codes. Codes 286 and 287 cannot be used though, since there is no length base or extra bits defined for them. Similarly, there are up to 30 distance codes. However, static trees define 32 codes (all 5 bits) to fill out the Huffman codes, but the last two had better not show up in the data. 7. Unzip can check dynamic Huffman blocks for complete code sets. The exception is that a single code would not be complete (see #4). 8. The five bits following the block type is really the number of literal codes sent minus 257. 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits (1+6+6). Therefore, to output three times the length, you output three codes (1+1+1), whereas to output four times the same length, you only need two codes (1+3). Hmm. 10. In the tree reconstruction algorithm, Code = Code + Increment only if BitLength(i) is not zero. (Pretty obvious.) 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19) 12. Note: length code 284 can represent 227-258, but length code 285 really is 258. The last length deserves its own, short code since it gets used a lot in very redundant files. The length 258 is special since 258 - 3 (the min match length) is 255. 13. The literal/length and distance code bit lengths are read as a single stream of lengths. It is possible (and advantageous) for a repeat code (16, 17, or 18) to go across the boundary between the two sets of lengths. */ #include <linux/compiler.h> #ifdef RCSID static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #"; #endif #ifndef STATIC #if defined(STDC_HEADERS) || defined(HAVE_STDLIB_H) # include <sys/types.h> # include <stdlib.h> #endif #include "gzip.h" #define STATIC #endif /* !STATIC */ #ifndef INIT #define INIT #endif #define slide window /* Huffman code lookup table entry--this entry is four bytes for machines that have 16-bit pointers (e.g. PC's in the small or medium model). 
Valid extra bits are 0..13. e == 15 is EOB (end of block), e == 16 means that v is a literal, 16 < e < 32 means that v is a pointer to the next table, which codes e - 16 bits, and lastly e == 99 indicates an unused code. If a code with e == 99 is looked up, this implies an error in the data. */ struct huft { uch e; /* number of extra bits or operation */ uch b; /* number of bits in this code or subcode */ union { ush n; /* literal, length base, or distance base */ struct huft *t; /* pointer to next level of table */ } v; }; /* Function prototypes */ STATIC int INIT huft_build OF((unsigned *, unsigned, unsigned, const ush *, const ush *, struct huft **, int *)); STATIC int INIT huft_free OF((struct huft *)); STATIC int INIT inflate_codes OF((struct huft *, struct huft *, int, int)); STATIC int INIT inflate_stored OF((void)); STATIC int INIT inflate_fixed OF((void)); STATIC int INIT inflate_dynamic OF((void)); STATIC int INIT inflate_block OF((int *)); STATIC int INIT inflate OF((void)); /* The inflate algorithm uses a sliding 32 K byte window on the uncompressed stream to find repeated byte strings. This is implemented here as a circular buffer. The index is updated simply by incrementing and then ANDing with 0x7fff (32K-1). */ /* It is left to other modules to supply the 32 K area. It is assumed to be usable as if it were declared "uch slide[32768];" or as just "uch *slide;" and then malloc'ed in the latter case. The definition must be in unzip.h, included above. */ /* unsigned wp; current position in slide */ #define wp outcnt #define flush_output(w) (wp=(w),flush_window()) /* Tables for deflate from PKZIP's appnote.txt. 
*/ static const unsigned border[] = { /* Order of the bit length code lengths */ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; static const ush cplens[] = { /* Copy lengths for literal codes 257..285 */ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; /* note: see note #13 above about the 258 in this list. */ static const ush cplext[] = { /* Extra bits for literal codes 257..285 */ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99}; /* 99==invalid */ static const ush cpdist[] = { /* Copy offsets for distance codes 0..29 */ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577}; static const ush cpdext[] = { /* Extra bits for distance codes */ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; /* Macros for inflate() bit peeking and grabbing. The usage is: NEEDBITS(j) x = b & mask_bits[j]; DUMPBITS(j) where NEEDBITS makes sure that b has at least j bits in it, and DUMPBITS removes the bits from b. The macros use the variable k for the number of bits in b. Normally, b and k are register variables for speed, and are initialized at the beginning of a routine that uses these macros from a global bit buffer and count. If we assume that EOB will be the longest code, then we will never ask for bits with NEEDBITS that are beyond the end of the stream. So, NEEDBITS should not read any more bytes than are needed to meet the request. Then no bytes need to be "returned" to the buffer at the end of the last block. However, this assumption is not true for fixed blocks--the EOB code is 7 bits, but the other literal/length codes can be 8 or 9 bits. (The EOB code is shorter than other codes because fixed blocks are generally short. 
So, while a block always has an EOB, many other literal/length codes have a significantly lower probability of showing up at all.) However, by making the first table have a lookup of seven bits, the EOB code will be found in that first lookup, and so will not require that too many bits be pulled from the stream. */ STATIC ulg bb; /* bit buffer */ STATIC unsigned bk; /* bits in bit buffer */ STATIC const ush mask_bits[] = { 0x0000, 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff, 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff }; #define NEXTBYTE() ({ int v = get_byte(); if (v < 0) goto underrun; (uch)v; }) #define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}} #define DUMPBITS(n) {b>>=(n);k-=(n);} #ifndef NO_INFLATE_MALLOC /* A trivial malloc implementation, adapted from * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 */ static unsigned long malloc_ptr; static int malloc_count; static void *malloc(int size) { void *p; if (size < 0) error("Malloc error"); if (!malloc_ptr) malloc_ptr = free_mem_ptr; malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */ p = (void *)malloc_ptr; malloc_ptr += size; if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr) error("Out of memory"); malloc_count++; return p; } static void free(void *where) { malloc_count--; if (!malloc_count) malloc_ptr = free_mem_ptr; } #else #define malloc(a) kmalloc(a, GFP_KERNEL) #define free(a) kfree(a) #endif /* Huffman code decoding is performed using a multi-level table lookup. The fastest way to decode is to simply build a lookup table whose size is determined by the longest code. However, the time it takes to build this table can also be a factor if the data being decoded is not very long. The most common codes are necessarily the shortest codes, so those codes dominate the decoding time, and hence the speed. The idea is you can have a shorter table that decodes the shorter, more probable codes, and then point to subsidiary tables for the longer codes. 
The time it costs to decode the longer codes is then traded against the time it takes to make longer tables. This results of this trade are in the variables lbits and dbits below. lbits is the number of bits the first level table for literal/ length codes can decode in one step, and dbits is the same thing for the distance codes. Subsequent tables are also less than or equal to those sizes. These values may be adjusted either when all of the codes are shorter than that, in which case the longest code length in bits is used, or when the shortest code is *longer* than the requested table size, in which case the length of the shortest code in bits is used. There are two different values for the two tables, since they code a different number of possibilities each. The literal/length table codes 286 possible values, or in a flat code, a little over eight bits. The distance table codes 30 possible values, or a little less than five bits, flat. The optimum values for speed end up being about one bit more than those, so lbits is 8+1 and dbits is 5+1. The optimum values may differ though from machine to machine, and possibly even between compilers. Your mileage may vary. */ STATIC const int lbits = 9; /* bits in base literal/length lookup table */ STATIC const int dbits = 6; /* bits in base distance lookup table */ /* If BMAX needs to be larger than 16, then h and x[] should be ulg. 
*/ #define BMAX 16 /* maximum bit length of any code (16 for explode) */ #define N_MAX 288 /* maximum number of codes in any set */ STATIC unsigned hufts; /* track memory usage */ STATIC int INIT huft_build( unsigned *b, /* code lengths in bits (all assumed <= BMAX) */ unsigned n, /* number of codes (assumed <= N_MAX) */ unsigned s, /* number of simple-valued codes (0..s-1) */ const ush *d, /* list of base values for non-simple codes */ const ush *e, /* list of extra bits for non-simple codes */ struct huft **t, /* result: starting table */ int *m /* maximum lookup bits, returns actual */ ) /* Given a list of code lengths and a maximum table size, make a set of tables to decode that set of codes. Return zero on success, one if the given code set is incomplete (the tables are still built in this case), two if the input is invalid (all zero length codes or an oversubscribed set of lengths), and three if not enough memory. */ { unsigned a; /* counter for codes of length k */ unsigned f; /* i repeats in table every f entries */ int g; /* maximum code length */ int h; /* table level */ register unsigned i; /* counter, current code */ register unsigned j; /* counter */ register int k; /* number of bits in current code */ int l; /* bits per table (returned in m) */ register unsigned *p; /* pointer into c[], b[], or v[] */ register struct huft *q; /* points to current table */ struct huft r; /* table entry for structure assignment */ register int w; /* bits before this table == (l * h) */ unsigned *xp; /* pointer into x */ int y; /* number of dummy codes added */ unsigned z; /* number of entries in current table */ struct { unsigned c[BMAX+1]; /* bit length count table */ struct huft *u[BMAX]; /* table stack */ unsigned v[N_MAX]; /* values in order of bit length */ unsigned x[BMAX+1]; /* bit offsets, then code stack */ } *stk; unsigned *c, *v, *x; struct huft **u; int ret; DEBG("huft1 "); stk = malloc(sizeof(*stk)); if (stk == NULL) return 3; /* out of memory */ c = 
stk->c; v = stk->v; x = stk->x; u = stk->u; /* Generate counts for each bit length */ memzero(stk->c, sizeof(stk->c)); p = b; i = n; do { Tracecv(*p, (stderr, (n-i >= ' ' && n-i <= '~' ? "%c %d\n" : "0x%x %d\n"), n-i, *p)); c[*p]++; /* assume all entries <= BMAX */ p++; /* Can't combine with above line (Solaris bug) */ } while (--i); if (c[0] == n) /* null input--all zero length codes */ { *t = (struct huft *)NULL; *m = 0; ret = 2; goto out; } DEBG("huft2 "); /* Find minimum and maximum length, bound *m by those */ l = *m; for (j = 1; j <= BMAX; j++) if (c[j]) break; k = j; /* minimum code length */ if ((unsigned)l < j) l = j; for (i = BMAX; i; i--) if (c[i]) break; g = i; /* maximum code length */ if ((unsigned)l > i) l = i; *m = l; DEBG("huft3 "); /* Adjust last length count to fill out codes, if needed */ for (y = 1 << j; j < i; j++, y <<= 1) if ((y -= c[j]) < 0) { ret = 2; /* bad input: more codes than bits */ goto out; } if ((y -= c[i]) < 0) { ret = 2; goto out; } c[i] += y; DEBG("huft4 "); /* Generate starting offsets into the value table for each length */ x[1] = j = 0; p = c + 1; xp = x + 2; while (--i) { /* note that i == g from above */ *xp++ = (j += *p++); } DEBG("huft5 "); /* Make a table of values in order of bit lengths */ p = b; i = 0; do { if ((j = *p++) != 0) v[x[j]++] = i; } while (++i < n); n = x[g]; /* set n to length of v */ DEBG("h6 "); /* Generate the Huffman codes and for each, make the table entries */ x[0] = i = 0; /* first Huffman code is zero */ p = v; /* grab values in bit order */ h = -1; /* no tables yet--level -1 */ w = -l; /* bits decoded == (l * h) */ u[0] = (struct huft *)NULL; /* just to keep compilers happy */ q = (struct huft *)NULL; /* ditto */ z = 0; /* ditto */ DEBG("h6a "); /* go through the bit lengths (k already is bits in shortest code) */ for (; k <= g; k++) { DEBG("h6b "); a = c[k]; while (a--) { DEBG("h6b1 "); /* here i is the Huffman code of length k bits for value *p */ /* make tables up to required level */ while 
(k > w + l) { DEBG1("1 "); h++; w += l; /* previous table always l bits */ /* compute minimum size table less than or equal to l bits */ z = (z = g - w) > (unsigned)l ? l : z; /* upper limit on table size */ if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */ { /* too few codes for k-w bit table */ DEBG1("2 "); f -= a + 1; /* deduct codes from patterns left */ xp = c + k; if (j < z) while (++j < z) /* try smaller tables up to z bits */ { if ((f <<= 1) <= *++xp) break; /* enough codes to use up j bits */ f -= *xp; /* else deduct codes from patterns */ } } DEBG1("3 "); z = 1 << j; /* table entries for j-bit table */ /* allocate and link in new table */ if ((q = (struct huft *)malloc((z + 1)*sizeof(struct huft))) == (struct huft *)NULL) { if (h) huft_free(u[0]); ret = 3; /* not enough memory */ goto out; } DEBG1("4 "); hufts += z + 1; /* track memory usage */ *t = q + 1; /* link to list for huft_free() */ *(t = &(q->v.t)) = (struct huft *)NULL; u[h] = ++q; /* table starts after link */ DEBG1("5 "); /* connect to last table, if there is one */ if (h) { x[h] = i; /* save pattern for backing up */ r.b = (uch)l; /* bits to dump before this table */ r.e = (uch)(16 + j); /* bits in this table */ r.v.t = q; /* pointer to this table */ j = i >> (w - l); /* (get around Turbo C bug) */ u[h-1][j] = r; /* connect to last table */ } DEBG1("6 "); } DEBG("h6c "); /* set up table entry in r */ r.b = (uch)(k - w); if (p >= v + n) r.e = 99; /* out of values--invalid code */ else if (*p < s) { r.e = (uch)(*p < 256 ? 
16 : 15); /* 256 is end-of-block code */ r.v.n = (ush)(*p); /* simple code is just the value */ p++; /* one compiler does not like *p++ */ } else { r.e = (uch)e[*p - s]; /* non-simple--look up in lists */ r.v.n = d[*p++ - s]; } DEBG("h6d "); /* fill code-like entries with r */ f = 1 << (k - w); for (j = i >> w; j < z; j += f) q[j] = r; /* backwards increment the k-bit code i */ for (j = 1 << (k - 1); i & j; j >>= 1) i ^= j; i ^= j; /* backup over finished tables */ while ((i & ((1 << w) - 1)) != x[h]) { h--; /* don't need to update q */ w -= l; } DEBG("h6e "); } DEBG("h6f "); } DEBG("huft7 "); /* Return true (1) if we were given an incomplete table */ ret = y != 0 && g != 1; out: free(stk); return ret; } STATIC int INIT huft_free( struct huft *t /* table to free */ ) /* Free the malloc'ed tables built by huft_build(), which makes a linked list of the tables it made, with the links in a dummy first entry of each table. */ { register struct huft *p, *q; /* Go through linked list, freeing from the malloced (t[-1]) address. */ p = t; while (p != (struct huft *)NULL) { q = (--p)->v.t; free((char*)p); p = q; } return 0; } STATIC int INIT inflate_codes( struct huft *tl, /* literal/length decoder tables */ struct huft *td, /* distance decoder tables */ int bl, /* number of bits decoded by tl[] */ int bd /* number of bits decoded by td[] */ ) /* inflate (decompress) the codes in a deflated (compressed) block. Return an error code or zero if it all goes ok. 
*/ { register unsigned e; /* table entry flag/number of extra bits */ unsigned n, d; /* length and index for copy */ unsigned w; /* current window position */ struct huft *t; /* pointer to table entry */ unsigned ml, md; /* masks for bl and bd bits */ register ulg b; /* bit buffer */ register unsigned k; /* number of bits in bit buffer */ /* make local copies of globals */ b = bb; /* initialize bit buffer */ k = bk; w = wp; /* initialize window position */ /* inflate the coded data */ ml = mask_bits[bl]; /* precompute masks for speed */ md = mask_bits[bd]; for (;;) /* do until end of block */ { NEEDBITS((unsigned)bl) if ((e = (t = tl + ((unsigned)b & ml))->e) > 16) do { if (e == 99) return 1; DUMPBITS(t->b) e -= 16; NEEDBITS(e) } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16); DUMPBITS(t->b) if (e == 16) /* then it's a literal */ { slide[w++] = (uch)t->v.n; Tracevv((stderr, "%c", slide[w-1])); if (w == WSIZE) { flush_output(w); w = 0; } } else /* it's an EOB or a length */ { /* exit if end of block */ if (e == 15) break; /* get length of block to copy */ NEEDBITS(e) n = t->v.n + ((unsigned)b & mask_bits[e]); DUMPBITS(e); /* decode distance of block to copy */ NEEDBITS((unsigned)bd) if ((e = (t = td + ((unsigned)b & md))->e) > 16) do { if (e == 99) return 1; DUMPBITS(t->b) e -= 16; NEEDBITS(e) } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16); DUMPBITS(t->b) NEEDBITS(e) d = w - t->v.n - ((unsigned)b & mask_bits[e]); DUMPBITS(e) Tracevv((stderr,"\\[%d,%d]", w-d, n)); /* do the copy */ do { n -= (e = (e = WSIZE - ((d &= WSIZE-1) > w ? d : w)) > n ? 
n : e); #if !defined(NOMEMCPY) && !defined(DEBUG) if (w - d >= e) /* (this test assumes unsigned comparison) */ { memcpy(slide + w, slide + d, e); w += e; d += e; } else /* do it slow to avoid memcpy() overlap */ #endif /* !NOMEMCPY */ do { slide[w++] = slide[d++]; Tracevv((stderr, "%c", slide[w-1])); } while (--e); if (w == WSIZE) { flush_output(w); w = 0; } } while (n); } } /* restore the globals from the locals */ wp = w; /* restore global window pointer */ bb = b; /* restore global bit buffer */ bk = k; /* done */ return 0; underrun: return 4; /* Input underrun */ } STATIC int INIT inflate_stored(void) /* "decompress" an inflated type 0 (stored) block. */ { unsigned n; /* number of bytes in block */ unsigned w; /* current window position */ register ulg b; /* bit buffer */ register unsigned k; /* number of bits in bit buffer */ DEBG("<stor"); /* make local copies of globals */ b = bb; /* initialize bit buffer */ k = bk; w = wp; /* initialize window position */ /* go to byte boundary */ n = k & 7; DUMPBITS(n); /* get the length and its complement */ NEEDBITS(16) n = ((unsigned)b & 0xffff); DUMPBITS(16) NEEDBITS(16) if (n != (unsigned)((~b) & 0xffff)) return 1; /* error in compressed data */ DUMPBITS(16) /* read and output the compressed data */ while (n--) { NEEDBITS(8) slide[w++] = (uch)b; if (w == WSIZE) { flush_output(w); w = 0; } DUMPBITS(8) } /* restore the globals from the locals */ wp = w; /* restore global window pointer */ bb = b; /* restore global bit buffer */ bk = k; DEBG(">"); return 0; underrun: return 4; /* Input underrun */ } /* * We use `noinline' here to prevent gcc-3.5 from using too much stack space */ STATIC int noinline INIT inflate_fixed(void) /* decompress an inflated type 1 (fixed Huffman codes) block. We should either replace this with a custom decoder, or at least precompute the Huffman tables. 
*/ { int i; /* temporary variable */ struct huft *tl; /* literal/length code table */ struct huft *td; /* distance code table */ int bl; /* lookup bits for tl */ int bd; /* lookup bits for td */ unsigned *l; /* length list for huft_build */ DEBG("<fix"); l = malloc(sizeof(*l) * 288); if (l == NULL) return 3; /* out of memory */ /* set up literal table */ for (i = 0; i < 144; i++) l[i] = 8; for (; i < 256; i++) l[i] = 9; for (; i < 280; i++) l[i] = 7; for (; i < 288; i++) /* make a complete, but wrong code set */ l[i] = 8; bl = 7; if ((i = huft_build(l, 288, 257, cplens, cplext, &tl, &bl)) != 0) { free(l); return i; } /* set up distance table */ for (i = 0; i < 30; i++) /* make an incomplete code set */ l[i] = 5; bd = 5; if ((i = huft_build(l, 30, 0, cpdist, cpdext, &td, &bd)) > 1) { huft_free(tl); free(l); DEBG(">"); return i; } /* decompress until an end-of-block code */ if (inflate_codes(tl, td, bl, bd)) { free(l); return 1; } /* free the decoding tables, return */ free(l); huft_free(tl); huft_free(td); return 0; } /* * We use `noinline' here to prevent gcc-3.5 from using too much stack space */ STATIC int noinline INIT inflate_dynamic(void) /* decompress an inflated type 2 (dynamic Huffman codes) block. 
*/ { int i; /* temporary variables */ unsigned j; unsigned l; /* last length */ unsigned m; /* mask for bit lengths table */ unsigned n; /* number of lengths to get */ struct huft *tl; /* literal/length code table */ struct huft *td; /* distance code table */ int bl; /* lookup bits for tl */ int bd; /* lookup bits for td */ unsigned nb; /* number of bit length codes */ unsigned nl; /* number of literal/length codes */ unsigned nd; /* number of distance codes */ unsigned *ll; /* literal/length and distance code lengths */ register ulg b; /* bit buffer */ register unsigned k; /* number of bits in bit buffer */ int ret; DEBG("<dyn"); #ifdef PKZIP_BUG_WORKAROUND ll = malloc(sizeof(*ll) * (288+32)); /* literal/length and distance code lengths */ #else ll = malloc(sizeof(*ll) * (286+30)); /* literal/length and distance code lengths */ #endif if (ll == NULL) return 1; /* make local bit buffer */ b = bb; k = bk; /* read in table lengths */ NEEDBITS(5) nl = 257 + ((unsigned)b & 0x1f); /* number of literal/length codes */ DUMPBITS(5) NEEDBITS(5) nd = 1 + ((unsigned)b & 0x1f); /* number of distance codes */ DUMPBITS(5) NEEDBITS(4) nb = 4 + ((unsigned)b & 0xf); /* number of bit length codes */ DUMPBITS(4) #ifdef PKZIP_BUG_WORKAROUND if (nl > 288 || nd > 32) #else if (nl > 286 || nd > 30) #endif { ret = 1; /* bad lengths */ goto out; } DEBG("dyn1 "); /* read in bit-length-code lengths */ for (j = 0; j < nb; j++) { NEEDBITS(3) ll[border[j]] = (unsigned)b & 7; DUMPBITS(3) } for (; j < 19; j++) ll[border[j]] = 0; DEBG("dyn2 "); /* build decoding table for trees--single level, 7 bit lookup */ bl = 7; if ((i = huft_build(ll, 19, 19, NULL, NULL, &tl, &bl)) != 0) { if (i == 1) huft_free(tl); ret = i; /* incomplete code set */ goto out; } DEBG("dyn3 "); /* read in literal and distance code lengths */ n = nl + nd; m = mask_bits[bl]; i = l = 0; while ((unsigned)i < n) { NEEDBITS((unsigned)bl) j = (td = tl + ((unsigned)b & m))->b; DUMPBITS(j) j = td->v.n; if (j < 16) /* length of code in 
bits (0..15) */ ll[i++] = l = j; /* save last length in l */ else if (j == 16) /* repeat last length 3 to 6 times */ { NEEDBITS(2) j = 3 + ((unsigned)b & 3); DUMPBITS(2) if ((unsigned)i + j > n) { ret = 1; goto out; } while (j--) ll[i++] = l; } else if (j == 17) /* 3 to 10 zero length codes */ { NEEDBITS(3) j = 3 + ((unsigned)b & 7); DUMPBITS(3) if ((unsigned)i + j > n) { ret = 1; goto out; } while (j--) ll[i++] = 0; l = 0; } else /* j == 18: 11 to 138 zero length codes */ { NEEDBITS(7) j = 11 + ((unsigned)b & 0x7f); DUMPBITS(7) if ((unsigned)i + j > n) { ret = 1; goto out; } while (j--) ll[i++] = 0; l = 0; } } DEBG("dyn4 "); /* free decoding table for trees */ huft_free(tl); DEBG("dyn5 "); /* restore the global bit buffer */ bb = b; bk = k; DEBG("dyn5a "); /* build the decoding tables for literal/length and distance codes */ bl = lbits; if ((i = huft_build(ll, nl, 257, cplens, cplext, &tl, &bl)) != 0) { DEBG("dyn5b "); if (i == 1) { error("incomplete literal tree"); huft_free(tl); } ret = i; /* incomplete code set */ goto out; } DEBG("dyn5c "); bd = dbits; if ((i = huft_build(ll + nl, nd, 0, cpdist, cpdext, &td, &bd)) != 0) { DEBG("dyn5d "); if (i == 1) { error("incomplete distance tree"); #ifdef PKZIP_BUG_WORKAROUND i = 0; } #else huft_free(td); } huft_free(tl); ret = i; /* incomplete code set */ goto out; #endif } DEBG("dyn6 "); /* decompress until an end-of-block code */ if (inflate_codes(tl, td, bl, bd)) { ret = 1; goto out; } DEBG("dyn7 "); /* free the decoding tables, return */ huft_free(tl); huft_free(td); DEBG(">"); ret = 0; out: free(ll); return ret; underrun: ret = 4; /* Input underrun */ goto out; } STATIC int INIT inflate_block( int *e /* last block flag */ ) /* decompress an inflated block */ { unsigned t; /* block type */ register ulg b; /* bit buffer */ register unsigned k; /* number of bits in bit buffer */ DEBG("<blk"); /* make local bit buffer */ b = bb; k = bk; /* read in last block bit */ NEEDBITS(1) *e = (int)b & 1; DUMPBITS(1) /* read in 
block type */ NEEDBITS(2) t = (unsigned)b & 3; DUMPBITS(2) /* restore the global bit buffer */ bb = b; bk = k; /* inflate that block type */ if (t == 2) return inflate_dynamic(); if (t == 0) return inflate_stored(); if (t == 1) return inflate_fixed(); DEBG(">"); /* bad block type */ return 2; underrun: return 4; /* Input underrun */ } STATIC int INIT inflate(void) /* decompress an inflated entry */ { int e; /* last block flag */ int r; /* result code */ unsigned h; /* maximum struct huft's malloc'ed */ /* initialize window, bit buffer */ wp = 0; bk = 0; bb = 0; /* decompress until the last block */ h = 0; do { hufts = 0; #ifdef ARCH_HAS_DECOMP_WDOG arch_decomp_wdog(); #endif r = inflate_block(&e); if (r) return r; if (hufts > h) h = hufts; } while (!e); /* Undo too much lookahead. The next read will be byte aligned so we * can discard unused bits in the last meaningful byte. */ while (bk >= 8) { bk -= 8; inptr--; } /* flush out slide */ flush_output(wp); /* return success */ #ifdef DEBUG fprintf(stderr, "<%u> ", h); #endif /* DEBUG */ return 0; } /********************************************************************** * * The following are support routines for inflate.c * **********************************************************************/ static ulg crc_32_tab[256]; static ulg crc; /* initialized in makecrc() so it'll reside in bss */ #define CRC_VALUE (crc ^ 0xffffffffUL) /* * Code to compute the CRC-32 table. Borrowed from * gzip-1.0.3/makecrc.c. 
*/ static void INIT makecrc(void) { /* Not copyrighted 1990 Mark Adler */ unsigned long c; /* crc shift register */ unsigned long e; /* polynomial exclusive-or pattern */ int i; /* counter for all possible eight bit values */ int k; /* byte being shifted into crc apparatus */ /* terms of polynomial defining this crc (except x^32): */ static const int p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26}; /* Make exclusive-or pattern from polynomial */ e = 0; for (i = 0; i < sizeof(p)/sizeof(int); i++) e |= 1L << (31 - p[i]); crc_32_tab[0] = 0; for (i = 1; i < 256; i++) { c = 0; for (k = i | 256; k != 1; k >>= 1) { c = c & 1 ? (c >> 1) ^ e : c >> 1; if (k & 1) c ^= e; } crc_32_tab[i] = c; } /* this is initialized here so this code could reside in ROM */ crc = (ulg)0xffffffffUL; /* shift register contents */ } /* gzip flag byte */ #define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */ #define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */ #define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ #define ORIG_NAME 0x08 /* bit 3 set: original file name present */ #define COMMENT 0x10 /* bit 4 set: file comment present */ #define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ #define RESERVED 0xC0 /* bit 6,7: reserved */ /* * Do the uncompression! 
*/ static int INIT gunzip(void) { uch flags; unsigned char magic[2]; /* magic header */ char method; ulg orig_crc = 0; /* original crc */ ulg orig_len = 0; /* original uncompressed length */ int res; magic[0] = NEXTBYTE(); magic[1] = NEXTBYTE(); method = NEXTBYTE(); if (magic[0] != 037 || ((magic[1] != 0213) && (magic[1] != 0236))) { error("bad gzip magic numbers"); return -1; } /* We only support method #8, DEFLATED */ if (method != 8) { error("internal error, invalid method"); return -1; } flags = (uch)get_byte(); if ((flags & ENCRYPTED) != 0) { error("Input is encrypted"); return -1; } if ((flags & CONTINUATION) != 0) { error("Multi part input"); return -1; } if ((flags & RESERVED) != 0) { error("Input has invalid flags"); return -1; } NEXTBYTE(); /* Get timestamp */ NEXTBYTE(); NEXTBYTE(); NEXTBYTE(); (void)NEXTBYTE(); /* Ignore extra flags for the moment */ (void)NEXTBYTE(); /* Ignore OS type for the moment */ if ((flags & EXTRA_FIELD) != 0) { unsigned len = (unsigned)NEXTBYTE(); len |= ((unsigned)NEXTBYTE())<<8; while (len--) (void)NEXTBYTE(); } /* Get original file name if it was truncated */ if ((flags & ORIG_NAME) != 0) { /* Discard the old name */ while (NEXTBYTE() != 0) /* null */ ; } /* Discard file comment if any */ if ((flags & COMMENT) != 0) { while (NEXTBYTE() != 0) /* null */ ; } /* Decompress */ if ((res = inflate())) { switch (res) { case 0: break; case 1: error("invalid compressed format (err=1)"); break; case 2: error("invalid compressed format (err=2)"); break; case 3: error("out of memory"); break; case 4: error("out of input data"); break; default: error("invalid compressed format (other)"); } return -1; } /* Get the crc and original length */ /* crc32 (see algorithm.doc) * uncompressed input size modulo 2^32 */ orig_crc = (ulg) NEXTBYTE(); orig_crc |= (ulg) NEXTBYTE() << 8; orig_crc |= (ulg) NEXTBYTE() << 16; orig_crc |= (ulg) NEXTBYTE() << 24; orig_len = (ulg) NEXTBYTE(); orig_len |= (ulg) NEXTBYTE() << 8; orig_len |= (ulg) NEXTBYTE() << 
16; orig_len |= (ulg) NEXTBYTE() << 24; /* Validate decompression */ if (orig_crc != CRC_VALUE) { error("crc error"); return -1; } if (orig_len != bytes_out) { error("length error"); return -1; } return 0; underrun: /* NEXTBYTE() goto's here if needed */ error("out of input data"); return -1; }
gpl-2.0
petkan/linux
drivers/mfd/tps80031.c
821
16688
/* * tps80031.c -- TI TPS80031/TPS80032 mfd core driver. * * MFD core driver for TI TPS80031/TPS80032 Fully Integrated * Power Management with Power Path and Battery Charger * * Copyright (c) 2012, NVIDIA Corporation. * * Author: Laxman Dewangan <ldewangan@nvidia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind, * whether express or implied; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307, USA */ #include <linux/err.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/mfd/core.h> #include <linux/mfd/tps80031.h> #include <linux/module.h> #include <linux/pm.h> #include <linux/regmap.h> #include <linux/slab.h> static struct resource tps80031_rtc_resources[] = { { .start = TPS80031_INT_RTC_ALARM, .end = TPS80031_INT_RTC_ALARM, .flags = IORESOURCE_IRQ, }, }; /* TPS80031 sub mfd devices */ static const struct mfd_cell tps80031_cell[] = { { .name = "tps80031-pmic", }, { .name = "tps80031-clock", }, { .name = "tps80031-rtc", .num_resources = ARRAY_SIZE(tps80031_rtc_resources), .resources = tps80031_rtc_resources, }, { .name = "tps80031-gpadc", }, { .name = "tps80031-fuel-gauge", }, { .name = "tps80031-charger", }, }; static int tps80031_slave_address[TPS80031_NUM_SLAVES] = { TPS80031_I2C_ID0_ADDR, TPS80031_I2C_ID1_ADDR, TPS80031_I2C_ID2_ADDR, TPS80031_I2C_ID3_ADDR, }; struct tps80031_pupd_data { u8 reg; u8 pullup_bit; u8 pulldown_bit; }; #define TPS80031_IRQ(_reg, _mask) \ { \ .reg_offset = 
(TPS80031_INT_MSK_LINE_##_reg) - \ TPS80031_INT_MSK_LINE_A, \ .mask = BIT(_mask), \ } static const struct regmap_irq tps80031_main_irqs[] = { [TPS80031_INT_PWRON] = TPS80031_IRQ(A, 0), [TPS80031_INT_RPWRON] = TPS80031_IRQ(A, 1), [TPS80031_INT_SYS_VLOW] = TPS80031_IRQ(A, 2), [TPS80031_INT_RTC_ALARM] = TPS80031_IRQ(A, 3), [TPS80031_INT_RTC_PERIOD] = TPS80031_IRQ(A, 4), [TPS80031_INT_HOT_DIE] = TPS80031_IRQ(A, 5), [TPS80031_INT_VXX_SHORT] = TPS80031_IRQ(A, 6), [TPS80031_INT_SPDURATION] = TPS80031_IRQ(A, 7), [TPS80031_INT_WATCHDOG] = TPS80031_IRQ(B, 0), [TPS80031_INT_BAT] = TPS80031_IRQ(B, 1), [TPS80031_INT_SIM] = TPS80031_IRQ(B, 2), [TPS80031_INT_MMC] = TPS80031_IRQ(B, 3), [TPS80031_INT_RES] = TPS80031_IRQ(B, 4), [TPS80031_INT_GPADC_RT] = TPS80031_IRQ(B, 5), [TPS80031_INT_GPADC_SW2_EOC] = TPS80031_IRQ(B, 6), [TPS80031_INT_CC_AUTOCAL] = TPS80031_IRQ(B, 7), [TPS80031_INT_ID_WKUP] = TPS80031_IRQ(C, 0), [TPS80031_INT_VBUSS_WKUP] = TPS80031_IRQ(C, 1), [TPS80031_INT_ID] = TPS80031_IRQ(C, 2), [TPS80031_INT_VBUS] = TPS80031_IRQ(C, 3), [TPS80031_INT_CHRG_CTRL] = TPS80031_IRQ(C, 4), [TPS80031_INT_EXT_CHRG] = TPS80031_IRQ(C, 5), [TPS80031_INT_INT_CHRG] = TPS80031_IRQ(C, 6), [TPS80031_INT_RES2] = TPS80031_IRQ(C, 7), }; static struct regmap_irq_chip tps80031_irq_chip = { .name = "tps80031", .irqs = tps80031_main_irqs, .num_irqs = ARRAY_SIZE(tps80031_main_irqs), .num_regs = 3, .status_base = TPS80031_INT_STS_A, .mask_base = TPS80031_INT_MSK_LINE_A, }; #define PUPD_DATA(_reg, _pulldown_bit, _pullup_bit) \ { \ .reg = TPS80031_CFG_INPUT_PUPD##_reg, \ .pulldown_bit = _pulldown_bit, \ .pullup_bit = _pullup_bit, \ } static const struct tps80031_pupd_data tps80031_pupds[] = { [TPS80031_PREQ1] = PUPD_DATA(1, BIT(0), BIT(1)), [TPS80031_PREQ2A] = PUPD_DATA(1, BIT(2), BIT(3)), [TPS80031_PREQ2B] = PUPD_DATA(1, BIT(4), BIT(5)), [TPS80031_PREQ2C] = PUPD_DATA(1, BIT(6), BIT(7)), [TPS80031_PREQ3] = PUPD_DATA(2, BIT(0), BIT(1)), [TPS80031_NRES_WARM] = PUPD_DATA(2, 0, BIT(2)), [TPS80031_PWM_FORCE] = 
PUPD_DATA(2, BIT(5), 0), [TPS80031_CHRG_EXT_CHRG_STATZ] = PUPD_DATA(2, 0, BIT(6)), [TPS80031_SIM] = PUPD_DATA(3, BIT(0), BIT(1)), [TPS80031_MMC] = PUPD_DATA(3, BIT(2), BIT(3)), [TPS80031_GPADC_START] = PUPD_DATA(3, BIT(4), 0), [TPS80031_DVSI2C_SCL] = PUPD_DATA(4, 0, BIT(0)), [TPS80031_DVSI2C_SDA] = PUPD_DATA(4, 0, BIT(1)), [TPS80031_CTLI2C_SCL] = PUPD_DATA(4, 0, BIT(2)), [TPS80031_CTLI2C_SDA] = PUPD_DATA(4, 0, BIT(3)), }; static struct tps80031 *tps80031_power_off_dev; int tps80031_ext_power_req_config(struct device *dev, unsigned long ext_ctrl_flag, int preq_bit, int state_reg_add, int trans_reg_add) { u8 res_ass_reg = 0; int preq_mask_bit = 0; int ret; if (!(ext_ctrl_flag & TPS80031_EXT_PWR_REQ)) return 0; if (ext_ctrl_flag & TPS80031_PWR_REQ_INPUT_PREQ1) { res_ass_reg = TPS80031_PREQ1_RES_ASS_A + (preq_bit >> 3); preq_mask_bit = 5; } else if (ext_ctrl_flag & TPS80031_PWR_REQ_INPUT_PREQ2) { res_ass_reg = TPS80031_PREQ2_RES_ASS_A + (preq_bit >> 3); preq_mask_bit = 6; } else if (ext_ctrl_flag & TPS80031_PWR_REQ_INPUT_PREQ3) { res_ass_reg = TPS80031_PREQ3_RES_ASS_A + (preq_bit >> 3); preq_mask_bit = 7; } /* Configure REQ_ASS registers */ ret = tps80031_set_bits(dev, TPS80031_SLAVE_ID1, res_ass_reg, BIT(preq_bit & 0x7)); if (ret < 0) { dev_err(dev, "reg 0x%02x setbit failed, err = %d\n", res_ass_reg, ret); return ret; } /* Unmask the PREQ */ ret = tps80031_clr_bits(dev, TPS80031_SLAVE_ID1, TPS80031_PHOENIX_MSK_TRANSITION, BIT(preq_mask_bit)); if (ret < 0) { dev_err(dev, "reg 0x%02x clrbit failed, err = %d\n", TPS80031_PHOENIX_MSK_TRANSITION, ret); return ret; } /* Switch regulator control to resource now */ if (ext_ctrl_flag & (TPS80031_PWR_REQ_INPUT_PREQ2 | TPS80031_PWR_REQ_INPUT_PREQ3)) { ret = tps80031_update(dev, TPS80031_SLAVE_ID1, state_reg_add, 0x0, TPS80031_STATE_MASK); if (ret < 0) dev_err(dev, "reg 0x%02x update failed, err = %d\n", state_reg_add, ret); } else { ret = tps80031_update(dev, TPS80031_SLAVE_ID1, trans_reg_add, TPS80031_TRANS_SLEEP_OFF, 
TPS80031_TRANS_SLEEP_MASK); if (ret < 0) dev_err(dev, "reg 0x%02x update failed, err = %d\n", trans_reg_add, ret); } return ret; } EXPORT_SYMBOL_GPL(tps80031_ext_power_req_config); static void tps80031_power_off(void) { dev_info(tps80031_power_off_dev->dev, "switching off PMU\n"); tps80031_write(tps80031_power_off_dev->dev, TPS80031_SLAVE_ID1, TPS80031_PHOENIX_DEV_ON, TPS80031_DEVOFF); } static void tps80031_pupd_init(struct tps80031 *tps80031, struct tps80031_platform_data *pdata) { struct tps80031_pupd_init_data *pupd_init_data = pdata->pupd_init_data; int data_size = pdata->pupd_init_data_size; int i; for (i = 0; i < data_size; ++i) { struct tps80031_pupd_init_data *pupd_init = &pupd_init_data[i]; const struct tps80031_pupd_data *pupd = &tps80031_pupds[pupd_init->input_pin]; u8 update_value = 0; u8 update_mask = pupd->pulldown_bit | pupd->pullup_bit; if (pupd_init->setting == TPS80031_PUPD_PULLDOWN) update_value = pupd->pulldown_bit; else if (pupd_init->setting == TPS80031_PUPD_PULLUP) update_value = pupd->pullup_bit; tps80031_update(tps80031->dev, TPS80031_SLAVE_ID1, pupd->reg, update_value, update_mask); } } static int tps80031_init_ext_control(struct tps80031 *tps80031, struct tps80031_platform_data *pdata) { struct device *dev = tps80031->dev; int ret; int i; /* Clear all external control for this rail */ for (i = 0; i < 9; ++i) { ret = tps80031_write(dev, TPS80031_SLAVE_ID1, TPS80031_PREQ1_RES_ASS_A + i, 0); if (ret < 0) { dev_err(dev, "reg 0x%02x write failed, err = %d\n", TPS80031_PREQ1_RES_ASS_A + i, ret); return ret; } } /* Mask the PREQ */ ret = tps80031_set_bits(dev, TPS80031_SLAVE_ID1, TPS80031_PHOENIX_MSK_TRANSITION, 0x7 << 5); if (ret < 0) { dev_err(dev, "reg 0x%02x set_bits failed, err = %d\n", TPS80031_PHOENIX_MSK_TRANSITION, ret); return ret; } return ret; } static int tps80031_irq_init(struct tps80031 *tps80031, int irq, int irq_base) { struct device *dev = tps80031->dev; int i, ret; /* * The MASK register used for updating status register when 
* interrupt occurs and LINE register used to pass the status * to actual interrupt line. As per datasheet: * When INT_MSK_LINE [i] is set to 1, the associated interrupt * number i is INT line masked, which means that no interrupt is * generated on the INT line. * When INT_MSK_LINE [i] is set to 0, the associated interrupt * number i is line enabled: An interrupt is generated on the * INT line. * In any case, the INT_STS [i] status bit may or may not be updated, * only linked to the INT_MSK_STS [i] configuration register bit. * * When INT_MSK_STS [i] is set to 1, the associated interrupt number * i is status masked, which means that no interrupt is stored in * the INT_STS[i] status bit. Note that no interrupt number i is * generated on the INT line, even if the INT_MSK_LINE [i] register * bit is set to 0. * When INT_MSK_STS [i] is set to 0, the associated interrupt number i * is status enabled: An interrupt status is updated in the INT_STS [i] * register. The interrupt may or may not be generated on the INT line, * depending on the INT_MSK_LINE [i] configuration register bit. */ for (i = 0; i < 3; i++) tps80031_write(dev, TPS80031_SLAVE_ID2, TPS80031_INT_MSK_STS_A + i, 0x00); ret = regmap_add_irq_chip(tps80031->regmap[TPS80031_SLAVE_ID2], irq, IRQF_ONESHOT, irq_base, &tps80031_irq_chip, &tps80031->irq_data); if (ret < 0) { dev_err(dev, "add irq failed, err = %d\n", ret); return ret; } return ret; } static bool rd_wr_reg_id0(struct device *dev, unsigned int reg) { switch (reg) { case TPS80031_SMPS1_CFG_FORCE ... TPS80031_SMPS2_CFG_VOLTAGE: return true; default: return false; } } static bool rd_wr_reg_id1(struct device *dev, unsigned int reg) { switch (reg) { case TPS80031_SECONDS_REG ... TPS80031_RTC_RESET_STATUS_REG: case TPS80031_VALIDITY0 ... TPS80031_VALIDITY7: case TPS80031_PHOENIX_START_CONDITION ... TPS80031_KEY_PRESS_DUR_CFG: case TPS80031_SMPS4_CFG_TRANS ... TPS80031_SMPS3_CFG_VOLTAGE: case TPS80031_BROADCAST_ADDR_ALL ... 
TPS80031_BROADCAST_ADDR_CLK_RST: case TPS80031_VANA_CFG_TRANS ... TPS80031_LDO7_CFG_VOLTAGE: case TPS80031_REGEN1_CFG_TRANS ... TPS80031_TMP_CFG_STATE: case TPS80031_PREQ1_RES_ASS_A ... TPS80031_PREQ3_RES_ASS_C: case TPS80031_SMPS_OFFSET ... TPS80031_BATDEBOUNCING: case TPS80031_CFG_INPUT_PUPD1 ... TPS80031_CFG_SMPS_PD: case TPS80031_BACKUP_REG: return true; default: return false; } } static bool is_volatile_reg_id1(struct device *dev, unsigned int reg) { switch (reg) { case TPS80031_SMPS4_CFG_TRANS ... TPS80031_SMPS3_CFG_VOLTAGE: case TPS80031_VANA_CFG_TRANS ... TPS80031_LDO7_CFG_VOLTAGE: case TPS80031_REGEN1_CFG_TRANS ... TPS80031_TMP_CFG_STATE: case TPS80031_PREQ1_RES_ASS_A ... TPS80031_PREQ3_RES_ASS_C: case TPS80031_SMPS_OFFSET ... TPS80031_BATDEBOUNCING: case TPS80031_CFG_INPUT_PUPD1 ... TPS80031_CFG_SMPS_PD: return true; default: return false; } } static bool rd_wr_reg_id2(struct device *dev, unsigned int reg) { switch (reg) { case TPS80031_USB_VENDOR_ID_LSB ... TPS80031_USB_OTG_REVISION: case TPS80031_GPADC_CTRL ... TPS80031_CTRL_P1: case TPS80031_RTCH0_LSB ... TPS80031_GPCH0_MSB: case TPS80031_TOGGLE1 ... TPS80031_VIBMODE: case TPS80031_PWM1ON ... TPS80031_PWM2OFF: case TPS80031_FG_REG_00 ... TPS80031_FG_REG_11: case TPS80031_INT_STS_A ... TPS80031_INT_MSK_STS_C: case TPS80031_CONTROLLER_CTRL2 ... TPS80031_LED_PWM_CTRL2: return true; default: return false; } } static bool rd_wr_reg_id3(struct device *dev, unsigned int reg) { switch (reg) { case TPS80031_GPADC_TRIM0 ... 
TPS80031_GPADC_TRIM18: return true; default: return false; } } static const struct regmap_config tps80031_regmap_configs[] = { { .reg_bits = 8, .val_bits = 8, .writeable_reg = rd_wr_reg_id0, .readable_reg = rd_wr_reg_id0, .max_register = TPS80031_MAX_REGISTER, }, { .reg_bits = 8, .val_bits = 8, .writeable_reg = rd_wr_reg_id1, .readable_reg = rd_wr_reg_id1, .volatile_reg = is_volatile_reg_id1, .max_register = TPS80031_MAX_REGISTER, }, { .reg_bits = 8, .val_bits = 8, .writeable_reg = rd_wr_reg_id2, .readable_reg = rd_wr_reg_id2, .max_register = TPS80031_MAX_REGISTER, }, { .reg_bits = 8, .val_bits = 8, .writeable_reg = rd_wr_reg_id3, .readable_reg = rd_wr_reg_id3, .max_register = TPS80031_MAX_REGISTER, }, }; static int tps80031_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct tps80031_platform_data *pdata = dev_get_platdata(&client->dev); struct tps80031 *tps80031; int ret; uint8_t es_version; uint8_t ep_ver; int i; if (!pdata) { dev_err(&client->dev, "tps80031 requires platform data\n"); return -EINVAL; } tps80031 = devm_kzalloc(&client->dev, sizeof(*tps80031), GFP_KERNEL); if (!tps80031) { dev_err(&client->dev, "Malloc failed for tps80031\n"); return -ENOMEM; } for (i = 0; i < TPS80031_NUM_SLAVES; i++) { if (tps80031_slave_address[i] == client->addr) tps80031->clients[i] = client; else tps80031->clients[i] = i2c_new_dummy(client->adapter, tps80031_slave_address[i]); if (!tps80031->clients[i]) { dev_err(&client->dev, "can't attach client %d\n", i); ret = -ENOMEM; goto fail_client_reg; } i2c_set_clientdata(tps80031->clients[i], tps80031); tps80031->regmap[i] = devm_regmap_init_i2c(tps80031->clients[i], &tps80031_regmap_configs[i]); if (IS_ERR(tps80031->regmap[i])) { ret = PTR_ERR(tps80031->regmap[i]); dev_err(&client->dev, "regmap %d init failed, err %d\n", i, ret); goto fail_client_reg; } } ret = tps80031_read(&client->dev, TPS80031_SLAVE_ID3, TPS80031_JTAGVERNUM, &es_version); if (ret < 0) { dev_err(&client->dev, "Silicon version number read 
failed: %d\n", ret); goto fail_client_reg; } ret = tps80031_read(&client->dev, TPS80031_SLAVE_ID3, TPS80031_EPROM_REV, &ep_ver); if (ret < 0) { dev_err(&client->dev, "Silicon eeprom version read failed: %d\n", ret); goto fail_client_reg; } dev_info(&client->dev, "ES version 0x%02x and EPROM version 0x%02x\n", es_version, ep_ver); tps80031->es_version = es_version; tps80031->dev = &client->dev; i2c_set_clientdata(client, tps80031); tps80031->chip_info = id->driver_data; ret = tps80031_irq_init(tps80031, client->irq, pdata->irq_base); if (ret) { dev_err(&client->dev, "IRQ init failed: %d\n", ret); goto fail_client_reg; } tps80031_pupd_init(tps80031, pdata); tps80031_init_ext_control(tps80031, pdata); ret = mfd_add_devices(tps80031->dev, -1, tps80031_cell, ARRAY_SIZE(tps80031_cell), NULL, 0, regmap_irq_get_domain(tps80031->irq_data)); if (ret < 0) { dev_err(&client->dev, "mfd_add_devices failed: %d\n", ret); goto fail_mfd_add; } if (pdata->use_power_off && !pm_power_off) { tps80031_power_off_dev = tps80031; pm_power_off = tps80031_power_off; } return 0; fail_mfd_add: regmap_del_irq_chip(client->irq, tps80031->irq_data); fail_client_reg: for (i = 0; i < TPS80031_NUM_SLAVES; i++) { if (tps80031->clients[i] && (tps80031->clients[i] != client)) i2c_unregister_device(tps80031->clients[i]); } return ret; } static int tps80031_remove(struct i2c_client *client) { struct tps80031 *tps80031 = i2c_get_clientdata(client); int i; if (tps80031_power_off_dev == tps80031) { tps80031_power_off_dev = NULL; pm_power_off = NULL; } mfd_remove_devices(tps80031->dev); regmap_del_irq_chip(client->irq, tps80031->irq_data); for (i = 0; i < TPS80031_NUM_SLAVES; i++) { if (tps80031->clients[i] != client) i2c_unregister_device(tps80031->clients[i]); } return 0; } static const struct i2c_device_id tps80031_id_table[] = { { "tps80031", TPS80031 }, { "tps80032", TPS80032 }, { } }; MODULE_DEVICE_TABLE(i2c, tps80031_id_table); static struct i2c_driver tps80031_driver = { .driver = { .name = 
"tps80031", }, .probe = tps80031_probe, .remove = tps80031_remove, .id_table = tps80031_id_table, }; static int __init tps80031_init(void) { return i2c_add_driver(&tps80031_driver); } subsys_initcall(tps80031_init); static void __exit tps80031_exit(void) { i2c_del_driver(&tps80031_driver); } module_exit(tps80031_exit); MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); MODULE_DESCRIPTION("TPS80031 core driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
snq-/bravo-kernel
crypto/cipher.c
821
3392
/* * Cryptographic API. * * Cipher operations. * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <linux/kernel.h> #include <linux/crypto.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/string.h> #include "internal.h" static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher; unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); int ret; u8 *buffer, *alignbuffer; unsigned long absize; absize = keylen + alignmask; buffer = kmalloc(absize, GFP_ATOMIC); if (!buffer) return -ENOMEM; alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(alignbuffer, key, keylen); ret = cia->cia_setkey(tfm, alignbuffer, keylen); memset(alignbuffer, 0, keylen); kfree(buffer); return ret; } static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher; unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) { tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } if ((unsigned long)key & alignmask) return setkey_unaligned(tfm, key, keylen); return cia->cia_setkey(tfm, key, keylen); } static void cipher_crypt_unaligned(void (*fn)(struct crypto_tfm *, u8 *, const u8 *), struct crypto_tfm *tfm, u8 *dst, const u8 *src) { unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); unsigned int size = crypto_tfm_alg_blocksize(tfm); u8 buffer[size + alignmask]; u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(tmp, src, size); fn(tfm, tmp, 
tmp); memcpy(dst, tmp, size); } static void cipher_encrypt_unaligned(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) { cipher_crypt_unaligned(cipher->cia_encrypt, tfm, dst, src); return; } cipher->cia_encrypt(tfm, dst, src); } static void cipher_decrypt_unaligned(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) { cipher_crypt_unaligned(cipher->cia_decrypt, tfm, dst, src); return; } cipher->cia_decrypt(tfm, dst, src); } int crypto_init_cipher_ops(struct crypto_tfm *tfm) { struct cipher_tfm *ops = &tfm->crt_cipher; struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; ops->cit_setkey = setkey; ops->cit_encrypt_one = crypto_tfm_alg_alignmask(tfm) ? cipher_encrypt_unaligned : cipher->cia_encrypt; ops->cit_decrypt_one = crypto_tfm_alg_alignmask(tfm) ? cipher_decrypt_unaligned : cipher->cia_decrypt; return 0; } void crypto_exit_cipher_ops(struct crypto_tfm *tfm) { }
gpl-2.0
Restorn/android_kernel_elephone_p8000
drivers/clk/spear/spear3xx_clock.c
1333
23043
/* * SPEAr3xx machines clock framework source file * * Copyright (C) 2012 ST Microelectronics * Viresh Kumar <viresh.linux@gmail.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/clk.h> #include <linux/clkdev.h> #include <linux/err.h> #include <linux/io.h> #include <linux/of_platform.h> #include <linux/spinlock_types.h> #include "clk.h" static DEFINE_SPINLOCK(_lock); #define PLL1_CTR (misc_base + 0x008) #define PLL1_FRQ (misc_base + 0x00C) #define PLL2_CTR (misc_base + 0x014) #define PLL2_FRQ (misc_base + 0x018) #define PLL_CLK_CFG (misc_base + 0x020) /* PLL_CLK_CFG register masks */ #define MCTR_CLK_SHIFT 28 #define MCTR_CLK_MASK 3 #define CORE_CLK_CFG (misc_base + 0x024) /* CORE CLK CFG register masks */ #define GEN_SYNTH2_3_CLK_SHIFT 18 #define GEN_SYNTH2_3_CLK_MASK 1 #define HCLK_RATIO_SHIFT 10 #define HCLK_RATIO_MASK 2 #define PCLK_RATIO_SHIFT 8 #define PCLK_RATIO_MASK 2 #define PERIP_CLK_CFG (misc_base + 0x028) /* PERIP_CLK_CFG register masks */ #define UART_CLK_SHIFT 4 #define UART_CLK_MASK 1 #define FIRDA_CLK_SHIFT 5 #define FIRDA_CLK_MASK 2 #define GPT0_CLK_SHIFT 8 #define GPT1_CLK_SHIFT 11 #define GPT2_CLK_SHIFT 12 #define GPT_CLK_MASK 1 #define PERIP1_CLK_ENB (misc_base + 0x02C) /* PERIP1_CLK_ENB register masks */ #define UART_CLK_ENB 3 #define SSP_CLK_ENB 5 #define I2C_CLK_ENB 7 #define JPEG_CLK_ENB 8 #define FIRDA_CLK_ENB 10 #define GPT1_CLK_ENB 11 #define GPT2_CLK_ENB 12 #define ADC_CLK_ENB 15 #define RTC_CLK_ENB 17 #define GPIO_CLK_ENB 18 #define DMA_CLK_ENB 19 #define SMI_CLK_ENB 21 #define GMAC_CLK_ENB 23 #define USBD_CLK_ENB 24 #define USBH_CLK_ENB 25 #define C3_CLK_ENB 31 #define RAS_CLK_ENB (misc_base + 0x034) #define RAS_AHB_CLK_ENB 0 #define RAS_PLL1_CLK_ENB 1 #define RAS_APB_CLK_ENB 2 #define RAS_32K_CLK_ENB 3 #define RAS_24M_CLK_ENB 4 #define RAS_48M_CLK_ENB 5 #define RAS_PLL2_CLK_ENB 
7 #define RAS_SYNT0_CLK_ENB 8 #define RAS_SYNT1_CLK_ENB 9 #define RAS_SYNT2_CLK_ENB 10 #define RAS_SYNT3_CLK_ENB 11 #define PRSC0_CLK_CFG (misc_base + 0x044) #define PRSC1_CLK_CFG (misc_base + 0x048) #define PRSC2_CLK_CFG (misc_base + 0x04C) #define AMEM_CLK_CFG (misc_base + 0x050) #define AMEM_CLK_ENB 0 #define CLCD_CLK_SYNT (misc_base + 0x05C) #define FIRDA_CLK_SYNT (misc_base + 0x060) #define UART_CLK_SYNT (misc_base + 0x064) #define GMAC_CLK_SYNT (misc_base + 0x068) #define GEN0_CLK_SYNT (misc_base + 0x06C) #define GEN1_CLK_SYNT (misc_base + 0x070) #define GEN2_CLK_SYNT (misc_base + 0x074) #define GEN3_CLK_SYNT (misc_base + 0x078) /* pll rate configuration table, in ascending order of rates */ static struct pll_rate_tbl pll_rtbl[] = { {.mode = 0, .m = 0x53, .n = 0x0C, .p = 0x1}, /* vco 332 & pll 166 MHz */ {.mode = 0, .m = 0x85, .n = 0x0C, .p = 0x1}, /* vco 532 & pll 266 MHz */ {.mode = 0, .m = 0xA6, .n = 0x0C, .p = 0x1}, /* vco 664 & pll 332 MHz */ }; /* aux rate configuration table, in ascending order of rates */ static struct aux_rate_tbl aux_rtbl[] = { /* For PLL1 = 332 MHz */ {.xscale = 1, .yscale = 81, .eq = 0}, /* 2.049 MHz */ {.xscale = 1, .yscale = 59, .eq = 0}, /* 2.822 MHz */ {.xscale = 2, .yscale = 81, .eq = 0}, /* 4.098 MHz */ {.xscale = 3, .yscale = 89, .eq = 0}, /* 5.644 MHz */ {.xscale = 4, .yscale = 81, .eq = 0}, /* 8.197 MHz */ {.xscale = 4, .yscale = 59, .eq = 0}, /* 11.254 MHz */ {.xscale = 2, .yscale = 27, .eq = 0}, /* 12.296 MHz */ {.xscale = 2, .yscale = 8, .eq = 0}, /* 41.5 MHz */ {.xscale = 2, .yscale = 4, .eq = 0}, /* 83 MHz */ {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */ }; /* gpt rate configuration table, in ascending order of rates */ static struct gpt_rate_tbl gpt_rtbl[] = { /* For pll1 = 332 MHz */ {.mscale = 4, .nscale = 0}, /* 41.5 MHz */ {.mscale = 2, .nscale = 0}, /* 55.3 MHz */ {.mscale = 1, .nscale = 0}, /* 83 MHz */ }; /* clock parents */ static const char *uart0_parents[] = { "pll3_clk", "uart_syn_gclk", }; static 
const char *firda_parents[] = { "pll3_clk", "firda_syn_gclk", }; static const char *gpt0_parents[] = { "pll3_clk", "gpt0_syn_clk", }; static const char *gpt1_parents[] = { "pll3_clk", "gpt1_syn_clk", }; static const char *gpt2_parents[] = { "pll3_clk", "gpt2_syn_clk", }; static const char *gen2_3_parents[] = { "pll1_clk", "pll2_clk", }; static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none", "pll2_clk", }; #ifdef CONFIG_MACH_SPEAR300 static void __init spear300_clk_init(void) { struct clk *clk; clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "60000000.clcd"); clk = clk_register_fixed_factor(NULL, "fsmc_clk", "ras_ahb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "94000000.flash"); clk = clk_register_fixed_factor(NULL, "sdhci_clk", "ras_ahb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "70000000.sdhci"); clk = clk_register_fixed_factor(NULL, "gpio1_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "a9000000.gpio"); clk = clk_register_fixed_factor(NULL, "kbd_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "a0000000.kbd"); } #else static inline void spear300_clk_init(void) { } #endif /* array of all spear 310 clock lookups */ #ifdef CONFIG_MACH_SPEAR310 static void __init spear310_clk_init(void) { struct clk *clk; clk = clk_register_fixed_factor(NULL, "emi_clk", "ras_ahb_clk", 0, 1, 1); clk_register_clkdev(clk, "emi", NULL); clk = clk_register_fixed_factor(NULL, "fsmc_clk", "ras_ahb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "44000000.flash"); clk = clk_register_fixed_factor(NULL, "tdm_clk", "ras_ahb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "tdm"); clk = clk_register_fixed_factor(NULL, "uart1_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "b2000000.serial"); clk = clk_register_fixed_factor(NULL, "uart2_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "b2080000.serial"); clk = clk_register_fixed_factor(NULL, "uart3_clk", 
"ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "b2100000.serial"); clk = clk_register_fixed_factor(NULL, "uart4_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "b2180000.serial"); clk = clk_register_fixed_factor(NULL, "uart5_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "b2200000.serial"); } #else static inline void spear310_clk_init(void) { } #endif /* array of all spear 320 clock lookups */ #ifdef CONFIG_MACH_SPEAR320 #define SPEAR320_CONTROL_REG (soc_config_base + 0x0010) #define SPEAR320_EXT_CTRL_REG (soc_config_base + 0x0018) #define SPEAR320_UARTX_PCLK_MASK 0x1 #define SPEAR320_UART2_PCLK_SHIFT 8 #define SPEAR320_UART3_PCLK_SHIFT 9 #define SPEAR320_UART4_PCLK_SHIFT 10 #define SPEAR320_UART5_PCLK_SHIFT 11 #define SPEAR320_UART6_PCLK_SHIFT 12 #define SPEAR320_RS485_PCLK_SHIFT 13 #define SMII_PCLK_SHIFT 18 #define SMII_PCLK_MASK 2 #define SMII_PCLK_VAL_PAD 0x0 #define SMII_PCLK_VAL_PLL2 0x1 #define SMII_PCLK_VAL_SYNTH0 0x2 #define SDHCI_PCLK_SHIFT 15 #define SDHCI_PCLK_MASK 1 #define SDHCI_PCLK_VAL_48M 0x0 #define SDHCI_PCLK_VAL_SYNTH3 0x1 #define I2S_REF_PCLK_SHIFT 8 #define I2S_REF_PCLK_MASK 1 #define I2S_REF_PCLK_SYNTH_VAL 0x1 #define I2S_REF_PCLK_PLL2_VAL 0x0 #define UART1_PCLK_SHIFT 6 #define UART1_PCLK_MASK 1 #define SPEAR320_UARTX_PCLK_VAL_SYNTH1 0x0 #define SPEAR320_UARTX_PCLK_VAL_APB 0x1 static const char *i2s_ref_parents[] = { "ras_pll2_clk", "ras_syn2_gclk", }; static const char *sdhci_parents[] = { "ras_pll3_clk", "ras_syn3_gclk", }; static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk", "ras_syn0_gclk", }; static const char *uartx_parents[] = { "ras_syn1_gclk", "ras_apb_clk", }; static void __init spear320_clk_init(void __iomem *soc_config_base) { struct clk *clk; clk = clk_register_fixed_rate(NULL, "smii_125m_pad_clk", NULL, CLK_IS_ROOT, 125000000); clk_register_clkdev(clk, "smii_125m_pad", NULL); clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_clk", 0, 1, 1); 
clk_register_clkdev(clk, NULL, "90000000.clcd"); clk = clk_register_fixed_factor(NULL, "emi_clk", "ras_ahb_clk", 0, 1, 1); clk_register_clkdev(clk, "emi", NULL); clk = clk_register_fixed_factor(NULL, "fsmc_clk", "ras_ahb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "4c000000.flash"); clk = clk_register_fixed_factor(NULL, "i2c1_clk", "ras_ahb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "a7000000.i2c"); clk = clk_register_fixed_factor(NULL, "pwm_clk", "ras_ahb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "a8000000.pwm"); clk = clk_register_fixed_factor(NULL, "ssp1_clk", "ras_ahb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "a5000000.spi"); clk = clk_register_fixed_factor(NULL, "ssp2_clk", "ras_ahb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "a6000000.spi"); clk = clk_register_fixed_factor(NULL, "can0_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "c_can_platform.0"); clk = clk_register_fixed_factor(NULL, "can1_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "c_can_platform.1"); clk = clk_register_fixed_factor(NULL, "i2s_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "a9400000.i2s"); clk = clk_register_mux(NULL, "i2s_ref_clk", i2s_ref_parents, ARRAY_SIZE(i2s_ref_parents), CLK_SET_RATE_PARENT, SPEAR320_CONTROL_REG, I2S_REF_PCLK_SHIFT, I2S_REF_PCLK_MASK, 0, &_lock); clk_register_clkdev(clk, "i2s_ref_clk", NULL); clk = clk_register_fixed_factor(NULL, "i2s_sclk", "i2s_ref_clk", CLK_SET_RATE_PARENT, 1, 4); clk_register_clkdev(clk, "i2s_sclk", NULL); clk = clk_register_fixed_factor(NULL, "macb1_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, "hclk", "aa000000.eth"); clk = clk_register_fixed_factor(NULL, "macb2_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, "hclk", "ab000000.eth"); clk = clk_register_mux(NULL, "rs485_clk", uartx_parents, ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT, SPEAR320_EXT_CTRL_REG, SPEAR320_RS485_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, 
"a9300000.serial"); clk = clk_register_mux(NULL, "sdhci_clk", sdhci_parents, ARRAY_SIZE(sdhci_parents), CLK_SET_RATE_PARENT, SPEAR320_CONTROL_REG, SDHCI_PCLK_SHIFT, SDHCI_PCLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "70000000.sdhci"); clk = clk_register_mux(NULL, "smii_pclk", smii0_parents, ARRAY_SIZE(smii0_parents), 0, SPEAR320_CONTROL_REG, SMII_PCLK_SHIFT, SMII_PCLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "smii_pclk"); clk = clk_register_fixed_factor(NULL, "smii_clk", "smii_pclk", 0, 1, 1); clk_register_clkdev(clk, NULL, "smii"); clk = clk_register_mux(NULL, "uart1_clk", uartx_parents, ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT, SPEAR320_CONTROL_REG, UART1_PCLK_SHIFT, UART1_PCLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "a3000000.serial"); clk = clk_register_mux(NULL, "uart2_clk", uartx_parents, ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT, SPEAR320_EXT_CTRL_REG, SPEAR320_UART2_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "a4000000.serial"); clk = clk_register_mux(NULL, "uart3_clk", uartx_parents, ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT, SPEAR320_EXT_CTRL_REG, SPEAR320_UART3_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "a9100000.serial"); clk = clk_register_mux(NULL, "uart4_clk", uartx_parents, ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT, SPEAR320_EXT_CTRL_REG, SPEAR320_UART4_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "a9200000.serial"); clk = clk_register_mux(NULL, "uart5_clk", uartx_parents, ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT, SPEAR320_EXT_CTRL_REG, SPEAR320_UART5_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "60000000.serial"); clk = clk_register_mux(NULL, "uart6_clk", uartx_parents, ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT, SPEAR320_EXT_CTRL_REG, SPEAR320_UART6_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, 
"60100000.serial"); } #else static inline void spear320_clk_init(void __iomem *soc_config_base) { } #endif void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base) { struct clk *clk, *clk1; clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT, 32000); clk_register_clkdev(clk, "osc_32k_clk", NULL); clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT, 24000000); clk_register_clkdev(clk, "osc_24m_clk", NULL); /* clock derived from 32 KHz osc clk */ clk = clk_register_gate(NULL, "rtc-spear", "osc_32k_clk", 0, PERIP1_CLK_ENB, RTC_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "fc900000.rtc"); /* clock derived from 24 MHz osc clk */ clk = clk_register_fixed_rate(NULL, "pll3_clk", "osc_24m_clk", 0, 48000000); clk_register_clkdev(clk, "pll3_clk", NULL); clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_24m_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "fc880000.wdt"); clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "osc_24m_clk", 0, PLL1_CTR, PLL1_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL); clk_register_clkdev(clk, "vco1_clk", NULL); clk_register_clkdev(clk1, "pll1_clk", NULL); clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "osc_24m_clk", 0, PLL2_CTR, PLL2_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL); clk_register_clkdev(clk, "vco2_clk", NULL); clk_register_clkdev(clk1, "pll2_clk", NULL); /* clock derived from pll1 clk */ clk = clk_register_fixed_factor(NULL, "cpu_clk", "pll1_clk", CLK_SET_RATE_PARENT, 1, 1); clk_register_clkdev(clk, "cpu_clk", NULL); clk = clk_register_divider(NULL, "ahb_clk", "pll1_clk", CLK_SET_RATE_PARENT, CORE_CLK_CFG, HCLK_RATIO_SHIFT, HCLK_RATIO_MASK, 0, &_lock); clk_register_clkdev(clk, "ahb_clk", NULL); clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); clk_register_clkdev(clk, "uart_syn_clk", NULL); clk_register_clkdev(clk1, "uart_syn_gclk", 
NULL); clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents, ARRAY_SIZE(uart0_parents), CLK_SET_RATE_PARENT, PERIP_CLK_CFG, UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, "uart0_mclk", NULL); clk = clk_register_gate(NULL, "uart0", "uart0_mclk", CLK_SET_RATE_PARENT, PERIP1_CLK_ENB, UART_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "d0000000.serial"); clk = clk_register_aux("firda_syn_clk", "firda_syn_gclk", "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); clk_register_clkdev(clk, "firda_syn_clk", NULL); clk_register_clkdev(clk1, "firda_syn_gclk", NULL); clk = clk_register_mux(NULL, "firda_mclk", firda_parents, ARRAY_SIZE(firda_parents), CLK_SET_RATE_PARENT, PERIP_CLK_CFG, FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, "firda_mclk", NULL); clk = clk_register_gate(NULL, "firda_clk", "firda_mclk", CLK_SET_RATE_PARENT, PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "firda"); /* gpt clocks */ clk_register_gpt("gpt0_syn_clk", "pll1_clk", 0, PRSC0_CLK_CFG, gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock); clk = clk_register_mux(NULL, "gpt0_clk", gpt0_parents, ARRAY_SIZE(gpt0_parents), CLK_SET_RATE_PARENT, PERIP_CLK_CFG, GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "gpt0"); clk_register_gpt("gpt1_syn_clk", "pll1_clk", 0, PRSC1_CLK_CFG, gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock); clk = clk_register_mux(NULL, "gpt1_mclk", gpt1_parents, ARRAY_SIZE(gpt1_parents), CLK_SET_RATE_PARENT, PERIP_CLK_CFG, GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, "gpt1_mclk", NULL); clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", CLK_SET_RATE_PARENT, PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "gpt1"); clk_register_gpt("gpt2_syn_clk", "pll1_clk", 0, PRSC2_CLK_CFG, gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock); clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents, ARRAY_SIZE(gpt2_parents), CLK_SET_RATE_PARENT, 
PERIP_CLK_CFG, GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, "gpt2_mclk", NULL); clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", CLK_SET_RATE_PARENT, PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "gpt2"); /* general synths clocks */ clk = clk_register_aux("gen0_syn_clk", "gen0_syn_gclk", "pll1_clk", 0, GEN0_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); clk_register_clkdev(clk, "gen0_syn_clk", NULL); clk_register_clkdev(clk1, "gen0_syn_gclk", NULL); clk = clk_register_aux("gen1_syn_clk", "gen1_syn_gclk", "pll1_clk", 0, GEN1_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); clk_register_clkdev(clk, "gen1_syn_clk", NULL); clk_register_clkdev(clk1, "gen1_syn_gclk", NULL); clk = clk_register_mux(NULL, "gen2_3_par_clk", gen2_3_parents, ARRAY_SIZE(gen2_3_parents), 0, CORE_CLK_CFG, GEN_SYNTH2_3_CLK_SHIFT, GEN_SYNTH2_3_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, "gen2_3_par_clk", NULL); clk = clk_register_aux("gen2_syn_clk", "gen2_syn_gclk", "gen2_3_par_clk", 0, GEN2_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); clk_register_clkdev(clk, "gen2_syn_clk", NULL); clk_register_clkdev(clk1, "gen2_syn_gclk", NULL); clk = clk_register_aux("gen3_syn_clk", "gen3_syn_gclk", "gen2_3_par_clk", 0, GEN3_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); clk_register_clkdev(clk, "gen3_syn_clk", NULL); clk_register_clkdev(clk1, "gen3_syn_gclk", NULL); /* clock derived from pll3 clk */ clk = clk_register_gate(NULL, "usbh_clk", "pll3_clk", 0, PERIP1_CLK_ENB, USBH_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "e1800000.ehci"); clk_register_clkdev(clk, NULL, "e1900000.ohci"); clk_register_clkdev(clk, NULL, "e2100000.ohci"); clk = clk_register_fixed_factor(NULL, "usbh.0_clk", "usbh_clk", 0, 1, 1); clk_register_clkdev(clk, "usbh.0_clk", NULL); clk = clk_register_fixed_factor(NULL, "usbh.1_clk", "usbh_clk", 0, 1, 1); clk_register_clkdev(clk, "usbh.1_clk", NULL); clk = 
clk_register_gate(NULL, "usbd_clk", "pll3_clk", 0, PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "e1100000.usbd"); /* clock derived from ahb clk */ clk = clk_register_fixed_factor(NULL, "ahbmult2_clk", "ahb_clk", 0, 2, 1); clk_register_clkdev(clk, "ahbmult2_clk", NULL); clk = clk_register_mux(NULL, "ddr_clk", ddr_parents, ARRAY_SIZE(ddr_parents), 0, PLL_CLK_CFG, MCTR_CLK_SHIFT, MCTR_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, "ddr_clk", NULL); clk = clk_register_divider(NULL, "apb_clk", "ahb_clk", CLK_SET_RATE_PARENT, CORE_CLK_CFG, PCLK_RATIO_SHIFT, PCLK_RATIO_MASK, 0, &_lock); clk_register_clkdev(clk, "apb_clk", NULL); clk = clk_register_gate(NULL, "amem_clk", "ahb_clk", 0, AMEM_CLK_CFG, AMEM_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "amem_clk", NULL); clk = clk_register_gate(NULL, "c3_clk", "ahb_clk", 0, PERIP1_CLK_ENB, C3_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "c3_clk"); clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0, PERIP1_CLK_ENB, DMA_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "fc400000.dma"); clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0, PERIP1_CLK_ENB, GMAC_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "e0800000.eth"); clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0, PERIP1_CLK_ENB, I2C_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "d0180000.i2c"); clk = clk_register_gate(NULL, "jpeg_clk", "ahb_clk", 0, PERIP1_CLK_ENB, JPEG_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "jpeg"); clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0, PERIP1_CLK_ENB, SMI_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "fc000000.flash"); /* clock derived from apb clk */ clk = clk_register_gate(NULL, "adc_clk", "apb_clk", 0, PERIP1_CLK_ENB, ADC_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "d0080000.adc"); clk = clk_register_gate(NULL, "gpio0_clk", "apb_clk", 0, PERIP1_CLK_ENB, GPIO_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "fc980000.gpio"); clk = 
clk_register_gate(NULL, "ssp0_clk", "apb_clk", 0, PERIP1_CLK_ENB, SSP_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "d0100000.spi"); /* RAS clk enable */ clk = clk_register_gate(NULL, "ras_ahb_clk", "ahb_clk", 0, RAS_CLK_ENB, RAS_AHB_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_ahb_clk", NULL); clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0, RAS_CLK_ENB, RAS_APB_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_apb_clk", NULL); clk = clk_register_gate(NULL, "ras_32k_clk", "osc_32k_clk", 0, RAS_CLK_ENB, RAS_32K_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_32k_clk", NULL); clk = clk_register_gate(NULL, "ras_24m_clk", "osc_24m_clk", 0, RAS_CLK_ENB, RAS_24M_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_24m_clk", NULL); clk = clk_register_gate(NULL, "ras_pll1_clk", "pll1_clk", 0, RAS_CLK_ENB, RAS_PLL1_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_pll1_clk", NULL); clk = clk_register_gate(NULL, "ras_pll2_clk", "pll2_clk", 0, RAS_CLK_ENB, RAS_PLL2_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_pll2_clk", NULL); clk = clk_register_gate(NULL, "ras_pll3_clk", "pll3_clk", 0, RAS_CLK_ENB, RAS_48M_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_pll3_clk", NULL); clk = clk_register_gate(NULL, "ras_syn0_gclk", "gen0_syn_gclk", CLK_SET_RATE_PARENT, RAS_CLK_ENB, RAS_SYNT0_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_syn0_gclk", NULL); clk = clk_register_gate(NULL, "ras_syn1_gclk", "gen1_syn_gclk", CLK_SET_RATE_PARENT, RAS_CLK_ENB, RAS_SYNT1_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_syn1_gclk", NULL); clk = clk_register_gate(NULL, "ras_syn2_gclk", "gen2_syn_gclk", CLK_SET_RATE_PARENT, RAS_CLK_ENB, RAS_SYNT2_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_syn2_gclk", NULL); clk = clk_register_gate(NULL, "ras_syn3_gclk", "gen3_syn_gclk", CLK_SET_RATE_PARENT, RAS_CLK_ENB, RAS_SYNT3_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_syn3_gclk", NULL); if (of_machine_is_compatible("st,spear300")) spear300_clk_init(); else 
if (of_machine_is_compatible("st,spear310")) spear310_clk_init(); else if (of_machine_is_compatible("st,spear320")) spear320_clk_init(soc_config_base); }
gpl-2.0
dovydasvenckus/linux
fs/fat/nfs.c
1333
8016
/* fs/fat/nfs.c * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/exportfs.h> #include "fat.h" struct fat_fid { u32 i_gen; u32 i_pos_low; u16 i_pos_hi; u16 parent_i_pos_hi; u32 parent_i_pos_low; u32 parent_i_gen; }; #define FAT_FID_SIZE_WITHOUT_PARENT 3 #define FAT_FID_SIZE_WITH_PARENT (sizeof(struct fat_fid)/sizeof(u32)) /** * Look up a directory inode given its starting cluster. */ static struct inode *fat_dget(struct super_block *sb, int i_logstart) { struct msdos_sb_info *sbi = MSDOS_SB(sb); struct hlist_head *head; struct msdos_inode_info *i; struct inode *inode = NULL; head = sbi->dir_hashtable + fat_dir_hash(i_logstart); spin_lock(&sbi->dir_hash_lock); hlist_for_each_entry(i, head, i_dir_hash) { BUG_ON(i->vfs_inode.i_sb != sb); if (i->i_logstart != i_logstart) continue; inode = igrab(&i->vfs_inode); if (inode) break; } spin_unlock(&sbi->dir_hash_lock); return inode; } static struct inode *fat_ilookup(struct super_block *sb, u64 ino, loff_t i_pos) { if (MSDOS_SB(sb)->options.nfs == FAT_NFS_NOSTALE_RO) return fat_iget(sb, i_pos); else { if ((ino < MSDOS_ROOT_INO) || (ino == MSDOS_FSINFO_INO)) return NULL; return ilookup(sb, ino); } } static struct inode *__fat_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation, loff_t i_pos) { struct inode *inode = fat_ilookup(sb, ino, i_pos); if (inode && generation && (inode->i_generation != generation)) { iput(inode); inode = NULL; } if (inode == NULL && MSDOS_SB(sb)->options.nfs == FAT_NFS_NOSTALE_RO) { struct buffer_head *bh = NULL; struct msdos_dir_entry *de ; sector_t blocknr; int offset; 
fat_get_blknr_offset(MSDOS_SB(sb), i_pos, &blocknr, &offset); bh = sb_bread(sb, blocknr); if (!bh) { fat_msg(sb, KERN_ERR, "unable to read block(%llu) for building NFS inode", (llu)blocknr); return inode; } de = (struct msdos_dir_entry *)bh->b_data; /* If a file is deleted on server and client is not updated * yet, we must not build the inode upon a lookup call. */ if (IS_FREE(de[offset].name)) inode = NULL; else inode = fat_build_inode(sb, &de[offset], i_pos); brelse(bh); } return inode; } static struct inode *fat_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation) { return __fat_nfs_get_inode(sb, ino, generation, 0); } static int fat_encode_fh_nostale(struct inode *inode, __u32 *fh, int *lenp, struct inode *parent) { int len = *lenp; struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); struct fat_fid *fid = (struct fat_fid *) fh; loff_t i_pos; int type = FILEID_FAT_WITHOUT_PARENT; if (parent) { if (len < FAT_FID_SIZE_WITH_PARENT) { *lenp = FAT_FID_SIZE_WITH_PARENT; return FILEID_INVALID; } } else { if (len < FAT_FID_SIZE_WITHOUT_PARENT) { *lenp = FAT_FID_SIZE_WITHOUT_PARENT; return FILEID_INVALID; } } i_pos = fat_i_pos_read(sbi, inode); *lenp = FAT_FID_SIZE_WITHOUT_PARENT; fid->i_gen = inode->i_generation; fid->i_pos_low = i_pos & 0xFFFFFFFF; fid->i_pos_hi = (i_pos >> 32) & 0xFFFF; if (parent) { i_pos = fat_i_pos_read(sbi, parent); fid->parent_i_pos_hi = (i_pos >> 32) & 0xFFFF; fid->parent_i_pos_low = i_pos & 0xFFFFFFFF; fid->parent_i_gen = parent->i_generation; type = FILEID_FAT_WITH_PARENT; *lenp = FAT_FID_SIZE_WITH_PARENT; } return type; } /** * Map a NFS file handle to a corresponding dentry. * The dentry may or may not be connected to the filesystem root. 
*/ static struct dentry *fat_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_dentry(sb, fid, fh_len, fh_type, fat_nfs_get_inode); } static struct dentry *fat_fh_to_dentry_nostale(struct super_block *sb, struct fid *fh, int fh_len, int fh_type) { struct inode *inode = NULL; struct fat_fid *fid = (struct fat_fid *)fh; loff_t i_pos; switch (fh_type) { case FILEID_FAT_WITHOUT_PARENT: if (fh_len < FAT_FID_SIZE_WITHOUT_PARENT) return NULL; break; case FILEID_FAT_WITH_PARENT: if (fh_len < FAT_FID_SIZE_WITH_PARENT) return NULL; break; default: return NULL; } i_pos = fid->i_pos_hi; i_pos = (i_pos << 32) | (fid->i_pos_low); inode = __fat_nfs_get_inode(sb, 0, fid->i_gen, i_pos); return d_obtain_alias(inode); } /* * Find the parent for a file specified by NFS handle. * This requires that the handle contain the i_ino of the parent. */ static struct dentry *fat_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_parent(sb, fid, fh_len, fh_type, fat_nfs_get_inode); } static struct dentry *fat_fh_to_parent_nostale(struct super_block *sb, struct fid *fh, int fh_len, int fh_type) { struct inode *inode = NULL; struct fat_fid *fid = (struct fat_fid *)fh; loff_t i_pos; if (fh_len < FAT_FID_SIZE_WITH_PARENT) return NULL; switch (fh_type) { case FILEID_FAT_WITH_PARENT: i_pos = fid->parent_i_pos_hi; i_pos = (i_pos << 32) | (fid->parent_i_pos_low); inode = __fat_nfs_get_inode(sb, 0, fid->parent_i_gen, i_pos); break; } return d_obtain_alias(inode); } /* * Rebuild the parent for a directory that is not connected * to the filesystem root */ static struct inode *fat_rebuild_parent(struct super_block *sb, int parent_logstart) { int search_clus, clus_to_match; struct msdos_dir_entry *de; struct inode *parent = NULL; struct inode *dummy_grand_parent = NULL; struct fat_slot_info sinfo; struct msdos_sb_info *sbi = MSDOS_SB(sb); sector_t blknr = fat_clus_to_blknr(sbi, parent_logstart); struct 
buffer_head *parent_bh = sb_bread(sb, blknr); if (!parent_bh) { fat_msg(sb, KERN_ERR, "unable to read cluster of parent directory"); return NULL; } de = (struct msdos_dir_entry *) parent_bh->b_data; clus_to_match = fat_get_start(sbi, &de[0]); search_clus = fat_get_start(sbi, &de[1]); dummy_grand_parent = fat_dget(sb, search_clus); if (!dummy_grand_parent) { dummy_grand_parent = new_inode(sb); if (!dummy_grand_parent) { brelse(parent_bh); return parent; } dummy_grand_parent->i_ino = iunique(sb, MSDOS_ROOT_INO); fat_fill_inode(dummy_grand_parent, &de[1]); MSDOS_I(dummy_grand_parent)->i_pos = -1; } if (!fat_scan_logstart(dummy_grand_parent, clus_to_match, &sinfo)) parent = fat_build_inode(sb, sinfo.de, sinfo.i_pos); brelse(parent_bh); iput(dummy_grand_parent); return parent; } /* * Find the parent for a directory that is not currently connected to * the filesystem root. * * On entry, the caller holds d_inode(child_dir)->i_mutex. */ static struct dentry *fat_get_parent(struct dentry *child_dir) { struct super_block *sb = child_dir->d_sb; struct buffer_head *bh = NULL; struct msdos_dir_entry *de; struct inode *parent_inode = NULL; struct msdos_sb_info *sbi = MSDOS_SB(sb); if (!fat_get_dotdot_entry(d_inode(child_dir), &bh, &de)) { int parent_logstart = fat_get_start(sbi, de); parent_inode = fat_dget(sb, parent_logstart); if (!parent_inode && sbi->options.nfs == FAT_NFS_NOSTALE_RO) parent_inode = fat_rebuild_parent(sb, parent_logstart); } brelse(bh); return d_obtain_alias(parent_inode); } const struct export_operations fat_export_ops = { .fh_to_dentry = fat_fh_to_dentry, .fh_to_parent = fat_fh_to_parent, .get_parent = fat_get_parent, }; const struct export_operations fat_export_ops_nostale = { .encode_fh = fat_encode_fh_nostale, .fh_to_dentry = fat_fh_to_dentry_nostale, .fh_to_parent = fat_fh_to_parent_nostale, .get_parent = fat_get_parent, };
gpl-2.0
xergm/linux
drivers/i2c/busses/i2c-sis96x.c
1845
8528
/* Copyright (c) 2003 Mark M. Hoffman <mhoffman@lightlink.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. */ /* This module must be considered BETA unless and until the chipset manufacturer releases a datasheet. The register definitions are based on the SiS630. This module relies on quirk_sis_96x_smbus (drivers/pci/quirks.c) for just about every machine for which users have reported. If this module isn't detecting your 96x south bridge, have a look there. We assume there can only be one SiS96x with one SMBus interface. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/stddef.h> #include <linux/ioport.h> #include <linux/i2c.h> #include <linux/acpi.h> #include <linux/io.h> /* base address register in PCI config space */ #define SIS96x_BAR 0x04 /* SiS96x SMBus registers */ #define SMB_STS 0x00 #define SMB_EN 0x01 #define SMB_CNT 0x02 #define SMB_HOST_CNT 0x03 #define SMB_ADDR 0x04 #define SMB_CMD 0x05 #define SMB_PCOUNT 0x06 #define SMB_COUNT 0x07 #define SMB_BYTE 0x08 #define SMB_DEV_ADDR 0x10 #define SMB_DB0 0x11 #define SMB_DB1 0x12 #define SMB_SAA 0x13 /* register count for request_region */ #define SMB_IOSIZE 0x20 /* Other settings */ #define MAX_TIMEOUT 500 /* SiS96x SMBus constants */ #define SIS96x_QUICK 0x00 #define SIS96x_BYTE 0x01 #define SIS96x_BYTE_DATA 0x02 #define SIS96x_WORD_DATA 0x03 #define SIS96x_PROC_CALL 0x04 #define SIS96x_BLOCK_DATA 0x05 static struct pci_driver sis96x_driver; static struct i2c_adapter sis96x_adapter; static u16 sis96x_smbus_base; static 
inline u8 sis96x_read(u8 reg) { return inb(sis96x_smbus_base + reg) ; } static inline void sis96x_write(u8 reg, u8 data) { outb(data, sis96x_smbus_base + reg) ; } /* Execute a SMBus transaction. int size is from SIS96x_QUICK to SIS96x_BLOCK_DATA */ static int sis96x_transaction(int size) { int temp; int result = 0; int timeout = 0; dev_dbg(&sis96x_adapter.dev, "SMBus transaction %d\n", size); /* Make sure the SMBus host is ready to start transmitting */ if (((temp = sis96x_read(SMB_CNT)) & 0x03) != 0x00) { dev_dbg(&sis96x_adapter.dev, "SMBus busy (0x%02x). " "Resetting...\n", temp); /* kill the transaction */ sis96x_write(SMB_HOST_CNT, 0x20); /* check it again */ if (((temp = sis96x_read(SMB_CNT)) & 0x03) != 0x00) { dev_dbg(&sis96x_adapter.dev, "Failed (0x%02x)\n", temp); return -EBUSY; } else { dev_dbg(&sis96x_adapter.dev, "Successful\n"); } } /* Turn off timeout interrupts, set fast host clock */ sis96x_write(SMB_CNT, 0x20); /* clear all (sticky) status flags */ temp = sis96x_read(SMB_STS); sis96x_write(SMB_STS, temp & 0x1e); /* start the transaction by setting bit 4 and size bits */ sis96x_write(SMB_HOST_CNT, 0x10 | (size & 0x07)); /* We will always wait for a fraction of a second! */ do { msleep(1); temp = sis96x_read(SMB_STS); } while (!(temp & 0x0e) && (timeout++ < MAX_TIMEOUT)); /* If the SMBus is still busy, we give up */ if (timeout > MAX_TIMEOUT) { dev_dbg(&sis96x_adapter.dev, "SMBus Timeout! (0x%02x)\n", temp); result = -ETIMEDOUT; } /* device error - probably missing ACK */ if (temp & 0x02) { dev_dbg(&sis96x_adapter.dev, "Failed bus transaction!\n"); result = -ENXIO; } /* bus collision */ if (temp & 0x04) { dev_dbg(&sis96x_adapter.dev, "Bus collision!\n"); result = -EIO; } /* Finish up by resetting the bus */ sis96x_write(SMB_STS, temp); if ((temp = sis96x_read(SMB_STS))) { dev_dbg(&sis96x_adapter.dev, "Failed reset at " "end of transaction! (0x%02x)\n", temp); } return result; } /* Return negative errno on error. 
 */
/*
 * Execute one SMBus transaction of the requested @size on the SiS96x
 * controller.  Programs the address/command/data registers, kicks off the
 * transaction, then reads back results for read/proc-call cycles.
 * Returns 0 on success or a negative errno.
 */
static s32 sis96x_access(struct i2c_adapter * adap, u16 addr,
			 unsigned short flags, char read_write,
			 u8 command, int size, union i2c_smbus_data * data)
{
	int status;

	switch (size) {
	case I2C_SMBUS_QUICK:
		/* 7-bit slave address in bits 7:1, R/W bit in bit 0 */
		sis96x_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
		size = SIS96x_QUICK;
		break;

	case I2C_SMBUS_BYTE:
		sis96x_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
		if (read_write == I2C_SMBUS_WRITE)
			sis96x_write(SMB_CMD, command);
		size = SIS96x_BYTE;
		break;

	case I2C_SMBUS_BYTE_DATA:
		sis96x_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
		sis96x_write(SMB_CMD, command);
		if (read_write == I2C_SMBUS_WRITE)
			sis96x_write(SMB_BYTE, data->byte);
		size = SIS96x_BYTE_DATA;
		break;

	case I2C_SMBUS_PROC_CALL:
	case I2C_SMBUS_WORD_DATA:
		sis96x_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
		sis96x_write(SMB_CMD, command);
		if (read_write == I2C_SMBUS_WRITE) {
			/* 16-bit word goes out little-endian, low byte first */
			sis96x_write(SMB_BYTE, data->word & 0xff);
			sis96x_write(SMB_BYTE + 1, (data->word & 0xff00) >> 8);
		}
		size = (size == I2C_SMBUS_PROC_CALL ?
			SIS96x_PROC_CALL : SIS96x_WORD_DATA);
		break;

	default:
		dev_warn(&adap->dev, "Unsupported transaction %d\n", size);
		return -EOPNOTSUPP;
	}

	status = sis96x_transaction(size);
	if (status)
		return status;

	/* Writes and quick commands carry no reply data (except proc call,
	 * which always reads back a word below). */
	if ((size != SIS96x_PROC_CALL) &&
	    ((read_write == I2C_SMBUS_WRITE) || (size == SIS96x_QUICK)))
		return 0;

	switch (size) {
	case SIS96x_BYTE:
	case SIS96x_BYTE_DATA:
		data->byte = sis96x_read(SMB_BYTE);
		break;

	case SIS96x_WORD_DATA:
	case SIS96x_PROC_CALL:
		data->word = sis96x_read(SMB_BYTE) +
		    (sis96x_read(SMB_BYTE + 1) << 8);
		break;
	}

	return 0;
}

/* Advertise the SMBus protocol subset implemented by sis96x_access(). */
static u32 sis96x_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
	    I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
	    I2C_FUNC_SMBUS_PROC_CALL;
}

static const struct i2c_algorithm smbus_algorithm = {
	.smbus_xfer	= sis96x_access,
	.functionality	= sis96x_func,
};

static struct i2c_adapter sis96x_adapter = {
	.owner		= THIS_MODULE,
	.class		= I2C_CLASS_HWMON | I2C_CLASS_SPD,
	.algo		= &smbus_algorithm,
};

static const struct pci_device_id sis96x_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) },
	{ 0, }
};

MODULE_DEVICE_TABLE (pci, sis96x_ids);

/*
 * Bind to the (single) SiS96x SMBus PCI function: validate the device
 * class, locate the I/O BAR, reserve the register region and register
 * the i2c adapter.  Only one device instance is supported, tracked via
 * the file-scope sis96x_smbus_base.
 */
static int sis96x_probe(struct pci_dev *dev,
				const struct pci_device_id *id)
{
	u16 ww = 0;
	int retval;

	if (sis96x_smbus_base) {
		dev_err(&dev->dev, "Only one device supported.\n");
		return -EBUSY;
	}

	pci_read_config_word(dev, PCI_CLASS_DEVICE, &ww);
	if (PCI_CLASS_SERIAL_SMBUS != ww) {
		dev_err(&dev->dev, "Unsupported device class 0x%04x!\n", ww);
		return -ENODEV;
	}

	sis96x_smbus_base = pci_resource_start(dev, SIS96x_BAR);
	if (!sis96x_smbus_base) {
		dev_err(&dev->dev, "SiS96x SMBus base address "
			"not initialized!\n");
		return -EINVAL;
	}
	dev_info(&dev->dev, "SiS96x SMBus base address: 0x%04x\n",
			sis96x_smbus_base);

	retval = acpi_check_resource_conflict(&dev->resource[SIS96x_BAR]);
	if (retval)
		return -ENODEV;

	/* Everything is happy, let's grab the memory and set things up. */
	if (!request_region(sis96x_smbus_base, SMB_IOSIZE,
			    sis96x_driver.name)) {
		dev_err(&dev->dev, "SMBus registers 0x%04x-0x%04x "
			"already in use!\n", sis96x_smbus_base,
			sis96x_smbus_base + SMB_IOSIZE - 1);

		sis96x_smbus_base = 0;
		return -EINVAL;
	}

	/* set up the sysfs linkage to our parent device */
	sis96x_adapter.dev.parent = &dev->dev;

	snprintf(sis96x_adapter.name, sizeof(sis96x_adapter.name),
		"SiS96x SMBus adapter at 0x%04x", sis96x_smbus_base);

	if ((retval = i2c_add_adapter(&sis96x_adapter))) {
		dev_err(&dev->dev, "Couldn't register adapter!\n");
		release_region(sis96x_smbus_base, SMB_IOSIZE);
		sis96x_smbus_base = 0;
	}

	return retval;
}

/* Tear down the adapter and release the I/O region claimed in probe. */
static void sis96x_remove(struct pci_dev *dev)
{
	if (sis96x_smbus_base) {
		i2c_del_adapter(&sis96x_adapter);
		release_region(sis96x_smbus_base, SMB_IOSIZE);
		sis96x_smbus_base = 0;
	}
}

static struct pci_driver sis96x_driver = {
	.name		= "sis96x_smbus",
	.id_table	= sis96x_ids,
	.probe		= sis96x_probe,
	.remove		= sis96x_remove,
};

module_pci_driver(sis96x_driver);

MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>");
MODULE_DESCRIPTION("SiS96x SMBus driver");
MODULE_LICENSE("GPL");
gpl-2.0
xNombre/android_kernel_samsung_golden
net/ethernet/eth.c
2357
10908
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Ethernet-type device handling. * * Version: @(#)eth.c 1.0.7 05/25/93 * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Mark Evans, <evansmp@uhura.aston.ac.uk> * Florian La Roche, <rzsfl@rz.uni-sb.de> * Alan Cox, <gw4pts@gw4pts.ampr.org> * * Fixes: * Mr Linux : Arp problems * Alan Cox : Generic queue tidyup (very tiny here) * Alan Cox : eth_header ntohs should be htons * Alan Cox : eth_rebuild_header missing an htons and * minor other things. * Tegge : Arp bug fixes. * Florian : Removed many unnecessary functions, code cleanup * and changes for new arp and skbuff. * Alan Cox : Redid header building to reflect new format. * Alan Cox : ARP only when compiled with CONFIG_INET * Greg Page : 802.2 and SNAP stuff. * Alan Cox : MAC layer pointers/new format. * Paul Gortmaker : eth_copy_and_sum shouldn't csum padding. * Alan Cox : Protect against forwarding explosions with * older network drivers and IFF_ALLMULTI. * Christer Weinigel : Better rebuild header message. * Andrew Morton : 26Feb01: kill ether_setup() - use netdev_boot_setup(). * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/ip.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/if_ether.h> #include <net/dst.h> #include <net/arp.h> #include <net/sock.h> #include <net/ipv6.h> #include <net/ip.h> #include <net/dsa.h> #include <asm/uaccess.h> #include <asm/system.h> __setup("ether=", netdev_boot_setup); /** * eth_header - create the Ethernet header * @skb: buffer to alter * @dev: source device * @type: Ethernet type field * @daddr: destination address (NULL leave destination address) * @saddr: source address (NULL use device source address) * @len: packet length (<= skb->len) * * * Set the protocol type. For a packet of type ETH_P_802_3/2 we put the length * in here instead. */ int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len) { struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN); if (type != ETH_P_802_3 && type != ETH_P_802_2) eth->h_proto = htons(type); else eth->h_proto = htons(len); /* * Set the source hardware address. */ if (!saddr) saddr = dev->dev_addr; memcpy(eth->h_source, saddr, ETH_ALEN); if (daddr) { memcpy(eth->h_dest, daddr, ETH_ALEN); return ETH_HLEN; } /* * Anyway, the loopback-device should never use this function... */ if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) { memset(eth->h_dest, 0, ETH_ALEN); return ETH_HLEN; } return -ETH_HLEN; } EXPORT_SYMBOL(eth_header); /** * eth_rebuild_header- rebuild the Ethernet MAC header. * @skb: socket buffer to update * * This is called after an ARP or IPV6 ndisc it's resolution on this * sk_buff. We now let protocol (ARP) fill in the other fields. * * This routine CANNOT use cached dst->neigh! 
* Really, it is used only when dst->neigh is wrong. */ int eth_rebuild_header(struct sk_buff *skb) { struct ethhdr *eth = (struct ethhdr *)skb->data; struct net_device *dev = skb->dev; switch (eth->h_proto) { #ifdef CONFIG_INET case htons(ETH_P_IP): return arp_find(eth->h_dest, skb); #endif default: printk(KERN_DEBUG "%s: unable to resolve type %X addresses.\n", dev->name, ntohs(eth->h_proto)); memcpy(eth->h_source, dev->dev_addr, ETH_ALEN); break; } return 0; } EXPORT_SYMBOL(eth_rebuild_header); /** * eth_type_trans - determine the packet's protocol ID. * @skb: received socket data * @dev: receiving network device * * The rule here is that we * assume 802.3 if the type field is short enough to be a length. * This is normal practice and works for any 'now in use' protocol. */ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) { struct ethhdr *eth; skb->dev = dev; skb_reset_mac_header(skb); skb_pull_inline(skb, ETH_HLEN); eth = eth_hdr(skb); if (unlikely(is_multicast_ether_addr(eth->h_dest))) { if (!compare_ether_addr_64bits(eth->h_dest, dev->broadcast)) skb->pkt_type = PACKET_BROADCAST; else skb->pkt_type = PACKET_MULTICAST; } /* * This ALLMULTI check should be redundant by 1.4 * so don't forget to remove it. * * Seems, you forgot to remove it. All silly devices * seems to set IFF_PROMISC. */ else if (1 /*dev->flags&IFF_PROMISC */ ) { if (unlikely(compare_ether_addr_64bits(eth->h_dest, dev->dev_addr))) skb->pkt_type = PACKET_OTHERHOST; } /* * Some variants of DSA tagging don't have an ethertype field * at all, so we check here whether one of those tagging * variants has been configured on the receiving interface, * and if so, set skb->protocol without looking at the packet. */ if (netdev_uses_dsa_tags(dev)) return htons(ETH_P_DSA); if (netdev_uses_trailer_tags(dev)) return htons(ETH_P_TRAILER); if (ntohs(eth->h_proto) >= 1536) return eth->h_proto; /* * This is a magic hack to spot IPX packets. 
Older Novell breaks * the protocol design and runs IPX over 802.3 without an 802.2 LLC * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This * won't work for fault tolerant netware but does for the rest. */ if (skb->len >= 2 && *(unsigned short *)(skb->data) == 0xFFFF) return htons(ETH_P_802_3); /* * Real 802.2 LLC */ return htons(ETH_P_802_2); } EXPORT_SYMBOL(eth_type_trans); /** * eth_header_parse - extract hardware address from packet * @skb: packet to extract header from * @haddr: destination buffer */ int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr) { const struct ethhdr *eth = eth_hdr(skb); memcpy(haddr, eth->h_source, ETH_ALEN); return ETH_ALEN; } EXPORT_SYMBOL(eth_header_parse); /** * eth_header_cache - fill cache entry from neighbour * @neigh: source neighbour * @hh: destination cache entry * Create an Ethernet header template from the neighbour. */ int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh) { __be16 type = hh->hh_type; struct ethhdr *eth; const struct net_device *dev = neigh->dev; eth = (struct ethhdr *) (((u8 *) hh->hh_data) + (HH_DATA_OFF(sizeof(*eth)))); if (type == htons(ETH_P_802_3)) return -1; eth->h_proto = type; memcpy(eth->h_source, dev->dev_addr, ETH_ALEN); memcpy(eth->h_dest, neigh->ha, ETH_ALEN); hh->hh_len = ETH_HLEN; return 0; } EXPORT_SYMBOL(eth_header_cache); /** * eth_header_cache_update - update cache entry * @hh: destination cache entry * @dev: network device * @haddr: new hardware address * * Called by Address Resolution module to notify changes in address. */ void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev, const unsigned char *haddr) { memcpy(((u8 *) hh->hh_data) + HH_DATA_OFF(sizeof(struct ethhdr)), haddr, ETH_ALEN); } EXPORT_SYMBOL(eth_header_cache_update); /** * eth_mac_addr - set new Ethernet hardware address * @dev: network device * @p: socket address * Change hardware address of device. 
* * This doesn't change hardware matching, so needs to be overridden * for most real devices. */ int eth_mac_addr(struct net_device *dev, void *p) { struct sockaddr *addr = p; if (netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); return 0; } EXPORT_SYMBOL(eth_mac_addr); /** * eth_change_mtu - set new MTU size * @dev: network device * @new_mtu: new Maximum Transfer Unit * * Allow changing MTU size. Needs to be overridden for devices * supporting jumbo frames. */ int eth_change_mtu(struct net_device *dev, int new_mtu) { if (new_mtu < 68 || new_mtu > ETH_DATA_LEN) return -EINVAL; dev->mtu = new_mtu; return 0; } EXPORT_SYMBOL(eth_change_mtu); int eth_validate_addr(struct net_device *dev) { if (!is_valid_ether_addr(dev->dev_addr)) return -EADDRNOTAVAIL; return 0; } EXPORT_SYMBOL(eth_validate_addr); const struct header_ops eth_header_ops ____cacheline_aligned = { .create = eth_header, .parse = eth_header_parse, .rebuild = eth_rebuild_header, .cache = eth_header_cache, .cache_update = eth_header_cache_update, }; /** * ether_setup - setup Ethernet network device * @dev: network device * Fill in the fields of the device structure with Ethernet-generic values. */ void ether_setup(struct net_device *dev) { dev->header_ops = &eth_header_ops; dev->type = ARPHRD_ETHER; dev->hard_header_len = ETH_HLEN; dev->mtu = ETH_DATA_LEN; dev->addr_len = ETH_ALEN; dev->tx_queue_len = 1000; /* Ethernet wants good queues */ dev->flags = IFF_BROADCAST|IFF_MULTICAST; dev->priv_flags = IFF_TX_SKB_SHARING; memset(dev->broadcast, 0xFF, ETH_ALEN); } EXPORT_SYMBOL(ether_setup); /** * alloc_etherdev_mqs - Allocates and sets up an Ethernet device * @sizeof_priv: Size of additional driver-private structure to be allocated * for this Ethernet device * @txqs: The number of TX queues this device has. * @rxqs: The number of RX queues this device has. 
* * Fill in the fields of the device structure with Ethernet-generic * values. Basically does everything except registering the device. * * Constructs a new net device, complete with a private data area of * size (sizeof_priv). A 32-byte (not bit) alignment is enforced for * this private data area. */ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, unsigned int rxqs) { return alloc_netdev_mqs(sizeof_priv, "eth%d", ether_setup, txqs, rxqs); } EXPORT_SYMBOL(alloc_etherdev_mqs); static size_t _format_mac_addr(char *buf, int buflen, const unsigned char *addr, int len) { int i; char *cp = buf; for (i = 0; i < len; i++) { cp += scnprintf(cp, buflen - (cp - buf), "%02x", addr[i]); if (i == len - 1) break; cp += scnprintf(cp, buflen - (cp - buf), ":"); } return cp - buf; } ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len) { size_t l; l = _format_mac_addr(buf, PAGE_SIZE, addr, len); l += scnprintf(buf + l, PAGE_SIZE - l, "\n"); return (ssize_t)l; } EXPORT_SYMBOL(sysfs_format_mac);
gpl-2.0
jcadduono/nethunter_kernel_g5
arch/tile/kernel/tlb.c
2357
3098
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 */

#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/hugetlb.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
#include <hv/hypervisor.h>

/* From tlbflush.h */
DEFINE_PER_CPU(int, current_asid);
int min_asid, max_asid;

/*
 * Note that we flush the L1I (for VM_EXEC pages) as well as the TLB
 * so that when we are unmapping an executable page, we also flush it.
 * Combined with flushing the L1I at context switch time, this means
 * we don't have to do any other icache flushes.
 */

/* Flush all TLB entries for @mm on every cpu that has used it, plus the
 * L1 icache on those cpus (see the note above). */
void flush_tlb_mm(struct mm_struct *mm)
{
	HV_Remote_ASID asids[NR_CPUS];
	int i = 0, cpu;
	/* Build a (grid position, ASID) tuple for each cpu in the mm's mask. */
	for_each_cpu(cpu, mm_cpumask(mm)) {
		HV_Remote_ASID *asid = &asids[i++];
		asid->y = cpu / smp_topology.width;
		asid->x = cpu % smp_topology.width;
		asid->asid = per_cpu(current_asid, cpu);
	}
	flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(mm),
		     0, 0, 0, NULL, asids, i);
}

/* Convenience wrapper: flush the current task's address space. */
void flush_tlb_current_task(void)
{
	flush_tlb_mm(current->mm);
}

/* Flush the single page at @va in @mm, using the vma's page size
 * (may be a huge page); evict the L1I as well for executable mappings. */
void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm,
		       unsigned long va)
{
	unsigned long size = vma_kernel_pagesize(vma);
	int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
	flush_remote(0, cache, mm_cpumask(mm),
		     va, size, size, mm_cpumask(mm), NULL, 0);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	flush_tlb_page_mm(vma, vma->vm_mm, va);
}
EXPORT_SYMBOL(flush_tlb_page);

/* Flush the range [start, end) of @vma's mm on all cpus using the mm. */
void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	unsigned long size = vma_kernel_pagesize(vma);
	struct mm_struct *mm = vma->vm_mm;
	int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
	flush_remote(0, cache, mm_cpumask(mm), start, end - start, size,
		     mm_cpumask(mm), NULL, 0);
}

/* Walk every hypervisor-reported virtual address range and flush it on
 * all online cpus, at both small-page and huge-page granularity. */
void flush_tlb_all(void)
{
	int i;
	for (i = 0; ; ++i) {
		HV_VirtAddrRange r = hv_inquire_virtual(i);
		if (r.size == 0)
			break;
		flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask,
			     r.start, r.size, PAGE_SIZE, cpu_online_mask,
			     NULL, 0);
		flush_remote(0, 0, NULL,
			     r.start, r.size, HPAGE_SIZE, cpu_online_mask,
			     NULL, 0);
	}
}

/*
 * Callers need to flush the L1I themselves if necessary, e.g. for
 * kernel module unload.  Otherwise we assume callers are not using
 * executable pgprot_t's.  Using EVICT_L1I means that dataplane cpus
 * will get an unnecessary interrupt otherwise.
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_remote(0, 0, NULL,
		     start, end - start, PAGE_SIZE, cpu_online_mask, NULL, 0);
}
gpl-2.0
ownhere/samsung-kernel-sgs3-ownhere
fs/reiserfs/file.c
2613
10501
/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

#include <linux/time.h>
#include <linux/reiserfs_fs.h>
#include <linux/reiserfs_acl.h>
#include <linux/reiserfs_xattr.h>
#include <asm/uaccess.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/quotaops.h>

/*
** We pack the tails of files on file close, not at the time they are written.
** This implies an unnecessary copy of the tail and an unnecessary indirect item
** insertion/balancing, for files that are written in one write.
** It avoids unnecessary tail packings (balances) for files that are written in
** multiple writes and are small enough to have tails.
**
** file_release is called by the VFS layer when the file is closed.  If
** this is the last open file descriptor, and the file
** small enough to have a tail, and the tail is currently in an
** unformatted node, the tail is converted back into a direct item.
**
** We use reiserfs_truncate_file to pack the tail, since it already has
** all the conditions coded.
*/
static int reiserfs_file_release(struct inode *inode, struct file *filp)
{

	struct reiserfs_transaction_handle th;
	int err;
	int jbegin_failure = 0;

	BUG_ON(!S_ISREG(inode->i_mode));

	/* Fast path: if this is not the last opener, just drop our count.
	 * atomic_add_unless fails only when openers is already 1. */
        if (atomic_add_unless(&REISERFS_I(inode)->openers, -1, 1))
		return 0;

	mutex_lock(&(REISERFS_I(inode)->tailpack));

        if (!atomic_dec_and_test(&REISERFS_I(inode)->openers)) {
		mutex_unlock(&(REISERFS_I(inode)->tailpack));
		return 0;
	}

	/* fast out for when nothing needs to be done */
	if ((!(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) ||
	     !tail_has_to_be_packed(inode)) &&
	    REISERFS_I(inode)->i_prealloc_count <= 0) {
		mutex_unlock(&(REISERFS_I(inode)->tailpack));
		return 0;
	}

	reiserfs_write_lock(inode->i_sb);
	/* freeing preallocation only involves relogging blocks that
	 * are already in the current transaction.  preallocation gets
	 * freed at the end of each transaction, so it is impossible for
	 * us to log any additional blocks (including quota blocks)
	 */
	err = journal_begin(&th, inode->i_sb, 1);
	if (err) {
		/* uh oh, we can't allow the inode to go away while there
		 * is still preallocation blocks pending.  Try to join the
		 * aborted transaction
		 */
		jbegin_failure = err;
		err = journal_join_abort(&th, inode->i_sb, 1);

		if (err) {
			/* hmpf, our choices here aren't good.  We can pin the inode
			 * which will disallow unmount from every happening, we can
			 * do nothing, which will corrupt random memory on unmount,
			 * or we can forcibly remove the file from the preallocation
			 * list, which will leak blocks on disk.  Lets pin the inode
			 * and let the admin know what is going on.
			 */
			igrab(inode);
			reiserfs_warning(inode->i_sb, "clm-9001",
					 "pinning inode %lu because the "
					 "preallocation can't be freed",
					 inode->i_ino);
			goto out;
		}
	}
	reiserfs_update_inode_transaction(inode);

#ifdef REISERFS_PREALLOCATE
	reiserfs_discard_prealloc(&th, inode);
#endif
	err = journal_end(&th, inode->i_sb, 1);

	/* copy back the error code from journal_begin */
	if (!err)
		err = jbegin_failure;

	if (!err &&
	    (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) &&
	    tail_has_to_be_packed(inode)) {

		/* if regular file is released by last holder and it has been
		   appended (we append by unformatted node only) or its direct
		   item(s) had to be converted, then it may have to be
		   indirect2direct converted */
		err = reiserfs_truncate_file(inode, 0);
	}
      out:
	reiserfs_write_unlock(inode->i_sb);
	mutex_unlock(&(REISERFS_I(inode)->tailpack));
	return err;
}

/* Track openers so release knows when it is the last close; serializes
 * against a concurrent final-close tail-packing via the tailpack mutex. */
static int reiserfs_file_open(struct inode *inode, struct file *file)
{
	int err = dquot_file_open(inode, file);
        if (!atomic_inc_not_zero(&REISERFS_I(inode)->openers)) {
		/* somebody might be tailpacking on final close; wait for it */
	        mutex_lock(&(REISERFS_I(inode)->tailpack));
	        atomic_inc(&REISERFS_I(inode)->openers);
		mutex_unlock(&(REISERFS_I(inode)->tailpack));
	}
	return err;
}

/* VFS ->truncate hook: truncate with tail-update, under the tailpack mutex. */
static void reiserfs_vfs_truncate_file(struct inode *inode)
{
	mutex_lock(&(REISERFS_I(inode)->tailpack));
	reiserfs_truncate_file(inode, 1);
	mutex_unlock(&(REISERFS_I(inode)->tailpack));
}

/* Sync a reiserfs file. */

/*
 * FIXME: sync_mapping_buffers() never has anything to sync.  Can
 * be removed...
 */
static int reiserfs_sync_file(struct file *filp, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	int err;
	int barrier_done;

	BUG_ON(!S_ISREG(inode->i_mode));
	err = sync_mapping_buffers(inode->i_mapping);
	reiserfs_write_lock(inode->i_sb);
	barrier_done = reiserfs_commit_for_inode(inode);
	reiserfs_write_unlock(inode->i_sb);
	if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb))
		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
	if (barrier_done < 0)
		return barrier_done;
	return (err < 0) ? -EIO : 0;
}

/* taken fs/buffer.c:__block_commit_write */
/* Mark the buffers covering [from, to) of @page up-to-date and dirty (or
 * journal them when data logging / data=ordered is enabled), then set the
 * page up-to-date if every buffer ended up valid. */
int reiserfs_commit_page(struct inode *inode, struct page *page,
			 unsigned from, unsigned to)
{
	unsigned block_start, block_end;
	int partial = 0;
	unsigned blocksize;
	struct buffer_head *bh, *head;
	unsigned long i_size_index = inode->i_size >> PAGE_CACHE_SHIFT;
	int new;
	int logit = reiserfs_file_data_log(inode);
	struct super_block *s = inode->i_sb;
	int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize;
	struct reiserfs_transaction_handle th;
	int ret = 0;

	th.t_trans_id = 0;
	blocksize = 1 << inode->i_blkbits;

	if (logit) {
		reiserfs_write_lock(s);
		ret = journal_begin(&th, s, bh_per_page + 1);
		if (ret)
			goto drop_write_lock;
		reiserfs_update_inode_transaction(inode);
	}
	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		new = buffer_new(bh);
		clear_buffer_new(bh);
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (!buffer_uptodate(bh))
				partial = 1;
		} else {
			set_buffer_uptodate(bh);
			if (logit) {
				reiserfs_prepare_for_journal(s, bh, 1);
				journal_mark_dirty(&th, s, bh);
			} else if (!buffer_dirty(bh)) {
				mark_buffer_dirty(bh);
				/* do data=ordered on any page past the end
				 * of file and any buffer marked BH_New.
				 */
				if (reiserfs_data_ordered(inode->i_sb) &&
				    (new || page->index >= i_size_index)) {
					reiserfs_add_ordered_list(inode, bh);
				}
			}
		}
	}
	if (logit) {
		ret = journal_end(&th, s, bh_per_page + 1);
	      drop_write_lock:
		reiserfs_write_unlock(s);
	}
	/*
	 * If this is a partial write which happened to make all buffers
	 * uptodate then we can optimize away a bogus readpage() for
	 * the next read(). Here we 'discover' whether the page went
	 * uptodate as a result of this (potentially partial) write.
	 */
	if (!partial)
		SetPageUptodate(page);
	return ret;
}

/* Write @count bytes at position @ppos in a file indicated by @file
   from the buffer @buf.

   generic_file_write() is only appropriate for filesystems that are not seeking to optimize performance and want
   something simple that works.  It is not for serious use by general purpose filesystems, excepting the one that it was
   written for (ext2/3).  This is for several reasons:

   * It has no understanding of any filesystem specific optimizations.

   * It enters the filesystem repeatedly for each page that is written.

   * It depends on reiserfs_get_block() function which if implemented by reiserfs performs costly search_by_key
   * operation for each page it is supplied with. By contrast reiserfs_file_write() feeds as much as possible at a time
   * to reiserfs which allows for fewer tree traversals.

   * Each indirect pointer insertion takes a lot of cpu, because it involves memory moves inside of blocks.

   * Asking the block allocation code for blocks one at a time is slightly less efficient.

   All of these reasons for not using only generic file write were understood back when reiserfs was first miscoded to
   use it, but we were in a hurry to make code freeze, and so it couldn't be revised then.  This new code should make
   things right finally.

   Future Features: providing search_by_key with hints.

*/
static ssize_t reiserfs_file_write(struct file *file,	/* the file we are going to write into */
				   const char __user * buf,	/*  pointer to user supplied data
								   (in userspace) */
				   size_t count,	/* amount of bytes to write */
				   loff_t * ppos	/* pointer to position in file that we start writing at. Should be updated to
							 * new current position before returning. */
				   )
{
	struct inode *inode = file->f_path.dentry->d_inode;	// Inode of the file that we are writing to.
	/* To simplify coding at this time, we store
	   locked pages in array for now */
	struct reiserfs_transaction_handle th;
	th.t_trans_id = 0;

	/* If a filesystem is converted from 3.5 to 3.6, we'll have v3.5 items
	* lying around (most of the disk, in fact). Despite the filesystem
	* now being a v3.6 format, the old items still can't support large
	* file sizes. Catch this case here, as the rest of the VFS layer is
	* oblivious to the different limitations between old and new items.
	* reiserfs_setattr catches this for truncates. This chunk is lifted
	* from generic_write_checks. */
	if (get_inode_item_key_version (inode) == KEY_FORMAT_3_5 &&
	    *ppos + count > MAX_NON_LFS) {
		if (*ppos >= MAX_NON_LFS) {
			return -EFBIG;
		}
		if (count > MAX_NON_LFS - (unsigned long)*ppos)
			count = MAX_NON_LFS - (unsigned long)*ppos;
	}

	return do_sync_write(file, buf, count, ppos);
}

const struct file_operations reiserfs_file_operations = {
	.read = do_sync_read,
	.write = reiserfs_file_write,
	.unlocked_ioctl = reiserfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = reiserfs_compat_ioctl,
#endif
	.mmap = generic_file_mmap,
	.open = reiserfs_file_open,
	.release = reiserfs_file_release,
	.fsync = reiserfs_sync_file,
	.aio_read = generic_file_aio_read,
	.aio_write = generic_file_aio_write,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.llseek = generic_file_llseek,
};

const struct inode_operations reiserfs_file_inode_operations = {
	.truncate = reiserfs_vfs_truncate_file,
	.setattr = reiserfs_setattr,
	.setxattr = reiserfs_setxattr,
	.getxattr = reiserfs_getxattr,
	.listxattr = reiserfs_listxattr,
	.removexattr = reiserfs_removexattr,
	.permission = reiserfs_permission,
};
gpl-2.0
schqiushui/kernel_kk444_sense_m8ace
security/integrity/ima/ima_queue.c
7221
3993
/* * Copyright (C) 2005,2006,2007,2008 IBM Corporation * * Authors: * Serge Hallyn <serue@us.ibm.com> * Reiner Sailer <sailer@watson.ibm.com> * Mimi Zohar <zohar@us.ibm.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. * * File: ima_queue.c * Implements queues that store template measurements and * maintains aggregate over the stored measurements * in the pre-configured TPM PCR (if available). * The measurement list is append-only. No entry is * ever removed or changed during the boot-cycle. */ #include <linux/module.h> #include <linux/rculist.h> #include <linux/slab.h> #include "ima.h" #define AUDIT_CAUSE_LEN_MAX 32 LIST_HEAD(ima_measurements); /* list of all measurements */ /* key: inode (before secure-hashing a file) */ struct ima_h_table ima_htable = { .len = ATOMIC_LONG_INIT(0), .violations = ATOMIC_LONG_INIT(0), .queue[0 ... IMA_MEASURE_HTABLE_SIZE - 1] = HLIST_HEAD_INIT }; /* mutex protects atomicity of extending measurement list * and extending the TPM PCR aggregate. Since tpm_extend can take * long (and the tpm driver uses a mutex), we can't use the spinlock. */ static DEFINE_MUTEX(ima_extend_list_mutex); /* lookup up the digest value in the hash table, and return the entry */ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value) { struct ima_queue_entry *qe, *ret = NULL; unsigned int key; struct hlist_node *pos; int rc; key = ima_hash_key(digest_value); rcu_read_lock(); hlist_for_each_entry_rcu(qe, pos, &ima_htable.queue[key], hnext) { rc = memcmp(qe->entry->digest, digest_value, IMA_DIGEST_SIZE); if (rc == 0) { ret = qe; break; } } rcu_read_unlock(); return ret; } /* ima_add_template_entry helper function: * - Add template entry to measurement list and hash table. * * (Called with ima_extend_list_mutex held.) 
*/ static int ima_add_digest_entry(struct ima_template_entry *entry) { struct ima_queue_entry *qe; unsigned int key; qe = kmalloc(sizeof(*qe), GFP_KERNEL); if (qe == NULL) { pr_err("IMA: OUT OF MEMORY ERROR creating queue entry.\n"); return -ENOMEM; } qe->entry = entry; INIT_LIST_HEAD(&qe->later); list_add_tail_rcu(&qe->later, &ima_measurements); atomic_long_inc(&ima_htable.len); key = ima_hash_key(entry->digest); hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]); return 0; } static int ima_pcr_extend(const u8 *hash) { int result = 0; if (!ima_used_chip) return result; result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash); if (result != 0) pr_err("IMA: Error Communicating to TPM chip, result: %d\n", result); return result; } /* Add template entry to the measurement list and hash table, * and extend the pcr. */ int ima_add_template_entry(struct ima_template_entry *entry, int violation, const char *op, struct inode *inode) { u8 digest[IMA_DIGEST_SIZE]; const char *audit_cause = "hash_added"; char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX]; int audit_info = 1; int result = 0, tpmresult = 0; mutex_lock(&ima_extend_list_mutex); if (!violation) { memcpy(digest, entry->digest, sizeof digest); if (ima_lookup_digest_entry(digest)) { audit_cause = "hash_exists"; result = -EEXIST; goto out; } } result = ima_add_digest_entry(entry); if (result < 0) { audit_cause = "ENOMEM"; audit_info = 0; goto out; } if (violation) /* invalidate pcr */ memset(digest, 0xff, sizeof digest); tpmresult = ima_pcr_extend(digest); if (tpmresult != 0) { snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)", tpmresult); audit_cause = tpm_audit_cause; audit_info = 0; } out: mutex_unlock(&ima_extend_list_mutex); integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, entry->template.file_name, op, audit_cause, result, audit_info); return result; }
gpl-2.0
jollaman999/LGF180-Optimus-G-_Android_KK_v30b_Kernel
drivers/media/dvb/dvb-usb/mxl111sf-phy.c
7733
8740
/*
 *  mxl111sf-phy.c - driver for the MaxLinear MXL111SF
 *
 *  Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "mxl111sf-phy.h"
#include "mxl111sf-reg.h"

/* Program the tuner/demod with its post-reset register overrides
 * (table of {reg, mask, value} triples, terminated by an all-zero entry). */
int mxl111sf_init_tuner_demod(struct mxl111sf_state *state)
{
	struct mxl111sf_reg_ctrl_info mxl_111_overwrite_default[] = {
		{0x07, 0xff, 0x0c},
		{0x58, 0xff, 0x9d},
		{0x09, 0xff, 0x00},
		{0x06, 0xff, 0x06},
		{0xc8, 0xff, 0x40}, /* ED_LE_WIN_OLD = 0 */
		{0x8d, 0x01, 0x01}, /* NEGATE_Q */
		{0x32, 0xff, 0xac}, /* DIG_RFREFSELECT = 12 */
		{0x42, 0xff, 0x43}, /* DIG_REG_AMP = 4 */
		{0x74, 0xff, 0xc4}, /* SSPUR_FS_PRIO = 4 */
		{0x71, 0xff, 0xe6}, /* SPUR_ROT_PRIO_VAL = 1 */
		{0x83, 0xff, 0x64}, /* INF_FILT1_THD_SC = 100 */
		{0x85, 0xff, 0x64}, /* INF_FILT2_THD_SC = 100 */
		{0x88, 0xff, 0xf0}, /* INF_THD = 240 */
		{0x6f, 0xf0, 0xb0}, /* DFE_DLY = 11 */
		{0x00, 0xff, 0x01}, /* Change to page 1 */
		{0x81, 0xff, 0x11}, /* DSM_FERR_BYPASS = 1 */
		{0xf4, 0xff, 0x07}, /* DIG_FREQ_CORR = 1 */
		{0xd4, 0x1f, 0x0f}, /* SPUR_TEST_NOISE_TH = 15 */
		{0xd6, 0xff, 0x0c}, /* SPUR_TEST_NOISE_PAPR = 12 */
		{0x00, 0xff, 0x00}, /* Change to page 0 */
		{0,    0,    0}
	};

	mxl_debug("()");

	return mxl111sf_ctrl_program_regs(state, mxl_111_overwrite_default);
}

/* Soft-reset the chip, then bring it back out of reset. */
int mxl1x1sf_soft_reset(struct mxl111sf_state *state)
{
	int ret;
	mxl_debug("()");

	ret = mxl111sf_write_reg(state, 0xff, 0x00); /* AIC */
	if (mxl_fail(ret))
		goto fail;
	ret = mxl111sf_write_reg(state, 0x02, 0x01); /* get out of reset */
	mxl_fail(ret);
fail:
	return ret;
}

/* Select MXL_SOC_MODE or MXL_TUNER_MODE and configure the impulse noise
 * filter accordingly; records the mode in state->device_mode on success. */
int mxl1x1sf_set_device_mode(struct mxl111sf_state *state, int mode)
{
	int ret;

	mxl_debug("(%s)", MXL_SOC_MODE == mode ?
		"MXL_SOC_MODE" : "MXL_TUNER_MODE");

	/* set device mode */
	ret = mxl111sf_write_reg(state, 0x03,
				 MXL_SOC_MODE == mode ? 0x01 : 0x00);
	if (mxl_fail(ret))
		goto fail;

	ret = mxl111sf_write_reg_mask(state, 0x7d, 0x40,
				      MXL_SOC_MODE == mode ?
				      0x00 : /* enable impulse noise filter,
						INF_BYP = 0 */
				      0x40); /* disable impulse noise filter,
						INF_BYP = 1 */
	if (mxl_fail(ret))
		goto fail;

	state->device_mode = mode;

fail:
	return ret;
}

/* power up tuner */
int mxl1x1sf_top_master_ctrl(struct mxl111sf_state *state, int onoff)
{
	mxl_debug("(%d)", onoff);

	return mxl111sf_write_reg(state, 0x01, onoff ? 0x01 : 0x00);
}

int mxl111sf_disable_656_port(struct mxl111sf_state *state)
{
	mxl_debug("()");

	return mxl111sf_write_reg_mask(state, 0x12, 0x04, 0x00);
}

int mxl111sf_enable_usb_output(struct mxl111sf_state *state)
{
	mxl_debug("()");

	return mxl111sf_write_reg_mask(state, 0x17, 0x40, 0x00);
}

/* initialize TSIF as input port of MxL1X1SF for MPEG2 data transfer */
int mxl111sf_config_mpeg_in(struct mxl111sf_state *state,
			    unsigned int parallel_serial,
			    unsigned int msb_lsb_1st,
			    unsigned int clock_phase,
			    unsigned int mpeg_valid_pol,
			    unsigned int mpeg_sync_pol)
{
	int ret;
	u8 mode, tmp;

	mxl_debug("(%u,%u,%u,%u,%u)", parallel_serial, msb_lsb_1st,
		  clock_phase, mpeg_valid_pol, mpeg_sync_pol);

	/* Enable PIN MUX */
	ret = mxl111sf_write_reg(state, V6_PIN_MUX_MODE_REG,
				 V6_ENABLE_PIN_MUX);
	mxl_fail(ret);

	/* Configure MPEG Clock phase */
	mxl111sf_read_reg(state, V6_MPEG_IN_CLK_INV_REG, &mode);

	if (clock_phase == TSIF_NORMAL)
		mode &= ~V6_INVERTED_CLK_PHASE;
	else
		mode |= V6_INVERTED_CLK_PHASE;

	ret = mxl111sf_write_reg(state, V6_MPEG_IN_CLK_INV_REG, mode);
	mxl_fail(ret);

	/* Configure data input mode, MPEG Valid polarity, MPEG Sync polarity
	 * Get current configuration */
	ret = mxl111sf_read_reg(state, V6_MPEG_IN_CTRL_REG, &mode);
	mxl_fail(ret);

	/* Data Input mode */
	if (parallel_serial == TSIF_INPUT_PARALLEL) {
		/* Disable serial mode */
		mode &= ~V6_MPEG_IN_DATA_SERIAL;

		/* Enable Parallel mode */
		mode |= V6_MPEG_IN_DATA_PARALLEL;
	} else {
		/* Disable Parallel mode */
		mode &= ~V6_MPEG_IN_DATA_PARALLEL;

		/* Enable Serial Mode */
		mode |= V6_MPEG_IN_DATA_SERIAL;

		/* If serial interface is chosen, configure
		   MSB or LSB order in transmission */
		ret = mxl111sf_read_reg(state,
					V6_MPEG_INOUT_BIT_ORDER_CTRL_REG,
					&tmp);
		mxl_fail(ret);

		if (msb_lsb_1st == MPEG_SER_MSB_FIRST_ENABLED)
			tmp |= V6_MPEG_SER_MSB_FIRST;
		else
			tmp &= ~V6_MPEG_SER_MSB_FIRST;

		ret = mxl111sf_write_reg(state,
					 V6_MPEG_INOUT_BIT_ORDER_CTRL_REG,
					 tmp);
		mxl_fail(ret);
	}

	/* MPEG Sync polarity */
	if (mpeg_sync_pol == TSIF_NORMAL)
		mode &= ~V6_INVERTED_MPEG_SYNC;
	else
		mode |= V6_INVERTED_MPEG_SYNC;

	/* MPEG Valid polarity */
	if (mpeg_valid_pol == 0)
		mode &= ~V6_INVERTED_MPEG_VALID;
	else
		mode |= V6_INVERTED_MPEG_VALID;

	ret = mxl111sf_write_reg(state, V6_MPEG_IN_CTRL_REG, mode);
	mxl_fail(ret);

	return ret;
}

/* Bring up the I2S input path and set the number of samples per block. */
int mxl111sf_init_i2s_port(struct mxl111sf_state *state, u8 sample_size)
{
	static struct mxl111sf_reg_ctrl_info init_i2s[] = {
		{0x1b, 0xff, 0x1e}, /* pin mux mode, Choose 656/I2S input */
		{0x15, 0x60, 0x60}, /* Enable I2S */
		{0x17, 0xe0, 0x20}, /* Input, MPEG MODE USB,
				       Inverted 656 Clock, I2S_SOFT_RESET,
				       0 : Normal operation, 1 : Reset State */
#if 0
		{0x12, 0x01, 0x00}, /* AUDIO_IRQ_CLR (Overflow Indicator) */
#endif
		{0x00, 0xff, 0x02}, /* Change to Control Page */
		{0x26, 0x0d, 0x0d}, /* I2S_MODE & BT656_SRC_SEL for FPGA only */
		{0x00, 0xff, 0x00},
		{0,    0,    0}
	};
	int ret;

	mxl_debug("(0x%02x)", sample_size);

	ret = mxl111sf_ctrl_program_regs(state, init_i2s);
	if (mxl_fail(ret))
		goto fail;
	ret = mxl111sf_write_reg(state, V6_I2S_NUM_SAMPLES_REG, sample_size);
	mxl_fail(ret);
fail:
	return ret;
}

int mxl111sf_disable_i2s_port(struct mxl111sf_state *state)
{
	static struct mxl111sf_reg_ctrl_info disable_i2s[] = {
		{0x15, 0x40, 0x00},
		{0,    0,    0}
	};

	mxl_debug("()");

	return mxl111sf_ctrl_program_regs(state, disable_i2s);
}

/* Set the I2S stream's MSB start position and data width, preserving the
 * upper three bits of each register. */
int mxl111sf_config_i2s(struct mxl111sf_state *state,
			u8 msb_start_pos, u8 data_width)
{
	int ret;
	u8 tmp;

	mxl_debug("(0x%02x, 0x%02x)", msb_start_pos, data_width);

	ret = mxl111sf_read_reg(state, V6_I2S_STREAM_START_BIT_REG, &tmp);
	if (mxl_fail(ret))
		goto fail;
	tmp &= 0xe0;
	tmp |= msb_start_pos;
	ret = mxl111sf_write_reg(state, V6_I2S_STREAM_START_BIT_REG, tmp);
	if (mxl_fail(ret))
		goto fail;

	ret = mxl111sf_read_reg(state, V6_I2S_STREAM_END_BIT_REG, &tmp);
	if (mxl_fail(ret))
		goto fail;
	tmp &= 0xe0;
	tmp |= data_width;
	ret = mxl111sf_write_reg(state, V6_I2S_STREAM_END_BIT_REG, tmp);
	mxl_fail(ret);
fail:
	return ret;
}

/* Toggle SPI mode (bit 2 of V8_SPI_MODE_REG) on register page 2,
 * restoring page 0 afterwards. */
int mxl111sf_config_spi(struct mxl111sf_state *state, int onoff)
{
	u8 val;
	int ret;

	mxl_debug("(%d)", onoff);

	ret = mxl111sf_write_reg(state, 0x00, 0x02);
	if (mxl_fail(ret))
		goto fail;

	ret = mxl111sf_read_reg(state, V8_SPI_MODE_REG, &val);
	if (mxl_fail(ret))
		goto fail;

	if (onoff)
		val |= 0x04;
	else
		val &= ~0x04;

	ret = mxl111sf_write_reg(state, V8_SPI_MODE_REG, val);
	if (mxl_fail(ret))
		goto fail;

	ret = mxl111sf_write_reg(state, 0x00, 0x00);
	mxl_fail(ret);
fail:
	return ret;
}

/* Configure the IDAC: manual mode sets the control and sink bits directly;
 * automatic mode clears manual control and programs the hysteresis value. */
int mxl111sf_idac_config(struct mxl111sf_state *state,
			 u8 control_mode, u8 current_setting,
			 u8 current_value, u8 hysteresis_value)
{
	int ret;
	u8 val;
	/* current value will be set for both automatic & manual IDAC control */
	val = current_value;

	if (control_mode == IDAC_MANUAL_CONTROL) {
		/* enable manual control of IDAC */
		val |= IDAC_MANUAL_CONTROL_BIT_MASK;

		if (current_setting == IDAC_CURRENT_SINKING_ENABLE)
			/* enable current sinking in manual mode */
			val |= IDAC_CURRENT_SINKING_BIT_MASK;
		else
			/* disable current sinking in manual mode */
			val &= ~IDAC_CURRENT_SINKING_BIT_MASK;
	} else {
		/* disable manual control of IDAC */
		val &= ~IDAC_MANUAL_CONTROL_BIT_MASK;

		/* set hysteresis value  reg: 0x0B<5:0> */
		ret = mxl111sf_write_reg(state, V6_IDAC_HYSTERESIS_REG,
					 (hysteresis_value & 0x3F));
		mxl_fail(ret);
	}

	ret = mxl111sf_write_reg(state, V6_IDAC_SETTINGS_REG, val);
	mxl_fail(ret);

	return ret;
}

/*
 * Local variables:
 * c-basic-offset: 8
 * End:
 */
gpl-2.0
NicholasPace/android_kernel_motorola_msm8226
drivers/xen/xen-pciback/passthrough.c
8501
4666
/*
 * PCI Backend - Provides restricted access to the real PCI bus topology
 * to the frontend
 *
 *   Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#include <linux/list.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include "pciback.h"

struct passthrough_dev_data {
	/* Access to dev_list must be protected by lock */
	struct list_head dev_list;
	struct mutex lock;
};

/*
 * Look up the pci_dev exported at (domain, bus, devfn), or NULL if that
 * slot is not in this pdev's device list.  The returned pointer is not
 * reference-counted here; the entry keeps the device pinned while it is
 * on dev_list.
 */
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
					       unsigned int domain,
					       unsigned int bus,
					       unsigned int devfn)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry;
	struct pci_dev *dev = NULL;

	mutex_lock(&dev_data->lock);

	list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
		if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
		    && bus == (unsigned int)dev_entry->dev->bus->number
		    && devfn == dev_entry->dev->devfn) {
			dev = dev_entry->dev;
			break;
		}
	}

	mutex_unlock(&dev_data->lock);

	return dev;
}

/*
 * Add a device to the passthrough list and publish it to the frontend
 * via publish_cb.  In passthrough mode the real (domain, bus, devfn)
 * is exported unchanged; devid is passed through to the callback.
 * Returns 0 or the error from publish_cb; -ENOMEM if the list entry
 * cannot be allocated.  The publish step deliberately runs outside the
 * list lock.
 */
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
				   struct pci_dev *dev,
				   int devid, publish_pci_dev_cb publish_cb)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry;
	unsigned int domain, bus, devfn;
	int err;

	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
	if (!dev_entry)
		return -ENOMEM;
	dev_entry->dev = dev;

	mutex_lock(&dev_data->lock);
	list_add_tail(&dev_entry->list, &dev_data->dev_list);
	mutex_unlock(&dev_data->lock);

	/* Publish this device. */
	domain = (unsigned int)pci_domain_nr(dev->bus);
	bus = (unsigned int)dev->bus->number;
	devfn = dev->devfn;
	err = publish_cb(pdev, domain, bus, devfn, devid);

	return err;
}

/*
 * Remove dev from the passthrough list and drop the pcistub reference.
 * Note the loop intentionally has no break: it walks the whole list and
 * removes every entry matching dev (defensive against duplicates), then
 * releases the stub reference once if anything was found.
 */
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
					struct pci_dev *dev)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *t;
	struct pci_dev *found_dev = NULL;

	mutex_lock(&dev_data->lock);

	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
		if (dev_entry->dev == dev) {
			list_del(&dev_entry->list);
			found_dev = dev_entry->dev;
			kfree(dev_entry);
		}
	}

	mutex_unlock(&dev_data->lock);

	if (found_dev)
		pcistub_put_pci_dev(found_dev);
}

/* Allocate and attach the per-pdev passthrough state (empty device list). */
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
	struct passthrough_dev_data *dev_data;

	dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;

	mutex_init(&dev_data->lock);

	INIT_LIST_HEAD(&dev_data->dev_list);

	pdev->pci_dev_data = dev_data;

	return 0;
}

/*
 * Publish each exported device's bus as a PCI root, but only when none
 * of the device's parent bridges are themselves exported -- in that case
 * the frontend discovers the device beneath the exported bridge instead.
 */
static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
					 publish_pci_root_cb publish_root_cb)
{
	int err = 0;
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *e;
	struct pci_dev *dev;
	int found;
	unsigned int domain, bus;

	mutex_lock(&dev_data->lock);

	list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
		/* Only publish this device as a root if none of its
		 * parent bridges are exported
		 */
		found = 0;
		dev = dev_entry->dev->bus->self;
		for (; !found && dev != NULL; dev = dev->bus->self) {
			list_for_each_entry(e, &dev_data->dev_list, list) {
				if (dev == e->dev) {
					found = 1;
					break;
				}
			}
		}

		domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
		bus = (unsigned int)dev_entry->dev->bus->number;

		if (!found) {
			err = publish_root_cb(pdev, domain, bus);
			if (err)
				break;
		}
	}

	mutex_unlock(&dev_data->lock);

	return err;
}

/*
 * Tear down all passthrough state: drop every stub device reference and
 * free the list and the per-pdev data.  Runs without taking dev_data->lock;
 * presumably no concurrent access is possible at teardown -- NOTE(review):
 * confirm against the backend shutdown path.
 */
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *t;

	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
		list_del(&dev_entry->list);
		pcistub_put_pci_dev(dev_entry->dev);
		kfree(dev_entry);
	}

	kfree(dev_data);
	pdev->pci_dev_data = NULL;
}

/*
 * Map a real pci_dev back to the (domain, bus, devfn) the frontend sees.
 * Passthrough exposes the real topology, so this is the identity mapping.
 * Returns 1 (number of published functions for this device).
 */
static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
					struct xen_pcibk_device *pdev,
					unsigned int *domain, unsigned int *bus,
					unsigned int *devfn)
{
	*domain = pci_domain_nr(pcidev->bus);
	*bus = pcidev->bus->number;
	*devfn = pcidev->devfn;
	return 1;
}

const struct xen_pcibk_backend xen_pcibk_passthrough_backend = {
	.name           = "passthrough",
	.init           = __xen_pcibk_init_devices,
	.free           = __xen_pcibk_release_devices,
	.find           = __xen_pcibk_get_pcifront_dev,
	.publish        = __xen_pcibk_publish_pci_roots,
	.release        = __xen_pcibk_release_pci_dev,
	.add            = __xen_pcibk_add_pci_dev,
	.get            = __xen_pcibk_get_pci_dev,
};
gpl-2.0
judacis/Galaxy-S2-Kernel
arch/ia64/kernel/ia64_ksyms.c
8501
2506
/*
 * Architecture-specific kernel symbols
 *
 * Don't put any exports here unless it's defined in an assembler file.
 * All other exports should be put directly after the definition.
 */

#include <linux/module.h>

#include <linux/string.h>
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(strlen);

#include <asm/pgtable.h>
EXPORT_SYMBOL_GPL(empty_zero_page);

#include <asm/checksum.h>
EXPORT_SYMBOL(ip_fast_csum);		/* hand-coded assembly */
EXPORT_SYMBOL(csum_ipv6_magic);

#include <asm/page.h>
EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(copy_page);

#ifdef CONFIG_VIRTUAL_MEM_MAP
#include <linux/bootmem.h>
EXPORT_SYMBOL(min_low_pfn);	/* defined by bootmem.c, but not exported by generic code */
EXPORT_SYMBOL(max_low_pfn);	/* defined by bootmem.c, but not exported by generic code */
#endif

#include <asm/processor.h>
EXPORT_SYMBOL(ia64_cpu_info);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(local_per_cpu_offset);
#endif

#include <asm/uaccess.h>
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(__do_clear_user);
EXPORT_SYMBOL(__strlen_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__strnlen_user);

/* from arch/ia64/lib */
/* Compiler-support division/modulo helpers implemented in assembly;
 * declared here only so they can be exported for modules. */
extern void __divsi3(void);
extern void __udivsi3(void);
extern void __modsi3(void);
extern void __umodsi3(void);
extern void __divdi3(void);
extern void __udivdi3(void);
extern void __moddi3(void);
extern void __umoddi3(void);

EXPORT_SYMBOL(__divsi3);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__modsi3);
EXPORT_SYMBOL(__umodsi3);
EXPORT_SYMBOL(__divdi3);
EXPORT_SYMBOL(__udivdi3);
EXPORT_SYMBOL(__moddi3);
EXPORT_SYMBOL(__umoddi3);

#if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE)
/* Assembly XOR block routines used by the RAID456 driver. */
extern void xor_ia64_2(void);
extern void xor_ia64_3(void);
extern void xor_ia64_4(void);
extern void xor_ia64_5(void);
EXPORT_SYMBOL(xor_ia64_2);
EXPORT_SYMBOL(xor_ia64_3);
EXPORT_SYMBOL(xor_ia64_4);
EXPORT_SYMBOL(xor_ia64_5);
#endif

#include <asm/pal.h>
EXPORT_SYMBOL(ia64_pal_call_phys_stacked);
EXPORT_SYMBOL(ia64_pal_call_phys_static);
EXPORT_SYMBOL(ia64_pal_call_stacked);
EXPORT_SYMBOL(ia64_pal_call_static);
EXPORT_SYMBOL(ia64_load_scratch_fpregs);
EXPORT_SYMBOL(ia64_save_scratch_fpregs);

#include <asm/unwind.h>
EXPORT_SYMBOL(unw_init_running);

#if defined(CONFIG_IA64_ESI) || defined(CONFIG_IA64_ESI_MODULE)
extern void esi_call_phys (void);
EXPORT_SYMBOL_GPL(esi_call_phys);
#endif

/* Interrupt vector table, defined in assembly (ivt.S). */
extern char ia64_ivt[];
EXPORT_SYMBOL(ia64_ivt);

#include <asm/ftrace.h>
#ifdef CONFIG_FUNCTION_TRACER
/* mcount is defined in assembly */
EXPORT_SYMBOL(_mcount);
#endif
gpl-2.0
Benowit/android_kernel_motorola_msm8226
sound/i2c/other/pt2258.c
10037
6224
/*
 * ALSA Driver for the PT2258 volume controller.
 *
 *	Copyright (c) 2006 Jochen Voss <voss@seehuhn.de>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <sound/core.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <sound/i2c.h>
#include <sound/pt2258.h>
#include <linux/module.h>

MODULE_AUTHOR("Jochen Voss <voss@seehuhn.de>");
MODULE_DESCRIPTION("PT2258 volume controller (Princeton Technology Corp.)");
MODULE_LICENSE("GPL");

#define PT2258_CMD_RESET 0xc0
#define PT2258_CMD_UNMUTE 0xf8
#define PT2258_CMD_MUTE 0xf9

/*
 * Per-channel attenuation command bases: for each of the six channels,
 * first the -10dB-step code, then the -1dB-step code.  The low nibble
 * of the transmitted byte carries the step count.
 */
static const unsigned char pt2258_channel_code[12] = {
	0x80, 0x90,		/* channel 1: -10dB, -1dB */
	0x40, 0x50,		/* channel 2: -10dB, -1dB */
	0x00, 0x10,		/* channel 3: -10dB, -1dB */
	0x20, 0x30,		/* channel 4: -10dB, -1dB */
	0x60, 0x70,		/* channel 5: -10dB, -1dB */
	0xa0, 0xb0		/* channel 6: -10dB, -1dB */
};

/*
 * Reset the chip, mute all channels and set them to 0dB attenuation.
 * Returns 0 on success, -EIO if any I2C transfer fails.
 */
int snd_pt2258_reset(struct snd_pt2258 *pt)
{
	unsigned char bytes[2];
	int i;

	/* reset chip */
	bytes[0] = PT2258_CMD_RESET;
	snd_i2c_lock(pt->i2c_bus);
	if (snd_i2c_sendbytes(pt->i2c_dev, bytes, 1) != 1)
		goto __error;
	snd_i2c_unlock(pt->i2c_bus);

	/* mute all channels */
	pt->mute = 1;
	bytes[0] = PT2258_CMD_MUTE;
	snd_i2c_lock(pt->i2c_bus);
	if (snd_i2c_sendbytes(pt->i2c_dev, bytes, 1) != 1)
		goto __error;
	snd_i2c_unlock(pt->i2c_bus);

	/* set all channels to 0dB */
	for (i = 0; i < 6; ++i)
		pt->volume[i] = 0;
	bytes[0] = 0xd0;	/* master: 0 x -10dB steps */
	bytes[1] = 0xe0;	/* master: 0 x -1dB steps */
	snd_i2c_lock(pt->i2c_bus);
	if (snd_i2c_sendbytes(pt->i2c_dev, bytes, 2) != 2)
		goto __error;
	snd_i2c_unlock(pt->i2c_bus);

	return 0;

      __error:
	snd_i2c_unlock(pt->i2c_bus);
	snd_printk(KERN_ERR "PT2258 reset failed\n");
	return -EIO;
}

/* Stereo volume control: two channels, 0..79 (mapped to 0..-79dB). */
static int pt2258_stereo_volume_info(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 79;
	return 0;
}

static int pt2258_stereo_volume_get(struct snd_kcontrol *kcontrol,
				    struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pt2258 *pt = kcontrol->private_data;
	int base = kcontrol->private_value;

	/* chip does not support register reads */
	ucontrol->value.integer.value[0] = 79 - pt->volume[base];
	ucontrol->value.integer.value[1] = 79 - pt->volume[base + 1];
	return 0;
}

/*
 * Write the attenuation for both channels of the stereo pair.  Each
 * channel takes a two-byte command: tens-of-dB code, then units code.
 * Returns 1 if the value changed, 0 if unchanged, -EINVAL/-EIO on error.
 */
static int pt2258_stereo_volume_put(struct snd_kcontrol *kcontrol,
				    struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pt2258 *pt = kcontrol->private_data;
	int base = kcontrol->private_value;
	unsigned char bytes[2];
	int val0, val1;

	val0 = 79 - ucontrol->value.integer.value[0];
	val1 = 79 - ucontrol->value.integer.value[1];
	if (val0 < 0 || val0 > 79 || val1 < 0 || val1 > 79)
		return -EINVAL;
	if (val0 == pt->volume[base] && val1 == pt->volume[base + 1])
		return 0;

	pt->volume[base] = val0;
	bytes[0] = pt2258_channel_code[2 * base] | (val0 / 10);
	bytes[1] = pt2258_channel_code[2 * base + 1] | (val0 % 10);
	snd_i2c_lock(pt->i2c_bus);
	if (snd_i2c_sendbytes(pt->i2c_dev, bytes, 2) != 2)
		goto __error;
	snd_i2c_unlock(pt->i2c_bus);

	pt->volume[base + 1] = val1;
	bytes[0] = pt2258_channel_code[2 * base + 2] | (val1 / 10);
	bytes[1] = pt2258_channel_code[2 * base + 3] | (val1 % 10);
	snd_i2c_lock(pt->i2c_bus);
	if (snd_i2c_sendbytes(pt->i2c_dev, bytes, 2) != 2)
		goto __error;
	snd_i2c_unlock(pt->i2c_bus);

	return 1;

      __error:
	snd_i2c_unlock(pt->i2c_bus);
	snd_printk(KERN_ERR "PT2258 access failed\n");
	return -EIO;
}

#define pt2258_switch_info		snd_ctl_boolean_mono_info

static int pt2258_switch_get(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pt2258 *pt = kcontrol->private_data;

	ucontrol->value.integer.value[0] = !pt->mute;
	return 0;
}

/*
 * Toggle the global mute.  Returns 1 if the state changed, 0 if it was
 * already in the requested state, -EIO on I2C failure.
 */
static int pt2258_switch_put(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pt2258 *pt = kcontrol->private_data;
	unsigned char bytes[2];
	int val;

	val = !ucontrol->value.integer.value[0];
	if (pt->mute == val)
		return 0;

	pt->mute = val;
	bytes[0] = val ? PT2258_CMD_MUTE : PT2258_CMD_UNMUTE;
	snd_i2c_lock(pt->i2c_bus);
	if (snd_i2c_sendbytes(pt->i2c_dev, bytes, 1) != 1)
		goto __error;
	snd_i2c_unlock(pt->i2c_bus);

	return 1;

      __error:
	snd_i2c_unlock(pt->i2c_bus);
	snd_printk(KERN_ERR "PT2258 access failed 2\n");
	return -EIO;
}

static const DECLARE_TLV_DB_SCALE(pt2258_db_scale, -7900, 100, 0);

/*
 * Register three stereo volume controls and a mute switch on pt->card.
 * Returns 0 on success or the first snd_ctl_add() error.
 */
int snd_pt2258_build_controls(struct snd_pt2258 *pt)
{
	struct snd_kcontrol_new knew;
	/* Fix: string literals must not be bound to mutable char *;
	 * make the table static const so it is const-correct and not
	 * rebuilt on each call. */
	static const char * const names[3] = {
		"Mic Loopback Playback Volume",
		"Line Loopback Playback Volume",
		"CD Loopback Playback Volume"
	};
	int i, err;

	for (i = 0; i < 3; ++i) {
		memset(&knew, 0, sizeof(knew));
		knew.name = names[i];
		knew.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
		knew.count = 1;
		knew.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
		    SNDRV_CTL_ELEM_ACCESS_TLV_READ;
		knew.private_value = 2 * i;
		knew.info = pt2258_stereo_volume_info;
		knew.get = pt2258_stereo_volume_get;
		knew.put = pt2258_stereo_volume_put;
		knew.tlv.p = pt2258_db_scale;

		err = snd_ctl_add(pt->card, snd_ctl_new1(&knew, pt));
		if (err < 0)
			return err;
	}

	memset(&knew, 0, sizeof(knew));
	knew.name = "Loopback Switch";
	knew.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
	knew.info = pt2258_switch_info;
	knew.get = pt2258_switch_get;
	knew.put = pt2258_switch_put;
	knew.access = 0;
	err = snd_ctl_add(pt->card, snd_ctl_new1(&knew, pt));
	if (err < 0)
		return err;

	return 0;
}

EXPORT_SYMBOL(snd_pt2258_reset);
EXPORT_SYMBOL(snd_pt2258_build_controls);
gpl-2.0
brocktice/pixel_linux
sound/i2c/other/pt2258.c
10037
6224
/*
 * ALSA Driver for the PT2258 volume controller.
 *
 *	Copyright (c) 2006 Jochen Voss <voss@seehuhn.de>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <sound/core.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <sound/i2c.h>
#include <sound/pt2258.h>
#include <linux/module.h>

MODULE_AUTHOR("Jochen Voss <voss@seehuhn.de>");
MODULE_DESCRIPTION("PT2258 volume controller (Princeton Technology Corp.)");
MODULE_LICENSE("GPL");

#define PT2258_CMD_RESET 0xc0
#define PT2258_CMD_UNMUTE 0xf8
#define PT2258_CMD_MUTE 0xf9

/*
 * Per-channel attenuation command bases: for each of the six channels,
 * first the -10dB-step code, then the -1dB-step code.  The low nibble
 * of the transmitted byte carries the step count.
 */
static const unsigned char pt2258_channel_code[12] = {
	0x80, 0x90,		/* channel 1: -10dB, -1dB */
	0x40, 0x50,		/* channel 2: -10dB, -1dB */
	0x00, 0x10,		/* channel 3: -10dB, -1dB */
	0x20, 0x30,		/* channel 4: -10dB, -1dB */
	0x60, 0x70,		/* channel 5: -10dB, -1dB */
	0xa0, 0xb0		/* channel 6: -10dB, -1dB */
};

/*
 * Reset the chip, mute all channels and set them to 0dB attenuation.
 * Returns 0 on success, -EIO if any I2C transfer fails.
 */
int snd_pt2258_reset(struct snd_pt2258 *pt)
{
	unsigned char bytes[2];
	int i;

	/* reset chip */
	bytes[0] = PT2258_CMD_RESET;
	snd_i2c_lock(pt->i2c_bus);
	if (snd_i2c_sendbytes(pt->i2c_dev, bytes, 1) != 1)
		goto __error;
	snd_i2c_unlock(pt->i2c_bus);

	/* mute all channels */
	pt->mute = 1;
	bytes[0] = PT2258_CMD_MUTE;
	snd_i2c_lock(pt->i2c_bus);
	if (snd_i2c_sendbytes(pt->i2c_dev, bytes, 1) != 1)
		goto __error;
	snd_i2c_unlock(pt->i2c_bus);

	/* set all channels to 0dB */
	for (i = 0; i < 6; ++i)
		pt->volume[i] = 0;
	bytes[0] = 0xd0;	/* master: 0 x -10dB steps */
	bytes[1] = 0xe0;	/* master: 0 x -1dB steps */
	snd_i2c_lock(pt->i2c_bus);
	if (snd_i2c_sendbytes(pt->i2c_dev, bytes, 2) != 2)
		goto __error;
	snd_i2c_unlock(pt->i2c_bus);

	return 0;

      __error:
	snd_i2c_unlock(pt->i2c_bus);
	snd_printk(KERN_ERR "PT2258 reset failed\n");
	return -EIO;
}

/* Stereo volume control: two channels, 0..79 (mapped to 0..-79dB). */
static int pt2258_stereo_volume_info(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 79;
	return 0;
}

static int pt2258_stereo_volume_get(struct snd_kcontrol *kcontrol,
				    struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pt2258 *pt = kcontrol->private_data;
	int base = kcontrol->private_value;

	/* chip does not support register reads */
	ucontrol->value.integer.value[0] = 79 - pt->volume[base];
	ucontrol->value.integer.value[1] = 79 - pt->volume[base + 1];
	return 0;
}

/*
 * Write the attenuation for both channels of the stereo pair.  Each
 * channel takes a two-byte command: tens-of-dB code, then units code.
 * Returns 1 if the value changed, 0 if unchanged, -EINVAL/-EIO on error.
 */
static int pt2258_stereo_volume_put(struct snd_kcontrol *kcontrol,
				    struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pt2258 *pt = kcontrol->private_data;
	int base = kcontrol->private_value;
	unsigned char bytes[2];
	int val0, val1;

	val0 = 79 - ucontrol->value.integer.value[0];
	val1 = 79 - ucontrol->value.integer.value[1];
	if (val0 < 0 || val0 > 79 || val1 < 0 || val1 > 79)
		return -EINVAL;
	if (val0 == pt->volume[base] && val1 == pt->volume[base + 1])
		return 0;

	pt->volume[base] = val0;
	bytes[0] = pt2258_channel_code[2 * base] | (val0 / 10);
	bytes[1] = pt2258_channel_code[2 * base + 1] | (val0 % 10);
	snd_i2c_lock(pt->i2c_bus);
	if (snd_i2c_sendbytes(pt->i2c_dev, bytes, 2) != 2)
		goto __error;
	snd_i2c_unlock(pt->i2c_bus);

	pt->volume[base + 1] = val1;
	bytes[0] = pt2258_channel_code[2 * base + 2] | (val1 / 10);
	bytes[1] = pt2258_channel_code[2 * base + 3] | (val1 % 10);
	snd_i2c_lock(pt->i2c_bus);
	if (snd_i2c_sendbytes(pt->i2c_dev, bytes, 2) != 2)
		goto __error;
	snd_i2c_unlock(pt->i2c_bus);

	return 1;

      __error:
	snd_i2c_unlock(pt->i2c_bus);
	snd_printk(KERN_ERR "PT2258 access failed\n");
	return -EIO;
}

#define pt2258_switch_info		snd_ctl_boolean_mono_info

static int pt2258_switch_get(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pt2258 *pt = kcontrol->private_data;

	ucontrol->value.integer.value[0] = !pt->mute;
	return 0;
}

/*
 * Toggle the global mute.  Returns 1 if the state changed, 0 if it was
 * already in the requested state, -EIO on I2C failure.
 */
static int pt2258_switch_put(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pt2258 *pt = kcontrol->private_data;
	unsigned char bytes[2];
	int val;

	val = !ucontrol->value.integer.value[0];
	if (pt->mute == val)
		return 0;

	pt->mute = val;
	bytes[0] = val ? PT2258_CMD_MUTE : PT2258_CMD_UNMUTE;
	snd_i2c_lock(pt->i2c_bus);
	if (snd_i2c_sendbytes(pt->i2c_dev, bytes, 1) != 1)
		goto __error;
	snd_i2c_unlock(pt->i2c_bus);

	return 1;

      __error:
	snd_i2c_unlock(pt->i2c_bus);
	snd_printk(KERN_ERR "PT2258 access failed 2\n");
	return -EIO;
}

static const DECLARE_TLV_DB_SCALE(pt2258_db_scale, -7900, 100, 0);

/*
 * Register three stereo volume controls and a mute switch on pt->card.
 * Returns 0 on success or the first snd_ctl_add() error.
 */
int snd_pt2258_build_controls(struct snd_pt2258 *pt)
{
	struct snd_kcontrol_new knew;
	/* Fix: string literals must not be bound to mutable char *;
	 * make the table static const so it is const-correct and not
	 * rebuilt on each call. */
	static const char * const names[3] = {
		"Mic Loopback Playback Volume",
		"Line Loopback Playback Volume",
		"CD Loopback Playback Volume"
	};
	int i, err;

	for (i = 0; i < 3; ++i) {
		memset(&knew, 0, sizeof(knew));
		knew.name = names[i];
		knew.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
		knew.count = 1;
		knew.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
		    SNDRV_CTL_ELEM_ACCESS_TLV_READ;
		knew.private_value = 2 * i;
		knew.info = pt2258_stereo_volume_info;
		knew.get = pt2258_stereo_volume_get;
		knew.put = pt2258_stereo_volume_put;
		knew.tlv.p = pt2258_db_scale;

		err = snd_ctl_add(pt->card, snd_ctl_new1(&knew, pt));
		if (err < 0)
			return err;
	}

	memset(&knew, 0, sizeof(knew));
	knew.name = "Loopback Switch";
	knew.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
	knew.info = pt2258_switch_info;
	knew.get = pt2258_switch_get;
	knew.put = pt2258_switch_put;
	knew.access = 0;
	err = snd_ctl_add(pt->card, snd_ctl_new1(&knew, pt));
	if (err < 0)
		return err;

	return 0;
}

EXPORT_SYMBOL(snd_pt2258_reset);
EXPORT_SYMBOL(snd_pt2258_build_controls);
gpl-2.0
huiyiqun/kernel_flo
arch/sh/drivers/pci/ops-sh4.c
12341
2527
/* * Generic SH-4 / SH-4A PCIC operations (SH7751, SH7780). * * Copyright (C) 2002 - 2009 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License v2. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/pci.h> #include <linux/io.h> #include <linux/spinlock.h> #include <asm/addrspace.h> #include "pci-sh4.h" /* * Direct access to PCI hardware... */ #define CONFIG_CMD(bus, devfn, where) \ (0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3)) /* * Functions for accessing PCI configuration space with type 1 accesses */ static int sh4_pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct pci_channel *chan = bus->sysdata; unsigned long flags; u32 data; /* * PCIPDR may only be accessed as 32 bit words, * so we must do byte alignment by hand */ raw_spin_lock_irqsave(&pci_config_lock, flags); pci_write_reg(chan, CONFIG_CMD(bus, devfn, where), SH4_PCIPAR); data = pci_read_reg(chan, SH4_PCIPDR); raw_spin_unlock_irqrestore(&pci_config_lock, flags); switch (size) { case 1: *val = (data >> ((where & 3) << 3)) & 0xff; break; case 2: *val = (data >> ((where & 2) << 3)) & 0xffff; break; case 4: *val = data; break; default: return PCIBIOS_FUNC_NOT_SUPPORTED; } return PCIBIOS_SUCCESSFUL; } /* * Since SH4 only does 32bit access we'll have to do a read, * mask,write operation. * We'll allow an odd byte offset, though it should be illegal. 
*/ static int sh4_pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct pci_channel *chan = bus->sysdata; unsigned long flags; int shift; u32 data; raw_spin_lock_irqsave(&pci_config_lock, flags); pci_write_reg(chan, CONFIG_CMD(bus, devfn, where), SH4_PCIPAR); data = pci_read_reg(chan, SH4_PCIPDR); raw_spin_unlock_irqrestore(&pci_config_lock, flags); switch (size) { case 1: shift = (where & 3) << 3; data &= ~(0xff << shift); data |= ((val & 0xff) << shift); break; case 2: shift = (where & 2) << 3; data &= ~(0xffff << shift); data |= ((val & 0xffff) << shift); break; case 4: data = val; break; default: return PCIBIOS_FUNC_NOT_SUPPORTED; } pci_write_reg(chan, data, SH4_PCIPDR); return PCIBIOS_SUCCESSFUL; } struct pci_ops sh4_pci_ops = { .read = sh4_pci_read, .write = sh4_pci_write, }; int __attribute__((weak)) pci_fixup_pcic(struct pci_channel *chan) { /* Nothing to do. */ return 0; }
gpl-2.0
Jetson-TK1-AndroidTV/android_kernel_tegra_hdmi_prime
arch/sh/boards/mach-x3proto/setup.c
12341
5933
/*
 * arch/sh/boards/mach-x3proto/setup.c
 *
 * Renesas SH-X3 Prototype Board Support.
 *
 * Copyright (C) 2007 - 2010  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/smc91x.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/usb/r8a66597.h>
#include <linux/usb/m66592.h>
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#include <mach/ilsel.h>
#include <mach/hardware.h>
#include <asm/smp-ops.h>

/* Single LED register driven by the generic "heartbeat" driver. */
static struct resource heartbeat_resources[] = {
	[0] = {
		.start	= 0xb8140020,
		.end	= 0xb8140020,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device heartbeat_device = {
	.name		= "heartbeat",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(heartbeat_resources),
	.resource	= heartbeat_resources,
};

static struct smc91x_platdata smc91x_info = {
	.flags	= SMC91X_USE_16BIT | SMC91X_NOWAIT,
};

static struct resource smc91x_resources[] = {
	[0]	= {
		.start		= 0x18000300,
		.end		= 0x18000300 + 0x10 - 1,
		.flags		= IORESOURCE_MEM,
	},
	[1]	= {
		/* Filled in by ilsel */
		.flags		= IORESOURCE_IRQ,
	},
};

static struct platform_device smc91x_device = {
	.name		= "smc91x",
	.id		= -1,
	.resource	= smc91x_resources,
	.num_resources	= ARRAY_SIZE(smc91x_resources),
	.dev	= {
		.platform_data = &smc91x_info,
	},
};

static struct r8a66597_platdata r8a66597_data = {
	.xtal = R8A66597_PLATDATA_XTAL_12MHZ,
	.vif = 1,
};

static struct resource r8a66597_usb_host_resources[] = {
	[0] = {
		.start	= 0x18040000,
		.end	= 0x18080000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		/* Filled in by ilsel */
		.flags	= IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
	},
};

static struct platform_device r8a66597_usb_host_device = {
	.name		= "r8a66597_hcd",
	.id		= -1,
	.dev = {
		.dma_mask		= NULL,		/* don't use dma */
		.coherent_dma_mask	= 0xffffffff,
		.platform_data		= &r8a66597_data,
	},
	.num_resources	= ARRAY_SIZE(r8a66597_usb_host_resources),
	.resource	= r8a66597_usb_host_resources,
};

static struct m66592_platdata usbf_platdata = {
	.xtal = M66592_PLATDATA_XTAL_24MHZ,
	.vif = 1,
};

static struct resource m66592_usb_peripheral_resources[] = {
	[0] = {
		.name	= "m66592_udc",
		.start	= 0x18080000,
		.end	= 0x180c0000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.name	= "m66592_udc",
		/* Filled in by ilsel */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device m66592_usb_peripheral_device = {
	.name		= "m66592_udc",
	.id		= -1,
	.dev = {
		.dma_mask		= NULL,		/* don't use dma */
		.coherent_dma_mask	= 0xffffffff,
		.platform_data		= &usbf_platdata,
	},
	.num_resources	= ARRAY_SIZE(m66592_usb_peripheral_resources),
	.resource	= m66592_usb_peripheral_resources,
};

/* Baseboard key matrix; .gpio for each entry is filled in at setup time
 * once the dynamic GPIO base is known. */
static struct gpio_keys_button baseboard_buttons[NR_BASEBOARD_GPIOS] = {
	{
		.desc		= "key44",
		.code		= KEY_POWER,
		.active_low	= 1,
		.wakeup		= 1,
	}, {
		.desc		= "key43",
		.code		= KEY_SUSPEND,
		.active_low	= 1,
		.wakeup		= 1,
	}, {
		.desc		= "key42",
		.code		= KEY_KATAKANAHIRAGANA,
		.active_low	= 1,
	}, {
		.desc		= "key41",
		.code		= KEY_SWITCHVIDEOMODE,
		.active_low	= 1,
	}, {
		.desc		= "key34",
		.code		= KEY_F12,
		.active_low	= 1,
	}, {
		.desc		= "key33",
		.code		= KEY_F11,
		.active_low	= 1,
	}, {
		.desc		= "key32",
		.code		= KEY_F10,
		.active_low	= 1,
	}, {
		.desc		= "key31",
		.code		= KEY_F9,
		.active_low	= 1,
	}, {
		.desc		= "key24",
		.code		= KEY_F8,
		.active_low	= 1,
	}, {
		.desc		= "key23",
		.code		= KEY_F7,
		.active_low	= 1,
	}, {
		.desc		= "key22",
		.code		= KEY_F6,
		.active_low	= 1,
	}, {
		.desc		= "key21",
		.code		= KEY_F5,
		.active_low	= 1,
	}, {
		.desc		= "key14",
		.code		= KEY_F4,
		.active_low	= 1,
	}, {
		.desc		= "key13",
		.code		= KEY_F3,
		.active_low	= 1,
	}, {
		.desc		= "key12",
		.code		= KEY_F2,
		.active_low	= 1,
	}, {
		.desc		= "key11",
		.code		= KEY_F1,
		.active_low	= 1,
	},
};

static struct gpio_keys_platform_data baseboard_buttons_data = {
	.buttons	= baseboard_buttons,
	.nbuttons	= ARRAY_SIZE(baseboard_buttons),
};

static struct platform_device baseboard_buttons_device = {
	.name		= "gpio-keys",
	.id		= -1,
	.dev		= {
		.platform_data	= &baseboard_buttons_data,
	},
};

static struct platform_device *x3proto_devices[] __initdata = {
	&heartbeat_device,
	&smc91x_device,
	&r8a66597_usb_host_device,
	&m66592_usb_peripheral_device,
	&baseboard_buttons_device,
};

static void __init x3proto_init_irq(void)
{
	plat_irq_setup_pins(IRQ_MODE_IRL3210);

	/* Set ICR0.LVLMODE */
	__raw_writel(__raw_readl(0xfe410000) | (1 << 21), 0xfe410000);
}

/*
 * Board device registration: flips the INTC pins, brings up the
 * baseboard GPIOs, wires up dynamically-assigned IRQs/GPIOs, then
 * registers all platform devices.
 */
static int __init x3proto_devices_setup(void)
{
	int ret, i;

	/*
	 * IRLs are only needed for ILSEL mappings, so flip over the INTC
	 * pins at a later point to enable the GPIOs to settle.
	 */
	x3proto_init_irq();

	/*
	 * Now that ILSELs are available, set up the baseboard GPIOs.
	 */
	ret = x3proto_gpio_setup();
	if (unlikely(ret))
		return ret;

	/*
	 * Propagate dynamic GPIOs for the baseboard button device.
	 */
	for (i = 0; i < ARRAY_SIZE(baseboard_buttons); i++)
		baseboard_buttons[i].gpio = x3proto_gpio_chip.base + i;

	r8a66597_usb_host_resources[1].start =
		r8a66597_usb_host_resources[1].end = ilsel_enable(ILSEL_USBH_I);

	m66592_usb_peripheral_resources[1].start =
		m66592_usb_peripheral_resources[1].end =
		ilsel_enable(ILSEL_USBP_I);

	smc91x_resources[1].start =
		smc91x_resources[1].end = ilsel_enable(ILSEL_LAN);

	return platform_add_devices(x3proto_devices,
				    ARRAY_SIZE(x3proto_devices));
}
device_initcall(x3proto_devices_setup);

static void __init x3proto_setup(char **cmdline_p)
{
	register_smp_ops(&shx3_smp_ops);
}

static struct sh_machine_vector mv_x3proto __initmv = {
	.mv_name	= "x3proto",
	.mv_setup	= x3proto_setup,
};
gpl-2.0
Klozz/TheXperienceProject_Motorola_Kernel_msm8226_falcon
drivers/tty/serial/8250/8250_boca.c
12341
1261
/* * Copyright (C) 2005 Russell King. * Data taken from include/asm-i386/serial.h * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/serial_8250.h> #define PORT(_base,_irq) \ { \ .iobase = _base, \ .irq = _irq, \ .uartclk = 1843200, \ .iotype = UPIO_PORT, \ .flags = UPF_BOOT_AUTOCONF, \ } static struct plat_serial8250_port boca_data[] = { PORT(0x100, 12), PORT(0x108, 12), PORT(0x110, 12), PORT(0x118, 12), PORT(0x120, 12), PORT(0x128, 12), PORT(0x130, 12), PORT(0x138, 12), PORT(0x140, 12), PORT(0x148, 12), PORT(0x150, 12), PORT(0x158, 12), PORT(0x160, 12), PORT(0x168, 12), PORT(0x170, 12), PORT(0x178, 12), { }, }; static struct platform_device boca_device = { .name = "serial8250", .id = PLAT8250_DEV_BOCA, .dev = { .platform_data = boca_data, }, }; static int __init boca_init(void) { return platform_device_register(&boca_device); } module_init(boca_init); MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("8250 serial probe module for Boca cards"); MODULE_LICENSE("GPL");
gpl-2.0
Guitarboarder28/android_kernel_htc_flounder
samples/kdb/kdb_hello.c
13365
1543
/* * Created by: Jason Wessel <jason.wessel@windriver.com> * * Copyright (c) 2010 Wind River Systems, Inc. All Rights Reserved. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/module.h> #include <linux/kdb.h> /* * All kdb shell command call backs receive argc and argv, where * argv[0] is the command the end user typed */ static int kdb_hello_cmd(int argc, const char **argv) { if (argc > 1) return KDB_ARGCOUNT; if (argc) kdb_printf("Hello %s.\n", argv[1]); else kdb_printf("Hello world!\n"); return 0; } static int __init kdb_hello_cmd_init(void) { /* * Registration of a dynamically added kdb command is done with * kdb_register() with the arguments being: * 1: The name of the shell command * 2: The function that processes the command * 3: Description of the usage of any arguments * 4: Descriptive text when you run help * 5: Number of characters to complete the command * 0 == type the whole command * 1 == match both "g" and "go" for example */ kdb_register("hello", kdb_hello_cmd, "[string]", "Say Hello World or Hello [string]", 0); return 0; } static void __exit kdb_hello_cmd_exit(void) { kdb_unregister("hello"); } module_init(kdb_hello_cmd_init); module_exit(kdb_hello_cmd_exit); MODULE_AUTHOR("WindRiver"); MODULE_DESCRIPTION("KDB example to add a hello command"); MODULE_LICENSE("GPL");
gpl-2.0
bigbiff/android_kernel_samsung_n900
samples/kdb/kdb_hello.c
13365
1543
/* * Created by: Jason Wessel <jason.wessel@windriver.com> * * Copyright (c) 2010 Wind River Systems, Inc. All Rights Reserved. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/module.h> #include <linux/kdb.h> /* * All kdb shell command call backs receive argc and argv, where * argv[0] is the command the end user typed */ static int kdb_hello_cmd(int argc, const char **argv) { if (argc > 1) return KDB_ARGCOUNT; if (argc) kdb_printf("Hello %s.\n", argv[1]); else kdb_printf("Hello world!\n"); return 0; } static int __init kdb_hello_cmd_init(void) { /* * Registration of a dynamically added kdb command is done with * kdb_register() with the arguments being: * 1: The name of the shell command * 2: The function that processes the command * 3: Description of the usage of any arguments * 4: Descriptive text when you run help * 5: Number of characters to complete the command * 0 == type the whole command * 1 == match both "g" and "go" for example */ kdb_register("hello", kdb_hello_cmd, "[string]", "Say Hello World or Hello [string]", 0); return 0; } static void __exit kdb_hello_cmd_exit(void) { kdb_unregister("hello"); } module_init(kdb_hello_cmd_init); module_exit(kdb_hello_cmd_exit); MODULE_AUTHOR("WindRiver"); MODULE_DESCRIPTION("KDB example to add a hello command"); MODULE_LICENSE("GPL");
gpl-2.0
GaloisInc/linux-deadline
fs/nfsd/nfssvc.c
54
15923
/* * Central processing for nfsd. * * Authors: Olaf Kirch (okir@monad.swb.de) * * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de> */ #include <linux/sched.h> #include <linux/freezer.h> #include <linux/fs_struct.h> #include <linux/swap.h> #include <linux/sunrpc/stats.h> #include <linux/sunrpc/svcsock.h> #include <linux/lockd/bind.h> #include <linux/nfsacl.h> #include <linux/seq_file.h> #include <net/net_namespace.h> #include "nfsd.h" #include "cache.h" #include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_SVC extern struct svc_program nfsd_program; static int nfsd(void *vrqstp); struct timeval nfssvc_boot; /* * nfsd_mutex protects nfsd_serv -- both the pointer itself and the members * of the svc_serv struct. In particular, ->sv_nrthreads but also to some * extent ->sv_temp_socks and ->sv_permsocks. It also protects nfsdstats.th_cnt * * If (out side the lock) nfsd_serv is non-NULL, then it must point to a * properly initialised 'struct svc_serv' with ->sv_nrthreads > 0. That number * of nfsd threads must exist and each must listed in ->sp_all_threads in each * entry of ->sv_pools[]. * * Transitions of the thread count between zero and non-zero are of particular * interest since the svc_serv needs to be created and initialized at that * point, or freed. * * Finally, the nfsd_mutex also protects some of the global variables that are * accessed when nfsd starts and that are settable via the write_* routines in * nfsctl.c. In particular: * * user_recovery_dirname * user_lease_time * nfsd_versions */ DEFINE_MUTEX(nfsd_mutex); struct svc_serv *nfsd_serv; /* * nfsd_drc_lock protects nfsd_drc_max_pages and nfsd_drc_pages_used. * nfsd_drc_max_pages limits the total amount of memory available for * version 4.1 DRC caches. * nfsd_drc_pages_used tracks the current version 4.1 DRC memory usage. 
*/ spinlock_t nfsd_drc_lock; unsigned int nfsd_drc_max_mem; unsigned int nfsd_drc_mem_used; #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) static struct svc_stat nfsd_acl_svcstats; static struct svc_version * nfsd_acl_version[] = { [2] = &nfsd_acl_version2, [3] = &nfsd_acl_version3, }; #define NFSD_ACL_MINVERS 2 #define NFSD_ACL_NRVERS ARRAY_SIZE(nfsd_acl_version) static struct svc_version *nfsd_acl_versions[NFSD_ACL_NRVERS]; static struct svc_program nfsd_acl_program = { .pg_prog = NFS_ACL_PROGRAM, .pg_nvers = NFSD_ACL_NRVERS, .pg_vers = nfsd_acl_versions, .pg_name = "nfsacl", .pg_class = "nfsd", .pg_stats = &nfsd_acl_svcstats, .pg_authenticate = &svc_set_client, }; static struct svc_stat nfsd_acl_svcstats = { .program = &nfsd_acl_program, }; #endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */ static struct svc_version * nfsd_version[] = { [2] = &nfsd_version2, #if defined(CONFIG_NFSD_V3) [3] = &nfsd_version3, #endif #if defined(CONFIG_NFSD_V4) [4] = &nfsd_version4, #endif }; #define NFSD_MINVERS 2 #define NFSD_NRVERS ARRAY_SIZE(nfsd_version) static struct svc_version *nfsd_versions[NFSD_NRVERS]; struct svc_program nfsd_program = { #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) .pg_next = &nfsd_acl_program, #endif .pg_prog = NFS_PROGRAM, /* program number */ .pg_nvers = NFSD_NRVERS, /* nr of entries in nfsd_version */ .pg_vers = nfsd_versions, /* version table */ .pg_name = "nfsd", /* program name */ .pg_class = "nfsd", /* authentication class */ .pg_stats = &nfsd_svcstats, /* version table */ .pg_authenticate = &svc_set_client, /* export authentication */ }; u32 nfsd_supported_minorversion; int nfsd_vers(int vers, enum vers_op change) { if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS) return 0; switch(change) { case NFSD_SET: nfsd_versions[vers] = nfsd_version[vers]; #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) if (vers < NFSD_ACL_NRVERS) nfsd_acl_versions[vers] = nfsd_acl_version[vers]; #endif 
break; case NFSD_CLEAR: nfsd_versions[vers] = NULL; #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) if (vers < NFSD_ACL_NRVERS) nfsd_acl_versions[vers] = NULL; #endif break; case NFSD_TEST: return nfsd_versions[vers] != NULL; case NFSD_AVAIL: return nfsd_version[vers] != NULL; } return 0; } int nfsd_minorversion(u32 minorversion, enum vers_op change) { if (minorversion > NFSD_SUPPORTED_MINOR_VERSION) return -1; switch(change) { case NFSD_SET: nfsd_supported_minorversion = minorversion; break; case NFSD_CLEAR: if (minorversion == 0) return -1; nfsd_supported_minorversion = minorversion - 1; break; case NFSD_TEST: return minorversion <= nfsd_supported_minorversion; case NFSD_AVAIL: return minorversion <= NFSD_SUPPORTED_MINOR_VERSION; } return 0; } /* * Maximum number of nfsd processes */ #define NFSD_MAXSERVS 8192 int nfsd_nrthreads(void) { int rv = 0; mutex_lock(&nfsd_mutex); if (nfsd_serv) rv = nfsd_serv->sv_nrthreads; mutex_unlock(&nfsd_mutex); return rv; } static int nfsd_init_socks(int port) { int error; if (!list_empty(&nfsd_serv->sv_permsocks)) return 0; error = svc_create_xprt(nfsd_serv, "udp", &init_net, PF_INET, port, SVC_SOCK_DEFAULTS); if (error < 0) return error; error = svc_create_xprt(nfsd_serv, "tcp", &init_net, PF_INET, port, SVC_SOCK_DEFAULTS); if (error < 0) return error; return 0; } static bool nfsd_up = false; static int nfsd_startup(unsigned short port, int nrservs) { int ret; if (nfsd_up) return 0; /* * Readahead param cache - will no-op if it already exists. * (Note therefore results will be suboptimal if number of * threads is modified after nfsd start.) 
*/ ret = nfsd_racache_init(2*nrservs); if (ret) return ret; ret = nfsd_init_socks(port); if (ret) goto out_racache; ret = lockd_up(); if (ret) goto out_racache; ret = nfs4_state_start(); if (ret) goto out_lockd; nfsd_up = true; return 0; out_lockd: lockd_down(); out_racache: nfsd_racache_shutdown(); return ret; } static void nfsd_shutdown(void) { /* * write_ports can create the server without actually starting * any threads--if we get shut down before any threads are * started, then nfsd_last_thread will be run before any of this * other initialization has been done. */ if (!nfsd_up) return; nfs4_state_shutdown(); lockd_down(); nfsd_racache_shutdown(); nfsd_up = false; } static void nfsd_last_thread(struct svc_serv *serv) { /* When last nfsd thread exits we need to do some clean-up */ nfsd_serv = NULL; nfsd_shutdown(); printk(KERN_WARNING "nfsd: last server has exited, flushing export " "cache\n"); nfsd_export_flush(); } void nfsd_reset_versions(void) { int found_one = 0; int i; for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) { if (nfsd_program.pg_vers[i]) found_one = 1; } if (!found_one) { for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) nfsd_program.pg_vers[i] = nfsd_version[i]; #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++) nfsd_acl_program.pg_vers[i] = nfsd_acl_version[i]; #endif } } /* * Each session guarantees a negotiated per slot memory cache for replies * which in turn consumes memory beyond the v2/v3/v4.0 server. A dedicated * NFSv4.1 server might want to use more memory for a DRC than a machine * with mutiple services. * * Impose a hard limit on the number of pages for the DRC which varies * according to the machines free pages. This is of course only a default. * * For now this is a #defined shift which could be under admin control * in the future. 
*/ static void set_max_drc(void) { #define NFSD_DRC_SIZE_SHIFT 10 nfsd_drc_max_mem = (nr_free_buffer_pages() >> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE; nfsd_drc_mem_used = 0; spin_lock_init(&nfsd_drc_lock); dprintk("%s nfsd_drc_max_mem %u \n", __func__, nfsd_drc_max_mem); } int nfsd_create_serv(void) { int err = 0; WARN_ON(!mutex_is_locked(&nfsd_mutex)); if (nfsd_serv) { svc_get(nfsd_serv); return 0; } if (nfsd_max_blksize == 0) { /* choose a suitable default */ struct sysinfo i; si_meminfo(&i); /* Aim for 1/4096 of memory per thread * This gives 1MB on 4Gig machines * But only uses 32K on 128M machines. * Bottom out at 8K on 32M and smaller. * Of course, this is only a default. */ nfsd_max_blksize = NFSSVC_MAXBLKSIZE; i.totalram <<= PAGE_SHIFT - 12; while (nfsd_max_blksize > i.totalram && nfsd_max_blksize >= 8*1024*2) nfsd_max_blksize /= 2; } nfsd_reset_versions(); nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize, nfsd_last_thread, nfsd, THIS_MODULE); if (nfsd_serv == NULL) return -ENOMEM; set_max_drc(); do_gettimeofday(&nfssvc_boot); /* record boot time */ return err; } int nfsd_nrpools(void) { if (nfsd_serv == NULL) return 0; else return nfsd_serv->sv_nrpools; } int nfsd_get_nrthreads(int n, int *nthreads) { int i = 0; if (nfsd_serv != NULL) { for (i = 0; i < nfsd_serv->sv_nrpools && i < n; i++) nthreads[i] = nfsd_serv->sv_pools[i].sp_nrthreads; } return 0; } int nfsd_set_nrthreads(int n, int *nthreads) { int i = 0; int tot = 0; int err = 0; WARN_ON(!mutex_is_locked(&nfsd_mutex)); if (nfsd_serv == NULL || n <= 0) return 0; if (n > nfsd_serv->sv_nrpools) n = nfsd_serv->sv_nrpools; /* enforce a global maximum number of threads */ tot = 0; for (i = 0; i < n; i++) { if (nthreads[i] > NFSD_MAXSERVS) nthreads[i] = NFSD_MAXSERVS; tot += nthreads[i]; } if (tot > NFSD_MAXSERVS) { /* total too large: scale down requested numbers */ for (i = 0; i < n && tot > 0; i++) { int new = nthreads[i] * NFSD_MAXSERVS / tot; tot -= (nthreads[i] - new); nthreads[i] = new; } for 
(i = 0; i < n && tot > 0; i++) { nthreads[i]--; tot--; } } /* * There must always be a thread in pool 0; the admin * can't shut down NFS completely using pool_threads. */ if (nthreads[0] == 0) nthreads[0] = 1; /* apply the new numbers */ svc_get(nfsd_serv); for (i = 0; i < n; i++) { err = svc_set_num_threads(nfsd_serv, &nfsd_serv->sv_pools[i], nthreads[i]); if (err) break; } svc_destroy(nfsd_serv); return err; } /* * Adjust the number of threads and return the new number of threads. * This is also the function that starts the server if necessary, if * this is the first time nrservs is nonzero. */ int nfsd_svc(unsigned short port, int nrservs) { int error; bool nfsd_up_before; mutex_lock(&nfsd_mutex); dprintk("nfsd: creating service\n"); if (nrservs <= 0) nrservs = 0; if (nrservs > NFSD_MAXSERVS) nrservs = NFSD_MAXSERVS; error = 0; if (nrservs == 0 && nfsd_serv == NULL) goto out; error = nfsd_create_serv(); if (error) goto out; nfsd_up_before = nfsd_up; error = nfsd_startup(port, nrservs); if (error) goto out_destroy; error = svc_set_num_threads(nfsd_serv, NULL, nrservs); if (error) goto out_shutdown; /* We are holding a reference to nfsd_serv which * we don't want to count in the return value, * so subtract 1 */ error = nfsd_serv->sv_nrthreads - 1; out_shutdown: if (error < 0 && !nfsd_up_before) nfsd_shutdown(); out_destroy: svc_destroy(nfsd_serv); /* Release server */ out: mutex_unlock(&nfsd_mutex); return error; } /* * This is the NFS server kernel thread */ static int nfsd(void *vrqstp) { struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp; int err, preverr = 0; /* Lock module and set up kernel thread */ mutex_lock(&nfsd_mutex); /* At this point, the thread shares current->fs * with the init process. We need to create files with a * umask of 0 instead of init's umask. 
*/ if (unshare_fs_struct() < 0) { printk("Unable to start nfsd thread: out of memory\n"); goto out; } current->fs->umask = 0; /* * thread is spawned with all signals set to SIG_IGN, re-enable * the ones that will bring down the thread */ allow_signal(SIGKILL); allow_signal(SIGHUP); allow_signal(SIGINT); allow_signal(SIGQUIT); nfsdstats.th_cnt++; mutex_unlock(&nfsd_mutex); /* * We want less throttling in balance_dirty_pages() so that nfs to * localhost doesn't cause nfsd to lock up due to all the client's * dirty pages. */ current->flags |= PF_LESS_THROTTLE; set_freezable(); /* * The main request loop */ for (;;) { /* * Find a socket with data available and call its * recvfrom routine. */ while ((err = svc_recv(rqstp, 60*60*HZ)) == -EAGAIN) ; if (err == -EINTR) break; else if (err < 0) { if (err != preverr) { printk(KERN_WARNING "%s: unexpected error " "from svc_recv (%d)\n", __func__, -err); preverr = err; } schedule_timeout_uninterruptible(HZ); continue; } /* Lock the export hash tables for reading. */ exp_readlock(); validate_process_creds(); svc_process(rqstp); validate_process_creds(); /* Unlock export hash tables */ exp_readunlock(); } /* Clear signals before calling svc_exit_thread() */ flush_signals(current); mutex_lock(&nfsd_mutex); nfsdstats.th_cnt --; out: /* Release the thread */ svc_exit_thread(rqstp); /* Release module */ mutex_unlock(&nfsd_mutex); module_put_and_exit(0); return 0; } static __be32 map_new_errors(u32 vers, __be32 nfserr) { if (nfserr == nfserr_jukebox && vers == 2) return nfserr_dropit; if (nfserr == nfserr_wrongsec && vers < 4) return nfserr_acces; return nfserr; } int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp) { struct svc_procedure *proc; kxdrproc_t xdr; __be32 nfserr; __be32 *nfserrp; dprintk("nfsd_dispatch: vers %d proc %d\n", rqstp->rq_vers, rqstp->rq_proc); proc = rqstp->rq_procinfo; /* Check whether we have this call in the cache. 
*/ switch (nfsd_cache_lookup(rqstp, proc->pc_cachetype)) { case RC_INTR: case RC_DROPIT: return 0; case RC_REPLY: return 1; case RC_DOIT:; /* do it */ } /* Decode arguments */ xdr = proc->pc_decode; if (xdr && !xdr(rqstp, (__be32*)rqstp->rq_arg.head[0].iov_base, rqstp->rq_argp)) { dprintk("nfsd: failed to decode arguments!\n"); nfsd_cache_update(rqstp, RC_NOCACHE, NULL); *statp = rpc_garbage_args; return 1; } /* need to grab the location to store the status, as * nfsv4 does some encoding while processing */ nfserrp = rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len; rqstp->rq_res.head[0].iov_len += sizeof(__be32); /* Now call the procedure handler, and encode NFS status. */ nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); nfserr = map_new_errors(rqstp->rq_vers, nfserr); if (nfserr == nfserr_dropit) { dprintk("nfsd: Dropping request; may be revisited later\n"); nfsd_cache_update(rqstp, RC_NOCACHE, NULL); return 0; } if (rqstp->rq_proc != 0) *nfserrp++ = nfserr; /* Encode result. * For NFSv2, additional info is never returned in case of an error. */ if (!(nfserr && rqstp->rq_vers == 2)) { xdr = proc->pc_encode; if (xdr && !xdr(rqstp, nfserrp, rqstp->rq_resp)) { /* Failed to encode result. Release cache entry */ dprintk("nfsd: failed to encode result!\n"); nfsd_cache_update(rqstp, RC_NOCACHE, NULL); *statp = rpc_system_err; return 1; } } /* Store reply in cache. 
*/ nfsd_cache_update(rqstp, proc->pc_cachetype, statp + 1); return 1; } int nfsd_pool_stats_open(struct inode *inode, struct file *file) { int ret; mutex_lock(&nfsd_mutex); if (nfsd_serv == NULL) { mutex_unlock(&nfsd_mutex); return -ENODEV; } /* bump up the psudo refcount while traversing */ svc_get(nfsd_serv); ret = svc_pool_stats_open(nfsd_serv, file); mutex_unlock(&nfsd_mutex); return ret; } int nfsd_pool_stats_release(struct inode *inode, struct file *file) { int ret = seq_release(inode, file); mutex_lock(&nfsd_mutex); /* this function really, really should have been called svc_put() */ svc_destroy(nfsd_serv); mutex_unlock(&nfsd_mutex); return ret; }
gpl-2.0
lsigithub/axxia_yocto_linux_4.1_pull
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
54
26933
/* * Copyright 2007-8 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Dave Airlie * Alex Deucher */ #include <drm/drmP.h> #include <drm/amdgpu_drm.h> #include "amdgpu.h" #include "amdgpu_i2c.h" #include "atom.h" #include "amdgpu_connectors.h" #include <asm/div64.h> #include <linux/pm_runtime.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_edid.h> static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb) { struct amdgpu_flip_work *work = container_of(cb, struct amdgpu_flip_work, cb); dma_fence_put(f); schedule_work(&work->flip_work.work); } static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work, struct dma_fence **f) { struct dma_fence *fence= *f; if (fence == NULL) return false; *f = NULL; if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback)) return true; dma_fence_put(fence); return false; } static void amdgpu_flip_work_func(struct work_struct *__work) { struct delayed_work *delayed_work = container_of(__work, struct delayed_work, work); struct amdgpu_flip_work *work = container_of(delayed_work, struct amdgpu_flip_work, flip_work); struct amdgpu_device *adev = work->adev; struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id]; struct drm_crtc *crtc = &amdgpu_crtc->base; unsigned long flags; unsigned i; int vpos, hpos; if (amdgpu_flip_handle_fence(work, &work->excl)) return; for (i = 0; i < work->shared_count; ++i) if (amdgpu_flip_handle_fence(work, &work->shared[i])) return; /* Wait until we're out of the vertical blank period before the one * targeted by the flip */ if (amdgpu_crtc->enabled && (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0, &vpos, &hpos, NULL, NULL, &crtc->hwmode) & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && (int)(work->target_vblank - amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) { schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000)); return; } /* We borrow the event spin lock for protecting flip_status */ 
spin_lock_irqsave(&crtc->dev->event_lock, flags); /* Do the flip (mmio) */ adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async); /* Set the flip status */ amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED; spin_unlock_irqrestore(&crtc->dev->event_lock, flags); DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n", amdgpu_crtc->crtc_id, amdgpu_crtc, work); } /* * Handle unpin events outside the interrupt handler proper. */ static void amdgpu_unpin_work_func(struct work_struct *__work) { struct amdgpu_flip_work *work = container_of(__work, struct amdgpu_flip_work, unpin_work); int r; /* unpin of the old buffer */ r = amdgpu_bo_reserve(work->old_abo, true); if (likely(r == 0)) { r = amdgpu_bo_unpin(work->old_abo); if (unlikely(r != 0)) { DRM_ERROR("failed to unpin buffer after flip\n"); } amdgpu_bo_unreserve(work->old_abo); } else DRM_ERROR("failed to reserve buffer after flip\n"); amdgpu_bo_unref(&work->old_abo); kfree(work->shared); kfree(work); } int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t page_flip_flags, uint32_t target, struct drm_modeset_acquire_ctx *ctx) { struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct amdgpu_framebuffer *old_amdgpu_fb; struct amdgpu_framebuffer *new_amdgpu_fb; struct drm_gem_object *obj; struct amdgpu_flip_work *work; struct amdgpu_bo *new_abo; unsigned long flags; u64 tiling_flags; u64 base; int i, r; work = kzalloc(sizeof *work, GFP_KERNEL); if (work == NULL) return -ENOMEM; INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func); INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func); work->event = event; work->adev = adev; work->crtc_id = amdgpu_crtc->crtc_id; work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0; /* schedule unpin of the old buffer */ old_amdgpu_fb = 
to_amdgpu_framebuffer(crtc->primary->fb); obj = old_amdgpu_fb->obj; /* take a reference to the old object */ work->old_abo = gem_to_amdgpu_bo(obj); amdgpu_bo_ref(work->old_abo); new_amdgpu_fb = to_amdgpu_framebuffer(fb); obj = new_amdgpu_fb->obj; new_abo = gem_to_amdgpu_bo(obj); /* pin the new buffer */ r = amdgpu_bo_reserve(new_abo, false); if (unlikely(r != 0)) { DRM_ERROR("failed to reserve new abo buffer before flip\n"); goto cleanup; } r = amdgpu_bo_pin(new_abo, AMDGPU_GEM_DOMAIN_VRAM, &base); if (unlikely(r != 0)) { DRM_ERROR("failed to pin new abo buffer before flip\n"); goto unreserve; } r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl, &work->shared_count, &work->shared); if (unlikely(r != 0)) { DRM_ERROR("failed to get fences for buffer\n"); goto unpin; } amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags); amdgpu_bo_unreserve(new_abo); work->base = base; work->target_vblank = target - drm_crtc_vblank_count(crtc) + amdgpu_get_vblank_counter_kms(dev, work->crtc_id); /* we borrow the event spin lock for protecting flip_wrok */ spin_lock_irqsave(&crtc->dev->event_lock, flags); if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) { DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); spin_unlock_irqrestore(&crtc->dev->event_lock, flags); r = -EBUSY; goto pflip_cleanup; } amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING; amdgpu_crtc->pflip_works = work; DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n", amdgpu_crtc->crtc_id, amdgpu_crtc, work); /* update crtc fb */ crtc->primary->fb = fb; spin_unlock_irqrestore(&crtc->dev->event_lock, flags); amdgpu_flip_work_func(&work->flip_work.work); return 0; pflip_cleanup: if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) { DRM_ERROR("failed to reserve new abo in error path\n"); goto cleanup; } unpin: if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) { DRM_ERROR("failed to unpin new abo in error path\n"); } unreserve: amdgpu_bo_unreserve(new_abo); cleanup: 
amdgpu_bo_unref(&work->old_abo); dma_fence_put(work->excl); for (i = 0; i < work->shared_count; ++i) dma_fence_put(work->shared[i]); kfree(work->shared); kfree(work); return r; } int amdgpu_crtc_set_config(struct drm_mode_set *set, struct drm_modeset_acquire_ctx *ctx) { struct drm_device *dev; struct amdgpu_device *adev; struct drm_crtc *crtc; bool active = false; int ret; if (!set || !set->crtc) return -EINVAL; dev = set->crtc->dev; ret = pm_runtime_get_sync(dev->dev); if (ret < 0) return ret; ret = drm_crtc_helper_set_config(set, ctx); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) if (crtc->enabled) active = true; pm_runtime_mark_last_busy(dev->dev); adev = dev->dev_private; /* if we have active crtcs and we don't have a power ref, take the current one */ if (active && !adev->have_disp_power_ref) { adev->have_disp_power_ref = true; return ret; } /* if we have no active crtcs, then drop the power ref we got before */ if (!active && adev->have_disp_power_ref) { pm_runtime_put_autosuspend(dev->dev); adev->have_disp_power_ref = false; } /* drop the power reference we got coming in here */ pm_runtime_put_autosuspend(dev->dev); return ret; } static const char *encoder_names[41] = { "NONE", "INTERNAL_LVDS", "INTERNAL_TMDS1", "INTERNAL_TMDS2", "INTERNAL_DAC1", "INTERNAL_DAC2", "INTERNAL_SDVOA", "INTERNAL_SDVOB", "SI170B", "CH7303", "CH7301", "INTERNAL_DVO1", "EXTERNAL_SDVOA", "EXTERNAL_SDVOB", "TITFP513", "INTERNAL_LVTM1", "VT1623", "HDMI_SI1930", "HDMI_INTERNAL", "INTERNAL_KLDSCP_TMDS1", "INTERNAL_KLDSCP_DVO1", "INTERNAL_KLDSCP_DAC1", "INTERNAL_KLDSCP_DAC2", "SI178", "MVPU_FPGA", "INTERNAL_DDI", "VT1625", "HDMI_SI1932", "DP_AN9801", "DP_DP501", "INTERNAL_UNIPHY", "INTERNAL_KLDSCP_LVTMA", "INTERNAL_UNIPHY1", "INTERNAL_UNIPHY2", "NUTMEG", "TRAVIS", "INTERNAL_VCE", "INTERNAL_UNIPHY3", "HDMI_ANX9805", "INTERNAL_AMCLK", "VIRTUAL", }; static const char *hpd_names[6] = { "HPD1", "HPD2", "HPD3", "HPD4", "HPD5", "HPD6", }; void amdgpu_print_display_setup(struct 
drm_device *dev) { struct drm_connector *connector; struct amdgpu_connector *amdgpu_connector; struct drm_encoder *encoder; struct amdgpu_encoder *amdgpu_encoder; uint32_t devices; int i = 0; DRM_INFO("AMDGPU Display Connectors\n"); list_for_each_entry(connector, &dev->mode_config.connector_list, head) { amdgpu_connector = to_amdgpu_connector(connector); DRM_INFO("Connector %d:\n", i); DRM_INFO(" %s\n", connector->name); if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE) DRM_INFO(" %s\n", hpd_names[amdgpu_connector->hpd.hpd]); if (amdgpu_connector->ddc_bus) { DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", amdgpu_connector->ddc_bus->rec.mask_clk_reg, amdgpu_connector->ddc_bus->rec.mask_data_reg, amdgpu_connector->ddc_bus->rec.a_clk_reg, amdgpu_connector->ddc_bus->rec.a_data_reg, amdgpu_connector->ddc_bus->rec.en_clk_reg, amdgpu_connector->ddc_bus->rec.en_data_reg, amdgpu_connector->ddc_bus->rec.y_clk_reg, amdgpu_connector->ddc_bus->rec.y_data_reg); if (amdgpu_connector->router.ddc_valid) DRM_INFO(" DDC Router 0x%x/0x%x\n", amdgpu_connector->router.ddc_mux_control_pin, amdgpu_connector->router.ddc_mux_state); if (amdgpu_connector->router.cd_valid) DRM_INFO(" Clock/Data Router 0x%x/0x%x\n", amdgpu_connector->router.cd_mux_control_pin, amdgpu_connector->router.cd_mux_state); } else { if (connector->connector_type == DRM_MODE_CONNECTOR_VGA || connector->connector_type == DRM_MODE_CONNECTOR_DVII || connector->connector_type == DRM_MODE_CONNECTOR_DVID || connector->connector_type == DRM_MODE_CONNECTOR_DVIA || connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n"); } DRM_INFO(" Encoders:\n"); list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { amdgpu_encoder = to_amdgpu_encoder(encoder); devices = amdgpu_encoder->devices & amdgpu_connector->devices; if (devices) { if (devices & 
ATOM_DEVICE_CRT1_SUPPORT) DRM_INFO(" CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]); if (devices & ATOM_DEVICE_CRT2_SUPPORT) DRM_INFO(" CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]); if (devices & ATOM_DEVICE_LCD1_SUPPORT) DRM_INFO(" LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]); if (devices & ATOM_DEVICE_DFP1_SUPPORT) DRM_INFO(" DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]); if (devices & ATOM_DEVICE_DFP2_SUPPORT) DRM_INFO(" DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]); if (devices & ATOM_DEVICE_DFP3_SUPPORT) DRM_INFO(" DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]); if (devices & ATOM_DEVICE_DFP4_SUPPORT) DRM_INFO(" DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]); if (devices & ATOM_DEVICE_DFP5_SUPPORT) DRM_INFO(" DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]); if (devices & ATOM_DEVICE_DFP6_SUPPORT) DRM_INFO(" DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]); if (devices & ATOM_DEVICE_TV1_SUPPORT) DRM_INFO(" TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]); if (devices & ATOM_DEVICE_CV_SUPPORT) DRM_INFO(" CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]); } } i++; } } /** * amdgpu_ddc_probe * */ bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector, bool use_aux) { u8 out = 0x0; u8 buf[8]; int ret; struct i2c_msg msgs[] = { { .addr = DDC_ADDR, .flags = 0, .len = 1, .buf = &out, }, { .addr = DDC_ADDR, .flags = I2C_M_RD, .len = 8, .buf = buf, } }; /* on hw with routers, select right port */ if (amdgpu_connector->router.ddc_valid) amdgpu_i2c_router_select_ddc_port(amdgpu_connector); if (use_aux) { ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2); } else { ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2); } if (ret != 2) /* Couldn't find an accessible DDC on this connector */ return false; /* Probe also for valid EDID header * EDID header starts with: * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00. 
* Only the first 6 bytes must be valid as * drm_edid_block_valid() can fix the last 2 bytes */ if (drm_edid_header_is_valid(buf) < 6) { /* Couldn't find an accessible EDID on this * connector */ return false; } return true; } static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb); drm_gem_object_unreference_unlocked(amdgpu_fb->obj); drm_framebuffer_cleanup(fb); kfree(amdgpu_fb); } static int amdgpu_user_framebuffer_create_handle(struct drm_framebuffer *fb, struct drm_file *file_priv, unsigned int *handle) { struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb); return drm_gem_handle_create(file_priv, amdgpu_fb->obj, handle); } static const struct drm_framebuffer_funcs amdgpu_fb_funcs = { .destroy = amdgpu_user_framebuffer_destroy, .create_handle = amdgpu_user_framebuffer_create_handle, }; int amdgpu_framebuffer_init(struct drm_device *dev, struct amdgpu_framebuffer *rfb, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { int ret; rfb->obj = obj; drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd); ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs); if (ret) { rfb->obj = NULL; return ret; } return 0; } static struct drm_framebuffer * amdgpu_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd) { struct drm_gem_object *obj; struct amdgpu_framebuffer *amdgpu_fb; int ret; obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]); if (obj == NULL) { dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, " "can't create framebuffer\n", mode_cmd->handles[0]); return ERR_PTR(-ENOENT); } /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */ if (obj->import_attach) { DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n"); return ERR_PTR(-EINVAL); } amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL); if (amdgpu_fb == NULL) { 
drm_gem_object_unreference_unlocked(obj); return ERR_PTR(-ENOMEM); } ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj); if (ret) { kfree(amdgpu_fb); drm_gem_object_unreference_unlocked(obj); return ERR_PTR(ret); } return &amdgpu_fb->base; } static void amdgpu_output_poll_changed(struct drm_device *dev) { struct amdgpu_device *adev = dev->dev_private; amdgpu_fb_output_poll_changed(adev); } const struct drm_mode_config_funcs amdgpu_mode_funcs = { .fb_create = amdgpu_user_framebuffer_create, .output_poll_changed = amdgpu_output_poll_changed }; static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] = { { UNDERSCAN_OFF, "off" }, { UNDERSCAN_ON, "on" }, { UNDERSCAN_AUTO, "auto" }, }; static const struct drm_prop_enum_list amdgpu_audio_enum_list[] = { { AMDGPU_AUDIO_DISABLE, "off" }, { AMDGPU_AUDIO_ENABLE, "on" }, { AMDGPU_AUDIO_AUTO, "auto" }, }; /* XXX support different dither options? spatial, temporal, both, etc. */ static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = { { AMDGPU_FMT_DITHER_DISABLE, "off" }, { AMDGPU_FMT_DITHER_ENABLE, "on" }, }; int amdgpu_modeset_create_props(struct amdgpu_device *adev) { int sz; adev->mode_info.coherent_mode_property = drm_property_create_range(adev->ddev, 0 , "coherent", 0, 1); if (!adev->mode_info.coherent_mode_property) return -ENOMEM; adev->mode_info.load_detect_property = drm_property_create_range(adev->ddev, 0, "load detection", 0, 1); if (!adev->mode_info.load_detect_property) return -ENOMEM; drm_mode_create_scaling_mode_property(adev->ddev); sz = ARRAY_SIZE(amdgpu_underscan_enum_list); adev->mode_info.underscan_property = drm_property_create_enum(adev->ddev, 0, "underscan", amdgpu_underscan_enum_list, sz); adev->mode_info.underscan_hborder_property = drm_property_create_range(adev->ddev, 0, "underscan hborder", 0, 128); if (!adev->mode_info.underscan_hborder_property) return -ENOMEM; adev->mode_info.underscan_vborder_property = drm_property_create_range(adev->ddev, 0, "underscan 
vborder", 0, 128); if (!adev->mode_info.underscan_vborder_property) return -ENOMEM; sz = ARRAY_SIZE(amdgpu_audio_enum_list); adev->mode_info.audio_property = drm_property_create_enum(adev->ddev, 0, "audio", amdgpu_audio_enum_list, sz); sz = ARRAY_SIZE(amdgpu_dither_enum_list); adev->mode_info.dither_property = drm_property_create_enum(adev->ddev, 0, "dither", amdgpu_dither_enum_list, sz); return 0; } void amdgpu_update_display_priority(struct amdgpu_device *adev) { /* adjustment options for the display watermarks */ if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2)) adev->mode_info.disp_priority = 0; else adev->mode_info.disp_priority = amdgpu_disp_priority; } static bool is_hdtv_mode(const struct drm_display_mode *mode) { /* try and guess if this is a tv or a monitor */ if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */ (mode->vdisplay == 576) || /* 576p */ (mode->vdisplay == 720) || /* 720p */ (mode->vdisplay == 1080)) /* 1080p */ return true; else return false; } bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = crtc->dev; struct drm_encoder *encoder; struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct amdgpu_encoder *amdgpu_encoder; struct drm_connector *connector; struct amdgpu_connector *amdgpu_connector; u32 src_v = 1, dst_v = 1; u32 src_h = 1, dst_h = 1; amdgpu_crtc->h_border = 0; amdgpu_crtc->v_border = 0; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc != crtc) continue; amdgpu_encoder = to_amdgpu_encoder(encoder); connector = amdgpu_get_connector_for_encoder(encoder); amdgpu_connector = to_amdgpu_connector(connector); /* set scaling */ if (amdgpu_encoder->rmx_type == RMX_OFF) amdgpu_crtc->rmx_type = RMX_OFF; else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay || mode->vdisplay < amdgpu_encoder->native_mode.vdisplay) amdgpu_crtc->rmx_type = 
amdgpu_encoder->rmx_type; else amdgpu_crtc->rmx_type = RMX_OFF; /* copy native mode */ memcpy(&amdgpu_crtc->native_mode, &amdgpu_encoder->native_mode, sizeof(struct drm_display_mode)); src_v = crtc->mode.vdisplay; dst_v = amdgpu_crtc->native_mode.vdisplay; src_h = crtc->mode.hdisplay; dst_h = amdgpu_crtc->native_mode.hdisplay; /* fix up for overscan on hdmi */ if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) && ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) || ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) && drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) && is_hdtv_mode(mode)))) { if (amdgpu_encoder->underscan_hborder != 0) amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder; else amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16; if (amdgpu_encoder->underscan_vborder != 0) amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder; else amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16; amdgpu_crtc->rmx_type = RMX_FULL; src_v = crtc->mode.vdisplay; dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2); src_h = crtc->mode.hdisplay; dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2); } } if (amdgpu_crtc->rmx_type != RMX_OFF) { fixed20_12 a, b; a.full = dfixed_const(src_v); b.full = dfixed_const(dst_v); amdgpu_crtc->vsc.full = dfixed_div(a, b); a.full = dfixed_const(src_h); b.full = dfixed_const(dst_h); amdgpu_crtc->hsc.full = dfixed_div(a, b); } else { amdgpu_crtc->vsc.full = dfixed_const(1); amdgpu_crtc->hsc.full = dfixed_const(1); } return true; } /* * Retrieve current video scanout position of crtc on a given gpu, and * an optional accurate timestamp of when query happened. * * \param dev Device to query. * \param pipe Crtc to query. * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). * For driver internal use only also supports these flags: * * USE_REAL_VBLANKSTART to use the real start of vblank instead * of a fudged earlier start of vblank. 
*
 *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
 *              fudged earlier start of vblank in *vpos and the distance
 *              to true start of vblank in *hpos.
 *
 * \param *vpos Location where vertical scanout position should be stored.
 * \param *hpos Location where horizontal scanout position should go.
 * \param *stime Target location for timestamp taken immediately before
 *               scanout position query. Can be NULL to skip timestamp.
 * \param *etime Target location for timestamp taken immediately after
 *               scanout position query. Can be NULL to skip timestamp.
 *
 * Returns vpos as a positive number while in active scanout area.
 * Returns vpos as a negative number inside vblank, counting the number
 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
 * until start of active scanout / end of vblank."
 *
 * \return Flags, or'ed together as follows:
 *
 * DRM_SCANOUTPOS_VALID = Query successful.
 * DRM_SCANOUTPOS_INVBL = Inside vblank.
 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
 * this flag means that returned position may be offset by a constant but
 * unknown small number of scanlines wrt. real scanout position.
 *
 */
int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
			       unsigned int flags, int *vpos, int *hpos,
			       ktime_t *stime, ktime_t *etime,
			       const struct drm_display_mode *mode)
{
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct amdgpu_device *adev = dev->dev_private;

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	/* Hardware query; vbl and position each pack two 13-bit line
	 * numbers (see the 0x1fff masks below). */
	if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
		ret |= DRM_SCANOUTPOS_VALID;

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	/* Decode into vertical and horizontal scanout position. */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* Valid vblank area boundaries from gpu retrieved? */
	if (vbl > 0) {
		/* Yes: Decode. */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	} else {
		/* No: Fake something reasonable which gives at least ok results. */
		vbl_start = mode->crtc_vdisplay;
		vbl_end = 0;
	}

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from real vbl_start in *hpos */
		*hpos = *vpos - vbl_start;
	}

	/* Fudge vblank to start a few scanlines earlier to handle the
	 * problem that vblank irqs fire a few scanlines before start
	 * of vblank. Some driver internal callers need the true vblank
	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
	 *
	 * The cause of the "early" vblank irq is that the irq is triggered
	 * by the line buffer logic when the line buffer read position enters
	 * the vblank, whereas our crtc scanout position naturally lags the
	 * line buffer read position.
	 */
	if (!(flags & USE_REAL_VBLANKSTART))
		vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;

	/* Test scanout position against vblank region. */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from fudged earlier vbl_start;
		 * note the early return: the corrective offsets below are
		 * intentionally skipped in this mode. */
		*vpos -= vbl_start;
		return ret;
	}

	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will then be >=0 in video scanout area, but negative
	 * within vblank area, counting down the number of lines until
	 * start of scanout.
	 */

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = mode->crtc_vtotal;
		*vpos = *vpos - vtotal;
	}

	/* Correct for shifted end of vbl at vbl_end. */
	*vpos = *vpos - vbl_end;

	return ret;
}

/* Map a CRTC index to its vblank interrupt source.  Out-of-range
 * indices (negative or >= num_crtc) yield AMDGPU_CRTC_IRQ_NONE. */
int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
{
	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
		return AMDGPU_CRTC_IRQ_NONE;

	switch (crtc) {
	case 0:
		return AMDGPU_CRTC_IRQ_VBLANK1;
	case 1:
		return AMDGPU_CRTC_IRQ_VBLANK2;
	case 2:
		return AMDGPU_CRTC_IRQ_VBLANK3;
	case 3:
		return AMDGPU_CRTC_IRQ_VBLANK4;
	case 4:
		return AMDGPU_CRTC_IRQ_VBLANK5;
	case 5:
		return AMDGPU_CRTC_IRQ_VBLANK6;
	default:
		return AMDGPU_CRTC_IRQ_NONE;
	}
}
gpl-2.0
strukturag/gst-plugins-bad
sys/cdrom/gstcdplayer_ioctl.c
54
3356
/* gstcdplay * Copyright (c) 2002 Charles Schmidt <cbschmid@uiuc.edu> * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, * Boston, MA 02110-1301, USA. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "gstcdplayer_ioctl.h" #include <sys/types.h> #include <sys/stat.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <fcntl.h> #include <unistd.h> #include <sys/ioctl.h> #include <errno.h> /* private functions */ static void cd_fix_track_range (struct cd *cd, gint * start_track, gint * end_track); static gint cddb_sum (gint n); #if defined(HAVE_LINUX_CDROM_H) #include <linux/cdrom.h> #elif defined(HAVE_SYS_CDIO_H) #include <sys/cdio.h> /* irix cdaudio works quite a bit differently than ioctl(), so its not ready #elif defined(HAVE_DMEDIA_CDAUDIO_H) #include <dmedia/cdaudio.h> */ #endif /* these headers define low level functions: gboolean cd_init(struct cd *cd,const gchar *device); gboolean cd_start(struct cd *cd,gint start_track,gint end_track); gboolean cd_pause(struct cd *cd); gboolean cd_resume(struct cd *cd); gboolean cd_stop(struct cd *cd); CDStatus cd_status(struct cd *cd); gint cd_current_track(struct cd *cd); gboolean cd_close(struct cd *cd); */ #if defined(HAVE_CDROM_SOLARIS) #include "gstcdplayer_ioctl_solaris.h" #elif defined(HAVE_CDROM_BSD) #include "gstcdplayer_ioctl_bsd.h" /* #elif 
defined(HAVE_CDROM_IRIX)
#include "gstcdplayer_ioctl_irix.h"
*/
#endif

/* Clamp a requested playback range into what the disc supports:
 * *start_track is forced into [1, cd->num_tracks]; an end before start
 * (other than the LEADOUT sentinel) is pulled up to start; an end at or
 * beyond the last track becomes LEADOUT (play through the end). */
static void
cd_fix_track_range (struct cd *cd, gint * start_track, gint * end_track)
{
  if (*start_track <= 0) {
    *start_track = 1;
  }

  if (*start_track > cd->num_tracks) {
    *start_track = cd->num_tracks;
  }

  if (*end_track < *start_track && *end_track != LEADOUT) {
    *end_track = *start_track;
  }

  /* end == num_tracks also maps to LEADOUT (the "+ 1" comparison). */
  if (*end_track > cd->num_tracks || *end_track + 1 > cd->num_tracks) {
    *end_track = LEADOUT;
  }

  return;
}

/* this cddb info is from
   http://www.freedb.org/modules.php?name=Sections&sop=viewarticle&artid=6
   this will probably be of interest to anyone wishing to actually use
   the discid
   http://www.freedb.org/modules.php?name=Sections&sop=viewarticle&artid=28
*/

/* Sum of the decimal digits of n -- the freedb "cddb_sum" primitive. */
static gint
cddb_sum (gint n)
{
  gint ret = 0;

  while (n > 0) {
    ret += n % 10;
    n /= 10;
  }

  return ret;
}

/* Compute the 32-bit freedb/CDDB disc id:
 *   bits 31-24: digit-sum checksum of all track start times, mod 0xff
 *   bits 23-8:  total playing time in seconds (leadout - first track)
 *   bits 7-0:   number of tracks
 * NOTE(review): assumes cd->tracks[] is populated for indices
 * 1..num_tracks and LEADOUT -- confirm against the cd_init() that
 * fills struct cd. */
guint32
cd_cddb_discid (struct cd * cd)
{
  guint i;
  guint n = 0;
  guint t;

  for (i = 1; i <= cd->num_tracks; i++) {
    n += cddb_sum (cd->tracks[i].minute * 60 + cd->tracks[i].second);
  }

  t = (cd->tracks[LEADOUT].minute * 60 + cd->tracks[LEADOUT].second) -
      (cd->tracks[1].minute * 60 + cd->tracks[1].second);

  return ((n % 0xff) << 24 | t << 8 | (cd->num_tracks));
}
gpl-2.0
binkybear/nexus7_2012-5
arch/arm/mach-tegra/board-ardbeg-memory.c
54
779156
/* * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307, USA */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_data/tegra_emc_pdata.h> #include "board.h" #include "board-ardbeg.h" #include "tegra-board-id.h" #include "tegra12_emc.h" #include "devices.h" static struct tegra12_emc_table ardbeg_ddr3_emc_table_pm358[] = { { 0x18, /* V5.0.12 */ "03_12750_02_V5.0.12_V0.9", /* DVFS table version */ 12750, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x4000003e, /* CLK_SOURCE_EMC */ 164, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000000, /* EMC_RC */ 0x00000003, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000000, /* EMC_RAS */ 0x00000000, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000000, /* EMC_RD_RCD */ 0x00000000, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* 
EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000060, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000018, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000007, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x00000005, /* EMC_TXSR */ 0x00000005, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000000, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000064, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x106aa298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 
0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000007, /* EMC_TXDSRVTTGEN */ 
0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x800001c5, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x40040001, /* MC_EMEM_ARB_CFG */ 0x8000000a, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0402, /* MC_EMEM_ARB_DA_COVERS */ 0x77e30303, /* MC_EMEM_ARB_MISC0 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000007, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 
*/ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 57820, /* expected dvfs latency (ns) */ }, { 0x18, /* V5.0.12 */ "03_20400_02_V5.0.12_V0.9", /* DVFS table version */ 20400, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000026, /* CLK_SOURCE_EMC */ 164, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000000, /* EMC_RC */ 0x00000005, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000000, /* EMC_RAS */ 0x00000000, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000000, /* EMC_RD_RCD */ 0x00000000, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA 
*/ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x0000009a, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000026, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000007, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x00000006, /* EMC_TXSR */ 0x00000006, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000000, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x000000a0, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x106aa298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* 
EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* 
EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x0000000b, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x8000023a, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x40020001, /* MC_EMEM_ARB_CFG */ 0x80000012, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0402, /* MC_EMEM_ARB_DA_COVERS */ 0x76230303, /* MC_EMEM_ARB_MISC0 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x0000000a, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00ff, /* 
MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 35610, /* expected dvfs latency (ns) */ }, { 0x18, /* V5.0.12 */ "03_40800_02_V5.0.12_V0.9", /* DVFS table version */ 40800, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000012, /* CLK_SOURCE_EMC */ 164, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000001, /* EMC_RC */ 0x0000000a, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000001, /* EMC_RAS */ 0x00000000, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000000, /* EMC_RD_RCD */ 0x00000000, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 
0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000134, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x0000004d, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000008, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x0000000c, /* EMC_TXSR */ 0x0000000c, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000000, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x0000013f, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x106aa298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* 
EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, 
/* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000015, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x80000370, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0xa0000001, /* MC_EMEM_ARB_CFG */ 0x80000017, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0402, /* MC_EMEM_ARB_DA_COVERS */ 0x74a30303, /* MC_EMEM_ARB_MISC0 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000014, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* 
MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 20850, /* expected dvfs latency (ns) */ }, { 0x18, /* V5.0.12 */ "03_68000_02_V5.0.12_V0.9", /* DVFS table version */ 68000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x4000000a, /* CLK_SOURCE_EMC */ 164, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000003, /* EMC_RC */ 0x00000011, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000002, /* EMC_RAS */ 0x00000000, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000000, /* EMC_RD_RCD */ 0x00000000, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 
0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000202, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000080, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x0000000f, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x00000013, /* EMC_TXSR */ 0x00000013, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000001, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000213, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x106aa298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 
0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 
0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000022, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x8000050e, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x00000001, /* MC_EMEM_ARB_CFG */ 0x8000001e, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0402, /* MC_EMEM_ARB_DA_COVERS */ 0x74230403, /* MC_EMEM_ARB_MISC0 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000021, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00b0, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ec, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ec, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00e90049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, 
/* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00a3, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ef, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ef, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ee00ef, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 10720, /* expected dvfs latency (ns) */ }, { 0x18, /* V5.0.12 */ "03_102000_02_V5.0.12_V0.9", /* DVFS table version */ 102000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000006, /* CLK_SOURCE_EMC */ 164, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000004, /* EMC_RC */ 0x0000001a, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000003, /* EMC_RAS */ 0x00000001, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000001, /* EMC_RD_RCD */ 0x00000001, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD 
*/ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000304, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000000c1, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000018, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x0000001c, /* EMC_TXSR */ 0x0000001c, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000003, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x0000031c, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x106aa298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 
*/ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 
*/ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000033, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x80000713, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x08000001, /* MC_EMEM_ARB_CFG */ 0x80000026, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000003, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0403, /* MC_EMEM_ARB_DA_COVERS */ 0x73c30504, /* MC_EMEM_ARB_MISC0 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000031, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff0075, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff009d, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff009d, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x009b0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* 
MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ad, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00c6, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00d6, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x0000009f, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x0000009f, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x009f00a0, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 6890, /* expected dvfs latency (ns) */ }, { 0x18, /* V5.0.12 */ "03_204000_03_V5.0.12_V0.9", /* DVFS table version */ 204000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000002, /* CLK_SOURCE_EMC */ 164, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000009, /* EMC_RC */ 0x00000035, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000006, /* EMC_RAS */ 0x00000002, /* EMC_RP */ 0x00000005, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, 
/* EMC_W2P */ 0x00000002, /* EMC_RD_RCD */ 0x00000002, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000005, /* EMC_WDV */ 0x00000005, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000004, /* EMC_EINPUT */ 0x00000006, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000003, /* EMC_QRST */ 0x0000000d, /* EMC_QSAFE */ 0x0000000f, /* EMC_RDV */ 0x00000011, /* EMC_RDV_MASK */ 0x00000607, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000181, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000032, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x00000038, /* EMC_TXSR */ 0x00000038, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000007, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000638, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x106aa298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* 
EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00008000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00008000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00090000, /* EMC_DLL_XFORM_DQ0 */ 0x00090000, /* EMC_DLL_XFORM_DQ1 */ 0x00090000, /* EMC_DLL_XFORM_DQ2 */ 0x00090000, /* EMC_DLL_XFORM_DQ3 */ 0x00009000, /* EMC_DLL_XFORM_DQ4 */ 0x00009000, /* EMC_DLL_XFORM_DQ5 */ 0x00009000, /* EMC_DLL_XFORM_DQ6 */ 0x00009000, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, 
/* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000707, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000066, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000d2b3, /* EMC_CFG_PIPE */ 0x80000d22, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x01000003, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000004, /* MC_EMEM_ARB_TIMING_RC */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000004, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06040203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0404, /* MC_EMEM_ARB_DA_COVERS */ 0x73840a05, /* MC_EMEM_ARB_MISC0 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000062, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00af, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff004f, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00af, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff004f, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x004e0049, /* 
MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080057, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff0063, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff0036, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff006b, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000050, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000050, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510050, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00c6, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x0000088d, /* EMC_CFG_2 */ 0x00040008, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 3420, /* expected dvfs latency (ns) */ }, { 0x18, /* V5.0.12 */ "03_300000_03_V5.0.12_V0.9", /* DVFS table version */ 300000, /* SDRAM frequency */ 820, /* min voltage */ 800, /* gpu min voltage */ "pllc_out0", /* clock source id */ 0x20000002, /* CLK_SOURCE_EMC */ 164, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x0000000d, /* EMC_RC */ 0x0000004d, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000009, /* EMC_RAS */ 0x00000003, /* EMC_RP 
*/ 0x00000004, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000002, /* EMC_R2P */ 0x00000009, /* EMC_W2P */ 0x00000003, /* EMC_RD_RCD */ 0x00000003, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000005, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000002, /* EMC_EINPUT */ 0x00000007, /* EMC_EINPUT_DURATION */ 0x00020000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000001, /* EMC_QRST */ 0x0000000e, /* EMC_QSAFE */ 0x0000000e, /* EMC_RDV */ 0x00000010, /* EMC_RDV_MASK */ 0x000008e4, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000239, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000001, /* EMC_PDEX2WR */ 0x00000008, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x0000004b, /* EMC_AR2PDEN */ 0x0000000e, /* EMC_RW2PDEN */ 0x00000052, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000009, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000924, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x104ab098, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00030000, /* EMC_DLL_XFORM_DQS0 */ 0x00030000, /* EMC_DLL_XFORM_DQS1 */ 0x00030000, /* EMC_DLL_XFORM_DQS2 */ 0x00030000, /* EMC_DLL_XFORM_DQS3 */ 0x00030000, /* EMC_DLL_XFORM_DQS4 */ 0x00030000, /* EMC_DLL_XFORM_DQS5 */ 0x00030000, /* EMC_DLL_XFORM_DQS6 */ 0x00030000, /* EMC_DLL_XFORM_DQS7 */ 0x00030000, /* EMC_DLL_XFORM_DQS8 */ 0x00030000, /* EMC_DLL_XFORM_DQS9 */ 0x00030000, /* EMC_DLL_XFORM_DQS10 */ 0x00030000, /* EMC_DLL_XFORM_DQS11 
*/ 0x00030000, /* EMC_DLL_XFORM_DQS12 */ 0x00030000, /* EMC_DLL_XFORM_DQS13 */ 0x00030000, /* EMC_DLL_XFORM_DQS14 */ 0x00030000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00098000, /* EMC_DLL_XFORM_ADDR0 */ 0x00098000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00098000, /* EMC_DLL_XFORM_ADDR3 */ 0x00098000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00060000, /* EMC_DLL_XFORM_DQ0 */ 0x00060000, /* EMC_DLL_XFORM_DQ1 */ 0x00060000, /* EMC_DLL_XFORM_DQ2 */ 0x00060000, /* EMC_DLL_XFORM_DQ3 */ 0x00006000, /* EMC_DLL_XFORM_DQ4 */ 0x00006000, /* EMC_DLL_XFORM_DQ5 */ 0x00006000, /* EMC_DLL_XFORM_DQ6 */ 0x00006000, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* 
EMC_XM2CMDPADCTRL5 */ 0x01231339, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000096, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x0173000e, /* EMC_MRS_WAIT_CNT */ 0x0173000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000d3b3, /* EMC_CFG_PIPE */ 0x800012d7, /* EMC_DYN_SELF_REF_CONTROL */ 0x00000009, /* EMC_QPOP */ 0x08000004, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RP */ 0x00000007, /* MC_EMEM_ARB_TIMING_RC */ 0x00000004, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000005, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000007, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06040202, /* MC_EMEM_ARB_DA_TURNS */ 0x000b0607, /* MC_EMEM_ARB_DA_COVERS */ 0x77450e08, /* MC_EMEM_ARB_MISC0 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000004, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000090, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff0090, /* 
MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00350049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x0008003b, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff0043, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff002d, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510036, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff0087, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73340000, /* EMC_CFG */ 0x000008cd, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80000321, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200000, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 2680, /* expected dvfs latency (ns) */ }, { 0x18, /* V5.0.12 */ "03_396000_04_V5.0.12_V0.9", /* DVFS table version */ 396000, /* SDRAM frequency */ 850, /* min voltage */ 900, /* gpu min voltage */ "pllm_out0", /* clock source id */ 0x00000002, /* CLK_SOURCE_EMC */ 164, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000011, /* EMC_RC */ 
0x00000066, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x0000000c, /* EMC_RAS */ 0x00000004, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000002, /* EMC_R2P */ 0x0000000a, /* EMC_W2P */ 0x00000004, /* EMC_RD_RCD */ 0x00000004, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000005, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000001, /* EMC_EINPUT */ 0x00000008, /* EMC_EINPUT_DURATION */ 0x00020000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000000, /* EMC_QRST */ 0x0000000f, /* EMC_QSAFE */ 0x00000010, /* EMC_RDV */ 0x00000012, /* EMC_RDV_MASK */ 0x00000bd1, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000002f4, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000001, /* EMC_PDEX2WR */ 0x00000008, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000063, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x0000006c, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x0000000d, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000c11, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x104ab098, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00030000, /* EMC_DLL_XFORM_DQS0 */ 0x00030000, /* EMC_DLL_XFORM_DQS1 */ 0x00030000, /* EMC_DLL_XFORM_DQS2 */ 0x00030000, /* EMC_DLL_XFORM_DQS3 */ 0x00030000, /* EMC_DLL_XFORM_DQS4 */ 0x00030000, /* EMC_DLL_XFORM_DQS5 */ 0x00030000, /* EMC_DLL_XFORM_DQS6 */ 0x00030000, /* EMC_DLL_XFORM_DQS7 */ 0x00030000, /* EMC_DLL_XFORM_DQS8 */ 
0x00030000, /* EMC_DLL_XFORM_DQS9 */ 0x00030000, /* EMC_DLL_XFORM_DQS10 */ 0x00030000, /* EMC_DLL_XFORM_DQS11 */ 0x00030000, /* EMC_DLL_XFORM_DQS12 */ 0x00030000, /* EMC_DLL_XFORM_DQS13 */ 0x00030000, /* EMC_DLL_XFORM_DQS14 */ 0x00030000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00070000, /* EMC_DLL_XFORM_ADDR0 */ 0x00070000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00070000, /* EMC_DLL_XFORM_ADDR3 */ 0x00070000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00038000, /* EMC_DLL_XFORM_DQ0 */ 0x00038000, /* EMC_DLL_XFORM_DQ1 */ 0x00038000, /* EMC_DLL_XFORM_DQ2 */ 0x00038000, /* EMC_DLL_XFORM_DQ3 */ 0x00003800, /* EMC_DLL_XFORM_DQ4 */ 0x00003800, /* EMC_DLL_XFORM_DQ5 */ 0x00003800, /* EMC_DLL_XFORM_DQ6 */ 0x00003800, /* 
EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x01231339, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x000000c6, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x015b000e, /* EMC_MRS_WAIT_CNT */ 0x015b000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x000052a3, /* EMC_CFG_PIPE */ 0x8000188b, /* EMC_DYN_SELF_REF_CONTROL */ 0x00000009, /* EMC_QPOP */ 0x0f000005, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RP */ 0x00000009, /* MC_EMEM_ARB_TIMING_RC */ 0x00000005, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000007, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06040202, /* MC_EMEM_ARB_DA_TURNS */ 0x000d0709, /* MC_EMEM_ARB_DA_COVERS */ 0x7586120a, /* MC_EMEM_ARB_MISC0 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x0000000a, /* MC_MLL_MPCORER_PTSA_RATE */ 0x000000be, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 
0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00280049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x0008002d, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff0033, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff0022, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff0037, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff0066, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73340000, /* EMC_CFG */ 0x00000895, /* EMC_CFG_2 */ 0x00040008, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80000521, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200000, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 2180, /* expected dvfs latency (ns) */ }, { 0x18, /* V5.0.12 */ "03_528000_04_V5.0.12_V0.9", /* DVFS table version */ 528000, /* SDRAM frequency */ 870, /* min voltage */ 900, /* gpu min voltage */ "pllm_ud", /* clock source id */ 0x80000000, /* 
CLK_SOURCE_EMC */ 164, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000018, /* EMC_RC */ 0x00000088, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000010, /* EMC_RAS */ 0x00000006, /* EMC_RP */ 0x00000006, /* EMC_R2W */ 0x00000009, /* EMC_W2R */ 0x00000002, /* EMC_R2P */ 0x0000000d, /* EMC_W2P */ 0x00000006, /* EMC_RD_RCD */ 0x00000006, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000001, /* EMC_EINPUT */ 0x00000009, /* EMC_EINPUT_DURATION */ 0x00030000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000000, /* EMC_QRST */ 0x00000010, /* EMC_QSAFE */ 0x00000012, /* EMC_RDV */ 0x00000014, /* EMC_RDV_MASK */ 0x00000fd6, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000003f5, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x0000000b, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000085, /* EMC_AR2PDEN */ 0x00000012, /* EMC_RW2PDEN */ 0x00000090, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000013, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000006, /* EMC_TCLKSTABLE */ 0x00000006, /* EMC_TCLKSTOP */ 0x00001017, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x104ab098, /* EMC_FBIO_CFG5 */ 0xe01200b1, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x0000000a, /* EMC_DLL_XFORM_DQS0 */ 0x0000000a, /* EMC_DLL_XFORM_DQS1 */ 0x0000000a, /* EMC_DLL_XFORM_DQS2 */ 0x0000000a, /* EMC_DLL_XFORM_DQS3 */ 0x0000000a, /* EMC_DLL_XFORM_DQS4 */ 0x0000000a, /* EMC_DLL_XFORM_DQS5 */ 
0x0000000a, /* EMC_DLL_XFORM_DQS6 */ 0x0000000a, /* EMC_DLL_XFORM_DQS7 */ 0x0000000a, /* EMC_DLL_XFORM_DQS8 */ 0x0000000a, /* EMC_DLL_XFORM_DQS9 */ 0x0000000a, /* EMC_DLL_XFORM_DQS10 */ 0x0000000a, /* EMC_DLL_XFORM_DQS11 */ 0x0000000a, /* EMC_DLL_XFORM_DQS12 */ 0x0000000a, /* EMC_DLL_XFORM_DQS13 */ 0x0000000a, /* EMC_DLL_XFORM_DQS14 */ 0x0000000a, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00054000, /* EMC_DLL_XFORM_ADDR0 */ 0x00054000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00054000, /* EMC_DLL_XFORM_ADDR3 */ 0x00054000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000e, /* EMC_DLL_XFORM_DQ0 */ 0x0000000e, /* EMC_DLL_XFORM_DQ1 */ 0x0000000e, /* EMC_DLL_XFORM_DQ2 */ 0x0000000e, /* EMC_DLL_XFORM_DQ3 */ 0x0000000e, /* 
EMC_DLL_XFORM_DQ4 */ 0x0000000e, /* EMC_DLL_XFORM_DQ5 */ 0x0000000e, /* EMC_DLL_XFORM_DQ6 */ 0x0000000e, /* EMC_DLL_XFORM_DQ7 */ 0x100002a0, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0123133d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc085, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x0139000e, /* EMC_MRS_WAIT_CNT */ 0x0139000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x000042a0, /* EMC_CFG_PIPE */ 0x80002062, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x0f000007, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000002, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RP */ 0x0000000c, /* MC_EMEM_ARB_TIMING_RC */ 0x00000007, /* MC_EMEM_ARB_TIMING_RAS */ 0x0000000a, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000009, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06050202, /* MC_EMEM_ARB_DA_TURNS */ 0x0010090c, /* MC_EMEM_ARB_DA_COVERS */ 0x7428180d, /* MC_EMEM_ARB_MISC0 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x0000000d, /* MC_MLL_MPCORER_PTSA_RATE */ 0x000000fd, /* MC_PTSA_GRANT_DECREMENT */ 0x00c10038, /* 
MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00c10038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00c1003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00c10090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00c10041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00c10090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00c10041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00c10080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00c10004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00c10004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080021, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000c1, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00c10004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00c10026, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00c1001a, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00c10024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00c10029, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000c1, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00c10065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00c1002a, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73300000, /* EMC_CFG */ 0x0000089d, /* EMC_CFG_2 */ 0x00040008, /* EMC_SEL_DPD_CTRL */ 0xe0120069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80000941, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 1440, /* expected dvfs latency (ns) */ }, { 0x18, /* V5.0.12 */ "03_600000_02_V5.0.12_V0.9", /* DVFS table version */ 600000, /* 
SDRAM frequency */ 910, /* min voltage */ 900, /* gpu min voltage */ "pllc_ud", /* clock source id */ 0xe0000000, /* CLK_SOURCE_EMC */ 164, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x0000001b, /* EMC_RC */ 0x0000009b, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000013, /* EMC_RAS */ 0x00000007, /* EMC_RP */ 0x00000007, /* EMC_R2W */ 0x0000000b, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x00000010, /* EMC_W2P */ 0x00000007, /* EMC_RD_RCD */ 0x00000007, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000005, /* EMC_WDV */ 0x00000005, /* EMC_WDV_MASK */ 0x0000000a, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000003, /* EMC_EINPUT */ 0x0000000b, /* EMC_EINPUT_DURATION */ 0x00070000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000002, /* EMC_QRST */ 0x00000012, /* EMC_QSAFE */ 0x00000016, /* EMC_RDV */ 0x00000018, /* EMC_RDV_MASK */ 0x00001208, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000482, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x0000000d, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000097, /* EMC_AR2PDEN */ 0x00000015, /* EMC_RW2PDEN */ 0x000000a3, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000015, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000006, /* EMC_TCLKSTABLE */ 0x00000006, /* EMC_TCLKSTOP */ 0x00001248, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x104ab098, /* EMC_FBIO_CFG5 */ 0xe00e00b1, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x0000000a, /* EMC_DLL_XFORM_DQS0 */ 0x0000000a, /* EMC_DLL_XFORM_DQS1 */ 0x0000000a, /* EMC_DLL_XFORM_DQS2 */ 
0x0000000a, /* EMC_DLL_XFORM_DQS3 */ 0x0000000a, /* EMC_DLL_XFORM_DQS4 */ 0x0000000a, /* EMC_DLL_XFORM_DQS5 */ 0x0000000a, /* EMC_DLL_XFORM_DQS6 */ 0x0000000a, /* EMC_DLL_XFORM_DQS7 */ 0x0000000a, /* EMC_DLL_XFORM_DQS8 */ 0x0000000a, /* EMC_DLL_XFORM_DQS9 */ 0x0000000a, /* EMC_DLL_XFORM_DQS10 */ 0x0000000a, /* EMC_DLL_XFORM_DQS11 */ 0x0000000a, /* EMC_DLL_XFORM_DQS12 */ 0x0000000a, /* EMC_DLL_XFORM_DQS13 */ 0x0000000a, /* EMC_DLL_XFORM_DQS14 */ 0x0000000a, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00048000, /* EMC_DLL_XFORM_ADDR0 */ 0x00048000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00048000, /* EMC_DLL_XFORM_ADDR3 */ 0x00048000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000d, /* EMC_DLL_XFORM_DQ0 */ 0x0000000d, /* 
EMC_DLL_XFORM_DQ1 */ 0x0000000d, /* EMC_DLL_XFORM_DQ2 */ 0x0000000d, /* EMC_DLL_XFORM_DQ3 */ 0x0000000d, /* EMC_DLL_XFORM_DQ4 */ 0x0000000d, /* EMC_DLL_XFORM_DQ5 */ 0x0000000d, /* EMC_DLL_XFORM_DQ6 */ 0x0000000d, /* EMC_DLL_XFORM_DQ7 */ 0x100002a0, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0121113d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc085, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x0127000e, /* EMC_MRS_WAIT_CNT */ 0x0127000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x000040a0, /* EMC_CFG_PIPE */ 0x800024a9, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000e, /* EMC_QPOP */ 0x00000009, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000003, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000004, /* MC_EMEM_ARB_TIMING_RP */ 0x0000000e, /* MC_EMEM_ARB_TIMING_RC */ 0x00000009, /* MC_EMEM_ARB_TIMING_RAS */ 0x0000000b, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x0000000b, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000007, /* MC_EMEM_ARB_TIMING_W2R */ 0x07050202, /* MC_EMEM_ARB_DA_TURNS */ 0x00130b0e, /* MC_EMEM_ARB_DA_COVERS */ 0x73a91b0f, /* MC_EMEM_ARB_MISC0 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x0000000f, /* 
MC_MLL_MPCORER_PTSA_RATE */ 0x00000120, /* MC_PTSA_GRANT_DECREMENT */ 0x00aa0038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00aa0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00aa003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00aa0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00aa0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00aa0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00aa0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00aa0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x0008001d, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000aa, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00aa0022, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00aa0018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00aa0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00aa0024, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000aa, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00aa0065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00aa0025, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73300000, /* EMC_CFG */ 0x0000089d, /* EMC_CFG_2 */ 0x00040008, /* EMC_SEL_DPD_CTRL */ 0xe00e0069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80000b61, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200010, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 1440, /* expected dvfs latency (ns) */ }, { 
0x18, /* V5.0.12 */ "03_792000_06_V5.0.12_V0.9", /* DVFS table version */ 792000, /* SDRAM frequency */ 980, /* min voltage */ 1100, /* gpu min voltage */ "pllm_ud", /* clock source id */ 0x80000000, /* CLK_SOURCE_EMC */ 164, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000024, /* EMC_RC */ 0x000000cd, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000019, /* EMC_RAS */ 0x0000000a, /* EMC_RP */ 0x00000008, /* EMC_R2W */ 0x0000000d, /* EMC_W2R */ 0x00000004, /* EMC_R2P */ 0x00000013, /* EMC_W2P */ 0x0000000a, /* EMC_RD_RCD */ 0x0000000a, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x0000000b, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000002, /* EMC_EINPUT */ 0x0000000d, /* EMC_EINPUT_DURATION */ 0x00080000, /* EMC_PUTERM_EXTRA */ 0x00000004, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000001, /* EMC_QRST */ 0x00000014, /* EMC_QSAFE */ 0x00000018, /* EMC_RDV */ 0x0000001a, /* EMC_RDV_MASK */ 0x000017e2, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000005f8, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000003, /* EMC_PDEX2WR */ 0x00000011, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x000000c7, /* EMC_AR2PDEN */ 0x00000018, /* EMC_RW2PDEN */ 0x000000d7, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000005, /* EMC_TCKE */ 0x00000006, /* EMC_TCKESR */ 0x00000005, /* EMC_TPD */ 0x0000001d, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000008, /* EMC_TCLKSTABLE */ 0x00000008, /* EMC_TCLKSTOP */ 0x00001822, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x104ab098, /* EMC_FBIO_CFG5 */ 0xe00700b1, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00000008, /* 
EMC_DLL_XFORM_DQS0 */ 0x00000008, /* EMC_DLL_XFORM_DQS1 */ 0x00000008, /* EMC_DLL_XFORM_DQS2 */ 0x00000008, /* EMC_DLL_XFORM_DQS3 */ 0x00000008, /* EMC_DLL_XFORM_DQS4 */ 0x00000008, /* EMC_DLL_XFORM_DQS5 */ 0x00000008, /* EMC_DLL_XFORM_DQS6 */ 0x00000008, /* EMC_DLL_XFORM_DQS7 */ 0x00000008, /* EMC_DLL_XFORM_DQS8 */ 0x00000008, /* EMC_DLL_XFORM_DQS9 */ 0x00000008, /* EMC_DLL_XFORM_DQS10 */ 0x00000008, /* EMC_DLL_XFORM_DQS11 */ 0x00000008, /* EMC_DLL_XFORM_DQS12 */ 0x00000008, /* EMC_DLL_XFORM_DQS13 */ 0x00000008, /* EMC_DLL_XFORM_DQS14 */ 0x00000008, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00034000, /* EMC_DLL_XFORM_ADDR0 */ 0x00034000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00034000, /* EMC_DLL_XFORM_ADDR3 */ 0x00034000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS14 */ 
0x00000005, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000a, /* EMC_DLL_XFORM_DQ0 */ 0x0000000a, /* EMC_DLL_XFORM_DQ1 */ 0x0000000a, /* EMC_DLL_XFORM_DQ2 */ 0x0000000a, /* EMC_DLL_XFORM_DQ3 */ 0x0000000a, /* EMC_DLL_XFORM_DQ4 */ 0x0000000a, /* EMC_DLL_XFORM_DQ5 */ 0x0000000a, /* EMC_DLL_XFORM_DQ6 */ 0x0000000a, /* EMC_DLL_XFORM_DQ7 */ 0x100002a0, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0120113d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc085, /* EMC_XM2CLKPADCTRL */ 0x00000000, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x61861820, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x61861800, /* EMC_XM2DQSPADCTRL6 */ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x00f7000e, /* EMC_MRS_WAIT_CNT */ 0x00f7000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000004, /* EMC_CTT_DURATION */ 0x00004080, /* EMC_CFG_PIPE */ 0x80003012, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000f, /* EMC_QPOP */ 0x0e00000b, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000004, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000005, /* MC_EMEM_ARB_TIMING_RP */ 0x00000013, /* MC_EMEM_ARB_TIMING_RC */ 0x0000000c, /* MC_EMEM_ARB_TIMING_RAS */ 0x0000000f, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000002, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x0000000c, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000008, /* MC_EMEM_ARB_TIMING_W2R */ 0x08060202, /* MC_EMEM_ARB_DA_TURNS */ 0x00170e13, /* MC_EMEM_ARB_DA_COVERS */ 
0x736c2414, /* MC_EMEM_ARB_MISC0 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000013, /* MC_MLL_MPCORER_PTSA_RATE */ 0x0000017c, /* MC_PTSA_GRANT_DECREMENT */ 0x00810038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00810038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x0081003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00810090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00810041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00810090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00810041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00810080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080016, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x00000081, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00810019, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00810018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00810024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x0081001c, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x00000081, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00810065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x0081001c, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73300000, /* EMC_CFG */ 0x0000089d, /* EMC_CFG_2 */ 0x00040000, /* EMC_SEL_DPD_CTRL */ 0xe0070069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80000d71, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200018, /* 
Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 1200, /* expected dvfs latency (ns) */ }, { 0x18, /* V5.0.12 */ "03_924000_06_V5.0.12_V0.9", /* DVFS table version */ 924000, /* SDRAM frequency */ 1010, /* min voltage */ 1100, /* gpu min voltage */ "pllm_ud", /* clock source id */ 0x80000000, /* CLK_SOURCE_EMC */ 164, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x0000002b, /* EMC_RC */ 0x000000f0, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x0000001e, /* EMC_RAS */ 0x0000000b, /* EMC_RP */ 0x00000009, /* EMC_R2W */ 0x0000000f, /* EMC_W2R */ 0x00000005, /* EMC_R2P */ 0x00000016, /* EMC_W2P */ 0x0000000b, /* EMC_RD_RCD */ 0x0000000b, /* EMC_WR_RCD */ 0x00000004, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000007, /* EMC_WDV */ 0x00000007, /* EMC_WDV_MASK */ 0x0000000d, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000002, /* EMC_EINPUT */ 0x0000000f, /* EMC_EINPUT_DURATION */ 0x000a0000, /* EMC_PUTERM_EXTRA */ 0x00000004, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000001, /* EMC_QRST */ 0x00000016, /* EMC_QSAFE */ 0x0000001a, /* EMC_RDV */ 0x0000001c, /* EMC_RDV_MASK */ 0x00001be7, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000006f9, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000004, /* EMC_PDEX2WR */ 0x00000015, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x000000e7, /* EMC_AR2PDEN */ 0x0000001b, /* EMC_RW2PDEN */ 0x000000fb, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000006, /* EMC_TCKE */ 0x00000007, /* EMC_TCKESR */ 0x00000006, /* EMC_TPD */ 0x00000022, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x0000000a, /* EMC_TCLKSTABLE */ 0x0000000a, /* EMC_TCLKSTOP */ 0x00001c28, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x104ab898, /* EMC_FBIO_CFG5 */ 
0xe00400b1, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x007f800a, /* EMC_DLL_XFORM_DQS0 */ 0x007f800a, /* EMC_DLL_XFORM_DQS1 */ 0x007f800a, /* EMC_DLL_XFORM_DQS2 */ 0x007f800a, /* EMC_DLL_XFORM_DQS3 */ 0x007f800a, /* EMC_DLL_XFORM_DQS4 */ 0x007f800a, /* EMC_DLL_XFORM_DQS5 */ 0x007f800a, /* EMC_DLL_XFORM_DQS6 */ 0x007f800a, /* EMC_DLL_XFORM_DQS7 */ 0x007f800a, /* EMC_DLL_XFORM_DQS8 */ 0x007f800a, /* EMC_DLL_XFORM_DQS9 */ 0x007f800a, /* EMC_DLL_XFORM_DQS10 */ 0x007f800a, /* EMC_DLL_XFORM_DQS11 */ 0x007f800a, /* EMC_DLL_XFORM_DQS12 */ 0x007f800a, /* EMC_DLL_XFORM_DQS13 */ 0x007f800a, /* EMC_DLL_XFORM_DQS14 */ 0x007f800a, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x0002c000, /* EMC_DLL_XFORM_ADDR0 */ 0x0002c000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x0002c000, /* EMC_DLL_XFORM_ADDR3 */ 0x0002c000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000005, /* 
EMC_DLI_TRIM_TXDQS12 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS15 */ 0x00000008, /* EMC_DLL_XFORM_DQ0 */ 0x00000008, /* EMC_DLL_XFORM_DQ1 */ 0x00000008, /* EMC_DLL_XFORM_DQ2 */ 0x00000008, /* EMC_DLL_XFORM_DQ3 */ 0x00000008, /* EMC_DLL_XFORM_DQ4 */ 0x00000008, /* EMC_DLL_XFORM_DQ5 */ 0x00000008, /* EMC_DLL_XFORM_DQ6 */ 0x00000008, /* EMC_DLL_XFORM_DQ7 */ 0x100002a0, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0120113d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc085, /* EMC_XM2CLKPADCTRL */ 0x00000000, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x5d75d720, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x5d75d700, /* EMC_XM2DQSPADCTRL6 */ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000128, /* EMC_ZCAL_WAIT_CNT */ 0x00cd000e, /* EMC_MRS_WAIT_CNT */ 0x00cd000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000004, /* EMC_CTT_DURATION */ 0x00004080, /* EMC_CFG_PIPE */ 0x800037ea, /* EMC_DYN_SELF_REF_CONTROL */ 0x00000011, /* EMC_QPOP */ 0x0e00000d, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000005, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000006, /* MC_EMEM_ARB_TIMING_RP */ 0x00000016, /* MC_EMEM_ARB_TIMING_RC */ 0x0000000e, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000011, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000002, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000004, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x0000000e, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000009, /* 
MC_EMEM_ARB_TIMING_W2R */ 0x09060202, /* MC_EMEM_ARB_DA_TURNS */ 0x001a1016, /* MC_EMEM_ARB_DA_COVERS */ 0x734e2a17, /* MC_EMEM_ARB_MISC0 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000017, /* MC_MLL_MPCORER_PTSA_RATE */ 0x000001bb, /* MC_PTSA_GRANT_DECREMENT */ 0x006e0038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x006e0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x006e003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x006e0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x006e0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x006e0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x006e0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x006e0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x006e0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x006e0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080016, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x0000006e, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x006e0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x006e0019, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x006e0018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x006e0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x006e001b, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x0000006e, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x006e006e, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x006e006e, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x006e006e, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x006e006e, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x006e0065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x006e001c, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x0000004c, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73300000, /* EMC_CFG */ 0x0000089d, /* EMC_CFG_2 */ 0x00040000, /* EMC_SEL_DPD_CTRL */ 0xe0040069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430303, /* 
EMC_AUTO_CAL_CONFIG */ 0x80000f15, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200020, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 1180, /* expected dvfs latency (ns) */ }, }; #ifndef CONFIG_ARCH_TEGRA_13x_SOC static struct tegra12_emc_table ardbeg_ddr3_emc_table_pm359[] = { { 0x18, /* V5.0.12 */ "01_102000_01_V5.0.12_V0.9", /* DVFS table version */ 102000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000006, /* CLK_SOURCE_EMC */ 164, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000004, /* EMC_RC */ 0x0000001a, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000003, /* EMC_RAS */ 0x00000001, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000001, /* EMC_RD_RCD */ 0x00000001, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000304, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000000c1, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000018, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x0000001c, /* EMC_TXSR */ 0x0000001c, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000003, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* 
EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x0000031c, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x106aa298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, 
/* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000033, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x80000713, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x08000001, /* MC_EMEM_ARB_CFG */ 0x80000026, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000003, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 
0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0403, /* MC_EMEM_ARB_DA_COVERS */ 0x73c30504, /* MC_EMEM_ARB_MISC0 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000031, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff0075, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff009d, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff009d, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x009b0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ad, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00c6, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00d6, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x0000009f, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x0000009f, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x009f00a0, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, 
/* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 6890, /* expected dvfs latency (ns) */ }, { 0x18, /* V5.0.12 */ "01_792000_02_V5.0.12_V0.9", /* DVFS table version */ 792000, /* SDRAM frequency */ 980, /* min voltage */ 1100, /* gpu min voltage */ "pllm_ud", /* clock source id */ 0x80000000, /* CLK_SOURCE_EMC */ 164, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000024, /* EMC_RC */ 0x000000cd, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000019, /* EMC_RAS */ 0x0000000a, /* EMC_RP */ 0x00000008, /* EMC_R2W */ 0x0000000d, /* EMC_W2R */ 0x00000004, /* EMC_R2P */ 0x00000013, /* EMC_W2P */ 0x0000000a, /* EMC_RD_RCD */ 0x0000000a, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x0000000b, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000002, /* EMC_EINPUT */ 0x0000000d, /* EMC_EINPUT_DURATION */ 0x00080000, /* EMC_PUTERM_EXTRA */ 0x00000004, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000001, /* EMC_QRST */ 0x00000014, /* EMC_QSAFE */ 0x00000018, /* EMC_RDV */ 0x0000001a, /* EMC_RDV_MASK */ 0x000017e2, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000005f8, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000003, /* EMC_PDEX2WR */ 0x00000011, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x000000c7, /* EMC_AR2PDEN */ 0x00000018, /* EMC_RW2PDEN */ 0x000000d7, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000005, /* EMC_TCKE */ 0x00000006, /* EMC_TCKESR */ 0x00000005, /* 
EMC_TPD */ 0x0000001d, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000008, /* EMC_TCLKSTABLE */ 0x00000008, /* EMC_TCLKSTOP */ 0x00001822, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x104ab098, /* EMC_FBIO_CFG5 */ 0xe00700b1, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00000006, /* EMC_DLL_XFORM_DQS0 */ 0x00000006, /* EMC_DLL_XFORM_DQS1 */ 0x007f800a, /* EMC_DLL_XFORM_DQS2 */ 0x00000006, /* EMC_DLL_XFORM_DQS3 */ 0x00000006, /* EMC_DLL_XFORM_DQS4 */ 0x00004006, /* EMC_DLL_XFORM_DQS5 */ 0x00004006, /* EMC_DLL_XFORM_DQS6 */ 0x00000006, /* EMC_DLL_XFORM_DQS7 */ 0x00000006, /* EMC_DLL_XFORM_DQS8 */ 0x00000006, /* EMC_DLL_XFORM_DQS9 */ 0x007f800a, /* EMC_DLL_XFORM_DQS10 */ 0x00000006, /* EMC_DLL_XFORM_DQS11 */ 0x00000006, /* EMC_DLL_XFORM_DQS12 */ 0x00004006, /* EMC_DLL_XFORM_DQS13 */ 0x00004006, /* EMC_DLL_XFORM_DQS14 */ 0x00000006, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00034000, /* EMC_DLL_XFORM_ADDR0 */ 0x00034000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00034000, /* EMC_DLL_XFORM_ADDR3 */ 0x00034000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 
0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00000008, /* EMC_DLL_XFORM_DQ0 */ 0x00000008, /* EMC_DLL_XFORM_DQ1 */ 0x00000002, /* EMC_DLL_XFORM_DQ2 */ 0x00000008, /* EMC_DLL_XFORM_DQ3 */ 0x00000008, /* EMC_DLL_XFORM_DQ4 */ 0x00000008, /* EMC_DLL_XFORM_DQ5 */ 0x00000008, /* EMC_DLL_XFORM_DQ6 */ 0x00000008, /* EMC_DLL_XFORM_DQ7 */ 0x100002a0, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0120113d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc085, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x61861820, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x61861800, /* EMC_XM2DQSPADCTRL6 */ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x00f7000e, /* EMC_MRS_WAIT_CNT */ 0x00f7000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000004, /* EMC_CTT_DURATION */ 0x00004080, /* EMC_CFG_PIPE */ 0x80003012, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000f, /* EMC_QPOP */ 0x0e00000b, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000004, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000005, /* MC_EMEM_ARB_TIMING_RP */ 0x00000013, /* MC_EMEM_ARB_TIMING_RC */ 0x0000000c, /* MC_EMEM_ARB_TIMING_RAS */ 0x0000000f, /* MC_EMEM_ARB_TIMING_FAW */ 
0x00000002, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x0000000c, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000008, /* MC_EMEM_ARB_TIMING_W2R */ 0x08060202, /* MC_EMEM_ARB_DA_TURNS */ 0x00170e13, /* MC_EMEM_ARB_DA_COVERS */ 0x736c2414, /* MC_EMEM_ARB_MISC0 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000013, /* MC_MLL_MPCORER_PTSA_RATE */ 0x0000017c, /* MC_PTSA_GRANT_DECREMENT */ 0x00810038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00810038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x0081003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00810090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00810041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00810090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00810041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00810080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080016, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x00000081, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00810019, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00810018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00810024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x0081001c, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x00000081, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00810065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x0081001c, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* 
EMC_CTT_TERM_CTRL */ 0x73300000, /* EMC_CFG */ 0x0000089d, /* EMC_CFG_2 */ 0x00040000, /* EMC_SEL_DPD_CTRL */ 0xe0070069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430f0f, /* EMC_AUTO_CAL_CONFIG */ 0x80000d71, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200018, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 1200, /* expected dvfs latency (ns) */ }, }; #else static struct tegra12_emc_table t132_laguna_erss_ddr3_emc_table_pm359[] = { { 0x19, /* V6.0.4 */ "02_204000_01_V6.0.4_V1.1", /* DVFS table version */ 204000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000002, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000009, /* EMC_RC */ 0x00000035, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000006, /* EMC_RAS */ 0x00000002, /* EMC_RP */ 0x00000005, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000005, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000002, /* EMC_RD_RCD */ 0x00000002, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000005, /* EMC_WDV */ 0x00000005, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000004, /* EMC_EINPUT */ 0x00000006, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000003, /* EMC_QRST */ 0x0000000d, /* EMC_QSAFE */ 0x0000000f, /* EMC_RDV */ 0x00000011, /* EMC_RDV_MASK */ 0x00000607, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000181, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000032, /* 
EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x00000038, /* EMC_TXSR */ 0x00000038, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000007, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000638, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x106aa298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00008000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00008000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* 
EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00090000, /* EMC_DLL_XFORM_DQ0 */ 0x00090000, /* EMC_DLL_XFORM_DQ1 */ 0x00090000, /* EMC_DLL_XFORM_DQ2 */ 0x00090000, /* EMC_DLL_XFORM_DQ3 */ 0x00009000, /* EMC_DLL_XFORM_DQ4 */ 0x00009000, /* EMC_DLL_XFORM_DQ5 */ 0x00009000, /* EMC_DLL_XFORM_DQ6 */ 0x00009000, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000707, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000066, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000d2b3, /* EMC_CFG_PIPE */ 0x80000d22, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x01000003, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* 
MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000004, /* MC_EMEM_ARB_TIMING_RC */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000004, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06040203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0504, /* MC_EMEM_ARB_DA_COVERS */ 0x73840a05, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000062, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00af, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff004f, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00af, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff004f, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x004e0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080057, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff0063, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff0036, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff006b, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000050, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000050, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510050, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* 
MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00c6, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x0000088d, /* EMC_CFG_2 */ 0x00040000, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 3420, /* expected dvfs latency (ns) */ }, { 0x19, /* V6.0.4 */ "02_732000_01_V6.0.4_V1.1", /* DVFS table version */ 732000, /* SDRAM frequency */ 980, /* min voltage */ 980, /* gpu min voltage */ "pllm_ud", /* clock source id */ 0x80000000, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000022, /* EMC_RC */ 0x000000be, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000017, /* EMC_RAS */ 0x00000009, /* EMC_RP */ 0x00000008, /* EMC_R2W */ 0x0000000d, /* EMC_W2R */ 0x00000004, /* EMC_R2P */ 0x00000013, /* EMC_W2P */ 0x00000009, /* EMC_RD_RCD */ 0x00000009, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x0000000b, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000002, /* EMC_EINPUT */ 0x0000000d, /* EMC_EINPUT_DURATION */ 0x00080000, /* EMC_PUTERM_EXTRA */ 0x00000004, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000001, /* EMC_QRST */ 0x00000014, /* EMC_QSAFE */ 0x00000018, /* EMC_RDV */ 0x0000001a, /* EMC_RDV_MASK */ 0x0000160e, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000583, /* 
EMC_PRE_REFRESH_REQ_CNT */ 0x00000003, /* EMC_PDEX2WR */ 0x00000010, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x000000b8, /* EMC_AR2PDEN */ 0x00000018, /* EMC_RW2PDEN */ 0x000000c7, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000005, /* EMC_TCKE */ 0x00000006, /* EMC_TCKESR */ 0x00000005, /* EMC_TPD */ 0x0000001b, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000008, /* EMC_TCLKSTABLE */ 0x00000008, /* EMC_TCLKSTOP */ 0x0000164e, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x104ab098, /* EMC_FBIO_CFG5 */ 0xe00900b1, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00000009, /* EMC_DLL_XFORM_DQS0 */ 0x0000000a, /* EMC_DLL_XFORM_DQS1 */ 0x00000009, /* EMC_DLL_XFORM_DQS2 */ 0x0000000a, /* EMC_DLL_XFORM_DQS3 */ 0x00000007, /* EMC_DLL_XFORM_DQS4 */ 0x007fc00a, /* EMC_DLL_XFORM_DQS5 */ 0x0000000a, /* EMC_DLL_XFORM_DQS6 */ 0x00000007, /* EMC_DLL_XFORM_DQS7 */ 0x00000009, /* EMC_DLL_XFORM_DQS8 */ 0x0000000a, /* EMC_DLL_XFORM_DQS9 */ 0x00000009, /* EMC_DLL_XFORM_DQS10 */ 0x0000000a, /* EMC_DLL_XFORM_DQS11 */ 0x00000007, /* EMC_DLL_XFORM_DQS12 */ 0x007fc00a, /* EMC_DLL_XFORM_DQS13 */ 0x0000000a, /* EMC_DLL_XFORM_DQS14 */ 0x007fc009, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00034000, /* EMC_DLL_XFORM_ADDR0 */ 0x00034000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00034000, /* EMC_DLL_XFORM_ADDR3 */ 0x00034000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* 
EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000006, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000006, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000006, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000006, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000006, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000006, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000006, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000006, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000006, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000006, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000006, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000006, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000006, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000006, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000006, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000006, /* EMC_DLI_TRIM_TXDQS15 */ 0x00000008, /* EMC_DLL_XFORM_DQ0 */ 0x00000008, /* EMC_DLL_XFORM_DQ1 */ 0x00000008, /* EMC_DLL_XFORM_DQ2 */ 0x00000008, /* EMC_DLL_XFORM_DQ3 */ 0x00000008, /* EMC_DLL_XFORM_DQ4 */ 0x00000008, /* EMC_DLL_XFORM_DQ5 */ 0x00000008, /* EMC_DLL_XFORM_DQ6 */ 0x00000008, /* EMC_DLL_XFORM_DQ7 */ 0x100002a0, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0121113d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc085, /* EMC_XM2CLKPADCTRL */ 0x00000000, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51351220, /* EMC_XM2DQSPADCTRL3 */ 0x00511514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x4924d200, /* EMC_XM2DQSPADCTRL6 */ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x0106000e, /* EMC_MRS_WAIT_CNT */ 0x0106000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x00004080, /* EMC_CFG_PIPE */ 0x80002c81, /* 
EMC_DYN_SELF_REF_CONTROL */ 0x0000000f, /* EMC_QPOP */ 0x0000000b, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000004, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000005, /* MC_EMEM_ARB_TIMING_RP */ 0x00000011, /* MC_EMEM_ARB_TIMING_RC */ 0x0000000b, /* MC_EMEM_ARB_TIMING_RAS */ 0x0000000e, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000002, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x0000000c, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000008, /* MC_EMEM_ARB_TIMING_W2R */ 0x08060202, /* MC_EMEM_ARB_DA_TURNS */ 0x00160d11, /* MC_EMEM_ARB_DA_COVERS */ 0x736b2112, /* MC_EMEM_ARB_MISC0 */ 0x70000f02, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000012, /* MC_MLL_MPCORER_PTSA_RATE */ 0x0000015f, /* MC_PTSA_GRANT_DECREMENT */ 0x008b0038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x008b0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x008b003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x008b0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x008b0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x008b0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x008b0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x008b0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x008b0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x008b0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080018, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x0000008b, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x008b0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x008b001c, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x008b0018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x008b0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x008b001e, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x0000008b, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x008b008b, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x008b008b, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, 
/* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x008b008b, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x008b008b, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x008b0065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x008b001e, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73300000, /* EMC_CFG */ 0x0000089d, /* EMC_CFG_2 */ 0x00040000, /* EMC_SEL_DPD_CTRL */ 0xe0090069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80000d71, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200018, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 1230, /* expected dvfs latency (ns) */ }, }; #endif static struct tegra12_emc_table ardbeg_emc_table[] = { { 0x19, /* V5.0.14 */ "09_12750_03_V5.0.14_V1.1", /* DVFS table version */ 12750, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x4000003e, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000000, /* EMC_RC */ 0x00000003, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000000, /* EMC_RAS */ 0x00000000, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000000, /* EMC_RD_RCD */ 0x00000000, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, 
/* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000060, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000018, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000007, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x00000005, /* EMC_TXSR */ 0x00000005, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000000, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000064, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1069a298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* 
EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000007, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* 
EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x800001c5, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x40040001, /* MC_EMEM_ARB_CFG */ 0x8000000a, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0402, /* MC_EMEM_ARB_DA_COVERS */ 0x77e30303, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000007, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 
0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 57820, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "09_20400_03_V5.0.14_V1.1", /* DVFS table version */ 20400, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000026, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000000, /* EMC_RC */ 0x00000005, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000000, /* EMC_RAS */ 0x00000000, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000000, /* EMC_RD_RCD */ 0x00000000, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, 
/* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x0000009a, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000026, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000007, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x00000006, /* EMC_TXSR */ 0x00000006, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000000, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x000000a0, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1069a298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 
0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 
0x0000000b, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x8000023a, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x40020001, /* MC_EMEM_ARB_CFG */ 0x80000012, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0402, /* MC_EMEM_ARB_DA_COVERS */ 0x76230303, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x0000000a, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* 
MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 35610, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "09_40800_03_V5.0.14_V1.1", /* DVFS table version */ 40800, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000012, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000001, /* EMC_RC */ 0x0000000a, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000001, /* EMC_RAS */ 0x00000000, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000000, /* EMC_RD_RCD */ 0x00000000, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 
0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000134, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x0000004d, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000008, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x0000000c, /* EMC_TXSR */ 0x0000000c, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000000, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x0000013f, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1069a298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 
0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 
0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000015, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x80000370, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0xa0000001, /* MC_EMEM_ARB_CFG */ 0x80000017, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0402, /* MC_EMEM_ARB_DA_COVERS */ 0x74a30303, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000014, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 
*/ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 20850, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "09_68000_03_V5.0.14_V1.1", /* DVFS table version */ 68000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x4000000a, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000003, /* EMC_RC */ 0x00000011, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000002, /* EMC_RAS */ 0x00000000, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000000, /* EMC_RD_RCD */ 0x00000000, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 
0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000202, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000080, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x0000000f, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x00000013, /* EMC_TXSR */ 0x00000013, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000001, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000213, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1069a298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 
*/ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 
*/ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000022, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x8000050e, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x00000001, /* MC_EMEM_ARB_CFG */ 0x8000001e, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0402, /* MC_EMEM_ARB_DA_COVERS */ 0x74230403, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000021, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00b0, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ec, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ec, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00e90049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* 
MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00a3, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ef, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ef, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ee00ef, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 10720, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "09_102000_03_V5.0.14_V1.1", /* DVFS table version */ 102000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000006, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000004, /* EMC_RC */ 0x0000001a, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000003, /* EMC_RAS */ 0x00000001, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000001, /* EMC_RD_RCD */ 0x00000001, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 
0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000304, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000000c1, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000018, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x0000001c, /* EMC_TXSR */ 0x0000001c, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000003, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x0000031c, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1069a298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 
0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 
0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000033, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x80000713, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x08000001, /* MC_EMEM_ARB_CFG */ 0x80000026, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000003, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0403, /* MC_EMEM_ARB_DA_COVERS */ 0x73c30504, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000031, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff0075, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff009d, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff009d, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x009b0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 
0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ad, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00c6, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00d6, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x0000009f, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x0000009f, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x009f00a0, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 6890, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "09_204000_04_V5.0.14_V1.1", /* DVFS table version */ 204000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000002, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000009, /* EMC_RC */ 0x00000035, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000006, /* EMC_RAS */ 0x00000002, /* EMC_RP */ 0x00000005, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P 
*/ 0x0000000b, /* EMC_W2P */ 0x00000002, /* EMC_RD_RCD */ 0x00000002, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000005, /* EMC_WDV */ 0x00000005, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000004, /* EMC_EINPUT */ 0x00000006, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000003, /* EMC_QRST */ 0x0000000d, /* EMC_QSAFE */ 0x0000000f, /* EMC_RDV */ 0x00000011, /* EMC_RDV_MASK */ 0x00000607, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000181, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000032, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x00000038, /* EMC_TXSR */ 0x00000038, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000007, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000638, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1069a298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 
0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00008000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00008000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00060000, /* EMC_DLL_XFORM_DQ0 */ 0x00060000, /* EMC_DLL_XFORM_DQ1 */ 0x00060000, /* EMC_DLL_XFORM_DQ2 */ 0x00060000, /* EMC_DLL_XFORM_DQ3 */ 0x00006000, /* EMC_DLL_XFORM_DQ4 */ 0x00006000, /* EMC_DLL_XFORM_DQ5 */ 0x00006000, /* EMC_DLL_XFORM_DQ6 */ 0x00006000, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 
*/ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000707, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000066, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000d2b3, /* EMC_CFG_PIPE */ 0x80000d22, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x01000003, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000004, /* MC_EMEM_ARB_TIMING_RC */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000004, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06040203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0404, /* MC_EMEM_ARB_DA_COVERS */ 0x73840a05, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000062, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00af, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff004f, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00af, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff004f, /* 
MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x004e0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080057, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff0063, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff0036, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff006b, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000050, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000050, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510050, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00c6, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008cd, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 3420, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "09_300000_04_V5.0.14_V1.1", /* DVFS table version */ 300000, /* SDRAM frequency */ 820, /* min voltage */ 820, /* gpu min voltage */ "pllc_out0", /* clock source id */ 0x20000002, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x0000000d, /* EMC_RC */ 0x0000004d, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 
0x00000009, /* EMC_RAS */ 0x00000003, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000002, /* EMC_R2P */ 0x00000009, /* EMC_W2P */ 0x00000003, /* EMC_RD_RCD */ 0x00000003, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000005, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000002, /* EMC_EINPUT */ 0x00000007, /* EMC_EINPUT_DURATION */ 0x00020000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000001, /* EMC_QRST */ 0x0000000e, /* EMC_QSAFE */ 0x00000010, /* EMC_RDV */ 0x00000012, /* EMC_RDV_MASK */ 0x000008e4, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000239, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000001, /* EMC_PDEX2WR */ 0x00000008, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x0000004b, /* EMC_AR2PDEN */ 0x0000000e, /* EMC_RW2PDEN */ 0x00000052, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000009, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000924, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1049b098, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00030000, /* EMC_DLL_XFORM_DQS0 */ 0x00030000, /* EMC_DLL_XFORM_DQS1 */ 0x00030000, /* EMC_DLL_XFORM_DQS2 */ 0x00030000, /* EMC_DLL_XFORM_DQS3 */ 0x00030000, /* EMC_DLL_XFORM_DQS4 */ 0x00030000, /* EMC_DLL_XFORM_DQS5 */ 0x00030000, /* EMC_DLL_XFORM_DQS6 */ 0x00030000, /* EMC_DLL_XFORM_DQS7 */ 0x00030000, /* EMC_DLL_XFORM_DQS8 */ 0x00030000, /* EMC_DLL_XFORM_DQS9 */ 0x00030000, /* 
EMC_DLL_XFORM_DQS10 */ 0x00030000, /* EMC_DLL_XFORM_DQS11 */ 0x00030000, /* EMC_DLL_XFORM_DQS12 */ 0x00030000, /* EMC_DLL_XFORM_DQS13 */ 0x00030000, /* EMC_DLL_XFORM_DQS14 */ 0x00030000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00040000, /* EMC_DLL_XFORM_ADDR0 */ 0x00040000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00040000, /* EMC_DLL_XFORM_ADDR3 */ 0x00040000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00050000, /* EMC_DLL_XFORM_DQ0 */ 0x00050000, /* EMC_DLL_XFORM_DQ1 */ 0x00050000, /* EMC_DLL_XFORM_DQ2 */ 0x00050000, /* EMC_DLL_XFORM_DQ3 */ 0x00005000, /* EMC_DLL_XFORM_DQ4 */ 0x00005000, /* EMC_DLL_XFORM_DQ5 */ 0x00005000, /* EMC_DLL_XFORM_DQ6 */ 0x00005000, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 
0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x01231339, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000096, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x0173000e, /* EMC_MRS_WAIT_CNT */ 0x0173000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x000052a3, /* EMC_CFG_PIPE */ 0x800012d7, /* EMC_DYN_SELF_REF_CONTROL */ 0x00000009, /* EMC_QPOP */ 0x08000004, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RP */ 0x00000007, /* MC_EMEM_ARB_TIMING_RC */ 0x00000004, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000005, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000007, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06040202, /* MC_EMEM_ARB_DA_TURNS */ 0x000b0607, /* MC_EMEM_ARB_DA_COVERS */ 0x77450e08, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000004, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000090, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff0090, /* 
MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00350049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x0008003b, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff0043, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff002d, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510036, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff0087, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73340000, /* EMC_CFG */ 0x000008d5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80000321, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200000, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 2680, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "09_396000_06_V5.0.14_V1.1", /* DVFS table version */ 396000, /* SDRAM frequency */ 850, /* min voltage */ 850, /* gpu min voltage */ "pllm_out0", /* clock source id */ 0x00000002, /* CLK_SOURCE_EMC */ 165, 
/* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000011, /* EMC_RC */ 0x00000066, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x0000000c, /* EMC_RAS */ 0x00000004, /* EMC_RP */ 0x00000005, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000002, /* EMC_R2P */ 0x0000000a, /* EMC_W2P */ 0x00000004, /* EMC_RD_RCD */ 0x00000004, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000005, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000001, /* EMC_EINPUT */ 0x00000008, /* EMC_EINPUT_DURATION */ 0x00020000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000000, /* EMC_QRST */ 0x0000000f, /* EMC_QSAFE */ 0x00000010, /* EMC_RDV */ 0x00000012, /* EMC_RDV_MASK */ 0x00000bd1, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000002f4, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000001, /* EMC_PDEX2WR */ 0x00000008, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000063, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x0000006c, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x0000000d, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000c11, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1049b098, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00030000, /* EMC_DLL_XFORM_DQS0 */ 0x00030000, /* EMC_DLL_XFORM_DQS1 */ 0x00030000, /* EMC_DLL_XFORM_DQS2 */ 0x00030000, /* EMC_DLL_XFORM_DQS3 */ 0x00030000, /* EMC_DLL_XFORM_DQS4 */ 0x00030000, /* EMC_DLL_XFORM_DQS5 */ 0x00030000, /* 
EMC_DLL_XFORM_DQS6 */ 0x00030000, /* EMC_DLL_XFORM_DQS7 */ 0x00030000, /* EMC_DLL_XFORM_DQS8 */ 0x00030000, /* EMC_DLL_XFORM_DQS9 */ 0x00030000, /* EMC_DLL_XFORM_DQS10 */ 0x00030000, /* EMC_DLL_XFORM_DQS11 */ 0x00030000, /* EMC_DLL_XFORM_DQS12 */ 0x00030000, /* EMC_DLL_XFORM_DQS13 */ 0x00030000, /* EMC_DLL_XFORM_DQS14 */ 0x00030000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00040000, /* EMC_DLL_XFORM_ADDR0 */ 0x00040000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00040000, /* EMC_DLL_XFORM_ADDR3 */ 0x00040000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00044000, /* EMC_DLL_XFORM_DQ0 */ 0x00044000, /* EMC_DLL_XFORM_DQ1 */ 0x00044000, /* EMC_DLL_XFORM_DQ2 */ 0x00044000, /* EMC_DLL_XFORM_DQ3 */ 0x00004400, /* EMC_DLL_XFORM_DQ4 */ 
0x00004400, /* EMC_DLL_XFORM_DQ5 */ 0x00004400, /* EMC_DLL_XFORM_DQ6 */ 0x00004400, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x01231339, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x000000c6, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x015b000e, /* EMC_MRS_WAIT_CNT */ 0x015b000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x000052a3, /* EMC_CFG_PIPE */ 0x8000188b, /* EMC_DYN_SELF_REF_CONTROL */ 0x00000009, /* EMC_QPOP */ 0x0f000005, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RP */ 0x00000009, /* MC_EMEM_ARB_TIMING_RC */ 0x00000005, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000007, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06040202, /* MC_EMEM_ARB_DA_TURNS */ 0x000d0709, /* MC_EMEM_ARB_DA_COVERS */ 0x7586120a, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x0000000a, /* MC_MLL_MPCORER_PTSA_RATE */ 0x000000be, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff0038, /* 
MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00280049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x0008002d, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff0033, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff0022, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff0037, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff0066, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73340000, /* EMC_CFG */ 0x00000895, /* EMC_CFG_2 */ 0x00040008, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80000521, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200000, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 2180, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "09_528000_05_V5.0.14_V1.1", /* DVFS table version */ 528000, /* 
SDRAM frequency */ 880, /* min voltage */ 870, /* gpu min voltage */ "pllm_ud", /* clock source id */ 0x80000000, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000018, /* EMC_RC */ 0x00000088, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000010, /* EMC_RAS */ 0x00000006, /* EMC_RP */ 0x00000006, /* EMC_R2W */ 0x00000009, /* EMC_W2R */ 0x00000002, /* EMC_R2P */ 0x0000000d, /* EMC_W2P */ 0x00000006, /* EMC_RD_RCD */ 0x00000006, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000007, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000002, /* EMC_EINPUT */ 0x00000009, /* EMC_EINPUT_DURATION */ 0x00040000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000001, /* EMC_QRST */ 0x00000010, /* EMC_QSAFE */ 0x00000013, /* EMC_RDV */ 0x00000015, /* EMC_RDV_MASK */ 0x00000fd6, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000003f5, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x0000000b, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000085, /* EMC_AR2PDEN */ 0x00000012, /* EMC_RW2PDEN */ 0x00000090, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000013, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000006, /* EMC_TCLKSTABLE */ 0x00000006, /* EMC_TCLKSTOP */ 0x00001017, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1049b098, /* EMC_FBIO_CFG5 */ 0xe01200b1, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x0000000a, /* EMC_DLL_XFORM_DQS0 */ 0x0000000a, /* EMC_DLL_XFORM_DQS1 */ 0x0000000a, /* EMC_DLL_XFORM_DQS2 */ 
0x0000000a, /* EMC_DLL_XFORM_DQS3 */ 0x0000000a, /* EMC_DLL_XFORM_DQS4 */ 0x0000000a, /* EMC_DLL_XFORM_DQS5 */ 0x0000000a, /* EMC_DLL_XFORM_DQS6 */ 0x0000000a, /* EMC_DLL_XFORM_DQS7 */ 0x0000000a, /* EMC_DLL_XFORM_DQS8 */ 0x0000000a, /* EMC_DLL_XFORM_DQS9 */ 0x0000000a, /* EMC_DLL_XFORM_DQS10 */ 0x0000000a, /* EMC_DLL_XFORM_DQS11 */ 0x0000000a, /* EMC_DLL_XFORM_DQS12 */ 0x0000000a, /* EMC_DLL_XFORM_DQS13 */ 0x0000000a, /* EMC_DLL_XFORM_DQS14 */ 0x0000000a, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00054000, /* EMC_DLL_XFORM_ADDR0 */ 0x00054000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00054000, /* EMC_DLL_XFORM_ADDR3 */ 0x00054000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000e, /* EMC_DLL_XFORM_DQ0 */ 0x0000000e, /* 
EMC_DLL_XFORM_DQ1 */ 0x0000000e, /* EMC_DLL_XFORM_DQ2 */ 0x0000000e, /* EMC_DLL_XFORM_DQ3 */ 0x0000000e, /* EMC_DLL_XFORM_DQ4 */ 0x0000000e, /* EMC_DLL_XFORM_DQ5 */ 0x0000000e, /* EMC_DLL_XFORM_DQ6 */ 0x0000000e, /* EMC_DLL_XFORM_DQ7 */ 0x100002a0, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0123133d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc085, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x0139000e, /* EMC_MRS_WAIT_CNT */ 0x0139000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x000042a0, /* EMC_CFG_PIPE */ 0x80002062, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000b, /* EMC_QPOP */ 0x0f000007, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000002, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RP */ 0x0000000c, /* MC_EMEM_ARB_TIMING_RC */ 0x00000007, /* MC_EMEM_ARB_TIMING_RAS */ 0x0000000a, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000009, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06050202, /* MC_EMEM_ARB_DA_TURNS */ 0x0010090c, /* MC_EMEM_ARB_DA_COVERS */ 0x7428180d, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* 
MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x0000000d, /* MC_MLL_MPCORER_PTSA_RATE */ 0x000000fd, /* MC_PTSA_GRANT_DECREMENT */ 0x00c10038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00c10038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00c1003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00c10090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00c10041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00c10090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00c10041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00c10080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00c10004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00c10004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080021, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000c1, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00c10004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00c10026, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00c1001a, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00c10024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00c10029, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000c1, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00c10065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00c1002a, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73300000, /* EMC_CFG */ 0x0000089d, /* EMC_CFG_2 */ 0x00040008, /* EMC_SEL_DPD_CTRL */ 0xe0120069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80000941, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 
*/ 1440, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "09_600000_05_V5.0.14_V1.1", /* DVFS table version */ 600000, /* SDRAM frequency */ 910, /* min voltage */ 910, /* gpu min voltage */ "pllc_ud", /* clock source id */ 0xe0000000, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x0000001b, /* EMC_RC */ 0x0000009b, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000013, /* EMC_RAS */ 0x00000007, /* EMC_RP */ 0x00000007, /* EMC_R2W */ 0x0000000b, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x00000010, /* EMC_W2P */ 0x00000007, /* EMC_RD_RCD */ 0x00000007, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000005, /* EMC_WDV */ 0x00000005, /* EMC_WDV_MASK */ 0x0000000a, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000003, /* EMC_EINPUT */ 0x0000000b, /* EMC_EINPUT_DURATION */ 0x00070000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000002, /* EMC_QRST */ 0x00000012, /* EMC_QSAFE */ 0x00000016, /* EMC_RDV */ 0x00000018, /* EMC_RDV_MASK */ 0x00001208, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000482, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x0000000d, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000097, /* EMC_AR2PDEN */ 0x00000015, /* EMC_RW2PDEN */ 0x000000a3, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000015, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000006, /* EMC_TCLKSTABLE */ 0x00000006, /* EMC_TCLKSTOP */ 0x00001248, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1049b098, /* EMC_FBIO_CFG5 */ 0xe00e00b1, /* EMC_CFG_DIG_DLL */ 0x00008000, /* 
EMC_CFG_DIG_DLL_PERIOD */ 0x0000000a, /* EMC_DLL_XFORM_DQS0 */ 0x0000000a, /* EMC_DLL_XFORM_DQS1 */ 0x0000000a, /* EMC_DLL_XFORM_DQS2 */ 0x0000000a, /* EMC_DLL_XFORM_DQS3 */ 0x0000000a, /* EMC_DLL_XFORM_DQS4 */ 0x0000000a, /* EMC_DLL_XFORM_DQS5 */ 0x0000000a, /* EMC_DLL_XFORM_DQS6 */ 0x0000000a, /* EMC_DLL_XFORM_DQS7 */ 0x0000000a, /* EMC_DLL_XFORM_DQS8 */ 0x0000000a, /* EMC_DLL_XFORM_DQS9 */ 0x0000000a, /* EMC_DLL_XFORM_DQS10 */ 0x0000000a, /* EMC_DLL_XFORM_DQS11 */ 0x0000000a, /* EMC_DLL_XFORM_DQS12 */ 0x0000000a, /* EMC_DLL_XFORM_DQS13 */ 0x0000000a, /* EMC_DLL_XFORM_DQS14 */ 0x0000000a, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x0004c000, /* EMC_DLL_XFORM_ADDR0 */ 0x0004c000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x0004c000, /* EMC_DLL_XFORM_ADDR3 */ 0x0004c000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 
*/ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000a, /* EMC_DLL_XFORM_DQ0 */ 0x0000000a, /* EMC_DLL_XFORM_DQ1 */ 0x0000000a, /* EMC_DLL_XFORM_DQ2 */ 0x0000000a, /* EMC_DLL_XFORM_DQ3 */ 0x0000000a, /* EMC_DLL_XFORM_DQ4 */ 0x0000000a, /* EMC_DLL_XFORM_DQ5 */ 0x0000000a, /* EMC_DLL_XFORM_DQ6 */ 0x0000000a, /* EMC_DLL_XFORM_DQ7 */ 0x100002a0, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0121113d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc085, /* EMC_XM2CLKPADCTRL */ 0x00000404, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x0127000e, /* EMC_MRS_WAIT_CNT */ 0x0127000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x000040a0, /* EMC_CFG_PIPE */ 0x800024a9, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000e, /* EMC_QPOP */ 0x00000009, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000003, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000004, /* MC_EMEM_ARB_TIMING_RP */ 0x0000000e, /* MC_EMEM_ARB_TIMING_RC */ 0x00000009, /* MC_EMEM_ARB_TIMING_RAS */ 0x0000000b, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x0000000b, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000007, /* MC_EMEM_ARB_TIMING_W2R */ 0x07050202, /* MC_EMEM_ARB_DA_TURNS */ 
0x00130b0e, /* MC_EMEM_ARB_DA_COVERS */ 0x73a91b0f, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x0000000f, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000120, /* MC_PTSA_GRANT_DECREMENT */ 0x00aa0038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00aa0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00aa003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00aa0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00aa0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00aa0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00aa0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00aa0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x0008001d, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000aa, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00aa0022, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00aa0018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00aa0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00aa0024, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000aa, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00aa0065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00aa0025, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73300000, /* EMC_CFG */ 0x0000089d, /* EMC_CFG_2 */ 0x00040008, /* EMC_SEL_DPD_CTRL */ 0xe00e0069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 
0x80000b61, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200010, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 1440, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "09_792000_06_V5.0.14_V1.1", /* DVFS table version */ 792000, /* SDRAM frequency */ 980, /* min voltage */ 980, /* gpu min voltage */ "pllm_ud", /* clock source id */ 0x80000000, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000024, /* EMC_RC */ 0x000000cd, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000019, /* EMC_RAS */ 0x0000000a, /* EMC_RP */ 0x00000008, /* EMC_R2W */ 0x0000000d, /* EMC_W2R */ 0x00000004, /* EMC_R2P */ 0x00000013, /* EMC_W2P */ 0x0000000a, /* EMC_RD_RCD */ 0x0000000a, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x0000000b, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000002, /* EMC_EINPUT */ 0x0000000d, /* EMC_EINPUT_DURATION */ 0x00080000, /* EMC_PUTERM_EXTRA */ 0x00000004, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000001, /* EMC_QRST */ 0x00000014, /* EMC_QSAFE */ 0x00000018, /* EMC_RDV */ 0x0000001a, /* EMC_RDV_MASK */ 0x000017e2, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000005f8, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000003, /* EMC_PDEX2WR */ 0x00000011, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x000000c7, /* EMC_AR2PDEN */ 0x00000018, /* EMC_RW2PDEN */ 0x000000d7, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000005, /* EMC_TCKE */ 0x00000006, /* EMC_TCKESR */ 0x00000005, /* EMC_TPD */ 0x0000001d, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000008, /* EMC_TCLKSTABLE */ 0x00000008, /* EMC_TCLKSTOP */ 0x00001822, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* 
EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1049b098, /* EMC_FBIO_CFG5 */ 0xe00700b1, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00000006, /* EMC_DLL_XFORM_DQS0 */ 0x00000006, /* EMC_DLL_XFORM_DQS1 */ 0x00000006, /* EMC_DLL_XFORM_DQS2 */ 0x00000006, /* EMC_DLL_XFORM_DQS3 */ 0x00000006, /* EMC_DLL_XFORM_DQS4 */ 0x00000006, /* EMC_DLL_XFORM_DQS5 */ 0x00000006, /* EMC_DLL_XFORM_DQS6 */ 0x00000006, /* EMC_DLL_XFORM_DQS7 */ 0x00000006, /* EMC_DLL_XFORM_DQS8 */ 0x00000006, /* EMC_DLL_XFORM_DQS9 */ 0x00000006, /* EMC_DLL_XFORM_DQS10 */ 0x00000006, /* EMC_DLL_XFORM_DQS11 */ 0x00000006, /* EMC_DLL_XFORM_DQS12 */ 0x00000006, /* EMC_DLL_XFORM_DQS13 */ 0x00000006, /* EMC_DLL_XFORM_DQS14 */ 0x00000006, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00030000, /* EMC_DLL_XFORM_ADDR0 */ 0x00030000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00030000, /* EMC_DLL_XFORM_ADDR3 */ 0x00030000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* 
EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00000009, /* EMC_DLL_XFORM_DQ0 */ 0x00000009, /* EMC_DLL_XFORM_DQ1 */ 0x00000009, /* EMC_DLL_XFORM_DQ2 */ 0x00000009, /* EMC_DLL_XFORM_DQ3 */ 0x00000009, /* EMC_DLL_XFORM_DQ4 */ 0x00000009, /* EMC_DLL_XFORM_DQ5 */ 0x00000009, /* EMC_DLL_XFORM_DQ6 */ 0x00000009, /* EMC_DLL_XFORM_DQ7 */ 0x100002a0, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0120113d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc085, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x61861820, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x61861800, /* EMC_XM2DQSPADCTRL6 */ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x00f7000e, /* EMC_MRS_WAIT_CNT */ 0x00f7000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000004, /* EMC_CTT_DURATION */ 0x00004080, /* EMC_CFG_PIPE */ 0x80003012, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000f, /* EMC_QPOP */ 0x0e00000b, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000004, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000005, /* MC_EMEM_ARB_TIMING_RP */ 0x00000013, /* MC_EMEM_ARB_TIMING_RC */ 0x0000000c, /* MC_EMEM_ARB_TIMING_RAS */ 0x0000000f, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000002, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x0000000c, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* 
MC_EMEM_ARB_TIMING_W2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000008, /* MC_EMEM_ARB_TIMING_W2R */ 0x08060202, /* MC_EMEM_ARB_DA_TURNS */ 0x00170e13, /* MC_EMEM_ARB_DA_COVERS */ 0x736c2414, /* MC_EMEM_ARB_MISC0 */ 0x70000f02, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000013, /* MC_MLL_MPCORER_PTSA_RATE */ 0x0000017c, /* MC_PTSA_GRANT_DECREMENT */ 0x00810038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00810038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x0081003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00810090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00810041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00810090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00810041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00810080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080016, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x00000081, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00810019, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00810018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00810024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x0081001c, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x00000081, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00810065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x0081001c, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73300000, /* EMC_CFG */ 0x0000089d, /* EMC_CFG_2 */ 0x00040000, /* EMC_SEL_DPD_CTRL */ 0xe0070069, /* EMC_CFG_DIG_DLL */ 
0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80000d71, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200018, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 1200, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "09_924000_06_V5.0.14_V1.1", /* DVFS table version */ 924000, /* SDRAM frequency */ 1010, /* min voltage */ 1010, /* gpu min voltage */ "pllm_ud", /* clock source id */ 0x80000000, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x0000002b, /* EMC_RC */ 0x000000f0, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x0000001e, /* EMC_RAS */ 0x0000000b, /* EMC_RP */ 0x0000000a, /* EMC_R2W */ 0x0000000f, /* EMC_W2R */ 0x00000005, /* EMC_R2P */ 0x00000016, /* EMC_W2P */ 0x0000000b, /* EMC_RD_RCD */ 0x0000000b, /* EMC_WR_RCD */ 0x00000004, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000007, /* EMC_WDV */ 0x00000007, /* EMC_WDV_MASK */ 0x0000000d, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000002, /* EMC_EINPUT */ 0x0000000f, /* EMC_EINPUT_DURATION */ 0x000a0000, /* EMC_PUTERM_EXTRA */ 0x00000004, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000001, /* EMC_QRST */ 0x00000016, /* EMC_QSAFE */ 0x0000001a, /* EMC_RDV */ 0x0000001c, /* EMC_RDV_MASK */ 0x00001be7, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000006f9, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000004, /* EMC_PDEX2WR */ 0x00000015, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x000000e7, /* EMC_AR2PDEN */ 0x0000001b, /* EMC_RW2PDEN */ 0x000000fb, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000006, /* EMC_TCKE */ 0x00000007, /* EMC_TCKESR */ 0x00000006, /* EMC_TPD */ 0x00000022, /* EMC_TFAW */ 0x00000000, /* 
EMC_TRPAB */ 0x0000000a, /* EMC_TCLKSTABLE */ 0x0000000a, /* EMC_TCLKSTOP */ 0x00001c28, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1049b898, /* EMC_FBIO_CFG5 */ 0xe00400b1, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00000003, /* EMC_DLL_XFORM_DQS0 */ 0x00000003, /* EMC_DLL_XFORM_DQS1 */ 0x00000003, /* EMC_DLL_XFORM_DQS2 */ 0x00000003, /* EMC_DLL_XFORM_DQS3 */ 0x00000003, /* EMC_DLL_XFORM_DQS4 */ 0x00000003, /* EMC_DLL_XFORM_DQS5 */ 0x00000003, /* EMC_DLL_XFORM_DQS6 */ 0x00000003, /* EMC_DLL_XFORM_DQS7 */ 0x00000003, /* EMC_DLL_XFORM_DQS8 */ 0x00000003, /* EMC_DLL_XFORM_DQS9 */ 0x00000003, /* EMC_DLL_XFORM_DQS10 */ 0x00000003, /* EMC_DLL_XFORM_DQS11 */ 0x00000003, /* EMC_DLL_XFORM_DQS12 */ 0x00000003, /* EMC_DLL_XFORM_DQS13 */ 0x00000003, /* EMC_DLL_XFORM_DQS14 */ 0x00000003, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00030000, /* EMC_DLL_XFORM_ADDR0 */ 0x00030000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00030000, /* EMC_DLL_XFORM_ADDR3 */ 0x00030000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* 
EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000a, /* EMC_DLL_XFORM_DQ0 */ 0x0000000a, /* EMC_DLL_XFORM_DQ1 */ 0x0000000a, /* EMC_DLL_XFORM_DQ2 */ 0x0000000a, /* EMC_DLL_XFORM_DQ3 */ 0x0000000a, /* EMC_DLL_XFORM_DQ4 */ 0x0000000a, /* EMC_DLL_XFORM_DQ5 */ 0x0000000a, /* EMC_DLL_XFORM_DQ6 */ 0x0000000a, /* EMC_DLL_XFORM_DQ7 */ 0x100002a0, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0120113d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc085, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x5d75d720, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x5d75d700, /* EMC_XM2DQSPADCTRL6 */ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000128, /* EMC_ZCAL_WAIT_CNT */ 0x00cd000e, /* EMC_MRS_WAIT_CNT */ 0x00cd000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000004, /* EMC_CTT_DURATION */ 0x00004080, /* EMC_CFG_PIPE */ 0x800037ea, /* EMC_DYN_SELF_REF_CONTROL */ 0x00000011, /* EMC_QPOP */ 0x0e00000d, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000005, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000006, /* MC_EMEM_ARB_TIMING_RP */ 0x00000016, /* MC_EMEM_ARB_TIMING_RC */ 0x0000000e, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000011, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000002, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000004, 
/* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x0000000e, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000007, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000009, /* MC_EMEM_ARB_TIMING_W2R */ 0x09070202, /* MC_EMEM_ARB_DA_TURNS */ 0x001a1016, /* MC_EMEM_ARB_DA_COVERS */ 0x734e2a17, /* MC_EMEM_ARB_MISC0 */ 0x70000f02, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000017, /* MC_MLL_MPCORER_PTSA_RATE */ 0x000001bb, /* MC_PTSA_GRANT_DECREMENT */ 0x006e0038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x006e0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x006e003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x006e0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x006e0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x006e0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x006e0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x006e0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x006e0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x006e0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080016, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x0000006e, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x006e0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x006e0019, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x006e0018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x006e0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x006e001b, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x0000006e, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x006e006e, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x006e006e, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x006e006e, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x006e006e, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x006e0065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x006e001c, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x0000004c, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL 
*/ 0x73300000, /* EMC_CFG */ 0x0000089d, /* EMC_CFG_2 */ 0x00040000, /* EMC_SEL_DPD_CTRL */ 0xe0040069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80000f15, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200020, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 1180, /* expected dvfs latency (ns) */ }, }; static struct tegra12_emc_table ardbeg_4GB_emc_table[] = { { 0x19, /* V5.0.14 */ "03_12750_02_V5.0.14_V1.1", /* DVFS table version */ 12750, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x4000003e, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000000, /* EMC_RC */ 0x00000004, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000000, /* EMC_RAS */ 0x00000000, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000000, /* EMC_RD_RCD */ 0x00000000, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000060, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000018, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000007, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 
0x00000005, /* EMC_TXSR */ 0x00000005, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000000, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000064, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1069a298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* 
EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000007, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x800001c5, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x40040001, /* MC_EMEM_ARB_CFG */ 0x8000000a, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* 
MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0402, /* MC_EMEM_ARB_DA_COVERS */ 0x77e30303, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000007, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* 
MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00001221, /* Mode Register 0 */ 0x00100003, /* Mode Register 1 */ 0x00200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 57820, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "03_20400_02_V5.0.14_V1.1", /* DVFS table version */ 20400, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000026, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000000, /* EMC_RC */ 0x00000007, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000000, /* EMC_RAS */ 0x00000000, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000000, /* EMC_RD_RCD */ 0x00000000, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x0000009a, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000026, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 
0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000007, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x00000008, /* EMC_TXSR */ 0x00000008, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000000, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x000000a0, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1069a298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* 
EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x0000000b, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x8000023a, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* 
EMC_QPOP */ 0x40020001, /* MC_EMEM_ARB_CFG */ 0x80000012, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0402, /* MC_EMEM_ARB_DA_COVERS */ 0x77430303, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x0000000a, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 
0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00001221, /* Mode Register 0 */ 0x00100003, /* Mode Register 1 */ 0x00200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 35610, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "03_40800_02_V5.0.14_V1.1", /* DVFS table version */ 40800, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000012, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000001, /* EMC_RC */ 0x0000000e, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000001, /* EMC_RAS */ 0x00000000, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000000, /* EMC_RD_RCD */ 0x00000000, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK 
*/ 0x00000134, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x0000004d, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x0000000c, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x0000000f, /* EMC_TXSR */ 0x0000000f, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000000, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x0000013f, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1069a298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 
0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000015, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* 
EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x80000370, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0xa0000001, /* MC_EMEM_ARB_CFG */ 0x80000017, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0402, /* MC_EMEM_ARB_DA_COVERS */ 0x75e30303, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000014, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ff, /* 
MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00001221, /* Mode Register 0 */ 0x00100003, /* Mode Register 1 */ 0x00200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 20850, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "03_68000_02_V5.0.14_V1.1", /* DVFS table version */ 68000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x4000000a, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000003, /* EMC_RC */ 0x00000017, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000002, /* EMC_RAS */ 0x00000000, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000000, /* EMC_RD_RCD */ 0x00000000, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* 
EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000202, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000080, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000015, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x00000019, /* EMC_TXSR */ 0x00000019, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000001, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000213, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1069a298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* 
EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000022, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* 
EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x8000050e, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x00000001, /* MC_EMEM_ARB_CFG */ 0x8000001e, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0402, /* MC_EMEM_ARB_DA_COVERS */ 0x75430403, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000021, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00b0, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ec, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ec, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00e90049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00a3, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 
0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ef, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ef, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ee00ef, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00001221, /* Mode Register 0 */ 0x00100003, /* Mode Register 1 */ 0x00200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 10720, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "03_102000_03_V5.0.14_V1.1", /* DVFS table version */ 102000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000006, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000004, /* EMC_RC */ 0x00000023, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000003, /* EMC_RAS */ 0x00000001, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000001, /* EMC_RD_RCD */ 0x00000001, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 
0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000304, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000000c1, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000021, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x00000025, /* EMC_TXSR */ 0x00000025, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000003, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x0000031c, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1069a298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 
*/ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 
*/ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000033, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x80000713, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x08000001, /* MC_EMEM_ARB_CFG */ 0x80000026, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000003, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0403, /* MC_EMEM_ARB_DA_COVERS */ 0x74e30504, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000031, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff0075, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff009d, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff009d, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x009b0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ad, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00c6, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff006d, /* 
MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00d6, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x0000009f, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x0000009f, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x009f00a0, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00001221, /* Mode Register 0 */ 0x00100003, /* Mode Register 1 */ 0x00200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 6890, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "03_204000_03_V5.0.14_V1.1", /* DVFS table version */ 204000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000002, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000009, /* EMC_RC */ 0x00000047, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000006, /* EMC_RAS */ 0x00000002, /* EMC_RP */ 0x00000005, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000002, /* EMC_RD_RCD */ 0x00000002, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000005, /* EMC_WDV */ 0x00000005, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 
0x00000000, /* EMC_IBDLY */ 0x00000004, /* EMC_EINPUT */ 0x00000006, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000003, /* EMC_QRST */ 0x0000000d, /* EMC_QSAFE */ 0x0000000f, /* EMC_RDV */ 0x00000011, /* EMC_RDV_MASK */ 0x00000607, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000181, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000044, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x0000004a, /* EMC_TXSR */ 0x0000004a, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000007, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000638, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1069a298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00070000, /* EMC_DLL_XFORM_DQS0 */ 0x00070000, /* EMC_DLL_XFORM_DQS1 */ 0x00070000, /* EMC_DLL_XFORM_DQS2 */ 0x00070000, /* EMC_DLL_XFORM_DQS3 */ 0x00070000, /* EMC_DLL_XFORM_DQS4 */ 0x00070000, /* EMC_DLL_XFORM_DQS5 */ 0x00070000, /* EMC_DLL_XFORM_DQS6 */ 0x00070000, /* EMC_DLL_XFORM_DQS7 */ 0x00070000, /* EMC_DLL_XFORM_DQS8 */ 0x00070000, /* EMC_DLL_XFORM_DQS9 */ 0x00070000, /* EMC_DLL_XFORM_DQS10 */ 0x00070000, /* EMC_DLL_XFORM_DQS11 */ 0x00070000, /* EMC_DLL_XFORM_DQS12 */ 0x00070000, /* EMC_DLL_XFORM_DQS13 */ 0x00070000, /* EMC_DLL_XFORM_DQS14 */ 0x00070000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* 
EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00008000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00008000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00070000, /* EMC_DLL_XFORM_DQ0 */ 0x00070000, /* EMC_DLL_XFORM_DQ1 */ 0x00070000, /* EMC_DLL_XFORM_DQ2 */ 0x00070000, /* EMC_DLL_XFORM_DQ3 */ 0x00007000, /* EMC_DLL_XFORM_DQ4 */ 0x00007000, /* EMC_DLL_XFORM_DQ5 */ 0x00007000, /* EMC_DLL_XFORM_DQ6 */ 0x00007000, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000707, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, 
/* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000066, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000d2b3, /* EMC_CFG_PIPE */ 0x80000d22, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x01000003, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000004, /* MC_EMEM_ARB_TIMING_RC */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000004, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06040203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0404, /* MC_EMEM_ARB_DA_COVERS */ 0x74a40a05, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000062, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00af, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff004f, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00af, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff004f, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x004e0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080057, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* 
MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff0063, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff0036, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff006b, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000050, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000050, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510050, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00c6, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008cd, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00001221, /* Mode Register 0 */ 0x00100003, /* Mode Register 1 */ 0x00200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 3420, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "03_300000_03_V5.0.14_V1.1", /* DVFS table version */ 300000, /* SDRAM frequency */ 820, /* min voltage */ 820, /* gpu min voltage */ "pllc_out0", /* clock source id */ 0x20000002, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x0000000d, /* EMC_RC */ 0x00000067, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000009, /* EMC_RAS */ 0x00000003, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000002, /* EMC_R2P */ 0x00000009, /* EMC_W2P */ 0x00000003, /* EMC_RD_RCD */ 0x00000003, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* 
EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000005, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000002, /* EMC_EINPUT */ 0x00000007, /* EMC_EINPUT_DURATION */ 0x00020000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000001, /* EMC_QRST */ 0x0000000e, /* EMC_QSAFE */ 0x00000010, /* EMC_RDV */ 0x00000012, /* EMC_RDV_MASK */ 0x000008e4, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000239, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000001, /* EMC_PDEX2WR */ 0x00000008, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000065, /* EMC_AR2PDEN */ 0x0000000e, /* EMC_RW2PDEN */ 0x0000006c, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000009, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000924, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1049b098, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00030000, /* EMC_DLL_XFORM_DQS0 */ 0x00030000, /* EMC_DLL_XFORM_DQS1 */ 0x00030000, /* EMC_DLL_XFORM_DQS2 */ 0x00030000, /* EMC_DLL_XFORM_DQS3 */ 0x00030000, /* EMC_DLL_XFORM_DQS4 */ 0x00030000, /* EMC_DLL_XFORM_DQS5 */ 0x00030000, /* EMC_DLL_XFORM_DQS6 */ 0x00030000, /* EMC_DLL_XFORM_DQS7 */ 0x00030000, /* EMC_DLL_XFORM_DQS8 */ 0x00030000, /* EMC_DLL_XFORM_DQS9 */ 0x00030000, /* EMC_DLL_XFORM_DQS10 */ 0x00030000, /* EMC_DLL_XFORM_DQS11 */ 0x00030000, /* EMC_DLL_XFORM_DQS12 */ 0x00030000, /* EMC_DLL_XFORM_DQS13 */ 0x00030000, /* EMC_DLL_XFORM_DQS14 */ 0x00030000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 
0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00098000, /* EMC_DLL_XFORM_ADDR0 */ 0x00098000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00098000, /* EMC_DLL_XFORM_ADDR3 */ 0x00098000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00050000, /* EMC_DLL_XFORM_DQ0 */ 0x00050000, /* EMC_DLL_XFORM_DQ1 */ 0x00050000, /* EMC_DLL_XFORM_DQ2 */ 0x00050000, /* EMC_DLL_XFORM_DQ3 */ 0x00005000, /* EMC_DLL_XFORM_DQ4 */ 0x00005000, /* EMC_DLL_XFORM_DQ5 */ 0x00005000, /* EMC_DLL_XFORM_DQ6 */ 0x00005000, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x01231339, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 
0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000096, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x0117000e, /* EMC_MRS_WAIT_CNT */ 0x0117000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x000052a3, /* EMC_CFG_PIPE */ 0x800012d7, /* EMC_DYN_SELF_REF_CONTROL */ 0x00000009, /* EMC_QPOP */ 0x08000004, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RP */ 0x00000007, /* MC_EMEM_ARB_TIMING_RC */ 0x00000004, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000005, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000007, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06040202, /* MC_EMEM_ARB_DA_TURNS */ 0x000b0607, /* MC_EMEM_ARB_DA_COVERS */ 0x77450e08, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000004, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000090, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00350049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* 
MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x0008003b, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff0043, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff002d, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510036, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff0087, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73340000, /* EMC_CFG */ 0x000008d5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000321, /* Mode Register 0 */ 0x00100002, /* Mode Register 1 */ 0x00200000, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 2680, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "03_396000_03_V5.0.14_V1.1", /* DVFS table version */ 396000, /* SDRAM frequency */ 850, /* min voltage */ 850, /* gpu min voltage */ "pllm_out0", /* clock source id */ 0x00000002, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000011, /* EMC_RC */ 0x00000089, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x0000000c, /* EMC_RAS */ 0x00000004, /* EMC_RP */ 0x00000005, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000002, /* EMC_R2P */ 0x0000000a, 
/* EMC_W2P */ 0x00000004, /* EMC_RD_RCD */ 0x00000004, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000005, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000001, /* EMC_EINPUT */ 0x00000008, /* EMC_EINPUT_DURATION */ 0x00020000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000000, /* EMC_QRST */ 0x0000000f, /* EMC_QSAFE */ 0x00000010, /* EMC_RDV */ 0x00000012, /* EMC_RDV_MASK */ 0x00000bd1, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000002f4, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000001, /* EMC_PDEX2WR */ 0x00000008, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000087, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x0000008f, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x0000000d, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000c11, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1049b098, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00030000, /* EMC_DLL_XFORM_DQS0 */ 0x00030000, /* EMC_DLL_XFORM_DQS1 */ 0x00030000, /* EMC_DLL_XFORM_DQS2 */ 0x00030000, /* EMC_DLL_XFORM_DQS3 */ 0x00030000, /* EMC_DLL_XFORM_DQS4 */ 0x00030000, /* EMC_DLL_XFORM_DQS5 */ 0x00030000, /* EMC_DLL_XFORM_DQS6 */ 0x00030000, /* EMC_DLL_XFORM_DQS7 */ 0x00030000, /* EMC_DLL_XFORM_DQS8 */ 0x00030000, /* EMC_DLL_XFORM_DQS9 */ 0x00030000, /* EMC_DLL_XFORM_DQS10 */ 0x00030000, /* EMC_DLL_XFORM_DQS11 */ 0x00030000, /* EMC_DLL_XFORM_DQS12 */ 0x00030000, /* EMC_DLL_XFORM_DQS13 */ 0x00030000, /* 
EMC_DLL_XFORM_DQS14 */ 0x00030000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00070000, /* EMC_DLL_XFORM_ADDR0 */ 0x00070000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00070000, /* EMC_DLL_XFORM_ADDR3 */ 0x00070000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00044000, /* EMC_DLL_XFORM_DQ0 */ 0x00044000, /* EMC_DLL_XFORM_DQ1 */ 0x00044000, /* EMC_DLL_XFORM_DQ2 */ 0x00044000, /* EMC_DLL_XFORM_DQ3 */ 0x00004400, /* EMC_DLL_XFORM_DQ4 */ 0x00004400, /* EMC_DLL_XFORM_DQ5 */ 0x00004400, /* EMC_DLL_XFORM_DQ6 */ 0x00004400, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x01231339, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, 
/* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x000000c6, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x00f5000e, /* EMC_MRS_WAIT_CNT */ 0x00f5000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x000052a3, /* EMC_CFG_PIPE */ 0x8000188b, /* EMC_DYN_SELF_REF_CONTROL */ 0x00000009, /* EMC_QPOP */ 0x0f000005, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RP */ 0x00000009, /* MC_EMEM_ARB_TIMING_RC */ 0x00000005, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000007, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06040202, /* MC_EMEM_ARB_DA_TURNS */ 0x000d0709, /* MC_EMEM_ARB_DA_COVERS */ 0x7586120a, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x0000000a, /* MC_MLL_MPCORER_PTSA_RATE */ 0x000000be, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff0041, /* 
MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00280049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x0008002d, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff0033, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff0022, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff0037, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff0066, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73340000, /* EMC_CFG */ 0x00000895, /* EMC_CFG_2 */ 0x00040008, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000521, /* Mode Register 0 */ 0x00100002, /* Mode Register 1 */ 0x00200000, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 2180, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "03_528000_03_V5.0.14_V1.1", /* DVFS table version */ 528000, /* SDRAM frequency */ 880, /* min voltage */ 870, /* gpu min voltage */ "pllm_ud", /* clock source id */ 0x80000000, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000018, /* EMC_RC */ 0x000000b7, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 
0x00000010, /* EMC_RAS */ 0x00000006, /* EMC_RP */ 0x00000006, /* EMC_R2W */ 0x00000009, /* EMC_W2R */ 0x00000002, /* EMC_R2P */ 0x0000000d, /* EMC_W2P */ 0x00000006, /* EMC_RD_RCD */ 0x00000006, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000007, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000002, /* EMC_EINPUT */ 0x00000009, /* EMC_EINPUT_DURATION */ 0x00040000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000001, /* EMC_QRST */ 0x00000010, /* EMC_QSAFE */ 0x00000013, /* EMC_RDV */ 0x00000015, /* EMC_RDV_MASK */ 0x00000fd6, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000003f5, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x0000000b, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x000000b4, /* EMC_AR2PDEN */ 0x00000012, /* EMC_RW2PDEN */ 0x000000bf, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000013, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000006, /* EMC_TCLKSTABLE */ 0x00000006, /* EMC_TCLKSTOP */ 0x00001017, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1049b098, /* EMC_FBIO_CFG5 */ 0xe01200b1, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x0000000a, /* EMC_DLL_XFORM_DQS0 */ 0x0000000a, /* EMC_DLL_XFORM_DQS1 */ 0x0000000a, /* EMC_DLL_XFORM_DQS2 */ 0x0000000a, /* EMC_DLL_XFORM_DQS3 */ 0x0000000a, /* EMC_DLL_XFORM_DQS4 */ 0x0000000a, /* EMC_DLL_XFORM_DQS5 */ 0x0000000a, /* EMC_DLL_XFORM_DQS6 */ 0x0000000a, /* EMC_DLL_XFORM_DQS7 */ 0x0000000a, /* EMC_DLL_XFORM_DQS8 */ 0x0000000a, /* EMC_DLL_XFORM_DQS9 */ 0x0000000a, /* 
EMC_DLL_XFORM_DQS10 */ 0x0000000a, /* EMC_DLL_XFORM_DQS11 */ 0x0000000a, /* EMC_DLL_XFORM_DQS12 */ 0x0000000a, /* EMC_DLL_XFORM_DQS13 */ 0x0000000a, /* EMC_DLL_XFORM_DQS14 */ 0x0000000a, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00054000, /* EMC_DLL_XFORM_ADDR0 */ 0x00054000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00054000, /* EMC_DLL_XFORM_ADDR3 */ 0x00054000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000e, /* EMC_DLL_XFORM_DQ0 */ 0x0000000e, /* EMC_DLL_XFORM_DQ1 */ 0x0000000e, /* EMC_DLL_XFORM_DQ2 */ 0x0000000e, /* EMC_DLL_XFORM_DQ3 */ 0x0000000e, /* EMC_DLL_XFORM_DQ4 */ 0x0000000e, /* EMC_DLL_XFORM_DQ5 */ 0x0000000e, /* EMC_DLL_XFORM_DQ6 */ 0x0000000e, /* EMC_DLL_XFORM_DQ7 */ 0x100002a0, /* EMC_XM2CMDPADCTRL */ 
0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0123133d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc085, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x00c8000e, /* EMC_MRS_WAIT_CNT */ 0x00c8000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x000042a0, /* EMC_CFG_PIPE */ 0x80002062, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000b, /* EMC_QPOP */ 0x0f000007, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000002, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RP */ 0x0000000c, /* MC_EMEM_ARB_TIMING_RC */ 0x00000007, /* MC_EMEM_ARB_TIMING_RAS */ 0x0000000a, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000009, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06050202, /* MC_EMEM_ARB_DA_TURNS */ 0x0010090c, /* MC_EMEM_ARB_DA_COVERS */ 0x7488180d, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x0000000d, /* MC_MLL_MPCORER_PTSA_RATE */ 0x000000fd, /* MC_PTSA_GRANT_DECREMENT */ 0x00c10038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00c10038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00c1003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00c10090, /* 
MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00c10041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00c10090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00c10041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00c10080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00c10004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00c10004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080021, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000c1, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00c10004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00c10026, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00c1001a, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00c10024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00c10029, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000c1, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00c10065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00c1002a, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73300000, /* EMC_CFG */ 0x0000089d, /* EMC_CFG_2 */ 0x00040008, /* EMC_SEL_DPD_CTRL */ 0xe0120069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000941, /* Mode Register 0 */ 0x00100002, /* Mode Register 1 */ 0x00200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 1440, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "03_600000_02_V5.0.14_V1.1", /* DVFS table version */ 600000, /* SDRAM frequency */ 910, /* min voltage */ 910, /* gpu min voltage */ "pllc_ud", /* clock source id */ 0xe0000000, /* CLK_SOURCE_EMC */ 165, 
/* number of burst_regs */ 31, /* number of up_down_regs */ { 0x0000001b, /* EMC_RC */ 0x000000d0, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000013, /* EMC_RAS */ 0x00000007, /* EMC_RP */ 0x00000007, /* EMC_R2W */ 0x0000000b, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x00000010, /* EMC_W2P */ 0x00000007, /* EMC_RD_RCD */ 0x00000007, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000005, /* EMC_WDV */ 0x00000005, /* EMC_WDV_MASK */ 0x0000000a, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000003, /* EMC_EINPUT */ 0x0000000b, /* EMC_EINPUT_DURATION */ 0x00070000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000002, /* EMC_QRST */ 0x00000012, /* EMC_QSAFE */ 0x00000016, /* EMC_RDV */ 0x00000018, /* EMC_RDV_MASK */ 0x00001208, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000482, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x0000000d, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x000000cc, /* EMC_AR2PDEN */ 0x00000015, /* EMC_RW2PDEN */ 0x000000d8, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000015, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000006, /* EMC_TCLKSTABLE */ 0x00000006, /* EMC_TCLKSTOP */ 0x00001248, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1049b098, /* EMC_FBIO_CFG5 */ 0xe00e00b1, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00000008, /* EMC_DLL_XFORM_DQS0 */ 0x00000008, /* EMC_DLL_XFORM_DQS1 */ 0x00000008, /* EMC_DLL_XFORM_DQS2 */ 0x00000008, /* EMC_DLL_XFORM_DQS3 */ 0x00000008, /* EMC_DLL_XFORM_DQS4 */ 0x00000008, /* EMC_DLL_XFORM_DQS5 */ 0x00000008, /* 
EMC_DLL_XFORM_DQS6 */ 0x00000008, /* EMC_DLL_XFORM_DQS7 */ 0x00000008, /* EMC_DLL_XFORM_DQS8 */ 0x00000008, /* EMC_DLL_XFORM_DQS9 */ 0x00000008, /* EMC_DLL_XFORM_DQS10 */ 0x00000008, /* EMC_DLL_XFORM_DQS11 */ 0x00000008, /* EMC_DLL_XFORM_DQS12 */ 0x00000008, /* EMC_DLL_XFORM_DQS13 */ 0x00000008, /* EMC_DLL_XFORM_DQS14 */ 0x00000008, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00048000, /* EMC_DLL_XFORM_ADDR0 */ 0x00048000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00048000, /* EMC_DLL_XFORM_ADDR3 */ 0x00048000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000b, /* EMC_DLL_XFORM_DQ0 */ 0x0000000b, /* EMC_DLL_XFORM_DQ1 */ 0x0000000b, /* EMC_DLL_XFORM_DQ2 */ 0x0000000b, /* EMC_DLL_XFORM_DQ3 */ 0x0000000b, /* EMC_DLL_XFORM_DQ4 */ 
0x0000000b, /* EMC_DLL_XFORM_DQ5 */ 0x0000000b, /* EMC_DLL_XFORM_DQ6 */ 0x0000000b, /* EMC_DLL_XFORM_DQ7 */ 0x100002a0, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0121113d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc085, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x00b0000e, /* EMC_MRS_WAIT_CNT */ 0x00b0000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x000040a0, /* EMC_CFG_PIPE */ 0x800024a9, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000e, /* EMC_QPOP */ 0x00000009, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000003, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000004, /* MC_EMEM_ARB_TIMING_RP */ 0x0000000e, /* MC_EMEM_ARB_TIMING_RC */ 0x00000009, /* MC_EMEM_ARB_TIMING_RAS */ 0x0000000b, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x0000000b, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000007, /* MC_EMEM_ARB_TIMING_W2R */ 0x07050202, /* MC_EMEM_ARB_DA_TURNS */ 0x00130b0e, /* MC_EMEM_ARB_DA_COVERS */ 0x74891b0f, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x0000000f, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000120, /* MC_PTSA_GRANT_DECREMENT */ 0x00aa0038, /* 
MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00aa0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00aa003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00aa0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00aa0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00aa0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00aa0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00aa0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x0008001d, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000aa, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00aa0022, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00aa0018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00aa0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00aa0024, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000aa, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00aa0065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00aa0025, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73300000, /* EMC_CFG */ 0x0000089d, /* EMC_CFG_2 */ 0x00040008, /* EMC_SEL_DPD_CTRL */ 0xe00e0069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000b61, /* Mode Register 0 */ 0x00100002, /* Mode Register 1 */ 0x00200010, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 1440, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "03_792000_04_V5.0.14_V1.1", /* DVFS table version */ 792000, /* 
SDRAM frequency */ 980, /* min voltage */ 980, /* gpu min voltage */ "pllm_ud", /* clock source id */ 0x80000000, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000024, /* EMC_RC */ 0x00000114, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000019, /* EMC_RAS */ 0x0000000a, /* EMC_RP */ 0x00000008, /* EMC_R2W */ 0x0000000d, /* EMC_W2R */ 0x00000004, /* EMC_R2P */ 0x00000013, /* EMC_W2P */ 0x0000000a, /* EMC_RD_RCD */ 0x0000000a, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x0000000b, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000002, /* EMC_EINPUT */ 0x0000000d, /* EMC_EINPUT_DURATION */ 0x00080000, /* EMC_PUTERM_EXTRA */ 0x00000004, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000001, /* EMC_QRST */ 0x00000014, /* EMC_QSAFE */ 0x00000018, /* EMC_RDV */ 0x0000001a, /* EMC_RDV_MASK */ 0x000017e2, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000005f8, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000003, /* EMC_PDEX2WR */ 0x00000011, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x0000010d, /* EMC_AR2PDEN */ 0x00000018, /* EMC_RW2PDEN */ 0x0000011e, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000005, /* EMC_TCKE */ 0x00000006, /* EMC_TCKESR */ 0x00000005, /* EMC_TPD */ 0x0000001d, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000008, /* EMC_TCLKSTABLE */ 0x00000008, /* EMC_TCLKSTOP */ 0x00001822, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1049b098, /* EMC_FBIO_CFG5 */ 0xe00700b1, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00000004, /* EMC_DLL_XFORM_DQS0 */ 0x00000004, /* EMC_DLL_XFORM_DQS1 */ 0x00000004, /* EMC_DLL_XFORM_DQS2 */ 
0x00000004, /* EMC_DLL_XFORM_DQS3 */ 0x00000004, /* EMC_DLL_XFORM_DQS4 */ 0x00000004, /* EMC_DLL_XFORM_DQS5 */ 0x00000004, /* EMC_DLL_XFORM_DQS6 */ 0x00000004, /* EMC_DLL_XFORM_DQS7 */ 0x00000004, /* EMC_DLL_XFORM_DQS8 */ 0x00000004, /* EMC_DLL_XFORM_DQS9 */ 0x00000004, /* EMC_DLL_XFORM_DQS10 */ 0x00000004, /* EMC_DLL_XFORM_DQS11 */ 0x00000004, /* EMC_DLL_XFORM_DQS12 */ 0x00000004, /* EMC_DLL_XFORM_DQS13 */ 0x00000004, /* EMC_DLL_XFORM_DQS14 */ 0x00000004, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00034000, /* EMC_DLL_XFORM_ADDR0 */ 0x00034000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00034000, /* EMC_DLL_XFORM_ADDR3 */ 0x00034000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000b, /* EMC_DLL_XFORM_DQ0 */ 0x0000000b, /* 
EMC_DLL_XFORM_DQ1 */ 0x0000000b, /* EMC_DLL_XFORM_DQ2 */ 0x0000000b, /* EMC_DLL_XFORM_DQ3 */ 0x0000000b, /* EMC_DLL_XFORM_DQ4 */ 0x0000000b, /* EMC_DLL_XFORM_DQ5 */ 0x0000000b, /* EMC_DLL_XFORM_DQ6 */ 0x0000000b, /* EMC_DLL_XFORM_DQ7 */ 0x100002a0, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0120113d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc085, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x69a69a20, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x69a69a00, /* EMC_XM2DQSPADCTRL6 */ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x006f000e, /* EMC_MRS_WAIT_CNT */ 0x006f000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000004, /* EMC_CTT_DURATION */ 0x00004080, /* EMC_CFG_PIPE */ 0x80003012, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000f, /* EMC_QPOP */ 0x0e00000b, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000004, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000005, /* MC_EMEM_ARB_TIMING_RP */ 0x00000013, /* MC_EMEM_ARB_TIMING_RC */ 0x0000000c, /* MC_EMEM_ARB_TIMING_RAS */ 0x0000000f, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000002, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x0000000c, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000008, /* MC_EMEM_ARB_TIMING_W2R */ 0x08060202, /* MC_EMEM_ARB_DA_TURNS */ 0x00170e13, /* MC_EMEM_ARB_DA_COVERS */ 0x746c2414, /* MC_EMEM_ARB_MISC0 */ 0x70000f02, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* 
MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000013, /* MC_MLL_MPCORER_PTSA_RATE */ 0x0000017c, /* MC_PTSA_GRANT_DECREMENT */ 0x00810038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00810038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x0081003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00810090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00810041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00810090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00810041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00810080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080016, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x00000081, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00810019, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00810018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00810024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x0081001c, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x00000081, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00810065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x0081001c, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73300000, /* EMC_CFG */ 0x0000089d, /* EMC_CFG_2 */ 0x00040000, /* EMC_SEL_DPD_CTRL */ 0xe0070069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000d71, /* Mode Register 0 */ 0x00100002, /* Mode Register 1 */ 0x00200018, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 
*/ 1200, /* expected dvfs latency (ns) */ }, }; static struct tegra12_emc_table ardbeg_lpddr3_emc_table[] = { { 0x19, /* V5.0.14 */ "09_12750_04_V5.0.14_V1.1", /* DVFS table version */ 12750, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x4000003e, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000000, /* EMC_RC */ 0x00000003, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000002, /* EMC_RAS */ 0x00000002, /* EMC_RP */ 0x00000006, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000a, /* EMC_W2P */ 0x00000002, /* EMC_RD_RCD */ 0x00000002, /* EMC_WR_RCD */ 0x00000001, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000030, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x0000000c, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000002, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x0000000c, /* EMC_RW2PDEN */ 0x00000003, /* EMC_TXSR */ 0x00000002, /* EMC_TXSRDLL */ 0x00000003, /* EMC_TCKE */ 0x00000003, /* EMC_TCKESR */ 0x00000003, /* EMC_TPD */ 0x00000006, /* EMC_TFAW */ 0x00000004, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x00000036, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a296, /* 
EMC_FBIO_CFG5 */ 0x005800a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00048000, /* EMC_DLL_XFORM_DQS0 */ 0x00048000, /* EMC_DLL_XFORM_DQS1 */ 0x00048000, /* EMC_DLL_XFORM_DQS2 */ 0x00048000, /* EMC_DLL_XFORM_DQS3 */ 0x00048000, /* EMC_DLL_XFORM_DQS4 */ 0x00048000, /* EMC_DLL_XFORM_DQS5 */ 0x00048000, /* EMC_DLL_XFORM_DQS6 */ 0x00048000, /* EMC_DLL_XFORM_DQS7 */ 0x00048000, /* EMC_DLL_XFORM_DQS8 */ 0x00048000, /* EMC_DLL_XFORM_DQS9 */ 0x00048000, /* EMC_DLL_XFORM_DQS10 */ 0x00048000, /* EMC_DLL_XFORM_DQS11 */ 0x00048000, /* EMC_DLL_XFORM_DQS12 */ 0x00048000, /* EMC_DLL_XFORM_DQS13 */ 0x00048000, /* EMC_DLL_XFORM_DQS14 */ 0x00048000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR0 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR3 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 
0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x00000200, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x0130b018, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc000, /* EMC_XM2CLKPADCTRL */ 0x00000404, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x00000011, /* EMC_ZCAL_WAIT_CNT */ 0x000d0011, /* EMC_MRS_WAIT_CNT */ 0x000d0011, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f3f3, /* EMC_CFG_PIPE */ 0x80000164, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x40040001, /* MC_EMEM_ARB_CFG */ 0x8000000a, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000003, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000007, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000005, 
/* MC_EMEM_ARB_TIMING_W2R */ 0x05040102, /* MC_EMEM_ARB_DA_TURNS */ 0x00090402, /* MC_EMEM_ARB_DA_COVERS */ 0x77c30303, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000007, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000015, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3200000, /* EMC_CFG */ 0x000008c7, /* EMC_CFG_2 */ 0x0004013c, /* EMC_SEL_DPD_CTRL */ 0x00580068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* 
EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x00010083, /* Mode Register 1 */ 0x00020004, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 57820, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "09_20400_04_V5.0.14_V1.1", /* DVFS table version */ 20400, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000026, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000001, /* EMC_RC */ 0x00000003, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000002, /* EMC_RAS */ 0x00000002, /* EMC_RP */ 0x00000006, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000a, /* EMC_W2P */ 0x00000002, /* EMC_RD_RCD */ 0x00000002, /* EMC_WR_RCD */ 0x00000001, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x0000004d, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000013, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000002, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x0000000c, /* EMC_RW2PDEN */ 0x00000003, /* EMC_TXSR */ 0x00000003, /* EMC_TXSRDLL */ 0x00000003, /* EMC_TCKE */ 0x00000003, /* EMC_TCKESR */ 0x00000003, /* EMC_TPD */ 0x00000006, /* EMC_TFAW */ 0x00000004, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x00000055, /* 
EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a296, /* EMC_FBIO_CFG5 */ 0x005800a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00048000, /* EMC_DLL_XFORM_DQS0 */ 0x00048000, /* EMC_DLL_XFORM_DQS1 */ 0x00048000, /* EMC_DLL_XFORM_DQS2 */ 0x00048000, /* EMC_DLL_XFORM_DQS3 */ 0x00048000, /* EMC_DLL_XFORM_DQS4 */ 0x00048000, /* EMC_DLL_XFORM_DQS5 */ 0x00048000, /* EMC_DLL_XFORM_DQS6 */ 0x00048000, /* EMC_DLL_XFORM_DQS7 */ 0x00048000, /* EMC_DLL_XFORM_DQS8 */ 0x00048000, /* EMC_DLL_XFORM_DQS9 */ 0x00048000, /* EMC_DLL_XFORM_DQS10 */ 0x00048000, /* EMC_DLL_XFORM_DQS11 */ 0x00048000, /* EMC_DLL_XFORM_DQS12 */ 0x00048000, /* EMC_DLL_XFORM_DQS13 */ 0x00048000, /* EMC_DLL_XFORM_DQS14 */ 0x00048000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR0 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR3 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 
0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x00000200, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x0130b018, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc000, /* EMC_XM2CLKPADCTRL */ 0x00000404, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x00000011, /* EMC_ZCAL_WAIT_CNT */ 0x00150011, /* EMC_MRS_WAIT_CNT */ 0x00150011, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f3f3, /* EMC_CFG_PIPE */ 0x8000019f, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x40020001, /* MC_EMEM_ARB_CFG */ 0x80000012, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000003, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000007, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* 
MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_W2R */ 0x05040102, /* MC_EMEM_ARB_DA_TURNS */ 0x00090402, /* MC_EMEM_ARB_DA_COVERS */ 0x74e30303, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x0000000a, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000015, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3200000, /* EMC_CFG */ 0x000008c7, /* EMC_CFG_2 */ 0x0004013c, /* EMC_SEL_DPD_CTRL */ 
0x00580068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x00010083, /* Mode Register 1 */ 0x00020004, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 35610, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "09_40800_04_V5.0.14_V1.1", /* DVFS table version */ 40800, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000012, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000002, /* EMC_RC */ 0x00000005, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000002, /* EMC_RAS */ 0x00000002, /* EMC_RP */ 0x00000006, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000a, /* EMC_W2P */ 0x00000002, /* EMC_RD_RCD */ 0x00000002, /* EMC_WR_RCD */ 0x00000001, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x0000009a, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000026, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000002, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x0000000c, /* EMC_RW2PDEN */ 0x00000006, /* EMC_TXSR */ 0x00000006, /* EMC_TXSRDLL */ 0x00000003, /* EMC_TCKE */ 0x00000003, /* EMC_TCKESR */ 0x00000003, /* EMC_TPD */ 0x00000006, 
/* EMC_TFAW */ 0x00000004, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x000000aa, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a296, /* EMC_FBIO_CFG5 */ 0x005800a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00048000, /* EMC_DLL_XFORM_DQS0 */ 0x00048000, /* EMC_DLL_XFORM_DQS1 */ 0x00048000, /* EMC_DLL_XFORM_DQS2 */ 0x00048000, /* EMC_DLL_XFORM_DQS3 */ 0x00048000, /* EMC_DLL_XFORM_DQS4 */ 0x00048000, /* EMC_DLL_XFORM_DQS5 */ 0x00048000, /* EMC_DLL_XFORM_DQS6 */ 0x00048000, /* EMC_DLL_XFORM_DQS7 */ 0x00048000, /* EMC_DLL_XFORM_DQS8 */ 0x00048000, /* EMC_DLL_XFORM_DQS9 */ 0x00048000, /* EMC_DLL_XFORM_DQS10 */ 0x00048000, /* EMC_DLL_XFORM_DQS11 */ 0x00048000, /* EMC_DLL_XFORM_DQS12 */ 0x00048000, /* EMC_DLL_XFORM_DQS13 */ 0x00048000, /* EMC_DLL_XFORM_DQS14 */ 0x00048000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR0 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR3 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* 
EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x00000200, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x0130b018, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc000, /* EMC_XM2CLKPADCTRL */ 0x00000404, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x00000011, /* EMC_ZCAL_WAIT_CNT */ 0x00290011, /* EMC_MRS_WAIT_CNT */ 0x00290011, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f3f3, /* EMC_CFG_PIPE */ 0x8000023a, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0xa0000001, /* MC_EMEM_ARB_CFG */ 0x80000017, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000003, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* 
MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000007, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_W2R */ 0x05040102, /* MC_EMEM_ARB_DA_TURNS */ 0x00090402, /* MC_EMEM_ARB_DA_COVERS */ 0x73030303, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000014, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000015, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL 
*/ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3200000, /* EMC_CFG */ 0x000008c7, /* EMC_CFG_2 */ 0x0004013c, /* EMC_SEL_DPD_CTRL */ 0x00580068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x00010083, /* Mode Register 1 */ 0x00020004, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 20850, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "09_68000_04_V5.0.14_V1.1", /* DVFS table version */ 68000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x4000000a, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000004, /* EMC_RC */ 0x00000008, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000002, /* EMC_RAS */ 0x00000002, /* EMC_RP */ 0x00000006, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000a, /* EMC_W2P */ 0x00000002, /* EMC_RD_RCD */ 0x00000002, /* EMC_WR_RCD */ 0x00000001, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000101, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000040, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000002, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x0000000c, /* EMC_RW2PDEN */ 0x0000000a, /* EMC_TXSR 
*/ 0x0000000a, /* EMC_TXSRDLL */ 0x00000003, /* EMC_TCKE */ 0x00000003, /* EMC_TCKESR */ 0x00000003, /* EMC_TPD */ 0x00000006, /* EMC_TFAW */ 0x00000004, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x0000011b, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a296, /* EMC_FBIO_CFG5 */ 0x005800a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00048000, /* EMC_DLL_XFORM_DQS0 */ 0x00048000, /* EMC_DLL_XFORM_DQS1 */ 0x00048000, /* EMC_DLL_XFORM_DQS2 */ 0x00048000, /* EMC_DLL_XFORM_DQS3 */ 0x00048000, /* EMC_DLL_XFORM_DQS4 */ 0x00048000, /* EMC_DLL_XFORM_DQS5 */ 0x00048000, /* EMC_DLL_XFORM_DQS6 */ 0x00048000, /* EMC_DLL_XFORM_DQS7 */ 0x00048000, /* EMC_DLL_XFORM_DQS8 */ 0x00048000, /* EMC_DLL_XFORM_DQS9 */ 0x00048000, /* EMC_DLL_XFORM_DQS10 */ 0x00048000, /* EMC_DLL_XFORM_DQS11 */ 0x00048000, /* EMC_DLL_XFORM_DQS12 */ 0x00048000, /* EMC_DLL_XFORM_DQS13 */ 0x00048000, /* EMC_DLL_XFORM_DQS14 */ 0x00048000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR0 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR3 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* 
EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x00000200, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x0130b018, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc000, /* EMC_XM2CLKPADCTRL */ 0x00000404, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x00000019, /* EMC_ZCAL_WAIT_CNT */ 0x00440011, /* EMC_MRS_WAIT_CNT */ 0x00440011, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f3f3, /* EMC_CFG_PIPE */ 0x80000309, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x00000001, /* MC_EMEM_ARB_CFG */ 0x8000001e, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* 
MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000003, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000007, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_W2R */ 0x05040102, /* MC_EMEM_ARB_DA_TURNS */ 0x00090402, /* MC_EMEM_ARB_DA_COVERS */ 0x72630403, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000021, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00b0, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ec, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ec, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00e90049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00a3, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ef, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ef, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ee00ef, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* 
MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000015, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3200000, /* EMC_CFG */ 0x000008c7, /* EMC_CFG_2 */ 0x0004013c, /* EMC_SEL_DPD_CTRL */ 0x00580068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x00010083, /* Mode Register 1 */ 0x00020004, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 10720, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "09_102000_04_V5.0.14_V1.1", /* DVFS table version */ 102000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000006, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000006, /* EMC_RC */ 0x0000000d, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000004, /* EMC_RAS */ 0x00000002, /* EMC_RP */ 0x00000006, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000a, /* EMC_W2P */ 0x00000002, /* EMC_RD_RCD */ 0x00000002, /* EMC_WR_RCD */ 0x00000001, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000182, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000060, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000002, /* 
EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x0000000c, /* EMC_RW2PDEN */ 0x0000000f, /* EMC_TXSR */ 0x0000000f, /* EMC_TXSRDLL */ 0x00000003, /* EMC_TCKE */ 0x00000003, /* EMC_TCKESR */ 0x00000003, /* EMC_TPD */ 0x00000006, /* EMC_TFAW */ 0x00000004, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x000001a9, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a296, /* EMC_FBIO_CFG5 */ 0x005800a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00048000, /* EMC_DLL_XFORM_DQS0 */ 0x00048000, /* EMC_DLL_XFORM_DQS1 */ 0x00048000, /* EMC_DLL_XFORM_DQS2 */ 0x00048000, /* EMC_DLL_XFORM_DQS3 */ 0x00048000, /* EMC_DLL_XFORM_DQS4 */ 0x00048000, /* EMC_DLL_XFORM_DQS5 */ 0x00048000, /* EMC_DLL_XFORM_DQS6 */ 0x00048000, /* EMC_DLL_XFORM_DQS7 */ 0x00048000, /* EMC_DLL_XFORM_DQS8 */ 0x00048000, /* EMC_DLL_XFORM_DQS9 */ 0x00048000, /* EMC_DLL_XFORM_DQS10 */ 0x00048000, /* EMC_DLL_XFORM_DQS11 */ 0x00048000, /* EMC_DLL_XFORM_DQS12 */ 0x00048000, /* EMC_DLL_XFORM_DQS13 */ 0x00048000, /* EMC_DLL_XFORM_DQS14 */ 0x00048000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR0 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR3 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 
0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x00000200, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x0130b018, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc000, /* EMC_XM2CLKPADCTRL */ 0x00000404, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x00000025, /* EMC_ZCAL_WAIT_CNT */ 0x00660011, /* EMC_MRS_WAIT_CNT */ 0x00660011, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f3f3, /* EMC_CFG_PIPE */ 0x8000040b, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x08000001, /* MC_EMEM_ARB_CFG */ 0x80000026, /* 
MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000003, /* MC_EMEM_ARB_TIMING_RC */ 0x00000001, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000003, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000007, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_W2R */ 0x05040102, /* MC_EMEM_ARB_DA_TURNS */ 0x00090403, /* MC_EMEM_ARB_DA_COVERS */ 0x72430504, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000031, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff0075, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff009d, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff009d, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x009b0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ad, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00c6, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00d6, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x0000009f, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x0000009f, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x009f00a0, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* 
MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000015, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3200000, /* EMC_CFG */ 0x000008c7, /* EMC_CFG_2 */ 0x0004013c, /* EMC_SEL_DPD_CTRL */ 0x00580068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x00010083, /* Mode Register 1 */ 0x00020004, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 6890, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "09_204000_04_V5.0.14_V1.1", /* DVFS table version */ 204000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000002, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x0000000c, /* EMC_RC */ 0x0000001a, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000008, /* EMC_RAS */ 0x00000003, /* EMC_RP */ 0x00000007, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000a, /* EMC_W2P */ 0x00000003, /* EMC_RD_RCD */ 0x00000003, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000002, /* EMC_WDV */ 0x00000002, /* EMC_WDV_MASK */ 0x00000005, /* EMC_QUSE */ 0x00000003, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000003, /* EMC_EINPUT */ 0x00000007, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000004, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000002, /* EMC_QRST */ 0x0000000e, /* EMC_QSAFE */ 0x0000000f, /* EMC_RDV */ 0x00000011, /* EMC_RDV_MASK */ 0x00000304, /* EMC_REFRESH */ 0x00000000, /* 
EMC_BURST_REFRESH_NUM */ 0x000000c1, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000003, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x0000000c, /* EMC_RW2PDEN */ 0x0000001d, /* EMC_TXSR */ 0x0000001d, /* EMC_TXSRDLL */ 0x00000003, /* EMC_TCKE */ 0x00000004, /* EMC_TCKESR */ 0x00000003, /* EMC_TPD */ 0x00000009, /* EMC_TFAW */ 0x00000005, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x00000351, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a296, /* EMC_FBIO_CFG5 */ 0x005800a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00048000, /* EMC_DLL_XFORM_DQS0 */ 0x00048000, /* EMC_DLL_XFORM_DQS1 */ 0x00048000, /* EMC_DLL_XFORM_DQS2 */ 0x00048000, /* EMC_DLL_XFORM_DQS3 */ 0x00048000, /* EMC_DLL_XFORM_DQS4 */ 0x00048000, /* EMC_DLL_XFORM_DQS5 */ 0x00048000, /* EMC_DLL_XFORM_DQS6 */ 0x00048000, /* EMC_DLL_XFORM_DQS7 */ 0x00048000, /* EMC_DLL_XFORM_DQS8 */ 0x00048000, /* EMC_DLL_XFORM_DQS9 */ 0x00048000, /* EMC_DLL_XFORM_DQS10 */ 0x00048000, /* EMC_DLL_XFORM_DQS11 */ 0x00048000, /* EMC_DLL_XFORM_DQS12 */ 0x00048000, /* EMC_DLL_XFORM_DQS13 */ 0x00048000, /* EMC_DLL_XFORM_DQS14 */ 0x00048000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00080000, /* EMC_DLL_XFORM_ADDR0 */ 0x00080000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00080000, /* EMC_DLL_XFORM_ADDR3 */ 0x00080000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* 
EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00090000, /* EMC_DLL_XFORM_DQ0 */ 0x00090000, /* EMC_DLL_XFORM_DQ1 */ 0x00090000, /* EMC_DLL_XFORM_DQ2 */ 0x00090000, /* EMC_DLL_XFORM_DQ3 */ 0x00009000, /* EMC_DLL_XFORM_DQ4 */ 0x00009000, /* EMC_DLL_XFORM_DQ5 */ 0x00009000, /* EMC_DLL_XFORM_DQ6 */ 0x00009000, /* EMC_DLL_XFORM_DQ7 */ 0x00000200, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x0130b018, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc000, /* EMC_XM2CLKPADCTRL */ 0x00000606, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x0000004a, /* EMC_ZCAL_WAIT_CNT */ 0x00cc0011, /* EMC_MRS_WAIT_CNT */ 0x00cc0011, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000004, /* EMC_CTT_DURATION */ 0x0000d3b3, 
/* EMC_CFG_PIPE */ 0x80000713, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x01000003, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000006, /* MC_EMEM_ARB_TIMING_RC */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000005, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000007, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_W2R */ 0x05050103, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0506, /* MC_EMEM_ARB_DA_COVERS */ 0x71e40a07, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000062, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00af, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff004f, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00af, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff004f, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x004e0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080057, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff0063, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff0036, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff006b, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000050, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000050, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* 
MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510050, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00c6, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000017, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3200000, /* EMC_CFG */ 0x000008cf, /* EMC_CFG_2 */ 0x0004013c, /* EMC_SEL_DPD_CTRL */ 0x00580068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x00010083, /* Mode Register 1 */ 0x00020004, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 3420, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "09_300000_04_V5.0.14_V1.1", /* DVFS table version */ 300000, /* SDRAM frequency */ 820, /* min voltage */ 820, /* gpu min voltage */ "pllc_out0", /* clock source id */ 0x20000002, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000011, /* EMC_RC */ 0x00000026, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x0000000c, /* EMC_RAS */ 0x00000005, /* EMC_RP */ 0x00000007, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000a, /* EMC_W2P */ 0x00000005, /* EMC_RD_RCD */ 0x00000005, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000002, /* EMC_WDV */ 0x00000002, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000003, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000003, /* EMC_EINPUT */ 0x00000008, /* EMC_EINPUT_DURATION */ 0x00030000, /* EMC_PUTERM_EXTRA */ 0x00000004, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000002, /* EMC_QRST */ 
0x0000000f, /* EMC_QSAFE */ 0x00000012, /* EMC_RDV */ 0x00000014, /* EMC_RDV_MASK */ 0x0000046e, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x0000011b, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000005, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x0000000c, /* EMC_RW2PDEN */ 0x0000002a, /* EMC_TXSR */ 0x0000002a, /* EMC_TXSRDLL */ 0x00000003, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000003, /* EMC_TPD */ 0x0000000d, /* EMC_TFAW */ 0x00000007, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x000004e0, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a096, /* EMC_FBIO_CFG5 */ 0x005800a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00020000, /* EMC_DLL_XFORM_DQS0 */ 0x00020000, /* EMC_DLL_XFORM_DQS1 */ 0x00020000, /* EMC_DLL_XFORM_DQS2 */ 0x00020000, /* EMC_DLL_XFORM_DQS3 */ 0x00020000, /* EMC_DLL_XFORM_DQS4 */ 0x00020000, /* EMC_DLL_XFORM_DQS5 */ 0x00020000, /* EMC_DLL_XFORM_DQS6 */ 0x00020000, /* EMC_DLL_XFORM_DQS7 */ 0x00020000, /* EMC_DLL_XFORM_DQS8 */ 0x00020000, /* EMC_DLL_XFORM_DQS9 */ 0x00020000, /* EMC_DLL_XFORM_DQS10 */ 0x00020000, /* EMC_DLL_XFORM_DQS11 */ 0x00020000, /* EMC_DLL_XFORM_DQS12 */ 0x00020000, /* EMC_DLL_XFORM_DQS13 */ 0x00020000, /* EMC_DLL_XFORM_DQS14 */ 0x00020000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00060000, /* EMC_DLL_XFORM_ADDR0 */ 0x00060000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00060000, /* EMC_DLL_XFORM_ADDR3 */ 0x00060000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 
0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00060000, /* EMC_DLL_XFORM_DQ0 */ 0x00060000, /* EMC_DLL_XFORM_DQ1 */ 0x00060000, /* EMC_DLL_XFORM_DQ2 */ 0x00060000, /* EMC_DLL_XFORM_DQ3 */ 0x00006000, /* EMC_DLL_XFORM_DQ4 */ 0x00006000, /* EMC_DLL_XFORM_DQ5 */ 0x00006000, /* EMC_DLL_XFORM_DQ6 */ 0x00006000, /* EMC_DLL_XFORM_DQ7 */ 0x00000200, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x01231239, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc000, /* EMC_XM2CLKPADCTRL */ 0x00000606, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x0000006c, /* EMC_ZCAL_WAIT_CNT */ 0x012c0011, /* 
EMC_MRS_WAIT_CNT */ 0x012c0011, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000004, /* EMC_CTT_DURATION */ 0x000052a3, /* EMC_CFG_PIPE */ 0x800009ed, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000b, /* EMC_QPOP */ 0x08000004, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RP */ 0x00000009, /* MC_EMEM_ARB_TIMING_RC */ 0x00000005, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000007, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000007, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_W2R */ 0x05050103, /* MC_EMEM_ARB_DA_TURNS */ 0x000c0709, /* MC_EMEM_ARB_DA_COVERS */ 0x71c50e0a, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000004, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000090, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00350049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x0008003b, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff0043, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff002d, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* 
MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510036, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff0087, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x0000001f, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3300000, /* EMC_CFG */ 0x000008d7, /* EMC_CFG_2 */ 0x0004013c, /* EMC_SEL_DPD_CTRL */ 0x00580068, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x00010083, /* Mode Register 1 */ 0x00020004, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 2680, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "09_396000_05_V5.0.14_V1.1", /* DVFS table version */ 396000, /* SDRAM frequency */ 850, /* min voltage */ 850, /* gpu min voltage */ "pllm_out0", /* clock source id */ 0x00000002, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000017, /* EMC_RC */ 0x00000033, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000010, /* EMC_RAS */ 0x00000007, /* EMC_RP */ 0x00000008, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000a, /* EMC_W2P */ 0x00000007, /* EMC_RD_RCD */ 0x00000007, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000002, /* EMC_WDV */ 0x00000002, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000003, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000002, /* EMC_EINPUT */ 0x00000009, /* EMC_EINPUT_DURATION */ 0x00030000, /* EMC_PUTERM_EXTRA */ 0x00000004, /* EMC_PUTERM_WIDTH */ 0x00000000, /* 
EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000001, /* EMC_QRST */ 0x00000010, /* EMC_QSAFE */ 0x00000012, /* EMC_RDV */ 0x00000014, /* EMC_RDV_MASK */ 0x000005d9, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000176, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000007, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x0000000e, /* EMC_RW2PDEN */ 0x00000038, /* EMC_TXSR */ 0x00000038, /* EMC_TXSRDLL */ 0x00000003, /* EMC_TCKE */ 0x00000006, /* EMC_TCKESR */ 0x00000003, /* EMC_TPD */ 0x00000012, /* EMC_TFAW */ 0x00000009, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x00000670, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a096, /* EMC_FBIO_CFG5 */ 0x005800a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00020000, /* EMC_DLL_XFORM_DQS0 */ 0x00020000, /* EMC_DLL_XFORM_DQS1 */ 0x00020000, /* EMC_DLL_XFORM_DQS2 */ 0x00020000, /* EMC_DLL_XFORM_DQS3 */ 0x00020000, /* EMC_DLL_XFORM_DQS4 */ 0x00020000, /* EMC_DLL_XFORM_DQS5 */ 0x00020000, /* EMC_DLL_XFORM_DQS6 */ 0x00020000, /* EMC_DLL_XFORM_DQS7 */ 0x00020000, /* EMC_DLL_XFORM_DQS8 */ 0x00020000, /* EMC_DLL_XFORM_DQS9 */ 0x00020000, /* EMC_DLL_XFORM_DQS10 */ 0x00020000, /* EMC_DLL_XFORM_DQS11 */ 0x00020000, /* EMC_DLL_XFORM_DQS12 */ 0x00020000, /* EMC_DLL_XFORM_DQS13 */ 0x00020000, /* EMC_DLL_XFORM_DQS14 */ 0x00020000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00050000, /* EMC_DLL_XFORM_ADDR0 */ 0x00050000, /* EMC_DLL_XFORM_ADDR1 */ 
0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00050000, /* EMC_DLL_XFORM_ADDR3 */ 0x00050000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00040000, /* EMC_DLL_XFORM_DQ0 */ 0x00040000, /* EMC_DLL_XFORM_DQ1 */ 0x00040000, /* EMC_DLL_XFORM_DQ2 */ 0x00040000, /* EMC_DLL_XFORM_DQ3 */ 0x00004000, /* EMC_DLL_XFORM_DQ4 */ 0x00004000, /* EMC_DLL_XFORM_DQ5 */ 0x00004000, /* EMC_DLL_XFORM_DQ6 */ 0x00004000, /* EMC_DLL_XFORM_DQ7 */ 0x00000200, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x01231239, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc000, /* EMC_XM2CLKPADCTRL */ 0x00000606, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 
0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x0000008f, /* EMC_ZCAL_WAIT_CNT */ 0x018c0011, /* EMC_MRS_WAIT_CNT */ 0x018c0011, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000004, /* EMC_CTT_DURATION */ 0x000052a3, /* EMC_CFG_PIPE */ 0x80000cc7, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000b, /* EMC_QPOP */ 0x0f000005, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000002, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RP */ 0x0000000c, /* MC_EMEM_ARB_TIMING_RC */ 0x00000007, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000009, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000007, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_W2R */ 0x05050103, /* MC_EMEM_ARB_DA_TURNS */ 0x000e090c, /* MC_EMEM_ARB_DA_COVERS */ 0x71c6120d, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x0000000a, /* MC_MLL_MPCORER_PTSA_RATE */ 0x000000be, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00280049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x0008002d, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff0033, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff0022, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* 
MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff0037, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff0066, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000028, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3300000, /* EMC_CFG */ 0x00000897, /* EMC_CFG_2 */ 0x0004001c, /* EMC_SEL_DPD_CTRL */ 0x00580068, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x00010083, /* Mode Register 1 */ 0x00020004, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 2180, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "09_528000_05_V5.0.14_V1.1", /* DVFS table version */ 528000, /* SDRAM frequency */ 880, /* min voltage */ 870, /* gpu min voltage */ "pllm_ud", /* clock source id */ 0x80000000, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x0000001f, /* EMC_RC */ 0x00000044, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000016, /* EMC_RAS */ 0x00000009, /* EMC_RP */ 0x0000000a, /* EMC_R2W */ 0x00000009, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000d, /* EMC_W2P */ 0x00000009, /* EMC_RD_RCD */ 0x00000009, /* EMC_WR_RCD */ 0x00000005, /* EMC_RRD */ 0x00000004, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000002, /* EMC_WDV */ 0x00000002, /* EMC_WDV_MASK */ 0x00000008, /* EMC_QUSE */ 0x00000003, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000003, /* EMC_EINPUT */ 
0x0000000a, /* EMC_EINPUT_DURATION */ 0x00050000, /* EMC_PUTERM_EXTRA */ 0x00000004, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000002, /* EMC_QRST */ 0x00000011, /* EMC_QSAFE */ 0x00000015, /* EMC_RDV */ 0x00000017, /* EMC_RDV_MASK */ 0x000007cd, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000001f3, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000003, /* EMC_PDEX2WR */ 0x00000003, /* EMC_PDEX2RD */ 0x00000009, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x00000011, /* EMC_RW2PDEN */ 0x0000004a, /* EMC_TXSR */ 0x0000004a, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000008, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000019, /* EMC_TFAW */ 0x0000000c, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x00000895, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a096, /* EMC_FBIO_CFG5 */ 0xe01200b9, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x0000000a, /* EMC_DLL_XFORM_DQS0 */ 0x0000000a, /* EMC_DLL_XFORM_DQS1 */ 0x0000000a, /* EMC_DLL_XFORM_DQS2 */ 0x0000000a, /* EMC_DLL_XFORM_DQS3 */ 0x0000000a, /* EMC_DLL_XFORM_DQS4 */ 0x0000000a, /* EMC_DLL_XFORM_DQS5 */ 0x0000000a, /* EMC_DLL_XFORM_DQS6 */ 0x0000000a, /* EMC_DLL_XFORM_DQS7 */ 0x0000000a, /* EMC_DLL_XFORM_DQS8 */ 0x0000000a, /* EMC_DLL_XFORM_DQS9 */ 0x0000000a, /* EMC_DLL_XFORM_DQS10 */ 0x0000000a, /* EMC_DLL_XFORM_DQS11 */ 0x0000000a, /* EMC_DLL_XFORM_DQS12 */ 0x0000000a, /* EMC_DLL_XFORM_DQS13 */ 0x0000000a, /* EMC_DLL_XFORM_DQS14 */ 0x0000000a, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 
0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000010, /* EMC_DLL_XFORM_ADDR0 */ 0x00000010, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000010, /* EMC_DLL_XFORM_ADDR3 */ 0x00000010, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000e, /* EMC_DLL_XFORM_DQ0 */ 0x0000000e, /* EMC_DLL_XFORM_DQ1 */ 0x0000000e, /* EMC_DLL_XFORM_DQ2 */ 0x0000000e, /* EMC_DLL_XFORM_DQ3 */ 0x0000000e, /* EMC_DLL_XFORM_DQ4 */ 0x0000000e, /* EMC_DLL_XFORM_DQ5 */ 0x0000000e, /* EMC_DLL_XFORM_DQ6 */ 0x0000000e, /* EMC_DLL_XFORM_DQ7 */ 0x00000220, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x0123123d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc004, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 
0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x000000bf, /* EMC_ZCAL_WAIT_CNT */ 0x02100013, /* EMC_MRS_WAIT_CNT */ 0x02100013, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000004, /* EMC_CTT_DURATION */ 0x000042a0, /* EMC_CFG_PIPE */ 0x800010b3, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000d, /* EMC_QPOP */ 0x0f000007, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000003, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000004, /* MC_EMEM_ARB_TIMING_RP */ 0x00000010, /* MC_EMEM_ARB_TIMING_RC */ 0x0000000a, /* MC_EMEM_ARB_TIMING_RAS */ 0x0000000d, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000002, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000009, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06060103, /* MC_EMEM_ARB_DA_TURNS */ 0x00120b10, /* MC_EMEM_ARB_DA_COVERS */ 0x71c81811, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x0000000d, /* MC_MLL_MPCORER_PTSA_RATE */ 0x000000fd, /* MC_PTSA_GRANT_DECREMENT */ 0x00c10038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00c10038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00c1003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00c10090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00c10041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00c10090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00c10041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00c10080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00c10004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00c10004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080021, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000c1, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00c10004, /* MC_LATENCY_ALLOWANCE_AVPC_0 
*/ 0x00c10026, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00c1001a, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00c10024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00c10029, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000c1, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00c10065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00c1002a, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000034, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3300000, /* EMC_CFG */ 0x0000089f, /* EMC_CFG_2 */ 0x0004001c, /* EMC_SEL_DPD_CTRL */ 0xe0120069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x000100c3, /* Mode Register 1 */ 0x00020006, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 1440, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "09_600000_04_V5.0.14_V1.1", /* DVFS table version */ 600000, /* SDRAM frequency */ 910, /* min voltage */ 910, /* gpu min voltage */ "pllc_ud", /* clock source id */ 0xe0000000, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000023, /* EMC_RC */ 0x0000004d, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000019, /* EMC_RAS */ 0x0000000a, /* EMC_RP */ 0x0000000a, /* EMC_R2W */ 0x0000000b, /* EMC_W2R */ 0x00000004, /* EMC_R2P */ 0x0000000f, /* EMC_W2P */ 0x0000000a, /* EMC_RD_RCD */ 0x0000000a, /* EMC_WR_RCD */ 0x00000005, /* EMC_RRD */ 0x00000004, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000004, /* EMC_WDV */ 0x00000004, /* EMC_WDV_MASK */ 
0x0000000a, /* EMC_QUSE */ 0x00000004, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000003, /* EMC_EINPUT */ 0x0000000d, /* EMC_EINPUT_DURATION */ 0x00070000, /* EMC_PUTERM_EXTRA */ 0x00000005, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000002, /* EMC_QRST */ 0x00000014, /* EMC_QSAFE */ 0x00000018, /* EMC_RDV */ 0x0000001a, /* EMC_RDV_MASK */ 0x000008e4, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000239, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000004, /* EMC_PDEX2WR */ 0x00000004, /* EMC_PDEX2RD */ 0x0000000a, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x00000013, /* EMC_RW2PDEN */ 0x00000054, /* EMC_TXSR */ 0x00000054, /* EMC_TXSRDLL */ 0x00000005, /* EMC_TCKE */ 0x00000009, /* EMC_TCKESR */ 0x00000005, /* EMC_TPD */ 0x0000001c, /* EMC_TFAW */ 0x0000000d, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x000009c0, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a096, /* EMC_FBIO_CFG5 */ 0xe00e00b9, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x0000000a, /* EMC_DLL_XFORM_DQS0 */ 0x0000000a, /* EMC_DLL_XFORM_DQS1 */ 0x0000000a, /* EMC_DLL_XFORM_DQS2 */ 0x0000000a, /* EMC_DLL_XFORM_DQS3 */ 0x0000000a, /* EMC_DLL_XFORM_DQS4 */ 0x0000000a, /* EMC_DLL_XFORM_DQS5 */ 0x0000000a, /* EMC_DLL_XFORM_DQS6 */ 0x0000000a, /* EMC_DLL_XFORM_DQS7 */ 0x0000000a, /* EMC_DLL_XFORM_DQS8 */ 0x0000000a, /* EMC_DLL_XFORM_DQS9 */ 0x0000000a, /* EMC_DLL_XFORM_DQS10 */ 0x0000000a, /* EMC_DLL_XFORM_DQS11 */ 0x0000000a, /* EMC_DLL_XFORM_DQS12 */ 0x0000000a, /* EMC_DLL_XFORM_DQS13 */ 0x0000000a, /* EMC_DLL_XFORM_DQS14 */ 0x0000000a, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 
*/ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000010, /* EMC_DLL_XFORM_ADDR0 */ 0x00000010, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000010, /* EMC_DLL_XFORM_ADDR3 */ 0x00000010, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000d, /* EMC_DLL_XFORM_DQ0 */ 0x0000000d, /* EMC_DLL_XFORM_DQ1 */ 0x0000000d, /* EMC_DLL_XFORM_DQ2 */ 0x0000000d, /* EMC_DLL_XFORM_DQ3 */ 0x0000000d, /* EMC_DLL_XFORM_DQ4 */ 0x0000000d, /* EMC_DLL_XFORM_DQ5 */ 0x0000000d, /* EMC_DLL_XFORM_DQ6 */ 0x0000000d, /* EMC_DLL_XFORM_DQ7 */ 0x00000220, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x0121103d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc004, /* EMC_XM2CLKPADCTRL */ 0x00000606, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 
*/ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x000000d8, /* EMC_ZCAL_WAIT_CNT */ 0x02580014, /* EMC_MRS_WAIT_CNT */ 0x02580014, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000005, /* EMC_CTT_DURATION */ 0x000040a0, /* EMC_CFG_PIPE */ 0x800012d6, /* EMC_DYN_SELF_REF_CONTROL */ 0x00000010, /* EMC_QPOP */ 0x00000009, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000004, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000005, /* MC_EMEM_ARB_TIMING_RP */ 0x00000012, /* MC_EMEM_ARB_TIMING_RC */ 0x0000000b, /* MC_EMEM_ARB_TIMING_RAS */ 0x0000000e, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000002, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x0000000a, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000007, /* MC_EMEM_ARB_TIMING_W2R */ 0x07060103, /* MC_EMEM_ARB_DA_TURNS */ 0x00140d12, /* MC_EMEM_ARB_DA_COVERS */ 0x71c91b13, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x0000000f, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000120, /* MC_PTSA_GRANT_DECREMENT */ 0x00aa0038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00aa0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00aa003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00aa0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00aa0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00aa0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00aa0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00aa0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x0008001d, /* 
MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000aa, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00aa0022, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00aa0018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00aa0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00aa0024, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000aa, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00aa0065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00aa0025, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x0000003a, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3300000, /* EMC_CFG */ 0x0000089f, /* EMC_CFG_2 */ 0x0004001c, /* EMC_SEL_DPD_CTRL */ 0xe00e0069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x000100e3, /* Mode Register 1 */ 0x00020007, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 1440, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "09_792000_04_V5.0.14_V1.1", /* DVFS table version */ 792000, /* SDRAM frequency */ 980, /* min voltage */ 980, /* gpu min voltage */ "pllm_ud", /* clock source id */ 0x80000000, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x0000002f, /* EMC_RC */ 0x00000066, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000021, /* EMC_RAS */ 0x0000000e, /* EMC_RP */ 0x0000000e, /* EMC_R2W */ 0x0000000d, /* EMC_W2R */ 0x00000005, /* EMC_R2P */ 0x00000013, /* EMC_W2P */ 0x0000000e, /* EMC_RD_RCD */ 0x0000000e, /* EMC_WR_RCD */ 0x00000007, /* EMC_RRD */ 
0x00000004, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000005, /* EMC_WDV */ 0x00000005, /* EMC_WDV_MASK */ 0x0000000e, /* EMC_QUSE */ 0x00000004, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x0000000f, /* EMC_EINPUT_DURATION */ 0x000b0000, /* EMC_PUTERM_EXTRA */ 0x00000006, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x00000016, /* EMC_QSAFE */ 0x0000001d, /* EMC_RDV */ 0x0000001f, /* EMC_RDV_MASK */ 0x00000bd1, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000002f4, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000005, /* EMC_PDEX2WR */ 0x00000005, /* EMC_PDEX2RD */ 0x0000000e, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x00000017, /* EMC_RW2PDEN */ 0x0000006f, /* EMC_TXSR */ 0x0000006f, /* EMC_TXSRDLL */ 0x00000006, /* EMC_TCKE */ 0x0000000c, /* EMC_TCKESR */ 0x00000006, /* EMC_TPD */ 0x00000026, /* EMC_TFAW */ 0x00000011, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x00000cdf, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a096, /* EMC_FBIO_CFG5 */ 0xe00700b9, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00000005, /* EMC_DLL_XFORM_DQS0 */ 0x00000005, /* EMC_DLL_XFORM_DQS1 */ 0x00000005, /* EMC_DLL_XFORM_DQS2 */ 0x00000005, /* EMC_DLL_XFORM_DQS3 */ 0x00000005, /* EMC_DLL_XFORM_DQS4 */ 0x00000005, /* EMC_DLL_XFORM_DQS5 */ 0x00000005, /* EMC_DLL_XFORM_DQS6 */ 0x00000005, /* EMC_DLL_XFORM_DQS7 */ 0x00000005, /* EMC_DLL_XFORM_DQS8 */ 0x00000005, /* EMC_DLL_XFORM_DQS9 */ 0x00000005, /* EMC_DLL_XFORM_DQS10 */ 0x00000005, /* EMC_DLL_XFORM_DQS11 */ 0x00000005, /* EMC_DLL_XFORM_DQS12 */ 0x00000005, /* EMC_DLL_XFORM_DQS13 */ 0x00000005, /* EMC_DLL_XFORM_DQS14 */ 0x00000005, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 
0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00004014, /* EMC_DLL_XFORM_ADDR0 */ 0x00004014, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00004014, /* EMC_DLL_XFORM_ADDR3 */ 0x00004014, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000b, /* EMC_DLL_XFORM_DQ0 */ 0x0000000b, /* EMC_DLL_XFORM_DQ1 */ 0x0000000b, /* EMC_DLL_XFORM_DQ2 */ 0x0000000b, /* EMC_DLL_XFORM_DQ3 */ 0x0000000b, /* EMC_DLL_XFORM_DQ4 */ 0x0000000b, /* EMC_DLL_XFORM_DQ5 */ 0x0000000b, /* EMC_DLL_XFORM_DQ6 */ 0x0000000b, /* EMC_DLL_XFORM_DQ7 */ 0x00000220, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x0120103d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc004, /* EMC_XM2CLKPADCTRL */ 0x00000606, /* EMC_XM2CLKPADCTRL2 */ 
0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x61861820, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x61861800, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x0000011e, /* EMC_ZCAL_WAIT_CNT */ 0x03180017, /* EMC_MRS_WAIT_CNT */ 0x03180017, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000006, /* EMC_CTT_DURATION */ 0x00004080, /* EMC_CFG_PIPE */ 0x8000188b, /* EMC_DYN_SELF_REF_CONTROL */ 0x00000014, /* EMC_QPOP */ 0x0e00000b, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000006, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000007, /* MC_EMEM_ARB_TIMING_RP */ 0x00000018, /* MC_EMEM_ARB_TIMING_RC */ 0x0000000f, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000013, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000003, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x0000000c, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000008, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000008, /* MC_EMEM_ARB_TIMING_W2R */ 0x08080103, /* MC_EMEM_ARB_DA_TURNS */ 0x001a1118, /* MC_EMEM_ARB_DA_COVERS */ 0x71ac2419, /* MC_EMEM_ARB_MISC0 */ 0x70000f02, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000013, /* MC_MLL_MPCORER_PTSA_RATE */ 0x0000017c, /* MC_PTSA_GRANT_DECREMENT */ 0x00810038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00810038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x0081003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00810090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00810041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00810090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00810041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00810080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 
0x00810004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080016, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x00000081, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00810019, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00810018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00810024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x0081001c, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x00000081, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00810065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x0081001c, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x0000004c, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3300000, /* EMC_CFG */ 0x0000089f, /* EMC_CFG_2 */ 0x0004001c, /* EMC_SEL_DPD_CTRL */ 0xe0070069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430404, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x00010043, /* Mode Register 1 */ 0x0002001a, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 1200, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "09_924000_03_V5.0.14_V1.1", /* DVFS table version */ 924000, /* SDRAM frequency */ 1010, /* min voltage */ 1010, /* gpu min voltage */ "pllm_ud", /* clock source id */ 0x80000000, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000037, /* EMC_RC */ 0x00000078, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000026, /* EMC_RAS */ 0x00000010, /* EMC_RP */ 0x00000010, /* EMC_R2W */ 0x00000010, /* EMC_W2R */ 0x00000006, /* EMC_R2P 
*/ 0x00000017, /* EMC_W2P */ 0x00000010, /* EMC_RD_RCD */ 0x00000010, /* EMC_WR_RCD */ 0x00000009, /* EMC_RRD */ 0x00000005, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000007, /* EMC_WDV */ 0x00000007, /* EMC_WDV_MASK */ 0x00000011, /* EMC_QUSE */ 0x00000004, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000006, /* EMC_EINPUT */ 0x00000011, /* EMC_EINPUT_DURATION */ 0x000e0000, /* EMC_PUTERM_EXTRA */ 0x00000006, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000005, /* EMC_QRST */ 0x00000018, /* EMC_QSAFE */ 0x00000020, /* EMC_RDV */ 0x00000022, /* EMC_RDV_MASK */ 0x00000dd4, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000375, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000006, /* EMC_PDEX2WR */ 0x00000006, /* EMC_PDEX2RD */ 0x00000010, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x0000001b, /* EMC_RW2PDEN */ 0x00000082, /* EMC_TXSR */ 0x00000082, /* EMC_TXSRDLL */ 0x00000007, /* EMC_TCKE */ 0x0000000e, /* EMC_TCKESR */ 0x00000007, /* EMC_TPD */ 0x0000002d, /* EMC_TFAW */ 0x00000014, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x00000f04, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a896, /* EMC_FBIO_CFG5 */ 0xe00400b9, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x007f4009, /* EMC_DLL_XFORM_DQS0 */ 0x007f8009, /* EMC_DLL_XFORM_DQS1 */ 0x007f800b, /* EMC_DLL_XFORM_DQS2 */ 0x007f8009, /* EMC_DLL_XFORM_DQS3 */ 0x007f8009, /* EMC_DLL_XFORM_DQS4 */ 0x007f800b, /* EMC_DLL_XFORM_DQS5 */ 0x007fc009, /* EMC_DLL_XFORM_DQS6 */ 0x007f8009, /* EMC_DLL_XFORM_DQS7 */ 0x007f4009, /* EMC_DLL_XFORM_DQS8 */ 0x007f8009, /* EMC_DLL_XFORM_DQS9 */ 0x007f800b, /* EMC_DLL_XFORM_DQS10 */ 0x007f8009, /* EMC_DLL_XFORM_DQS11 */ 0x007f8009, /* EMC_DLL_XFORM_DQS12 */ 0x007f800b, /* EMC_DLL_XFORM_DQS13 */ 
0x007fc009, /* EMC_DLL_XFORM_DQS14 */ 0x007f8009, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000010, /* EMC_DLL_XFORM_ADDR0 */ 0x00000010, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000010, /* EMC_DLL_XFORM_ADDR3 */ 0x00000010, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000b, /* EMC_DLL_XFORM_DQ0 */ 0x0000000b, /* EMC_DLL_XFORM_DQ1 */ 0x0000000b, /* EMC_DLL_XFORM_DQ2 */ 0x0000000b, /* EMC_DLL_XFORM_DQ3 */ 0x0000000b, /* EMC_DLL_XFORM_DQ4 */ 0x0000000b, /* EMC_DLL_XFORM_DQ5 */ 0x0000000b, /* EMC_DLL_XFORM_DQ6 */ 0x0000000b, /* EMC_DLL_XFORM_DQ7 */ 0x00000220, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x0120103d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 
*/ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc004, /* EMC_XM2CLKPADCTRL */ 0x00000101, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x5db59b20, /* EMC_XM2DQSPADCTRL3 */ 0x00513594, /* EMC_XM2DQSPADCTRL4 */ 0x00515556, /* EMC_XM2DQSPADCTRL5 */ 0x61949400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x0000014d, /* EMC_ZCAL_WAIT_CNT */ 0x039c0019, /* EMC_MRS_WAIT_CNT */ 0x039c0019, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000006, /* EMC_CTT_DURATION */ 0x00004080, /* EMC_CFG_PIPE */ 0x80001c77, /* EMC_DYN_SELF_REF_CONTROL */ 0x00000017, /* EMC_QPOP */ 0x0e00000d, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000007, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000008, /* MC_EMEM_ARB_TIMING_RP */ 0x0000001b, /* MC_EMEM_ARB_TIMING_RC */ 0x00000012, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000017, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000004, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000004, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x0000000e, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000009, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000009, /* MC_EMEM_ARB_TIMING_W2R */ 0x09090104, /* MC_EMEM_ARB_DA_TURNS */ 0x001e141b, /* MC_EMEM_ARB_DA_COVERS */ 0x71ae2a1c, /* MC_EMEM_ARB_MISC0 */ 0x70000f02, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000017, /* MC_MLL_MPCORER_PTSA_RATE */ 0x000001bb, /* MC_PTSA_GRANT_DECREMENT */ 0x006e0038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x006e0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x006e003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x006e0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x006e0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x006e0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x006e0041, /* 
MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x006e0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x006e0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x006e0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080016, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x0000006e, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x006e0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x006e0019, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x006e0018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x006e0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x006e001b, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x0000006e, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x006e006e, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x006e006e, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x006e006e, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x006e006e, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x006e0065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x006e001c, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000058, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3300000, /* EMC_CFG */ 0x0000089f, /* EMC_CFG_2 */ 0x0004001c, /* EMC_SEL_DPD_CTRL */ 0xe0040069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430808, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x00010083, /* Mode Register 1 */ 0x0002001c, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 1180, /* expected dvfs latency (ns) */ }, }; static struct tegra12_emc_table ardbeg_lpddr3_emc_table_der[] = { { 0x19, /* V5.0.16 */ "09_12750_04_V5.0.16_V1.1", /* DVFS table version */ 12750, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x4000003e, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000000, /* 
EMC_RC */ 0x00000003, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000002, /* EMC_RAS */ 0x00000002, /* EMC_RP */ 0x00000006, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000a, /* EMC_W2P */ 0x00000002, /* EMC_RD_RCD */ 0x00000002, /* EMC_WR_RCD */ 0x00000001, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x0000000b, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000002, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000002, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x0000000c, /* EMC_RW2PDEN */ 0x00000003, /* EMC_TXSR */ 0x00000002, /* EMC_TXSRDLL */ 0x00000003, /* EMC_TCKE */ 0x00000003, /* EMC_TCKESR */ 0x00000003, /* EMC_TPD */ 0x00000006, /* EMC_TFAW */ 0x00000004, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x00000036, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a296, /* EMC_FBIO_CFG5 */ 0x005800a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00048000, /* EMC_DLL_XFORM_DQS0 */ 0x00048000, /* EMC_DLL_XFORM_DQS1 */ 0x00048000, /* EMC_DLL_XFORM_DQS2 */ 0x00048000, /* EMC_DLL_XFORM_DQS3 */ 0x00048000, /* EMC_DLL_XFORM_DQS4 */ 0x00048000, /* EMC_DLL_XFORM_DQS5 */ 0x00048000, /* EMC_DLL_XFORM_DQS6 */ 0x00048000, /* EMC_DLL_XFORM_DQS7 */ 0x00048000, /* EMC_DLL_XFORM_DQS8 
*/ 0x00048000, /* EMC_DLL_XFORM_DQS9 */ 0x00048000, /* EMC_DLL_XFORM_DQS10 */ 0x00048000, /* EMC_DLL_XFORM_DQS11 */ 0x00048000, /* EMC_DLL_XFORM_DQS12 */ 0x00048000, /* EMC_DLL_XFORM_DQS13 */ 0x00048000, /* EMC_DLL_XFORM_DQS14 */ 0x00048000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR0 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR3 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* 
EMC_DLL_XFORM_DQ7 */ 0x00000200, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x0130b018, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc000, /* EMC_XM2CLKPADCTRL */ 0x00000404, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x00000011, /* EMC_ZCAL_WAIT_CNT */ 0x000d0011, /* EMC_MRS_WAIT_CNT */ 0x000d0011, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f3f3, /* EMC_CFG_PIPE */ 0x8000011c, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x40040001, /* MC_EMEM_ARB_CFG */ 0x8000000a, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000003, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000007, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_W2R */ 0x05040102, /* MC_EMEM_ARB_DA_TURNS */ 0x00090402, /* MC_EMEM_ARB_DA_COVERS */ 0x77c30303, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000007, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00ff, /* 
MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000015, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3200000, /* EMC_CFG */ 0x000008c7, /* EMC_CFG_2 */ 0x0004013c, /* EMC_SEL_DPD_CTRL */ 0x00580068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x00010083, /* Mode Register 1 */ 0x00020004, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 57820, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.16 */ "09_20400_04_V5.0.16_V1.1", /* DVFS table version */ 20400, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock 
source id */ 0x40000026, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000001, /* EMC_RC */ 0x00000003, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000002, /* EMC_RAS */ 0x00000002, /* EMC_RP */ 0x00000006, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000a, /* EMC_W2P */ 0x00000002, /* EMC_RD_RCD */ 0x00000002, /* EMC_WR_RCD */ 0x00000001, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000013, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000004, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000002, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x0000000c, /* EMC_RW2PDEN */ 0x00000003, /* EMC_TXSR */ 0x00000003, /* EMC_TXSRDLL */ 0x00000003, /* EMC_TCKE */ 0x00000003, /* EMC_TCKESR */ 0x00000003, /* EMC_TPD */ 0x00000006, /* EMC_TFAW */ 0x00000004, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x00000055, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a296, /* EMC_FBIO_CFG5 */ 0x005800a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00048000, /* EMC_DLL_XFORM_DQS0 */ 0x00048000, /* EMC_DLL_XFORM_DQS1 */ 0x00048000, /* EMC_DLL_XFORM_DQS2 */ 0x00048000, /* EMC_DLL_XFORM_DQS3 */ 0x00048000, /* EMC_DLL_XFORM_DQS4 */ 0x00048000, /* 
EMC_DLL_XFORM_DQS5 */ 0x00048000, /* EMC_DLL_XFORM_DQS6 */ 0x00048000, /* EMC_DLL_XFORM_DQS7 */ 0x00048000, /* EMC_DLL_XFORM_DQS8 */ 0x00048000, /* EMC_DLL_XFORM_DQS9 */ 0x00048000, /* EMC_DLL_XFORM_DQS10 */ 0x00048000, /* EMC_DLL_XFORM_DQS11 */ 0x00048000, /* EMC_DLL_XFORM_DQS12 */ 0x00048000, /* EMC_DLL_XFORM_DQS13 */ 0x00048000, /* EMC_DLL_XFORM_DQS14 */ 0x00048000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR0 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR3 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 
0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x00000200, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x0130b018, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc000, /* EMC_XM2CLKPADCTRL */ 0x00000404, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x00000011, /* EMC_ZCAL_WAIT_CNT */ 0x00150011, /* EMC_MRS_WAIT_CNT */ 0x00150011, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f3f3, /* EMC_CFG_PIPE */ 0x8000012a, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x40020001, /* MC_EMEM_ARB_CFG */ 0x80000012, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000003, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000007, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_W2R */ 0x05040102, /* MC_EMEM_ARB_DA_TURNS */ 0x00090402, /* MC_EMEM_ARB_DA_COVERS */ 0x74e30303, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x0000000a, /* 
MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000015, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3200000, /* EMC_CFG */ 0x000008c7, /* EMC_CFG_2 */ 0x0004013c, /* EMC_SEL_DPD_CTRL */ 0x00580068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x00010083, /* Mode Register 1 */ 0x00020004, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 35610, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.16 */ 
"09_40800_04_V5.0.16_V1.1", /* DVFS table version */ 40800, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000012, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000002, /* EMC_RC */ 0x00000005, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000002, /* EMC_RAS */ 0x00000002, /* EMC_RP */ 0x00000006, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000a, /* EMC_W2P */ 0x00000002, /* EMC_RD_RCD */ 0x00000002, /* EMC_WR_RCD */ 0x00000001, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000026, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000009, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000002, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x0000000c, /* EMC_RW2PDEN */ 0x00000006, /* EMC_TXSR */ 0x00000006, /* EMC_TXSRDLL */ 0x00000003, /* EMC_TCKE */ 0x00000003, /* EMC_TCKESR */ 0x00000003, /* EMC_TPD */ 0x00000006, /* EMC_TFAW */ 0x00000004, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x000000aa, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a296, /* EMC_FBIO_CFG5 */ 0x005800a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00048000, /* EMC_DLL_XFORM_DQS0 */ 0x00048000, 
/* EMC_DLL_XFORM_DQS1 */ 0x00048000, /* EMC_DLL_XFORM_DQS2 */ 0x00048000, /* EMC_DLL_XFORM_DQS3 */ 0x00048000, /* EMC_DLL_XFORM_DQS4 */ 0x00048000, /* EMC_DLL_XFORM_DQS5 */ 0x00048000, /* EMC_DLL_XFORM_DQS6 */ 0x00048000, /* EMC_DLL_XFORM_DQS7 */ 0x00048000, /* EMC_DLL_XFORM_DQS8 */ 0x00048000, /* EMC_DLL_XFORM_DQS9 */ 0x00048000, /* EMC_DLL_XFORM_DQS10 */ 0x00048000, /* EMC_DLL_XFORM_DQS11 */ 0x00048000, /* EMC_DLL_XFORM_DQS12 */ 0x00048000, /* EMC_DLL_XFORM_DQS13 */ 0x00048000, /* EMC_DLL_XFORM_DQS14 */ 0x00048000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR0 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR3 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* 
EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x00000200, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x0130b018, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc000, /* EMC_XM2CLKPADCTRL */ 0x00000404, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x00000011, /* EMC_ZCAL_WAIT_CNT */ 0x00290011, /* EMC_MRS_WAIT_CNT */ 0x00290011, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f3f3, /* EMC_CFG_PIPE */ 0x80000151, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0xa0000001, /* MC_EMEM_ARB_CFG */ 0x80000017, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000003, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000007, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_W2R */ 0x05040102, /* MC_EMEM_ARB_DA_TURNS */ 0x00090402, /* MC_EMEM_ARB_DA_COVERS */ 0x73030303, /* 
MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000014, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000015, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3200000, /* EMC_CFG */ 0x000008c7, /* EMC_CFG_2 */ 0x0004013c, /* EMC_SEL_DPD_CTRL */ 0x00580068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x00010083, /* Mode 
Register 1 */ 0x00020004, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 20850, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.16 */ "09_68000_04_V5.0.16_V1.1", /* DVFS table version */ 68000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x4000000a, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000004, /* EMC_RC */ 0x00000008, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000002, /* EMC_RAS */ 0x00000002, /* EMC_RP */ 0x00000006, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000a, /* EMC_W2P */ 0x00000002, /* EMC_RD_RCD */ 0x00000002, /* EMC_WR_RCD */ 0x00000001, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000040, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000010, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000002, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x0000000c, /* EMC_RW2PDEN */ 0x0000000a, /* EMC_TXSR */ 0x0000000a, /* EMC_TXSRDLL */ 0x00000003, /* EMC_TCKE */ 0x00000003, /* EMC_TCKESR */ 0x00000003, /* EMC_TPD */ 0x00000006, /* EMC_TFAW */ 0x00000004, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x0000011b, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 
0x1361a296, /* EMC_FBIO_CFG5 */ 0x005800a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00048000, /* EMC_DLL_XFORM_DQS0 */ 0x00048000, /* EMC_DLL_XFORM_DQS1 */ 0x00048000, /* EMC_DLL_XFORM_DQS2 */ 0x00048000, /* EMC_DLL_XFORM_DQS3 */ 0x00048000, /* EMC_DLL_XFORM_DQS4 */ 0x00048000, /* EMC_DLL_XFORM_DQS5 */ 0x00048000, /* EMC_DLL_XFORM_DQS6 */ 0x00048000, /* EMC_DLL_XFORM_DQS7 */ 0x00048000, /* EMC_DLL_XFORM_DQS8 */ 0x00048000, /* EMC_DLL_XFORM_DQS9 */ 0x00048000, /* EMC_DLL_XFORM_DQS10 */ 0x00048000, /* EMC_DLL_XFORM_DQS11 */ 0x00048000, /* EMC_DLL_XFORM_DQS12 */ 0x00048000, /* EMC_DLL_XFORM_DQS13 */ 0x00048000, /* EMC_DLL_XFORM_DQS14 */ 0x00048000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR0 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR3 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* 
EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x00000200, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x0130b018, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc000, /* EMC_XM2CLKPADCTRL */ 0x00000404, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x00000019, /* EMC_ZCAL_WAIT_CNT */ 0x00440011, /* EMC_MRS_WAIT_CNT */ 0x00440011, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f3f3, /* EMC_CFG_PIPE */ 0x80000185, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x00000001, /* MC_EMEM_ARB_CFG */ 0x8000001e, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000003, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000007, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* 
MC_EMEM_ARB_TIMING_R2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_W2R */ 0x05040102, /* MC_EMEM_ARB_DA_TURNS */ 0x00090402, /* MC_EMEM_ARB_DA_COVERS */ 0x72630403, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000021, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00b0, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ec, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ec, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00e90049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00a3, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ef, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ef, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ee00ef, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000015, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3200000, /* EMC_CFG */ 0x000008c7, /* EMC_CFG_2 */ 0x0004013c, /* EMC_SEL_DPD_CTRL */ 0x00580068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* 
EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x00010083, /* Mode Register 1 */ 0x00020004, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 10720, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.16 */ "09_102000_04_V5.0.16_V1.1", /* DVFS table version */ 102000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000006, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000006, /* EMC_RC */ 0x0000000d, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000004, /* EMC_RAS */ 0x00000002, /* EMC_RP */ 0x00000007, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000a, /* EMC_W2P */ 0x00000002, /* EMC_RD_RCD */ 0x00000002, /* EMC_WR_RCD */ 0x00000001, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000060, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000018, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000002, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x0000000c, /* EMC_RW2PDEN */ 0x0000000f, /* EMC_TXSR */ 0x0000000f, /* EMC_TXSRDLL */ 0x00000003, /* EMC_TCKE */ 0x00000003, /* EMC_TCKESR */ 0x00000003, /* EMC_TPD */ 0x00000006, /* EMC_TFAW */ 0x00000004, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 
0x00000003, /* EMC_TCLKSTOP */ 0x000001a9, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a296, /* EMC_FBIO_CFG5 */ 0x005800a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00048000, /* EMC_DLL_XFORM_DQS0 */ 0x00048000, /* EMC_DLL_XFORM_DQS1 */ 0x00048000, /* EMC_DLL_XFORM_DQS2 */ 0x00048000, /* EMC_DLL_XFORM_DQS3 */ 0x00048000, /* EMC_DLL_XFORM_DQS4 */ 0x00048000, /* EMC_DLL_XFORM_DQS5 */ 0x00048000, /* EMC_DLL_XFORM_DQS6 */ 0x00048000, /* EMC_DLL_XFORM_DQS7 */ 0x00048000, /* EMC_DLL_XFORM_DQS8 */ 0x00048000, /* EMC_DLL_XFORM_DQS9 */ 0x00048000, /* EMC_DLL_XFORM_DQS10 */ 0x00048000, /* EMC_DLL_XFORM_DQS11 */ 0x00048000, /* EMC_DLL_XFORM_DQS12 */ 0x00048000, /* EMC_DLL_XFORM_DQS13 */ 0x00048000, /* EMC_DLL_XFORM_DQS14 */ 0x00048000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR0 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR3 */ 0x000fc000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* 
EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x00000200, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x0130b018, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc000, /* EMC_XM2CLKPADCTRL */ 0x00000404, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x00000025, /* EMC_ZCAL_WAIT_CNT */ 0x00660011, /* EMC_MRS_WAIT_CNT */ 0x00660011, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f3f3, /* EMC_CFG_PIPE */ 0x800001c5, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x08000001, /* MC_EMEM_ARB_CFG */ 0x80000026, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000003, /* MC_EMEM_ARB_TIMING_RC */ 0x00000001, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000003, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 
0x00000007, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_W2R */ 0x05050102, /* MC_EMEM_ARB_DA_TURNS */ 0x00090403, /* MC_EMEM_ARB_DA_COVERS */ 0x72430504, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000031, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff0075, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff009d, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff009d, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x009b0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ad, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00c6, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00d6, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x0000009f, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x0000009f, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x009f00a0, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000015, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3200000, /* EMC_CFG */ 
0x000008c7, /* EMC_CFG_2 */ 0x0004013c, /* EMC_SEL_DPD_CTRL */ 0x00580068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x00010083, /* Mode Register 1 */ 0x00020004, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 6890, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.16 */ "09_204000_04_V5.0.16_V1.1", /* DVFS table version */ 204000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000002, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x0000000c, /* EMC_RC */ 0x0000001a, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000008, /* EMC_RAS */ 0x00000004, /* EMC_RP */ 0x00000007, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000a, /* EMC_W2P */ 0x00000004, /* EMC_RD_RCD */ 0x00000004, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000002, /* EMC_WDV */ 0x00000002, /* EMC_WDV_MASK */ 0x00000005, /* EMC_QUSE */ 0x00000003, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000003, /* EMC_EINPUT */ 0x00000007, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000004, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000002, /* EMC_QRST */ 0x0000000e, /* EMC_QSAFE */ 0x0000000f, /* EMC_RDV */ 0x00000011, /* EMC_RDV_MASK */ 0x000000c1, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000030, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000004, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x0000000c, /* EMC_RW2PDEN */ 0x0000001d, /* EMC_TXSR */ 0x0000001d, /* EMC_TXSRDLL */ 0x00000003, /* EMC_TCKE */ 
0x00000004, /* EMC_TCKESR */ 0x00000003, /* EMC_TPD */ 0x00000009, /* EMC_TFAW */ 0x00000005, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x00000351, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a296, /* EMC_FBIO_CFG5 */ 0x005800a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00048000, /* EMC_DLL_XFORM_DQS0 */ 0x00048000, /* EMC_DLL_XFORM_DQS1 */ 0x00048000, /* EMC_DLL_XFORM_DQS2 */ 0x00048000, /* EMC_DLL_XFORM_DQS3 */ 0x00048000, /* EMC_DLL_XFORM_DQS4 */ 0x00048000, /* EMC_DLL_XFORM_DQS5 */ 0x00048000, /* EMC_DLL_XFORM_DQS6 */ 0x00048000, /* EMC_DLL_XFORM_DQS7 */ 0x00048000, /* EMC_DLL_XFORM_DQS8 */ 0x00048000, /* EMC_DLL_XFORM_DQS9 */ 0x00048000, /* EMC_DLL_XFORM_DQS10 */ 0x00048000, /* EMC_DLL_XFORM_DQS11 */ 0x00048000, /* EMC_DLL_XFORM_DQS12 */ 0x00048000, /* EMC_DLL_XFORM_DQS13 */ 0x00048000, /* EMC_DLL_XFORM_DQS14 */ 0x00048000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00080000, /* EMC_DLL_XFORM_ADDR0 */ 0x00080000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00080000, /* EMC_DLL_XFORM_ADDR3 */ 0x00080000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 
0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00090000, /* EMC_DLL_XFORM_DQ0 */ 0x00090000, /* EMC_DLL_XFORM_DQ1 */ 0x00090000, /* EMC_DLL_XFORM_DQ2 */ 0x00090000, /* EMC_DLL_XFORM_DQ3 */ 0x00009000, /* EMC_DLL_XFORM_DQ4 */ 0x00009000, /* EMC_DLL_XFORM_DQ5 */ 0x00009000, /* EMC_DLL_XFORM_DQ6 */ 0x00009000, /* EMC_DLL_XFORM_DQ7 */ 0x00000200, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x0130b018, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc000, /* EMC_XM2CLKPADCTRL */ 0x00000606, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x0000004a, /* EMC_ZCAL_WAIT_CNT */ 0x00cc0011, /* EMC_MRS_WAIT_CNT */ 0x00cc0011, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000004, /* EMC_CTT_DURATION */ 0x0000d3b3, /* EMC_CFG_PIPE */ 0x80000287, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x01000003, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RP */ 0x00000006, /* MC_EMEM_ARB_TIMING_RC */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAS */ 
0x00000005, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000007, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_W2R */ 0x05050103, /* MC_EMEM_ARB_DA_TURNS */ 0x000b0606, /* MC_EMEM_ARB_DA_COVERS */ 0x71e40a07, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000062, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00af, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff004f, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00af, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff004f, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x004e0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080057, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff0063, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff0036, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff006b, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000050, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000050, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510050, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00c6, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000017, /* EMC_ZCAL_WAIT_CNT 
after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3200000, /* EMC_CFG */ 0x000008cf, /* EMC_CFG_2 */ 0x0004013c, /* EMC_SEL_DPD_CTRL */ 0x00580068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x00010083, /* Mode Register 1 */ 0x00020004, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 3420, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.16 */ "09_300000_04_V5.0.16_V1.1", /* DVFS table version */ 300000, /* SDRAM frequency */ 820, /* min voltage */ 820, /* gpu min voltage */ "pllc_out0", /* clock source id */ 0x20000002, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000012, /* EMC_RC */ 0x00000026, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x0000000d, /* EMC_RAS */ 0x00000005, /* EMC_RP */ 0x00000007, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000a, /* EMC_W2P */ 0x00000005, /* EMC_RD_RCD */ 0x00000005, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000002, /* EMC_WDV */ 0x00000002, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000003, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000003, /* EMC_EINPUT */ 0x00000008, /* EMC_EINPUT_DURATION */ 0x00030000, /* EMC_PUTERM_EXTRA */ 0x00000004, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000002, /* EMC_QRST */ 0x0000000f, /* EMC_QSAFE */ 0x00000012, /* EMC_RDV */ 0x00000014, /* EMC_RDV_MASK */ 0x0000011c, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000047, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000005, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* 
EMC_AR2PDEN */ 0x0000000c, /* EMC_RW2PDEN */ 0x0000002a, /* EMC_TXSR */ 0x0000002a, /* EMC_TXSRDLL */ 0x00000003, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000003, /* EMC_TPD */ 0x0000000d, /* EMC_TFAW */ 0x00000007, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x000004e0, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a096, /* EMC_FBIO_CFG5 */ 0x005800a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00020000, /* EMC_DLL_XFORM_DQS0 */ 0x00020000, /* EMC_DLL_XFORM_DQS1 */ 0x00020000, /* EMC_DLL_XFORM_DQS2 */ 0x00020000, /* EMC_DLL_XFORM_DQS3 */ 0x00020000, /* EMC_DLL_XFORM_DQS4 */ 0x00020000, /* EMC_DLL_XFORM_DQS5 */ 0x00020000, /* EMC_DLL_XFORM_DQS6 */ 0x00020000, /* EMC_DLL_XFORM_DQS7 */ 0x00020000, /* EMC_DLL_XFORM_DQS8 */ 0x00020000, /* EMC_DLL_XFORM_DQS9 */ 0x00020000, /* EMC_DLL_XFORM_DQS10 */ 0x00020000, /* EMC_DLL_XFORM_DQS11 */ 0x00020000, /* EMC_DLL_XFORM_DQS12 */ 0x00020000, /* EMC_DLL_XFORM_DQS13 */ 0x00020000, /* EMC_DLL_XFORM_DQS14 */ 0x00020000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00060000, /* EMC_DLL_XFORM_ADDR0 */ 0x00060000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00060000, /* EMC_DLL_XFORM_ADDR3 */ 0x00060000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* 
EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00060000, /* EMC_DLL_XFORM_DQ0 */ 0x00060000, /* EMC_DLL_XFORM_DQ1 */ 0x00060000, /* EMC_DLL_XFORM_DQ2 */ 0x00060000, /* EMC_DLL_XFORM_DQ3 */ 0x00006000, /* EMC_DLL_XFORM_DQ4 */ 0x00006000, /* EMC_DLL_XFORM_DQ5 */ 0x00006000, /* EMC_DLL_XFORM_DQ6 */ 0x00006000, /* EMC_DLL_XFORM_DQ7 */ 0x00000200, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x01231239, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc000, /* EMC_XM2CLKPADCTRL */ 0x00000606, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x0000006c, /* EMC_ZCAL_WAIT_CNT */ 0x012c0011, /* EMC_MRS_WAIT_CNT */ 0x012c0011, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000004, /* EMC_CTT_DURATION */ 0x000052a3, /* EMC_CFG_PIPE */ 0x8000033e, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000b, /* EMC_QPOP */ 0x08000004, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* 
MC_EMEM_ARB_TIMING_RCD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RP */ 0x00000009, /* MC_EMEM_ARB_TIMING_RC */ 0x00000005, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000007, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000007, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_W2R */ 0x05050103, /* MC_EMEM_ARB_DA_TURNS */ 0x000c0709, /* MC_EMEM_ARB_DA_COVERS */ 0x71c50e0a, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000004, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000090, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00350049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x0008003b, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff0043, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff002d, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510036, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* 
MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff0087, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x0000001f, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3300000, /* EMC_CFG */ 0x000008d7, /* EMC_CFG_2 */ 0x0004013c, /* EMC_SEL_DPD_CTRL */ 0x00580068, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x00010083, /* Mode Register 1 */ 0x00020004, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 2680, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.16 */ "09_396000_05_V5.0.16_V1.1", /* DVFS table version */ 396000, /* SDRAM frequency */ 850, /* min voltage */ 850, /* gpu min voltage */ "pllm_out0", /* clock source id */ 0x00000002, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000018, /* EMC_RC */ 0x00000033, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000011, /* EMC_RAS */ 0x00000007, /* EMC_RP */ 0x00000008, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000a, /* EMC_W2P */ 0x00000007, /* EMC_RD_RCD */ 0x00000007, /* EMC_WR_RCD */ 0x00000004, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000002, /* EMC_WDV */ 0x00000002, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000003, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000002, /* EMC_EINPUT */ 0x00000009, /* EMC_EINPUT_DURATION */ 0x00030000, /* EMC_PUTERM_EXTRA */ 0x00000004, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000001, /* EMC_QRST */ 0x00000010, /* EMC_QSAFE */ 0x00000012, /* EMC_RDV */ 0x00000014, /* EMC_RDV_MASK */ 0x00000176, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x0000005d, /* 
EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000007, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x0000000e, /* EMC_RW2PDEN */ 0x00000038, /* EMC_TXSR */ 0x00000038, /* EMC_TXSRDLL */ 0x00000003, /* EMC_TCKE */ 0x00000006, /* EMC_TCKESR */ 0x00000003, /* EMC_TPD */ 0x00000012, /* EMC_TFAW */ 0x0000000a, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x00000670, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a096, /* EMC_FBIO_CFG5 */ 0x005800a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00020000, /* EMC_DLL_XFORM_DQS0 */ 0x00020000, /* EMC_DLL_XFORM_DQS1 */ 0x00020000, /* EMC_DLL_XFORM_DQS2 */ 0x00020000, /* EMC_DLL_XFORM_DQS3 */ 0x00020000, /* EMC_DLL_XFORM_DQS4 */ 0x00020000, /* EMC_DLL_XFORM_DQS5 */ 0x00020000, /* EMC_DLL_XFORM_DQS6 */ 0x00020000, /* EMC_DLL_XFORM_DQS7 */ 0x00020000, /* EMC_DLL_XFORM_DQS8 */ 0x00020000, /* EMC_DLL_XFORM_DQS9 */ 0x00020000, /* EMC_DLL_XFORM_DQS10 */ 0x00020000, /* EMC_DLL_XFORM_DQS11 */ 0x00020000, /* EMC_DLL_XFORM_DQS12 */ 0x00020000, /* EMC_DLL_XFORM_DQS13 */ 0x00020000, /* EMC_DLL_XFORM_DQS14 */ 0x00020000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00050000, /* EMC_DLL_XFORM_ADDR0 */ 0x00050000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00050000, /* EMC_DLL_XFORM_ADDR3 */ 0x00050000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* 
EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00040000, /* EMC_DLL_XFORM_DQ0 */ 0x00040000, /* EMC_DLL_XFORM_DQ1 */ 0x00040000, /* EMC_DLL_XFORM_DQ2 */ 0x00040000, /* EMC_DLL_XFORM_DQ3 */ 0x00004000, /* EMC_DLL_XFORM_DQ4 */ 0x00004000, /* EMC_DLL_XFORM_DQ5 */ 0x00004000, /* EMC_DLL_XFORM_DQ6 */ 0x00004000, /* EMC_DLL_XFORM_DQ7 */ 0x00000200, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x01231239, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc000, /* EMC_XM2CLKPADCTRL */ 0x00000606, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x0000008f, /* EMC_ZCAL_WAIT_CNT */ 0x018c0011, /* EMC_MRS_WAIT_CNT */ 0x018c0011, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000004, /* EMC_CTT_DURATION */ 0x000052a3, /* EMC_CFG_PIPE */ 0x800003f4, /* 
EMC_DYN_SELF_REF_CONTROL */ 0x0000000b, /* EMC_QPOP */ 0x0f000005, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000002, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RP */ 0x0000000c, /* MC_EMEM_ARB_TIMING_RC */ 0x00000007, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000009, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000002, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000007, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_W2R */ 0x05050103, /* MC_EMEM_ARB_DA_TURNS */ 0x000e090c, /* MC_EMEM_ARB_DA_COVERS */ 0x71c6120d, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x0000000a, /* MC_MLL_MPCORER_PTSA_RATE */ 0x000000be, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00280049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x0008002d, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff0033, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff0022, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff0037, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, 
/* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff0066, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000028, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3300000, /* EMC_CFG */ 0x00000897, /* EMC_CFG_2 */ 0x0004001c, /* EMC_SEL_DPD_CTRL */ 0x00580068, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x00010083, /* Mode Register 1 */ 0x00020004, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 2180, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.16 */ "09_528000_05_V5.0.16_V1.1", /* DVFS table version */ 528000, /* SDRAM frequency */ 880, /* min voltage */ 870, /* gpu min voltage */ "pllm_ud", /* clock source id */ 0x80000000, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000020, /* EMC_RC */ 0x00000044, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000017, /* EMC_RAS */ 0x0000000a, /* EMC_RP */ 0x0000000a, /* EMC_R2W */ 0x00000009, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000d, /* EMC_W2P */ 0x0000000a, /* EMC_RD_RCD */ 0x0000000a, /* EMC_WR_RCD */ 0x00000006, /* EMC_RRD */ 0x00000004, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000002, /* EMC_WDV */ 0x00000002, /* EMC_WDV_MASK */ 0x00000008, /* EMC_QUSE */ 0x00000003, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000003, /* EMC_EINPUT */ 0x0000000a, /* EMC_EINPUT_DURATION */ 0x00050000, /* EMC_PUTERM_EXTRA */ 0x00000004, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000002, /* EMC_QRST */ 0x00000011, /* EMC_QSAFE */ 0x00000015, /* EMC_RDV 
*/ 0x00000017, /* EMC_RDV_MASK */ 0x000001f3, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x0000007c, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000003, /* EMC_PDEX2WR */ 0x00000003, /* EMC_PDEX2RD */ 0x0000000a, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x00000011, /* EMC_RW2PDEN */ 0x0000004a, /* EMC_TXSR */ 0x0000004a, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000008, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000019, /* EMC_TFAW */ 0x0000000d, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x00000895, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a096, /* EMC_FBIO_CFG5 */ 0xe01200b9, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x0000000a, /* EMC_DLL_XFORM_DQS0 */ 0x0000000a, /* EMC_DLL_XFORM_DQS1 */ 0x0000000a, /* EMC_DLL_XFORM_DQS2 */ 0x0000000a, /* EMC_DLL_XFORM_DQS3 */ 0x0000000a, /* EMC_DLL_XFORM_DQS4 */ 0x0000000a, /* EMC_DLL_XFORM_DQS5 */ 0x0000000a, /* EMC_DLL_XFORM_DQS6 */ 0x0000000a, /* EMC_DLL_XFORM_DQS7 */ 0x0000000a, /* EMC_DLL_XFORM_DQS8 */ 0x0000000a, /* EMC_DLL_XFORM_DQS9 */ 0x0000000a, /* EMC_DLL_XFORM_DQS10 */ 0x0000000a, /* EMC_DLL_XFORM_DQS11 */ 0x0000000a, /* EMC_DLL_XFORM_DQS12 */ 0x0000000a, /* EMC_DLL_XFORM_DQS13 */ 0x0000000a, /* EMC_DLL_XFORM_DQS14 */ 0x0000000a, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000010, /* EMC_DLL_XFORM_ADDR0 */ 0x00000010, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000010, /* EMC_DLL_XFORM_ADDR3 */ 0x00000010, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, 
/* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000e, /* EMC_DLL_XFORM_DQ0 */ 0x0000000e, /* EMC_DLL_XFORM_DQ1 */ 0x0000000e, /* EMC_DLL_XFORM_DQ2 */ 0x0000000e, /* EMC_DLL_XFORM_DQ3 */ 0x0000000e, /* EMC_DLL_XFORM_DQ4 */ 0x0000000e, /* EMC_DLL_XFORM_DQ5 */ 0x0000000e, /* EMC_DLL_XFORM_DQ6 */ 0x0000000e, /* EMC_DLL_XFORM_DQ7 */ 0x00000220, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x0123123d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc004, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x000000bf, /* EMC_ZCAL_WAIT_CNT */ 0x02100013, /* EMC_MRS_WAIT_CNT */ 0x02100013, /* 
EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000004, /* EMC_CTT_DURATION */ 0x000042a0, /* EMC_CFG_PIPE */ 0x800004ef, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000d, /* EMC_QPOP */ 0x0f000007, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000004, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000005, /* MC_EMEM_ARB_TIMING_RP */ 0x00000011, /* MC_EMEM_ARB_TIMING_RC */ 0x0000000a, /* MC_EMEM_ARB_TIMING_RAS */ 0x0000000d, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000003, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000009, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06060103, /* MC_EMEM_ARB_DA_TURNS */ 0x00130c11, /* MC_EMEM_ARB_DA_COVERS */ 0x71c81812, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x0000000d, /* MC_MLL_MPCORER_PTSA_RATE */ 0x000000fd, /* MC_PTSA_GRANT_DECREMENT */ 0x00c10038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00c10038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00c1003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00c10090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00c10041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00c10090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00c10041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00c10080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00c10004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00c10004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080021, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000c1, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00c10004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00c10026, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00c1001a, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00c10024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00c10029, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000c1, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00c100c1, /* 
MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00c10065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00c1002a, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000034, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3300000, /* EMC_CFG */ 0x0000089f, /* EMC_CFG_2 */ 0x0004001c, /* EMC_SEL_DPD_CTRL */ 0xe0120069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x000100c3, /* Mode Register 1 */ 0x00020006, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 1440, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.16 */ "09_600000_04_V5.0.16_V1.1", /* DVFS table version */ 600000, /* SDRAM frequency */ 910, /* min voltage */ 910, /* gpu min voltage */ "pllc_ud", /* clock source id */ 0xe0000000, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000025, /* EMC_RC */ 0x0000004d, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x0000001a, /* EMC_RAS */ 0x0000000b, /* EMC_RP */ 0x0000000a, /* EMC_R2W */ 0x0000000b, /* EMC_W2R */ 0x00000004, /* EMC_R2P */ 0x0000000f, /* EMC_W2P */ 0x0000000b, /* EMC_RD_RCD */ 0x0000000b, /* EMC_WR_RCD */ 0x00000007, /* EMC_RRD */ 0x00000004, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000004, /* EMC_WDV */ 0x00000004, /* EMC_WDV_MASK */ 0x0000000a, /* EMC_QUSE */ 0x00000004, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000003, /* EMC_EINPUT */ 0x0000000d, /* EMC_EINPUT_DURATION */ 0x00070000, /* EMC_PUTERM_EXTRA */ 0x00000005, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 
0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000002, /* EMC_QRST */ 0x00000014, /* EMC_QSAFE */ 0x00000018, /* EMC_RDV */ 0x0000001a, /* EMC_RDV_MASK */ 0x00000237, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x0000008d, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000004, /* EMC_PDEX2WR */ 0x00000004, /* EMC_PDEX2RD */ 0x0000000b, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x00000013, /* EMC_RW2PDEN */ 0x00000054, /* EMC_TXSR */ 0x00000054, /* EMC_TXSRDLL */ 0x00000005, /* EMC_TCKE */ 0x00000009, /* EMC_TCKESR */ 0x00000005, /* EMC_TPD */ 0x0000001c, /* EMC_TFAW */ 0x0000000e, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x000009c0, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a096, /* EMC_FBIO_CFG5 */ 0xe00e00b9, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x0000000a, /* EMC_DLL_XFORM_DQS0 */ 0x0000000a, /* EMC_DLL_XFORM_DQS1 */ 0x0000000a, /* EMC_DLL_XFORM_DQS2 */ 0x0000000a, /* EMC_DLL_XFORM_DQS3 */ 0x0000000a, /* EMC_DLL_XFORM_DQS4 */ 0x0000000a, /* EMC_DLL_XFORM_DQS5 */ 0x0000000a, /* EMC_DLL_XFORM_DQS6 */ 0x0000000a, /* EMC_DLL_XFORM_DQS7 */ 0x0000000a, /* EMC_DLL_XFORM_DQS8 */ 0x0000000a, /* EMC_DLL_XFORM_DQS9 */ 0x0000000a, /* EMC_DLL_XFORM_DQS10 */ 0x0000000a, /* EMC_DLL_XFORM_DQS11 */ 0x0000000a, /* EMC_DLL_XFORM_DQS12 */ 0x0000000a, /* EMC_DLL_XFORM_DQS13 */ 0x0000000a, /* EMC_DLL_XFORM_DQS14 */ 0x0000000a, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000010, /* EMC_DLL_XFORM_ADDR0 */ 0x00000010, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000010, /* 
EMC_DLL_XFORM_ADDR3 */ 0x00000010, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000d, /* EMC_DLL_XFORM_DQ0 */ 0x0000000d, /* EMC_DLL_XFORM_DQ1 */ 0x0000000d, /* EMC_DLL_XFORM_DQ2 */ 0x0000000d, /* EMC_DLL_XFORM_DQ3 */ 0x0000000d, /* EMC_DLL_XFORM_DQ4 */ 0x0000000d, /* EMC_DLL_XFORM_DQ5 */ 0x0000000d, /* EMC_DLL_XFORM_DQ6 */ 0x0000000d, /* EMC_DLL_XFORM_DQ7 */ 0x00000220, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x0121103d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc004, /* EMC_XM2CLKPADCTRL */ 0x00000606, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* 
EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x000000d8, /* EMC_ZCAL_WAIT_CNT */ 0x02580014, /* EMC_MRS_WAIT_CNT */ 0x02580014, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000005, /* EMC_CTT_DURATION */ 0x000040a0, /* EMC_CFG_PIPE */ 0x80000578, /* EMC_DYN_SELF_REF_CONTROL */ 0x00000010, /* EMC_QPOP */ 0x00000009, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000004, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000005, /* MC_EMEM_ARB_TIMING_RP */ 0x00000013, /* MC_EMEM_ARB_TIMING_RC */ 0x0000000c, /* MC_EMEM_ARB_TIMING_RAS */ 0x0000000e, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000003, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x0000000a, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000007, /* MC_EMEM_ARB_TIMING_W2R */ 0x07060103, /* MC_EMEM_ARB_DA_TURNS */ 0x00150e13, /* MC_EMEM_ARB_DA_COVERS */ 0x71c91b14, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x0000000f, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000120, /* MC_PTSA_GRANT_DECREMENT */ 0x00aa0038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00aa0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00aa003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00aa0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00aa0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00aa0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00aa0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00aa0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x0008001d, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000aa, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00aa0022, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00aa0018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00aa0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00aa0024, /* 
MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000aa, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00aa0065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00aa0025, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x0000003a, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3300000, /* EMC_CFG */ 0x0000089f, /* EMC_CFG_2 */ 0x0004001c, /* EMC_SEL_DPD_CTRL */ 0xe00e0069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x000100e3, /* Mode Register 1 */ 0x00020007, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 1440, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.16 */ "09_792000_04_V5.0.16_V1.1", /* DVFS table version */ 792000, /* SDRAM frequency */ 980, /* min voltage */ 980, /* gpu min voltage */ "pllm_ud", /* clock source id */ 0x80000000, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000030, /* EMC_RC */ 0x00000066, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000022, /* EMC_RAS */ 0x0000000f, /* EMC_RP */ 0x0000000e, /* EMC_R2W */ 0x0000000d, /* EMC_W2R */ 0x00000005, /* EMC_R2P */ 0x00000013, /* EMC_W2P */ 0x0000000f, /* EMC_RD_RCD */ 0x0000000f, /* EMC_WR_RCD */ 0x00000009, /* EMC_RRD */ 0x00000004, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000005, /* EMC_WDV */ 0x00000005, /* EMC_WDV_MASK */ 0x0000000e, /* EMC_QUSE */ 0x00000004, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x0000000f, /* EMC_EINPUT_DURATION */ 
0x000b0000, /* EMC_PUTERM_EXTRA */ 0x00000006, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x00000016, /* EMC_QSAFE */ 0x0000001d, /* EMC_RDV */ 0x0000001f, /* EMC_RDV_MASK */ 0x000002ec, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000000bb, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000005, /* EMC_PDEX2WR */ 0x00000005, /* EMC_PDEX2RD */ 0x0000000f, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x00000017, /* EMC_RW2PDEN */ 0x0000006f, /* EMC_TXSR */ 0x0000006f, /* EMC_TXSRDLL */ 0x00000006, /* EMC_TCKE */ 0x0000000c, /* EMC_TCKESR */ 0x00000006, /* EMC_TPD */ 0x00000026, /* EMC_TFAW */ 0x00000013, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x00000cdf, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a096, /* EMC_FBIO_CFG5 */ 0xe00700b9, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00000005, /* EMC_DLL_XFORM_DQS0 */ 0x00000005, /* EMC_DLL_XFORM_DQS1 */ 0x00000005, /* EMC_DLL_XFORM_DQS2 */ 0x00000005, /* EMC_DLL_XFORM_DQS3 */ 0x00000005, /* EMC_DLL_XFORM_DQS4 */ 0x00000005, /* EMC_DLL_XFORM_DQS5 */ 0x00000005, /* EMC_DLL_XFORM_DQS6 */ 0x00000005, /* EMC_DLL_XFORM_DQS7 */ 0x00000005, /* EMC_DLL_XFORM_DQS8 */ 0x00000005, /* EMC_DLL_XFORM_DQS9 */ 0x00000005, /* EMC_DLL_XFORM_DQS10 */ 0x00000005, /* EMC_DLL_XFORM_DQS11 */ 0x00000005, /* EMC_DLL_XFORM_DQS12 */ 0x00000005, /* EMC_DLL_XFORM_DQS13 */ 0x00000005, /* EMC_DLL_XFORM_DQS14 */ 0x00000005, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 
0x00004014, /* EMC_DLL_XFORM_ADDR0 */ 0x00004014, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00004014, /* EMC_DLL_XFORM_ADDR3 */ 0x00004014, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000b, /* EMC_DLL_XFORM_DQ0 */ 0x0000000b, /* EMC_DLL_XFORM_DQ1 */ 0x0000000b, /* EMC_DLL_XFORM_DQ2 */ 0x0000000b, /* EMC_DLL_XFORM_DQ3 */ 0x0000000b, /* EMC_DLL_XFORM_DQ4 */ 0x0000000b, /* EMC_DLL_XFORM_DQ5 */ 0x0000000b, /* EMC_DLL_XFORM_DQ6 */ 0x0000000b, /* EMC_DLL_XFORM_DQ7 */ 0x00000220, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x0120103d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc004, /* EMC_XM2CLKPADCTRL */ 0x00000606, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* EMC_XM2VTTGENPADCTRL3 */ 0x61861820, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 
0x61861800, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x0000011e, /* EMC_ZCAL_WAIT_CNT */ 0x03180017, /* EMC_MRS_WAIT_CNT */ 0x03180017, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000006, /* EMC_CTT_DURATION */ 0x00004080, /* EMC_CFG_PIPE */ 0x800006e5, /* EMC_DYN_SELF_REF_CONTROL */ 0x00000014, /* EMC_QPOP */ 0x0e00000b, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000006, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000007, /* MC_EMEM_ARB_TIMING_RP */ 0x00000019, /* MC_EMEM_ARB_TIMING_RC */ 0x00000010, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000013, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000004, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x0000000c, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000008, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000008, /* MC_EMEM_ARB_TIMING_W2R */ 0x08080103, /* MC_EMEM_ARB_DA_TURNS */ 0x001b1219, /* MC_EMEM_ARB_DA_COVERS */ 0x71ac241a, /* MC_EMEM_ARB_MISC0 */ 0x70000f02, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000013, /* MC_MLL_MPCORER_PTSA_RATE */ 0x0000017c, /* MC_PTSA_GRANT_DECREMENT */ 0x00810038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00810038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x0081003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00810090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00810041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00810090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00810041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00810080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080016, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x00000081, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00810019, /* 
MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00810018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00810024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x0081001c, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x00000081, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00810065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x0081001c, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x0000004c, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3300000, /* EMC_CFG */ 0x0000089f, /* EMC_CFG_2 */ 0x0004001c, /* EMC_SEL_DPD_CTRL */ 0xe0070069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430404, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x00010043, /* Mode Register 1 */ 0x0002001a, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 1200, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.16 */ "09_924000_03_V5.0.16_V1.1", /* DVFS table version */ 924000, /* SDRAM frequency */ 1010, /* min voltage */ 1010, /* gpu min voltage */ "pllm_ud", /* clock source id */ 0x80000000, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000039, /* EMC_RC */ 0x00000078, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000028, /* EMC_RAS */ 0x00000012, /* EMC_RP */ 0x00000010, /* EMC_R2W */ 0x00000010, /* EMC_W2R */ 0x00000006, /* EMC_R2P */ 0x00000017, /* EMC_W2P */ 0x00000012, /* EMC_RD_RCD */ 0x00000012, /* EMC_WR_RCD */ 0x0000000a, /* EMC_RRD */ 0x00000005, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000007, /* EMC_WDV */ 0x00000007, /* EMC_WDV_MASK */ 0x00000011, /* EMC_QUSE 
*/ 0x00000004, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000006, /* EMC_EINPUT */ 0x00000011, /* EMC_EINPUT_DURATION */ 0x000e0000, /* EMC_PUTERM_EXTRA */ 0x00000006, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000005, /* EMC_QRST */ 0x00000018, /* EMC_QSAFE */ 0x00000020, /* EMC_RDV */ 0x00000022, /* EMC_RDV_MASK */ 0x00000369, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000000da, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000006, /* EMC_PDEX2WR */ 0x00000006, /* EMC_PDEX2RD */ 0x00000012, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000001, /* EMC_AR2PDEN */ 0x0000001b, /* EMC_RW2PDEN */ 0x00000082, /* EMC_TXSR */ 0x00000082, /* EMC_TXSRDLL */ 0x00000007, /* EMC_TCKE */ 0x0000000e, /* EMC_TCKESR */ 0x00000007, /* EMC_TPD */ 0x0000002d, /* EMC_TFAW */ 0x00000016, /* EMC_TRPAB */ 0x00000003, /* EMC_TCLKSTABLE */ 0x00000003, /* EMC_TCLKSTOP */ 0x00000f04, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1361a896, /* EMC_FBIO_CFG5 */ 0xe00400b9, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x007f4009, /* EMC_DLL_XFORM_DQS0 */ 0x007f8009, /* EMC_DLL_XFORM_DQS1 */ 0x007f800b, /* EMC_DLL_XFORM_DQS2 */ 0x007f8009, /* EMC_DLL_XFORM_DQS3 */ 0x007f8009, /* EMC_DLL_XFORM_DQS4 */ 0x007f800b, /* EMC_DLL_XFORM_DQS5 */ 0x007fc009, /* EMC_DLL_XFORM_DQS6 */ 0x007f8009, /* EMC_DLL_XFORM_DQS7 */ 0x007f4009, /* EMC_DLL_XFORM_DQS8 */ 0x007f8009, /* EMC_DLL_XFORM_DQS9 */ 0x007f800b, /* EMC_DLL_XFORM_DQS10 */ 0x007f8009, /* EMC_DLL_XFORM_DQS11 */ 0x007f8009, /* EMC_DLL_XFORM_DQS12 */ 0x007f800b, /* EMC_DLL_XFORM_DQS13 */ 0x007fc009, /* EMC_DLL_XFORM_DQS14 */ 0x007f8009, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* 
EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000010, /* EMC_DLL_XFORM_ADDR0 */ 0x00000010, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000010, /* EMC_DLL_XFORM_ADDR3 */ 0x00000010, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000b, /* EMC_DLL_XFORM_DQ0 */ 0x0000000b, /* EMC_DLL_XFORM_DQ1 */ 0x0000000b, /* EMC_DLL_XFORM_DQ2 */ 0x0000000b, /* EMC_DLL_XFORM_DQ3 */ 0x0000000b, /* EMC_DLL_XFORM_DQ4 */ 0x0000000b, /* EMC_DLL_XFORM_DQ5 */ 0x0000000b, /* EMC_DLL_XFORM_DQ6 */ 0x0000000b, /* EMC_DLL_XFORM_DQ7 */ 0x00000220, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00100100, /* EMC_XM2CMDPADCTRL5 */ 0x0120103d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc004, /* EMC_XM2CLKPADCTRL */ 0x00000101, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f008, /* EMC_XM2COMPPADCTRL */ 0x07070000, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x015ddddd, /* 
EMC_XM2VTTGENPADCTRL3 */ 0x5db59b20, /* EMC_XM2DQSPADCTRL3 */ 0x00513594, /* EMC_XM2DQSPADCTRL4 */ 0x00515556, /* EMC_XM2DQSPADCTRL5 */ 0x61949400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00064000, /* EMC_ZCAL_INTERVAL */ 0x0000014d, /* EMC_ZCAL_WAIT_CNT */ 0x039c0019, /* EMC_MRS_WAIT_CNT */ 0x039c0019, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000006, /* EMC_CTT_DURATION */ 0x00004080, /* EMC_CFG_PIPE */ 0x800007e0, /* EMC_DYN_SELF_REF_CONTROL */ 0x00000017, /* EMC_QPOP */ 0x0e00000d, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000008, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000009, /* MC_EMEM_ARB_TIMING_RP */ 0x0000001d, /* MC_EMEM_ARB_TIMING_RC */ 0x00000013, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000017, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000005, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000004, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x0000000e, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000001, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000009, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000009, /* MC_EMEM_ARB_TIMING_W2R */ 0x09090104, /* MC_EMEM_ARB_DA_TURNS */ 0x0020161d, /* MC_EMEM_ARB_DA_COVERS */ 0x71ae2a1e, /* MC_EMEM_ARB_MISC0 */ 0x70000f02, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000017, /* MC_MLL_MPCORER_PTSA_RATE */ 0x000001bb, /* MC_PTSA_GRANT_DECREMENT */ 0x006e0038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x006e0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x006e003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x006e0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x006e0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x006e0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x006e0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x006e0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x006e0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x006e0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080016, /* 
MC_LATENCY_ALLOWANCE_HC_0 */ 0x0000006e, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x006e0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x006e0019, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x006e0018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x006e0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x006e001b, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x0000006e, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x006e006e, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x006e006e, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x006e006e, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x006e006e, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x006e0065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x006e001c, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000058, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0xf3300000, /* EMC_CFG */ 0x0000089f, /* EMC_CFG_2 */ 0x0004001c, /* EMC_SEL_DPD_CTRL */ 0xe0040069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430808, /* EMC_AUTO_CAL_CONFIG */ 0x00000000, /* Mode Register 0 */ 0x00010083, /* Mode Register 1 */ 0x0002001c, /* Mode Register 2 */ 0x800b0000, /* Mode Register 4 */ 1180, /* expected dvfs latency (ns) */ }, }; static struct tegra12_emc_table ardbeg_lpddr3_emc_table_E1781[] = { { 0x18, /* V5.0.10 */ "02_102000_02_V5.0.10_V0.8", /* DVFS table version */ 102000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000006, /* CLK_SOURCE_EMC */ 164, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000004, /* EMC_RC */ 0x0000001a, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000003, /* EMC_RAS */ 0x00000001, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000001, 
/* EMC_RD_RCD */ 0x00000001, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000304, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000000c1, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000018, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x0000001c, /* EMC_TXSR */ 0x0000001c, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000003, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x0000031c, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1069a298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 
0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00008000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00008000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x0007c000, /* EMC_DLL_XFORM_DQ0 */ 0x0007c000, /* EMC_DLL_XFORM_DQ1 */ 0x0007c000, /* EMC_DLL_XFORM_DQ2 */ 0x0007c000, /* EMC_DLL_XFORM_DQ3 */ 0x00007c00, /* EMC_DLL_XFORM_DQ4 */ 0x00007c00, /* EMC_DLL_XFORM_DQ5 */ 0x00007c00, /* EMC_DLL_XFORM_DQ6 */ 0x00007c00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 
*/ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000707, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000033, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x80000713, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x08000001, /* MC_EMEM_ARB_CFG */ 0x80000026, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000003, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0403, /* MC_EMEM_ARB_DA_COVERS */ 0x73c30504, /* MC_EMEM_ARB_MISC0 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000031, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff0075, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff009d, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff009d, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x009b0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 
0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ad, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00c6, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00d6, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x0000009f, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x0000009f, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x009f00a0, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 6890, /* expected dvfs latency (ns) */ }, { 0x18, /* V5.0.10 */ "02_792000_03_V5.0.10_V0.8", /* DVFS table version */ 792000, /* SDRAM frequency */ 1000, /* min voltage */ 1100, /* gpu min voltage */ "pllm_ud", /* clock source id */ 0x80000000, /* CLK_SOURCE_EMC */ 164, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000024, /* EMC_RC */ 0x000000cd, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000019, /* EMC_RAS */ 0x0000000a, /* EMC_RP */ 0x00000008, /* EMC_R2W */ 
0x0000000d, /* EMC_W2R */ 0x00000004, /* EMC_R2P */ 0x00000013, /* EMC_W2P */ 0x0000000a, /* EMC_RD_RCD */ 0x0000000a, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x0000000b, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000002, /* EMC_EINPUT */ 0x0000000d, /* EMC_EINPUT_DURATION */ 0x00080000, /* EMC_PUTERM_EXTRA */ 0x00000004, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000001, /* EMC_QRST */ 0x00000014, /* EMC_QSAFE */ 0x00000017, /* EMC_RDV */ 0x00000019, /* EMC_RDV_MASK */ 0x000017e2, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000005f8, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000003, /* EMC_PDEX2WR */ 0x00000011, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x000000c7, /* EMC_AR2PDEN */ 0x00000018, /* EMC_RW2PDEN */ 0x000000d7, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000005, /* EMC_TCKE */ 0x00000006, /* EMC_TCKESR */ 0x00000005, /* EMC_TPD */ 0x0000001d, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000008, /* EMC_TCLKSTABLE */ 0x00000008, /* EMC_TCLKSTOP */ 0x00001822, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x1049b098, /* EMC_FBIO_CFG5 */ 0xe00700b1, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00000006, /* EMC_DLL_XFORM_DQS0 */ 0x00000006, /* EMC_DLL_XFORM_DQS1 */ 0x00000006, /* EMC_DLL_XFORM_DQS2 */ 0x00000006, /* EMC_DLL_XFORM_DQS3 */ 0x00000006, /* EMC_DLL_XFORM_DQS4 */ 0x00000006, /* EMC_DLL_XFORM_DQS5 */ 0x00000006, /* EMC_DLL_XFORM_DQS6 */ 0x00000006, /* EMC_DLL_XFORM_DQS7 */ 0x00000006, /* EMC_DLL_XFORM_DQS8 */ 0x00000006, /* EMC_DLL_XFORM_DQS9 */ 0x00000006, /* EMC_DLL_XFORM_DQS10 */ 0x00000006, /* EMC_DLL_XFORM_DQS11 */ 0x00000006, /* 
EMC_DLL_XFORM_DQS12 */ 0x00000006, /* EMC_DLL_XFORM_DQS13 */ 0x00000006, /* EMC_DLL_XFORM_DQS14 */ 0x00000006, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x0000400e, /* EMC_DLL_XFORM_ADDR0 */ 0x0000400e, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x0000400e, /* EMC_DLL_XFORM_ADDR3 */ 0x0000400e, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000b, /* EMC_DLL_XFORM_DQ0 */ 0x0000000b, /* EMC_DLL_XFORM_DQ1 */ 0x0000000b, /* EMC_DLL_XFORM_DQ2 */ 0x0000000b, /* EMC_DLL_XFORM_DQ3 */ 0x0000000b, /* EMC_DLL_XFORM_DQ4 */ 0x0000000b, /* EMC_DLL_XFORM_DQ5 */ 0x0000000b, /* EMC_DLL_XFORM_DQ6 */ 0x0000000b, /* EMC_DLL_XFORM_DQ7 */ 0x100002a0, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 
0x0120113d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc085, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x61861820, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x61861800, /* EMC_XM2DQSPADCTRL6 */ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x00f7000e, /* EMC_MRS_WAIT_CNT */ 0x00f7000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000004, /* EMC_CTT_DURATION */ 0x000040a0, /* EMC_CFG_PIPE */ 0x80003012, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000f, /* EMC_QPOP */ 0x0e00000b, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000004, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000005, /* MC_EMEM_ARB_TIMING_RP */ 0x00000013, /* MC_EMEM_ARB_TIMING_RC */ 0x0000000c, /* MC_EMEM_ARB_TIMING_RAS */ 0x0000000f, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000002, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x0000000c, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000008, /* MC_EMEM_ARB_TIMING_W2R */ 0x08060202, /* MC_EMEM_ARB_DA_TURNS */ 0x00170e13, /* MC_EMEM_ARB_DA_COVERS */ 0x736c2414, /* MC_EMEM_ARB_MISC0 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000013, /* MC_MLL_MPCORER_PTSA_RATE */ 0x0000017c, /* MC_PTSA_GRANT_DECREMENT */ 0x00810038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00810038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x0081003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00810090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00810041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00810090, /* 
MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00810041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00810080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080016, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x00000081, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00810019, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00810018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00810024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x0081001c, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x00000081, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00810065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x0081001c, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73300000, /* EMC_CFG */ 0x0000089d, /* EMC_CFG_2 */ 0x00040000, /* EMC_SEL_DPD_CTRL */ 0xe0070069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430404, /* EMC_AUTO_CAL_CONFIG */ 0x80000d71, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200018, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 1200, /* expected dvfs latency (ns) */ }, }; static struct tegra12_emc_table jetson_tk1_ddr3_emc_table[] = { { 0x19, /* V5.0.17 */ "01_12750_01_V5.0.17_V1.1", /* DVFS table version */ 12750, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x4000003e, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, 
/* number of up_down_regs */ { 0x00000000, /* EMC_RC */ 0x00000003, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000000, /* EMC_RAS */ 0x00000000, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000005, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000000, /* EMC_RD_RCD */ 0x00000000, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000060, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000018, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000007, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x00000005, /* EMC_TXSR */ 0x00000005, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000000, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000064, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x106aa298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00080000, /* EMC_DLL_XFORM_DQS0 */ 0x00080000, /* EMC_DLL_XFORM_DQS1 */ 0x00080000, /* EMC_DLL_XFORM_DQS2 */ 0x00080000, /* EMC_DLL_XFORM_DQS3 */ 0x00080000, /* EMC_DLL_XFORM_DQS4 */ 0x00080000, /* EMC_DLL_XFORM_DQS5 */ 0x00080000, /* EMC_DLL_XFORM_DQS6 */ 0x00080000, /* 
EMC_DLL_XFORM_DQS7 */ 0x00080000, /* EMC_DLL_XFORM_DQS8 */ 0x00080000, /* EMC_DLL_XFORM_DQS9 */ 0x00080000, /* EMC_DLL_XFORM_DQS10 */ 0x00080000, /* EMC_DLL_XFORM_DQS11 */ 0x00080000, /* EMC_DLL_XFORM_DQS12 */ 0x00080000, /* EMC_DLL_XFORM_DQS13 */ 0x00080000, /* EMC_DLL_XFORM_DQS14 */ 0x00080000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 
0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000007, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x800001c5, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x40040001, /* MC_EMEM_ARB_CFG */ 0x8000000a, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0502, /* MC_EMEM_ARB_DA_COVERS */ 0x77e30303, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000007, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, 
/* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 57820, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.17 */ "01_20400_01_V5.0.17_V1.1", /* DVFS table version */ 20400, /* SDRAM frequency */ 800, /* min voltage */ 800, 
/* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000026, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000000, /* EMC_RC */ 0x00000005, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000000, /* EMC_RAS */ 0x00000000, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000005, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000000, /* EMC_RD_RCD */ 0x00000000, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x0000009a, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000026, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000007, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x00000006, /* EMC_TXSR */ 0x00000006, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000000, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x000000a0, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x106aa298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00080000, /* EMC_DLL_XFORM_DQS0 */ 0x00080000, /* EMC_DLL_XFORM_DQS1 */ 0x00080000, /* EMC_DLL_XFORM_DQS2 */ 0x00080000, /* EMC_DLL_XFORM_DQS3 */ 
0x00080000, /* EMC_DLL_XFORM_DQS4 */ 0x00080000, /* EMC_DLL_XFORM_DQS5 */ 0x00080000, /* EMC_DLL_XFORM_DQS6 */ 0x00080000, /* EMC_DLL_XFORM_DQS7 */ 0x00080000, /* EMC_DLL_XFORM_DQS8 */ 0x00080000, /* EMC_DLL_XFORM_DQS9 */ 0x00080000, /* EMC_DLL_XFORM_DQS10 */ 0x00080000, /* EMC_DLL_XFORM_DQS11 */ 0x00080000, /* EMC_DLL_XFORM_DQS12 */ 0x00080000, /* EMC_DLL_XFORM_DQS13 */ 0x00080000, /* EMC_DLL_XFORM_DQS14 */ 0x00080000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* 
EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x0000000b, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x8000023a, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x40020001, /* MC_EMEM_ARB_CFG */ 0x80000012, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0502, /* MC_EMEM_ARB_DA_COVERS */ 0x76230303, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* 
MC_MLL_MPCORER_PTSA_RATE */ 0x0000000a, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 35610, /* expected dvfs latency (ns) */ }, { 
0x19, /* V5.0.17 */ "01_40800_01_V5.0.17_V1.1", /* DVFS table version */ 40800, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000012, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000001, /* EMC_RC */ 0x0000000a, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000001, /* EMC_RAS */ 0x00000000, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000005, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000000, /* EMC_RD_RCD */ 0x00000000, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000134, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x0000004d, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000008, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x0000000c, /* EMC_TXSR */ 0x0000000c, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000000, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x0000013f, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x106aa298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00080000, /* 
EMC_DLL_XFORM_DQS0 */ 0x00080000, /* EMC_DLL_XFORM_DQS1 */ 0x00080000, /* EMC_DLL_XFORM_DQS2 */ 0x00080000, /* EMC_DLL_XFORM_DQS3 */ 0x00080000, /* EMC_DLL_XFORM_DQS4 */ 0x00080000, /* EMC_DLL_XFORM_DQS5 */ 0x00080000, /* EMC_DLL_XFORM_DQS6 */ 0x00080000, /* EMC_DLL_XFORM_DQS7 */ 0x00080000, /* EMC_DLL_XFORM_DQS8 */ 0x00080000, /* EMC_DLL_XFORM_DQS9 */ 0x00080000, /* EMC_DLL_XFORM_DQS10 */ 0x00080000, /* EMC_DLL_XFORM_DQS11 */ 0x00080000, /* EMC_DLL_XFORM_DQS12 */ 0x00080000, /* EMC_DLL_XFORM_DQS13 */ 0x00080000, /* EMC_DLL_XFORM_DQS14 */ 0x00080000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 
0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000015, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x80000370, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0xa0000001, /* MC_EMEM_ARB_CFG */ 0x80000017, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0502, /* MC_EMEM_ARB_DA_COVERS */ 
0x74a30303, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000014, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, 
/* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 20850, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.17 */ "01_68000_01_V5.0.17_V1.1", /* DVFS table version */ 68000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x4000000a, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000003, /* EMC_RC */ 0x00000011, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000002, /* EMC_RAS */ 0x00000000, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000005, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000000, /* EMC_RD_RCD */ 0x00000000, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000202, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000080, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x0000000f, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x00000013, /* EMC_TXSR */ 0x00000013, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000001, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000213, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 
0x106aa298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00080000, /* EMC_DLL_XFORM_DQS0 */ 0x00080000, /* EMC_DLL_XFORM_DQS1 */ 0x00080000, /* EMC_DLL_XFORM_DQS2 */ 0x00080000, /* EMC_DLL_XFORM_DQS3 */ 0x00080000, /* EMC_DLL_XFORM_DQS4 */ 0x00080000, /* EMC_DLL_XFORM_DQS5 */ 0x00080000, /* EMC_DLL_XFORM_DQS6 */ 0x00080000, /* EMC_DLL_XFORM_DQS7 */ 0x00080000, /* EMC_DLL_XFORM_DQS8 */ 0x00080000, /* EMC_DLL_XFORM_DQS9 */ 0x00080000, /* EMC_DLL_XFORM_DQS10 */ 0x00080000, /* EMC_DLL_XFORM_DQS11 */ 0x00080000, /* EMC_DLL_XFORM_DQS12 */ 0x00080000, /* EMC_DLL_XFORM_DQS13 */ 0x00080000, /* EMC_DLL_XFORM_DQS14 */ 0x00080000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* 
EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000022, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x8000050e, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x00000001, /* MC_EMEM_ARB_CFG */ 0x8000001e, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* 
MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0502, /* MC_EMEM_ARB_DA_COVERS */ 0x74230403, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000021, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00b0, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ec, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ec, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00e90049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00a3, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ef, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ef, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ee00ef, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* 
EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 10720, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.17 */ "01_102000_01_V5.0.17_V1.1", /* DVFS table version */ 102000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000006, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000004, /* EMC_RC */ 0x0000001a, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000003, /* EMC_RAS */ 0x00000001, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000005, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000001, /* EMC_RD_RCD */ 0x00000001, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000304, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000000c1, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000018, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x0000001c, /* EMC_TXSR */ 0x0000001c, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000002, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 
0x00000005, /* EMC_TCLKSTOP */ 0x0000031c, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x106aa298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00080000, /* EMC_DLL_XFORM_DQS0 */ 0x00080000, /* EMC_DLL_XFORM_DQS1 */ 0x00080000, /* EMC_DLL_XFORM_DQS2 */ 0x00080000, /* EMC_DLL_XFORM_DQS3 */ 0x00080000, /* EMC_DLL_XFORM_DQS4 */ 0x00080000, /* EMC_DLL_XFORM_DQS5 */ 0x00080000, /* EMC_DLL_XFORM_DQS6 */ 0x00080000, /* EMC_DLL_XFORM_DQS7 */ 0x00080000, /* EMC_DLL_XFORM_DQS8 */ 0x00080000, /* EMC_DLL_XFORM_DQS9 */ 0x00080000, /* EMC_DLL_XFORM_DQS10 */ 0x00080000, /* EMC_DLL_XFORM_DQS11 */ 0x00080000, /* EMC_DLL_XFORM_DQS12 */ 0x00080000, /* EMC_DLL_XFORM_DQS13 */ 0x00080000, /* EMC_DLL_XFORM_DQS14 */ 0x00080000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* 
EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000033, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x80000713, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x08000001, /* MC_EMEM_ARB_CFG */ 0x80000026, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000003, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 
0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0503, /* MC_EMEM_ARB_DA_COVERS */ 0x73c30504, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000031, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff0075, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff009d, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff009d, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x009b0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ad, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00c6, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00d6, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x0000009f, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x0000009f, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x009f00a0, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 
0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 6890, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.17 */ "01_204000_01_V5.0.17_V1.1", /* DVFS table version */ 204000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000002, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000009, /* EMC_RC */ 0x00000035, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000006, /* EMC_RAS */ 0x00000002, /* EMC_RP */ 0x00000005, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000005, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000002, /* EMC_RD_RCD */ 0x00000002, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000005, /* EMC_WDV */ 0x00000005, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000004, /* EMC_EINPUT */ 0x00000006, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000003, /* EMC_QRST */ 0x0000000d, /* EMC_QSAFE */ 0x0000000f, /* EMC_RDV */ 0x00000011, /* EMC_RDV_MASK */ 0x00000607, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000181, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000032, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x00000038, /* EMC_TXSR */ 0x00000038, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 
0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000006, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000638, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x106aa298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00080000, /* EMC_DLL_XFORM_DQS0 */ 0x00080000, /* EMC_DLL_XFORM_DQS1 */ 0x00080000, /* EMC_DLL_XFORM_DQS2 */ 0x00080000, /* EMC_DLL_XFORM_DQS3 */ 0x00080000, /* EMC_DLL_XFORM_DQS4 */ 0x00080000, /* EMC_DLL_XFORM_DQS5 */ 0x00080000, /* EMC_DLL_XFORM_DQS6 */ 0x00080000, /* EMC_DLL_XFORM_DQS7 */ 0x00080000, /* EMC_DLL_XFORM_DQS8 */ 0x00080000, /* EMC_DLL_XFORM_DQS9 */ 0x00080000, /* EMC_DLL_XFORM_DQS10 */ 0x00080000, /* EMC_DLL_XFORM_DQS11 */ 0x00080000, /* EMC_DLL_XFORM_DQS12 */ 0x00080000, /* EMC_DLL_XFORM_DQS13 */ 0x00080000, /* EMC_DLL_XFORM_DQS14 */ 0x00080000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00008000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00008000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 
0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00090000, /* EMC_DLL_XFORM_DQ0 */ 0x00090000, /* EMC_DLL_XFORM_DQ1 */ 0x00090000, /* EMC_DLL_XFORM_DQ2 */ 0x00090000, /* EMC_DLL_XFORM_DQ3 */ 0x00009000, /* EMC_DLL_XFORM_DQ4 */ 0x00009000, /* EMC_DLL_XFORM_DQ5 */ 0x00009000, /* EMC_DLL_XFORM_DQ6 */ 0x00009000, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000707, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000066, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000d2b3, /* EMC_CFG_PIPE */ 0x80000d22, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x01000003, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000004, /* MC_EMEM_ARB_TIMING_RC */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAS */ 
0x00000003, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06040203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0504, /* MC_EMEM_ARB_DA_COVERS */ 0x73840a05, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000062, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00af, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff004f, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00af, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff004f, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x004e0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080057, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff0063, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff0036, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff006b, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000050, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000050, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510050, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00c6, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT 
after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008cd, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 3420, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.17 */ "01_300000_01_V5.0.17_V1.1", /* DVFS table version */ 300000, /* SDRAM frequency */ 820, /* min voltage */ 820, /* gpu min voltage */ "pllc_out0", /* clock source id */ 0x20000002, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x0000000d, /* EMC_RC */ 0x0000004d, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000009, /* EMC_RAS */ 0x00000003, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000002, /* EMC_R2P */ 0x00000009, /* EMC_W2P */ 0x00000003, /* EMC_RD_RCD */ 0x00000003, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000005, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000002, /* EMC_EINPUT */ 0x00000007, /* EMC_EINPUT_DURATION */ 0x00020000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000001, /* EMC_QRST */ 0x0000000e, /* EMC_QSAFE */ 0x00000010, /* EMC_RDV */ 0x00000012, /* EMC_RDV_MASK */ 0x000008e4, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000239, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000001, /* EMC_PDEX2WR */ 0x00000008, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x0000004b, /* 
EMC_AR2PDEN */ 0x0000000e, /* EMC_RW2PDEN */ 0x00000052, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000008, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000924, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x104ab098, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00030000, /* EMC_DLL_XFORM_DQS0 */ 0x00030000, /* EMC_DLL_XFORM_DQS1 */ 0x00030000, /* EMC_DLL_XFORM_DQS2 */ 0x00030000, /* EMC_DLL_XFORM_DQS3 */ 0x00030000, /* EMC_DLL_XFORM_DQS4 */ 0x00030000, /* EMC_DLL_XFORM_DQS5 */ 0x00030000, /* EMC_DLL_XFORM_DQS6 */ 0x00030000, /* EMC_DLL_XFORM_DQS7 */ 0x00030000, /* EMC_DLL_XFORM_DQS8 */ 0x00030000, /* EMC_DLL_XFORM_DQS9 */ 0x00030000, /* EMC_DLL_XFORM_DQS10 */ 0x00030000, /* EMC_DLL_XFORM_DQS11 */ 0x00030000, /* EMC_DLL_XFORM_DQS12 */ 0x00030000, /* EMC_DLL_XFORM_DQS13 */ 0x00030000, /* EMC_DLL_XFORM_DQS14 */ 0x00030000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00098000, /* EMC_DLL_XFORM_ADDR0 */ 0x00098000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00098000, /* EMC_DLL_XFORM_ADDR3 */ 0x00098000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* 
EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00050000, /* EMC_DLL_XFORM_DQ0 */ 0x00050000, /* EMC_DLL_XFORM_DQ1 */ 0x00050000, /* EMC_DLL_XFORM_DQ2 */ 0x00050000, /* EMC_DLL_XFORM_DQ3 */ 0x00005000, /* EMC_DLL_XFORM_DQ4 */ 0x00005000, /* EMC_DLL_XFORM_DQ5 */ 0x00005000, /* EMC_DLL_XFORM_DQ6 */ 0x00005000, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x01231339, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000096, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x0173000e, /* EMC_MRS_WAIT_CNT */ 0x0173000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x000052a3, /* EMC_CFG_PIPE */ 0x800012d7, /* EMC_DYN_SELF_REF_CONTROL */ 0x00000009, /* EMC_QPOP */ 0x08000004, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* 
MC_EMEM_ARB_TIMING_RCD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RP */ 0x00000007, /* MC_EMEM_ARB_TIMING_RC */ 0x00000004, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000004, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000007, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06040202, /* MC_EMEM_ARB_DA_TURNS */ 0x000b0607, /* MC_EMEM_ARB_DA_COVERS */ 0x77450e08, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000004, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000090, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00350049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x0008003b, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff0043, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff002d, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510036, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* 
MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff0087, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73340000, /* EMC_CFG */ 0x000008d5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80000321, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200000, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 2680, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.17 */ "01_396000_01_V5.0.17_V1.1", /* DVFS table version */ 396000, /* SDRAM frequency */ 850, /* min voltage */ 850, /* gpu min voltage */ "pllm_out0", /* clock source id */ 0x00000002, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000011, /* EMC_RC */ 0x00000066, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x0000000c, /* EMC_RAS */ 0x00000004, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000002, /* EMC_R2P */ 0x0000000a, /* EMC_W2P */ 0x00000004, /* EMC_RD_RCD */ 0x00000004, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000005, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000001, /* EMC_EINPUT */ 0x00000008, /* EMC_EINPUT_DURATION */ 0x00020000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000000, /* EMC_QRST */ 0x0000000f, /* EMC_QSAFE */ 0x00000010, /* EMC_RDV */ 0x00000012, /* EMC_RDV_MASK */ 0x00000bd1, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000002f4, /* 
EMC_PRE_REFRESH_REQ_CNT */ 0x00000001, /* EMC_PDEX2WR */ 0x00000008, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000063, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x0000006c, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x0000000b, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000c11, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x104ab098, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00030000, /* EMC_DLL_XFORM_DQS0 */ 0x00030000, /* EMC_DLL_XFORM_DQS1 */ 0x00030000, /* EMC_DLL_XFORM_DQS2 */ 0x00030000, /* EMC_DLL_XFORM_DQS3 */ 0x00030000, /* EMC_DLL_XFORM_DQS4 */ 0x00030000, /* EMC_DLL_XFORM_DQS5 */ 0x00030000, /* EMC_DLL_XFORM_DQS6 */ 0x00030000, /* EMC_DLL_XFORM_DQS7 */ 0x00030000, /* EMC_DLL_XFORM_DQS8 */ 0x00030000, /* EMC_DLL_XFORM_DQS9 */ 0x00030000, /* EMC_DLL_XFORM_DQS10 */ 0x00030000, /* EMC_DLL_XFORM_DQS11 */ 0x00030000, /* EMC_DLL_XFORM_DQS12 */ 0x00030000, /* EMC_DLL_XFORM_DQS13 */ 0x00030000, /* EMC_DLL_XFORM_DQS14 */ 0x00030000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00070000, /* EMC_DLL_XFORM_ADDR0 */ 0x00070000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00070000, /* EMC_DLL_XFORM_ADDR3 */ 0x00070000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* 
EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00038000, /* EMC_DLL_XFORM_DQ0 */ 0x00038000, /* EMC_DLL_XFORM_DQ1 */ 0x00038000, /* EMC_DLL_XFORM_DQ2 */ 0x00038000, /* EMC_DLL_XFORM_DQ3 */ 0x00003800, /* EMC_DLL_XFORM_DQ4 */ 0x00003800, /* EMC_DLL_XFORM_DQ5 */ 0x00003800, /* EMC_DLL_XFORM_DQ6 */ 0x00003800, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x01231339, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x000000c6, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x015b000e, /* EMC_MRS_WAIT_CNT */ 0x015b000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x000052a3, /* EMC_CFG_PIPE */ 0x8000188b, /* 
EMC_DYN_SELF_REF_CONTROL */ 0x00000009, /* EMC_QPOP */ 0x0f000005, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RP */ 0x00000009, /* MC_EMEM_ARB_TIMING_RC */ 0x00000005, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000006, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06040202, /* MC_EMEM_ARB_DA_TURNS */ 0x000d0709, /* MC_EMEM_ARB_DA_COVERS */ 0x7586120a, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x0000000a, /* MC_MLL_MPCORER_PTSA_RATE */ 0x000000be, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00280049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x0008002d, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff0033, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff0022, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff0037, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, 
/* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff0066, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73340000, /* EMC_CFG */ 0x00000895, /* EMC_CFG_2 */ 0x00040008, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80000521, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200000, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 2180, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.17 */ "01_528000_01_V5.0.17_V1.1", /* DVFS table version */ 528000, /* SDRAM frequency */ 880, /* min voltage */ 870, /* gpu min voltage */ "pllm_ud", /* clock source id */ 0x80000000, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000018, /* EMC_RC */ 0x00000088, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000010, /* EMC_RAS */ 0x00000006, /* EMC_RP */ 0x00000006, /* EMC_R2W */ 0x00000009, /* EMC_W2R */ 0x00000002, /* EMC_R2P */ 0x0000000d, /* EMC_W2P */ 0x00000006, /* EMC_RD_RCD */ 0x00000006, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000001, /* EMC_EINPUT */ 0x00000009, /* EMC_EINPUT_DURATION */ 0x00030000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000000, /* EMC_QRST */ 0x00000010, /* EMC_QSAFE */ 0x00000012, /* EMC_RDV 
*/ 0x00000014, /* EMC_RDV_MASK */ 0x00000fd6, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000003f5, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x0000000b, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000085, /* EMC_AR2PDEN */ 0x00000012, /* EMC_RW2PDEN */ 0x00000090, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000010, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000006, /* EMC_TCLKSTABLE */ 0x00000006, /* EMC_TCLKSTOP */ 0x00001017, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x104ab098, /* EMC_FBIO_CFG5 */ 0xe01200b1, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x0000000a, /* EMC_DLL_XFORM_DQS0 */ 0x0000000a, /* EMC_DLL_XFORM_DQS1 */ 0x0000000a, /* EMC_DLL_XFORM_DQS2 */ 0x0000000a, /* EMC_DLL_XFORM_DQS3 */ 0x0000000a, /* EMC_DLL_XFORM_DQS4 */ 0x0000000a, /* EMC_DLL_XFORM_DQS5 */ 0x0000000a, /* EMC_DLL_XFORM_DQS6 */ 0x0000000a, /* EMC_DLL_XFORM_DQS7 */ 0x0000000a, /* EMC_DLL_XFORM_DQS8 */ 0x0000000a, /* EMC_DLL_XFORM_DQS9 */ 0x0000000a, /* EMC_DLL_XFORM_DQS10 */ 0x0000000a, /* EMC_DLL_XFORM_DQS11 */ 0x0000000a, /* EMC_DLL_XFORM_DQS12 */ 0x0000000a, /* EMC_DLL_XFORM_DQS13 */ 0x0000000a, /* EMC_DLL_XFORM_DQS14 */ 0x0000000a, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00054000, /* EMC_DLL_XFORM_ADDR0 */ 0x00054000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00054000, /* EMC_DLL_XFORM_ADDR3 */ 0x00054000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, 
/* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000c, /* EMC_DLL_XFORM_DQ0 */ 0x0000000c, /* EMC_DLL_XFORM_DQ1 */ 0x0000000c, /* EMC_DLL_XFORM_DQ2 */ 0x0000000c, /* EMC_DLL_XFORM_DQ3 */ 0x0000000c, /* EMC_DLL_XFORM_DQ4 */ 0x0000000c, /* EMC_DLL_XFORM_DQ5 */ 0x0000000c, /* EMC_DLL_XFORM_DQ6 */ 0x0000000c, /* EMC_DLL_XFORM_DQ7 */ 0x100002a0, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0123133d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc085, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x0139000e, /* EMC_MRS_WAIT_CNT */ 0x0139000e, /* 
EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x000042a0, /* EMC_CFG_PIPE */ 0x80002062, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x0f000007, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000002, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RP */ 0x0000000c, /* MC_EMEM_ARB_TIMING_RC */ 0x00000007, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000008, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000009, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06050202, /* MC_EMEM_ARB_DA_TURNS */ 0x0010090c, /* MC_EMEM_ARB_DA_COVERS */ 0x7428180d, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x0000000d, /* MC_MLL_MPCORER_PTSA_RATE */ 0x000000fd, /* MC_PTSA_GRANT_DECREMENT */ 0x00c10038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00c10038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00c1003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00c10090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00c10041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00c10090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00c10041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00c10080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00c10004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00c10004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080021, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000c1, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00c10004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00c10026, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00c1001a, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00c10024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00c10029, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000c1, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00c100c1, /* 
MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00c10065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00c1002a, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73300000, /* EMC_CFG */ 0x0000089d, /* EMC_CFG_2 */ 0x00040008, /* EMC_SEL_DPD_CTRL */ 0xe0120069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80000941, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 1440, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.17 */ "01_600000_01_V5.0.17_V1.1", /* DVFS table version */ 600000, /* SDRAM frequency */ 910, /* min voltage */ 910, /* gpu min voltage */ "pllc_ud", /* clock source id */ 0xe0000000, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x0000001b, /* EMC_RC */ 0x0000009b, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000013, /* EMC_RAS */ 0x00000007, /* EMC_RP */ 0x00000007, /* EMC_R2W */ 0x0000000b, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x00000010, /* EMC_W2P */ 0x00000007, /* EMC_RD_RCD */ 0x00000007, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000005, /* EMC_WDV */ 0x00000005, /* EMC_WDV_MASK */ 0x0000000a, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000003, /* EMC_EINPUT */ 0x0000000b, /* EMC_EINPUT_DURATION */ 0x00070000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 
0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000002, /* EMC_QRST */ 0x00000012, /* EMC_QSAFE */ 0x00000016, /* EMC_RDV */ 0x00000018, /* EMC_RDV_MASK */ 0x00001208, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000482, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x0000000d, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000097, /* EMC_AR2PDEN */ 0x00000015, /* EMC_RW2PDEN */ 0x000000a3, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000013, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000006, /* EMC_TCLKSTABLE */ 0x00000006, /* EMC_TCLKSTOP */ 0x00001248, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x104ab098, /* EMC_FBIO_CFG5 */ 0xe00e00b1, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x0000000a, /* EMC_DLL_XFORM_DQS0 */ 0x0000000a, /* EMC_DLL_XFORM_DQS1 */ 0x0000000a, /* EMC_DLL_XFORM_DQS2 */ 0x0000000a, /* EMC_DLL_XFORM_DQS3 */ 0x0000000a, /* EMC_DLL_XFORM_DQS4 */ 0x0000000a, /* EMC_DLL_XFORM_DQS5 */ 0x0000000a, /* EMC_DLL_XFORM_DQS6 */ 0x0000000a, /* EMC_DLL_XFORM_DQS7 */ 0x0000000a, /* EMC_DLL_XFORM_DQS8 */ 0x0000000a, /* EMC_DLL_XFORM_DQS9 */ 0x0000000a, /* EMC_DLL_XFORM_DQS10 */ 0x0000000a, /* EMC_DLL_XFORM_DQS11 */ 0x0000000a, /* EMC_DLL_XFORM_DQS12 */ 0x0000000a, /* EMC_DLL_XFORM_DQS13 */ 0x0000000a, /* EMC_DLL_XFORM_DQS14 */ 0x0000000a, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00048000, /* EMC_DLL_XFORM_ADDR0 */ 0x00048000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00048000, /* 
EMC_DLL_XFORM_ADDR3 */ 0x00048000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000d, /* EMC_DLL_XFORM_DQ0 */ 0x0000000d, /* EMC_DLL_XFORM_DQ1 */ 0x0000000d, /* EMC_DLL_XFORM_DQ2 */ 0x0000000d, /* EMC_DLL_XFORM_DQ3 */ 0x0000000d, /* EMC_DLL_XFORM_DQ4 */ 0x0000000d, /* EMC_DLL_XFORM_DQ5 */ 0x0000000d, /* EMC_DLL_XFORM_DQ6 */ 0x0000000d, /* EMC_DLL_XFORM_DQ7 */ 0x100002a0, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0121113d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc085, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* 
EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x0127000e, /* EMC_MRS_WAIT_CNT */ 0x0127000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x000040a0, /* EMC_CFG_PIPE */ 0x800024aa, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000e, /* EMC_QPOP */ 0x00000009, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000003, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000004, /* MC_EMEM_ARB_TIMING_RP */ 0x0000000e, /* MC_EMEM_ARB_TIMING_RC */ 0x00000009, /* MC_EMEM_ARB_TIMING_RAS */ 0x0000000a, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x0000000b, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000007, /* MC_EMEM_ARB_TIMING_W2R */ 0x07050202, /* MC_EMEM_ARB_DA_TURNS */ 0x00130b0e, /* MC_EMEM_ARB_DA_COVERS */ 0x73a91b0f, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x0000000f, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000120, /* MC_PTSA_GRANT_DECREMENT */ 0x00aa0038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00aa0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00aa003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00aa0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00aa0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00aa0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00aa0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00aa0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x0008001d, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000aa, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00aa0022, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00aa0018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00aa0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00aa0024, /* 
MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000aa, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00aa0065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00aa0025, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73300000, /* EMC_CFG */ 0x0000089d, /* EMC_CFG_2 */ 0x00040008, /* EMC_SEL_DPD_CTRL */ 0xe00e0069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80000b61, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200010, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 1440, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.17 */ "01_792000_01_V5.0.17_V1.1", /* DVFS table version */ 792000, /* SDRAM frequency */ 980, /* min voltage */ 980, /* gpu min voltage */ "pllm_ud", /* clock source id */ 0x80000000, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000024, /* EMC_RC */ 0x000000cd, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000019, /* EMC_RAS */ 0x0000000a, /* EMC_RP */ 0x00000008, /* EMC_R2W */ 0x0000000d, /* EMC_W2R */ 0x00000004, /* EMC_R2P */ 0x00000013, /* EMC_W2P */ 0x0000000a, /* EMC_RD_RCD */ 0x0000000a, /* EMC_WR_RCD */ 0x00000004, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x0000000b, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000002, /* EMC_EINPUT */ 0x0000000d, /* EMC_EINPUT_DURATION */ 
0x00080000, /* EMC_PUTERM_EXTRA */ 0x00000004, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000001, /* EMC_QRST */ 0x00000014, /* EMC_QSAFE */ 0x00000018, /* EMC_RDV */ 0x0000001a, /* EMC_RDV_MASK */ 0x000017e2, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000005f8, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000003, /* EMC_PDEX2WR */ 0x00000011, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x000000c7, /* EMC_AR2PDEN */ 0x00000018, /* EMC_RW2PDEN */ 0x000000d7, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000005, /* EMC_TCKE */ 0x00000006, /* EMC_TCKESR */ 0x00000005, /* EMC_TPD */ 0x00000019, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000008, /* EMC_TCLKSTABLE */ 0x00000008, /* EMC_TCLKSTOP */ 0x00001822, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x104ab098, /* EMC_FBIO_CFG5 */ 0xe00700b1, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x007fc008, /* EMC_DLL_XFORM_DQS0 */ 0x007fc008, /* EMC_DLL_XFORM_DQS1 */ 0x007fc008, /* EMC_DLL_XFORM_DQS2 */ 0x007fc008, /* EMC_DLL_XFORM_DQS3 */ 0x007fc008, /* EMC_DLL_XFORM_DQS4 */ 0x007fc008, /* EMC_DLL_XFORM_DQS5 */ 0x007fc008, /* EMC_DLL_XFORM_DQS6 */ 0x007fc008, /* EMC_DLL_XFORM_DQS7 */ 0x007fc008, /* EMC_DLL_XFORM_DQS8 */ 0x007fc008, /* EMC_DLL_XFORM_DQS9 */ 0x007fc008, /* EMC_DLL_XFORM_DQS10 */ 0x007fc008, /* EMC_DLL_XFORM_DQS11 */ 0x007fc008, /* EMC_DLL_XFORM_DQS12 */ 0x007fc008, /* EMC_DLL_XFORM_DQS13 */ 0x007fc008, /* EMC_DLL_XFORM_DQS14 */ 0x007fc008, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 
0x00034000, /* EMC_DLL_XFORM_ADDR0 */ 0x00034000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00034000, /* EMC_DLL_XFORM_ADDR3 */ 0x00034000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000a, /* EMC_DLL_XFORM_DQ0 */ 0x0000000a, /* EMC_DLL_XFORM_DQ1 */ 0x0000000a, /* EMC_DLL_XFORM_DQ2 */ 0x0000000a, /* EMC_DLL_XFORM_DQ3 */ 0x0000000a, /* EMC_DLL_XFORM_DQ4 */ 0x0000000a, /* EMC_DLL_XFORM_DQ5 */ 0x0000000a, /* EMC_DLL_XFORM_DQ6 */ 0x0000000a, /* EMC_DLL_XFORM_DQ7 */ 0x100002a0, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0120113d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc085, /* EMC_XM2CLKPADCTRL */ 0x00000000, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x61861820, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 
0x61861800, /* EMC_XM2DQSPADCTRL6 */ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x00f7000e, /* EMC_MRS_WAIT_CNT */ 0x00f7000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000004, /* EMC_CTT_DURATION */ 0x00004080, /* EMC_CFG_PIPE */ 0x80003012, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000f, /* EMC_QPOP */ 0x0e00000b, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000004, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000005, /* MC_EMEM_ARB_TIMING_RP */ 0x00000013, /* MC_EMEM_ARB_TIMING_RC */ 0x0000000c, /* MC_EMEM_ARB_TIMING_RAS */ 0x0000000d, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000002, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x0000000c, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000008, /* MC_EMEM_ARB_TIMING_W2R */ 0x08060202, /* MC_EMEM_ARB_DA_TURNS */ 0x00170e13, /* MC_EMEM_ARB_DA_COVERS */ 0x736c2414, /* MC_EMEM_ARB_MISC0 */ 0x70000f02, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000013, /* MC_MLL_MPCORER_PTSA_RATE */ 0x0000017c, /* MC_PTSA_GRANT_DECREMENT */ 0x00810038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00810038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x0081003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00810090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00810041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00810090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00810041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00810080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080016, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x00000081, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00810019, /* 
MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00810018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00810024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x0081001c, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x00000081, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00810065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x0081001c, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73300000, /* EMC_CFG */ 0x0000089d, /* EMC_CFG_2 */ 0x00040000, /* EMC_SEL_DPD_CTRL */ 0xe0070069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80000d71, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200018, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 1200, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.17 */ "01_924000_01_V5.0.17_V1.1", /* DVFS table version */ 924000, /* SDRAM frequency */ 1010, /* min voltage */ 1010, /* gpu min voltage */ "pllm_ud", /* clock source id */ 0x80000000, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x0000002b, /* EMC_RC */ 0x000000f0, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x0000001e, /* EMC_RAS */ 0x0000000b, /* EMC_RP */ 0x00000009, /* EMC_R2W */ 0x0000000f, /* EMC_W2R */ 0x00000005, /* EMC_R2P */ 0x00000016, /* EMC_W2P */ 0x0000000b, /* EMC_RD_RCD */ 0x0000000b, /* EMC_WR_RCD */ 0x00000004, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000007, /* EMC_WDV */ 0x00000007, /* EMC_WDV_MASK */ 0x0000000d, /* EMC_QUSE 
*/ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000002, /* EMC_EINPUT */ 0x0000000f, /* EMC_EINPUT_DURATION */ 0x000a0000, /* EMC_PUTERM_EXTRA */ 0x00000004, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000001, /* EMC_QRST */ 0x00000016, /* EMC_QSAFE */ 0x0000001a, /* EMC_RDV */ 0x0000001c, /* EMC_RDV_MASK */ 0x00001be7, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000006f9, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000004, /* EMC_PDEX2WR */ 0x00000015, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x000000e7, /* EMC_AR2PDEN */ 0x0000001b, /* EMC_RW2PDEN */ 0x000000fb, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000006, /* EMC_TCKE */ 0x00000007, /* EMC_TCKESR */ 0x00000006, /* EMC_TPD */ 0x0000001e, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x0000000a, /* EMC_TCLKSTABLE */ 0x0000000a, /* EMC_TCLKSTOP */ 0x00001c28, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x104ab898, /* EMC_FBIO_CFG5 */ 0xe00400b1, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x007f800a, /* EMC_DLL_XFORM_DQS0 */ 0x007f800a, /* EMC_DLL_XFORM_DQS1 */ 0x007f800a, /* EMC_DLL_XFORM_DQS2 */ 0x007f800a, /* EMC_DLL_XFORM_DQS3 */ 0x007f800a, /* EMC_DLL_XFORM_DQS4 */ 0x007f800a, /* EMC_DLL_XFORM_DQS5 */ 0x007f800a, /* EMC_DLL_XFORM_DQS6 */ 0x007f800a, /* EMC_DLL_XFORM_DQS7 */ 0x007f800a, /* EMC_DLL_XFORM_DQS8 */ 0x007f800a, /* EMC_DLL_XFORM_DQS9 */ 0x007f800a, /* EMC_DLL_XFORM_DQS10 */ 0x007f800a, /* EMC_DLL_XFORM_DQS11 */ 0x007f800a, /* EMC_DLL_XFORM_DQS12 */ 0x007f800a, /* EMC_DLL_XFORM_DQS13 */ 0x007f800a, /* EMC_DLL_XFORM_DQS14 */ 0x007f800a, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* 
EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x0002c000, /* EMC_DLL_XFORM_ADDR0 */ 0x0002c000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x0002c000, /* EMC_DLL_XFORM_ADDR3 */ 0x0002c000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000004, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000004, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000004, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000004, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000004, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000004, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000004, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000004, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000004, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000004, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000004, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000004, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000004, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000004, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000004, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000004, /* EMC_DLI_TRIM_TXDQS15 */ 0x00000008, /* EMC_DLL_XFORM_DQ0 */ 0x00000008, /* EMC_DLL_XFORM_DQ1 */ 0x00000008, /* EMC_DLL_XFORM_DQ2 */ 0x00000008, /* EMC_DLL_XFORM_DQ3 */ 0x00000008, /* EMC_DLL_XFORM_DQ4 */ 0x00000008, /* EMC_DLL_XFORM_DQ5 */ 0x00000008, /* EMC_DLL_XFORM_DQ6 */ 0x00000008, /* EMC_DLL_XFORM_DQ7 */ 0x100002a0, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0120113d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc085, /* EMC_XM2CLKPADCTRL */ 0x00000000, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* 
EMC_XM2VTTGENPADCTRL3 */ 0x5d75d720, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x5d75d700, /* EMC_XM2DQSPADCTRL6 */ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000128, /* EMC_ZCAL_WAIT_CNT */ 0x00cd000e, /* EMC_MRS_WAIT_CNT */ 0x00cd000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000004, /* EMC_CTT_DURATION */ 0x00004080, /* EMC_CFG_PIPE */ 0x800037ea, /* EMC_DYN_SELF_REF_CONTROL */ 0x00000011, /* EMC_QPOP */ 0x0e00000d, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000005, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000006, /* MC_EMEM_ARB_TIMING_RP */ 0x00000016, /* MC_EMEM_ARB_TIMING_RC */ 0x0000000e, /* MC_EMEM_ARB_TIMING_RAS */ 0x0000000f, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000002, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000004, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x0000000e, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000009, /* MC_EMEM_ARB_TIMING_W2R */ 0x09060202, /* MC_EMEM_ARB_DA_TURNS */ 0x001a1016, /* MC_EMEM_ARB_DA_COVERS */ 0x734e2a17, /* MC_EMEM_ARB_MISC0 */ 0x70000f02, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000017, /* MC_MLL_MPCORER_PTSA_RATE */ 0x000001bb, /* MC_PTSA_GRANT_DECREMENT */ 0x006e0038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x006e0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x006e003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x006e0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x006e0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x006e0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x006e0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x006e0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x006e0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x006e0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080016, /* 
MC_LATENCY_ALLOWANCE_HC_0 */ 0x0000006e, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x006e0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x006e0019, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x006e0018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x006e0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x006e001b, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x0000006e, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x006e006e, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x006e006e, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x006e006e, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x006e006e, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x006e0065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x006e001c, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x0000004c, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73300000, /* EMC_CFG */ 0x0000089d, /* EMC_CFG_2 */ 0x00040000, /* EMC_SEL_DPD_CTRL */ 0xe0040069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430303, /* EMC_AUTO_CAL_CONFIG */ 0x80000f15, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200020, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 1180, /* expected dvfs latency (ns) */ }, }; static struct tegra12_emc_table pm375_ddr3_emc_table[] = { { 0x19, /* V5.0.14 */ "04_12750_03_V5.0.14_V1.1", /* DVFS table version */ 12750, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x4000003e, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000000, /* EMC_RC */ 0x00000003, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000000, /* EMC_RAS */ 0x00000000, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000000, /* 
EMC_RD_RCD */ 0x00000000, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000060, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000018, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000007, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x00000005, /* EMC_TXSR */ 0x00000005, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000000, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000064, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x106aa298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 
0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 
*/ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000007, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x800001c5, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x40040001, /* MC_EMEM_ARB_CFG */ 0x8000000a, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0402, /* MC_EMEM_ARB_DA_COVERS */ 0x77e30303, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000007, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00ff0049, /* 
MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 57820, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "04_20400_03_V5.0.14_V1.1", /* DVFS table version */ 20400, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000026, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000000, /* EMC_RC */ 0x00000005, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000000, /* EMC_RAS */ 0x00000000, /* EMC_RP */ 
0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000000, /* EMC_RD_RCD */ 0x00000000, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x0000009a, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000026, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000007, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x00000006, /* EMC_TXSR */ 0x00000006, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000000, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x000000a0, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x106aa298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 
0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* 
EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x0000000b, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x8000023a, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x40020001, /* MC_EMEM_ARB_CFG */ 0x80000012, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0402, /* MC_EMEM_ARB_DA_COVERS */ 0x76230303, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x0000000a, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ff, /* 
MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 35610, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "04_40800_03_V5.0.14_V1.1", /* DVFS table version */ 40800, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000012, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of 
up_down_regs */ { 0x00000001, /* EMC_RC */ 0x0000000a, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000001, /* EMC_RAS */ 0x00000000, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000000, /* EMC_RD_RCD */ 0x00000000, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000134, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x0000004d, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000008, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x0000000c, /* EMC_TXSR */ 0x0000000c, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000000, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x0000013f, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x106aa298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 
0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* 
EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000015, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x80000370, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0xa0000001, /* MC_EMEM_ARB_CFG */ 0x80000017, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0402, /* MC_EMEM_ARB_DA_COVERS */ 0x74a30303, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000014, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* 
MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 20850, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "04_68000_03_V5.0.14_V1.1", /* DVFS table version */ 68000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* 
gpu min voltage */ "pllp_out0", /* clock source id */ 0x4000000a, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000003, /* EMC_RC */ 0x00000011, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000002, /* EMC_RAS */ 0x00000000, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000000, /* EMC_RD_RCD */ 0x00000000, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000202, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000080, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x0000000f, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x00000013, /* EMC_TXSR */ 0x00000013, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000001, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000213, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x106aa298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, 
/* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 
*/ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000022, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x8000050e, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x00000001, /* MC_EMEM_ARB_CFG */ 0x8000001e, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0402, /* MC_EMEM_ARB_DA_COVERS */ 0x74230403, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* 
MC_MLL_MPCORER_PTSA_RATE */ 0x00000021, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff00b0, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff00ec, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff00ec, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00e90049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff00a3, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x000000ef, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x000000ef, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00ee00ef, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 10720, /* expected dvfs latency (ns) */ }, { 
0x19, /* V5.0.14 */ "04_102000_03_V5.0.14_V1.1", /* DVFS table version */ 102000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000006, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000004, /* EMC_RC */ 0x0000001a, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000003, /* EMC_RAS */ 0x00000001, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000001, /* EMC_RD_RCD */ 0x00000001, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000005, /* EMC_EINPUT */ 0x00000005, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000004, /* EMC_QRST */ 0x0000000c, /* EMC_QSAFE */ 0x0000000d, /* EMC_RDV */ 0x0000000f, /* EMC_RDV_MASK */ 0x00000304, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000000c1, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000018, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x0000001c, /* EMC_TXSR */ 0x0000001c, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000003, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x0000031c, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x106aa298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* 
EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 
0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x000fc000, /* EMC_DLL_XFORM_DQ0 */ 0x000fc000, /* EMC_DLL_XFORM_DQ1 */ 0x000fc000, /* EMC_DLL_XFORM_DQ2 */ 0x000fc000, /* EMC_DLL_XFORM_DQ3 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ4 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ5 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ6 */ 0x0000fc00, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000e0e, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000033, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00000000, /* EMC_ZCAL_INTERVAL */ 0x00000042, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000f2f3, /* EMC_CFG_PIPE */ 0x80000713, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x08000001, /* MC_EMEM_ARB_CFG */ 0x80000026, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000003, /* MC_EMEM_ARB_TIMING_RC */ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0403, /* MC_EMEM_ARB_DA_COVERS */ 
0x73c30504, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000031, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff0075, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff009d, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff009d, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x009b0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x000800ad, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff00c6, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff00d6, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x0000009f, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x0000009f, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x009f00a0, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x000008c5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, 
/* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 6890, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "04_204000_04_V5.0.14_V1.1", /* DVFS table version */ 204000, /* SDRAM frequency */ 800, /* min voltage */ 800, /* gpu min voltage */ "pllp_out0", /* clock source id */ 0x40000002, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000009, /* EMC_RC */ 0x00000035, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000006, /* EMC_RAS */ 0x00000002, /* EMC_RP */ 0x00000005, /* EMC_R2W */ 0x0000000a, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x0000000b, /* EMC_W2P */ 0x00000002, /* EMC_RD_RCD */ 0x00000002, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000003, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000005, /* EMC_WDV */ 0x00000005, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000004, /* EMC_EINPUT */ 0x00000006, /* EMC_EINPUT_DURATION */ 0x00010000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000003, /* EMC_QRST */ 0x0000000d, /* EMC_QSAFE */ 0x0000000f, /* EMC_RDV */ 0x00000011, /* EMC_RDV_MASK */ 0x00000607, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000181, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x00000002, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000032, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x00000038, /* EMC_TXSR */ 0x00000038, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000007, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000638, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 
0x106aa298, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00064000, /* EMC_DLL_XFORM_DQS0 */ 0x00064000, /* EMC_DLL_XFORM_DQS1 */ 0x00064000, /* EMC_DLL_XFORM_DQS2 */ 0x00064000, /* EMC_DLL_XFORM_DQS3 */ 0x00064000, /* EMC_DLL_XFORM_DQS4 */ 0x00064000, /* EMC_DLL_XFORM_DQS5 */ 0x00064000, /* EMC_DLL_XFORM_DQS6 */ 0x00064000, /* EMC_DLL_XFORM_DQS7 */ 0x00064000, /* EMC_DLL_XFORM_DQS8 */ 0x00064000, /* EMC_DLL_XFORM_DQS9 */ 0x00064000, /* EMC_DLL_XFORM_DQS10 */ 0x00064000, /* EMC_DLL_XFORM_DQS11 */ 0x00064000, /* EMC_DLL_XFORM_DQS12 */ 0x00064000, /* EMC_DLL_XFORM_DQS13 */ 0x00064000, /* EMC_DLL_XFORM_DQS14 */ 0x00064000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00000000, /* EMC_DLL_XFORM_ADDR0 */ 0x00000000, /* EMC_DLL_XFORM_ADDR1 */ 0x00008000, /* EMC_DLL_XFORM_ADDR2 */ 0x00000000, /* EMC_DLL_XFORM_ADDR3 */ 0x00000000, /* EMC_DLL_XFORM_ADDR4 */ 0x00008000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* 
EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00090000, /* EMC_DLL_XFORM_DQ0 */ 0x00090000, /* EMC_DLL_XFORM_DQ1 */ 0x00090000, /* EMC_DLL_XFORM_DQ2 */ 0x00090000, /* EMC_DLL_XFORM_DQ3 */ 0x00009000, /* EMC_DLL_XFORM_DQ4 */ 0x00009000, /* EMC_DLL_XFORM_DQ5 */ 0x00009000, /* EMC_DLL_XFORM_DQ6 */ 0x00009000, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000707, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451400, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000066, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT */ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x0000d2b3, /* EMC_CFG_PIPE */ 0x80000d22, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x01000003, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */ 0x00000004, /* MC_EMEM_ARB_TIMING_RC */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000004, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* 
MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06040203, /* MC_EMEM_ARB_DA_TURNS */ 0x000a0404, /* MC_EMEM_ARB_DA_COVERS */ 0x73840a05, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000062, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff00af, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff004f, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff00af, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff004f, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x004e0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080057, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff0063, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff0036, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff006b, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000050, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000050, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510050, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff00c6, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73240000, /* EMC_CFG */ 0x0000088d, /* EMC_CFG_2 */ 0x00040008, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000008, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* 
EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80001221, /* Mode Register 0 */ 0x80100003, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 3420, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "04_300000_04_V5.0.14_V1.1", /* DVFS table version */ 300000, /* SDRAM frequency */ 820, /* min voltage */ 820, /* gpu min voltage */ "pllc_out0", /* clock source id */ 0x20000002, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x0000000d, /* EMC_RC */ 0x0000004d, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000009, /* EMC_RAS */ 0x00000003, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000002, /* EMC_R2P */ 0x00000009, /* EMC_W2P */ 0x00000003, /* EMC_RD_RCD */ 0x00000003, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000005, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000002, /* EMC_EINPUT */ 0x00000007, /* EMC_EINPUT_DURATION */ 0x00020000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000001, /* EMC_QRST */ 0x0000000e, /* EMC_QSAFE */ 0x00000010, /* EMC_RDV */ 0x00000012, /* EMC_RDV_MASK */ 0x000008e4, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000239, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000001, /* EMC_PDEX2WR */ 0x00000008, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x0000004b, /* EMC_AR2PDEN */ 0x0000000e, /* EMC_RW2PDEN */ 0x00000052, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000009, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 
0x00000005, /* EMC_TCLKSTOP */ 0x00000924, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x104ab098, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00030000, /* EMC_DLL_XFORM_DQS0 */ 0x00030000, /* EMC_DLL_XFORM_DQS1 */ 0x00030000, /* EMC_DLL_XFORM_DQS2 */ 0x00030000, /* EMC_DLL_XFORM_DQS3 */ 0x00030000, /* EMC_DLL_XFORM_DQS4 */ 0x00030000, /* EMC_DLL_XFORM_DQS5 */ 0x00030000, /* EMC_DLL_XFORM_DQS6 */ 0x00030000, /* EMC_DLL_XFORM_DQS7 */ 0x00030000, /* EMC_DLL_XFORM_DQS8 */ 0x00030000, /* EMC_DLL_XFORM_DQS9 */ 0x00030000, /* EMC_DLL_XFORM_DQS10 */ 0x00030000, /* EMC_DLL_XFORM_DQS11 */ 0x00030000, /* EMC_DLL_XFORM_DQS12 */ 0x00030000, /* EMC_DLL_XFORM_DQS13 */ 0x00030000, /* EMC_DLL_XFORM_DQS14 */ 0x00030000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00098000, /* EMC_DLL_XFORM_ADDR0 */ 0x00098000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00098000, /* EMC_DLL_XFORM_ADDR3 */ 0x00098000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* 
EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00060000, /* EMC_DLL_XFORM_DQ0 */ 0x00060000, /* EMC_DLL_XFORM_DQ1 */ 0x00060000, /* EMC_DLL_XFORM_DQ2 */ 0x00060000, /* EMC_DLL_XFORM_DQ3 */ 0x00006000, /* EMC_DLL_XFORM_DQ4 */ 0x00006000, /* EMC_DLL_XFORM_DQ5 */ 0x00006000, /* EMC_DLL_XFORM_DQ6 */ 0x00006000, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x01231339, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000096, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x0173000e, /* EMC_MRS_WAIT_CNT */ 0x0173000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x000052a3, /* EMC_CFG_PIPE */ 0x800012d7, /* EMC_DYN_SELF_REF_CONTROL */ 0x00000009, /* EMC_QPOP */ 0x08000004, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RP */ 0x00000007, /* MC_EMEM_ARB_TIMING_RC */ 0x00000004, /* MC_EMEM_ARB_TIMING_RAS */ 0x00000005, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 
0x00000007, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06040202, /* MC_EMEM_ARB_DA_TURNS */ 0x000b0607, /* MC_EMEM_ARB_DA_COVERS */ 0x77450e08, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000004, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000090, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00350049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x0008003b, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff0043, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff002d, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510036, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff0087, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73340000, /* EMC_CFG */ 
0x000008d5, /* EMC_CFG_2 */ 0x00040128, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80000321, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200000, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 2680, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "04_396000_05_V5.0.14_V1.1", /* DVFS table version */ 396000, /* SDRAM frequency */ 850, /* min voltage */ 850, /* gpu min voltage */ "pllm_out0", /* clock source id */ 0x00000002, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000011, /* EMC_RC */ 0x00000066, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x0000000c, /* EMC_RAS */ 0x00000004, /* EMC_RP */ 0x00000004, /* EMC_R2W */ 0x00000008, /* EMC_W2R */ 0x00000002, /* EMC_R2P */ 0x0000000a, /* EMC_W2P */ 0x00000004, /* EMC_RD_RCD */ 0x00000004, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000005, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000001, /* EMC_EINPUT */ 0x00000008, /* EMC_EINPUT_DURATION */ 0x00020000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000000, /* EMC_QRST */ 0x0000000f, /* EMC_QSAFE */ 0x00000010, /* EMC_RDV */ 0x00000012, /* EMC_RDV_MASK */ 0x00000bd1, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000002f4, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000001, /* EMC_PDEX2WR */ 0x00000008, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000063, /* EMC_AR2PDEN */ 0x0000000f, /* EMC_RW2PDEN */ 0x0000006c, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 
0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x0000000d, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000005, /* EMC_TCLKSTABLE */ 0x00000005, /* EMC_TCLKSTOP */ 0x00000c11, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x104ab098, /* EMC_FBIO_CFG5 */ 0x002c00a0, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00030000, /* EMC_DLL_XFORM_DQS0 */ 0x00030000, /* EMC_DLL_XFORM_DQS1 */ 0x00030000, /* EMC_DLL_XFORM_DQS2 */ 0x00030000, /* EMC_DLL_XFORM_DQS3 */ 0x00030000, /* EMC_DLL_XFORM_DQS4 */ 0x00030000, /* EMC_DLL_XFORM_DQS5 */ 0x00030000, /* EMC_DLL_XFORM_DQS6 */ 0x00030000, /* EMC_DLL_XFORM_DQS7 */ 0x00030000, /* EMC_DLL_XFORM_DQS8 */ 0x00030000, /* EMC_DLL_XFORM_DQS9 */ 0x00030000, /* EMC_DLL_XFORM_DQS10 */ 0x00030000, /* EMC_DLL_XFORM_DQS11 */ 0x00030000, /* EMC_DLL_XFORM_DQS12 */ 0x00030000, /* EMC_DLL_XFORM_DQS13 */ 0x00030000, /* EMC_DLL_XFORM_DQS14 */ 0x00030000, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00070000, /* EMC_DLL_XFORM_ADDR0 */ 0x00070000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00070000, /* EMC_DLL_XFORM_ADDR3 */ 0x00070000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 
0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x00038000, /* EMC_DLL_XFORM_DQ0 */ 0x00038000, /* EMC_DLL_XFORM_DQ1 */ 0x00038000, /* EMC_DLL_XFORM_DQ2 */ 0x00038000, /* EMC_DLL_XFORM_DQ3 */ 0x00003800, /* EMC_DLL_XFORM_DQ4 */ 0x00003800, /* EMC_DLL_XFORM_DQ5 */ 0x00003800, /* EMC_DLL_XFORM_DQ6 */ 0x00003800, /* EMC_DLL_XFORM_DQ7 */ 0x10000280, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x01231339, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc081, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */ 0x000000c6, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x015b000e, /* EMC_MRS_WAIT_CNT */ 0x015b000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x000052a3, /* EMC_CFG_PIPE */ 0x8000188b, /* EMC_DYN_SELF_REF_CONTROL */ 0x00000009, /* EMC_QPOP */ 0x0f000005, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RP */ 0x00000009, /* MC_EMEM_ARB_TIMING_RC */ 0x00000005, /* MC_EMEM_ARB_TIMING_RAS */ 
0x00000007, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06040202, /* MC_EMEM_ARB_DA_TURNS */ 0x000d0709, /* MC_EMEM_ARB_DA_COVERS */ 0x7586120a, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x0000000a, /* MC_MLL_MPCORER_PTSA_RATE */ 0x000000be, /* MC_PTSA_GRANT_DECREMENT */ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00280049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x0008002d, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00ff0033, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00ff0022, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00ff0037, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00ff0066, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT 
after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73340000, /* EMC_CFG */ 0x00000895, /* EMC_CFG_2 */ 0x00040008, /* EMC_SEL_DPD_CTRL */ 0x002c0068, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80000521, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200000, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 2180, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "04_528000_05_V5.0.14_V1.1", /* DVFS table version */ 528000, /* SDRAM frequency */ 880, /* min voltage */ 870, /* gpu min voltage */ "pllm_ud", /* clock source id */ 0x80000000, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000018, /* EMC_RC */ 0x00000088, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000010, /* EMC_RAS */ 0x00000006, /* EMC_RP */ 0x00000006, /* EMC_R2W */ 0x00000009, /* EMC_W2R */ 0x00000002, /* EMC_R2P */ 0x0000000d, /* EMC_W2P */ 0x00000006, /* EMC_RD_RCD */ 0x00000006, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000003, /* EMC_WDV */ 0x00000003, /* EMC_WDV_MASK */ 0x00000006, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000001, /* EMC_EINPUT */ 0x00000009, /* EMC_EINPUT_DURATION */ 0x00030000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000000, /* EMC_QRST */ 0x00000010, /* EMC_QSAFE */ 0x00000012, /* EMC_RDV */ 0x00000014, /* EMC_RDV_MASK */ 0x00000fd6, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000003f5, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* EMC_PDEX2WR */ 0x0000000b, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000085, /* EMC_AR2PDEN 
*/ 0x00000012, /* EMC_RW2PDEN */ 0x00000090, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000013, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000006, /* EMC_TCLKSTABLE */ 0x00000006, /* EMC_TCLKSTOP */ 0x00001017, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x104ab098, /* EMC_FBIO_CFG5 */ 0xe01200b1, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x0000000a, /* EMC_DLL_XFORM_DQS0 */ 0x0000000a, /* EMC_DLL_XFORM_DQS1 */ 0x0000000a, /* EMC_DLL_XFORM_DQS2 */ 0x0000000a, /* EMC_DLL_XFORM_DQS3 */ 0x0000000a, /* EMC_DLL_XFORM_DQS4 */ 0x0000000a, /* EMC_DLL_XFORM_DQS5 */ 0x0000000a, /* EMC_DLL_XFORM_DQS6 */ 0x0000000a, /* EMC_DLL_XFORM_DQS7 */ 0x0000000a, /* EMC_DLL_XFORM_DQS8 */ 0x0000000a, /* EMC_DLL_XFORM_DQS9 */ 0x0000000a, /* EMC_DLL_XFORM_DQS10 */ 0x0000000a, /* EMC_DLL_XFORM_DQS11 */ 0x0000000a, /* EMC_DLL_XFORM_DQS12 */ 0x0000000a, /* EMC_DLL_XFORM_DQS13 */ 0x0000000a, /* EMC_DLL_XFORM_DQS14 */ 0x0000000a, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00054000, /* EMC_DLL_XFORM_ADDR0 */ 0x00054000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00054000, /* EMC_DLL_XFORM_ADDR3 */ 0x00054000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 
0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000e, /* EMC_DLL_XFORM_DQ0 */ 0x0000000e, /* EMC_DLL_XFORM_DQ1 */ 0x0000000e, /* EMC_DLL_XFORM_DQ2 */ 0x0000000e, /* EMC_DLL_XFORM_DQ3 */ 0x0000000e, /* EMC_DLL_XFORM_DQ4 */ 0x0000000e, /* EMC_DLL_XFORM_DQ5 */ 0x0000000e, /* EMC_DLL_XFORM_DQ6 */ 0x0000000e, /* EMC_DLL_XFORM_DQ7 */ 0x100002a0, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0123133d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc085, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x0139000e, /* EMC_MRS_WAIT_CNT */ 0x0139000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x000042a0, /* EMC_CFG_PIPE */ 0x80002062, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000a, /* EMC_QPOP */ 0x0f000007, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000002, /* MC_EMEM_ARB_TIMING_RCD */ 
0x00000003, /* MC_EMEM_ARB_TIMING_RP */ 0x0000000c, /* MC_EMEM_ARB_TIMING_RC */ 0x00000007, /* MC_EMEM_ARB_TIMING_RAS */ 0x0000000a, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x00000009, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */ 0x06050202, /* MC_EMEM_ARB_DA_TURNS */ 0x0010090c, /* MC_EMEM_ARB_DA_COVERS */ 0x7428180d, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x0000000d, /* MC_MLL_MPCORER_PTSA_RATE */ 0x000000fd, /* MC_PTSA_GRANT_DECREMENT */ 0x00c10038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00c10038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00c1003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00c10090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00c10041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00c10090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00c10041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00c10080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00c10004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00c10004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080021, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000c1, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00c10004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00c10026, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00c1001a, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00c10024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00c10029, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000c1, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 
0x00c10065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00c1002a, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73300000, /* EMC_CFG */ 0x0000089d, /* EMC_CFG_2 */ 0x00040008, /* EMC_SEL_DPD_CTRL */ 0xe0120069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80000941, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200008, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 1440, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "04_600000_03_V5.0.14_V1.1", /* DVFS table version */ 600000, /* SDRAM frequency */ 910, /* min voltage */ 910, /* gpu min voltage */ "pllc_ud", /* clock source id */ 0xe0000000, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x0000001b, /* EMC_RC */ 0x0000009b, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000013, /* EMC_RAS */ 0x00000007, /* EMC_RP */ 0x00000007, /* EMC_R2W */ 0x0000000b, /* EMC_W2R */ 0x00000003, /* EMC_R2P */ 0x00000010, /* EMC_W2P */ 0x00000007, /* EMC_RD_RCD */ 0x00000007, /* EMC_WR_RCD */ 0x00000002, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000005, /* EMC_WDV */ 0x00000005, /* EMC_WDV_MASK */ 0x0000000a, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000003, /* EMC_EINPUT */ 0x0000000b, /* EMC_EINPUT_DURATION */ 0x00070000, /* EMC_PUTERM_EXTRA */ 0x00000003, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000002, /* EMC_QRST */ 0x00000012, /* EMC_QSAFE */ 0x00000016, /* EMC_RDV */ 0x00000018, /* EMC_RDV_MASK */ 0x00001208, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x00000482, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000002, /* 
EMC_PDEX2WR */ 0x0000000d, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x00000097, /* EMC_AR2PDEN */ 0x00000015, /* EMC_RW2PDEN */ 0x000000a3, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000004, /* EMC_TCKE */ 0x00000005, /* EMC_TCKESR */ 0x00000004, /* EMC_TPD */ 0x00000015, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000006, /* EMC_TCLKSTABLE */ 0x00000006, /* EMC_TCLKSTOP */ 0x00001248, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x104ab098, /* EMC_FBIO_CFG5 */ 0xe00e00b1, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x0000000a, /* EMC_DLL_XFORM_DQS0 */ 0x0000000a, /* EMC_DLL_XFORM_DQS1 */ 0x0000000a, /* EMC_DLL_XFORM_DQS2 */ 0x0000000a, /* EMC_DLL_XFORM_DQS3 */ 0x0000000a, /* EMC_DLL_XFORM_DQS4 */ 0x0000000a, /* EMC_DLL_XFORM_DQS5 */ 0x0000000a, /* EMC_DLL_XFORM_DQS6 */ 0x0000000a, /* EMC_DLL_XFORM_DQS7 */ 0x0000000a, /* EMC_DLL_XFORM_DQS8 */ 0x0000000a, /* EMC_DLL_XFORM_DQS9 */ 0x0000000a, /* EMC_DLL_XFORM_DQS10 */ 0x0000000a, /* EMC_DLL_XFORM_DQS11 */ 0x0000000a, /* EMC_DLL_XFORM_DQS12 */ 0x0000000a, /* EMC_DLL_XFORM_DQS13 */ 0x0000000a, /* EMC_DLL_XFORM_DQS14 */ 0x0000000a, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00048000, /* EMC_DLL_XFORM_ADDR0 */ 0x00048000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00048000, /* EMC_DLL_XFORM_ADDR3 */ 0x00048000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* 
EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000d, /* EMC_DLL_XFORM_DQ0 */ 0x0000000d, /* EMC_DLL_XFORM_DQ1 */ 0x0000000d, /* EMC_DLL_XFORM_DQ2 */ 0x0000000d, /* EMC_DLL_XFORM_DQ3 */ 0x0000000d, /* EMC_DLL_XFORM_DQ4 */ 0x0000000d, /* EMC_DLL_XFORM_DQ5 */ 0x0000000d, /* EMC_DLL_XFORM_DQ6 */ 0x0000000d, /* EMC_DLL_XFORM_DQ7 */ 0x100002a0, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0121113d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc085, /* EMC_XM2CLKPADCTRL */ 0x00000505, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x51451420, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x51451400, /* EMC_XM2DQSPADCTRL6 */ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x0127000e, /* EMC_MRS_WAIT_CNT */ 0x0127000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* EMC_CTT */ 0x00000003, /* EMC_CTT_DURATION */ 0x000040a0, /* EMC_CFG_PIPE */ 0x800024a9, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000e, /* 
EMC_QPOP */ 0x00000009, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000003, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000004, /* MC_EMEM_ARB_TIMING_RP */ 0x0000000e, /* MC_EMEM_ARB_TIMING_RC */ 0x00000009, /* MC_EMEM_ARB_TIMING_RAS */ 0x0000000b, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x0000000b, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000005, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000007, /* MC_EMEM_ARB_TIMING_W2R */ 0x07050202, /* MC_EMEM_ARB_DA_TURNS */ 0x00130b0e, /* MC_EMEM_ARB_DA_COVERS */ 0x73a91b0f, /* MC_EMEM_ARB_MISC0 */ 0x70000f03, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x0000000f, /* MC_MLL_MPCORER_PTSA_RATE */ 0x00000120, /* MC_PTSA_GRANT_DECREMENT */ 0x00aa0038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00aa0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x00aa003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00aa0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00aa0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00aa0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00aa0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00aa0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x0008001d, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x000000aa, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00aa0022, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00aa0018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00aa0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x00aa0024, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x000000aa, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 
0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00aa0065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x00aa0025, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73300000, /* EMC_CFG */ 0x0000089d, /* EMC_CFG_2 */ 0x00040008, /* EMC_SEL_DPD_CTRL */ 0xe00e0069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80000b61, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200010, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 1440, /* expected dvfs latency (ns) */ }, { 0x19, /* V5.0.14 */ "04_792000_06_V5.0.14_V1.1", /* DVFS table version */ 792000, /* SDRAM frequency */ 980, /* min voltage */ 980, /* gpu min voltage */ "pllm_ud", /* clock source id */ 0x80000000, /* CLK_SOURCE_EMC */ 165, /* number of burst_regs */ 31, /* number of up_down_regs */ { 0x00000024, /* EMC_RC */ 0x000000cd, /* EMC_RFC */ 0x00000000, /* EMC_RFC_SLR */ 0x00000019, /* EMC_RAS */ 0x0000000a, /* EMC_RP */ 0x00000008, /* EMC_R2W */ 0x0000000d, /* EMC_W2R */ 0x00000004, /* EMC_R2P */ 0x00000013, /* EMC_W2P */ 0x0000000a, /* EMC_RD_RCD */ 0x0000000a, /* EMC_WR_RCD */ 0x00000003, /* EMC_RRD */ 0x00000002, /* EMC_REXT */ 0x00000000, /* EMC_WEXT */ 0x00000006, /* EMC_WDV */ 0x00000006, /* EMC_WDV_MASK */ 0x0000000b, /* EMC_QUSE */ 0x00000002, /* EMC_QUSE_WIDTH */ 0x00000000, /* EMC_IBDLY */ 0x00000002, /* EMC_EINPUT */ 0x0000000d, /* EMC_EINPUT_DURATION */ 0x00080000, /* EMC_PUTERM_EXTRA */ 0x00000004, /* EMC_PUTERM_WIDTH */ 0x00000000, /* EMC_PUTERM_ADJ */ 0x00000000, /* EMC_CDB_CNTL_1 */ 0x00000000, /* EMC_CDB_CNTL_2 */ 0x00000000, /* EMC_CDB_CNTL_3 */ 0x00000001, /* EMC_QRST */ 0x00000014, /* EMC_QSAFE */ 0x00000018, /* EMC_RDV */ 0x0000001a, /* EMC_RDV_MASK */ 
0x000017e2, /* EMC_REFRESH */ 0x00000000, /* EMC_BURST_REFRESH_NUM */ 0x000005f8, /* EMC_PRE_REFRESH_REQ_CNT */ 0x00000003, /* EMC_PDEX2WR */ 0x00000011, /* EMC_PDEX2RD */ 0x00000001, /* EMC_PCHG2PDEN */ 0x00000000, /* EMC_ACT2PDEN */ 0x000000c7, /* EMC_AR2PDEN */ 0x00000018, /* EMC_RW2PDEN */ 0x000000d7, /* EMC_TXSR */ 0x00000200, /* EMC_TXSRDLL */ 0x00000005, /* EMC_TCKE */ 0x00000006, /* EMC_TCKESR */ 0x00000005, /* EMC_TPD */ 0x0000001d, /* EMC_TFAW */ 0x00000000, /* EMC_TRPAB */ 0x00000008, /* EMC_TCLKSTABLE */ 0x00000008, /* EMC_TCLKSTOP */ 0x00001822, /* EMC_TREFBW */ 0x00000000, /* EMC_FBIO_CFG6 */ 0x00000000, /* EMC_ODT_WRITE */ 0x00000000, /* EMC_ODT_READ */ 0x104ab098, /* EMC_FBIO_CFG5 */ 0xe00700b1, /* EMC_CFG_DIG_DLL */ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */ 0x00000008, /* EMC_DLL_XFORM_DQS0 */ 0x00000008, /* EMC_DLL_XFORM_DQS1 */ 0x00000008, /* EMC_DLL_XFORM_DQS2 */ 0x00000008, /* EMC_DLL_XFORM_DQS3 */ 0x00000008, /* EMC_DLL_XFORM_DQS4 */ 0x00000008, /* EMC_DLL_XFORM_DQS5 */ 0x00000008, /* EMC_DLL_XFORM_DQS6 */ 0x00000008, /* EMC_DLL_XFORM_DQS7 */ 0x00000008, /* EMC_DLL_XFORM_DQS8 */ 0x00000008, /* EMC_DLL_XFORM_DQS9 */ 0x00000008, /* EMC_DLL_XFORM_DQS10 */ 0x00000008, /* EMC_DLL_XFORM_DQS11 */ 0x00000008, /* EMC_DLL_XFORM_DQS12 */ 0x00000008, /* EMC_DLL_XFORM_DQS13 */ 0x00000008, /* EMC_DLL_XFORM_DQS14 */ 0x00000008, /* EMC_DLL_XFORM_DQS15 */ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */ 0x00034000, /* EMC_DLL_XFORM_ADDR0 */ 0x00034000, /* EMC_DLL_XFORM_ADDR1 */ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */ 0x00034000, /* EMC_DLL_XFORM_ADDR3 */ 0x00034000, /* EMC_DLL_XFORM_ADDR4 */ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */ 
0x00000000, /* EMC_DLL_XFORM_QUSE10 */ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS0 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS1 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS2 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS3 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS4 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS5 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS6 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS7 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS8 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS9 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS10 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS11 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS12 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS13 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS14 */ 0x00000005, /* EMC_DLI_TRIM_TXDQS15 */ 0x0000000a, /* EMC_DLL_XFORM_DQ0 */ 0x0000000a, /* EMC_DLL_XFORM_DQ1 */ 0x0000000a, /* EMC_DLL_XFORM_DQ2 */ 0x0000000a, /* EMC_DLL_XFORM_DQ3 */ 0x0000000a, /* EMC_DLL_XFORM_DQ4 */ 0x0000000a, /* EMC_DLL_XFORM_DQ5 */ 0x0000000a, /* EMC_DLL_XFORM_DQ6 */ 0x0000000a, /* EMC_DLL_XFORM_DQ7 */ 0x100002a0, /* EMC_XM2CMDPADCTRL */ 0x00000000, /* EMC_XM2CMDPADCTRL4 */ 0x00111111, /* EMC_XM2CMDPADCTRL5 */ 0x0120113d, /* EMC_XM2DQSPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL2 */ 0x00000000, /* EMC_XM2DQPADCTRL3 */ 0x77ffc085, /* EMC_XM2CLKPADCTRL */ 0x00000000, /* EMC_XM2CLKPADCTRL2 */ 0x81f1f108, /* EMC_XM2COMPPADCTRL */ 0x07070004, /* EMC_XM2VTTGENPADCTRL */ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */ 0x61861820, /* EMC_XM2DQSPADCTRL3 */ 0x00514514, /* EMC_XM2DQSPADCTRL4 */ 0x00514514, /* EMC_XM2DQSPADCTRL5 */ 0x61861800, /* EMC_XM2DQSPADCTRL6 */ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */ 0x00000000, /* EMC_TXDSRVTTGEN */ 0x00000000, /* EMC_FBIO_SPARE */ 0x00020000, /* EMC_ZCAL_INTERVAL */ 0x00000100, /* EMC_ZCAL_WAIT_CNT */ 0x00f7000e, /* EMC_MRS_WAIT_CNT */ 0x00f7000e, /* EMC_MRS_WAIT_CNT2 */ 0x00000000, /* 
EMC_CTT */ 0x00000004, /* EMC_CTT_DURATION */ 0x00004080, /* EMC_CFG_PIPE */ 0x80003012, /* EMC_DYN_SELF_REF_CONTROL */ 0x0000000f, /* EMC_QPOP */ 0x0e00000b, /* MC_EMEM_ARB_CFG */ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */ 0x00000004, /* MC_EMEM_ARB_TIMING_RCD */ 0x00000005, /* MC_EMEM_ARB_TIMING_RP */ 0x00000013, /* MC_EMEM_ARB_TIMING_RC */ 0x0000000c, /* MC_EMEM_ARB_TIMING_RAS */ 0x0000000f, /* MC_EMEM_ARB_TIMING_FAW */ 0x00000002, /* MC_EMEM_ARB_TIMING_RRD */ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */ 0x0000000c, /* MC_EMEM_ARB_TIMING_WAP2PRE */ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */ 0x00000006, /* MC_EMEM_ARB_TIMING_R2W */ 0x00000008, /* MC_EMEM_ARB_TIMING_W2R */ 0x08060202, /* MC_EMEM_ARB_DA_TURNS */ 0x00170e13, /* MC_EMEM_ARB_DA_COVERS */ 0x736c2414, /* MC_EMEM_ARB_MISC0 */ 0x70000f02, /* MC_EMEM_ARB_MISC1 */ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */ }, { 0x00000013, /* MC_MLL_MPCORER_PTSA_RATE */ 0x0000017c, /* MC_PTSA_GRANT_DECREMENT */ 0x00810038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */ 0x00810038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */ 0x0081003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */ 0x00810090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */ 0x00810041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */ 0x00810090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */ 0x00810041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */ 0x00810080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */ 0x00080016, /* MC_LATENCY_ALLOWANCE_HC_0 */ 0x00000081, /* MC_LATENCY_ALLOWANCE_HC_1 */ 0x00810004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */ 0x00810019, /* MC_LATENCY_ALLOWANCE_GPU_0 */ 0x00810018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */ 0x00810024, /* MC_LATENCY_ALLOWANCE_HDA_0 */ 0x0081001c, /* MC_LATENCY_ALLOWANCE_VIC_0 */ 0x00000081, /* MC_LATENCY_ALLOWANCE_VI2_0 */ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_ISP2_1 */ 0x00000036, /* 
MC_LATENCY_ALLOWANCE_ISP2B_0 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_VDE_2 */ 0x00810081, /* MC_LATENCY_ALLOWANCE_VDE_3 */ 0x00810065, /* MC_LATENCY_ALLOWANCE_SATA_0 */ 0x0081001c, /* MC_LATENCY_ALLOWANCE_AFI_0 */ }, 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */ 0x00000802, /* EMC_CTT_TERM_CTRL */ 0x73300000, /* EMC_CFG */ 0x0000089d, /* EMC_CFG_2 */ 0x00040000, /* EMC_SEL_DPD_CTRL */ 0xe0070069, /* EMC_CFG_DIG_DLL */ 0x00000000, /* EMC_BGBIAS_CTL0 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */ 0x80000d71, /* Mode Register 0 */ 0x80100002, /* Mode Register 1 */ 0x80200018, /* Mode Register 2 */ 0x00000000, /* Mode Register 4 */ 1200, /* expected dvfs latency (ns) */ }, }; static struct tegra12_emc_pdata ardbeg_ddr3_emc_pdata_pm358 = { .description = "ardbeg_emc_tables", .tables = ardbeg_ddr3_emc_table_pm358, .num_tables = ARRAY_SIZE(ardbeg_ddr3_emc_table_pm358), }; static struct tegra12_emc_pdata ardbeg_ddr3_emc_pdata_pm359 = { .description = "ardbeg_emc_tables", #ifndef CONFIG_ARCH_TEGRA_13x_SOC .tables = ardbeg_ddr3_emc_table_pm359, .num_tables = ARRAY_SIZE(ardbeg_ddr3_emc_table_pm359), #else .tables = t132_laguna_erss_ddr3_emc_table_pm359, .num_tables = ARRAY_SIZE(t132_laguna_erss_ddr3_emc_table_pm359), #endif }; static struct tegra12_emc_pdata ardbeg_emc_pdata = { .description = "ardbeg_emc_tables", .tables = ardbeg_emc_table, .num_tables = ARRAY_SIZE(ardbeg_emc_table), }; static struct tegra12_emc_pdata ardbeg_4GB_emc_pdata = { .description = "ardbeg_emc_tables", .tables = ardbeg_4GB_emc_table, .num_tables = ARRAY_SIZE(ardbeg_4GB_emc_table), }; static struct tegra12_emc_pdata ardbeg_lpddr3_emc_pdata = { .description = "ardbeg_emc_tables", .tables = ardbeg_lpddr3_emc_table, .tables_derated = 
ardbeg_lpddr3_emc_table_der, .num_tables = ARRAY_SIZE(ardbeg_lpddr3_emc_table), }; static struct tegra12_emc_pdata ardbeg_lpddr3_emc_pdata_E1781 = { .description = "ardbeg_emc_tables", .tables = ardbeg_lpddr3_emc_table_E1781, .num_tables = ARRAY_SIZE(ardbeg_lpddr3_emc_table_E1781), }; static struct tegra12_emc_pdata jetson_tk1_2GB_emc_pdata = { .description = "ardbeg_emc_tables", .tables = jetson_tk1_ddr3_emc_table, .num_tables = ARRAY_SIZE(jetson_tk1_ddr3_emc_table), }; static struct tegra12_emc_pdata pm375_2GB_emc_pdata = { .description = "ardbeg_emc_tables", .tables = pm375_ddr3_emc_table, .num_tables = ARRAY_SIZE(pm375_ddr3_emc_table), }; /* * Also handles Ardbeg init. */ int __init ardbeg_emc_init(void) { struct board_info bi; /* If Device Tree Partition contains emc-tables, load them */ if (of_find_compatible_node(NULL, NULL, "nvidia,tegra12-emc")) { pr_info("Loading EMC tables from DeviceTree.\n"); } else { tegra_get_board_info(&bi); switch (bi.board_id) { case BOARD_PM358: pr_info("Loading PM358 EMC tables.\n"); tegra_emc_device.dev.platform_data = &ardbeg_ddr3_emc_pdata_pm358; break; case BOARD_PM359: pr_info("Loading PM359 EMC tables.\n"); tegra_emc_device.dev.platform_data = &ardbeg_ddr3_emc_pdata_pm359; break; case BOARD_E1780: case BOARD_E1782: if (tegra_get_memory_type()) { pr_info("Loading Ardbeg 4GB EMC tables.\n"); tegra_emc_device.dev.platform_data = &ardbeg_4GB_emc_pdata; } else { pr_info("Loading Ardbeg EMC tables.\n"); tegra_emc_device.dev.platform_data = &ardbeg_emc_pdata; } break; case BOARD_E1792: pr_info("Loading Ardbeg EMC tables.\n"); tegra_emc_device.dev.platform_data = &ardbeg_lpddr3_emc_pdata; break; case BOARD_E1781: pr_info("Loading Ardbeg (1781) EMC tables\n"); tegra_emc_device.dev.platform_data = &ardbeg_lpddr3_emc_pdata_E1781; break; case BOARD_PM375: if (of_machine_is_compatible("nvidia,jetson-tk1")) { pr_info("Loading jetson TK1 EMC tables.\n"); tegra_emc_device.dev.platform_data = &jetson_tk1_2GB_emc_pdata; } else { 
pr_info("Loading PM375 EMC tables.\n"); tegra_emc_device.dev.platform_data = &pm375_2GB_emc_pdata; } break; default: pr_info("emc dvfs table not present\n"); return -EINVAL; } platform_device_register(&tegra_emc_device); } tegra12_emc_init(); return 0; }
gpl-2.0
oxp-edward/linux-3.5
arch/sh/boards/mach-ap325rxa/setup.c
54
16720
/*
 * Renesas - AP-325RXA
 * (Compatible with Algo System ., LTD. - AP-320A)
 *
 * Copyright (C) 2008 Renesas Solutions Corp.
 * Author : Yusuke Goda <goda.yuske@renesas.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/sh_flctl.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/smsc911x.h>
#include <linux/gpio.h>
#include <linux/videodev2.h>
#include <linux/sh_intc.h>
#include <media/ov772x.h>
#include <media/soc_camera.h>
#include <media/soc_camera_platform.h>
#include <media/sh_mobile_ceu.h>
#include <video/sh_mobile_lcdc.h>
#include <asm/io.h>
#include <asm/clock.h>
#include <asm/suspend.h>
#include <cpu/sh7723.h>

/* On-board SMSC LAN9118 Ethernet controller (32-bit bus, active-low IRQ) */
static struct smsc911x_platform_config smsc911x_config = {
	.phy_interface	= PHY_INTERFACE_MODE_MII,
	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
	.irq_type	= SMSC911X_IRQ_TYPE_OPEN_DRAIN,
	.flags		= SMSC911X_USE_32BIT,
};

static struct resource smsc9118_resources[] = {
	[0] = {
		.start	= 0xb6080000,
		.end	= 0xb60fffff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= evt2irq(0x660),
		.end	= evt2irq(0x660),
		.flags	= IORESOURCE_IRQ,
	}
};

static struct platform_device smsc9118_device = {
	.name		= "smsc911x",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(smsc9118_resources),
	.resource	= smsc9118_resources,
	.dev		= {
		.platform_data = &smsc911x_config,
	},
};

/*
 * AP320 and AP325RXA have CPLD data in NOR Flash (0xA80000-0xABFFFF).
 * If this area is erased, this board can not boot.
 */
static struct mtd_partition ap325rxa_nor_flash_partitions[] = {
	{
		.name = "uboot",
		.offset = 0,
		.size = (1 * 1024 * 1024),
		.mask_flags = MTD_WRITEABLE,	/* Read-only */
	}, {
		.name = "kernel",
		.offset = MTDPART_OFS_APPEND,
		.size = (2 * 1024 * 1024),
	}, {
		.name = "free-area0",
		.offset = MTDPART_OFS_APPEND,
		.size = ((7 * 1024 * 1024) + (512 * 1024)),
	}, {
		/* CPLD configuration data - must never be erased, see above */
		.name = "CPLD-Data",
		.offset = MTDPART_OFS_APPEND,
		.mask_flags = MTD_WRITEABLE,	/* Read-only */
		.size = (1024 * 128 * 2),
	}, {
		.name = "free-area1",
		.offset = MTDPART_OFS_APPEND,
		.size = MTDPART_SIZ_FULL,
	},
};

static struct physmap_flash_data ap325rxa_nor_flash_data = {
	.width		= 2,
	.parts		= ap325rxa_nor_flash_partitions,
	.nr_parts	= ARRAY_SIZE(ap325rxa_nor_flash_partitions),
};

static struct resource ap325rxa_nor_flash_resources[] = {
	[0] = {
		.name	= "NOR Flash",
		.start	= 0x00000000,
		.end	= 0x00ffffff,
		.flags	= IORESOURCE_MEM,
	}
};

static struct platform_device ap325rxa_nor_flash_device = {
	.name		= "physmap-flash",
	.resource	= ap325rxa_nor_flash_resources,
	.num_resources	= ARRAY_SIZE(ap325rxa_nor_flash_resources),
	.dev		= {
		.platform_data = &ap325rxa_nor_flash_data,
	},
};

/* NAND flash behind the SH7723 FLCTL controller */
static struct mtd_partition nand_partition_info[] = {
	{
		.name	= "nand_data",
		.offset	= 0,
		.size	= MTDPART_SIZ_FULL,
	},
};

static struct resource nand_flash_resources[] = {
	[0] = {
		.start	= 0xa4530000,
		.end	= 0xa45300ff,
		.flags	= IORESOURCE_MEM,
	}
};

static struct sh_flctl_platform_data nand_flash_data = {
	.parts		= nand_partition_info,
	.nr_parts	= ARRAY_SIZE(nand_partition_info),
	.flcmncr_val	= FCKSEL_E | TYPESEL_SET | NANWF_E,
	.has_hwecc	= 1,
};

static struct platform_device nand_flash_device = {
	.name		= "sh_flctl",
	.resource	= nand_flash_resources,
	.num_resources	= ARRAY_SIZE(nand_flash_resources),
	.dev		= {
		.platform_data = &nand_flash_data,
	},
};

/* FPGA registers controlling the LCD panel and its backlight */
#define FPGA_LCDREG	0xB4100180
#define FPGA_BKLREG	0xB4100212
#define FPGA_LCDREG_VAL	0x0018
/* SH7723 pin-function controller registers used below */
#define PORT_MSELCRB	0xA4050182
#define PORT_HIZCRC	0xA405015C
#define PORT_DRVCRA	0xA405018A
#define PORT_DRVCRB	0xA405018C

/*
 * Backlight on/off for the LCDC framework: GPIO_PTS3 gates the backlight
 * (driven low = on per this code) and the FPGA register sets its level.
 * Always returns 0 (success).
 */
static int ap320_wvga_set_brightness(int brightness)
{
	if (brightness) {
		gpio_set_value(GPIO_PTS3, 0);
		__raw_writew(0x100, FPGA_BKLREG);
	} else {
		__raw_writew(0, FPGA_BKLREG);
		gpio_set_value(GPIO_PTS3, 1);
	}

	return 0;
}

/* Report backlight state by reading back the gating GPIO */
static int ap320_wvga_get_brightness(void)
{
	return gpio_get_value(GPIO_PTS3);
}

static void ap320_wvga_power_on(void)
{
	/* settle delay before enabling the panel */
	msleep(100);

	/* ASD AP-320/325 LCD ON */
	__raw_writew(FPGA_LCDREG_VAL, FPGA_LCDREG);
}

static void ap320_wvga_power_off(void)
{
	/* ASD AP-320/325 LCD OFF */
	__raw_writew(0, FPGA_LCDREG);
}

/* 800x480 WVGA timings for the LB070WV1 panel */
static const struct fb_videomode ap325rxa_lcdc_modes[] = {
	{
		.name = "LB070WV1",
		.xres = 800,
		.yres = 480,
		.left_margin = 32,
		.right_margin = 160,
		.hsync_len = 8,
		.upper_margin = 63,
		.lower_margin = 80,
		.vsync_len = 1,
		.sync = 0, /* hsync and vsync are active low */
	},
};

static struct sh_mobile_lcdc_info lcdc_info = {
	.clock_source = LCDC_CLK_EXTERNAL,
	.ch[0] = {
		.chan = LCDC_CHAN_MAINLCD,
		.fourcc = V4L2_PIX_FMT_RGB565,
		.interface_type = RGB18,
		.clock_divider = 1,
		.lcd_modes = ap325rxa_lcdc_modes,
		.num_modes = ARRAY_SIZE(ap325rxa_lcdc_modes),
		.panel_cfg = {
			.width = 152,	/* 7.0 inch */
			.height = 91,
			.display_on = ap320_wvga_power_on,
			.display_off = ap320_wvga_power_off,
		},
		.bl_info = {
			.name = "sh_mobile_lcdc_bl",
			.max_brightness = 1,
			.set_brightness = ap320_wvga_set_brightness,
			.get_brightness = ap320_wvga_get_brightness,
		},
	}
};

static struct resource lcdc_resources[] = {
	[0] = {
		.name	= "LCDC",
		.start	= 0xfe940000, /* P4-only space */
		.end	= 0xfe942fff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= evt2irq(0x580),
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device lcdc_device = {
	.name		= "sh_mobile_lcdc_fb",
	.num_resources	= ARRAY_SIZE(lcdc_resources),
	.resource	= lcdc_resources,
	.dev		= {
		.platform_data	= &lcdc_info,
	},
};

/* Assert/deassert the camera reset line; 10ms settle after each change */
static void camera_power(int val)
{
	gpio_set_value(GPIO_PTZ5, val); /* RST_CAM/RSTB */
	mdelay(10);
}

#ifdef CONFIG_I2C
/* support for the old ncm03j camera */
/* register/value pairs streamed to the sensor at I2C address 0x6e */
static unsigned char camera_ncm03j_magic[] =
{
	0x87, 0x00, 0x88, 0x08, 0x89, 0x01, 0x8A, 0xE8,
	0x1D, 0x00, 0x1E, 0x8A, 0x21, 0x00, 0x33, 0x36,
	0x36, 0x60, 0x37, 0x08, 0x3B, 0x31, 0x44, 0x0F,
	0x46, 0xF0, 0x4B, 0x28, 0x4C, 0x21, 0x4D, 0x55,
	0x4E, 0x1B, 0x4F, 0xC7, 0x50, 0xFC, 0x51, 0x12,
	0x58, 0x02, 0x66, 0xC0, 0x67, 0x46, 0x6B, 0xA0,
	0x6C, 0x34, 0x7E, 0x25, 0x7F, 0x25, 0x8D, 0x0F,
	0x92, 0x40, 0x93, 0x04, 0x94, 0x26, 0x95, 0x0A,
	0x99, 0x03, 0x9A, 0xF0, 0x9B, 0x14, 0x9D, 0x7A,
	0xC5, 0x02, 0xD6, 0x07, 0x59, 0x00, 0x5A, 0x1A,
	0x5B, 0x2A, 0x5C, 0x37, 0x5D, 0x42, 0x5E, 0x56,
	0xC8, 0x00, 0xC9, 0x1A, 0xCA, 0x2A, 0xCB, 0x37,
	0xCC, 0x42, 0xCD, 0x56, 0xCE, 0x00, 0xCF, 0x1A,
	0xD0, 0x2A, 0xD1, 0x37, 0xD2, 0x42, 0xD3, 0x56,
	0x5F, 0x68, 0x60, 0x87, 0x61, 0xA3, 0x62, 0xBC,
	0x63, 0xD4, 0x64, 0xEA, 0xD6, 0x0F,
};

/*
 * Probe for the ncm03j camera: power it up, attempt a single 2-byte
 * I2C transfer, then power it back down.  Returns the i2c_transfer()
 * result (1 on success, negative on error) or -ENODEV without bus 0.
 */
static int camera_probe(void)
{
	struct i2c_adapter *a = i2c_get_adapter(0);
	struct i2c_msg msg;
	int ret;

	if (!a)
		return -ENODEV;

	camera_power(1);
	msg.addr = 0x6e;
	msg.buf = camera_ncm03j_magic;
	msg.len = 2;
	msg.flags = 0;
	ret = i2c_transfer(a, &msg, 1);
	camera_power(0);

	return ret;
}

/*
 * Start capture: reset the sensor, then replay the whole magic
 * register/value table as individual 2-byte writes.  On the first
 * transfer error the remaining writes are skipped (ret stays negative).
 */
static int camera_set_capture(struct soc_camera_platform_info *info,
			      int enable)
{
	struct i2c_adapter *a = i2c_get_adapter(0);
	struct i2c_msg msg;
	int ret = 0;
	int i;

	camera_power(0);
	if (!enable)
		return 0; /* no disable for now */

	camera_power(1);
	for (i = 0; i < ARRAY_SIZE(camera_ncm03j_magic); i += 2) {
		u_int8_t buf[8];

		msg.addr = 0x6e;
		msg.buf = buf;
		msg.len = 2;
		msg.flags = 0;

		buf[0] = camera_ncm03j_magic[i];
		buf[1] = camera_ncm03j_magic[i + 1];

		ret = (ret < 0) ? ret : i2c_transfer(a, &msg, 1);
	}

	return ret;
}

static int ap325rxa_camera_add(struct soc_camera_device *icd);
static void ap325rxa_camera_del(struct soc_camera_device *icd);

static struct soc_camera_platform_info camera_info = {
	.format_name = "UYVY",
	.format_depth = 16,
	.format = {
		.code = V4L2_MBUS_FMT_UYVY8_2X8,
		.colorspace = V4L2_COLORSPACE_SMPTE170M,
		.field = V4L2_FIELD_NONE,
		.width = 640,
		.height = 480,
	},
	.mbus_param = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
	V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
	V4L2_MBUS_DATA_ACTIVE_HIGH,
	.mbus_type = V4L2_MBUS_PARALLEL,
	.set_capture = camera_set_capture,
};

static struct soc_camera_link camera_link = {
	.bus_id		= 0,
	.add_device	= ap325rxa_camera_add,
	.del_device	= ap325rxa_camera_del,
	.module_name	= "soc_camera_platform",
	.priv		= &camera_info,
};

static struct platform_device *camera_device;

static void ap325rxa_camera_release(struct device *dev)
{
	soc_camera_platform_release(&camera_device);
}

/*
 * Hotplug hook: register the platform camera, then verify the sensor
 * actually responds; unregister again if probing fails.
 */
static int ap325rxa_camera_add(struct soc_camera_device *icd)
{
	int ret = soc_camera_platform_add(icd, &camera_device, &camera_link,
					  ap325rxa_camera_release, 0);
	if (ret < 0)
		return ret;

	ret = camera_probe();
	if (ret < 0)
		soc_camera_platform_del(icd, camera_device, &camera_link);

	return ret;
}

static void ap325rxa_camera_del(struct soc_camera_device *icd)
{
	soc_camera_platform_del(icd, camera_device, &camera_link);
}
#endif /* CONFIG_I2C */

/* Power callback for the ov7725 sensor: pulse reset, leave on if mode set */
static int ov7725_power(struct device *dev, int mode)
{
	camera_power(0);
	if (mode)
		camera_power(1);

	return 0;
}

static struct sh_mobile_ceu_info sh_mobile_ceu_info = {
	.flags = SH_CEU_FLAG_USE_8BIT_BUS,
};

static struct resource ceu_resources[] = {
	[0] = {
		.name	= "CEU",
		.start	= 0xfe910000,
		.end	= 0xfe91009f,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= evt2irq(0x880),
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		/* place holder for contiguous memory */
	},
};

static struct platform_device ceu_device = {
	.name		= "sh_mobile_ceu",
	.id		= 0, /* "ceu0" clock */
	.num_resources	= ARRAY_SIZE(ceu_resources),
	.resource	= ceu_resources,
	.dev		= {
		.platform_data	= &sh_mobile_ceu_info,
	},
};

static struct resource sdhi0_cn3_resources[] = {
	[0] = {
		.name	= "SDHI0",
		.start	= 0x04ce0000,
		.end	= 0x04ce00ff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= evt2irq(0xe80),
		.flags	= IORESOURCE_IRQ,
	},
};

static struct sh_mobile_sdhi_info sdhi0_cn3_data = {
	.tmio_caps      = MMC_CAP_SDIO_IRQ,
};

static struct platform_device sdhi0_cn3_device = {
	.name		= "sh_mobile_sdhi",
	.id             = 0, /* "sdhi0" clock */
	.num_resources	= ARRAY_SIZE(sdhi0_cn3_resources),
	.resource	= sdhi0_cn3_resources,
	.dev = {
		.platform_data = &sdhi0_cn3_data,
	},
};

static struct resource sdhi1_cn7_resources[] = {
	[0] = {
		.name	= "SDHI1",
		.start	= 0x04cf0000,
		.end	= 0x04cf00ff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= evt2irq(0x4e0),
		.flags	= IORESOURCE_IRQ,
	},
};

static struct sh_mobile_sdhi_info sdhi1_cn7_data = {
	.tmio_caps      = MMC_CAP_SDIO_IRQ,
};

static struct platform_device sdhi1_cn7_device = {
	.name		= "sh_mobile_sdhi",
	.id             = 1, /* "sdhi1" clock */
	.num_resources	= ARRAY_SIZE(sdhi1_cn7_resources),
	.resource	= sdhi1_cn7_resources,
	.dev = {
		.platform_data = &sdhi1_cn7_data,
	},
};

/* PCF8563 RTC on I2C bus 0 */
static struct i2c_board_info __initdata ap325rxa_i2c_devices[] = {
	{
		I2C_BOARD_INFO("pcf8563", 0x51),
	},
};

static struct i2c_board_info ap325rxa_i2c_camera[] = {
	{
		I2C_BOARD_INFO("ov772x", 0x21),
	},
};

static struct ov772x_camera_info ov7725_info = {
	.flags		= OV772X_FLAG_VFLIP | OV772X_FLAG_HFLIP,
	.edgectrl	= OV772X_AUTO_EDGECTRL(0xf, 0),
};

static struct soc_camera_link ov7725_link = {
	.bus_id		= 0,
	.power		= ov7725_power,
	.board_info	= &ap325rxa_i2c_camera[0],
	.i2c_adapter_id	= 0,
	.priv		= &ov7725_info,
};

/* Two soc-camera instances: [0] = ov7725, [1] = platform (ncm03j) camera */
static struct platform_device ap325rxa_camera[] = {
	{
		.name	= "soc-camera-pdrv",
		.id	= 0,
		.dev	= {
			.platform_data = &ov7725_link,
		},
	}, {
		.name	= "soc-camera-pdrv",
		.id	= 1,
		.dev	= {
			.platform_data = &camera_link,
		},
	},
};

static struct platform_device *ap325rxa_devices[] __initdata = {
	&smsc9118_device,
	&ap325rxa_nor_flash_device,
	&lcdc_device,
	&ceu_device,
	&nand_flash_device,
	&sdhi0_cn3_device,
	&sdhi1_cn7_device,
	&ap325rxa_camera[0],
	&ap325rxa_camera[1],
};

/* Board-specific SDRAM self-refresh code fragments (assembly, linked in) */
extern char ap325rxa_sdram_enter_start;
extern char ap325rxa_sdram_enter_end;
extern char ap325rxa_sdram_leave_start;
extern char ap325rxa_sdram_leave_end;

/*
 * One-time board setup: register the self-refresh handlers, claim and
 * configure every pin function used by the devices above, then register
 * the platform devices.  Returns platform_add_devices() result.
 * NOTE(review): gpio_request() return values are ignored throughout,
 * matching common board-file practice of the era.
 */
static int __init ap325rxa_devices_setup(void)
{
	/* register board specific self-refresh code */
	sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF,
					&ap325rxa_sdram_enter_start,
					&ap325rxa_sdram_enter_end,
					&ap325rxa_sdram_leave_start,
					&ap325rxa_sdram_leave_end);

	/* LD3 and LD4 LEDs */
	gpio_request(GPIO_PTX5, NULL); /* RUN */
	gpio_direction_output(GPIO_PTX5, 1);
	gpio_export(GPIO_PTX5, 0);

	gpio_request(GPIO_PTX4, NULL); /* INDICATOR */
	gpio_direction_output(GPIO_PTX4, 0);
	gpio_export(GPIO_PTX4, 0);

	/* SW1 input */
	gpio_request(GPIO_PTF7, NULL); /* MODE */
	gpio_direction_input(GPIO_PTF7);
	gpio_export(GPIO_PTF7, 0);

	/* LCDC */
	gpio_request(GPIO_FN_LCDD15, NULL);
	gpio_request(GPIO_FN_LCDD14, NULL);
	gpio_request(GPIO_FN_LCDD13, NULL);
	gpio_request(GPIO_FN_LCDD12, NULL);
	gpio_request(GPIO_FN_LCDD11, NULL);
	gpio_request(GPIO_FN_LCDD10, NULL);
	gpio_request(GPIO_FN_LCDD9, NULL);
	gpio_request(GPIO_FN_LCDD8, NULL);
	gpio_request(GPIO_FN_LCDD7, NULL);
	gpio_request(GPIO_FN_LCDD6, NULL);
	gpio_request(GPIO_FN_LCDD5, NULL);
	gpio_request(GPIO_FN_LCDD4, NULL);
	gpio_request(GPIO_FN_LCDD3, NULL);
	gpio_request(GPIO_FN_LCDD2, NULL);
	gpio_request(GPIO_FN_LCDD1, NULL);
	gpio_request(GPIO_FN_LCDD0, NULL);
	gpio_request(GPIO_FN_LCDLCLK_PTR, NULL);
	gpio_request(GPIO_FN_LCDDCK, NULL);
	gpio_request(GPIO_FN_LCDVEPWC, NULL);
	gpio_request(GPIO_FN_LCDVCPWC, NULL);
	gpio_request(GPIO_FN_LCDVSYN, NULL);
	gpio_request(GPIO_FN_LCDHSYN, NULL);
	gpio_request(GPIO_FN_LCDDISP, NULL);
	gpio_request(GPIO_FN_LCDDON, NULL);

	/* LCD backlight */
	gpio_request(GPIO_PTS3, NULL);
	gpio_direction_output(GPIO_PTS3, 1);

	/* CEU */
	gpio_request(GPIO_FN_VIO_CLK2, NULL);
	gpio_request(GPIO_FN_VIO_VD2, NULL);
	gpio_request(GPIO_FN_VIO_HD2, NULL);
	gpio_request(GPIO_FN_VIO_FLD, NULL);
	gpio_request(GPIO_FN_VIO_CKO, NULL);
	gpio_request(GPIO_FN_VIO_D15, NULL);
	gpio_request(GPIO_FN_VIO_D14, NULL);
	gpio_request(GPIO_FN_VIO_D13, NULL);
	gpio_request(GPIO_FN_VIO_D12, NULL);
	gpio_request(GPIO_FN_VIO_D11, NULL);
	gpio_request(GPIO_FN_VIO_D10, NULL);
	gpio_request(GPIO_FN_VIO_D9, NULL);
	gpio_request(GPIO_FN_VIO_D8, NULL);

	gpio_request(GPIO_PTZ7, NULL);
	gpio_direction_output(GPIO_PTZ7, 0); /* OE_CAM */
	gpio_request(GPIO_PTZ6, NULL);
	gpio_direction_output(GPIO_PTZ6, 0); /* STBY_CAM */
	gpio_request(GPIO_PTZ5, NULL);
	gpio_direction_output(GPIO_PTZ5, 0); /* RST_CAM */
	gpio_request(GPIO_PTZ4, NULL);
	gpio_direction_output(GPIO_PTZ4, 0); /* SADDR */

	__raw_writew(__raw_readw(PORT_MSELCRB) & ~0x0001, PORT_MSELCRB);

	/* FLCTL */
	gpio_request(GPIO_FN_FCE, NULL);
	gpio_request(GPIO_FN_NAF7, NULL);
	gpio_request(GPIO_FN_NAF6, NULL);
	gpio_request(GPIO_FN_NAF5, NULL);
	gpio_request(GPIO_FN_NAF4, NULL);
	gpio_request(GPIO_FN_NAF3, NULL);
	gpio_request(GPIO_FN_NAF2, NULL);
	gpio_request(GPIO_FN_NAF1, NULL);
	gpio_request(GPIO_FN_NAF0, NULL);
	gpio_request(GPIO_FN_FCDE, NULL);
	gpio_request(GPIO_FN_FOE, NULL);
	gpio_request(GPIO_FN_FSC, NULL);
	gpio_request(GPIO_FN_FWE, NULL);
	gpio_request(GPIO_FN_FRB, NULL);

	__raw_writew(0, PORT_HIZCRC);
	__raw_writew(0xFFFF, PORT_DRVCRA);
	__raw_writew(0xFFFF, PORT_DRVCRB);

	/* reserve 4 MiB of contiguous memory for CEU capture buffers */
	platform_resource_setup_memory(&ceu_device, "ceu", 4 << 20);

	/* SDHI0 - CN3 - SD CARD */
	gpio_request(GPIO_FN_SDHI0CD_PTD, NULL);
	gpio_request(GPIO_FN_SDHI0WP_PTD, NULL);
	gpio_request(GPIO_FN_SDHI0D3_PTD, NULL);
	gpio_request(GPIO_FN_SDHI0D2_PTD, NULL);
	gpio_request(GPIO_FN_SDHI0D1_PTD, NULL);
	gpio_request(GPIO_FN_SDHI0D0_PTD, NULL);
	gpio_request(GPIO_FN_SDHI0CMD_PTD, NULL);
	gpio_request(GPIO_FN_SDHI0CLK_PTD, NULL);

	/* SDHI1 - CN7 - MICRO SD CARD */
	gpio_request(GPIO_FN_SDHI1CD, NULL);
	gpio_request(GPIO_FN_SDHI1D3, NULL);
	gpio_request(GPIO_FN_SDHI1D2, NULL);
	gpio_request(GPIO_FN_SDHI1D1, NULL);
	gpio_request(GPIO_FN_SDHI1D0, NULL);
	gpio_request(GPIO_FN_SDHI1CMD, NULL);
	gpio_request(GPIO_FN_SDHI1CLK, NULL);

	i2c_register_board_info(0, ap325rxa_i2c_devices,
				ARRAY_SIZE(ap325rxa_i2c_devices));

	return platform_add_devices(ap325rxa_devices,
				ARRAY_SIZE(ap325rxa_devices));
}
arch_initcall(ap325rxa_devices_setup);

/* Return the board specific boot mode pin configuration */
static int ap325rxa_mode_pins(void)
{
	/* MD0=0, MD1=0, MD2=0: Clock Mode 0
	 * MD3=0: 16-bit Area0 Bus Width
	 * MD5=1: Little Endian
	 * TSTMD=1, MD8=1: Test Mode Disabled
	 */
	return MODE_PIN5 | MODE_PIN8;
}

static struct sh_machine_vector mv_ap325rxa __initmv = {
	.mv_name = "AP-325RXA",
	.mv_mode_pins = ap325rxa_mode_pins,
};
gpl-2.0
scjen/rts-pj2
drivers/rtc/rtc-wm831x.c
54
14579
/*
 * Real Time Clock driver for Wolfson Microelectronics WM831x
 *
 * Copyright (C) 2009 Wolfson Microelectronics PLC.
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/completion.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/random.h>

/*
 * R16416 (0x4020) - RTC Write Counter
 */
#define WM831X_RTC_WR_CNT_MASK                  0xFFFF  /* RTC_WR_CNT - [15:0] */
#define WM831X_RTC_WR_CNT_SHIFT                      0  /* RTC_WR_CNT - [15:0] */
#define WM831X_RTC_WR_CNT_WIDTH                     16  /* RTC_WR_CNT - [15:0] */

/*
 * R16417 (0x4021) - RTC Time 1
 */
#define WM831X_RTC_TIME_MASK                    0xFFFF  /* RTC_TIME - [15:0] */
#define WM831X_RTC_TIME_SHIFT                        0  /* RTC_TIME - [15:0] */
#define WM831X_RTC_TIME_WIDTH                       16  /* RTC_TIME - [15:0] */

/*
 * R16418 (0x4022) - RTC Time 2
 * (Same field layout as Time 1; the duplicated defines are intentional,
 * generated straight from the register map.)
 */
#define WM831X_RTC_TIME_MASK                    0xFFFF  /* RTC_TIME - [15:0] */
#define WM831X_RTC_TIME_SHIFT                        0  /* RTC_TIME - [15:0] */
#define WM831X_RTC_TIME_WIDTH                       16  /* RTC_TIME - [15:0] */

/*
 * R16419 (0x4023) - RTC Alarm 1
 */
#define WM831X_RTC_ALM_MASK                     0xFFFF  /* RTC_ALM - [15:0] */
#define WM831X_RTC_ALM_SHIFT                         0  /* RTC_ALM - [15:0] */
#define WM831X_RTC_ALM_WIDTH                        16  /* RTC_ALM - [15:0] */

/*
 * R16420 (0x4024) - RTC Alarm 2
 */
#define WM831X_RTC_ALM_MASK                     0xFFFF  /* RTC_ALM - [15:0] */
#define WM831X_RTC_ALM_SHIFT                         0  /* RTC_ALM - [15:0] */
#define WM831X_RTC_ALM_WIDTH                        16  /* RTC_ALM - [15:0] */

/*
 * R16421 (0x4025) - RTC Control
 */
#define WM831X_RTC_VALID                        0x8000  /* RTC_VALID */
#define WM831X_RTC_VALID_MASK                   0x8000  /* RTC_VALID */
#define WM831X_RTC_VALID_SHIFT                      15  /* RTC_VALID */
#define WM831X_RTC_VALID_WIDTH                       1  /* RTC_VALID */
#define WM831X_RTC_SYNC_BUSY                    0x4000  /* RTC_SYNC_BUSY */
#define WM831X_RTC_SYNC_BUSY_MASK               0x4000  /* RTC_SYNC_BUSY */
#define WM831X_RTC_SYNC_BUSY_SHIFT                  14  /* RTC_SYNC_BUSY */
#define WM831X_RTC_SYNC_BUSY_WIDTH                   1  /* RTC_SYNC_BUSY */
#define WM831X_RTC_ALM_ENA                      0x0400  /* RTC_ALM_ENA */
#define WM831X_RTC_ALM_ENA_MASK                 0x0400  /* RTC_ALM_ENA */
#define WM831X_RTC_ALM_ENA_SHIFT                    10  /* RTC_ALM_ENA */
#define WM831X_RTC_ALM_ENA_WIDTH                     1  /* RTC_ALM_ENA */
#define WM831X_RTC_PINT_FREQ_MASK               0x0070  /* RTC_PINT_FREQ - [6:4] */
#define WM831X_RTC_PINT_FREQ_SHIFT                   4  /* RTC_PINT_FREQ - [6:4] */
#define WM831X_RTC_PINT_FREQ_WIDTH                   3  /* RTC_PINT_FREQ - [6:4] */

/*
 * R16422 (0x4026) - RTC Trim
 */
#define WM831X_RTC_TRIM_MASK                    0x03FF  /* RTC_TRIM - [9:0] */
#define WM831X_RTC_TRIM_SHIFT                        0  /* RTC_TRIM - [9:0] */
#define WM831X_RTC_TRIM_WIDTH                       10  /* RTC_TRIM - [9:0] */

/* Bounded retry counts for register polling loops below */
#define WM831X_SET_TIME_RETRIES	5
#define WM831X_GET_TIME_RETRIES	5

/* Per-device driver state, allocated in probe */
struct wm831x_rtc {
	struct wm831x *wm831x;		/* parent MFD handle */
	struct rtc_device *rtc;		/* registered RTC class device */
	unsigned int alarm_enabled:1;	/* cached ALM_ENA state */
};

/*
 * Feed the RTC write counter into the entropy pool at probe time.
 */
static void wm831x_rtc_add_randomness(struct wm831x *wm831x)
{
	int ret;
	u16 reg;

	/*
	 * The write counter contains a pseudo-random number which is
	 * regenerated every time we set the RTC so it should be a
	 * useful per-system source of entropy.
	 */
	ret = wm831x_reg_read(wm831x, WM831X_RTC_WRITE_COUNTER);
	if (ret >= 0) {
		reg = ret;
		add_device_randomness(&reg, sizeof(reg));
	} else {
		dev_warn(wm831x->dev, "Failed to read RTC write counter: %d\n",
			 ret);
	}
}

/*
 * Read current time and date in RTC
 */
static int wm831x_rtc_readtime(struct device *dev, struct rtc_time *tm)
{
	struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(dev);
	struct wm831x *wm831x = wm831x_rtc->wm831x;
	u16 time1[2], time2[2];
	int ret;
	int count = 0;

	/* Has the RTC been programmed? */
	ret = wm831x_reg_read(wm831x, WM831X_RTC_CONTROL);
	if (ret < 0) {
		dev_err(dev, "Failed to read RTC control: %d\n", ret);
		return ret;
	}
	if (!(ret & WM831X_RTC_VALID)) {
		dev_dbg(dev, "RTC not yet configured\n");
		return -EINVAL;
	}

	/* Read twice to make sure we don't read a corrupt, partially
	 * incremented, value.
	 */
	do {
		ret = wm831x_bulk_read(wm831x, WM831X_RTC_TIME_1,
				       2, time1);
		if (ret != 0)
			continue;

		ret = wm831x_bulk_read(wm831x, WM831X_RTC_TIME_1,
				       2, time2);
		if (ret != 0)
			continue;

		/* Two consistent reads: combine the 16-bit halves into the
		 * 32-bit seconds counter and convert. */
		if (memcmp(time1, time2, sizeof(time1)) == 0) {
			u32 time = (time1[0] << 16) | time1[1];

			rtc_time_to_tm(time, tm);
			return rtc_valid_tm(tm);
		}
	} while (++count < WM831X_GET_TIME_RETRIES);

	dev_err(dev, "Timed out reading current time\n");

	return -EIO;
}

/*
 * Set current time and date in RTC
 */
static int wm831x_rtc_set_mmss(struct device *dev, unsigned long time)
{
	struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(dev);
	struct wm831x *wm831x = wm831x_rtc->wm831x;
	struct rtc_time new_tm;
	unsigned long new_time;
	int ret;
	int count = 0;

	ret = wm831x_reg_write(wm831x, WM831X_RTC_TIME_1,
			       (time >> 16) & 0xffff);
	if (ret < 0) {
		dev_err(dev, "Failed to write TIME_1: %d\n", ret);
		return ret;
	}

	ret = wm831x_reg_write(wm831x, WM831X_RTC_TIME_2, time & 0xffff);
	if (ret < 0) {
		dev_err(dev, "Failed to write TIME_2: %d\n", ret);
		return ret;
	}

	/* Wait for the update to complete - should happen first time
	 * round but be conservative.
	 *
	 * NOTE(review): the loop polls SYNC_BUSY; a failed register read is
	 * treated as "busy" so it is retried.  The loop/exit polarity here
	 * matches the shipped driver — confirm against the WM831x datasheet
	 * before touching it.
	 */
	do {
		msleep(1);

		ret = wm831x_reg_read(wm831x, WM831X_RTC_CONTROL);
		if (ret < 0)
			ret = WM831X_RTC_SYNC_BUSY;
	} while (!(ret & WM831X_RTC_SYNC_BUSY) &&
		 ++count < WM831X_SET_TIME_RETRIES);

	if (ret & WM831X_RTC_SYNC_BUSY) {
		dev_err(dev, "Timed out writing RTC update\n");
		return -EIO;
	}

	/* Check that the update was accepted; security features may
	 * have caused the update to be ignored.
	 */
	ret = wm831x_rtc_readtime(dev, &new_tm);
	if (ret < 0)
		return ret;

	ret = rtc_tm_to_time(&new_tm, &new_time);
	if (ret < 0) {
		dev_err(dev, "Failed to convert time: %d\n", ret);
		return ret;
	}

	/* Allow a second of change in case of tick */
	if (new_time - time > 1) {
		dev_err(dev, "RTC update not permitted by hardware\n");
		return -EPERM;
	}

	return 0;
}

/*
 * Read alarm time and date in RTC
 */
static int wm831x_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(dev);
	int ret;
	u16 data[2];
	u32 time;

	ret = wm831x_bulk_read(wm831x_rtc->wm831x, WM831X_RTC_ALARM_1,
			       2, data);
	if (ret != 0) {
		dev_err(dev, "Failed to read alarm time: %d\n", ret);
		return ret;
	}

	time = (data[0] << 16) | data[1];

	rtc_time_to_tm(time, &alrm->time);

	/* Report whether the alarm interrupt is currently enabled */
	ret = wm831x_reg_read(wm831x_rtc->wm831x, WM831X_RTC_CONTROL);
	if (ret < 0) {
		dev_err(dev, "Failed to read RTC control: %d\n", ret);
		return ret;
	}

	if (ret & WM831X_RTC_ALM_ENA)
		alrm->enabled = 1;
	else
		alrm->enabled = 0;

	return 0;
}

/* Disable the alarm interrupt and cache the state */
static int wm831x_rtc_stop_alarm(struct wm831x_rtc *wm831x_rtc)
{
	wm831x_rtc->alarm_enabled = 0;

	return wm831x_set_bits(wm831x_rtc->wm831x, WM831X_RTC_CONTROL,
			       WM831X_RTC_ALM_ENA, 0);
}

/* Enable the alarm interrupt and cache the state */
static int wm831x_rtc_start_alarm(struct wm831x_rtc *wm831x_rtc)
{
	wm831x_rtc->alarm_enabled = 1;

	return wm831x_set_bits(wm831x_rtc->wm831x, WM831X_RTC_CONTROL,
			       WM831X_RTC_ALM_ENA, WM831X_RTC_ALM_ENA);
}

/*
 * Program a new alarm time; the alarm is stopped while the two 16-bit
 * halves are written, then re-enabled if requested.
 */
static int wm831x_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(dev);
	struct wm831x *wm831x = wm831x_rtc->wm831x;
	int ret;
	unsigned long time;

	ret = rtc_tm_to_time(&alrm->time, &time);
	if (ret < 0) {
		dev_err(dev, "Failed to convert time: %d\n", ret);
		return ret;
	}

	ret = wm831x_rtc_stop_alarm(wm831x_rtc);
	if (ret < 0) {
		dev_err(dev, "Failed to stop alarm: %d\n", ret);
		return ret;
	}

	ret = wm831x_reg_write(wm831x, WM831X_RTC_ALARM_1,
			       (time >> 16) & 0xffff);
	if (ret < 0) {
		dev_err(dev, "Failed to write ALARM_1: %d\n", ret);
		return ret;
	}

	ret = wm831x_reg_write(wm831x, WM831X_RTC_ALARM_2, time & 0xffff);
	if (ret < 0) {
		dev_err(dev, "Failed to write ALARM_2: %d\n", ret);
		return ret;
	}

	if (alrm->enabled) {
		ret = wm831x_rtc_start_alarm(wm831x_rtc);
		if (ret < 0) {
			dev_err(dev, "Failed to start alarm: %d\n", ret);
			return ret;
		}
	}

	return 0;
}

/* rtc_class_ops hook: toggle the alarm interrupt */
static int wm831x_rtc_alarm_irq_enable(struct device *dev,
				       unsigned int enabled)
{
	struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(dev);

	if (enabled)
		return wm831x_rtc_start_alarm(wm831x_rtc);
	else
		return wm831x_rtc_stop_alarm(wm831x_rtc);
}

/* rtc_class_ops hook: toggle the 1Hz periodic interrupt via PINT_FREQ */
static int wm831x_rtc_update_irq_enable(struct device *dev,
					unsigned int enabled)
{
	struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(dev);
	int val;

	if (enabled)
		val = 1 << WM831X_RTC_PINT_FREQ_SHIFT;
	else
		val = 0;

	return wm831x_set_bits(wm831x_rtc->wm831x, WM831X_RTC_CONTROL,
			       WM831X_RTC_PINT_FREQ_MASK, val);
}

/* Alarm interrupt handler: forward to the RTC core */
static irqreturn_t wm831x_alm_irq(int irq, void *data)
{
	struct wm831x_rtc *wm831x_rtc = data;

	rtc_update_irq(wm831x_rtc->rtc, 1, RTC_IRQF | RTC_AF);

	return IRQ_HANDLED;
}

/* Periodic interrupt handler: forward to the RTC core */
static irqreturn_t wm831x_per_irq(int irq, void *data)
{
	struct wm831x_rtc *wm831x_rtc = data;

	rtc_update_irq(wm831x_rtc->rtc, 1, RTC_IRQF | RTC_UF);

	return IRQ_HANDLED;
}

static const struct rtc_class_ops wm831x_rtc_ops = {
	.read_time = wm831x_rtc_readtime,
	.set_mmss = wm831x_rtc_set_mmss,
	.read_alarm = wm831x_rtc_readalarm,
	.set_alarm = wm831x_rtc_setalarm,
	.alarm_irq_enable = wm831x_rtc_alarm_irq_enable,
	.update_irq_enable = wm831x_rtc_update_irq_enable,
};

#ifdef CONFIG_PM
/* Turn off the alarm if it should not be a wake source. */
static int wm831x_rtc_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(&pdev->dev);
	int ret, enable;

	if (wm831x_rtc->alarm_enabled && device_may_wakeup(&pdev->dev))
		enable = WM831X_RTC_ALM_ENA;
	else
		enable = 0;

	ret = wm831x_set_bits(wm831x_rtc->wm831x, WM831X_RTC_CONTROL,
			      WM831X_RTC_ALM_ENA, enable);
	if (ret != 0)
		dev_err(&pdev->dev, "Failed to update RTC alarm: %d\n", ret);

	/* Suspend is best-effort: never veto the transition */
	return 0;
}

/* Enable the alarm if it should be enabled (in case it was disabled to
 * prevent use as a wake source).
 */
static int wm831x_rtc_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(&pdev->dev);
	int ret;

	if (wm831x_rtc->alarm_enabled) {
		ret = wm831x_rtc_start_alarm(wm831x_rtc);
		if (ret != 0)
			dev_err(&pdev->dev,
				"Failed to restart RTC alarm: %d\n", ret);
	}

	return 0;
}

/* Unconditionally disable the alarm */
static int wm831x_rtc_freeze(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(&pdev->dev);
	int ret;

	ret = wm831x_set_bits(wm831x_rtc->wm831x, WM831X_RTC_CONTROL,
			      WM831X_RTC_ALM_ENA, 0);
	if (ret != 0)
		dev_err(&pdev->dev, "Failed to stop RTC alarm: %d\n", ret);

	return 0;
}
#else
#define wm831x_rtc_suspend NULL
#define wm831x_rtc_resume NULL
#define wm831x_rtc_freeze NULL
#endif

/*
 * Probe: allocate state, mirror the hardware alarm-enable bit, register
 * the RTC class device and request the periodic/alarm IRQs.  IRQ request
 * failures are logged but not fatal — the RTC still works for timekeeping.
 */
static int wm831x_rtc_probe(struct platform_device *pdev)
{
	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
	struct wm831x_rtc *wm831x_rtc;
	int per_irq = platform_get_irq_byname(pdev, "PER");
	int alm_irq = platform_get_irq_byname(pdev, "ALM");
	int ret = 0;

	wm831x_rtc = kzalloc(sizeof(*wm831x_rtc), GFP_KERNEL);
	if (wm831x_rtc == NULL)
		return -ENOMEM;

	platform_set_drvdata(pdev, wm831x_rtc);
	wm831x_rtc->wm831x = wm831x;

	ret = wm831x_reg_read(wm831x, WM831X_RTC_CONTROL);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to read RTC control: %d\n", ret);
		goto err;
	}
	if (ret & WM831X_RTC_ALM_ENA)
		wm831x_rtc->alarm_enabled = 1;

	device_init_wakeup(&pdev->dev, 1);

	wm831x_rtc->rtc = rtc_device_register("wm831x", &pdev->dev,
					      &wm831x_rtc_ops, THIS_MODULE);
	if (IS_ERR(wm831x_rtc->rtc)) {
		ret = PTR_ERR(wm831x_rtc->rtc);
		goto err;
	}

	ret = wm831x_request_irq(wm831x, per_irq, wm831x_per_irq,
				 IRQF_TRIGGER_RISING, "wm831x_rtc_per",
				 wm831x_rtc);
	if (ret != 0) {
		dev_err(&pdev->dev, "Failed to request periodic IRQ %d: %d\n",
			per_irq, ret);
	}

	ret = wm831x_request_irq(wm831x, alm_irq, wm831x_alm_irq,
				 IRQF_TRIGGER_RISING, "wm831x_rtc_alm",
				 wm831x_rtc);
	if (ret != 0) {
		dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n",
			alm_irq, ret);
	}

	wm831x_rtc_add_randomness(wm831x);

	return 0;

err:
	kfree(wm831x_rtc);
	return ret;
}

/* Remove: release IRQs, unregister the class device, free state */
static int __devexit wm831x_rtc_remove(struct platform_device *pdev)
{
	struct wm831x_rtc *wm831x_rtc = platform_get_drvdata(pdev);
	int per_irq = platform_get_irq_byname(pdev, "PER");
	int alm_irq = platform_get_irq_byname(pdev, "ALM");

	wm831x_free_irq(wm831x_rtc->wm831x, alm_irq, wm831x_rtc);
	wm831x_free_irq(wm831x_rtc->wm831x, per_irq, wm831x_rtc);
	rtc_device_unregister(wm831x_rtc->rtc);
	kfree(wm831x_rtc);

	return 0;
}

/* suspend/freeze gate the alarm as a wake source; resume restores it */
static struct dev_pm_ops wm831x_rtc_pm_ops = {
	.suspend = wm831x_rtc_suspend,
	.resume = wm831x_rtc_resume,

	.freeze = wm831x_rtc_freeze,
	.thaw = wm831x_rtc_resume,
	.restore = wm831x_rtc_resume,

	.poweroff = wm831x_rtc_suspend,
};

static struct platform_driver wm831x_rtc_driver = {
	.probe = wm831x_rtc_probe,
	.remove = __devexit_p(wm831x_rtc_remove),
	.driver = {
		.name = "wm831x-rtc",
		.pm = &wm831x_rtc_pm_ops,
	},
};

static int __init wm831x_rtc_init(void)
{
	return platform_driver_register(&wm831x_rtc_driver);
}
module_init(wm831x_rtc_init);

static void __exit wm831x_rtc_exit(void)
{
	platform_driver_unregister(&wm831x_rtc_driver);
}
module_exit(wm831x_rtc_exit);

MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("RTC driver for the WM831x series PMICs");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm831x-rtc");
gpl-2.0
omega-roms/G900I_Omega_Kernel_LL_5.0
mm/compaction.c
54
32681
/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
/* Thin wrappers so the counters compile away when compaction is off */
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

/* Return isolated free pages to the buddy allocator; returns how many */
static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

/* Re-establish kernel mappings for pages split_free_page() handed us */
static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}

/* Pageblock types async compaction is willing to scan */
static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

#ifdef CONFIG_COMPACTION
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	unsigned long pfn;

	/* Rewind both cached scanner positions to the zone boundaries */
	zone->compact_cached_migrate_pfn = start_pfn;
	zone->compact_cached_free_pfn = end_pfn;
	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}
}

/* Clear the skip hints of every populated zone that asked for a flush */
void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;
	if (!page)
		return;

	if (!nr_isolated) {
		unsigned long pfn = page_to_pfn(page);
		set_pageblock_skip(page);

		/* Update where compaction should restart */
		if (migrate_scanner) {
			if (!cc->finished_update_migrate &&
			    pfn > zone->compact_cached_migrate_pfn)
				zone->compact_cached_migrate_pfn = pfn;
		} else {
			if (!cc->finished_update_free &&
			    pfn < zone->compact_cached_free_pfn)
				zone->compact_cached_free_pfn = pfn;
		}
	}
}
#else
/* !CONFIG_COMPACTION (CMA-only build): no skip-hint bookkeeping */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

static inline bool should_release_lock(spinlock_t *lock)
{
	return need_resched() || spin_is_contended(lock);
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. Check if the process needs to be scheduled or
 * if the lock is contended. For async compaction, back out in the event
 * if contention is severe. For sync compaction, schedule.
 *
 * Returns true if the lock is held.
 * Returns false if the lock is released and compaction should abort
 */
static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
				      bool locked, struct compact_control *cc)
{
	if (should_release_lock(lock)) {
		if (locked) {
			spin_unlock_irqrestore(lock, *flags);
			locked = false;
		}

		/* async aborts if taking too long or contended */
		if (!cc->sync) {
			cc->contended = true;
			return false;
		}

		cond_resched();
	}

	if (!locked)
		spin_lock_irqsave(lock, *flags);
	return true;
}

static inline bool compact_trylock_irqsave(spinlock_t *lock,
			unsigned long *flags, struct compact_control *cc)
{
	return compact_checklock_irqsave(lock, flags, false, cc);
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	int migratetype = get_pageblock_migratetype(page);

	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
		return false;

	/* If the page is a large free page, then allow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(migratetype))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Isolate free pages onto a private freelist. Caller must hold zone->lock.
 * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
 * pages inside of the pageblock (even though it may still end up isolating
 * some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long blockpfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags;
	bool locked = false;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;
		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * The zone lock must be held to isolate freepages.
		 * Unfortunately this is a very coarse lock and can be
		 * heavily contended if there are parallel allocations
		 * or parallel compactions. For async compaction do not
		 * spin on the lock and we acquire the lock as late as
		 * possible.
		 */
		locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
								locked, cc);
		if (!locked)
			break;

		/* Recheck this is a suitable migration target under lock */
		if (!strict && !suitable_migration_target(page))
			break;

		/* Recheck this is a buddy page under lock */
		if (!PageBuddy(page))
			goto isolate_fail;

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
			continue;
		}

isolate_fail:
		if(strict)
			break;
	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if(strict && blockpfn < end_pfn)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause function to
 * undo its actions and return zero.
 *
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater then end_pfn if end fell in a middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn;
	LIST_HEAD(freelist);

	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
		if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
			break;

		/*
		 * On subsequent iterations ALIGN() is actually not needed,
		 * but we keep it that we not to complicate the code.
		 */
		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
						   &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n.  (Max order
		 * page may span two pageblocks).
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	/* If locked we can use the interrupt unsafe versions */
	if (locked) {
		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	} else {
		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	}
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone:	Zone pages are in.
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN of the range.
 * @end_pfn:	The one-past-the-last PFN of the range.
 * @unevictable: true if it allows to isolate unevictable pages
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn).  Returns zero if there is a fatal signal
 * pending), otherwise PFN of the first page that was not scanned
 * (which may be both less, equal to or more then end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratetypes this function
 * does not modify any cc's fields, in particular it does not modify
 * (or read for that matter) cc->migrate_pfn.
 */
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn,
			   bool unevictable)
{
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	isolate_mode_t mode = 0;
	unsigned long flags;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (!cc->sync)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	cond_resched();
	for (; low_pfn < end_pfn; low_pfn++) {
		/* give a chance to irqs before checking need_resched() */
		if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
			if (should_release_lock(&zone->lru_lock)) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				locked = false;
			}
		}

		/*
		 * migrate_pfn does not necessarily start aligned to a
		 * pageblock. Ensure that pfn_valid is called when moving
		 * into a new MAX_ORDER_NR_PAGES range in case of large
		 * memory holes within the zone
		 */
		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
			if (!pfn_valid(low_pfn)) {
				low_pfn += MAX_ORDER_NR_PAGES - 1;
				continue;
			}
		}

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/*
		 * Get the page and ensure the page is within the same zone.
		 * See the comment in isolate_freepages about overlapping
		 * nodes. It is deliberate that the new zone lock is not taken
		 * as memory compaction should not move pages between nodes.
		 */
		page = pfn_to_page(low_pfn);
		if (page_zone(page) != zone)
			continue;

		if (!valid_page)
			valid_page = page;

		/* If isolation recently failed, do not retry */
		pageblock_nr = low_pfn >> pageblock_order;
		if (!isolation_suitable(cc, page))
			goto next_pageblock;

		/* Skip if free */
		if (PageBuddy(page))
			continue;

		/*
		 * For async migration, also only scan in MOVABLE blocks. Async
		 * migration is optimistic to see if the minimum amount of work
		 * satisfies the allocation
		 */
		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
			cc->finished_update_migrate = true;
			goto next_pageblock;
		}

		/* Check may be lockless but that's ok as we recheck later */
		if (!PageLRU(page))
			continue;

		/*
		 * PageLRU is set. lru_lock normally excludes isolation
		 * splitting and collapsing (collapsing has already happened
		 * if PageLRU is set) but the lock is not necessarily taken
		 * here and it is wasteful to take it just to check transhuge.
		 * Check TransHuge without lock and skip the whole pageblock if
		 * it's either a transhuge or hugetlbfs page, as calling
		 * compound_order() without preventing THP from splitting the
		 * page underneath us may return surprising results.
		 */
		if (PageTransHuge(page)) {
			if (!locked)
				goto next_pageblock;
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		/* Check if it is ok to still hold the lock */
		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
								locked, cc);
		if (!locked || fatal_signal_pending(current))
			break;

		/* Recheck PageLRU and PageTransHuge under lock */
		if (!PageLRU(page))
			continue;
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		if (!cc->sync)
			mode |= ISOLATE_ASYNC_MIGRATE;

		if (unevictable)
			mode |= ISOLATE_UNEVICTABLE;

		/* Try isolate the page */
		if (__isolate_lru_page(page, mode) != 0)
			continue;

		VM_BUG_ON(PageTransCompound(page));

		/* Successfully isolated */
		cc->finished_update_migrate = true;
		del_page_from_lru_list(zone, page, page_lru(page));
#if defined(CONFIG_CMA_PAGE_COUNTING)
		/* Vendor-specific (Samsung) per-LRU CMA page accounting */
		if (unevictable)
			__mod_zone_page_state(zone,
					      NR_FREE_CMA_PAGES + 1 + page_lru(page),
					      -1);
#endif
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}

		continue;

next_pageblock:
		/* Jump to the last pfn of this pageblock; loop ++ moves on */
		low_pfn += pageblock_nr_pages;
		low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
		last_pageblock_nr = pageblock_nr;
	}

	acct_isolated(zone, locked, cc);

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * scanned from (or the end of the zone if starting). The low point
	 * is the end of the pageblock the migration scanner is using.
	 */
	pfn = cc->free_pfn;
	low_pfn = cc->migrate_pfn + pageblock_nr_pages;

	/*
	 * Take care that if the migration scanner is at the end of the zone
	 * that the free scanner does not accidentally move to the next zone
	 * in the next isolation cycle.
	 */
	high_pfn = min(low_pfn, pfn);

	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that all pages within a zones range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from */
		isolated = 0;

		/*
		 * As pfn may not start aligned, pfn+pageblock_nr_page
		 * may cross a MAX_ORDER_NR_PAGES boundary and miss
		 * a pfn_valid check. Ensure isolate_freepages_block()
		 * only scans within a pageblock
		 */
		end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		end_pfn = min(end_pfn, zone_end_pfn);
		isolated = isolate_freepages_block(cc, pfn, end_pfn,
						   freelist, false);
		nr_freepages += isolated;

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated) {
			cc->finished_update_free = true;
			high_pfn = max(high_pfn, pfn);
		}
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
*/ static void update_nr_listpages(struct compact_control *cc) { int nr_migratepages = 0; int nr_freepages = 0; struct page *page; list_for_each_entry(page, &cc->migratepages, lru) nr_migratepages++; list_for_each_entry(page, &cc->freepages, lru) nr_freepages++; cc->nr_migratepages = nr_migratepages; cc->nr_freepages = nr_freepages; } /* possible outcome of isolate_migratepages */ typedef enum { ISOLATE_ABORT, /* Abort compaction now */ ISOLATE_NONE, /* No pages isolated, continue scanning */ ISOLATE_SUCCESS, /* Pages isolated, migrate */ } isolate_migrate_t; /* * Isolate all pages that can be migrated from the block pointed to by * the migrate scanner within compact_control. */ static isolate_migrate_t isolate_migratepages(struct zone *zone, struct compact_control *cc) { unsigned long low_pfn, end_pfn; /* Do not scan outside zone boundaries */ low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn); /* Only scan within a pageblock boundary */ end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages); /* Do not cross the free scanner or scan within a memory hole */ if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) { cc->migrate_pfn = end_pfn; return ISOLATE_NONE; } /* Perform the isolation */ low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false); if (!low_pfn || cc->contended) return ISOLATE_ABORT; cc->migrate_pfn = low_pfn; return ISOLATE_SUCCESS; } static int compact_finished(struct zone *zone, struct compact_control *cc) { unsigned int order; unsigned long watermark; if (fatal_signal_pending(current)) return COMPACT_PARTIAL; /* Compaction run completes if the migrate and free scanner meet */ if (cc->free_pfn <= cc->migrate_pfn) { /* * Mark that the PG_migrate_skip information should be cleared * by kswapd when it goes to sleep. kswapd does not set the * flag itself as the decision to be clear should be directly * based on an allocation request. 
*/ if (!current_is_kswapd()) zone->compact_blockskip_flush = true; return COMPACT_COMPLETE; } /* * order == -1 is expected when compacting via * /proc/sys/vm/compact_memory */ if (cc->order == -1) return COMPACT_CONTINUE; /* Compaction run is not finished if the watermark is not met */ watermark = low_wmark_pages(zone); watermark += (1 << cc->order); if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0)) return COMPACT_CONTINUE; /* Direct compactor: Is a suitable page free? */ for (order = cc->order; order < MAX_ORDER; order++) { struct free_area *area = &zone->free_area[order]; /* Job done if page is free of the right migratetype */ if (!list_empty(&area->free_list[cc->migratetype])) return COMPACT_PARTIAL; /* Job done if allocation would set block type */ if (cc->order >= pageblock_order && area->nr_free) return COMPACT_PARTIAL; } return COMPACT_CONTINUE; } /* * compaction_suitable: Is this suitable to run compaction on this zone now? * Returns * COMPACT_SKIPPED - If there are too few free pages for compaction * COMPACT_PARTIAL - If the allocation would succeed without compaction * COMPACT_CONTINUE - If compaction should run now */ unsigned long compaction_suitable(struct zone *zone, int order) { int fragindex; unsigned long watermark; /* * order == -1 is expected when compacting via * /proc/sys/vm/compact_memory */ if (order == -1) return COMPACT_CONTINUE; /* * Watermarks for order-0 must be met for compaction. Note the 2UL. 
* This is because during migration, copies of pages need to be * allocated and for a short time, the footprint is higher */ watermark = low_wmark_pages(zone) + (2UL << order); if (!zone_watermark_ok(zone, 0, watermark, 0, 0)) return COMPACT_SKIPPED; /* * fragmentation index determines if allocation failures are due to * low memory or external fragmentation * * index of -1000 implies allocations might succeed depending on * watermarks * index towards 0 implies failure is due to lack of memory * index towards 1000 implies failure is due to fragmentation * * Only compact if a failure would be due to fragmentation. */ fragindex = fragmentation_index(zone, order); if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) return COMPACT_SKIPPED; if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark, 0, 0)) return COMPACT_PARTIAL; return COMPACT_CONTINUE; } static int compact_zone(struct zone *zone, struct compact_control *cc) { int ret; unsigned long start_pfn = zone->zone_start_pfn; unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages; ret = compaction_suitable(zone, cc->order); switch (ret) { case COMPACT_PARTIAL: case COMPACT_SKIPPED: /* Compaction is likely to fail */ return ret; case COMPACT_CONTINUE: /* Fall through to compaction */ ; } /* * Setup to move all movable pages to the end of the zone. Used cached * information on where the scanners should start but check that it * is initialised by ensuring the values are within zone boundaries. 
*/ cc->migrate_pfn = zone->compact_cached_migrate_pfn; cc->free_pfn = zone->compact_cached_free_pfn; if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1); zone->compact_cached_free_pfn = cc->free_pfn; } if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) { cc->migrate_pfn = start_pfn; zone->compact_cached_migrate_pfn = cc->migrate_pfn; } /* * Clear pageblock skip if there were failures recently and compaction * is about to be retried after being deferred. kswapd does not do * this reset as it'll reset the cached information when going to sleep. */ if (compaction_restarting(zone, cc->order) && !current_is_kswapd()) __reset_isolation_suitable(zone); migrate_prep_local(); while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) { unsigned long nr_migrate, nr_remaining; int err; switch (isolate_migratepages(zone, cc)) { case ISOLATE_ABORT: ret = COMPACT_PARTIAL; putback_lru_pages(&cc->migratepages); cc->nr_migratepages = 0; goto out; case ISOLATE_NONE: continue; case ISOLATE_SUCCESS: ; } nr_migrate = cc->nr_migratepages; err = migrate_pages(&cc->migratepages, compaction_alloc, (unsigned long)cc, false, cc->sync ? 
MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC); update_nr_listpages(cc); nr_remaining = cc->nr_migratepages; trace_mm_compaction_migratepages(nr_migrate - nr_remaining, nr_remaining); /* Release LRU pages not migrated */ if (err) { putback_lru_pages(&cc->migratepages); cc->nr_migratepages = 0; if (err == -ENOMEM) { ret = COMPACT_PARTIAL; goto out; } } } out: /* Release free pages and check accounting */ cc->nr_freepages -= release_freepages(&cc->freepages); VM_BUG_ON(cc->nr_freepages != 0); return ret; } static unsigned long compact_zone_order(struct zone *zone, int order, gfp_t gfp_mask, bool sync, bool *contended) { unsigned long ret; struct compact_control cc = { .nr_freepages = 0, .nr_migratepages = 0, .order = order, .migratetype = allocflags_to_migratetype(gfp_mask), .zone = zone, .sync = sync, }; INIT_LIST_HEAD(&cc.freepages); INIT_LIST_HEAD(&cc.migratepages); ret = compact_zone(zone, &cc); VM_BUG_ON(!list_empty(&cc.freepages)); VM_BUG_ON(!list_empty(&cc.migratepages)); *contended = cc.contended; return ret; } int sysctl_extfrag_threshold = 500; /** * try_to_compact_pages - Direct compact to satisfy a high-order allocation * @zonelist: The zonelist used for the current allocation * @order: The order of the current allocation * @gfp_mask: The GFP mask of the current allocation * @nodemask: The allowed nodes to allocate from * @sync: Whether migration is synchronous or not * @contended: Return value that is true if compaction was aborted due to lock contention * @page: Optionally capture a free page of the requested order during compaction * * This is the main entry point for direct page compaction. 
*/ unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, bool sync, bool *contended) { enum zone_type high_zoneidx = gfp_zone(gfp_mask); int may_enter_fs = gfp_mask & __GFP_FS; int may_perform_io = gfp_mask & __GFP_IO; struct zoneref *z; struct zone *zone; int rc = COMPACT_SKIPPED; int alloc_flags = 0; /* Check if the GFP flags allow compaction */ if (!order || !may_enter_fs || !may_perform_io) return rc; count_compact_event(COMPACTSTALL); #ifdef CONFIG_CMA if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) alloc_flags |= ALLOC_CMA; #endif /* Compact each zone in the list */ for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, nodemask) { int status; status = compact_zone_order(zone, order, gfp_mask, sync, contended); rc = max(status, rc); /* If a normal allocation would succeed, stop compacting */ if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, alloc_flags)) break; } return rc; } /* Compact all zones within a node */ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc) { int zoneid; struct zone *zone; for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { zone = &pgdat->node_zones[zoneid]; if (!populated_zone(zone)) continue; cc->nr_freepages = 0; cc->nr_migratepages = 0; cc->zone = zone; INIT_LIST_HEAD(&cc->freepages); INIT_LIST_HEAD(&cc->migratepages); if (cc->order == -1 || !compaction_deferred(zone, cc->order)) compact_zone(zone, cc); if (cc->order > 0) { int ok = zone_watermark_ok(zone, cc->order, low_wmark_pages(zone), 0, 0); if (ok && cc->order >= zone->compact_order_failed) zone->compact_order_failed = cc->order + 1; /* Currently async compaction is never deferred. 
*/ else if (!ok && cc->sync) defer_compaction(zone, cc->order); } VM_BUG_ON(!list_empty(&cc->freepages)); VM_BUG_ON(!list_empty(&cc->migratepages)); } return 0; } int compact_pgdat(pg_data_t *pgdat, int order) { struct compact_control cc = { .order = order, .sync = false, }; return __compact_pgdat(pgdat, &cc); } static int compact_node(int nid) { struct compact_control cc = { .order = -1, .sync = true, }; return __compact_pgdat(NODE_DATA(nid), &cc); } /* Compact all nodes in the system */ static void compact_nodes(void) { int nid; /* Flush pending updates to the LRU lists */ lru_add_drain_all(); for_each_online_node(nid) compact_node(nid); } /* The written value is actually unused, all memory is compacted */ int sysctl_compact_memory; /* This is the entry point for compacting all nodes via /proc/sys/vm */ int sysctl_compaction_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { if (write) compact_nodes(); return 0; } int sysctl_extfrag_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { proc_dointvec_minmax(table, write, buffer, length, ppos); return 0; } #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) ssize_t sysfs_compact_node(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nid = dev->id; if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { /* Flush pending updates to the LRU lists */ lru_add_drain_all(); compact_node(nid); } return count; } static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node); int compaction_register_node(struct node *node) { return device_create_file(&node->dev, &dev_attr_compact); } void compaction_unregister_node(struct node *node) { return device_remove_file(&node->dev, &dev_attr_compact); } #endif /* CONFIG_SYSFS && CONFIG_NUMA */ #endif /* CONFIG_COMPACTION */
gpl-2.0
onyx-intl/ak98_kernel
arch/sh/kernel/cpu/sh4a/clock-sh7786.c
566
3252
/*
 * arch/sh/kernel/cpu/sh4a/clock-sh7786.c
 *
 * SH7786 support for the clock framework
 *
 * Copyright (C) 2008, 2009 Renesas Solutions Corp.
 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
 *
 * Based on SH7785
 * Copyright (C) 2007 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>

/*
 * Divisor tables, each indexed by the corresponding bit field of the
 * FRQMR1 frequency mode register (field positions are visible in the
 * shift/mask used by each recalc routine below).  Entries of 1 at
 * positions the hardware does not use act as harmless placeholders.
 */
static int ifc_divisors[] = { 1, 2, 4, 1 };	/* CPU (IFC), FRQMR1[31:28] */
static int sfc_divisors[] = { 1, 1, 4, 1 };	/* SHway (SFC), FRQMR1[23:20] */
static int bfc_divisors[] = { 1, 1, 1, 1, 1, 12, 16, 1,
			      24, 32, 1, 1, 1, 1, 1, 1 };	/* bus (BFC), FRQMR1[19:16] */
static int mfc_divisors[] = { 1, 1, 4, 1 };	/* DDR (MFC), FRQMR1[15:12] */
static int pfc_divisors[] = { 1, 1, 1, 1, 1, 1, 16, 1,
			      24, 32, 1, 48, 1, 1, 1, 1 };	/* peripheral (PFC), FRQMR1[3:0] */

/*
 * Scale the board-supplied input rate up to the master clock rate using
 * the current peripheral divisor setting.
 */
static void master_clk_init(struct clk *clk)
{
	clk->rate *= pfc_divisors[ctrl_inl(FRQMR1) & 0x000f];
}

static struct clk_ops sh7786_master_clk_ops = {
	.init		= master_clk_init,
};

/* Peripheral module clock: master rate divided by the PFC field. */
static unsigned long module_clk_recalc(struct clk *clk)
{
	int idx = (ctrl_inl(FRQMR1) & 0x000f);
	return clk->parent->rate / pfc_divisors[idx];
}

static struct clk_ops sh7786_module_clk_ops = {
	.recalc		= module_clk_recalc,
};

/* Bus clock: master rate divided by the BFC field. */
static unsigned long bus_clk_recalc(struct clk *clk)
{
	int idx = ((ctrl_inl(FRQMR1) >> 16) & 0x000f);
	return clk->parent->rate / bfc_divisors[idx];
}

static struct clk_ops sh7786_bus_clk_ops = {
	.recalc		= bus_clk_recalc,
};

/* CPU core clock: master rate divided by the IFC field. */
static unsigned long cpu_clk_recalc(struct clk *clk)
{
	int idx = ((ctrl_inl(FRQMR1) >> 28) & 0x0003);
	return clk->parent->rate / ifc_divisors[idx];
}

static struct clk_ops sh7786_cpu_clk_ops = {
	.recalc		= cpu_clk_recalc,
};

/* Ops for the four standard clocks, in the index order arch code expects. */
static struct clk_ops *sh7786_clk_ops[] = {
	&sh7786_master_clk_ops,
	&sh7786_module_clk_ops,
	&sh7786_bus_clk_ops,
	&sh7786_cpu_clk_ops,
};

/*
 * Hand the clock framework the ops for standard clock @idx; indices past
 * the table are silently ignored (caller keeps whatever *ops held).
 */
void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
{
	if (idx < ARRAY_SIZE(sh7786_clk_ops))
		*ops = sh7786_clk_ops[idx];
}

/* SHway (on-chip system bus) clock: master rate divided by the SFC field. */
static unsigned long shyway_clk_recalc(struct clk *clk)
{
	int idx = ((ctrl_inl(FRQMR1) >> 20) & 0x0003);
	return clk->parent->rate / sfc_divisors[idx];
}

static struct clk_ops sh7786_shyway_clk_ops = {
	.recalc		= shyway_clk_recalc,
};

static struct clk sh7786_shyway_clk = {
	.name		= "shyway_clk",
	.flags		= CLK_ENABLE_ON_INIT,
	.ops		= &sh7786_shyway_clk_ops,
};

/* DDR memory clock: master rate divided by the MFC field. */
static unsigned long ddr_clk_recalc(struct clk *clk)
{
	int idx = ((ctrl_inl(FRQMR1) >> 12) & 0x0003);
	return clk->parent->rate / mfc_divisors[idx];
}

static struct clk_ops sh7786_ddr_clk_ops = {
	.recalc		= ddr_clk_recalc,
};

static struct clk sh7786_ddr_clk = {
	.name		= "ddr_clk",
	.flags		= CLK_ENABLE_ON_INIT,
	.ops		= &sh7786_ddr_clk_ops,
};

/*
 * Additional SH7786-specific on-chip clocks that aren't already part of the
 * clock framework
 */
static struct clk *sh7786_onchip_clocks[] = {
	&sh7786_shyway_clk,
	&sh7786_ddr_clk,
};

/*
 * Register the extra on-chip clocks, parenting each to master_clk.
 * Returns 0 on success, or the OR of any clk_register() failures.
 *
 * NOTE(review): the clk_get(NULL, "master_clk") result is used without an
 * IS_ERR() check — presumably cpg_clk_init() always registers master_clk
 * first; confirm against the CPG init path.
 */
int __init arch_clk_init(void)
{
	struct clk *clk;
	int i, ret = 0;

	cpg_clk_init();
	clk = clk_get(NULL, "master_clk");
	for (i = 0; i < ARRAY_SIZE(sh7786_onchip_clocks); i++) {
		struct clk *clkp = sh7786_onchip_clocks[i];

		clkp->parent = clk;
		ret |= clk_register(clkp);
	}
	clk_put(clk);

	return ret;
}
gpl-2.0
j1nx/Amlogic-reff16-kernel
drivers/mtd/chips/cfi_probe.c
822
11670
/* Common Flash Interface probe code. (C) 2000 Red Hat. GPL'd. */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <asm/io.h> #include <asm/byteorder.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/mtd/xip.h> #include <linux/mtd/map.h> #include <linux/mtd/cfi.h> #include <linux/mtd/gen_probe.h> //#define DEBUG_CFI #ifdef DEBUG_CFI static void print_cfi_ident(struct cfi_ident *); #endif static int cfi_probe_chip(struct map_info *map, __u32 base, unsigned long *chip_map, struct cfi_private *cfi); static int cfi_chip_setup(struct map_info *map, struct cfi_private *cfi); struct mtd_info *cfi_probe(struct map_info *map); #ifdef CONFIG_MTD_XIP /* only needed for short periods, so this is rather simple */ #define xip_disable() local_irq_disable() #define xip_allowed(base, map) \ do { \ (void) map_read(map, base); \ xip_iprefetch(); \ local_irq_enable(); \ } while (0) #define xip_enable(base, map, cfi) \ do { \ cfi_qry_mode_off(base, map, cfi); \ xip_allowed(base, map); \ } while (0) #define xip_disable_qry(base, map, cfi) \ do { \ xip_disable(); \ cfi_qry_mode_on(base, map, cfi); \ } while (0) #else #define xip_disable() do { } while (0) #define xip_allowed(base, map) do { } while (0) #define xip_enable(base, map, cfi) do { } while (0) #define xip_disable_qry(base, map, cfi) do { } while (0) #endif /* check for QRY. 
in: interleave,type,mode ret: table index, <0 for error */ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base, unsigned long *chip_map, struct cfi_private *cfi) { int i; if ((base + 0) >= map->size) { printk(KERN_NOTICE "Probe at base[0x00](0x%08lx) past the end of the map(0x%08lx)\n", (unsigned long)base, map->size -1); return 0; } if ((base + 0xff) >= map->size) { printk(KERN_NOTICE "Probe at base[0x55](0x%08lx) past the end of the map(0x%08lx)\n", (unsigned long)base + 0x55, map->size -1); return 0; } xip_disable(); if (!cfi_qry_mode_on(base, map, cfi)) { xip_enable(base, map, cfi); return 0; } if (!cfi->numchips) { /* This is the first time we're called. Set up the CFI stuff accordingly and return */ return cfi_chip_setup(map, cfi); } /* Check each previous chip to see if it's an alias */ for (i=0; i < (base >> cfi->chipshift); i++) { unsigned long start; if(!test_bit(i, chip_map)) { /* Skip location; no valid chip at this address */ continue; } start = i << cfi->chipshift; /* This chip should be in read mode if it's one we've already touched. */ if (cfi_qry_present(map, start, cfi)) { /* Eep. This chip also had the QRY marker. * Is it an alias for the new one? */ cfi_qry_mode_off(start, map, cfi); /* If the QRY marker goes away, it's an alias */ if (!cfi_qry_present(map, start, cfi)) { xip_allowed(base, map); printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n", map->name, base, start); return 0; } /* Yes, it's actually got QRY for data. Most * unfortunate. Stick the new chip in read mode * too and if it's the same, assume it's an alias. */ /* FIXME: Use other modes to do a proper check */ cfi_qry_mode_off(base, map, cfi); if (cfi_qry_present(map, base, cfi)) { xip_allowed(base, map); printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n", map->name, base, start); return 0; } } } /* OK, if we got to here, then none of the previous chips appear to be aliases for the current one. 
*/ set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */ cfi->numchips++; /* Put it back into Read Mode */ cfi_qry_mode_off(base, map, cfi); xip_allowed(base, map); printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n", map->name, cfi->interleave, cfi->device_type*8, base, map->bankwidth*8); return 1; } static int __xipram cfi_chip_setup(struct map_info *map, struct cfi_private *cfi) { int ofs_factor = cfi->interleave*cfi->device_type; __u32 base = 0; int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor); int i; xip_enable(base, map, cfi); #ifdef DEBUG_CFI printk("Number of erase regions: %d\n", num_erase_regions); #endif if (!num_erase_regions) return 0; cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); if (!cfi->cfiq) { printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name); return 0; } memset(cfi->cfiq,0,sizeof(struct cfi_ident)); cfi->cfi_mode = CFI_MODE_CFI; /* Read the CFI info structure */ xip_disable_qry(base, map, cfi); for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++) ((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor); /* Note we put the device back into Read Mode BEFORE going into Auto * Select Mode, as some devices support nesting of modes, others * don't. This way should always work. * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and * so should be treated as nops or illegal (and so put the device * back into Read Mode, which is a nop in this case). 
*/ cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL); cfi->mfr = cfi_read_query16(map, base); cfi->id = cfi_read_query16(map, base + ofs_factor); /* Get AMD/Spansion extended JEDEC ID */ if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e) cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 | cfi_read_query(map, base + 0xf * ofs_factor); /* Put it back into Read Mode */ cfi_qry_mode_off(base, map, cfi); xip_allowed(base, map); /* Do any necessary byteswapping */ cfi->cfiq->P_ID = le16_to_cpu(cfi->cfiq->P_ID); cfi->cfiq->P_ADR = le16_to_cpu(cfi->cfiq->P_ADR); cfi->cfiq->A_ID = le16_to_cpu(cfi->cfiq->A_ID); cfi->cfiq->A_ADR = le16_to_cpu(cfi->cfiq->A_ADR); cfi->cfiq->InterfaceDesc = le16_to_cpu(cfi->cfiq->InterfaceDesc); cfi->cfiq->MaxBufWriteSize = le16_to_cpu(cfi->cfiq->MaxBufWriteSize); #ifdef DEBUG_CFI /* Dump the information therein */ print_cfi_ident(cfi->cfiq); #endif for (i=0; i<cfi->cfiq->NumEraseRegions; i++) { cfi->cfiq->EraseRegionInfo[i] = le32_to_cpu(cfi->cfiq->EraseRegionInfo[i]); #ifdef DEBUG_CFI printk(" Erase Region #%d: BlockSize 0x%4.4X bytes, %d blocks\n", i, (cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff, (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1); #endif } printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n", map->name, cfi->interleave, cfi->device_type*8, base, map->bankwidth*8); return 1; } #ifdef DEBUG_CFI static char *vendorname(__u16 vendor) { switch (vendor) { case P_ID_NONE: return "None"; case P_ID_INTEL_EXT: return "Intel/Sharp Extended"; case P_ID_AMD_STD: return "AMD/Fujitsu Standard"; case P_ID_INTEL_STD: return "Intel/Sharp Standard"; case P_ID_AMD_EXT: return "AMD/Fujitsu Extended"; case P_ID_WINBOND: return "Winbond Standard"; case P_ID_ST_ADV: return "ST Advanced"; case 
P_ID_MITSUBISHI_STD: return "Mitsubishi Standard"; case P_ID_MITSUBISHI_EXT: return "Mitsubishi Extended"; case P_ID_SST_PAGE: return "SST Page Write"; case P_ID_INTEL_PERFORMANCE: return "Intel Performance Code"; case P_ID_INTEL_DATA: return "Intel Data"; case P_ID_RESERVED: return "Not Allowed / Reserved for Future Use"; default: return "Unknown"; } } static void print_cfi_ident(struct cfi_ident *cfip) { #if 0 if (cfip->qry[0] != 'Q' || cfip->qry[1] != 'R' || cfip->qry[2] != 'Y') { printk("Invalid CFI ident structure.\n"); return; } #endif printk("Primary Vendor Command Set: %4.4X (%s)\n", cfip->P_ID, vendorname(cfip->P_ID)); if (cfip->P_ADR) printk("Primary Algorithm Table at %4.4X\n", cfip->P_ADR); else printk("No Primary Algorithm Table\n"); printk("Alternative Vendor Command Set: %4.4X (%s)\n", cfip->A_ID, vendorname(cfip->A_ID)); if (cfip->A_ADR) printk("Alternate Algorithm Table at %4.4X\n", cfip->A_ADR); else printk("No Alternate Algorithm Table\n"); printk("Vcc Minimum: %2d.%d V\n", cfip->VccMin >> 4, cfip->VccMin & 0xf); printk("Vcc Maximum: %2d.%d V\n", cfip->VccMax >> 4, cfip->VccMax & 0xf); if (cfip->VppMin) { printk("Vpp Minimum: %2d.%d V\n", cfip->VppMin >> 4, cfip->VppMin & 0xf); printk("Vpp Maximum: %2d.%d V\n", cfip->VppMax >> 4, cfip->VppMax & 0xf); } else printk("No Vpp line\n"); printk("Typical byte/word write timeout: %d µs\n", 1<<cfip->WordWriteTimeoutTyp); printk("Maximum byte/word write timeout: %d µs\n", (1<<cfip->WordWriteTimeoutMax) * (1<<cfip->WordWriteTimeoutTyp)); if (cfip->BufWriteTimeoutTyp || cfip->BufWriteTimeoutMax) { printk("Typical full buffer write timeout: %d µs\n", 1<<cfip->BufWriteTimeoutTyp); printk("Maximum full buffer write timeout: %d µs\n", (1<<cfip->BufWriteTimeoutMax) * (1<<cfip->BufWriteTimeoutTyp)); } else printk("Full buffer write not supported\n"); printk("Typical block erase timeout: %d ms\n", 1<<cfip->BlockEraseTimeoutTyp); printk("Maximum block erase timeout: %d ms\n", (1<<cfip->BlockEraseTimeoutMax) * 
(1<<cfip->BlockEraseTimeoutTyp)); if (cfip->ChipEraseTimeoutTyp || cfip->ChipEraseTimeoutMax) { printk("Typical chip erase timeout: %d ms\n", 1<<cfip->ChipEraseTimeoutTyp); printk("Maximum chip erase timeout: %d ms\n", (1<<cfip->ChipEraseTimeoutMax) * (1<<cfip->ChipEraseTimeoutTyp)); } else printk("Chip erase not supported\n"); printk("Device size: 0x%X bytes (%d MiB)\n", 1 << cfip->DevSize, 1<< (cfip->DevSize - 20)); printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc); switch(cfip->InterfaceDesc) { case CFI_INTERFACE_X8_ASYNC: printk(" - x8-only asynchronous interface\n"); break; case CFI_INTERFACE_X16_ASYNC: printk(" - x16-only asynchronous interface\n"); break; case CFI_INTERFACE_X8_BY_X16_ASYNC: printk(" - supports x8 and x16 via BYTE# with asynchronous interface\n"); break; case CFI_INTERFACE_X32_ASYNC: printk(" - x32-only asynchronous interface\n"); break; case CFI_INTERFACE_X16_BY_X32_ASYNC: printk(" - supports x16 and x32 via Word# with asynchronous interface\n"); break; case CFI_INTERFACE_NOT_ALLOWED: printk(" - Not Allowed / Reserved\n"); break; default: printk(" - Unknown\n"); break; } printk("Max. bytes in buffer write: 0x%x\n", 1<< cfip->MaxBufWriteSize); printk("Number of Erase Block Regions: %d\n", cfip->NumEraseRegions); } #endif /* DEBUG_CFI */ static struct chip_probe cfi_chip_probe = { .name = "CFI", .probe_chip = cfi_probe_chip }; struct mtd_info *cfi_probe(struct map_info *map) { /* * Just use the generic probe stuff to call our CFI-specific * chip_probe routine in all the possible permutations, etc. 
*/ return mtd_do_chip_probe(map, &cfi_chip_probe); } static struct mtd_chip_driver cfi_chipdrv = { .probe = cfi_probe, .name = "cfi_probe", .module = THIS_MODULE }; static int __init cfi_probe_init(void) { register_mtd_chip_driver(&cfi_chipdrv); return 0; } static void __exit cfi_probe_exit(void) { unregister_mtd_chip_driver(&cfi_chipdrv); } module_init(cfi_probe_init); module_exit(cfi_probe_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al."); MODULE_DESCRIPTION("Probe code for CFI-compliant flash chips");
gpl-2.0
Split-Screen/android_kernel_mediatek_sprout
arch/arm/mach-omap2/omap3-restart.c
1334
1075
/* * omap3-restart.c - Code common to all OMAP3xxx machines. * * Copyright (C) 2009, 2012 Texas Instruments * Copyright (C) 2010 Nokia Corporation * Tony Lindgren <tony@atomide.com> * Santosh Shilimkar <santosh.shilimkar@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include "iomap.h" #include "common.h" #include "control.h" #include "prm3xxx.h" /* Global address base setup code */ /** * omap3xxx_restart - trigger a software restart of the SoC * @mode: the "reboot mode", see arch/arm/kernel/{setup,process}.c * @cmd: passed from the userspace program rebooting the system (if provided) * * Resets the SoC. For @cmd, see the 'reboot' syscall in * kernel/sys.c. No return value. */ void omap3xxx_restart(char mode, const char *cmd) { omap3_ctrl_write_boot_mode((cmd ? (u8)*cmd : 0)); omap3xxx_prm_dpll3_reset(); /* never returns */ while (1); }
gpl-2.0
Holong/kernel-zynq
drivers/media/pci/ttpci/av7110_av.c
1334
40325
/* * av7110_av.c: audio and video MPEG decoder stuff * * Copyright (C) 1999-2002 Ralph Metzler * & Marcus Metzler for convergence integrated media GmbH * * originally based on code by: * Copyright (C) 1998,1999 Christian Theiss <mistert@rz.fh-augsburg.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * * the project's page is at http://www.linuxtv.org/ */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/fs.h> #include "av7110.h" #include "av7110_hw.h" #include "av7110_av.h" #include "av7110_ipack.h" /* MPEG-2 (ISO 13818 / H.222.0) stream types */ #define PROG_STREAM_MAP 0xBC #define PRIVATE_STREAM1 0xBD #define PADDING_STREAM 0xBE #define PRIVATE_STREAM2 0xBF #define AUDIO_STREAM_S 0xC0 #define AUDIO_STREAM_E 0xDF #define VIDEO_STREAM_S 0xE0 #define VIDEO_STREAM_E 0xEF #define ECM_STREAM 0xF0 #define EMM_STREAM 0xF1 #define DSM_CC_STREAM 0xF2 #define ISO13522_STREAM 0xF3 #define PROG_STREAM_DIR 0xFF #define PTS_DTS_FLAGS 0xC0 //pts_dts flags #define PTS_ONLY 0x80 #define PTS_DTS 0xC0 #define TS_SIZE 188 #define TRANS_ERROR 0x80 #define PAY_START 0x40 #define TRANS_PRIO 0x20 #define PID_MASK_HI 0x1F //flags #define TRANS_SCRMBL1 0x80 #define TRANS_SCRMBL2 0x40 #define ADAPT_FIELD 0x20 
#define PAYLOAD 0x10 #define COUNT_MASK 0x0F // adaptation flags #define DISCON_IND 0x80 #define RAND_ACC_IND 0x40 #define ES_PRI_IND 0x20 #define PCR_FLAG 0x10 #define OPCR_FLAG 0x08 #define SPLICE_FLAG 0x04 #define TRANS_PRIV 0x02 #define ADAP_EXT_FLAG 0x01 // adaptation extension flags #define LTW_FLAG 0x80 #define PIECE_RATE 0x40 #define SEAM_SPLICE 0x20 static void p_to_t(u8 const *buf, long int length, u16 pid, u8 *counter, struct dvb_demux_feed *feed); static int write_ts_to_decoder(struct av7110 *av7110, int type, const u8 *buf, size_t len); int av7110_record_cb(struct dvb_filter_pes2ts *p2t, u8 *buf, size_t len) { struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *) p2t->priv; if (!(dvbdmxfeed->ts_type & TS_PACKET)) return 0; if (buf[3] == 0xe0) // video PES do not have a length in TS buf[4] = buf[5] = 0; if (dvbdmxfeed->ts_type & TS_PAYLOAD_ONLY) return dvbdmxfeed->cb.ts(buf, len, NULL, 0, &dvbdmxfeed->feed.ts, DMX_OK); else return dvb_filter_pes2ts(p2t, buf, len, 1); } static int dvb_filter_pes2ts_cb(void *priv, unsigned char *data) { struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *) priv; dvbdmxfeed->cb.ts(data, 188, NULL, 0, &dvbdmxfeed->feed.ts, DMX_OK); return 0; } int av7110_av_start_record(struct av7110 *av7110, int av, struct dvb_demux_feed *dvbdmxfeed) { int ret = 0; struct dvb_demux *dvbdmx = dvbdmxfeed->demux; dprintk(2, "av7110:%p, , dvb_demux_feed:%p\n", av7110, dvbdmxfeed); if (av7110->playing || (av7110->rec_mode & av)) return -EBUSY; av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Stop, 0); dvbdmx->recording = 1; av7110->rec_mode |= av; switch (av7110->rec_mode) { case RP_AUDIO: dvb_filter_pes2ts_init(&av7110->p2t[0], dvbdmx->pesfilter[0]->pid, dvb_filter_pes2ts_cb, (void *) dvbdmx->pesfilter[0]); ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Record, 2, AudioPES, 0); break; case RP_VIDEO: dvb_filter_pes2ts_init(&av7110->p2t[1], dvbdmx->pesfilter[1]->pid, dvb_filter_pes2ts_cb, (void *) dvbdmx->pesfilter[1]); ret = 
av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Record, 2, VideoPES, 0); break; case RP_AV: dvb_filter_pes2ts_init(&av7110->p2t[0], dvbdmx->pesfilter[0]->pid, dvb_filter_pes2ts_cb, (void *) dvbdmx->pesfilter[0]); dvb_filter_pes2ts_init(&av7110->p2t[1], dvbdmx->pesfilter[1]->pid, dvb_filter_pes2ts_cb, (void *) dvbdmx->pesfilter[1]); ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Record, 2, AV_PES, 0); break; } return ret; } int av7110_av_start_play(struct av7110 *av7110, int av) { int ret = 0; dprintk(2, "av7110:%p, \n", av7110); if (av7110->rec_mode) return -EBUSY; if (av7110->playing & av) return -EBUSY; av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Stop, 0); if (av7110->playing == RP_NONE) { av7110_ipack_reset(&av7110->ipack[0]); av7110_ipack_reset(&av7110->ipack[1]); } av7110->playing |= av; switch (av7110->playing) { case RP_AUDIO: ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Play, 2, AudioPES, 0); break; case RP_VIDEO: ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Play, 2, VideoPES, 0); av7110->sinfo = 0; break; case RP_AV: av7110->sinfo = 0; ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Play, 2, AV_PES, 0); break; } return ret; } int av7110_av_stop(struct av7110 *av7110, int av) { int ret = 0; dprintk(2, "av7110:%p, \n", av7110); if (!(av7110->playing & av) && !(av7110->rec_mode & av)) return 0; av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Stop, 0); if (av7110->playing) { av7110->playing &= ~av; switch (av7110->playing) { case RP_AUDIO: ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Play, 2, AudioPES, 0); break; case RP_VIDEO: ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Play, 2, VideoPES, 0); break; case RP_NONE: ret = av7110_set_vidmode(av7110, av7110->vidmode); break; } } else { av7110->rec_mode &= ~av; switch (av7110->rec_mode) { case RP_AUDIO: ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Record, 2, AudioPES, 0); break; case RP_VIDEO: ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Record, 2, VideoPES, 0); break; case RP_NONE: break; } } return ret; } 
int av7110_pes_play(void *dest, struct dvb_ringbuffer *buf, int dlen) { int len; u32 sync; u16 blen; if (!dlen) { wake_up(&buf->queue); return -1; } while (1) { len = dvb_ringbuffer_avail(buf); if (len < 6) { wake_up(&buf->queue); return -1; } sync = DVB_RINGBUFFER_PEEK(buf, 0) << 24; sync |= DVB_RINGBUFFER_PEEK(buf, 1) << 16; sync |= DVB_RINGBUFFER_PEEK(buf, 2) << 8; sync |= DVB_RINGBUFFER_PEEK(buf, 3); if (((sync &~ 0x0f) == 0x000001e0) || ((sync &~ 0x1f) == 0x000001c0) || (sync == 0x000001bd)) break; printk("resync\n"); DVB_RINGBUFFER_SKIP(buf, 1); } blen = DVB_RINGBUFFER_PEEK(buf, 4) << 8; blen |= DVB_RINGBUFFER_PEEK(buf, 5); blen += 6; if (len < blen || blen > dlen) { //printk("buffer empty - avail %d blen %u dlen %d\n", len, blen, dlen); wake_up(&buf->queue); return -1; } dvb_ringbuffer_read(buf, dest, (size_t) blen); dprintk(2, "pread=0x%08lx, pwrite=0x%08lx\n", (unsigned long) buf->pread, (unsigned long) buf->pwrite); wake_up(&buf->queue); return blen; } int av7110_set_volume(struct av7110 *av7110, int volleft, int volright) { int err, vol, val, balance = 0; dprintk(2, "av7110:%p, \n", av7110); av7110->mixer.volume_left = volleft; av7110->mixer.volume_right = volright; switch (av7110->adac_type) { case DVB_ADAC_TI: volleft = (volleft * 256) / 1036; volright = (volright * 256) / 1036; if (volleft > 0x3f) volleft = 0x3f; if (volright > 0x3f) volright = 0x3f; if ((err = SendDAC(av7110, 3, 0x80 + volleft))) return err; return SendDAC(av7110, 4, volright); case DVB_ADAC_CRYSTAL: volleft = 127 - volleft / 2; volright = 127 - volright / 2; i2c_writereg(av7110, 0x20, 0x03, volleft); i2c_writereg(av7110, 0x20, 0x04, volright); return 0; case DVB_ADAC_MSP34x0: vol = (volleft > volright) ? 
volleft : volright; val = (vol * 0x73 / 255) << 8; if (vol > 0) balance = ((volright - volleft) * 127) / vol; msp_writereg(av7110, MSP_WR_DSP, 0x0001, balance << 8); msp_writereg(av7110, MSP_WR_DSP, 0x0000, val); /* loudspeaker */ msp_writereg(av7110, MSP_WR_DSP, 0x0006, val); /* headphonesr */ return 0; case DVB_ADAC_MSP34x5: vol = (volleft > volright) ? volleft : volright; val = (vol * 0x73 / 255) << 8; if (vol > 0) balance = ((volright - volleft) * 127) / vol; msp_writereg(av7110, MSP_WR_DSP, 0x0001, balance << 8); msp_writereg(av7110, MSP_WR_DSP, 0x0000, val); /* loudspeaker */ return 0; } return 0; } int av7110_set_vidmode(struct av7110 *av7110, enum av7110_video_mode mode) { int ret; dprintk(2, "av7110:%p, \n", av7110); ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, LoadVidCode, 1, mode); if (!ret && !av7110->playing) { ret = ChangePIDs(av7110, av7110->pids[DMX_PES_VIDEO], av7110->pids[DMX_PES_AUDIO], av7110->pids[DMX_PES_TELETEXT], 0, av7110->pids[DMX_PES_PCR]); if (!ret) ret = av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, Scan, 0); } return ret; } static enum av7110_video_mode sw2mode[16] = { AV7110_VIDEO_MODE_PAL, AV7110_VIDEO_MODE_NTSC, AV7110_VIDEO_MODE_NTSC, AV7110_VIDEO_MODE_PAL, AV7110_VIDEO_MODE_NTSC, AV7110_VIDEO_MODE_NTSC, AV7110_VIDEO_MODE_PAL, AV7110_VIDEO_MODE_NTSC, AV7110_VIDEO_MODE_PAL, AV7110_VIDEO_MODE_PAL, AV7110_VIDEO_MODE_PAL, AV7110_VIDEO_MODE_PAL, AV7110_VIDEO_MODE_PAL, AV7110_VIDEO_MODE_PAL, AV7110_VIDEO_MODE_PAL, AV7110_VIDEO_MODE_PAL, }; static int get_video_format(struct av7110 *av7110, u8 *buf, int count) { int i; int hsize, vsize; int sw; u8 *p; int ret = 0; dprintk(2, "av7110:%p, \n", av7110); if (av7110->sinfo) return 0; for (i = 7; i < count - 10; i++) { p = buf + i; if (p[0] || p[1] || p[2] != 0x01 || p[3] != 0xb3) continue; p += 4; hsize = ((p[1] &0xF0) >> 4) | (p[0] << 4); vsize = ((p[1] &0x0F) << 8) | (p[2]); sw = (p[3] & 0x0F); ret = av7110_set_vidmode(av7110, sw2mode[sw]); if (!ret) { dprintk(2, "playback %dx%d fr=%d\n", hsize, 
vsize, sw); av7110->sinfo = 1; } break; } return ret; } /**************************************************************************** * I/O buffer management and control ****************************************************************************/ static inline long aux_ring_buffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf, unsigned long count) { unsigned long todo = count; int free; while (todo > 0) { if (dvb_ringbuffer_free(rbuf) < 2048) { if (wait_event_interruptible(rbuf->queue, (dvb_ringbuffer_free(rbuf) >= 2048))) return count - todo; } free = dvb_ringbuffer_free(rbuf); if (free > todo) free = todo; dvb_ringbuffer_write(rbuf, buf, free); todo -= free; buf += free; } return count - todo; } static void play_video_cb(u8 *buf, int count, void *priv) { struct av7110 *av7110 = (struct av7110 *) priv; dprintk(2, "av7110:%p, \n", av7110); if ((buf[3] & 0xe0) == 0xe0) { get_video_format(av7110, buf, count); aux_ring_buffer_write(&av7110->avout, buf, count); } else aux_ring_buffer_write(&av7110->aout, buf, count); } static void play_audio_cb(u8 *buf, int count, void *priv) { struct av7110 *av7110 = (struct av7110 *) priv; dprintk(2, "av7110:%p, \n", av7110); aux_ring_buffer_write(&av7110->aout, buf, count); } #define FREE_COND_TS (dvb_ringbuffer_free(rb) >= 4096) static ssize_t ts_play(struct av7110 *av7110, const char __user *buf, unsigned long count, int nonblock, int type) { struct dvb_ringbuffer *rb; u8 *kb; unsigned long todo = count; dprintk(2, "%s: type %d cnt %lu\n", __func__, type, count); rb = (type) ? 
&av7110->avout : &av7110->aout; kb = av7110->kbuf[type]; if (!kb) return -ENOBUFS; if (nonblock && !FREE_COND_TS) return -EWOULDBLOCK; while (todo >= TS_SIZE) { if (!FREE_COND_TS) { if (nonblock) return count - todo; if (wait_event_interruptible(rb->queue, FREE_COND_TS)) return count - todo; } if (copy_from_user(kb, buf, TS_SIZE)) return -EFAULT; write_ts_to_decoder(av7110, type, kb, TS_SIZE); todo -= TS_SIZE; buf += TS_SIZE; } return count - todo; } #define FREE_COND (dvb_ringbuffer_free(&av7110->avout) >= 20 * 1024 && \ dvb_ringbuffer_free(&av7110->aout) >= 20 * 1024) static ssize_t dvb_play(struct av7110 *av7110, const char __user *buf, unsigned long count, int nonblock, int type) { unsigned long todo = count, n; dprintk(2, "av7110:%p, \n", av7110); if (!av7110->kbuf[type]) return -ENOBUFS; if (nonblock && !FREE_COND) return -EWOULDBLOCK; while (todo > 0) { if (!FREE_COND) { if (nonblock) return count - todo; if (wait_event_interruptible(av7110->avout.queue, FREE_COND)) return count - todo; } n = todo; if (n > IPACKS * 2) n = IPACKS * 2; if (copy_from_user(av7110->kbuf[type], buf, n)) return -EFAULT; av7110_ipack_instant_repack(av7110->kbuf[type], n, &av7110->ipack[type]); todo -= n; buf += n; } return count - todo; } static ssize_t dvb_play_kernel(struct av7110 *av7110, const u8 *buf, unsigned long count, int nonblock, int type) { unsigned long todo = count, n; dprintk(2, "av7110:%p, \n", av7110); if (!av7110->kbuf[type]) return -ENOBUFS; if (nonblock && !FREE_COND) return -EWOULDBLOCK; while (todo > 0) { if (!FREE_COND) { if (nonblock) return count - todo; if (wait_event_interruptible(av7110->avout.queue, FREE_COND)) return count - todo; } n = todo; if (n > IPACKS * 2) n = IPACKS * 2; av7110_ipack_instant_repack(buf, n, &av7110->ipack[type]); todo -= n; buf += n; } return count - todo; } static ssize_t dvb_aplay(struct av7110 *av7110, const char __user *buf, unsigned long count, int nonblock, int type) { unsigned long todo = count, n; dprintk(2, "av7110:%p, 
\n", av7110); if (!av7110->kbuf[type]) return -ENOBUFS; if (nonblock && dvb_ringbuffer_free(&av7110->aout) < 20 * 1024) return -EWOULDBLOCK; while (todo > 0) { if (dvb_ringbuffer_free(&av7110->aout) < 20 * 1024) { if (nonblock) return count - todo; if (wait_event_interruptible(av7110->aout.queue, (dvb_ringbuffer_free(&av7110->aout) >= 20 * 1024))) return count-todo; } n = todo; if (n > IPACKS * 2) n = IPACKS * 2; if (copy_from_user(av7110->kbuf[type], buf, n)) return -EFAULT; av7110_ipack_instant_repack(av7110->kbuf[type], n, &av7110->ipack[type]); todo -= n; buf += n; } return count - todo; } void av7110_p2t_init(struct av7110_p2t *p, struct dvb_demux_feed *feed) { memset(p->pes, 0, TS_SIZE); p->counter = 0; p->pos = 0; p->frags = 0; if (feed) p->feed = feed; } static void clear_p2t(struct av7110_p2t *p) { memset(p->pes, 0, TS_SIZE); // p->counter = 0; p->pos = 0; p->frags = 0; } static int find_pes_header(u8 const *buf, long int length, int *frags) { int c = 0; int found = 0; *frags = 0; while (c < length - 3 && !found) { if (buf[c] == 0x00 && buf[c + 1] == 0x00 && buf[c + 2] == 0x01) { switch ( buf[c + 3] ) { case PROG_STREAM_MAP: case PRIVATE_STREAM2: case PROG_STREAM_DIR: case ECM_STREAM : case EMM_STREAM : case PADDING_STREAM : case DSM_CC_STREAM : case ISO13522_STREAM: case PRIVATE_STREAM1: case AUDIO_STREAM_S ... AUDIO_STREAM_E: case VIDEO_STREAM_S ... 
VIDEO_STREAM_E: found = 1; break; default: c++; break; } } else c++; } if (c == length - 3 && !found) { if (buf[length - 1] == 0x00) *frags = 1; if (buf[length - 2] == 0x00 && buf[length - 1] == 0x00) *frags = 2; if (buf[length - 3] == 0x00 && buf[length - 2] == 0x00 && buf[length - 1] == 0x01) *frags = 3; return -1; } return c; } void av7110_p2t_write(u8 const *buf, long int length, u16 pid, struct av7110_p2t *p) { int c, c2, l, add; int check, rest; c = 0; c2 = 0; if (p->frags){ check = 0; switch(p->frags) { case 1: if (buf[c] == 0x00 && buf[c + 1] == 0x01) { check = 1; c += 2; } break; case 2: if (buf[c] == 0x01) { check = 1; c++; } break; case 3: check = 1; } if (check) { switch (buf[c]) { case PROG_STREAM_MAP: case PRIVATE_STREAM2: case PROG_STREAM_DIR: case ECM_STREAM : case EMM_STREAM : case PADDING_STREAM : case DSM_CC_STREAM : case ISO13522_STREAM: case PRIVATE_STREAM1: case AUDIO_STREAM_S ... AUDIO_STREAM_E: case VIDEO_STREAM_S ... VIDEO_STREAM_E: p->pes[0] = 0x00; p->pes[1] = 0x00; p->pes[2] = 0x01; p->pes[3] = buf[c]; p->pos = 4; memcpy(p->pes + p->pos, buf + c, (TS_SIZE - 4) - p->pos); c += (TS_SIZE - 4) - p->pos; p_to_t(p->pes, (TS_SIZE - 4), pid, &p->counter, p->feed); clear_p2t(p); break; default: c = 0; break; } } p->frags = 0; } if (p->pos) { c2 = find_pes_header(buf + c, length - c, &p->frags); if (c2 >= 0 && c2 < (TS_SIZE - 4) - p->pos) l = c2+c; else l = (TS_SIZE - 4) - p->pos; memcpy(p->pes + p->pos, buf, l); c += l; p->pos += l; p_to_t(p->pes, p->pos, pid, &p->counter, p->feed); clear_p2t(p); } add = 0; while (c < length) { c2 = find_pes_header(buf + c + add, length - c - add, &p->frags); if (c2 >= 0) { c2 += c + add; if (c2 > c){ p_to_t(buf + c, c2 - c, pid, &p->counter, p->feed); c = c2; clear_p2t(p); add = 0; } else add = 1; } else { l = length - c; rest = l % (TS_SIZE - 4); l -= rest; p_to_t(buf + c, l, pid, &p->counter, p->feed); memcpy(p->pes, buf + c + l, rest); p->pos = rest; c = length; } } } static int write_ts_header2(u16 pid, u8 
*counter, int pes_start, u8 *buf, u8 length) { int i; int c = 0; int fill; u8 tshead[4] = { 0x47, 0x00, 0x00, 0x10 }; fill = (TS_SIZE - 4) - length; if (pes_start) tshead[1] = 0x40; if (fill) tshead[3] = 0x30; tshead[1] |= (u8)((pid & 0x1F00) >> 8); tshead[2] |= (u8)(pid & 0x00FF); tshead[3] |= ((*counter)++ & 0x0F); memcpy(buf, tshead, 4); c += 4; if (fill) { buf[4] = fill - 1; c++; if (fill > 1) { buf[5] = 0x00; c++; } for (i = 6; i < fill + 4; i++) { buf[i] = 0xFF; c++; } } return c; } static void p_to_t(u8 const *buf, long int length, u16 pid, u8 *counter, struct dvb_demux_feed *feed) { int l, pes_start; u8 obuf[TS_SIZE]; long c = 0; pes_start = 0; if (length > 3 && buf[0] == 0x00 && buf[1] == 0x00 && buf[2] == 0x01) switch (buf[3]) { case PROG_STREAM_MAP: case PRIVATE_STREAM2: case PROG_STREAM_DIR: case ECM_STREAM : case EMM_STREAM : case PADDING_STREAM : case DSM_CC_STREAM : case ISO13522_STREAM: case PRIVATE_STREAM1: case AUDIO_STREAM_S ... AUDIO_STREAM_E: case VIDEO_STREAM_S ... 
VIDEO_STREAM_E: pes_start = 1; break; default: break; } while (c < length) { memset(obuf, 0, TS_SIZE); if (length - c >= (TS_SIZE - 4)){ l = write_ts_header2(pid, counter, pes_start, obuf, (TS_SIZE - 4)); memcpy(obuf + l, buf + c, TS_SIZE - l); c += TS_SIZE - l; } else { l = write_ts_header2(pid, counter, pes_start, obuf, length - c); memcpy(obuf + l, buf + c, TS_SIZE - l); c = length; } feed->cb.ts(obuf, 188, NULL, 0, &feed->feed.ts, DMX_OK); pes_start = 0; } } static int write_ts_to_decoder(struct av7110 *av7110, int type, const u8 *buf, size_t len) { struct ipack *ipack = &av7110->ipack[type]; if (buf[1] & TRANS_ERROR) { av7110_ipack_reset(ipack); return -1; } if (!(buf[3] & PAYLOAD)) return -1; if (buf[1] & PAY_START) av7110_ipack_flush(ipack); if (buf[3] & ADAPT_FIELD) { len -= buf[4] + 1; buf += buf[4] + 1; if (!len) return 0; } av7110_ipack_instant_repack(buf + 4, len - 4, ipack); return 0; } int av7110_write_to_decoder(struct dvb_demux_feed *feed, const u8 *buf, size_t len) { struct dvb_demux *demux = feed->demux; struct av7110 *av7110 = (struct av7110 *) demux->priv; dprintk(2, "av7110:%p, \n", av7110); if (av7110->full_ts && demux->dmx.frontend->source != DMX_MEMORY_FE) return 0; switch (feed->pes_type) { case 0: if (av7110->audiostate.stream_source == AUDIO_SOURCE_MEMORY) return -EINVAL; break; case 1: if (av7110->videostate.stream_source == VIDEO_SOURCE_MEMORY) return -EINVAL; break; default: return -1; } return write_ts_to_decoder(av7110, feed->pes_type, buf, len); } /****************************************************************************** * Video MPEG decoder events ******************************************************************************/ void dvb_video_add_event(struct av7110 *av7110, struct video_event *event) { struct dvb_video_events *events = &av7110->video_events; int wp; spin_lock_bh(&events->lock); wp = (events->eventw + 1) % MAX_VIDEO_EVENT; if (wp == events->eventr) { events->overflow = 1; events->eventr = (events->eventr + 1) % 
MAX_VIDEO_EVENT; } //FIXME: timestamp? memcpy(&events->events[events->eventw], event, sizeof(struct video_event)); events->eventw = wp; spin_unlock_bh(&events->lock); wake_up_interruptible(&events->wait_queue); } static int dvb_video_get_event (struct av7110 *av7110, struct video_event *event, int flags) { struct dvb_video_events *events = &av7110->video_events; if (events->overflow) { events->overflow = 0; return -EOVERFLOW; } if (events->eventw == events->eventr) { int ret; if (flags & O_NONBLOCK) return -EWOULDBLOCK; ret = wait_event_interruptible(events->wait_queue, events->eventw != events->eventr); if (ret < 0) return ret; } spin_lock_bh(&events->lock); memcpy(event, &events->events[events->eventr], sizeof(struct video_event)); events->eventr = (events->eventr + 1) % MAX_VIDEO_EVENT; spin_unlock_bh(&events->lock); return 0; } /****************************************************************************** * DVB device file operations ******************************************************************************/ static unsigned int dvb_video_poll(struct file *file, poll_table *wait) { struct dvb_device *dvbdev = file->private_data; struct av7110 *av7110 = dvbdev->priv; unsigned int mask = 0; dprintk(2, "av7110:%p, \n", av7110); if ((file->f_flags & O_ACCMODE) != O_RDONLY) poll_wait(file, &av7110->avout.queue, wait); poll_wait(file, &av7110->video_events.wait_queue, wait); if (av7110->video_events.eventw != av7110->video_events.eventr) mask = POLLPRI; if ((file->f_flags & O_ACCMODE) != O_RDONLY) { if (av7110->playing) { if (FREE_COND) mask |= (POLLOUT | POLLWRNORM); } else { /* if not playing: may play if asked for */ mask |= (POLLOUT | POLLWRNORM); } } return mask; } static ssize_t dvb_video_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct dvb_device *dvbdev = file->private_data; struct av7110 *av7110 = dvbdev->priv; unsigned char c; dprintk(2, "av7110:%p, \n", av7110); if ((file->f_flags & O_ACCMODE) == O_RDONLY) return 
-EPERM; if (av7110->videostate.stream_source != VIDEO_SOURCE_MEMORY) return -EPERM; if (get_user(c, buf)) return -EFAULT; if (c == 0x47 && count % TS_SIZE == 0) return ts_play(av7110, buf, count, file->f_flags & O_NONBLOCK, 1); else return dvb_play(av7110, buf, count, file->f_flags & O_NONBLOCK, 1); } static unsigned int dvb_audio_poll(struct file *file, poll_table *wait) { struct dvb_device *dvbdev = file->private_data; struct av7110 *av7110 = dvbdev->priv; unsigned int mask = 0; dprintk(2, "av7110:%p, \n", av7110); poll_wait(file, &av7110->aout.queue, wait); if (av7110->playing) { if (dvb_ringbuffer_free(&av7110->aout) >= 20 * 1024) mask |= (POLLOUT | POLLWRNORM); } else /* if not playing: may play if asked for */ mask = (POLLOUT | POLLWRNORM); return mask; } static ssize_t dvb_audio_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct dvb_device *dvbdev = file->private_data; struct av7110 *av7110 = dvbdev->priv; unsigned char c; dprintk(2, "av7110:%p, \n", av7110); if (av7110->audiostate.stream_source != AUDIO_SOURCE_MEMORY) { printk(KERN_ERR "not audio source memory\n"); return -EPERM; } if (get_user(c, buf)) return -EFAULT; if (c == 0x47 && count % TS_SIZE == 0) return ts_play(av7110, buf, count, file->f_flags & O_NONBLOCK, 0); else return dvb_aplay(av7110, buf, count, file->f_flags & O_NONBLOCK, 0); } static u8 iframe_header[] = { 0x00, 0x00, 0x01, 0xe0, 0x00, 0x00, 0x80, 0x00, 0x00 }; #define MIN_IFRAME 400000 static int play_iframe(struct av7110 *av7110, char __user *buf, unsigned int len, int nonblock) { unsigned i, n; int progressive = 0; int match = 0; dprintk(2, "av7110:%p, \n", av7110); if (!(av7110->playing & RP_VIDEO)) { if (av7110_av_start_play(av7110, RP_VIDEO) < 0) return -EBUSY; } /* search in buf for instances of 00 00 01 b5 1? 
*/ for (i = 0; i < len; i++) { unsigned char c; if (get_user(c, buf + i)) return -EFAULT; if (match == 5) { progressive = c & 0x08; match = 0; } if (c == 0x00) { match = (match == 1 || match == 2) ? 2 : 1; continue; } switch (match++) { case 2: if (c == 0x01) continue; break; case 3: if (c == 0xb5) continue; break; case 4: if ((c & 0xf0) == 0x10) continue; break; } match = 0; } /* setting n always > 1, fixes problems when playing stillframes consisting of I- and P-Frames */ n = MIN_IFRAME / len + 1; /* FIXME: nonblock? */ dvb_play_kernel(av7110, iframe_header, sizeof(iframe_header), 0, 1); for (i = 0; i < n; i++) dvb_play(av7110, buf, len, 0, 1); av7110_ipack_flush(&av7110->ipack[1]); if (progressive) return vidcom(av7110, AV_VIDEO_CMD_FREEZE, 1); else return 0; } static int dvb_video_ioctl(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; struct av7110 *av7110 = dvbdev->priv; unsigned long arg = (unsigned long) parg; int ret = 0; dprintk(1, "av7110:%p, cmd=%04x\n", av7110,cmd); if ((file->f_flags & O_ACCMODE) == O_RDONLY) { if ( cmd != VIDEO_GET_STATUS && cmd != VIDEO_GET_EVENT && cmd != VIDEO_GET_SIZE ) { return -EPERM; } } if (mutex_lock_interruptible(&av7110->ioctl_mutex)) return -ERESTARTSYS; switch (cmd) { case VIDEO_STOP: av7110->videostate.play_state = VIDEO_STOPPED; if (av7110->videostate.stream_source == VIDEO_SOURCE_MEMORY) ret = av7110_av_stop(av7110, RP_VIDEO); else ret = vidcom(av7110, AV_VIDEO_CMD_STOP, av7110->videostate.video_blank ? 
0 : 1); if (!ret) av7110->trickmode = TRICK_NONE; break; case VIDEO_PLAY: av7110->trickmode = TRICK_NONE; if (av7110->videostate.play_state == VIDEO_FREEZED) { av7110->videostate.play_state = VIDEO_PLAYING; ret = vidcom(av7110, AV_VIDEO_CMD_PLAY, 0); if (ret) break; } if (av7110->videostate.stream_source == VIDEO_SOURCE_MEMORY) { if (av7110->playing == RP_AV) { ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Stop, 0); if (ret) break; av7110->playing &= ~RP_VIDEO; } ret = av7110_av_start_play(av7110, RP_VIDEO); } if (!ret) ret = vidcom(av7110, AV_VIDEO_CMD_PLAY, 0); if (!ret) av7110->videostate.play_state = VIDEO_PLAYING; break; case VIDEO_FREEZE: av7110->videostate.play_state = VIDEO_FREEZED; if (av7110->playing & RP_VIDEO) ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Pause, 0); else ret = vidcom(av7110, AV_VIDEO_CMD_FREEZE, 1); if (!ret) av7110->trickmode = TRICK_FREEZE; break; case VIDEO_CONTINUE: if (av7110->playing & RP_VIDEO) ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Continue, 0); if (!ret) ret = vidcom(av7110, AV_VIDEO_CMD_PLAY, 0); if (!ret) { av7110->videostate.play_state = VIDEO_PLAYING; av7110->trickmode = TRICK_NONE; } break; case VIDEO_SELECT_SOURCE: av7110->videostate.stream_source = (video_stream_source_t) arg; break; case VIDEO_SET_BLANK: av7110->videostate.video_blank = (int) arg; break; case VIDEO_GET_STATUS: memcpy(parg, &av7110->videostate, sizeof(struct video_status)); break; case VIDEO_GET_EVENT: ret = dvb_video_get_event(av7110, parg, file->f_flags); break; case VIDEO_GET_SIZE: memcpy(parg, &av7110->video_size, sizeof(video_size_t)); break; case VIDEO_SET_DISPLAY_FORMAT: { video_displayformat_t format = (video_displayformat_t) arg; switch (format) { case VIDEO_PAN_SCAN: av7110->display_panscan = VID_PAN_SCAN_PREF; break; case VIDEO_LETTER_BOX: av7110->display_panscan = VID_VC_AND_PS_PREF; break; case VIDEO_CENTER_CUT_OUT: av7110->display_panscan = VID_CENTRE_CUT_PREF; break; default: ret = -EINVAL; } if (ret < 0) break; 
av7110->videostate.display_format = format; ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetPanScanType, 1, av7110->display_panscan); break; } case VIDEO_SET_FORMAT: if (arg > 1) { ret = -EINVAL; break; } av7110->display_ar = arg; ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetMonitorType, 1, (u16) arg); break; case VIDEO_STILLPICTURE: { struct video_still_picture *pic = (struct video_still_picture *) parg; av7110->videostate.stream_source = VIDEO_SOURCE_MEMORY; dvb_ringbuffer_flush_spinlock_wakeup(&av7110->avout); ret = play_iframe(av7110, pic->iFrame, pic->size, file->f_flags & O_NONBLOCK); break; } case VIDEO_FAST_FORWARD: //note: arg is ignored by firmware if (av7110->playing & RP_VIDEO) ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Scan_I, 2, AV_PES, 0); else ret = vidcom(av7110, AV_VIDEO_CMD_FFWD, arg); if (!ret) { av7110->trickmode = TRICK_FAST; av7110->videostate.play_state = VIDEO_PLAYING; } break; case VIDEO_SLOWMOTION: if (av7110->playing&RP_VIDEO) { if (av7110->trickmode != TRICK_SLOW) ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Slow, 2, 0, 0); if (!ret) ret = vidcom(av7110, AV_VIDEO_CMD_SLOW, arg); } else { ret = vidcom(av7110, AV_VIDEO_CMD_PLAY, 0); if (!ret) ret = vidcom(av7110, AV_VIDEO_CMD_STOP, 0); if (!ret) ret = vidcom(av7110, AV_VIDEO_CMD_SLOW, arg); } if (!ret) { av7110->trickmode = TRICK_SLOW; av7110->videostate.play_state = VIDEO_PLAYING; } break; case VIDEO_GET_CAPABILITIES: *(int *)parg = VIDEO_CAP_MPEG1 | VIDEO_CAP_MPEG2 | VIDEO_CAP_SYS | VIDEO_CAP_PROG; break; case VIDEO_CLEAR_BUFFER: dvb_ringbuffer_flush_spinlock_wakeup(&av7110->avout); av7110_ipack_reset(&av7110->ipack[1]); if (av7110->playing == RP_AV) { ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Play, 2, AV_PES, 0); if (ret) break; if (av7110->trickmode == TRICK_FAST) ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Scan_I, 2, AV_PES, 0); if (av7110->trickmode == TRICK_SLOW) { ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Slow, 2, 0, 0); if (!ret) ret = vidcom(av7110, 
AV_VIDEO_CMD_SLOW, arg); } if (av7110->trickmode == TRICK_FREEZE) ret = vidcom(av7110, AV_VIDEO_CMD_STOP, 1); } break; case VIDEO_SET_STREAMTYPE: break; default: ret = -ENOIOCTLCMD; break; } mutex_unlock(&av7110->ioctl_mutex); return ret; } static int dvb_audio_ioctl(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; struct av7110 *av7110 = dvbdev->priv; unsigned long arg = (unsigned long) parg; int ret = 0; dprintk(1, "av7110:%p, cmd=%04x\n", av7110,cmd); if (((file->f_flags & O_ACCMODE) == O_RDONLY) && (cmd != AUDIO_GET_STATUS)) return -EPERM; if (mutex_lock_interruptible(&av7110->ioctl_mutex)) return -ERESTARTSYS; switch (cmd) { case AUDIO_STOP: if (av7110->audiostate.stream_source == AUDIO_SOURCE_MEMORY) ret = av7110_av_stop(av7110, RP_AUDIO); else ret = audcom(av7110, AUDIO_CMD_MUTE); if (!ret) av7110->audiostate.play_state = AUDIO_STOPPED; break; case AUDIO_PLAY: if (av7110->audiostate.stream_source == AUDIO_SOURCE_MEMORY) ret = av7110_av_start_play(av7110, RP_AUDIO); if (!ret) ret = audcom(av7110, AUDIO_CMD_UNMUTE); if (!ret) av7110->audiostate.play_state = AUDIO_PLAYING; break; case AUDIO_PAUSE: ret = audcom(av7110, AUDIO_CMD_MUTE); if (!ret) av7110->audiostate.play_state = AUDIO_PAUSED; break; case AUDIO_CONTINUE: if (av7110->audiostate.play_state == AUDIO_PAUSED) { av7110->audiostate.play_state = AUDIO_PLAYING; ret = audcom(av7110, AUDIO_CMD_UNMUTE | AUDIO_CMD_PCM16); } break; case AUDIO_SELECT_SOURCE: av7110->audiostate.stream_source = (audio_stream_source_t) arg; break; case AUDIO_SET_MUTE: { ret = audcom(av7110, arg ? AUDIO_CMD_MUTE : AUDIO_CMD_UNMUTE); if (!ret) av7110->audiostate.mute_state = (int) arg; break; } case AUDIO_SET_AV_SYNC: av7110->audiostate.AV_sync_state = (int) arg; ret = audcom(av7110, arg ? 
AUDIO_CMD_SYNC_ON : AUDIO_CMD_SYNC_OFF); break; case AUDIO_SET_BYPASS_MODE: if (FW_VERSION(av7110->arm_app) < 0x2621) ret = -EINVAL; av7110->audiostate.bypass_mode = (int)arg; break; case AUDIO_CHANNEL_SELECT: av7110->audiostate.channel_select = (audio_channel_select_t) arg; switch(av7110->audiostate.channel_select) { case AUDIO_STEREO: ret = audcom(av7110, AUDIO_CMD_STEREO); if (!ret) { if (av7110->adac_type == DVB_ADAC_CRYSTAL) i2c_writereg(av7110, 0x20, 0x02, 0x49); else if (av7110->adac_type == DVB_ADAC_MSP34x5) msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0220); } break; case AUDIO_MONO_LEFT: ret = audcom(av7110, AUDIO_CMD_MONO_L); if (!ret) { if (av7110->adac_type == DVB_ADAC_CRYSTAL) i2c_writereg(av7110, 0x20, 0x02, 0x4a); else if (av7110->adac_type == DVB_ADAC_MSP34x5) msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0200); } break; case AUDIO_MONO_RIGHT: ret = audcom(av7110, AUDIO_CMD_MONO_R); if (!ret) { if (av7110->adac_type == DVB_ADAC_CRYSTAL) i2c_writereg(av7110, 0x20, 0x02, 0x45); else if (av7110->adac_type == DVB_ADAC_MSP34x5) msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0210); } break; default: ret = -EINVAL; break; } break; case AUDIO_GET_STATUS: memcpy(parg, &av7110->audiostate, sizeof(struct audio_status)); break; case AUDIO_GET_CAPABILITIES: if (FW_VERSION(av7110->arm_app) < 0x2621) *(unsigned int *)parg = AUDIO_CAP_LPCM | AUDIO_CAP_MP1 | AUDIO_CAP_MP2; else *(unsigned int *)parg = AUDIO_CAP_LPCM | AUDIO_CAP_DTS | AUDIO_CAP_AC3 | AUDIO_CAP_MP1 | AUDIO_CAP_MP2; break; case AUDIO_CLEAR_BUFFER: dvb_ringbuffer_flush_spinlock_wakeup(&av7110->aout); av7110_ipack_reset(&av7110->ipack[0]); if (av7110->playing == RP_AV) ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Play, 2, AV_PES, 0); break; case AUDIO_SET_ID: break; case AUDIO_SET_MIXER: { struct audio_mixer *amix = (struct audio_mixer *)parg; ret = av7110_set_volume(av7110, amix->volume_left, amix->volume_right); break; } case AUDIO_SET_STREAMTYPE: break; default: ret = -ENOIOCTLCMD; } 
	/* tail of dvb_audio_ioctl(): drop the ioctl lock and return */
	mutex_unlock(&av7110->ioctl_mutex);
	return ret;
}

/*
 * Open the DVB video device.  When opened for writing, both A/V output
 * ring buffers are flushed and the blanking/sync/source/event state is
 * reset so a new playback session starts clean.
 */
static int dvb_video_open(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct av7110 *av7110 = dvbdev->priv;
	int err;

	dprintk(2, "av7110:%p, \n", av7110);

	if ((err = dvb_generic_open(inode, file)) < 0)
		return err;

	if ((file->f_flags & O_ACCMODE) != O_RDONLY) {
		dvb_ringbuffer_flush_spinlock_wakeup(&av7110->aout);
		dvb_ringbuffer_flush_spinlock_wakeup(&av7110->avout);
		av7110->video_blank = 1;
		av7110->audiostate.AV_sync_state = 1;
		av7110->videostate.stream_source = VIDEO_SOURCE_DEMUX;

		/* empty event queue */
		av7110->video_events.eventr = av7110->video_events.eventw = 0;
	}

	return 0;
}

/* Release the video device; video replay is stopped only for writers. */
static int dvb_video_release(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct av7110 *av7110 = dvbdev->priv;

	dprintk(2, "av7110:%p, \n", av7110);

	if ((file->f_flags & O_ACCMODE) != O_RDONLY) {
		av7110_av_stop(av7110, RP_VIDEO);
	}

	return dvb_generic_release(inode, file);
}

/*
 * Open the DVB audio device: flush the audio output ring buffer and
 * default the audio stream source back to the demux.
 */
static int dvb_audio_open(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct av7110 *av7110 = dvbdev->priv;
	int err = dvb_generic_open(inode, file);

	dprintk(2, "av7110:%p, \n", av7110);

	if (err < 0)
		return err;
	dvb_ringbuffer_flush_spinlock_wakeup(&av7110->aout);
	av7110->audiostate.stream_source = AUDIO_SOURCE_DEMUX;

	return 0;
}

/* Release the audio device; NOTE(review): unlike the video release this
 * stops replay unconditionally, with no O_ACCMODE check. */
static int dvb_audio_release(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct av7110 *av7110 = dvbdev->priv;

	dprintk(2, "av7110:%p, \n", av7110);

	av7110_av_stop(av7110, RP_AUDIO);

	return dvb_generic_release(inode, file);
}

/******************************************************************************
 * driver registration
 ******************************************************************************/

static const struct file_operations dvb_video_fops = {
	.owner		= THIS_MODULE,
	.write		= dvb_video_write,
	.unlocked_ioctl	= dvb_generic_ioctl,
	.open		= dvb_video_open,
	.release	= dvb_video_release,
	.poll		= dvb_video_poll,
	.llseek		= noop_llseek,
};

static struct dvb_device dvbdev_video = {
	.priv		= NULL,
	.users		= 6,
	.readers	= 5,	/* arbitrary */
	.writers	= 1,
	.fops		= &dvb_video_fops,
	.kernel_ioctl	= dvb_video_ioctl,
};

static const struct file_operations dvb_audio_fops = {
	.owner		= THIS_MODULE,
	.write		= dvb_audio_write,
	.unlocked_ioctl	= dvb_generic_ioctl,
	.open		= dvb_audio_open,
	.release	= dvb_audio_release,
	.poll		= dvb_audio_poll,
	.llseek		= noop_llseek,
};

static struct dvb_device dvbdev_audio = {
	.priv		= NULL,
	.users		= 1,
	.writers	= 1,
	.fops		= &dvb_audio_fops,
	.kernel_ioctl	= dvb_audio_ioctl,
};

/*
 * Register the video and audio DVB devices after initialising all
 * audio/video state to power-on defaults.  Always returns 0; the
 * return values of dvb_register_device() are not checked here.
 */
int av7110_av_register(struct av7110 *av7110)
{
	av7110->audiostate.AV_sync_state = 0;
	av7110->audiostate.mute_state = 0;
	av7110->audiostate.play_state = AUDIO_STOPPED;
	av7110->audiostate.stream_source = AUDIO_SOURCE_DEMUX;
	av7110->audiostate.channel_select = AUDIO_STEREO;
	av7110->audiostate.bypass_mode = 0;

	av7110->videostate.video_blank = 0;
	av7110->videostate.play_state = VIDEO_STOPPED;
	av7110->videostate.stream_source = VIDEO_SOURCE_DEMUX;
	av7110->videostate.video_format = VIDEO_FORMAT_4_3;
	av7110->videostate.display_format = VIDEO_LETTER_BOX;
	av7110->display_ar = VIDEO_FORMAT_4_3;
	av7110->display_panscan = VID_VC_AND_PS_PREF;

	init_waitqueue_head(&av7110->video_events.wait_queue);
	spin_lock_init(&av7110->video_events.lock);
	av7110->video_events.eventw = av7110->video_events.eventr = 0;
	av7110->video_events.overflow = 0;
	memset(&av7110->video_size, 0, sizeof (video_size_t));

	dvb_register_device(&av7110->dvb_adapter, &av7110->video_dev,
			    &dvbdev_video, av7110, DVB_DEVICE_VIDEO);

	dvb_register_device(&av7110->dvb_adapter, &av7110->audio_dev,
			    &dvbdev_audio, av7110, DVB_DEVICE_AUDIO);

	return 0;
}

/* Unregister both DVB devices (reverse order of registration). */
void av7110_av_unregister(struct av7110 *av7110)
{
	dvb_unregister_device(av7110->audio_dev);
	dvb_unregister_device(av7110->video_dev);
}

/*
 * Set up the two ipack PES parsers (index 0: audio, 1: video) and carve
 * the A/V output ring buffers plus the two kbuf scratch areas out of
 * av7110->iobuf.  Returns 0 on success or the negative error from
 * av7110_ipack_init(); if the second parser fails to initialise, the
 * first one is freed again before returning.
 */
int av7110_av_init(struct av7110 *av7110)
{
	void (*play[])(u8 *, int, void *) = { play_audio_cb, play_video_cb };
	int i, ret;

	for (i = 0; i < 2; i++) {
		struct ipack *ipack = av7110->ipack + i;

		ret = av7110_ipack_init(ipack, IPACKS, play[i]);
		if (ret < 0) {
			if (i)
				av7110_ipack_free(--ipack);
			goto out;
		}
		ipack->data = av7110;
	}

	dvb_ringbuffer_init(&av7110->avout, av7110->iobuf, AVOUTLEN);
	dvb_ringbuffer_init(&av7110->aout, av7110->iobuf + AVOUTLEN, AOUTLEN);

	/* scratch buffers live directly behind the ring buffers in iobuf */
	av7110->kbuf[0] = (u8 *)(av7110->iobuf + AVOUTLEN + AOUTLEN + BMPLEN);
	av7110->kbuf[1] = av7110->kbuf[0] + 2 * IPACKS;
out:
	return ret;
}

/* Free both ipack parsers allocated by av7110_av_init(). */
void av7110_av_exit(struct av7110 *av7110)
{
	av7110_ipack_free(&av7110->ipack[0]);
	av7110_ipack_free(&av7110->ipack[1]);
}
gpl-2.0
visi0nary/android_kernel_alps_k05ts_a
arch/arm/mach-mmp/mmp2.c
2102
4483
/* * linux/arch/arm/mach-mmp/mmp2.c * * code name MMP2 * * Copyright (C) 2009 Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/platform_device.h> #include <asm/hardware/cache-tauros2.h> #include <asm/mach/time.h> #include <mach/addr-map.h> #include <mach/regs-apbc.h> #include <mach/cputype.h> #include <mach/irqs.h> #include <mach/dma.h> #include <mach/mfp.h> #include <mach/devices.h> #include <mach/mmp2.h> #include "common.h" #define MFPR_VIRT_BASE (APB_VIRT_BASE + 0x1e000) static struct mfp_addr_map mmp2_addr_map[] __initdata = { MFP_ADDR_X(GPIO0, GPIO58, 0x54), MFP_ADDR_X(GPIO59, GPIO73, 0x280), MFP_ADDR_X(GPIO74, GPIO101, 0x170), MFP_ADDR(GPIO102, 0x0), MFP_ADDR(GPIO103, 0x4), MFP_ADDR(GPIO104, 0x1fc), MFP_ADDR(GPIO105, 0x1f8), MFP_ADDR(GPIO106, 0x1f4), MFP_ADDR(GPIO107, 0x1f0), MFP_ADDR(GPIO108, 0x21c), MFP_ADDR(GPIO109, 0x218), MFP_ADDR(GPIO110, 0x214), MFP_ADDR(GPIO111, 0x200), MFP_ADDR(GPIO112, 0x244), MFP_ADDR(GPIO113, 0x25c), MFP_ADDR(GPIO114, 0x164), MFP_ADDR_X(GPIO115, GPIO122, 0x260), MFP_ADDR(GPIO123, 0x148), MFP_ADDR_X(GPIO124, GPIO141, 0xc), MFP_ADDR(GPIO142, 0x8), MFP_ADDR_X(GPIO143, GPIO151, 0x220), MFP_ADDR_X(GPIO152, GPIO153, 0x248), MFP_ADDR_X(GPIO154, GPIO155, 0x254), MFP_ADDR_X(GPIO156, GPIO159, 0x14c), MFP_ADDR(GPIO160, 0x250), MFP_ADDR(GPIO161, 0x210), MFP_ADDR(GPIO162, 0x20c), MFP_ADDR(GPIO163, 0x208), MFP_ADDR(GPIO164, 0x204), MFP_ADDR(GPIO165, 0x1ec), MFP_ADDR(GPIO166, 0x1e8), MFP_ADDR(GPIO167, 0x1e4), MFP_ADDR(GPIO168, 0x1e0), MFP_ADDR_X(TWSI1_SCL, TWSI1_SDA, 0x140), MFP_ADDR_X(TWSI4_SCL, TWSI4_SDA, 0x2bc), MFP_ADDR(PMIC_INT, 0x2c4), MFP_ADDR(CLK_REQ, 0x160), MFP_ADDR_END, }; void mmp2_clear_pmic_int(void) { void __iomem *mfpr_pmic; unsigned long data; mfpr_pmic 
= APB_VIRT_BASE + 0x1e000 + 0x2c4; data = __raw_readl(mfpr_pmic); __raw_writel(data | (1 << 6), mfpr_pmic); __raw_writel(data, mfpr_pmic); } void __init mmp2_init_irq(void) { mmp2_init_icu(); } static int __init mmp2_init(void) { if (cpu_is_mmp2()) { #ifdef CONFIG_CACHE_TAUROS2 tauros2_init(0); #endif mfp_init_base(MFPR_VIRT_BASE); mfp_init_addr(mmp2_addr_map); pxa_init_dma(IRQ_MMP2_DMA_RIQ, 16); mmp2_clk_init(); } return 0; } postcore_initcall(mmp2_init); #define APBC_TIMERS APBC_REG(0x024) void __init mmp2_timer_init(void) { unsigned long clk_rst; __raw_writel(APBC_APBCLK | APBC_RST, APBC_TIMERS); /* * enable bus/functional clock, enable 6.5MHz (divider 4), * release reset */ clk_rst = APBC_APBCLK | APBC_FNCLK | APBC_FNCLKSEL(1); __raw_writel(clk_rst, APBC_TIMERS); timer_init(IRQ_MMP2_TIMER1); } /* on-chip devices */ MMP2_DEVICE(uart1, "pxa2xx-uart", 0, UART1, 0xd4030000, 0x30, 4, 5); MMP2_DEVICE(uart2, "pxa2xx-uart", 1, UART2, 0xd4017000, 0x30, 20, 21); MMP2_DEVICE(uart3, "pxa2xx-uart", 2, UART3, 0xd4018000, 0x30, 22, 23); MMP2_DEVICE(uart4, "pxa2xx-uart", 3, UART4, 0xd4016000, 0x30, 18, 19); MMP2_DEVICE(twsi1, "pxa2xx-i2c", 0, TWSI1, 0xd4011000, 0x70); MMP2_DEVICE(twsi2, "pxa2xx-i2c", 1, TWSI2, 0xd4031000, 0x70); MMP2_DEVICE(twsi3, "pxa2xx-i2c", 2, TWSI3, 0xd4032000, 0x70); MMP2_DEVICE(twsi4, "pxa2xx-i2c", 3, TWSI4, 0xd4033000, 0x70); MMP2_DEVICE(twsi5, "pxa2xx-i2c", 4, TWSI5, 0xd4033800, 0x70); MMP2_DEVICE(twsi6, "pxa2xx-i2c", 5, TWSI6, 0xd4034000, 0x70); MMP2_DEVICE(nand, "pxa3xx-nand", -1, NAND, 0xd4283000, 0x100, 28, 29); MMP2_DEVICE(sdh0, "sdhci-pxav3", 0, MMC, 0xd4280000, 0x120); MMP2_DEVICE(sdh1, "sdhci-pxav3", 1, MMC2, 0xd4280800, 0x120); MMP2_DEVICE(sdh2, "sdhci-pxav3", 2, MMC3, 0xd4281000, 0x120); MMP2_DEVICE(sdh3, "sdhci-pxav3", 3, MMC4, 0xd4281800, 0x120); MMP2_DEVICE(asram, "asram", -1, NONE, 0xe0000000, 0x4000); /* 0xd1000000 ~ 0xd101ffff is reserved for secure processor */ MMP2_DEVICE(isram, "isram", -1, NONE, 0xd1020000, 0x18000); struct 
resource mmp2_resource_gpio[] = { { .start = 0xd4019000, .end = 0xd4019fff, .flags = IORESOURCE_MEM, }, { .start = IRQ_MMP2_GPIO, .end = IRQ_MMP2_GPIO, .name = "gpio_mux", .flags = IORESOURCE_IRQ, }, }; struct platform_device mmp2_device_gpio = { .name = "mmp2-gpio", .id = -1, .num_resources = ARRAY_SIZE(mmp2_resource_gpio), .resource = mmp2_resource_gpio, };
gpl-2.0
jbott/android_kernel_rpi_rpi
arch/arm/mach-omap2/powerdomains3xxx_data.c
2102
10282
/*
 * OMAP3 powerdomain definitions
 *
 * Copyright (C) 2007-2008, 2011 Texas Instruments, Inc.
 * Copyright (C) 2007-2011 Nokia Corporation
 *
 * Paul Walmsley, Jouni Högander
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bug.h>

#include "soc.h"
#include "powerdomain.h"
#include "powerdomains2xxx_3xxx_data.h"
#include "prcm-common.h"
#include "prm2xxx_3xxx.h"
#include "prm-regbits-34xx.h"
#include "cm2xxx_3xxx.h"
#include "cm-regbits-34xx.h"

/*
 * 34XX-specific powerdomains, dependencies
 */

/*
 * Powerdomains
 */

/* IVA2 (DSP) domain: four memory banks, all off/retention capable */
static struct powerdomain iva2_pwrdm = {
	.name		  = "iva2_pwrdm",
	.prcm_offs	  = OMAP3430_IVA2_MOD,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_OFF_RET,
	.banks		  = 4,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_OFF_RET,
		[1] = PWRSTS_OFF_RET,
		[2] = PWRSTS_OFF_RET,
		[3] = PWRSTS_OFF_RET,
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_ON,
		[1] = PWRSTS_ON,
		[2] = PWRSTS_OFF_ON,
		[3] = PWRSTS_ON,
	},
	.voltdm		  = { .name = "mpu_iva" },
};

static struct powerdomain mpu_3xxx_pwrdm = {
	.name		  = "mpu_pwrdm",
	.prcm_offs	  = MPU_MOD,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_OFF_RET,
	.flags		  = PWRDM_HAS_MPU_QUIRK,
	.banks		  = 1,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_OFF_RET,
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_OFF_ON,
	},
	.voltdm		  = { .name = "mpu_iva" },
};

/* AM35x variant: power management is not supported, everything stays ON */
static struct powerdomain mpu_am35x_pwrdm = {
	.name		  = "mpu_pwrdm",
	.prcm_offs	  = MPU_MOD,
	.pwrsts		  = PWRSTS_ON,
	.pwrsts_logic_ret = PWRSTS_ON,
	.flags		  = PWRDM_HAS_MPU_QUIRK,
	.banks		  = 1,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_ON,
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_ON,
	},
	.voltdm		  = { .name = "mpu_iva" },
};

/*
 * The USBTLL Save-and-Restore mechanism is broken on
 * 3430s up to ES3.0 and 3630ES1.0. Hence this feature
 * needs to be disabled on these chips.
 * Refer: 3430 errata ID i459 and 3630 errata ID i579
 *
 * Note: setting the SAR flag could help for errata ID i478
 * which applies to 3430 <= ES3.1, but since the SAR feature
 * is broken, do not use it.
 */
static struct powerdomain core_3xxx_pre_es3_1_pwrdm = {
	.name		  = "core_pwrdm",
	.prcm_offs	  = CORE_MOD,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_OFF_RET,
	.banks		  = 2,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_OFF_RET,	 /* MEM1RETSTATE */
		[1] = PWRSTS_OFF_RET,	 /* MEM2RETSTATE */
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_OFF_RET_ON, /* MEM1ONSTATE */
		[1] = PWRSTS_OFF_RET_ON, /* MEM2ONSTATE */
	},
	.voltdm		  = { .name = "core" },
};

static struct powerdomain core_3xxx_es3_1_pwrdm = {
	.name		  = "core_pwrdm",
	.prcm_offs	  = CORE_MOD,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_OFF_RET,
	/*
	 * Setting the SAR flag for errata ID i478 which applies
	 * to 3430 <= ES3.1
	 */
	.flags		  = PWRDM_HAS_HDWR_SAR, /* for USBTLL only */
	.banks		  = 2,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_OFF_RET,	 /* MEM1RETSTATE */
		[1] = PWRSTS_OFF_RET,	 /* MEM2RETSTATE */
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_OFF_RET_ON, /* MEM1ONSTATE */
		[1] = PWRSTS_OFF_RET_ON, /* MEM2ONSTATE */
	},
	.voltdm		  = { .name = "core" },
};

static struct powerdomain core_am35x_pwrdm = {
	.name		  = "core_pwrdm",
	.prcm_offs	  = CORE_MOD,
	.pwrsts		  = PWRSTS_ON,
	.pwrsts_logic_ret = PWRSTS_ON,
	.banks		  = 2,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_ON,	 /* MEM1RETSTATE */
		[1] = PWRSTS_ON,	 /* MEM2RETSTATE */
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_ON,	 /* MEM1ONSTATE */
		[1] = PWRSTS_ON,	 /* MEM2ONSTATE */
	},
	.voltdm		  = { .name = "core" },
};

static struct powerdomain dss_pwrdm = {
	.name		  = "dss_pwrdm",
	.prcm_offs	  = OMAP3430_DSS_MOD,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_RET,
	.banks		  = 1,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_RET, /* MEMRETSTATE */
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_ON,  /* MEMONSTATE */
	},
	.voltdm		  = { .name = "core" },
};

static struct powerdomain dss_am35x_pwrdm = {
	.name		  = "dss_pwrdm",
	.prcm_offs	  = OMAP3430_DSS_MOD,
	.pwrsts		  = PWRSTS_ON,
	.pwrsts_logic_ret = PWRSTS_ON,
	.banks		  = 1,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_ON, /* MEMRETSTATE */
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_ON, /* MEMONSTATE */
	},
	.voltdm		  = { .name = "core" },
};

/*
 * Although the 34XX TRM Rev K Table 4-371 notes that retention is a
 * possible SGX powerstate, the SGX device itself does not support
 * retention.
 */
static struct powerdomain sgx_pwrdm = {
	.name		  = "sgx_pwrdm",
	.prcm_offs	  = OMAP3430ES2_SGX_MOD,
	/* XXX This is accurate for 3430 SGX, but what about GFX? */
	.pwrsts		  = PWRSTS_OFF_ON,
	.pwrsts_logic_ret = PWRSTS_RET,
	.banks		  = 1,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_RET, /* MEMRETSTATE */
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_ON,  /* MEMONSTATE */
	},
	.voltdm		  = { .name = "core" },
};

static struct powerdomain sgx_am35x_pwrdm = {
	.name		  = "sgx_pwrdm",
	.prcm_offs	  = OMAP3430ES2_SGX_MOD,
	.pwrsts		  = PWRSTS_ON,
	.pwrsts_logic_ret = PWRSTS_ON,
	.banks		  = 1,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_ON, /* MEMRETSTATE */
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_ON, /* MEMONSTATE */
	},
	.voltdm		  = { .name = "core" },
};

static struct powerdomain cam_pwrdm = {
	.name		  = "cam_pwrdm",
	.prcm_offs	  = OMAP3430_CAM_MOD,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_RET,
	.banks		  = 1,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_RET, /* MEMRETSTATE */
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_ON,  /* MEMONSTATE */
	},
	.voltdm		  = { .name = "core" },
};

static struct powerdomain per_pwrdm = {
	.name		  = "per_pwrdm",
	.prcm_offs	  = OMAP3430_PER_MOD,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_OFF_RET,
	.banks		  = 1,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_RET, /* MEMRETSTATE */
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_ON,  /* MEMONSTATE */
	},
	.voltdm		  = { .name = "core" },
};

static struct powerdomain per_am35x_pwrdm = {
	.name		  = "per_pwrdm",
	.prcm_offs	  = OMAP3430_PER_MOD,
	.pwrsts		  = PWRSTS_ON,
	.pwrsts_logic_ret = PWRSTS_ON,
	.banks		  = 1,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_ON, /* MEMRETSTATE */
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_ON, /* MEMONSTATE */
	},
	.voltdm		  = { .name = "core" },
};

/* EMU/debug domain: no software-controllable power states, hence no
 * .pwrsts fields -- only the PRCM offset and voltage domain. */
static struct powerdomain emu_pwrdm = {
	.name		= "emu_pwrdm",
	.prcm_offs	= OMAP3430_EMU_MOD,
	.voltdm		  = { .name = "core" },
};

static struct powerdomain neon_pwrdm = {
	.name		  = "neon_pwrdm",
	.prcm_offs	  = OMAP3430_NEON_MOD,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_RET,
	.voltdm		  = { .name = "mpu_iva" },
};

static struct powerdomain neon_am35x_pwrdm = {
	.name		  = "neon_pwrdm",
	.prcm_offs	  = OMAP3430_NEON_MOD,
	.pwrsts		  = PWRSTS_ON,
	.pwrsts_logic_ret = PWRSTS_ON,
	.voltdm		  = { .name = "mpu_iva" },
};

static struct powerdomain usbhost_pwrdm = {
	.name		  = "usbhost_pwrdm",
	.prcm_offs	  = OMAP3430ES2_USBHOST_MOD,
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_RET,
	/*
	 * REVISIT: Enabling usb host save and restore mechanism seems to
	 * leave the usb host domain permanently in ACTIVE mode after
	 * changing the usb host power domain state from OFF to active once.
	 * Disabling for now.
	 */
	/*.flags	  = PWRDM_HAS_HDWR_SAR,*/ /* for USBHOST ctrlr only */
	.banks		  = 1,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_RET, /* MEMRETSTATE */
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_ON,  /* MEMONSTATE */
	},
	.voltdm		  = { .name = "core" },
};

/* DPLL "domains" track the PRCM module each DPLL lives in */
static struct powerdomain dpll1_pwrdm = {
	.name		= "dpll1_pwrdm",
	.prcm_offs	= MPU_MOD,
	.voltdm		  = { .name = "mpu_iva" },
};

static struct powerdomain dpll2_pwrdm = {
	.name		= "dpll2_pwrdm",
	.prcm_offs	= OMAP3430_IVA2_MOD,
	.voltdm		  = { .name = "mpu_iva" },
};

static struct powerdomain dpll3_pwrdm = {
	.name		= "dpll3_pwrdm",
	.prcm_offs	= PLL_MOD,
	.voltdm		  = { .name = "core" },
};

static struct powerdomain dpll4_pwrdm = {
	.name		= "dpll4_pwrdm",
	.prcm_offs	= PLL_MOD,
	.voltdm		  = { .name = "core" },
};

static struct powerdomain dpll5_pwrdm = {
	.name		= "dpll5_pwrdm",
	.prcm_offs	= PLL_MOD,
	.voltdm		  = { .name = "core" },
};

/* As powerdomains are added or removed above, this list must also be changed */
static struct powerdomain *powerdomains_omap3430_common[] __initdata = {
	&wkup_omap2_pwrdm,
	&iva2_pwrdm,
	&mpu_3xxx_pwrdm,
	&neon_pwrdm,
	&cam_pwrdm,
	&dss_pwrdm,
	&per_pwrdm,
	&emu_pwrdm,
	&dpll1_pwrdm,
	&dpll2_pwrdm,
	&dpll3_pwrdm,
	&dpll4_pwrdm,
	NULL
};

static struct powerdomain *powerdomains_omap3430es1[] __initdata = {
	&gfx_omap2_pwrdm,
	&core_3xxx_pre_es3_1_pwrdm,
	NULL
};

/* also includes 3630ES1.0 */
static struct powerdomain *powerdomains_omap3430es2_es3_0[] __initdata = {
	&core_3xxx_pre_es3_1_pwrdm,
	&sgx_pwrdm,
	&usbhost_pwrdm,
	&dpll5_pwrdm,
	NULL
};

/* also includes 3630ES1.1+ */
static struct powerdomain *powerdomains_omap3430es3_1plus[] __initdata = {
	&core_3xxx_es3_1_pwrdm,
	&sgx_pwrdm,
	&usbhost_pwrdm,
	&dpll5_pwrdm,
	NULL
};

static struct powerdomain *powerdomains_am35x[] __initdata = {
	&wkup_omap2_pwrdm,
	&mpu_am35x_pwrdm,
	&neon_am35x_pwrdm,
	&core_am35x_pwrdm,
	&sgx_am35x_pwrdm,
	&dss_am35x_pwrdm,
	&per_am35x_pwrdm,
	&emu_pwrdm,
	&dpll1_pwrdm,
	&dpll3_pwrdm,
	&dpll4_pwrdm,
	&dpll5_pwrdm,
	NULL
};

/*
 * Register the powerdomain set matching the detected silicon revision:
 * AM35x parts get their always-on set; everything else gets the common
 * 3430 set plus an ES-revision-specific supplement.
 */
void __init omap3xxx_powerdomains_init(void)
{
	unsigned int rev;

	if (!cpu_is_omap34xx())
		return;

	pwrdm_register_platform_funcs(&omap3_pwrdm_operations);
	rev = omap_rev();

	if (rev == AM35XX_REV_ES1_0 || rev == AM35XX_REV_ES1_1) {
		pwrdm_register_pwrdms(powerdomains_am35x);
	} else {
		pwrdm_register_pwrdms(powerdomains_omap3430_common);

		switch (rev) {
		case OMAP3430_REV_ES1_0:
			pwrdm_register_pwrdms(powerdomains_omap3430es1);
			break;
		case OMAP3430_REV_ES2_0:
		case OMAP3430_REV_ES2_1:
		case OMAP3430_REV_ES3_0:
		case OMAP3630_REV_ES1_0:
			pwrdm_register_pwrdms(powerdomains_omap3430es2_es3_0);
			break;
		case OMAP3430_REV_ES3_1:
		case OMAP3430_REV_ES3_1_2:
		case OMAP3630_REV_ES1_1:
		case OMAP3630_REV_ES1_2:
			pwrdm_register_pwrdms(powerdomains_omap3430es3_1plus);
			break;
		default:
			WARN(1, "OMAP3 powerdomain init: unknown chip type\n");
		}
	}

	pwrdm_complete_init();
}
gpl-2.0
detule/lge-linux-msm
drivers/message/fusion/mptctl.c
2614
88341
/* * linux/drivers/message/fusion/mptctl.c * mpt Ioctl driver. * For use with LSI PCI chip/adapters * running LSI Fusion MPT (Message Passing Technology) firmware. * * Copyright (c) 1999-2008 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. NO WARRANTY THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement, including but not limited to the risks and costs of program errors, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. 
DISCLAIMER OF LIABILITY NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/delay.h> /* for mdelay */ #include <linux/miscdevice.h> #include <linux/mutex.h> #include <linux/compat.h> #include <asm/io.h> #include <asm/uaccess.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #define COPYRIGHT "Copyright (c) 1999-2008 LSI Corporation" #define MODULEAUTHOR "LSI Corporation" #include "mptbase.h" #include "mptctl.h" /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ #define my_NAME "Fusion MPT misc device (ioctl) driver" #define my_VERSION MPT_LINUX_VERSION_COMMON #define MYNAM "mptctl" MODULE_AUTHOR(MODULEAUTHOR); MODULE_DESCRIPTION(my_NAME); MODULE_LICENSE("GPL"); MODULE_VERSION(my_VERSION); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static DEFINE_MUTEX(mpctl_mutex); static u8 mptctl_id = MPT_MAX_PROTOCOL_DRIVERS; static u8 mptctl_taskmgmt_id = MPT_MAX_PROTOCOL_DRIVERS; static DECLARE_WAIT_QUEUE_HEAD ( mptctl_wait ); 
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ struct buflist { u8 *kptr; int len; }; /* * Function prototypes. Called from OS entry point mptctl_ioctl. * arg contents specific to function. */ static int mptctl_fw_download(unsigned long arg); static int mptctl_getiocinfo(unsigned long arg, unsigned int cmd); static int mptctl_gettargetinfo(unsigned long arg); static int mptctl_readtest(unsigned long arg); static int mptctl_mpt_command(unsigned long arg); static int mptctl_eventquery(unsigned long arg); static int mptctl_eventenable(unsigned long arg); static int mptctl_eventreport(unsigned long arg); static int mptctl_replace_fw(unsigned long arg); static int mptctl_do_reset(unsigned long arg); static int mptctl_hp_hostinfo(unsigned long arg, unsigned int cmd); static int mptctl_hp_targetinfo(unsigned long arg); static int mptctl_probe(struct pci_dev *, const struct pci_device_id *); static void mptctl_remove(struct pci_dev *); #ifdef CONFIG_COMPAT static long compat_mpctl_ioctl(struct file *f, unsigned cmd, unsigned long arg); #endif /* * Private function calls. */ static int mptctl_do_mpt_command(struct mpt_ioctl_command karg, void __user *mfPtr); static int mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen); static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags, struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc); static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTER *ioc); /* * Reset Handler cleanup function */ static int mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase); /* * Event Handler function */ static int mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply); static struct fasync_struct *async_queue=NULL; /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * Scatter gather list (SGL) sizes and limits... 
*/ //#define MAX_SCSI_FRAGS 9 #define MAX_FRAGS_SPILL1 9 #define MAX_FRAGS_SPILL2 15 #define FRAGS_PER_BUCKET (MAX_FRAGS_SPILL2 + 1) //#define MAX_CHAIN_FRAGS 64 //#define MAX_CHAIN_FRAGS (15+15+15+16) #define MAX_CHAIN_FRAGS (4 * MAX_FRAGS_SPILL2 + 1) // Define max sg LIST bytes ( == (#frags + #chains) * 8 bytes each) // Works out to: 592d bytes! (9+1)*8 + 4*(15+1)*8 // ^----------------- 80 + 512 #define MAX_SGL_BYTES ((MAX_FRAGS_SPILL1 + 1 + (4 * FRAGS_PER_BUCKET)) * 8) /* linux only seems to ever give 128kB MAX contiguous (GFP_USER) mem bytes */ #define MAX_KMALLOC_SZ (128*1024) #define MPT_IOCTL_DEFAULT_TIMEOUT 10 /* Default timeout value (seconds) */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * mptctl_syscall_down - Down the MPT adapter syscall semaphore. * @ioc: Pointer to MPT adapter * @nonblock: boolean, non-zero if O_NONBLOCK is set * * All of the ioctl commands can potentially sleep, which is illegal * with a spinlock held, thus we perform mutual exclusion here. * * Returns negative errno on error, or zero for success. */ static inline int mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock) { int rc = 0; if (nonblock) { if (!mutex_trylock(&ioc->ioctl_cmds.mutex)) rc = -EAGAIN; } else { if (mutex_lock_interruptible(&ioc->ioctl_cmds.mutex)) rc = -ERESTARTSYS; } return rc; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * This is the callback for any message we have posted. The message itself * will be returned to the message pool when we return from the IRQ * * This runs in irq context so be short and sweet. */ static int mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) { char *sense_data; int req_index; int sz; if (!req) return 0; dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "completing mpi function " "(0x%02X), req=%p, reply=%p\n", ioc->name, req->u.hdr.Function, req, reply)); /* * Handling continuation of the same reply. 
Processing the first * reply, and eating the other replys that come later. */ if (ioc->ioctl_cmds.msg_context != req->u.hdr.MsgContext) goto out_continuation; ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; if (!reply) goto out; ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_RF_VALID; sz = min(ioc->reply_sz, 4*reply->u.reply.MsgLength); memcpy(ioc->ioctl_cmds.reply, reply, sz); if (reply->u.reply.IOCStatus || reply->u.reply.IOCLogInfo) dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "iocstatus (0x%04X), loginfo (0x%08X)\n", ioc->name, le16_to_cpu(reply->u.reply.IOCStatus), le32_to_cpu(reply->u.reply.IOCLogInfo))); if ((req->u.hdr.Function == MPI_FUNCTION_SCSI_IO_REQUEST) || (req->u.hdr.Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState) dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "scsi_status (0x%02x), scsi_state (0x%02x), " "tag = (0x%04x), transfer_count (0x%08x)\n", ioc->name, reply->u.sreply.SCSIStatus, reply->u.sreply.SCSIState, le16_to_cpu(reply->u.sreply.TaskTag), le32_to_cpu(reply->u.sreply.TransferCount))); if (reply->u.sreply.SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) { sz = req->u.scsireq.SenseBufferLength; req_index = le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx); sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC)); memcpy(ioc->ioctl_cmds.sense, sense_data, sz); ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_SENSE_VALID; } } out: /* We are done, issue wake up */ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) { if (req->u.hdr.Function == MPI_FUNCTION_SCSI_TASK_MGMT) { mpt_clear_taskmgmt_in_progress_flag(ioc); ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING; complete(&ioc->ioctl_cmds.done); if (ioc->bus_type == SAS) ioc->schedule_target_reset(ioc); } else { ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING; complete(&ioc->ioctl_cmds.done); } } out_continuation: if (reply && (reply->u.reply.MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)) return 0; return 1; } 
/*
 *  Callback for task-management requests posted through mptctl_taskmgmt_id.
 *  Runs in IRQ context: copies the TM reply frame and completes the waiter.
 *  Returns 1 if the request frame should be freed, 0 otherwise.
 */
static int
mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
{
	if (!mf)
		return 0;

	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		"TaskMgmt completed (mf=%p, mr=%p)\n",
		ioc->name, mf, mr));

	ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;

	if (!mr)
		goto out;

	/* MsgLength is in 32-bit words; clamp copy to one frame */
	ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
	memcpy(ioc->taskmgmt_cmds.reply, mr,
	    min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
 out:
	if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
		mpt_clear_taskmgmt_in_progress_flag(ioc);
		ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
		complete(&ioc->taskmgmt_cmds.done);
		if (ioc->bus_type == SAS)
			ioc->schedule_target_reset(ioc);
		return 1;
	}
	return 0;
}

/*
 *  mptctl_do_taskmgmt - Issue a synchronous SCSI task-management request
 *  (target reset / bus reset) and wait for its completion.
 *  @ioc: adapter
 *  @tm_type: MPI_SCSITASKMGMT_TASKTYPE_* code
 *  @bus_id / @target_id: addressed device
 *
 *  Serialized by taskmgmt_cmds.mutex and the adapter TM-in-progress flag.
 *  Returns 0 on success, -EPERM if TM already in progress, -ENOMEM if no
 *  message frame, -1 on TM failure.
 */
static int
mptctl_do_taskmgmt(MPT_ADAPTER *ioc, u8 tm_type, u8 bus_id, u8 target_id)
{
	MPT_FRAME_HDR	*mf;
	SCSITaskMgmt_t	*pScsiTm;
	SCSITaskMgmtReply_t *pScsiTmReply;
	int		 ii;
	int		 retval;
	unsigned long	 timeout;
	unsigned long	 time_count;
	u16		 iocstatus;


	mutex_lock(&ioc->taskmgmt_cmds.mutex);
	if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
		mutex_unlock(&ioc->taskmgmt_cmds.mutex);
		return -EPERM;
	}

	retval = 0;

	mf = mpt_get_msg_frame(mptctl_taskmgmt_id, ioc);
	if (mf == NULL) {
		dtmprintk(ioc,
			printk(MYIOC_s_WARN_FMT "TaskMgmt, no msg frames!!\n",
			ioc->name));
		mpt_clear_taskmgmt_in_progress_flag(ioc);
		retval = -ENOMEM;
		goto tm_done;
	}

	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
		ioc->name, mf));

	/* Build the task-management frame in place */
	pScsiTm = (SCSITaskMgmt_t *) mf;
	memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
	pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
	pScsiTm->TaskType = tm_type;
	/* FC bus reset may optionally be carried out as a LIP reset */
	if ((tm_type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) &&
		(ioc->bus_type == FC))
		pScsiTm->MsgFlags =
				MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
	pScsiTm->TargetID = target_id;
	pScsiTm->Bus = bus_id;
	pScsiTm->ChainOffset = 0;
	pScsiTm->Reserved = 0;
	pScsiTm->Reserved1 = 0;
	pScsiTm->TaskMsgContext = 0;
	for (ii= 0; ii < 8; ii++)
		pScsiTm->LUN[ii] = 0;
	for (ii=0; ii < 7; ii++)
		pScsiTm->Reserved2[ii] = 0;

	/* Per-bus-type completion timeout, in seconds */
	switch (ioc->bus_type) {
	case FC:
		timeout = 40;
		break;
	case SAS:
		timeout = 30;
		break;
	case SPI:
	default:
		timeout = 10;
		break;
	}

	dtmprintk(ioc,
		printk(MYIOC_s_DEBUG_FMT "TaskMgmt type=%d timeout=%ld\n",
		ioc->name, tm_type, timeout));

	INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
	time_count = jiffies;
	/* High-priority queue requires MPI 1.05+ and the capability bit;
	 * otherwise fall back to the doorbell handshake. */
	if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
	    (ioc->facts.MsgVersion >= MPI_VERSION_01_05))
		mpt_put_msg_frame_hi_pri(mptctl_taskmgmt_id, ioc, mf);
	else {
		retval = mpt_send_handshake_request(mptctl_taskmgmt_id, ioc,
		    sizeof(SCSITaskMgmt_t), (u32 *)pScsiTm, CAN_SLEEP);
		if (retval != 0) {
			dfailprintk(ioc,
				printk(MYIOC_s_ERR_FMT
				"TaskMgmt send_handshake FAILED!"
				" (ioc %p, mf %p, rc=%d) \n", ioc->name,
				ioc, mf, retval));
			mpt_free_msg_frame(ioc, mf);
			mpt_clear_taskmgmt_in_progress_flag(ioc);
			goto tm_done;
		}
	}

	/* Now wait for the command to complete */
	ii = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, timeout*HZ);

	if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "TaskMgmt failed\n", ioc->name));
		mpt_free_msg_frame(ioc, mf);
		mpt_clear_taskmgmt_in_progress_flag(ioc);
		/* An intervening IOC reset counts as success here */
		if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
			retval = 0;
		else
			retval = -1; /* return failure */
		goto tm_done;
	}

	if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "TaskMgmt failed\n", ioc->name));
		retval = -1; /* return failure */
		goto tm_done;
	}

	pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
	    "TaskMgmt fw_channel = %d, fw_id = %d, task_type=0x%02X, "
	    "iocstatus=0x%04X\n\tloginfo=0x%08X, response_code=0x%02X, "
	    "term_cmnds=%d\n", ioc->name, pScsiTmReply->Bus,
	    pScsiTmReply->TargetID, tm_type, le16_to_cpu(pScsiTmReply->IOCStatus),
	    le32_to_cpu(pScsiTmReply->IOCLogInfo),
	    pScsiTmReply->ResponseCode,
	    le32_to_cpu(pScsiTmReply->TerminationCount)));

	iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;

	/* "terminated" statuses mean the TM achieved its goal */
	if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
	   iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED ||
	   iocstatus == MPI_IOCSTATUS_SUCCESS)
		retval = 0;
	else {
		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "TaskMgmt failed\n", ioc->name));
		retval = -1; /* return failure */
	}

 tm_done:
	mutex_unlock(&ioc->taskmgmt_cmds.mutex);
	CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
	return retval;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* mptctl_timeout_expired
 *
 * Expecting an interrupt, however timed out.
 *
 * Attempts escalating recovery for the timed-out frame: a target reset
 * (SAS, for plain SCSI I/O) or bus reset first; if that fails, a full
 * soft/hard adapter reset.
 */
static void
mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
	unsigned long flags;
	int ret_val = -1;
	SCSIIORequest_t *scsi_req = (SCSIIORequest_t *) mf;
	u8 function = mf->u.hdr.Function;

	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n",
		ioc->name, __func__));

	if (mpt_fwfault_debug)
		mpt_halt_firmware(ioc);

	/* If a reset is already in flight, just drop the frame and bail */
	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
	if (ioc->ioc_reset_in_progress) {
		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
		CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
		mpt_free_msg_frame(ioc, mf);
		return;
	}
	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);


	CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)

	if (ioc->bus_type == SAS) {
		if (function == MPI_FUNCTION_SCSI_IO_REQUEST)
			ret_val = mptctl_do_taskmgmt(ioc,
				MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
				scsi_req->Bus, scsi_req->TargetID);
		else if (function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)
			ret_val = mptctl_do_taskmgmt(ioc,
				MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
				scsi_req->Bus, 0);
		if (!ret_val)
			return;
	} else {
		if ((function == MPI_FUNCTION_SCSI_IO_REQUEST) ||
			(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH))
			ret_val = mptctl_do_taskmgmt(ioc,
				MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
				scsi_req->Bus, 0);
		if (!ret_val)
			return;
	}

	/* TM did not help (or function was not a SCSI I/O): full reset */
	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling Reset! \n",
		 ioc->name));
	mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
	mpt_free_msg_frame(ioc, mf);
}


/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* mptctl_ioc_reset
 *
 * Clean-up functionality. Used only if there has been a
 * reload of the FW due.
 *
 * Reset-notification callback: on POST_RESET, wake any ioctl waiter so it
 * can observe MPT_MGMT_STATUS_DID_IOCRESET instead of hanging.
 */
static int
mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
	switch(reset_phase) {
	case MPT_IOC_SETUP_RESET:
		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
		break;
	case MPT_IOC_PRE_RESET:
		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
		break;
	case MPT_IOC_POST_RESET:
		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
		if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) {
			ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_DID_IOCRESET;
			complete(&ioc->ioctl_cmds.done);
		}
		break;
	default:
		break;
	}

	return 1;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* ASYNC Event Notification Support
 *
 * Raises SIGIO (via fasync) toward user space when an event of interest
 * arrives; aen_event_read_flag throttles signalling to one outstanding
 * notification until the application reads the log via MPTEVENTREPORT.
 */
static int
mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
	u8 event;

	event = le32_to_cpu(pEvReply->Event) & 0xFF;

	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s() called\n",
	    ioc->name, __func__));
	if(async_queue == NULL)
		return 1;

	/* Raise SIGIO for persistent events.
	 * TODO - this define is not in MPI spec yet,
	 * but they plan to set it to 0x21
	 */
	if (event == 0x21) {
		ioc->aen_event_read_flag=1;
		dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Raised SIGIO to application\n",
		    ioc->name));
		devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "Raised SIGIO to application\n", ioc->name));
		kill_fasync(&async_queue, SIGIO, POLL_IN);
		return 1;
	}

	/* This flag is set after SIGIO was raised, and
	 * remains set until the application has read
	 * the event log via ioctl=MPTEVENTREPORT
	 */
	if(ioc->aen_event_read_flag)
		return 1;

	/* Signal only for the events that are
	 * requested for by the application
	 */
	if (ioc->events && (ioc->eventTypes & ( 1 << event))) {
		ioc->aen_event_read_flag=1;
		dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "Raised SIGIO to application\n", ioc->name));
		devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "Raised SIGIO to application\n", ioc->name));
		kill_fasync(&async_queue, SIGIO, POLL_IN);
	}
	return 1;
}

/*
 *  fasync file-op: (de)register the caller on the shared async queue and
 *  re-arm event signalling on every registered adapter.
 */
static int
mptctl_fasync(int fd, struct file *filep, int mode)
{
	MPT_ADAPTER	*ioc;
	int ret;

	mutex_lock(&mpctl_mutex);
	list_for_each_entry(ioc, &ioc_list, list)
		ioc->aen_event_read_flag=0;

	ret = fasync_helper(fd, filep, mode, &async_queue);
	mutex_unlock(&mpctl_mutex);
	return ret;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  MPT ioctl handler
 *  cmd - specify the particular IOCTL command to be issued
 *  arg - data specific to the command. Must not be null.
 */
/*
 *  Core ioctl dispatcher.  Validates the common mpt_ioctl_header, resolves
 *  the target adapter, and routes to the per-command handler.  Info-only
 *  commands are dispatched directly; commands that post hardware requests
 *  are serialized through mptctl_syscall_down()/ioctl_cmds.mutex.
 */
static long
__mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	mpt_ioctl_header __user *uhdr = (void __user *) arg;
	mpt_ioctl_header	 khdr;
	int iocnum;
	unsigned iocnumX;
	int nonblock = (file->f_flags & O_NONBLOCK);
	int ret;
	MPT_ADAPTER *iocp = NULL;

	if (copy_from_user(&khdr, uhdr, sizeof(khdr))) {
		printk(KERN_ERR MYNAM "%s::mptctl_ioctl() @%d - "
				"Unable to copy mpt_ioctl_header data @ %p\n",
				__FILE__, __LINE__, uhdr);
		return -EFAULT;
	}
	ret = -ENXIO;				/* (-6) No such device or address */

	/* Verify intended MPT adapter - set iocnum and the adapter
	 * pointer (iocp)
	 */
	iocnumX = khdr.iocnum & 0xFF;
	if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
	    (iocp == NULL))
		return -ENODEV;

	if (!iocp->active) {
		printk(KERN_DEBUG MYNAM "%s::mptctl_ioctl() @%d - Controller disabled.\n",
				__FILE__, __LINE__);
		return -EFAULT;
	}

	/* Handle those commands that are just returning
	 * information stored in the driver.
	 * These commands should never time out and are unaffected
	 * by TM and FW reloads.
	 */
	/* MPTIOCINFO is size-encoded; compare with the size bits masked off */
	if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) {
		return mptctl_getiocinfo(arg, _IOC_SIZE(cmd));
	} else if (cmd == MPTTARGETINFO) {
		return mptctl_gettargetinfo(arg);
	} else if (cmd == MPTTEST) {
		return mptctl_readtest(arg);
	} else if (cmd == MPTEVENTQUERY) {
		return mptctl_eventquery(arg);
	} else if (cmd == MPTEVENTENABLE) {
		return mptctl_eventenable(arg);
	} else if (cmd == MPTEVENTREPORT) {
		return mptctl_eventreport(arg);
	} else if (cmd == MPTFWREPLACE) {
		return mptctl_replace_fw(arg);
	}

	/* All of these commands require an interrupt or
	 * are unknown/illegal.
	 */
	if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
		return ret;

	if (cmd == MPTFWDOWNLOAD)
		ret = mptctl_fw_download(arg);
	else if (cmd == MPTCOMMAND)
		ret = mptctl_mpt_command(arg);
	else if (cmd == MPTHARDRESET)
		ret = mptctl_do_reset(arg);
	else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK))
		ret = mptctl_hp_hostinfo(arg, _IOC_SIZE(cmd));
	else if (cmd == HP_GETTARGETINFO)
		ret = mptctl_hp_targetinfo(arg);
	else
		ret = -EINVAL;

	/* Release the mutex taken by mptctl_syscall_down() */
	mutex_unlock(&iocp->ioctl_cmds.mutex);

	return ret;
}

/*
 *  unlocked_ioctl entry point: serializes all ioctls behind the global
 *  mpctl_mutex and delegates to __mptctl_ioctl().
 */
static long
mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long ret;
	mutex_lock(&mpctl_mutex);
	ret = __mptctl_ioctl(file, cmd, arg);
	mutex_unlock(&mpctl_mutex);
	return ret;
}

/*
 *  MPTHARDRESET handler: hard-reset the addressed adapter.
 *  Returns 0 on success, -EFAULT/-ENODEV on argument errors, -1 if the
 *  reset itself failed.
 */
static int mptctl_do_reset(unsigned long arg)
{
	struct mpt_ioctl_diag_reset __user *urinfo = (void __user *) arg;
	struct mpt_ioctl_diag_reset krinfo;
	MPT_ADAPTER		*iocp;

	if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) {
		printk(KERN_ERR MYNAM "%s@%d::mptctl_do_reset - "
				"Unable to copy mpt_ioctl_diag_reset struct @ %p\n",
				__FILE__, __LINE__, urinfo);
		return -EFAULT;
	}

	if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) {
		printk(KERN_DEBUG MYNAM "%s@%d::mptctl_do_reset - ioc%d not found!\n",
				__FILE__, __LINE__, krinfo.hdr.iocnum);
		return -ENODEV; /* (-6) No such device or address */
	}

	dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_reset called.\n",
	    iocp->name));

	if (mpt_HardResetHandler(iocp, CAN_SLEEP) != 0) {
		printk (MYIOC_s_ERR_FMT "%s@%d::mptctl_do_reset - reset failed.\n",
			iocp->name, __FILE__, __LINE__);
		return -1;
	}

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * MPT FW download function.  Cast the arg into the mpt_fw_xfer structure.
 * This structure contains: iocnum, firmware length (bytes),
 *      pointer to user space memory where the fw image is stored.
 *
 * Outputs:	None.
* Return: 0 if successful * -EFAULT if data unavailable * -ENXIO if no such device * -EAGAIN if resource problem * -ENOMEM if no memory for SGE * -EMLINK if too many chain buffers required * -EBADRQC if adapter does not support FW download * -EBUSY if adapter is busy * -ENOMSG if FW upload returned bad status */ static int mptctl_fw_download(unsigned long arg) { struct mpt_fw_xfer __user *ufwdl = (void __user *) arg; struct mpt_fw_xfer kfwdl; if (copy_from_user(&kfwdl, ufwdl, sizeof(struct mpt_fw_xfer))) { printk(KERN_ERR MYNAM "%s@%d::_ioctl_fwdl - " "Unable to copy mpt_fw_xfer struct @ %p\n", __FILE__, __LINE__, ufwdl); return -EFAULT; } return mptctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * FW Download engine. * Outputs: None. * Return: 0 if successful * -EFAULT if data unavailable * -ENXIO if no such device * -EAGAIN if resource problem * -ENOMEM if no memory for SGE * -EMLINK if too many chain buffers required * -EBADRQC if adapter does not support FW download * -EBUSY if adapter is busy * -ENOMSG if FW upload returned bad status */ static int mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) { FWDownload_t *dlmsg; MPT_FRAME_HDR *mf; MPT_ADAPTER *iocp; FWDownloadTCSGE_t *ptsge; MptSge_t *sgl, *sgIn; char *sgOut; struct buflist *buflist; struct buflist *bl; dma_addr_t sgl_dma; int ret; int numfrags = 0; int maxfrags; int n = 0; u32 sgdir; u32 nib; int fw_bytes_copied = 0; int i; int sge_offset = 0; u16 iocstat; pFWDownloadReply_t ReplyMsg = NULL; unsigned long timeleft; if (mpt_verify_adapter(ioc, &iocp) < 0) { printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n", ioc); return -ENODEV; /* (-6) No such device or address */ } else { /* Valid device. Get a message frame and construct the FW download message. 
*/ if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) return -EAGAIN; } dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_fwdl called. mptctl_id = %xh.\n", iocp->name, mptctl_id)); dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.bufp = %p\n", iocp->name, ufwbuf)); dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.fwlen = %d\n", iocp->name, (int)fwlen)); dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.ioc = %04xh\n", iocp->name, ioc)); dlmsg = (FWDownload_t*) mf; ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL; sgOut = (char *) (ptsge + 1); /* * Construct f/w download request */ dlmsg->ImageType = MPI_FW_DOWNLOAD_ITYPE_FW; dlmsg->Reserved = 0; dlmsg->ChainOffset = 0; dlmsg->Function = MPI_FUNCTION_FW_DOWNLOAD; dlmsg->Reserved1[0] = dlmsg->Reserved1[1] = dlmsg->Reserved1[2] = 0; if (iocp->facts.MsgVersion >= MPI_VERSION_01_05) dlmsg->MsgFlags = MPI_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT; else dlmsg->MsgFlags = 0; /* Set up the Transaction SGE. */ ptsge->Reserved = 0; ptsge->ContextSize = 0; ptsge->DetailsLength = 12; ptsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT; ptsge->Reserved_0100_Checksum = 0; ptsge->ImageOffset = 0; ptsge->ImageSize = cpu_to_le32(fwlen); /* Add the SGL */ /* * Need to kmalloc area(s) for holding firmware image bytes. * But we need to do it piece meal, using a proper * scatter gather list (with 128kB MAX hunks). * * A practical limit here might be # of sg hunks that fit into * a single IOC request frame; 12 or 8 (see below), so: * For FC9xx: 12 x 128kB == 1.5 mB (max) * For C1030: 8 x 128kB == 1 mB (max) * We could support chaining, but things get ugly(ier:) * * Set the sge_offset to the start of the sgl (bytes). 
*/ sgdir = 0x04000000; /* IOC will READ from sys mem */ sge_offset = sizeof(MPIHeader_t) + sizeof(FWDownloadTCSGE_t); if ((sgl = kbuf_alloc_2_sgl(fwlen, sgdir, sge_offset, &numfrags, &buflist, &sgl_dma, iocp)) == NULL) return -ENOMEM; /* * We should only need SGL with 2 simple_32bit entries (up to 256 kB) * for FC9xx f/w image, but calculate max number of sge hunks * we can fit into a request frame, and limit ourselves to that. * (currently no chain support) * maxfrags = (Request Size - FWdownload Size ) / Size of 32 bit SGE * Request maxfrags * 128 12 * 96 8 * 64 4 */ maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) - sizeof(FWDownloadTCSGE_t)) / iocp->SGE_size; if (numfrags > maxfrags) { ret = -EMLINK; goto fwdl_out; } dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: sgl buffer = %p, sgfrags = %d\n", iocp->name, sgl, numfrags)); /* * Parse SG list, copying sgl itself, * plus f/w image hunks from user space as we go... */ ret = -EFAULT; sgIn = sgl; bl = buflist; for (i=0; i < numfrags; i++) { /* Get the SGE type: 0 - TCSGE, 3 - Chain, 1 - Simple SGE * Skip everything but Simple. If simple, copy from * user space into kernel space. * Note: we should not have anything but Simple as * Chain SGE are illegal. */ nib = (sgIn->FlagsLength & 0x30000000) >> 28; if (nib == 0 || nib == 3) { ; } else if (sgIn->Address) { iocp->add_sge(sgOut, sgIn->FlagsLength, sgIn->Address); n++; if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) { printk(MYIOC_s_ERR_FMT "%s@%d::_ioctl_fwdl - " "Unable to copy f/w buffer hunk#%d @ %p\n", iocp->name, __FILE__, __LINE__, n, ufwbuf); goto fwdl_out; } fw_bytes_copied += bl->len; } sgIn++; bl++; sgOut += iocp->SGE_size; } DBG_DUMP_FW_DOWNLOAD(iocp, (u32 *)mf, numfrags); /* * Finally, perform firmware download. 
*/ ReplyMsg = NULL; SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, dlmsg->MsgContext); INITIALIZE_MGMT_STATUS(iocp->ioctl_cmds.status) mpt_put_msg_frame(mptctl_id, iocp, mf); /* Now wait for the command to complete */ retry_wait: timeleft = wait_for_completion_timeout(&iocp->ioctl_cmds.done, HZ*60); if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { ret = -ETIME; printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__); if (iocp->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { mpt_free_msg_frame(iocp, mf); goto fwdl_out; } if (!timeleft) { printk(MYIOC_s_WARN_FMT "FW download timeout, doorbell=0x%08x\n", iocp->name, mpt_GetIocState(iocp, 0)); mptctl_timeout_expired(iocp, mf); } else goto retry_wait; goto fwdl_out; } if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)) { printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__); mpt_free_msg_frame(iocp, mf); ret = -ENODATA; goto fwdl_out; } if (sgl) kfree_sgl(sgl, sgl_dma, buflist, iocp); ReplyMsg = (pFWDownloadReply_t)iocp->ioctl_cmds.reply; iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK; if (iocstat == MPI_IOCSTATUS_SUCCESS) { printk(MYIOC_s_INFO_FMT "F/W update successful!\n", iocp->name); return 0; } else if (iocstat == MPI_IOCSTATUS_INVALID_FUNCTION) { printk(MYIOC_s_WARN_FMT "Hmmm... 
F/W download not supported!?!\n", iocp->name); printk(MYIOC_s_WARN_FMT "(time to go bang on somebodies door)\n", iocp->name); return -EBADRQC; } else if (iocstat == MPI_IOCSTATUS_BUSY) { printk(MYIOC_s_WARN_FMT "IOC_BUSY!\n", iocp->name); printk(MYIOC_s_WARN_FMT "(try again later?)\n", iocp->name); return -EBUSY; } else { printk(MYIOC_s_WARN_FMT "ioctl_fwdl() returned [bad] status = %04xh\n", iocp->name, iocstat); printk(MYIOC_s_WARN_FMT "(bad VooDoo)\n", iocp->name); return -ENOMSG; } return 0; fwdl_out: CLEAR_MGMT_STATUS(iocp->ioctl_cmds.status); SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, 0); kfree_sgl(sgl, sgl_dma, buflist, iocp); return ret; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * SGE Allocation routine * * Inputs: bytes - number of bytes to be transferred * sgdir - data direction * sge_offset - offset (in bytes) from the start of the request * frame to the first SGE * ioc - pointer to the mptadapter * Outputs: frags - number of scatter gather elements * blp - point to the buflist pointer * sglbuf_dma - pointer to the (dma) sgl * Returns: Null if failes * pointer to the (virtual) sgl if successful. */ static MptSge_t * kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags, struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc) { MptSge_t *sglbuf = NULL; /* pointer to array of SGE */ /* and chain buffers */ struct buflist *buflist = NULL; /* kernel routine */ MptSge_t *sgl; int numfrags = 0; int fragcnt = 0; int alloc_sz = min(bytes,MAX_KMALLOC_SZ); // avoid kernel warning msg! int bytes_allocd = 0; int this_alloc; dma_addr_t pa; // phys addr int i, buflist_ent; int sg_spill = MAX_FRAGS_SPILL1; int dir; /* initialization */ *frags = 0; *blp = NULL; /* Allocate and initialize an array of kernel * structures for the SG elements. 
*/ i = MAX_SGL_BYTES / 8; buflist = kzalloc(i, GFP_USER); if (!buflist) return NULL; buflist_ent = 0; /* Allocate a single block of memory to store the sg elements and * the chain buffers. The calling routine is responsible for * copying the data in this array into the correct place in the * request and chain buffers. */ sglbuf = pci_alloc_consistent(ioc->pcidev, MAX_SGL_BYTES, sglbuf_dma); if (sglbuf == NULL) goto free_and_fail; if (sgdir & 0x04000000) dir = PCI_DMA_TODEVICE; else dir = PCI_DMA_FROMDEVICE; /* At start: * sgl = sglbuf = point to beginning of sg buffer * buflist_ent = 0 = first kernel structure * sg_spill = number of SGE that can be written before the first * chain element. * */ sgl = sglbuf; sg_spill = ((ioc->req_sz - sge_offset)/ioc->SGE_size) - 1; while (bytes_allocd < bytes) { this_alloc = min(alloc_sz, bytes-bytes_allocd); buflist[buflist_ent].len = this_alloc; buflist[buflist_ent].kptr = pci_alloc_consistent(ioc->pcidev, this_alloc, &pa); if (buflist[buflist_ent].kptr == NULL) { alloc_sz = alloc_sz / 2; if (alloc_sz == 0) { printk(MYIOC_s_WARN_FMT "-SG: No can do - " "not enough memory! :-(\n", ioc->name); printk(MYIOC_s_WARN_FMT "-SG: (freeing %d frags)\n", ioc->name, numfrags); goto free_and_fail; } continue; } else { dma_addr_t dma_addr; bytes_allocd += this_alloc; sgl->FlagsLength = (0x10000000|sgdir|this_alloc); dma_addr = pci_map_single(ioc->pcidev, buflist[buflist_ent].kptr, this_alloc, dir); sgl->Address = dma_addr; fragcnt++; numfrags++; sgl++; buflist_ent++; } if (bytes_allocd >= bytes) break; /* Need to chain? */ if (fragcnt == sg_spill) { printk(MYIOC_s_WARN_FMT "-SG: No can do - " "Chain required! :-(\n", ioc->name); printk(MYIOC_s_WARN_FMT "(freeing %d frags)\n", ioc->name, numfrags); goto free_and_fail; } /* overflow check... */ if (numfrags*8 > MAX_SGL_BYTES){ /* GRRRRR... */ printk(MYIOC_s_WARN_FMT "-SG: No can do - " "too many SG frags! 
:-(\n", ioc->name); printk(MYIOC_s_WARN_FMT "-SG: (freeing %d frags)\n", ioc->name, numfrags); goto free_and_fail; } } /* Last sge fixup: set LE+eol+eob bits */ sgl[-1].FlagsLength |= 0xC1000000; *frags = numfrags; *blp = buflist; dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "-SG: kbuf_alloc_2_sgl() - " "%d SG frags generated!\n", ioc->name, numfrags)); dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "-SG: kbuf_alloc_2_sgl() - " "last (big) alloc_sz=%d\n", ioc->name, alloc_sz)); return sglbuf; free_and_fail: if (sglbuf != NULL) { for (i = 0; i < numfrags; i++) { dma_addr_t dma_addr; u8 *kptr; int len; if ((sglbuf[i].FlagsLength >> 24) == 0x30) continue; dma_addr = sglbuf[i].Address; kptr = buflist[i].kptr; len = buflist[i].len; pci_free_consistent(ioc->pcidev, len, kptr, dma_addr); } pci_free_consistent(ioc->pcidev, MAX_SGL_BYTES, sglbuf, *sglbuf_dma); } kfree(buflist); return NULL; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * Routine to free the SGL elements. */ static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTER *ioc) { MptSge_t *sg = sgl; struct buflist *bl = buflist; u32 nib; int dir; int n = 0; if (sg->FlagsLength & 0x04000000) dir = PCI_DMA_TODEVICE; else dir = PCI_DMA_FROMDEVICE; nib = (sg->FlagsLength & 0xF0000000) >> 28; while (! (nib & 0x4)) { /* eob */ /* skip ignore/chain. */ if (nib == 0 || nib == 3) { ; } else if (sg->Address) { dma_addr_t dma_addr; void *kptr; int len; dma_addr = sg->Address; kptr = bl->kptr; len = bl->len; pci_unmap_single(ioc->pcidev, dma_addr, len, dir); pci_free_consistent(ioc->pcidev, len, kptr, dma_addr); n++; } sg++; bl++; nib = (le32_to_cpu(sg->FlagsLength) & 0xF0000000) >> 28; } /* we're at eob! 
*/ if (sg->Address) { dma_addr_t dma_addr; void *kptr; int len; dma_addr = sg->Address; kptr = bl->kptr; len = bl->len; pci_unmap_single(ioc->pcidev, dma_addr, len, dir); pci_free_consistent(ioc->pcidev, len, kptr, dma_addr); n++; } pci_free_consistent(ioc->pcidev, MAX_SGL_BYTES, sgl, sgl_dma); kfree(buflist); dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "-SG: Free'd 1 SGL buf + %d kbufs!\n", ioc->name, n)); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * mptctl_getiocinfo - Query the host adapter for IOC information. * @arg: User space argument * * Outputs: None. * Return: 0 if successful * -EFAULT if data unavailable * -ENODEV if no such device/adapter */ static int mptctl_getiocinfo (unsigned long arg, unsigned int data_size) { struct mpt_ioctl_iocinfo __user *uarg = (void __user *) arg; struct mpt_ioctl_iocinfo *karg; MPT_ADAPTER *ioc; struct pci_dev *pdev; int iocnum; unsigned int port; int cim_rev; struct scsi_device *sdev; VirtDevice *vdevice; /* Add of PCI INFO results in unaligned access for * IA64 and Sparc. Reset long to int. Return no PCI * data for obsolete format. 
*/ if (data_size == sizeof(struct mpt_ioctl_iocinfo_rev0)) cim_rev = 0; else if (data_size == sizeof(struct mpt_ioctl_iocinfo_rev1)) cim_rev = 1; else if (data_size == sizeof(struct mpt_ioctl_iocinfo)) cim_rev = 2; else if (data_size == (sizeof(struct mpt_ioctl_iocinfo_rev0)+12)) cim_rev = 0; /* obsolete */ else return -EFAULT; karg = kmalloc(data_size, GFP_KERNEL); if (karg == NULL) { printk(KERN_ERR MYNAM "%s::mpt_ioctl_iocinfo() @%d - no memory available!\n", __FILE__, __LINE__); return -ENOMEM; } if (copy_from_user(karg, uarg, data_size)) { printk(KERN_ERR MYNAM "%s@%d::mptctl_getiocinfo - " "Unable to read in mpt_ioctl_iocinfo struct @ %p\n", __FILE__, __LINE__, uarg); kfree(karg); return -EFAULT; } if (((iocnum = mpt_verify_adapter(karg->hdr.iocnum, &ioc)) < 0) || (ioc == NULL)) { printk(KERN_DEBUG MYNAM "%s::mptctl_getiocinfo() @%d - ioc%d not found!\n", __FILE__, __LINE__, iocnum); kfree(karg); return -ENODEV; } /* Verify the data transfer size is correct. */ if (karg->hdr.maxDataSize != data_size) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - " "Structure size mismatch. 
Command not completed.\n", ioc->name, __FILE__, __LINE__); kfree(karg); return -EFAULT; } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_getiocinfo called.\n", ioc->name)); /* Fill in the data and return the structure to the calling * program */ if (ioc->bus_type == SAS) karg->adapterType = MPT_IOCTL_INTERFACE_SAS; else if (ioc->bus_type == FC) karg->adapterType = MPT_IOCTL_INTERFACE_FC; else karg->adapterType = MPT_IOCTL_INTERFACE_SCSI; if (karg->hdr.port > 1) { kfree(karg); return -EINVAL; } port = karg->hdr.port; karg->port = port; pdev = (struct pci_dev *) ioc->pcidev; karg->pciId = pdev->device; karg->hwRev = pdev->revision; karg->subSystemDevice = pdev->subsystem_device; karg->subSystemVendor = pdev->subsystem_vendor; if (cim_rev == 1) { /* Get the PCI bus, device, and function numbers for the IOC */ karg->pciInfo.u.bits.busNumber = pdev->bus->number; karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); } else if (cim_rev == 2) { /* Get the PCI bus, device, function and segment ID numbers for the IOC */ karg->pciInfo.u.bits.busNumber = pdev->bus->number; karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); karg->pciInfo.segmentID = pci_domain_nr(pdev->bus); } /* Get number of devices */ karg->numDevices = 0; if (ioc->sh) { shost_for_each_device(sdev, ioc->sh) { vdevice = sdev->hostdata; if (vdevice == NULL || vdevice->vtarget == NULL) continue; if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) continue; karg->numDevices++; } } /* Set the BIOS and FW Version */ karg->FWVersion = ioc->facts.FWVersion.Word; karg->BIOSVersion = ioc->biosVersion; /* Set the Version Strings. 
*/ strncpy (karg->driverVersion, MPT_LINUX_PACKAGE_NAME, MPT_IOCTL_VERSION_LENGTH); karg->driverVersion[MPT_IOCTL_VERSION_LENGTH-1]='\0'; karg->busChangeEvent = 0; karg->hostId = ioc->pfacts[port].PortSCSIID; karg->rsvd[0] = karg->rsvd[1] = 0; /* Copy the data from kernel memory to user memory */ if (copy_to_user((char __user *)arg, karg, data_size)) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - " "Unable to write out mpt_ioctl_iocinfo struct @ %p\n", ioc->name, __FILE__, __LINE__, uarg); kfree(karg); return -EFAULT; } kfree(karg); return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * mptctl_gettargetinfo - Query the host adapter for target information. * @arg: User space argument * * Outputs: None. * Return: 0 if successful * -EFAULT if data unavailable * -ENODEV if no such device/adapter */ static int mptctl_gettargetinfo (unsigned long arg) { struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg; struct mpt_ioctl_targetinfo karg; MPT_ADAPTER *ioc; VirtDevice *vdevice; char *pmem; int *pdata; int iocnum; int numDevices = 0; int lun; int maxWordsLeft; int numBytes; u8 port; struct scsi_device *sdev; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_targetinfo))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_gettargetinfo - " "Unable to read in mpt_ioctl_targetinfo struct @ %p\n", __FILE__, __LINE__, uarg); return -EFAULT; } if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || (ioc == NULL)) { printk(KERN_DEBUG MYNAM "%s::mptctl_gettargetinfo() @%d - ioc%d not found!\n", __FILE__, __LINE__, iocnum); return -ENODEV; } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_gettargetinfo called.\n", ioc->name)); /* Get the port number and set the maximum number of bytes * in the returned structure. * Ignore the port setting. 
*/
	/* numBytes is derived from a user-supplied header field; a too-small
	 * maxDataSize is rejected below.  NOTE(review): the error text says
	 * "no memory available" but this branch is really a bad-size error —
	 * message is misleading, behavior unchanged. */
	numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header);
	maxWordsLeft = numBytes/sizeof(int);
	port = karg.hdr.port;

	if (maxWordsLeft <= 0) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo() - no memory available!\n",
		    ioc->name, __FILE__, __LINE__);
		return -ENOMEM;
	}

	/* Fill in the data and return the structure to the calling
	 * program
	 */

	/* struct mpt_ioctl_targetinfo does not contain sufficient space
	 * for the target structures so when the IOCTL is called, there is
	 * not sufficient stack space for the structure. Allocate memory,
	 * populate the memory, copy back to the user, then free memory.
	 * targetInfo format:
	 * bits 31-24: reserved
	 *      23-16: LUN
	 *      15- 8: Bus Number
	 *       7- 0: Target ID
	 */
	pmem = kzalloc(numBytes, GFP_KERNEL);
	if (!pmem) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo() - no memory available!\n",
		    ioc->name, __FILE__, __LINE__);
		return -ENOMEM;
	}
	pdata = (int *) pmem;

	/* Get number of devices: walk every scsi_device on the host,
	 * skipping RAID components, and pack one word per device until
	 * the user-sized buffer (maxWordsLeft words) is full.
	 */
	if (ioc->sh){
		shost_for_each_device(sdev, ioc->sh) {
			if (!maxWordsLeft)
				continue;
			vdevice = sdev->hostdata;
			if (vdevice == NULL || vdevice->vtarget == NULL)
				continue;
			if (vdevice->vtarget->tflags &
			    MPT_TARGET_FLAGS_RAID_COMPONENT)
				continue;
			/* RAID volumes report a fixed LUN of 0x80 */
			lun = (vdevice->vtarget->raidVolume) ? 0x80 : vdevice->lun;
			*pdata = (((u8)lun << 16) + (vdevice->vtarget->channel << 8) +
			    (vdevice->vtarget->id ));
			pdata++;
			numDevices++;
			--maxWordsLeft;
		}
	}
	karg.numDevices = numDevices;

	/* Copy part of the data from kernel memory to user memory
	 */
	if (copy_to_user((char __user *)arg, &karg,
	    sizeof(struct mpt_ioctl_targetinfo))) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo - "
			"Unable to write out mpt_ioctl_targetinfo struct @ %p\n",
			ioc->name, __FILE__, __LINE__, uarg);
		kfree(pmem);
		return -EFAULT;
	}

	/* Copy the remaining data from kernel memory to user memory
	 */
	if (copy_to_user(uarg->targetInfo, pmem, numBytes)) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo - "
			"Unable to write out mpt_ioctl_targetinfo struct @ %p\n",
			ioc->name, __FILE__, __LINE__, pdata);
		kfree(pmem);
		return -EFAULT;
	}

	kfree(pmem);

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* MPT IOCTL Test function.
 * Copies adapter identification (chip type, name, product string) back
 * to the caller so user space can sanity-check the ioctl path.
 *
 * Outputs:	None.
 * Return:	0 if successful
 *		-EFAULT if data unavailable
 *		-ENODEV if no such device/adapter
 */
static int
mptctl_readtest (unsigned long arg)
{
	struct mpt_ioctl_test __user *uarg = (void __user *) arg;
	struct mpt_ioctl_test karg;
	MPT_ADAPTER *ioc;
	int iocnum;

	if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_test))) {
		printk(KERN_ERR MYNAM "%s@%d::mptctl_readtest - "
			"Unable to read in mpt_ioctl_test struct @ %p\n",
				__FILE__, __LINE__, uarg);
		return -EFAULT;
	}

	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
	    (ioc == NULL)) {
		printk(KERN_DEBUG MYNAM "%s::mptctl_readtest() @%d - ioc%d not found!\n",
				__FILE__, __LINE__, iocnum);
		return -ENODEV;
	}

	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_readtest called.\n",
	    ioc->name));
	/* Fill in the data and return the structure to the calling
	 * program
	 */
#ifdef MFCNT
	/* With MFCNT builds, report the message-frame count instead of
	 * the PCI device id. */
	karg.chip_type = ioc->mfcnt;
#else
	karg.chip_type = ioc->pcidev->device;
#endif
	/* strncpy does not guarantee termination, so NUL-terminate
	 * explicitly after each copy. */
	strncpy (karg.name, ioc->name, MPT_MAX_NAME);
	karg.name[MPT_MAX_NAME-1]='\0';
	strncpy (karg.product, ioc->prod_name, MPT_PRODUCT_LENGTH);
	karg.product[MPT_PRODUCT_LENGTH-1]='\0';

	/* Copy the data from kernel memory to user memory
	 */
	if (copy_to_user((char __user *)arg, &karg, sizeof(struct mpt_ioctl_test))) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_readtest - "
			"Unable to write out mpt_ioctl_test struct @ %p\n",
			ioc->name, __FILE__, __LINE__, uarg);
		return -EFAULT;
	}

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mptctl_eventquery - Query the host adapter for the event types
 *	that are being logged.
 *	@arg: User space argument
 *
 * Outputs:	None.
 * Return:	0 if successful
 *		-EFAULT if data unavailable
 *		-ENODEV if no such device/adapter
 */
static int
mptctl_eventquery (unsigned long arg)
{
	struct mpt_ioctl_eventquery __user *uarg = (void __user *) arg;
	struct mpt_ioctl_eventquery karg;
	MPT_ADAPTER *ioc;
	int iocnum;

	if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) {
		printk(KERN_ERR MYNAM "%s@%d::mptctl_eventquery - "
			"Unable to read in mpt_ioctl_eventquery struct @ %p\n",
				__FILE__, __LINE__, uarg);
		return -EFAULT;
	}

	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
	    (ioc == NULL)) {
		printk(KERN_DEBUG MYNAM "%s::mptctl_eventquery() @%d - ioc%d not found!\n",
				__FILE__, __LINE__, iocnum);
		return -ENODEV;
	}

	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventquery called.\n",
	    ioc->name));
	/* Report the fixed log depth and the currently enabled event mask. */
	karg.eventEntries = MPTCTL_EVENT_LOG_SIZE;
	karg.eventTypes = ioc->eventTypes;

	/* Copy the data from kernel memory to user memory
	 */
	if (copy_to_user((char __user *)arg, &karg, sizeof(struct mpt_ioctl_eventquery))) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_eventquery - "
			"Unable to write out mpt_ioctl_eventquery struct @ %p\n",
			ioc->name, __FILE__, __LINE__, uarg);
		return -EFAULT;
	}
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* mptctl_eventenable - enable event logging on an adapter, allocating
 * the event log on first use, and record the event-type mask requested
 * by user space.
 */
static int
mptctl_eventenable (unsigned long arg)
{
	struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg;
	struct
mpt_ioctl_eventenable karg;
	MPT_ADAPTER *ioc;
	int iocnum;

	if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) {
		printk(KERN_ERR MYNAM "%s@%d::mptctl_eventenable - "
			"Unable to read in mpt_ioctl_eventenable struct @ %p\n",
				__FILE__, __LINE__, uarg);
		return -EFAULT;
	}

	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
	    (ioc == NULL)) {
		printk(KERN_DEBUG MYNAM "%s::mptctl_eventenable() @%d - ioc%d not found!\n",
				__FILE__, __LINE__, iocnum);
		return -ENODEV;
	}

	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventenable called.\n",
	    ioc->name));
	if (ioc->events == NULL) {
		/* Have not yet allocated memory - do so now.
		 * The log is a fixed-size circular buffer of
		 * MPTCTL_EVENT_LOG_SIZE entries.
		 */
		int sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS);
		ioc->events = kzalloc(sz, GFP_KERNEL);
		if (!ioc->events) {
			printk(MYIOC_s_ERR_FMT
			    ": ERROR - Insufficient memory to add adapter!\n",
			    ioc->name);
			return -ENOMEM;
		}
		ioc->alloc_total += sz;
		ioc->eventContext = 0;
	}

	/* Update the IOC event logging flag.
	 */
	ioc->eventTypes = karg.eventTypes;

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* mptctl_eventreport - copy up to the user-sized number of logged events
 * (capped at MPTCTL_EVENT_LOG_SIZE) back to user space.
 */
static int
mptctl_eventreport (unsigned long arg)
{
	struct mpt_ioctl_eventreport __user *uarg = (void __user *) arg;
	struct mpt_ioctl_eventreport karg;
	MPT_ADAPTER *ioc;
	int iocnum;
	int numBytes, maxEvents, max;

	if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) {
		printk(KERN_ERR MYNAM "%s@%d::mptctl_eventreport - "
			"Unable to read in mpt_ioctl_eventreport struct @ %p\n",
				__FILE__, __LINE__, uarg);
		return -EFAULT;
	}

	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
	    (ioc == NULL)) {
		printk(KERN_DEBUG MYNAM "%s::mptctl_eventreport() @%d - ioc%d not found!\n",
				__FILE__, __LINE__, iocnum);
		return -ENODEV;
	}

	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventreport called.\n",
	    ioc->name));

	/* How many whole events fit in the buffer the user described. */
	numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header);
	maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS);

	max = MPTCTL_EVENT_LOG_SIZE < maxEvents ? MPTCTL_EVENT_LOG_SIZE : maxEvents;

	/* If fewer than 1 event is requested, there must have
	 * been some type of error.
	 */
	if ((max < 1) || !ioc->events)
		return -ENODATA;

	/* reset this flag so SIGIO can restart */
	ioc->aen_event_read_flag=0;

	/* Copy the data from kernel memory to user memory
	 */
	numBytes = max * sizeof(MPT_IOCTL_EVENTS);
	if (copy_to_user(uarg->eventData, ioc->events, numBytes)) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_eventreport - "
			"Unable to write out mpt_ioctl_eventreport struct @ %p\n",
				ioc->name, __FILE__, __LINE__, ioc->events);
		return -EFAULT;
	}

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* mptctl_replace_fw - replace the cached firmware image with a new one
 * supplied by user space.  A no-op (success) when no image is cached.
 */
static int
mptctl_replace_fw (unsigned long arg)
{
	struct mpt_ioctl_replace_fw __user *uarg = (void __user *) arg;
	struct mpt_ioctl_replace_fw karg;
	MPT_ADAPTER *ioc;
	int iocnum;
	int newFwSize;

	if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_replace_fw))) {
		printk(KERN_ERR MYNAM "%s@%d::mptctl_replace_fw - "
			"Unable to read in mpt_ioctl_replace_fw struct @ %p\n",
				__FILE__, __LINE__, uarg);
		return -EFAULT;
	}

	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
	    (ioc == NULL)) {
		printk(KERN_DEBUG MYNAM "%s::mptctl_replace_fw() @%d - ioc%d not found!\n",
				__FILE__, __LINE__, iocnum);
		return -ENODEV;
	}

	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_replace_fw called.\n",
	    ioc->name));

	/* If caching FW, Free the old FW image
	 */
	if (ioc->cached_fw == NULL)
		return 0;

	mpt_free_fw_memory(ioc);

	/* Allocate memory for the new FW image.
	 * Round the user-supplied size up to a 4-byte multiple
	 * (the two conditional adds together align odd/2-mod-4 sizes).
	 */
	newFwSize = karg.newImageSize;

	if (newFwSize & 0x01)
		newFwSize += 1;
	if (newFwSize & 0x02)
		newFwSize += 2;

	mpt_alloc_fw_memory(ioc, newFwSize);
	if (ioc->cached_fw == NULL)
		return -ENOMEM;

	/* Copy the data from user memory to kernel space
	 */
	if (copy_from_user(ioc->cached_fw, uarg->newImage, newFwSize)) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_replace_fw - "
				"Unable to read in mpt_ioctl_replace_fw image "
				"@ %p\n", ioc->name, __FILE__, __LINE__, uarg);
		mpt_free_fw_memory(ioc);
		return -EFAULT;
	}

	/* Update IOCFactsReply
	 */
	ioc->facts.FWImageSize = newFwSize;

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* MPT IOCTL MPTCOMMAND function.
 * Cast the arg into the mpt_ioctl_mpt_command structure.
 * Thin wrapper: validates the adapter, then delegates to
 * mptctl_do_mpt_command() with a pointer to the in-user-space MF.
 *
 * Outputs:	None.
 * Return:	0 if successful
 *		-EBUSY  if previous command timeout and IOC reset is not complete.
 *		-EFAULT if data unavailable
 *		-ENODEV if no such device/adapter
 *		-ETIME	if timer expires
 *		-ENOMEM if memory allocation error
 */
static int
mptctl_mpt_command (unsigned long arg)
{
	struct mpt_ioctl_command __user *uarg = (void __user *) arg;
	struct mpt_ioctl_command karg;
	MPT_ADAPTER *ioc;
	int iocnum;
	int rc;

	if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_command))) {
		printk(KERN_ERR MYNAM "%s@%d::mptctl_mpt_command - "
			"Unable to read in mpt_ioctl_command struct @ %p\n",
				__FILE__, __LINE__, uarg);
		return -EFAULT;
	}

	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
	    (ioc == NULL)) {
		printk(KERN_DEBUG MYNAM "%s::mptctl_mpt_command() @%d - ioc%d not found!\n",
				__FILE__, __LINE__, iocnum);
		return -ENODEV;
	}

	rc = mptctl_do_mpt_command (karg, &uarg->MF);

	return rc;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Worker routine for the IOCTL MPTCOMMAND and MPTCOMMAND32 (sparc) commands.
 *
 * Outputs:	None.
 * Return:	0 if successful
 *		-EBUSY  if previous command timeout and IOC reset is not complete.
*		-EFAULT if data unavailable
 *		-ENODEV if no such device/adapter
 *		-ETIME	if timer expires
 *		-ENOMEM if memory allocation error
 *		-EPERM if SCSI I/O and target is untagged
 */
static int
mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
{
	MPT_ADAPTER	*ioc;
	MPT_FRAME_HDR	*mf = NULL;
	MPIHeader_t	*hdr;
	char		*psge;
	struct buflist	bufIn;	/* data In buffer */
	struct buflist	bufOut; /* data Out buffer */
	dma_addr_t	dma_addr_in;
	dma_addr_t	dma_addr_out;
	int		sgSize = 0;	/* Num SG elements */
	int		iocnum, flagsLength;
	int		sz, rc = 0;
	int		msgContext;
	u16		req_idx;
	ulong		timeout;
	unsigned long	timeleft;
	struct scsi_device *sdev;
	unsigned long	flags;
	u8		function;

	/* bufIn and bufOut are used for user to kernel space transfers
	 */
	bufIn.kptr = bufOut.kptr = NULL;
	bufIn.len = bufOut.len = 0;

	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
	    (ioc == NULL)) {
		printk(KERN_DEBUG MYNAM "%s::mptctl_do_mpt_command() @%d - ioc%d not found!\n",
				__FILE__, __LINE__, iocnum);
		return -ENODEV;
	}

	/* Reject new commands while a diagnostic reset is in flight. */
	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
	if (ioc->ioc_reset_in_progress) {
		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
		printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - "
			"Busy with diagnostic reset\n", __FILE__, __LINE__);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);

	/* Verify that the final request frame will not be too large.
	 * NOTE(review): karg.dataSgeOffset is user-controlled; the size
	 * computed here is what bounds the later copy_from_user into the
	 * message frame — confirm upstream hardening for overflow.
	 */
	sz = karg.dataSgeOffset * 4;
	if (karg.dataInSize > 0)
		sz += ioc->SGE_size;
	if (karg.dataOutSize > 0)
		sz += ioc->SGE_size;
	if (sz > ioc->req_sz) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
			"Request frame too large (%d) maximum (%d)\n",
			ioc->name, __FILE__, __LINE__, sz, ioc->req_sz);
		return -EFAULT;
	}

	/* Get a free request frame and save the message context.
	 */
	if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL)
		return -EAGAIN;

	hdr = (MPIHeader_t *) mf;
	msgContext = le32_to_cpu(hdr->MsgContext);
	req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);

	/* Copy the request frame
	 * Reset the saved message context.
	 * Request frame in user space
	 */
	if (copy_from_user(mf, mfPtr, karg.dataSgeOffset * 4)) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
			"Unable to read MF from mpt_ioctl_command struct @ %p\n",
			ioc->name, __FILE__, __LINE__, mfPtr);
		function = -1;
		rc = -EFAULT;
		goto done_free_mem;
	}
	/* The user copy clobbered the driver's context; restore it. */
	hdr->MsgContext = cpu_to_le32(msgContext);
	function = hdr->Function;

	/* Verify that this request is allowed.
	 */
	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sending mpi function (0x%02X), req=%p\n",
	    ioc->name, hdr->Function, mf));

	switch (function) {
	case MPI_FUNCTION_IOC_FACTS:
	case MPI_FUNCTION_PORT_FACTS:
		karg.dataOutSize  = karg.dataInSize = 0;
		break;

	case MPI_FUNCTION_CONFIG:
	{
		Config_t *config_frame;
		config_frame = (Config_t *)mf;
		dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\ttype=0x%02x ext_type=0x%02x "
		    "number=0x%02x action=0x%02x\n", ioc->name,
		    config_frame->Header.PageType,
		    config_frame->ExtPageType,
		    config_frame->Header.PageNumber,
		    config_frame->Action));
		break;
	}

	case MPI_FUNCTION_FC_COMMON_TRANSPORT_SEND:
	case MPI_FUNCTION_FC_EX_LINK_SRVC_SEND:
	case MPI_FUNCTION_FW_UPLOAD:
	case MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
	case MPI_FUNCTION_FW_DOWNLOAD:
	case MPI_FUNCTION_FC_PRIMITIVE_SEND:
	case MPI_FUNCTION_TOOLBOX:
	case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
		break;

	case MPI_FUNCTION_SCSI_IO_REQUEST:
		if (ioc->sh) {
			SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf;
			int qtag = MPI_SCSIIO_CONTROL_UNTAGGED;
			int scsidir = 0;
			int dataSize;
			u32 id;

			/* NOTE(review): `id` is a device count; the `>`
			 * comparison admits TargetID == id — confirm the
			 * intended bound (>= would exclude it). */
			id = (ioc->devices_per_bus == 0) ? 256 : ioc->devices_per_bus;
			if (pScsiReq->TargetID > id) {
				printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
					"Target ID out of bounds. \n",
					ioc->name, __FILE__, __LINE__);
				rc = -ENODEV;
				goto done_free_mem;
			}

			if (pScsiReq->Bus >= ioc->number_of_buses) {
				printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
					"Target Bus out of bounds. \n",
					ioc->name, __FILE__, __LINE__);
				rc = -ENODEV;
				goto done_free_mem;
			}

			pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
			pScsiReq->MsgFlags |= mpt_msg_flags(ioc);

			/* verify that app has not requested
			 *	more sense data than driver
			 *	can provide, if so, reset this parameter
			 * set the sense buffer pointer low address
			 * update the control field to specify Q type
			 */
			if (karg.maxSenseBytes > MPT_SENSE_BUFFER_SIZE)
				pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
			else
				pScsiReq->SenseBufferLength = karg.maxSenseBytes;

			pScsiReq->SenseBufferLowAddr =
				cpu_to_le32(ioc->sense_buf_low_dma
				   + (req_idx * MPT_SENSE_BUFFER_ALLOC));

			/* Use simple queueing only if the addressed target
			 * advertises tagged-queue support. */
			shost_for_each_device(sdev, ioc->sh) {
				struct scsi_target *starget = scsi_target(sdev);
				VirtTarget *vtarget = starget->hostdata;

				if (vtarget == NULL)
					continue;

				if ((pScsiReq->TargetID == vtarget->id) &&
				    (pScsiReq->Bus == vtarget->channel) &&
				    (vtarget->tflags & MPT_TARGET_FLAGS_Q_YES))
					qtag = MPI_SCSIIO_CONTROL_SIMPLEQ;
			}

			/* Have the IOCTL driver set the direction based
			 * on the dataOutSize (ordering issue with Sparc).
			 */
			if (karg.dataOutSize > 0) {
				scsidir = MPI_SCSIIO_CONTROL_WRITE;
				dataSize = karg.dataOutSize;
			} else {
				scsidir = MPI_SCSIIO_CONTROL_READ;
				dataSize = karg.dataInSize;
			}

			pScsiReq->Control = cpu_to_le32(scsidir | qtag);
			pScsiReq->DataLength = cpu_to_le32(dataSize);

		} else {
			printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
				"SCSI driver is not loaded. \n",
				ioc->name, __FILE__, __LINE__);
			rc = -EFAULT;
			goto done_free_mem;
		}
		break;

	case MPI_FUNCTION_SMP_PASSTHROUGH:
		/* Check mf->PassthruFlags to determine if
		 * transfer is ImmediateMode or not.
		 * Immediate mode returns data in the ReplyFrame.
		 * Else, we are sending request and response data
		 * in two SGLs at the end of the mf.
		 */
		break;

	case MPI_FUNCTION_SATA_PASSTHROUGH:
		if (!ioc->sh) {
			printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
				"SCSI driver is not loaded. \n",
				ioc->name, __FILE__, __LINE__);
			rc = -EFAULT;
			goto done_free_mem;
		}
		break;

	case MPI_FUNCTION_RAID_ACTION:
		/* Just add a SGE
		 */
		break;

	case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
		if (ioc->sh) {
			SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf;
			int qtag = MPI_SCSIIO_CONTROL_SIMPLEQ;
			int scsidir = MPI_SCSIIO_CONTROL_READ;
			int dataSize;

			pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
			pScsiReq->MsgFlags |= mpt_msg_flags(ioc);

			/* verify that app has not requested
			 *	more sense data than driver
			 *	can provide, if so, reset this parameter
			 * set the sense buffer pointer low address
			 * update the control field to specify Q type
			 */
			if (karg.maxSenseBytes > MPT_SENSE_BUFFER_SIZE)
				pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
			else
				pScsiReq->SenseBufferLength = karg.maxSenseBytes;

			pScsiReq->SenseBufferLowAddr =
				cpu_to_le32(ioc->sense_buf_low_dma
				   + (req_idx * MPT_SENSE_BUFFER_ALLOC));

			/* All commands to physical devices are tagged
			 */

			/* Have the IOCTL driver set the direction based
			 * on the dataOutSize (ordering issue with Sparc).
			 */
			if (karg.dataOutSize > 0) {
				scsidir = MPI_SCSIIO_CONTROL_WRITE;
				dataSize = karg.dataOutSize;
			} else {
				scsidir = MPI_SCSIIO_CONTROL_READ;
				dataSize = karg.dataInSize;
			}

			pScsiReq->Control = cpu_to_le32(scsidir | qtag);
			pScsiReq->DataLength = cpu_to_le32(dataSize);

		} else {
			printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
				"SCSI driver is not loaded. \n",
				ioc->name, __FILE__, __LINE__);
			rc = -EFAULT;
			goto done_free_mem;
		}
		break;

	case MPI_FUNCTION_SCSI_TASK_MGMT:
	{
		SCSITaskMgmt_t	*pScsiTm;
		pScsiTm = (SCSITaskMgmt_t *)mf;
		dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
			"\tTaskType=0x%x MsgFlags=0x%x "
			"TaskMsgContext=0x%x id=%d channel=%d\n",
			ioc->name, pScsiTm->TaskType, le32_to_cpu
			(pScsiTm->TaskMsgContext), pScsiTm->MsgFlags,
			pScsiTm->TargetID, pScsiTm->Bus));
		break;
	}

	case MPI_FUNCTION_IOC_INIT:
	{
		IOCInit_t	*pInit = (IOCInit_t *) mf;
		u32		high_addr, sense_high;

		/* Verify that all entries in the IOC INIT match
		 * existing setup (and in LE format).
		 */
		if (sizeof(dma_addr_t) == sizeof(u64)) {
			high_addr = cpu_to_le32((u32)((u64)ioc->req_frames_dma >> 32));
			sense_high= cpu_to_le32((u32)((u64)ioc->sense_buf_pool_dma >> 32));
		} else {
			high_addr = 0;
			sense_high= 0;
		}

		if ((pInit->Flags != 0) || (pInit->MaxDevices != ioc->facts.MaxDevices) ||
			(pInit->MaxBuses != ioc->facts.MaxBuses) ||
			(pInit->ReplyFrameSize != cpu_to_le16(ioc->reply_sz)) ||
			(pInit->HostMfaHighAddr != high_addr) ||
			(pInit->SenseBufferHighAddr != sense_high)) {
			printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
				"IOC_INIT issued with 1 or more incorrect parameters. Rejected.\n",
				ioc->name, __FILE__, __LINE__);
			rc = -EFAULT;
			goto done_free_mem;
		}
	}
		break;

	default:
		/*
		 * MPI_FUNCTION_PORT_ENABLE
		 * MPI_FUNCTION_TARGET_CMD_BUFFER_POST
		 * MPI_FUNCTION_TARGET_ASSIST
		 * MPI_FUNCTION_TARGET_STATUS_SEND
		 * MPI_FUNCTION_TARGET_MODE_ABORT
		 * MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET
		 * MPI_FUNCTION_IO_UNIT_RESET
		 * MPI_FUNCTION_HANDSHAKE
		 * MPI_FUNCTION_REPLY_FRAME_REMOVAL
		 * MPI_FUNCTION_EVENT_NOTIFICATION
		 *  (driver handles event notification)
		 * MPI_FUNCTION_EVENT_ACK
		 */

		/*  What to do with these???  CHECK ME!!!
			MPI_FUNCTION_FC_LINK_SRVC_BUF_POST
			MPI_FUNCTION_FC_LINK_SRVC_RSP
			MPI_FUNCTION_FC_ABORT
			MPI_FUNCTION_LAN_SEND
			MPI_FUNCTION_LAN_RECEIVE
			MPI_FUNCTION_LAN_RESET
		*/

		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
			"Illegal request (function 0x%x) \n",
			ioc->name, __FILE__, __LINE__, hdr->Function);
		rc = -EFAULT;
		goto done_free_mem;
	}

	/* Add the SGL ( at most one data in SGE and one data out SGE )
	 * In the case of two SGE's - the data out (write) will always
	 * preceede the data in (read) SGE. psgList is used to free the
	 * allocated memory.
	 */
	psge = (char *) (((int *) mf) + karg.dataSgeOffset);
	flagsLength = 0;

	if (karg.dataOutSize > 0)
		sgSize ++;

	if (karg.dataInSize > 0)
		sgSize ++;

	if (sgSize > 0) {

		/* Set up the dataOut memory allocation */
		if (karg.dataOutSize > 0) {
			if (karg.dataInSize > 0) {
				flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT |
						MPI_SGE_FLAGS_END_OF_BUFFER |
						MPI_SGE_FLAGS_DIRECTION)
						<< MPI_SGE_FLAGS_SHIFT;
			} else {
				flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE;
			}
			flagsLength |= karg.dataOutSize;
			bufOut.len = karg.dataOutSize;
			bufOut.kptr = pci_alloc_consistent(
					ioc->pcidev, bufOut.len, &dma_addr_out);

			if (bufOut.kptr == NULL) {
				rc = -ENOMEM;
				goto done_free_mem;
			} else {
				/* Set up this SGE.
				 * Copy to MF and to sglbuf
				 */
				ioc->add_sge(psge, flagsLength, dma_addr_out);
				psge += ioc->SGE_size;

				/* Copy user data to kernel space.
				 */
				if (copy_from_user(bufOut.kptr,
						karg.dataOutBufPtr,
						bufOut.len)) {
					printk(MYIOC_s_ERR_FMT
						"%s@%d::mptctl_do_mpt_command - Unable "
						"to read user data "
						"struct @ %p\n",
						ioc->name, __FILE__, __LINE__,karg.dataOutBufPtr);
					rc = -EFAULT;
					goto done_free_mem;
				}
			}
		}

		if (karg.dataInSize > 0) {
			flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
			flagsLength |= karg.dataInSize;

			bufIn.len = karg.dataInSize;
			bufIn.kptr = pci_alloc_consistent(ioc->pcidev,
					bufIn.len, &dma_addr_in);

			if (bufIn.kptr == NULL) {
				rc = -ENOMEM;
				goto done_free_mem;
			} else {
				/* Set up this SGE
				 * Copy to MF and to sglbuf
				 */
				ioc->add_sge(psge, flagsLength, dma_addr_in);
			}
		}
	} else {
		/* Add a NULL SGE
		 */
		ioc->add_sge(psge, flagsLength, (dma_addr_t) -1);
	}

	SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, hdr->MsgContext);
	INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
	if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) {

		/* Task management must be serialized and, on older
		 * firmware without a high-priority queue, handshaken. */
		mutex_lock(&ioc->taskmgmt_cmds.mutex);
		if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
			mutex_unlock(&ioc->taskmgmt_cmds.mutex);
			goto done_free_mem;
		}

		DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf);

		if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
		    (ioc->facts.MsgVersion >= MPI_VERSION_01_05))
			mpt_put_msg_frame_hi_pri(mptctl_id, ioc, mf);
		else {
			rc =mpt_send_handshake_request(mptctl_id, ioc,
				sizeof(SCSITaskMgmt_t), (u32*)mf, CAN_SLEEP);
			if (rc != 0) {
				dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
				    "send_handshake FAILED! (ioc %p, mf %p)\n",
				    ioc->name, ioc, mf));
				mpt_clear_taskmgmt_in_progress_flag(ioc);
				rc = -ENODATA;
				mutex_unlock(&ioc->taskmgmt_cmds.mutex);
				goto done_free_mem;
			}
		}

	} else
		mpt_put_msg_frame(mptctl_id, ioc, mf);

	/* Now wait for the command to complete.
	 * NOTE(review): karg.timeout is user-controlled; HZ*timeout is not
	 * clamped here — confirm an upper bound is enforced by the caller.
	 */
	timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT;
retry_wait:
	timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
				HZ*timeout);
	if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
		rc = -ETIME;
		dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "%s: TIMED OUT!\n",
		    ioc->name, __func__));
		if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
			if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
				mutex_unlock(&ioc->taskmgmt_cmds.mutex);
			goto done_free_mem;
		}
		if (!timeleft) {
			printk(MYIOC_s_WARN_FMT
			       "mpt cmd timeout, doorbell=0x%08x"
			       " function=0x%x\n",
			       ioc->name, mpt_GetIocState(ioc, 0), function);
			if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
				mutex_unlock(&ioc->taskmgmt_cmds.mutex);
			mptctl_timeout_expired(ioc, mf);
			/* frame ownership passed to timeout handling */
			mf = NULL;
		} else
			goto retry_wait;
		goto done_free_mem;
	}

	if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
		mutex_unlock(&ioc->taskmgmt_cmds.mutex);

	/* Command completed; the firmware owns/consumed the frame. */
	mf = NULL;

	/* If a valid reply frame, copy to the user.
	 * Offset 2: reply length in U32's
	 */
	if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) {
		if (karg.maxReplyBytes < ioc->reply_sz) {
			sz = min(karg.maxReplyBytes,
				4*ioc->ioctl_cmds.reply[2]);
		} else {
			sz = min(ioc->reply_sz, 4*ioc->ioctl_cmds.reply[2]);
		}
		if (sz > 0) {
			if (copy_to_user(karg.replyFrameBufPtr,
				 ioc->ioctl_cmds.reply, sz)){
				 printk(MYIOC_s_ERR_FMT
				     "%s@%d::mptctl_do_mpt_command - "
				 "Unable to write out reply frame %p\n",
				 ioc->name, __FILE__, __LINE__, karg.replyFrameBufPtr);
				 rc =  -ENODATA;
				 goto done_free_mem;
			}
		}
	}

	/* If valid sense data, copy to user.
	 */
	if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_SENSE_VALID) {
		sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE);
		if (sz > 0) {
			if (copy_to_user(karg.senseDataPtr,
			    ioc->ioctl_cmds.sense, sz)) {
				printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
				"Unable to write sense data to user %p\n",
				ioc->name, __FILE__, __LINE__,
				karg.senseDataPtr);
				rc = -ENODATA;
				goto done_free_mem;
			}
		}
	}

	/* If the overall status is _GOOD and data in, copy data
	 * to user.
	 */
	if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD) &&
				(karg.dataInSize > 0) && (bufIn.kptr)) {
		if (copy_to_user(karg.dataInBufPtr,
				 bufIn.kptr, karg.dataInSize)) {
			printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
				"Unable to write data to user %p\n",
				ioc->name, __FILE__, __LINE__,
				karg.dataInBufPtr);
			rc =  -ENODATA;
		}
	}

done_free_mem:

	CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
	SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);

	/* Free the allocated memory.
	 */
	if (bufOut.kptr != NULL) {
		pci_free_consistent(ioc->pcidev,
			bufOut.len, (void *) bufOut.kptr, dma_addr_out);
	}

	if (bufIn.kptr != NULL) {
		pci_free_consistent(ioc->pcidev,
			bufIn.len, (void *) bufIn.kptr, dma_addr_in);
	}

	/* mf is null if command issued successfully
	 * otherwise, failure occurred after mf acquired.
	 */
	if (mf)
		mpt_free_msg_frame(ioc, mf);

	return rc;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Prototype Routine for the HOST INFO command.
 *
 * Outputs:	None.
 * Return:	0 if successful
 *		-EFAULT if data unavailable
 *		-EBUSY  if previous command timeout and IOC reset is not complete.
 *		-ENODEV if no such device/adapter
 *		-ETIME	if timer expires
 *		-ENOMEM if memory allocation error
 */
static int
mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
{
	hp_host_info_t	__user *uarg = (void __user *) arg;
	MPT_ADAPTER		*ioc;
	struct pci_dev		*pdev;
	char			*pbuf=NULL;
	dma_addr_t		buf_dma;
	hp_host_info_t		karg;
	CONFIGPARMS		cfg;
	ConfigPageHeader_t	hdr;
	int			iocnum;
	int			rc, cim_rev;
	ToolboxIstwiReadWriteRequest_t	*IstwiRWRequest;
	MPT_FRAME_HDR		*mf = NULL;
	MPIHeader_t		*mpi_hdr;
	unsigned long		timeleft;
	int			retval;

	/* Reset long to int.
Should affect IA64 and SPARC only */
	/* data_size distinguishes the current (rev 1) from the obsolete
	 * rev 0 layout of hp_host_info; any other size is rejected. */
	if (data_size == sizeof(hp_host_info_t))
		cim_rev = 1;
	else if (data_size == sizeof(hp_host_info_rev0_t))
		cim_rev = 0;	/* obsolete */
	else
		return -EFAULT;

	if (copy_from_user(&karg, uarg, sizeof(hp_host_info_t))) {
		printk(KERN_ERR MYNAM "%s@%d::mptctl_hp_host_info - "
			"Unable to read in hp_host_info struct @ %p\n",
				__FILE__, __LINE__, uarg);
		return -EFAULT;
	}

	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
	    (ioc == NULL)) {
		printk(KERN_DEBUG MYNAM "%s::mptctl_hp_hostinfo() @%d - ioc%d not found!\n",
				__FILE__, __LINE__, iocnum);
		return -ENODEV;
	}

	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": mptctl_hp_hostinfo called.\n",
	    ioc->name));

	/* Fill in the data and return the structure to the calling
	 * program
	 */
	pdev = (struct pci_dev *) ioc->pcidev;

	karg.vendor = pdev->vendor;
	karg.device = pdev->device;
	karg.subsystem_id = pdev->subsystem_device;
	karg.subsystem_vendor = pdev->subsystem_vendor;
	karg.devfn = pdev->devfn;
	karg.bus = pdev->bus->number;

	/* Save the SCSI host no. if
	 * SCSI driver loaded
	 */
	if (ioc->sh != NULL)
		karg.host_no = ioc->sh->host_no;
	else
		karg.host_no =  -1;

	/* Reformat the fw_version into a "M.m.u.d" decimal string;
	 * each field is at most two digits. */
	karg.fw_version[0] = ioc->facts.FWVersion.Struct.Major >= 10 ?
		((ioc->facts.FWVersion.Struct.Major / 10) + '0') : '0';
	karg.fw_version[1] = (ioc->facts.FWVersion.Struct.Major % 10 ) + '0';
	karg.fw_version[2] = '.';
	karg.fw_version[3] = ioc->facts.FWVersion.Struct.Minor >= 10 ?
		((ioc->facts.FWVersion.Struct.Minor / 10) + '0') : '0';
	karg.fw_version[4] = (ioc->facts.FWVersion.Struct.Minor % 10 ) + '0';
	karg.fw_version[5] = '.';
	karg.fw_version[6] = ioc->facts.FWVersion.Struct.Unit >= 10 ?
		((ioc->facts.FWVersion.Struct.Unit / 10) + '0') : '0';
	karg.fw_version[7] = (ioc->facts.FWVersion.Struct.Unit % 10 ) + '0';
	karg.fw_version[8] = '.';
	karg.fw_version[9] = ioc->facts.FWVersion.Struct.Dev >= 10 ?
		((ioc->facts.FWVersion.Struct.Dev / 10) + '0') : '0';
	karg.fw_version[10] = (ioc->facts.FWVersion.Struct.Dev % 10 ) + '0';
	karg.fw_version[11] = '\0';

	/* Issue a config request to get the device serial number
	 */
	hdr.PageVersion = 0;
	hdr.PageLength = 0;
	hdr.PageNumber = 0;
	hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING;
	cfg.cfghdr.hdr = &hdr;
	cfg.physAddr = -1;
	cfg.pageAddr = 0;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
	cfg.dir = 0;	/* read */
	cfg.timeout = 10;

	/* Default serial is blanks; overwritten if the manufacturing
	 * page yields a board tracer number. */
	strncpy(karg.serial_number, " ", 24);
	if (mpt_config(ioc, &cfg) == 0) {
		if (cfg.cfghdr.hdr->PageLength > 0) {
			/* Issue the second config page request */
			cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;

			pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma);
			if (pbuf) {
				cfg.physAddr = buf_dma;
				if (mpt_config(ioc, &cfg) == 0) {
					ManufacturingPage0_t *pdata = (ManufacturingPage0_t *) pbuf;
					if (strlen(pdata->BoardTracerNumber) > 1) {
						strncpy(karg.serial_number,
						    pdata->BoardTracerNumber, 24);
						karg.serial_number[24-1]='\0';
					}
				}
				pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
				pbuf = NULL;
			}
		}
	}
	rc = mpt_GetIocState(ioc, 1);
	switch (rc) {
	case MPI_IOC_STATE_OPERATIONAL:
		karg.ioc_status =  HP_STATUS_OK;
		break;

	case MPI_IOC_STATE_FAULT:
		karg.ioc_status =  HP_STATUS_FAILED;
		break;

	case MPI_IOC_STATE_RESET:
	case MPI_IOC_STATE_READY:
	default:
		karg.ioc_status =  HP_STATUS_OTHER;
		break;
	}

	karg.base_io_addr = pci_resource_start(pdev, 0);

	if ((ioc->bus_type == SAS) || (ioc->bus_type == FC))
		karg.bus_phys_width = HP_BUS_WIDTH_UNK;
	else
		karg.bus_phys_width = HP_BUS_WIDTH_16;

	karg.hard_resets = 0;
	karg.soft_resets = 0;
	karg.timeouts = 0;
	if (ioc->sh != NULL) {
		MPT_SCSI_HOST *hd =  shost_priv(ioc->sh);

		/* Reset statistics only exist in the rev-1 layout. */
		if (hd && (cim_rev == 1)) {
			karg.hard_resets = ioc->hard_resets;
			karg.soft_resets = ioc->soft_resets;
			karg.timeouts = ioc->timeouts;
		}
	}

	/*
	 * Gather ISTWI(Industry Standard Two Wire Interface) Data
	 */
	if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) {
		dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
			"%s, no msg frames!!\n", ioc->name, __func__));
		goto out;
	}

	IstwiRWRequest = (ToolboxIstwiReadWriteRequest_t *)mf;
	mpi_hdr = (MPIHeader_t *) mf;
	memset(IstwiRWRequest,0,sizeof(ToolboxIstwiReadWriteRequest_t));
	IstwiRWRequest->Function = MPI_FUNCTION_TOOLBOX;
	IstwiRWRequest->Tool = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL;
	IstwiRWRequest->MsgContext = mpi_hdr->MsgContext;
	IstwiRWRequest->Flags = MPI_TB_ISTWI_FLAGS_READ;
	IstwiRWRequest->NumAddressBytes = 0x01;
	IstwiRWRequest->DataLength = cpu_to_le16(0x04);
	/* ISTWI device address depends on which PCI function this is. */
	if (pdev->devfn & 1)
		IstwiRWRequest->DeviceAddr = 0xB2;
	else
		IstwiRWRequest->DeviceAddr = 0xB0;

	pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma);
	if (!pbuf)
		goto out;
	ioc->add_sge((char *)&IstwiRWRequest->SGL,
	    (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma);

	retval = 0;
	SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context,
				IstwiRWRequest->MsgContext);
	INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
	mpt_put_msg_frame(mptctl_id, ioc, mf);

retry_wait:
	timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
			HZ*MPT_IOCTL_DEFAULT_TIMEOUT);
	if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
		retval = -ETIME;
		printk(MYIOC_s_WARN_FMT "%s: failed\n", ioc->name, __func__);
		if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
			mpt_free_msg_frame(ioc, mf);
			goto out;
		}
		if (!timeleft) {
			printk(MYIOC_s_WARN_FMT
			       "HOST INFO command timeout, doorbell=0x%08x\n",
			       ioc->name, mpt_GetIocState(ioc, 0));
			mptctl_timeout_expired(ioc, mf);
		} else
			goto retry_wait;
		goto out;
	}

	/*
	 *ISTWI Data Definition
	 * pbuf[0] = FW_VERSION = 0x4
	 * pbuf[1] = Bay Count = 6 or 4 or 2, depending on
	 *  the config, you should be seeing one out of these three values
	 * pbuf[2] = Drive Installed Map = bit pattern depend on which
	 *  bays have drives in them
	 * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3)
	 */
	if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)
		karg.rsvd = *(u32 *)pbuf;

 out:
	CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
	SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);

	if (pbuf)
		pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma);

	/* Copy the data from kernel memory to user memory
	 */
	if (copy_to_user((char __user *)arg, &karg, sizeof(hp_host_info_t))) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_hpgethostinfo - "
			"Unable to write out hp_host_info @ %p\n",
			ioc->name, __FILE__, __LINE__, uarg);
		return -EFAULT;
	}

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Prototype Routine for the TARGET INFO command.
 *
 * Outputs:	None.
 * Return:	0 if successful
 *		-EFAULT if data unavailable
 *		-EBUSY  if previous command timeout and IOC reset is not complete.
 *		-ENODEV if no such device/adapter
 *		-ETIME	if timer expires
 *		-ENOMEM if memory allocation error
 */
static int
mptctl_hp_targetinfo(unsigned long arg)
{
	hp_target_info_t __user *uarg = (void __user *) arg;
	SCSIDevicePage0_t	*pg0_alloc;
	SCSIDevicePage3_t	*pg3_alloc;
	MPT_ADAPTER		*ioc;
	MPT_SCSI_HOST		*hd = NULL;
	hp_target_info_t	karg;
	int			iocnum;
	int			data_sz;
	dma_addr_t		page_dma;
	CONFIGPARMS		cfg;
	ConfigPageHeader_t	hdr;
	int			tmp, np, rc = 0;

	if (copy_from_user(&karg, uarg, sizeof(hp_target_info_t))) {
		printk(KERN_ERR MYNAM "%s@%d::mptctl_hp_targetinfo - "
			"Unable to read in hp_host_targetinfo struct @ %p\n",
				__FILE__, __LINE__, uarg);
		return -EFAULT;
	}

	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
		(ioc == NULL)) {
		printk(KERN_DEBUG MYNAM "%s::mptctl_hp_targetinfo() @%d - ioc%d not found!\n",
				__FILE__, __LINE__, iocnum);
		return -ENODEV;
	}

	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",
	    ioc->name));

	/* There is nothing to do for FCP parts.
*/
	if ((ioc->bus_type == SAS) || (ioc->bus_type == FC))
		return 0;

	if ((ioc->spi_data.sdp0length == 0) || (ioc->sh == NULL))
		return 0;

	if (ioc->sh->host_no != karg.hdr.host)
		return -ENODEV;

	/* Get the data transfer speeds from SCSI Device Page 0.
	 */
	data_sz = ioc->spi_data.sdp0length * 4;
	pg0_alloc = (SCSIDevicePage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
	if (pg0_alloc) {
		hdr.PageVersion = ioc->spi_data.sdp0version;
		hdr.PageLength = data_sz;
		hdr.PageNumber = 0;
		hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;

		cfg.cfghdr.hdr = &hdr;
		cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
		cfg.dir = 0;
		cfg.timeout = 0;
		cfg.physAddr = page_dma;

		cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id;

		if ((rc = mpt_config(ioc, &cfg)) == 0) {
			np = le32_to_cpu(pg0_alloc->NegotiatedParameters);
			karg.negotiated_width = np & MPI_SCSIDEVPAGE0_NP_WIDE ?
					HP_BUS_WIDTH_16 : HP_BUS_WIDTH_8;

			if (np & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) {
				/* Map the negotiated sync period factor to
				 * a speed class: < 0x09 -> U320, == 0x09 ->
				 * U160, then progressively slower bands. */
				tmp = (np & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >> 8;
				if (tmp < 0x09)
					karg.negotiated_speed = HP_DEV_SPEED_ULTRA320;
				else if (tmp <= 0x09)
					karg.negotiated_speed = HP_DEV_SPEED_ULTRA160;
				else if (tmp <= 0x0A)
					karg.negotiated_speed = HP_DEV_SPEED_ULTRA2;
				else if (tmp <= 0x0C)
					karg.negotiated_speed = HP_DEV_SPEED_ULTRA;
				else if (tmp <= 0x25)
					karg.negotiated_speed = HP_DEV_SPEED_FAST;
				else
					karg.negotiated_speed = HP_DEV_SPEED_ASYNC;
			} else
				karg.negotiated_speed = HP_DEV_SPEED_ASYNC;
		}

		pci_free_consistent(ioc->pcidev, data_sz, (u8 *) pg0_alloc, page_dma);
	}

	/* Set defaults
	 */
	karg.message_rejects = -1;
	karg.phase_errors = -1;
	karg.parity_errors = -1;
	karg.select_timeouts = -1;

	/* Get the target error parameters from SCSI Device Page 3.
	 */
	hdr.PageVersion = 0;
	hdr.PageLength = 0;
	hdr.PageNumber = 3;
	hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;

	cfg.cfghdr.hdr = &hdr;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
	cfg.dir = 0;
	cfg.timeout = 0;
	cfg.physAddr = -1;
	if ((mpt_config(ioc, &cfg) == 0) && (cfg.cfghdr.hdr->PageLength > 0)) {
		/* Issue the second config page request
		 */
		cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
		data_sz = (int) cfg.cfghdr.hdr->PageLength * 4;
		pg3_alloc = (SCSIDevicePage3_t *) pci_alloc_consistent(
							ioc->pcidev, data_sz, &page_dma);
		if (pg3_alloc) {
			cfg.physAddr = page_dma;
			cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id;
			if ((rc = mpt_config(ioc, &cfg)) == 0) {
				karg.message_rejects = (u32) le16_to_cpu(pg3_alloc->MsgRejectCount);
				karg.phase_errors = (u32) le16_to_cpu(pg3_alloc->PhaseErrorCount);
				karg.parity_errors = (u32) le16_to_cpu(pg3_alloc->ParityErrorCount);
			}
			pci_free_consistent(ioc->pcidev, data_sz, (u8 *) pg3_alloc, page_dma);
		}
	}
	hd = shost_priv(ioc->sh);
	if (hd != NULL)
		karg.select_timeouts = hd->sel_timeout[karg.hdr.id];

	/* Copy the data from kernel memory to user memory
	 */
	if (copy_to_user((char __user *)arg, &karg, sizeof(hp_target_info_t))) {
		printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_hp_target_info - "
			"Unable to write out mpt_ioctl_targetinfo struct @ %p\n",
			ioc->name, __FILE__, __LINE__, uarg);
		return -EFAULT;
	}

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

static const struct file_operations mptctl_fops = {
	.owner =	THIS_MODULE,
	.llseek =	no_llseek,
	.fasync = 	mptctl_fasync,
	.unlocked_ioctl = mptctl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_mpctl_ioctl,
#endif
};

static struct miscdevice mptctl_miscdev = {
	MPT_MINOR,
	MYNAM,
	&mptctl_fops
};

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

#ifdef CONFIG_COMPAT

/* compat_mptfwxfer_ioctl - 32-bit compat entry for the firmware download
 * ioctl: translate the 32-bit argument structure (compat_ptr for the
 * buffer) and forward to mptctl_do_fw_download() under the ioctl mutex.
 */
static int
compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd,
			unsigned long arg)
{
	struct mpt_fw_xfer32 kfw32;
	struct mpt_fw_xfer kfw;
	MPT_ADAPTER *iocp = NULL;
	int iocnum, iocnumX;
	int nonblock = (filp->f_flags & O_NONBLOCK);
	int ret;

	if (copy_from_user(&kfw32, (char __user *)arg, sizeof(kfw32)))
		return -EFAULT;

	/* Verify intended MPT adapter */
	iocnumX = kfw32.iocnum & 0xFF;
	if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
	    (iocp == NULL)) {
		printk(KERN_DEBUG MYNAM "::compat_mptfwxfer_ioctl @%d - ioc%d not found!\n",
			__LINE__, iocnumX);
		return -ENODEV;
	}

	if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
		return ret;

	dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "compat_mptfwxfer_ioctl() called\n",
	    iocp->name));
	kfw.iocnum = iocnum;
	kfw.fwlen = kfw32.fwlen;
	kfw.bufp = compat_ptr(kfw32.bufp);

	ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen);

	mutex_unlock(&iocp->ioctl_cmds.mutex);

	return ret;
}

/* compat_mpt_command - 32-bit compat entry for MPTCOMMAND: widen each
 * field of the 32-bit argument structure into the native layout, then
 * hand off to mptctl_do_mpt_command().
 */
static int
compat_mpt_command(struct file *filp, unsigned int cmd,
			unsigned long arg)
{
	struct mpt_ioctl_command32 karg32;
	struct mpt_ioctl_command32 __user *uarg = (struct mpt_ioctl_command32 __user *) arg;
	struct mpt_ioctl_command karg;
	MPT_ADAPTER *iocp = NULL;
	int iocnum, iocnumX;
	int nonblock = (filp->f_flags & O_NONBLOCK);
	int ret;

	if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32)))
		return -EFAULT;

	/* Verify intended MPT adapter */
	iocnumX = karg32.hdr.iocnum & 0xFF;
	if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
	    (iocp == NULL)) {
		printk(KERN_DEBUG MYNAM "::compat_mpt_command @%d - ioc%d not found!\n",
			__LINE__, iocnumX);
		return -ENODEV;
	}

	if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
		return ret;

	dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "compat_mpt_command() called\n",
	    iocp->name));
	/* Copy data to karg */
	karg.hdr.iocnum = karg32.hdr.iocnum;
	karg.hdr.port = karg32.hdr.port;
	karg.timeout = karg32.timeout;
	karg.maxReplyBytes = karg32.maxReplyBytes;

	karg.dataInSize = karg32.dataInSize;
	karg.dataOutSize = karg32.dataOutSize;
	karg.maxSenseBytes = karg32.maxSenseBytes;
	karg.dataSgeOffset = karg32.dataSgeOffset;

	/* 32-bit user pointers become native pointers via zero-extension. */
	karg.replyFrameBufPtr = (char __user *)(unsigned long)karg32.replyFrameBufPtr;
	karg.dataInBufPtr = (char __user *)(unsigned long)karg32.dataInBufPtr;
	karg.dataOutBufPtr = (char __user *)(unsigned long)karg32.dataOutBufPtr;
	karg.senseDataPtr = (char __user *)(unsigned long)karg32.senseDataPtr;

	/* Pass new structure to do_mpt_command
	 */
	ret = mptctl_do_mpt_command (karg, &uarg->MF);
mutex_unlock(&iocp->ioctl_cmds.mutex); return ret; } static long compat_mpctl_ioctl(struct file *f, unsigned int cmd, unsigned long arg) { long ret; mutex_lock(&mpctl_mutex); switch (cmd) { case MPTIOCINFO: case MPTIOCINFO1: case MPTIOCINFO2: case MPTTARGETINFO: case MPTEVENTQUERY: case MPTEVENTENABLE: case MPTEVENTREPORT: case MPTHARDRESET: case HP_GETHOSTINFO: case HP_GETTARGETINFO: case MPTTEST: ret = __mptctl_ioctl(f, cmd, arg); break; case MPTCOMMAND32: ret = compat_mpt_command(f, cmd, arg); break; case MPTFWDOWNLOAD32: ret = compat_mptfwxfer_ioctl(f, cmd, arg); break; default: ret = -ENOIOCTLCMD; break; } mutex_unlock(&mpctl_mutex); return ret; } #endif /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * mptctl_probe - Installs ioctl devices per bus. * @pdev: Pointer to pci_dev structure * * Returns 0 for success, non-zero for failure. * */ static int mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id) { MPT_ADAPTER *ioc = pci_get_drvdata(pdev); mutex_init(&ioc->ioctl_cmds.mutex); init_completion(&ioc->ioctl_cmds.done); return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * mptctl_remove - Removed ioctl devices * @pdev: Pointer to pci_dev structure * * */ static void mptctl_remove(struct pci_dev *pdev) { } static struct mpt_pci_driver mptctl_driver = { .probe = mptctl_probe, .remove = mptctl_remove, }; /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int __init mptctl_init(void) { int err; int where = 1; show_mptmod_ver(my_NAME, my_VERSION); mpt_device_driver_register(&mptctl_driver, MPTCTL_DRIVER); /* Register this device */ err = misc_register(&mptctl_miscdev); if (err < 0) { printk(KERN_ERR MYNAM ": Can't register misc device [minor=%d].\n", MPT_MINOR); goto out_fail; } printk(KERN_INFO MYNAM ": Registered with Fusion MPT base driver\n"); printk(KERN_INFO MYNAM ": /dev/%s @ (major,minor=%d,%d)\n", mptctl_miscdev.name, MISC_MAJOR, 
mptctl_miscdev.minor); /* * Install our handler */ ++where; mptctl_id = mpt_register(mptctl_reply, MPTCTL_DRIVER, "mptctl_reply"); if (!mptctl_id || mptctl_id >= MPT_MAX_PROTOCOL_DRIVERS) { printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n"); misc_deregister(&mptctl_miscdev); err = -EBUSY; goto out_fail; } mptctl_taskmgmt_id = mpt_register(mptctl_taskmgmt_reply, MPTCTL_DRIVER, "mptctl_taskmgmt_reply"); if (!mptctl_taskmgmt_id || mptctl_taskmgmt_id >= MPT_MAX_PROTOCOL_DRIVERS) { printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n"); mpt_deregister(mptctl_id); misc_deregister(&mptctl_miscdev); err = -EBUSY; goto out_fail; } mpt_reset_register(mptctl_id, mptctl_ioc_reset); mpt_event_register(mptctl_id, mptctl_event_process); return 0; out_fail: mpt_device_driver_deregister(MPTCTL_DRIVER); return err; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static void mptctl_exit(void) { misc_deregister(&mptctl_miscdev); printk(KERN_INFO MYNAM ": Deregistered /dev/%s @ (major,minor=%d,%d)\n", mptctl_miscdev.name, MISC_MAJOR, mptctl_miscdev.minor); /* De-register event handler from base module */ mpt_event_deregister(mptctl_id); /* De-register reset handler from base module */ mpt_reset_deregister(mptctl_id); /* De-register callback handler from base module */ mpt_deregister(mptctl_taskmgmt_id); mpt_deregister(mptctl_id); mpt_device_driver_deregister(MPTCTL_DRIVER); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ module_init(mptctl_init); module_exit(mptctl_exit);
gpl-2.0
akw28888/msm
arch/m68k/atari/ataints.c
4406
6330
/* * arch/m68k/atari/ataints.c -- Atari Linux interrupt handling code * * 5/2/94 Roman Hodek: * Added support for TT interrupts; setup for TT SCU (may someone has * twiddled there and we won't get the right interrupts :-() * * Major change: The device-independent code in m68k/ints.c didn't know * about non-autovec ints yet. It hardcoded the number of possible ints to * 7 (IRQ1...IRQ7). But the Atari has lots of non-autovec ints! I made the * number of possible ints a constant defined in interrupt.h, which is * 47 for the Atari. So we can call request_irq() for all Atari interrupts * just the normal way. Additionally, all vectors >= 48 are initialized to * call trap() instead of inthandler(). This must be changed here, too. * * 1995-07-16 Lars Brinkhoff <f93labr@dd.chalmers.se>: * Corrected a bug in atari_add_isr() which rejected all SCC * interrupt sources if there were no TT MFP! * * 12/13/95: New interface functions atari_level_triggered_int() and * atari_register_vme_int() as support for level triggered VME interrupts. * * 02/12/96: (Roman) * Total rewrite of Atari interrupt handling, for new scheme see comments * below. * * 1996-09-03 lars brinkhoff <f93labr@dd.chalmers.se>: * Added new function atari_unregister_vme_int(), and * modified atari_register_vme_int() as well as IS_VALID_INTNO() * to work with it. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. 
* */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/kernel_stat.h> #include <linux/init.h> #include <linux/seq_file.h> #include <linux/module.h> #include <asm/traps.h> #include <asm/atarihw.h> #include <asm/atariints.h> #include <asm/atari_stdma.h> #include <asm/irq.h> #include <asm/entry.h> /* * Atari interrupt handling scheme: * -------------------------------- * * All interrupt source have an internal number (defined in * <asm/atariints.h>): Autovector interrupts are 1..7, then follow ST-MFP, * TT-MFP, SCC, and finally VME interrupts. Vector numbers for the latter can * be allocated by atari_register_vme_int(). */ /* * Bitmap for free interrupt vector numbers * (new vectors starting from 0x70 can be allocated by * atari_register_vme_int()) */ static int free_vme_vec_bitmap; /* GK: * HBL IRQ handler for Falcon. Nobody needs it :-) * ++andreas: raise ipl to disable further HBLANK interrupts. */ asmlinkage void falcon_hblhandler(void); asm(".text\n" __ALIGN_STR "\n\t" "falcon_hblhandler:\n\t" "orw #0x200,%sp@\n\t" /* set saved ipl to 2 */ "rte"); extern void atari_microwire_cmd(int cmd); static unsigned int atari_irq_startup(struct irq_data *data) { unsigned int irq = data->irq; m68k_irq_startup(data); atari_turnon_irq(irq); atari_enable_irq(irq); return 0; } static void atari_irq_shutdown(struct irq_data *data) { unsigned int irq = data->irq; atari_disable_irq(irq); atari_turnoff_irq(irq); m68k_irq_shutdown(data); if (irq == IRQ_AUTO_4) vectors[VEC_INT4] = falcon_hblhandler; } static void atari_irq_enable(struct irq_data *data) { atari_enable_irq(data->irq); } static void atari_irq_disable(struct irq_data *data) { atari_disable_irq(data->irq); } static struct irq_chip atari_irq_chip = { .name = "atari", .irq_startup = atari_irq_startup, .irq_shutdown = atari_irq_shutdown, .irq_enable = atari_irq_enable, .irq_disable = atari_irq_disable, }; /* * void atari_init_IRQ (void) * * Parameters: None * * Returns: Nothing * * This function should be 
called during kernel startup to initialize * the atari IRQ handling routines. */ void __init atari_init_IRQ(void) { m68k_setup_user_interrupt(VEC_USER, NUM_ATARI_SOURCES - IRQ_USER); m68k_setup_irq_controller(&atari_irq_chip, handle_simple_irq, 1, NUM_ATARI_SOURCES - 1); /* Initialize the MFP(s) */ #ifdef ATARI_USE_SOFTWARE_EOI st_mfp.vec_adr = 0x48; /* Software EOI-Mode */ #else st_mfp.vec_adr = 0x40; /* Automatic EOI-Mode */ #endif st_mfp.int_en_a = 0x00; /* turn off MFP-Ints */ st_mfp.int_en_b = 0x00; st_mfp.int_mk_a = 0xff; /* no Masking */ st_mfp.int_mk_b = 0xff; if (ATARIHW_PRESENT(TT_MFP)) { #ifdef ATARI_USE_SOFTWARE_EOI tt_mfp.vec_adr = 0x58; /* Software EOI-Mode */ #else tt_mfp.vec_adr = 0x50; /* Automatic EOI-Mode */ #endif tt_mfp.int_en_a = 0x00; /* turn off MFP-Ints */ tt_mfp.int_en_b = 0x00; tt_mfp.int_mk_a = 0xff; /* no Masking */ tt_mfp.int_mk_b = 0xff; } if (ATARIHW_PRESENT(SCC) && !atari_SCC_reset_done) { atari_scc.cha_a_ctrl = 9; MFPDELAY(); atari_scc.cha_a_ctrl = (char) 0xc0; /* hardware reset */ } if (ATARIHW_PRESENT(SCU)) { /* init the SCU if present */ tt_scu.sys_mask = 0x10; /* enable VBL (for the cursor) and * disable HSYNC interrupts (who * needs them?) MFP and SCC are * enabled in VME mask */ tt_scu.vme_mask = 0x60; /* enable MFP and SCC ints */ } else { /* If no SCU and no Hades, the HSYNC interrupt needs to be * disabled this way. (Else _inthandler in kernel/sys_call.S * gets overruns) */ vectors[VEC_INT2] = falcon_hblhandler; vectors[VEC_INT4] = falcon_hblhandler; } if (ATARIHW_PRESENT(PCM_8BIT) && ATARIHW_PRESENT(MICROWIRE)) { /* Initialize the LM1992 Sound Controller to enable the PSG sound. This is misplaced here, it should be in an atasound_init(), that doesn't exist yet. 
*/ atari_microwire_cmd(MW_LM1992_PSG_HIGH); } stdma_init(); /* Initialize the PSG: all sounds off, both ports output */ sound_ym.rd_data_reg_sel = 7; sound_ym.wd_data = 0xff; } /* * atari_register_vme_int() returns the number of a free interrupt vector for * hardware with a programmable int vector (probably a VME board). */ unsigned long atari_register_vme_int(void) { int i; for (i = 0; i < 32; i++) if ((free_vme_vec_bitmap & (1 << i)) == 0) break; if (i == 16) return 0; free_vme_vec_bitmap |= 1 << i; return VME_SOURCE_BASE + i; } EXPORT_SYMBOL(atari_register_vme_int); void atari_unregister_vme_int(unsigned long irq) { if (irq >= VME_SOURCE_BASE && irq < VME_SOURCE_BASE + VME_MAX_SOURCES) { irq -= VME_SOURCE_BASE; free_vme_vec_bitmap &= ~(1 << irq); } } EXPORT_SYMBOL(atari_unregister_vme_int);
gpl-2.0
thicklizard/patch
arch/arm/mach-exynos/setup-usb-phy.c
4662
3354
/* * Copyright (C) 2011 Samsung Electronics Co.Ltd * Author: Joonyoung Shim <jy0922.shim@samsung.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/io.h> #include <linux/platform_device.h> #include <mach/regs-pmu.h> #include <mach/regs-usb-phy.h> #include <plat/cpu.h> #include <plat/usb-phy.h> static atomic_t host_usage; static int exynos4_usb_host_phy_is_on(void) { return (readl(EXYNOS4_PHYPWR) & PHY1_STD_ANALOG_POWERDOWN) ? 0 : 1; } static int exynos4_usb_phy1_init(struct platform_device *pdev) { struct clk *otg_clk; struct clk *xusbxti_clk; u32 phyclk; u32 rstcon; int err; atomic_inc(&host_usage); otg_clk = clk_get(&pdev->dev, "otg"); if (IS_ERR(otg_clk)) { dev_err(&pdev->dev, "Failed to get otg clock\n"); return PTR_ERR(otg_clk); } err = clk_enable(otg_clk); if (err) { clk_put(otg_clk); return err; } if (exynos4_usb_host_phy_is_on()) return 0; writel(readl(S5P_USBHOST_PHY_CONTROL) | S5P_USBHOST_PHY_ENABLE, S5P_USBHOST_PHY_CONTROL); /* set clock frequency for PLL */ phyclk = readl(EXYNOS4_PHYCLK) & ~CLKSEL_MASK; xusbxti_clk = clk_get(&pdev->dev, "xusbxti"); if (xusbxti_clk && !IS_ERR(xusbxti_clk)) { switch (clk_get_rate(xusbxti_clk)) { case 12 * MHZ: phyclk |= CLKSEL_12M; break; case 24 * MHZ: phyclk |= CLKSEL_24M; break; default: case 48 * MHZ: /* default reference clock */ break; } clk_put(xusbxti_clk); } writel(phyclk, EXYNOS4_PHYCLK); /* floating prevention logic: disable */ writel((readl(EXYNOS4_PHY1CON) | FPENABLEN), EXYNOS4_PHY1CON); /* set to normal HSIC 0 and 1 of PHY1 */ writel((readl(EXYNOS4_PHYPWR) & ~PHY1_HSIC_NORMAL_MASK), EXYNOS4_PHYPWR); /* set to normal standard USB of PHY1 */ writel((readl(EXYNOS4_PHYPWR) & ~PHY1_STD_NORMAL_MASK), EXYNOS4_PHYPWR); /* 
reset all ports of both PHY and Link */ rstcon = readl(EXYNOS4_RSTCON) | HOST_LINK_PORT_SWRST_MASK | PHY1_SWRST_MASK; writel(rstcon, EXYNOS4_RSTCON); udelay(10); rstcon &= ~(HOST_LINK_PORT_SWRST_MASK | PHY1_SWRST_MASK); writel(rstcon, EXYNOS4_RSTCON); udelay(80); clk_disable(otg_clk); clk_put(otg_clk); return 0; } static int exynos4_usb_phy1_exit(struct platform_device *pdev) { struct clk *otg_clk; int err; if (atomic_dec_return(&host_usage) > 0) return 0; otg_clk = clk_get(&pdev->dev, "otg"); if (IS_ERR(otg_clk)) { dev_err(&pdev->dev, "Failed to get otg clock\n"); return PTR_ERR(otg_clk); } err = clk_enable(otg_clk); if (err) { clk_put(otg_clk); return err; } writel((readl(EXYNOS4_PHYPWR) | PHY1_STD_ANALOG_POWERDOWN), EXYNOS4_PHYPWR); writel(readl(S5P_USBHOST_PHY_CONTROL) & ~S5P_USBHOST_PHY_ENABLE, S5P_USBHOST_PHY_CONTROL); clk_disable(otg_clk); clk_put(otg_clk); return 0; } int s5p_usb_phy_init(struct platform_device *pdev, int type) { if (type == S5P_USB_PHY_HOST) return exynos4_usb_phy1_init(pdev); return -EINVAL; } int s5p_usb_phy_exit(struct platform_device *pdev, int type) { if (type == S5P_USB_PHY_HOST) return exynos4_usb_phy1_exit(pdev); return -EINVAL; }
gpl-2.0
ZdrowyGosciu/kernel_lge_g2_msm8974
drivers/mfd/ucb1x00-ts.c
9782
11092
/* * Touchscreen driver for UCB1x00-based touchscreens * * Copyright (C) 2001 Russell King, All Rights Reserved. * Copyright (C) 2005 Pavel Machek * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * 21-Jan-2002 <jco@ict.es> : * * Added support for synchronous A/D mode. This mode is useful to * avoid noise induced in the touchpanel by the LCD, provided that * the UCB1x00 has a valid LCD sync signal routed to its ADCSYNC pin. * It is important to note that the signal connected to the ADCSYNC * pin should provide pulses even when the LCD is blanked, otherwise * a pen touch needed to unblank the LCD will never be read. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/input.h> #include <linux/device.h> #include <linux/freezer.h> #include <linux/slab.h> #include <linux/kthread.h> #include <linux/mfd/ucb1x00.h> #include <mach/collie.h> #include <asm/mach-types.h> struct ucb1x00_ts { struct input_dev *idev; struct ucb1x00 *ucb; spinlock_t irq_lock; unsigned irq_disabled; wait_queue_head_t irq_wait; struct task_struct *rtask; u16 x_res; u16 y_res; unsigned int adcsync:1; }; static int adcsync; static inline void ucb1x00_ts_evt_add(struct ucb1x00_ts *ts, u16 pressure, u16 x, u16 y) { struct input_dev *idev = ts->idev; input_report_abs(idev, ABS_X, x); input_report_abs(idev, ABS_Y, y); input_report_abs(idev, ABS_PRESSURE, pressure); input_report_key(idev, BTN_TOUCH, 1); input_sync(idev); } static inline void ucb1x00_ts_event_release(struct ucb1x00_ts *ts) { struct input_dev *idev = ts->idev; input_report_abs(idev, ABS_PRESSURE, 0); input_report_key(idev, BTN_TOUCH, 0); input_sync(idev); } /* * Switch to interrupt mode. 
*/ static inline void ucb1x00_ts_mode_int(struct ucb1x00_ts *ts) { ucb1x00_reg_write(ts->ucb, UCB_TS_CR, UCB_TS_CR_TSMX_POW | UCB_TS_CR_TSPX_POW | UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_GND | UCB_TS_CR_MODE_INT); } /* * Switch to pressure mode, and read pressure. We don't need to wait * here, since both plates are being driven. */ static inline unsigned int ucb1x00_ts_read_pressure(struct ucb1x00_ts *ts) { if (machine_is_collie()) { ucb1x00_io_write(ts->ucb, COLLIE_TC35143_GPIO_TBL_CHK, 0); ucb1x00_reg_write(ts->ucb, UCB_TS_CR, UCB_TS_CR_TSPX_POW | UCB_TS_CR_TSMX_POW | UCB_TS_CR_MODE_POS | UCB_TS_CR_BIAS_ENA); udelay(55); return ucb1x00_adc_read(ts->ucb, UCB_ADC_INP_AD2, ts->adcsync); } else { ucb1x00_reg_write(ts->ucb, UCB_TS_CR, UCB_TS_CR_TSMX_POW | UCB_TS_CR_TSPX_POW | UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_GND | UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); return ucb1x00_adc_read(ts->ucb, UCB_ADC_INP_TSPY, ts->adcsync); } } /* * Switch to X position mode and measure Y plate. We switch the plate * configuration in pressure mode, then switch to position mode. This * gives a faster response time. Even so, we need to wait about 55us * for things to stabilise. */ static inline unsigned int ucb1x00_ts_read_xpos(struct ucb1x00_ts *ts) { if (machine_is_collie()) ucb1x00_io_write(ts->ucb, 0, COLLIE_TC35143_GPIO_TBL_CHK); else { ucb1x00_reg_write(ts->ucb, UCB_TS_CR, UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW | UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); ucb1x00_reg_write(ts->ucb, UCB_TS_CR, UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW | UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); } ucb1x00_reg_write(ts->ucb, UCB_TS_CR, UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW | UCB_TS_CR_MODE_POS | UCB_TS_CR_BIAS_ENA); udelay(55); return ucb1x00_adc_read(ts->ucb, UCB_ADC_INP_TSPY, ts->adcsync); } /* * Switch to Y position mode and measure X plate. We switch the plate * configuration in pressure mode, then switch to position mode. This * gives a faster response time. 
Even so, we need to wait about 55us * for things to stabilise. */ static inline unsigned int ucb1x00_ts_read_ypos(struct ucb1x00_ts *ts) { if (machine_is_collie()) ucb1x00_io_write(ts->ucb, 0, COLLIE_TC35143_GPIO_TBL_CHK); else { ucb1x00_reg_write(ts->ucb, UCB_TS_CR, UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW | UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); ucb1x00_reg_write(ts->ucb, UCB_TS_CR, UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW | UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); } ucb1x00_reg_write(ts->ucb, UCB_TS_CR, UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW | UCB_TS_CR_MODE_POS | UCB_TS_CR_BIAS_ENA); udelay(55); return ucb1x00_adc_read(ts->ucb, UCB_ADC_INP_TSPX, ts->adcsync); } /* * Switch to X plate resistance mode. Set MX to ground, PX to * supply. Measure current. */ static inline unsigned int ucb1x00_ts_read_xres(struct ucb1x00_ts *ts) { ucb1x00_reg_write(ts->ucb, UCB_TS_CR, UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW | UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); return ucb1x00_adc_read(ts->ucb, 0, ts->adcsync); } /* * Switch to Y plate resistance mode. Set MY to ground, PY to * supply. Measure current. */ static inline unsigned int ucb1x00_ts_read_yres(struct ucb1x00_ts *ts) { ucb1x00_reg_write(ts->ucb, UCB_TS_CR, UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW | UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); return ucb1x00_adc_read(ts->ucb, 0, ts->adcsync); } static inline int ucb1x00_ts_pen_down(struct ucb1x00_ts *ts) { unsigned int val = ucb1x00_reg_read(ts->ucb, UCB_TS_CR); if (machine_is_collie()) return (!(val & (UCB_TS_CR_TSPX_LOW))); else return (val & (UCB_TS_CR_TSPX_LOW | UCB_TS_CR_TSMX_LOW)); } /* * This is a RT kernel thread that handles the ADC accesses * (mainly so we can use semaphores in the UCB1200 core code * to serialise accesses to the ADC). 
*/ static int ucb1x00_thread(void *_ts) { struct ucb1x00_ts *ts = _ts; DECLARE_WAITQUEUE(wait, current); bool frozen, ignore = false; int valid = 0; set_freezable(); add_wait_queue(&ts->irq_wait, &wait); while (!kthread_freezable_should_stop(&frozen)) { unsigned int x, y, p; signed long timeout; if (frozen) ignore = true; ucb1x00_adc_enable(ts->ucb); x = ucb1x00_ts_read_xpos(ts); y = ucb1x00_ts_read_ypos(ts); p = ucb1x00_ts_read_pressure(ts); /* * Switch back to interrupt mode. */ ucb1x00_ts_mode_int(ts); ucb1x00_adc_disable(ts->ucb); msleep(10); ucb1x00_enable(ts->ucb); if (ucb1x00_ts_pen_down(ts)) { set_current_state(TASK_INTERRUPTIBLE); spin_lock_irq(&ts->irq_lock); if (ts->irq_disabled) { ts->irq_disabled = 0; enable_irq(ts->ucb->irq_base + UCB_IRQ_TSPX); } spin_unlock_irq(&ts->irq_lock); ucb1x00_disable(ts->ucb); /* * If we spat out a valid sample set last time, * spit out a "pen off" sample here. */ if (valid) { ucb1x00_ts_event_release(ts); valid = 0; } timeout = MAX_SCHEDULE_TIMEOUT; } else { ucb1x00_disable(ts->ucb); /* * Filtering is policy. Policy belongs in user * space. We therefore leave it to user space * to do any filtering they please. */ if (!ignore) { ucb1x00_ts_evt_add(ts, p, x, y); valid = 1; } set_current_state(TASK_INTERRUPTIBLE); timeout = HZ / 100; } schedule_timeout(timeout); } remove_wait_queue(&ts->irq_wait, &wait); ts->rtask = NULL; return 0; } /* * We only detect touch screen _touches_ with this interrupt * handler, and even then we just schedule our task. 
*/ static irqreturn_t ucb1x00_ts_irq(int irq, void *id) { struct ucb1x00_ts *ts = id; spin_lock(&ts->irq_lock); ts->irq_disabled = 1; disable_irq_nosync(ts->ucb->irq_base + UCB_IRQ_TSPX); spin_unlock(&ts->irq_lock); wake_up(&ts->irq_wait); return IRQ_HANDLED; } static int ucb1x00_ts_open(struct input_dev *idev) { struct ucb1x00_ts *ts = input_get_drvdata(idev); unsigned long flags = 0; int ret = 0; BUG_ON(ts->rtask); if (machine_is_collie()) flags = IRQF_TRIGGER_RISING; else flags = IRQF_TRIGGER_FALLING; ts->irq_disabled = 0; init_waitqueue_head(&ts->irq_wait); ret = request_irq(ts->ucb->irq_base + UCB_IRQ_TSPX, ucb1x00_ts_irq, flags, "ucb1x00-ts", ts); if (ret < 0) goto out; /* * If we do this at all, we should allow the user to * measure and read the X and Y resistance at any time. */ ucb1x00_adc_enable(ts->ucb); ts->x_res = ucb1x00_ts_read_xres(ts); ts->y_res = ucb1x00_ts_read_yres(ts); ucb1x00_adc_disable(ts->ucb); ts->rtask = kthread_run(ucb1x00_thread, ts, "ktsd"); if (!IS_ERR(ts->rtask)) { ret = 0; } else { free_irq(ts->ucb->irq_base + UCB_IRQ_TSPX, ts); ts->rtask = NULL; ret = -EFAULT; } out: return ret; } /* * Release touchscreen resources. Disable IRQs. */ static void ucb1x00_ts_close(struct input_dev *idev) { struct ucb1x00_ts *ts = input_get_drvdata(idev); if (ts->rtask) kthread_stop(ts->rtask); ucb1x00_enable(ts->ucb); free_irq(ts->ucb->irq_base + UCB_IRQ_TSPX, ts); ucb1x00_reg_write(ts->ucb, UCB_TS_CR, 0); ucb1x00_disable(ts->ucb); } /* * Initialisation. */ static int ucb1x00_ts_add(struct ucb1x00_dev *dev) { struct ucb1x00_ts *ts; struct input_dev *idev; int err; ts = kzalloc(sizeof(struct ucb1x00_ts), GFP_KERNEL); idev = input_allocate_device(); if (!ts || !idev) { err = -ENOMEM; goto fail; } ts->ucb = dev->ucb; ts->idev = idev; ts->adcsync = adcsync ? 
UCB_SYNC : UCB_NOSYNC; spin_lock_init(&ts->irq_lock); idev->name = "Touchscreen panel"; idev->id.product = ts->ucb->id; idev->open = ucb1x00_ts_open; idev->close = ucb1x00_ts_close; idev->dev.parent = &ts->ucb->dev; idev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY); idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_drvdata(idev, ts); ucb1x00_adc_enable(ts->ucb); ts->x_res = ucb1x00_ts_read_xres(ts); ts->y_res = ucb1x00_ts_read_yres(ts); ucb1x00_adc_disable(ts->ucb); input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0); input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0); input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0); err = input_register_device(idev); if (err) goto fail; dev->priv = ts; return 0; fail: input_free_device(idev); kfree(ts); return err; } static void ucb1x00_ts_remove(struct ucb1x00_dev *dev) { struct ucb1x00_ts *ts = dev->priv; input_unregister_device(ts->idev); kfree(ts); } static struct ucb1x00_driver ucb1x00_ts_driver = { .add = ucb1x00_ts_add, .remove = ucb1x00_ts_remove, }; static int __init ucb1x00_ts_init(void) { return ucb1x00_register_driver(&ucb1x00_ts_driver); } static void __exit ucb1x00_ts_exit(void) { ucb1x00_unregister_driver(&ucb1x00_ts_driver); } module_param(adcsync, int, 0444); module_init(ucb1x00_ts_init); module_exit(ucb1x00_ts_exit); MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>"); MODULE_DESCRIPTION("UCB1x00 touchscreen driver"); MODULE_LICENSE("GPL");
gpl-2.0
Cryptoo/kernel
drivers/gpu/drm/gma500/mdfld_tpo_vid.c
10038
4086
/* * Copyright © 2010 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * jim liu <jim.liu@intel.com> * Jackie Li<yaodong.li@intel.com> */ #include "mdfld_dsi_dpi.h" static struct drm_display_mode *tpo_vid_get_config_mode(struct drm_device *dev) { struct drm_display_mode *mode; struct drm_psb_private *dev_priv = dev->dev_private; struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD; bool use_gct = false; mode = kzalloc(sizeof(*mode), GFP_KERNEL); if (!mode) return NULL; if (use_gct) { mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo; mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo; mode->hsync_start = mode->hdisplay + ((ti->hsync_offset_hi << 8) | ti->hsync_offset_lo); mode->hsync_end = mode->hsync_start + ((ti->hsync_pulse_width_hi << 8) | ti->hsync_pulse_width_lo); mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | ti->hblank_lo); mode->vsync_start = mode->vdisplay + ((ti->vsync_offset_hi << 8) | ti->vsync_offset_lo); mode->vsync_end = mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | ti->vsync_pulse_width_lo); mode->vtotal = mode->vdisplay + ((ti->vblank_hi << 8) | ti->vblank_lo); mode->clock = ti->pixel_clock * 10; dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay); dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay); dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start); dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end); dev_dbg(dev->dev, "htotal is %d\n", mode->htotal); dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start); dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end); dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal); dev_dbg(dev->dev, "clock is %d\n", mode->clock); } else { mode->hdisplay = 864; mode->vdisplay = 480; mode->hsync_start = 873; mode->hsync_end = 876; mode->htotal = 887; mode->vsync_start = 487; mode->vsync_end = 490; mode->vtotal = 499; mode->clock = 33264; } drm_mode_set_name(mode); drm_mode_set_crtcinfo(mode, 0); mode->type |= DRM_MODE_TYPE_PREFERRED; return mode; } static int tpo_vid_get_panel_info(struct drm_device *dev, int pipe, struct panel_info *pi) { if 
(!dev || !pi) return -EINVAL; pi->width_mm = TPO_PANEL_WIDTH; pi->height_mm = TPO_PANEL_HEIGHT; return 0; } /*TPO DPI encoder helper funcs*/ static const struct drm_encoder_helper_funcs mdfld_tpo_dpi_encoder_helper_funcs = { .dpms = mdfld_dsi_dpi_dpms, .mode_fixup = mdfld_dsi_dpi_mode_fixup, .prepare = mdfld_dsi_dpi_prepare, .mode_set = mdfld_dsi_dpi_mode_set, .commit = mdfld_dsi_dpi_commit, }; /*TPO DPI encoder funcs*/ static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = { .destroy = drm_encoder_cleanup, }; const struct panel_funcs mdfld_tpo_vid_funcs = { .encoder_funcs = &mdfld_tpo_dpi_encoder_funcs, .encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs, .get_config_mode = &tpo_vid_get_config_mode, .get_panel_info = tpo_vid_get_panel_info, };
gpl-2.0
DennisBold/CodeAurora-MSM-Kernel
Documentation/connector/ucon.c
12086
5237
/*
 * ucon.c
 *
 * Copyright (c) 2004+ Evgeniy Polyakov <zbr@ioremap.net>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <asm/types.h>

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/poll.h>

#include <linux/netlink.h>
#include <linux/rtnetlink.h>

#include <arpa/inet.h>

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <time.h>
#include <getopt.h>

#include <linux/connector.h>

#define DEBUG
#define NETLINK_CONNECTOR	11

/* Hopefully your userspace connector.h matches this kernel */
#define CN_TEST_IDX		CN_NETLINK_USERS + 3
#define CN_TEST_VAL		0x456

#ifdef DEBUG
#define ulog(f, a...) fprintf(stdout, f, ##a)
#else
#define ulog(f, a...) do {} while (0)
#endif

static int need_exit;
static __u32 seq;

/*
 * netlink_send - wrap @msg in a netlink header and send it on socket @s.
 *
 * Returns the number of bytes sent, or -1 on failure (send() error, with
 * errno set by send(), or an oversized payload).
 */
static int netlink_send(int s, struct cn_msg *msg)
{
	struct nlmsghdr *nlh;
	unsigned int size;
	int err;
	char buf[128];
	struct cn_msg *m;

	size = NLMSG_SPACE(sizeof(struct cn_msg) + msg->len);

	/*
	 * Fix: the original memcpy'd the message into the fixed-size stack
	 * buffer without checking its length, overflowing buf[] for any
	 * payload larger than the buffer.
	 */
	if (size > sizeof(buf)) {
		ulog("Message too large: %u bytes, max %u.\n",
		     size, (unsigned int)sizeof(buf));
		return -1;
	}

	nlh = (struct nlmsghdr *)buf;
	nlh->nlmsg_seq = seq++;
	nlh->nlmsg_pid = getpid();
	nlh->nlmsg_type = NLMSG_DONE;
	nlh->nlmsg_len = NLMSG_LENGTH(size - sizeof(*nlh));
	nlh->nlmsg_flags = 0;

	m = NLMSG_DATA(nlh);
#if 0
	ulog("%s: [%08x.%08x] len=%u, seq=%u, ack=%u.\n",
	       __func__, msg->id.idx, msg->id.val, msg->len, msg->seq, msg->ack);
#endif
	memcpy(m, msg, sizeof(*m) + msg->len);

	err = send(s, nlh, size, 0);
	if (err == -1)
		ulog("Failed to send: %s [%d].\n",
			strerror(errno), errno);

	return err;
}

/* Print command-line help, including the connector id this tool expects. */
static void usage(void)
{
	printf(
		"Usage: ucon [options] [output file]\n"
		"\n"
		"\t-h\tthis help screen\n"
		"\t-s\tsend buffers to the test module\n"
		"\n"
		"The default behavior of ucon is to subscribe to the test module\n"
		"and wait for state messages.  Any ones received are dumped to the\n"
		"specified output file (or stdout).  The test module is assumed to\n"
		"have an id of {%u.%u}\n"
		"\n"
		"If you get no output, then verify the cn_test module id matches\n"
		"the expected id above.\n"
		, CN_TEST_IDX, CN_TEST_VAL
	);
}

int main(int argc, char *argv[])
{
	int s;
	char buf[1024];
	int len;
	struct nlmsghdr *reply;
	struct sockaddr_nl l_local;
	struct cn_msg *data;
	FILE *out;
	time_t tm;
	struct pollfd pfd;
	bool send_msgs = false;

	while ((s = getopt(argc, argv, "hs")) != -1) {
		switch (s) {
		case 's':
			send_msgs = true;
			break;

		case 'h':
			usage();
			return 0;

		default:
			/* getopt() outputs an error for us */
			usage();
			return 1;
		}
	}

	if (argc != optind) {
		out = fopen(argv[optind], "a+");
		if (!out) {
			/*
			 * Fix: report the file we actually tried to open
			 * (argv[optind]); the original printed argv[1],
			 * which may be an option such as "-s".
			 */
			ulog("Unable to open %s for writing: %s\n",
				argv[optind], strerror(errno));
			out = stdout;
		}
	} else
		out = stdout;

	memset(buf, 0, sizeof(buf));

	s = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
	if (s == -1) {
		perror("socket");
		return -1;
	}

	/*
	 * Fix: zero the whole address before filling it in; the original
	 * left the struct padding and unassigned fields uninitialized on
	 * the stack before handing it to bind().
	 */
	memset(&l_local, 0, sizeof(l_local));
	l_local.nl_family = AF_NETLINK;
	l_local.nl_groups = -1; /* bitmask of requested groups */
	l_local.nl_pid = 0;

	ulog("subscribing to %u.%u\n", CN_TEST_IDX, CN_TEST_VAL);

	if (bind(s, (struct sockaddr *)&l_local, sizeof(struct sockaddr_nl)) == -1) {
		perror("bind");
		close(s);
		return -1;
	}

#if 0
	{
		int on = 0x57; /* Additional group number */
		setsockopt(s, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &on, sizeof(on));
	}
#endif
	if (send_msgs) {
		int i, j;

		memset(buf, 0, sizeof(buf));

		data = (struct cn_msg *)buf;

		data->id.idx = CN_TEST_IDX;
		data->id.val = CN_TEST_VAL;
		data->seq = seq++;
		data->ack = 0;
		data->len = 0;

		for (j = 0; j < 10; ++j) {
			for (i = 0; i < 1000; ++i) {
				len = netlink_send(s, data);
			}

			ulog("%d messages have been sent to %08x.%08x.\n",
			     i, data->id.idx, data->id.val);
		}

		/* Fix: close the socket on this early-return path too. */
		close(s);
		return 0;
	}


	pfd.fd = s;

	while (!need_exit) {
		pfd.events = POLLIN;
		pfd.revents = 0;
		switch (poll(&pfd, 1, -1)) {
			case 0:
				need_exit = 1;
				break;
			case -1:
				if (errno != EINTR) {
					need_exit = 1;
					break;
				}
				continue;
		}
		if (need_exit)
			break;

		memset(buf, 0, sizeof(buf));
		len = recv(s, buf, sizeof(buf), 0);
		if (len == -1) {
			perror("recv buf");
			close(s);
			return -1;
		}
		reply = (struct nlmsghdr *)buf;

		switch (reply->nlmsg_type) {
		case NLMSG_ERROR:
			fprintf(out, "Error message received.\n");
			fflush(out);
			break;
		case NLMSG_DONE:
			data = (struct cn_msg *)NLMSG_DATA(reply);

			time(&tm);
			fprintf(out, "%.24s : [%x.%x] [%08u.%08u].\n",
				ctime(&tm), data->id.idx, data->id.val,
				data->seq, data->ack);
			fflush(out);
			break;
		default:
			break;
		}
	}

	close(s);
	return 0;
}
gpl-2.0
victorlapin/kernel_lge_bullhead-NG
drivers/mtd/ubi/wl.c
55
62834
/* * Copyright (c) International Business Machines Corp., 2006 * Copyright (c) 2014 - 2015, Linux Foundation. All rights reserved. * Linux Foundation chooses to take subject only to the GPLv2 * license terms, and distributes only under these terms. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner */ /* * UBI wear-leveling sub-system. * * This sub-system is responsible for wear-leveling. It works in terms of * physical eraseblocks and erase counters and knows nothing about logical * eraseblocks, volumes, etc. From this sub-system's perspective all physical * eraseblocks are of two types - used and free. Used physical eraseblocks are * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function. * * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only erase counter * header. The rest of the physical eraseblock contains only %0xFF bytes. * * When physical eraseblocks are returned to the WL sub-system by means of the * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is * done asynchronously in context of the per-UBI device background thread, * which is also managed by the WL sub-system. 
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counter to free physical eraseblocks
 * with high erase counter.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
 * in a physical eraseblock, it has to be moved. Technically this is the same
 * as moving it for wear-leveling reasons.
 *
 * As it was said, for the UBI sub-system all physical eraseblocks are either
 * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
 * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
 * RB-trees, as well as (temporarily) in the @wl->pq queue.
 *
 * When the WL sub-system returns a physical eraseblock, the physical
 * eraseblock is protected from being moved for some "time". For this reason,
 * the physical eraseblock is not directly moved from the @wl->free tree to the
 * @wl->used tree. There is a protection queue in between where this
 * physical eraseblock is temporarily stored (@wl->pq).
 *
 * All this protection stuff is needed because:
 * o we don't want to move physical eraseblocks just after we have given them
 *   to the user; instead, we first want to let users fill them up with data;
 *
 * o there is a chance that the user will put the physical eraseblock very
 *   soon, so it makes sense not to move it for some time, but wait.
 *
 * Physical eraseblocks stay protected only for limited time. But the "time" is
 * measured in erase cycles in this case. This is implemented with help of the
 * protection queue. Eraseblocks are put to the tail of this queue when they
 * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
 * head of the queue on each erase operation (for any eraseblock). So the
 * length of the queue defines how many (global) erase cycles PEBs are protected.
 *
 * To put it differently, each physical eraseblock has 2 main states: free and
 * used.
The former state corresponds to the @wl->free tree. The latter state * is split up on several sub-states: * o the WL movement is allowed (@wl->used tree); * o the WL movement is disallowed (@wl->erroneous) because the PEB is * erroneous - e.g., there was a read error; * o the WL movement is temporarily prohibited (@wl->pq queue); * o scrubbing is needed (@wl->scrub tree). * * Depending on the sub-state, wear-leveling entries of the used physical * eraseblocks may be kept in one of those structures. * * Note, in this implementation, we keep a small in-RAM object for each physical * eraseblock. This is surely not a scalable solution. But it appears to be good * enough for moderately large flashes and it is simple. In future, one may * re-work this sub-system and make it more scalable. * * At the moment this sub-system does not utilize the sequence number, which * was introduced relatively recently. But it would be wise to do this because * the sequence number of a logical eraseblock characterizes how old is it. For * example, when we move a PEB with low erase counter, and we need to pick the * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we * pick target PEB with an average EC if our PEB is not very "old". This is a * room for future re-works of the WL sub-system. */ #include <linux/slab.h> #include <linux/crc32.h> #include <linux/freezer.h> #include <linux/kthread.h> #include "ubi.h" /* Number of physical eraseblocks reserved for wear-leveling purposes */ #define WL_RESERVED_PEBS 1 /* * Maximum difference between two erase counters. If this threshold is * exceeded, the WL sub-system starts moving data from used physical * eraseblocks with low erase counter to free physical eraseblocks with high * erase counter. */ #define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD /* * When a physical eraseblock is moved, the WL sub-system has to pick the target * physical eraseblock to move to. 
 * The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could lead
 * to an unlimited wear of one or few physical eraseblock. Indeed, imagine a
 * situation when the picked physical eraseblock is constantly erased after the
 * data is written to it. So, we have a constant which limits the highest erase
 * counter of the free physical eraseblock to pick. Namely, the WL sub-system
 * does not pick eraseblocks with erase counter greater than the lowest erase
 * counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)

/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

/* Forward declarations for self-check and erase helpers defined below. */
static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e);
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int vol_id, int lnum, int torture);
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
		      int torture);

#ifdef CONFIG_MTD_UBI_FASTMAP
/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 *
 * Allows the fastmap update to be deferred to process context when the
 * caller cannot invoke ubi_update_fastmap() directly (see get_peb_for_wl()).
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
	ubi_update_fastmap(ubi);
}

/**
 * ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap.
 * @ubi: UBI device description object
 * @pnum: the PEB number to check
 *
 * Returns %1 when @pnum appears in the attached fastmap's PEB set, %0
 * otherwise (including when no fastmap is attached).
 */
static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
{
	int i;

	if (!ubi->fm)
		return 0;

	for (i = 0; i < ubi->fm->used_blocks; i++)
		if (ubi->fm->e[i]->pnum == pnum)
			return 1;

	return 0;
}
#else
/* Without fastmap support no PEB ever belongs to a fastmap. */
static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
{
	return 0;
}
#endif

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
* @e: the wear-leveling entry to add * @root: the root of the tree * * Note, we use (erase counter, physical eraseblock number) pairs as keys in * the @ubi->used and @ubi->free RB-trees. */ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root) { struct rb_node **p, *parent = NULL; p = &root->rb_node; while (*p) { struct ubi_wl_entry *e1; parent = *p; e1 = rb_entry(parent, struct ubi_wl_entry, u.rb); if (e->ec < e1->ec) p = &(*p)->rb_left; else if (e->ec > e1->ec) p = &(*p)->rb_right; else { ubi_assert(e->pnum != e1->pnum); if (e->pnum < e1->pnum) p = &(*p)->rb_left; else p = &(*p)->rb_right; } } rb_link_node(&e->u.rb, parent, p); rb_insert_color(&e->u.rb, root); } /** * do_work - do one pending work. * @ubi: UBI device description object * * This function returns zero in case of success and a negative error code in * case of failure. */ static int do_work(struct ubi_device *ubi) { int err; struct ubi_work *wrk; cond_resched(); /* * @ubi->work_sem is used to synchronize with the workers. Workers take * it in read mode, so many of them may be doing works at a time. But * the queue flush code has to be sure the whole queue of works is * done, and it takes the mutex in write mode. */ down_read(&ubi->work_sem); spin_lock(&ubi->wl_lock); if (list_empty(&ubi->works)) { spin_unlock(&ubi->wl_lock); up_read(&ubi->work_sem); return 0; } wrk = list_entry(ubi->works.next, struct ubi_work, list); list_del(&wrk->list); ubi->works_count -= 1; ubi_assert(ubi->works_count >= 0); spin_unlock(&ubi->wl_lock); /* * Call the worker function. Do not touch the work structure * after this call as it will have been freed or reused by that * time by the worker function. */ err = wrk->func(ubi, wrk, 0); if (err) ubi_err(ubi->ubi_num, "work failed with error code %d", err); up_read(&ubi->work_sem); return err; } /** * produce_free_peb - produce a free physical eraseblock. 
* @ubi: UBI device description object * * This function tries to make a free PEB by means of synchronous execution of * pending works. This may be needed if, for example the background thread is * disabled. Returns zero in case of success and a negative error code in case * of failure. */ static int produce_free_peb(struct ubi_device *ubi) { int err; while (!ubi->free.rb_node && ubi->works_count) { spin_unlock(&ubi->wl_lock); dbg_wl("do one work synchronously"); err = do_work(ubi); spin_lock(&ubi->wl_lock); if (err) return err; } return 0; } /** * ubi_in_wl_tree - check if wear-leveling entry is present in a WL RB-tree. * @e: the wear-leveling entry to check * @root: the root of the tree * * This function returns non-zero if @e is in the @root RB-tree and zero if it * is not. */ int ubi_in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root) { struct rb_node *p; p = root->rb_node; while (p) { struct ubi_wl_entry *e1; e1 = rb_entry(p, struct ubi_wl_entry, u.rb); if (e->pnum == e1->pnum) { ubi_assert(e == e1); return 1; } if (e->ec < e1->ec) p = p->rb_left; else if (e->ec > e1->ec) p = p->rb_right; else { ubi_assert(e->pnum != e1->pnum); if (e->pnum < e1->pnum) p = p->rb_left; else p = p->rb_right; } } return 0; } /** * prot_queue_add - add physical eraseblock to the protection queue. * @ubi: UBI device description object * @e: the physical eraseblock to add * * This function adds @e to the tail of the protection queue @ubi->pq, where * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be * temporarily protected from the wear-leveling worker. Note, @wl->lock has to * be locked. 
 */
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	/* @pq is used as a circular buffer: the tail is one slot behind the
	 * head, wrapping at UBI_PROT_QUEUE_LEN. */
	int pq_tail = ubi->pq_head - 1;

	if (pq_tail < 0)
		pq_tail = UBI_PROT_QUEUE_LEN - 1;
	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 * @diff: maximum possible difference from the smallest erase counter
 *
 * This function looks for a wear leveling entry with erase counter closest to
 * min + @diff, where min is the smallest erase counter.  Returns the entry
 * found; the caller must guarantee @root is non-empty.
 */
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
					  struct rb_root *root, int diff)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *prev_e = NULL;
	int max;

	/* rb_first() yields the entry with the smallest (EC, pnum) key, so
	 * @max is the smallest erase counter plus @diff. */
	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	max = e->ec + diff;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			/* Remember the previous candidate in case the best
			 * one must be held back as a fastmap anchor below. */
			prev_e = e;
			e = e1;
		}
	}

	/* If no fastmap has been written and this WL entry can be used
	 * as anchor PEB, hold it back and return the second best WL entry
	 * such that fastmap can use the anchor PEB later. */
	if (prev_e && !ubi->fm_disabled && !ubi->fm &&
	    e->pnum < UBI_FM_MAX_START)
		return prev_e;

	return e;
}

/**
 * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 *
 * This function looks for a wear leveling entry with medium erase counter,
 * but not greater or equivalent than the lowest erase counter plus
 * %WL_FREE_MAX_DIFF/2.
*/ static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi, struct rb_root *root) { struct ubi_wl_entry *e, *first, *last; first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb); if (last->ec - first->ec < WL_FREE_MAX_DIFF) { e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb); #ifdef CONFIG_MTD_UBI_FASTMAP /* If no fastmap has been written and this WL entry can be used * as anchor PEB, hold it back and return the second best * WL entry such that fastmap can use the anchor PEB later. */ if (e && !ubi->fm_disabled && !ubi->fm && e->pnum < UBI_FM_MAX_START) e = rb_entry(rb_next(root->rb_node), struct ubi_wl_entry, u.rb); #endif } else e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2); return e; } #ifdef CONFIG_MTD_UBI_FASTMAP /** * find_anchor_wl_entry - find wear-leveling entry to used as anchor PEB. * @root: the RB-tree where to look for */ static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root) { struct rb_node *p; struct ubi_wl_entry *e, *victim = NULL; int max_ec = UBI_MAX_ERASECOUNTER; ubi_rb_for_each_entry(p, e, root, u.rb) { if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) { victim = e; max_ec = e->ec; } } return victim; } static int anchor_pebs_avalible(struct rb_root *root) { struct rb_node *p; struct ubi_wl_entry *e; ubi_rb_for_each_entry(p, e, root, u.rb) if (e->pnum < UBI_FM_MAX_START) return 1; return 0; } /** * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number. * @ubi: UBI device description object * @anchor: This PEB will be used as anchor PEB by fastmap * * The function returns a physical erase block with a given maximal number * and removes it from the wl subsystem. * Must be called with wl_lock held! 
*/ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor) { struct ubi_wl_entry *e = NULL; if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1)) { ubi_warn(ubi->ubi_num, "Can't get peb for fastmap:anchor=%d, free_cnt=%d, reserved=%d", anchor, ubi->free_count, ubi->beb_rsvd_pebs); goto out; } if (anchor) e = find_anchor_wl_entry(&ubi->free); else e = find_mean_wl_entry(ubi, &ubi->free); if (!e) goto out; self_check_in_wl_tree(ubi, e, &ubi->free); /* remove it from the free list, * the wl subsystem does no longer know this erase block */ rb_erase(&e->u.rb, &ubi->free); ubi->free_count--; out: return e; } #endif /** * __wl_get_peb - get a physical eraseblock. * @ubi: UBI device description object * * This function returns a physical eraseblock in case of success and a * negative error code in case of failure. */ static int __wl_get_peb(struct ubi_device *ubi) { int err; struct ubi_wl_entry *e; retry: if (!ubi->free.rb_node) { if (ubi->works_count == 0) { ubi_err(ubi->ubi_num, "no free eraseblocks"); ubi_assert(list_empty(&ubi->works)); return -ENOSPC; } err = produce_free_peb(ubi); if (err < 0) return err; goto retry; } e = find_mean_wl_entry(ubi, &ubi->free); if (!e) { ubi_err(ubi->ubi_num, "no free eraseblocks"); return -ENOSPC; } self_check_in_wl_tree(ubi, e, &ubi->free); /* * Move the physical eraseblock to the protection queue where it will * be protected from being moved for some time. */ rb_erase(&e->u.rb, &ubi->free); ubi->free_count--; dbg_wl("PEB %d EC %d", e->pnum, e->ec); #ifndef CONFIG_MTD_UBI_FASTMAP /* We have to enqueue e only if fastmap is disabled, * is fastmap enabled prot_queue_add() will be called by * ubi_wl_get_peb() after removing e from the pool. */ prot_queue_add(ubi, e); #endif return e->pnum; } #ifdef CONFIG_MTD_UBI_FASTMAP /** * return_unused_pool_pebs - returns unused PEB to the free tree. 
* @ubi: UBI device description object * @pool: fastmap pool description object */ static void return_unused_pool_pebs(struct ubi_device *ubi, struct ubi_fm_pool *pool) { int i; struct ubi_wl_entry *e; for (i = pool->used; i < pool->size; i++) { e = ubi->lookuptbl[pool->pebs[i]]; wl_tree_add(e, &ubi->free); ubi->free_count++; } } /** * refill_wl_pool - refills all the fastmap pool used by the * WL sub-system. * @ubi: UBI device description object */ static void refill_wl_pool(struct ubi_device *ubi) { struct ubi_wl_entry *e; struct ubi_fm_pool *pool = &ubi->fm_wl_pool; return_unused_pool_pebs(ubi, pool); for (pool->size = 0; pool->size < pool->max_size; pool->size++) { if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 5)) break; e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); self_check_in_wl_tree(ubi, e, &ubi->free); rb_erase(&e->u.rb, &ubi->free); ubi->free_count--; pool->pebs[pool->size] = e->pnum; } pool->used = 0; } /** * refill_wl_user_pool - refills all the fastmap pool used by ubi_wl_get_peb. * @ubi: UBI device description object */ static void refill_wl_user_pool(struct ubi_device *ubi) { struct ubi_fm_pool *pool = &ubi->fm_pool; int err; return_unused_pool_pebs(ubi, pool); for (pool->size = 0; pool->size < pool->max_size; pool->size++) { retry: if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1)) { /* There are no avaliable pebs. Try to free * PEB by means of synchronous execution of * pending works. */ if (ubi->works_count == 0) break; spin_unlock(&ubi->wl_lock); err = do_work(ubi); spin_lock(&ubi->wl_lock); if (err < 0) break; goto retry; } pool->pebs[pool->size] = __wl_get_peb(ubi); if (pool->pebs[pool->size] < 0) break; } pool->used = 0; } /** * ubi_refill_pools - refills all fastmap PEB pools. 
* @ubi: UBI device description object */ void ubi_refill_pools(struct ubi_device *ubi) { spin_lock(&ubi->wl_lock); refill_wl_pool(ubi); refill_wl_user_pool(ubi); spin_unlock(&ubi->wl_lock); } /* ubi_wl_get_peb - works exaclty like __wl_get_peb but keeps track of * the fastmap pool. */ int ubi_wl_get_peb(struct ubi_device *ubi) { int ret; struct ubi_fm_pool *pool = &ubi->fm_pool; struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool; if (!pool->size || !wl_pool->size || pool->used == pool->size || wl_pool->used == wl_pool->size) ubi_update_fastmap(ubi); /* we got not a single free PEB */ if (!pool->size) ret = -ENOSPC; else { spin_lock(&ubi->wl_lock); ret = pool->pebs[pool->used++]; prot_queue_add(ubi, ubi->lookuptbl[ret]); spin_unlock(&ubi->wl_lock); } return ret; } /* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system. * * @ubi: UBI device description object */ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) { struct ubi_fm_pool *pool = &ubi->fm_wl_pool; int pnum; if (pool->used == pool->size || !pool->size) { /* We cannot update the fastmap here because this * function is called in atomic context. * Let's fail here and refill/update it as soon as possible. 
*/ schedule_work(&ubi->fm_work); return NULL; } else { pnum = pool->pebs[pool->used++]; return ubi->lookuptbl[pnum]; } } #else static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) { struct ubi_wl_entry *e; e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); self_check_in_wl_tree(ubi, e, &ubi->free); ubi->free_count--; ubi_assert(ubi->free_count >= 0); rb_erase(&e->u.rb, &ubi->free); return e; } int ubi_wl_get_peb(struct ubi_device *ubi) { int peb, err; spin_lock(&ubi->wl_lock); peb = __wl_get_peb(ubi); spin_unlock(&ubi->wl_lock); if (peb < 0) return peb; err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset, ubi->peb_size - ubi->vid_hdr_aloffset); if (err) { ubi_err(ubi->ubi_num, "new PEB %d does not contain all 0xFF bytes", peb); return err; } return peb; } #endif /** * ubi_wl_scan_all - Scan all PEB's * @ubi: UBI device description object * * This function schedules all device PEBs for erasure if free, or for * scrubbing otherwise. This trigger is used to prevent data loss due to read * disturb, data retention. * * Return 0 in case of success, (negative) error code otherwise * */ int ubi_wl_scrub_all(struct ubi_device *ubi) { struct rb_node *node; struct ubi_wl_entry *wl_e, *tmp; int i, err = 0; struct ubi_wl_entry *sync_erase_q[NUM_PEBS_TO_SYNC_ERASE] = {0}; int sync_erase_pos = 0; if (!ubi->lookuptbl) { ubi_err(ubi->ubi_num, "lookuptbl is null"); return -ENOENT; } spin_lock(&ubi->wl_lock); if (ubi->scrub_in_progress) { ubi_err(ubi->ubi_num, "Scan already in progress, ignoring the trigger"); err = -EPERM; spin_unlock(&ubi->wl_lock); return err; } ubi->scrub_in_progress = true; /* stop all works in order to freeze system state */ ubi->thread_enabled = 0; spin_unlock(&ubi->wl_lock); down_write(&ubi->work_sem); up_write(&ubi->work_sem); /* * fm_mutex prevents fastmap flush. * Without FM flush there is no pools refill. * When the pools are empty, there are no available PEBSs for write. * Thus prevent PEBS's from moving under our feet. 
* * Keep the wl_lock, while iterating the wl data structures. */ mutex_lock(&ubi->fm_mutex); spin_lock(&ubi->wl_lock); ubi_msg(ubi->ubi_num, "Scheduling all PEBs for scrub/erasure"); /* * Flush the pools into the free list before erasing all the * PEBS in the free list. */ return_unused_pool_pebs(ubi, &ubi->fm_wl_pool); ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0; return_unused_pool_pebs(ubi, &ubi->fm_pool); ubi->fm_pool.used = ubi->fm_pool.size = 0; /* PEBs in free list */ while ((node = rb_first(&ubi->free)) != NULL) { wl_e = rb_entry(node, struct ubi_wl_entry, u.rb); /* Sanity check to verify consistency */ if (self_check_in_wl_tree(ubi, wl_e, &ubi->free)) { ubi_err(ubi->ubi_num, "PEB %d moved from free tree", wl_e->pnum); err = -EAGAIN; spin_unlock(&ubi->wl_lock); goto out; } rb_erase(&wl_e->u.rb, &ubi->free); ubi->free_count--; if (sync_erase_pos < NUM_PEBS_TO_SYNC_ERASE) { sync_erase_q[sync_erase_pos++] = wl_e; } else { spin_unlock(&ubi->wl_lock); err = schedule_erase(ubi, wl_e, UBI_UNKNOWN, UBI_UNKNOWN, 0); spin_lock(&ubi->wl_lock); } if (err) { ubi_err(ubi->ubi_num, "Failed to schedule erase for PEB %d (err=%d)", wl_e->pnum, err); ubi_ro_mode(ubi); spin_unlock(&ubi->wl_lock); goto out; } } /* Move all used pebs to scrub tree */ while ((node = rb_first(&ubi->used)) != NULL) { wl_e = rb_entry(node, struct ubi_wl_entry, u.rb); rb_erase(&wl_e->u.rb, &ubi->used); wl_tree_add(wl_e, &ubi->scrub); } /* Go over protection queue */ for (i = 0; i < UBI_PROT_QUEUE_LEN; i++) { list_for_each_entry_safe(wl_e, tmp, &ubi->pq[i], u.list) { spin_unlock(&ubi->wl_lock); err = ubi_wl_scrub_peb(ubi, wl_e->pnum); spin_lock(&ubi->wl_lock); if (err) ubi_err(ubi->ubi_num, "Failed to schedule scrub for PEB %d (err=%d)", wl_e->pnum, err); } } spin_unlock(&ubi->wl_lock); for (i = 0; i < sync_erase_pos; i++) { wl_e = sync_erase_q[i]; err = sync_erase(ubi, wl_e, 0); if (err) { ubi_err(ubi->ubi_num, "Failed to erase PEB %d (err=%d)", wl_e->pnum, err); err = schedule_erase(ubi, wl_e, 
UBI_UNKNOWN, UBI_UNKNOWN, 0); if (err) ubi_err(ubi->ubi_num, "Failed to schedule scrub for PEB %d (err=%d)", wl_e->pnum, err); } /* even if have errors we still have to return those PEB's */ spin_lock(&ubi->wl_lock); wl_tree_add(wl_e, &ubi->free); ubi->free_count++; spin_unlock(&ubi->wl_lock); } out: mutex_unlock(&ubi->fm_mutex); /* Resume the worker thread */ spin_lock(&ubi->wl_lock); ubi->thread_enabled = 1; spin_unlock(&ubi->wl_lock); if (!ubi_dbg_is_bgt_disabled(ubi)) wake_up_process(ubi->bgt_thread); /* Make sure all PEBs are scrubed after reset */ err = ubi_update_fastmap(ubi); spin_lock(&ubi->wl_lock); ubi->scrub_in_progress = false; spin_unlock(&ubi->wl_lock); ubi_msg(ubi->ubi_num, "Scrubbing all PEBs completed. err = %d", err); return err; } /** * prot_queue_del - remove a physical eraseblock from the protection queue. * @ubi: UBI device description object * @pnum: the physical eraseblock to remove * * This function deletes PEB @pnum from the protection queue and returns zero * in case of success and %-ENODEV if the PEB was not found. */ static int prot_queue_del(struct ubi_device *ubi, int pnum) { struct ubi_wl_entry *e; e = ubi->lookuptbl[pnum]; if (!e) return -ENODEV; if (self_check_in_pq(ubi, e)) return -ENODEV; list_del(&e->u.list); dbg_wl("deleted PEB %d from the protection queue", e->pnum); return 0; } /** * sync_erase - synchronously erase a physical eraseblock. * @ubi: UBI device description object * @e: the the physical eraseblock to erase * @torture: if the physical eraseblock has to be tortured * * This function returns zero in case of success and a negative error code in * case of failure. 
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
		      int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = self_check_ec(ubi, e->pnum, e->ec);
	if (err)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

	/* A non-negative return is added to the erase counter — presumably
	 * the number of erase operations performed; TODO confirm against
	 * ubi_io_sync_erase(). */
	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err(ubi->ubi_num,
			"erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	/* Persist the updated erase counter in the PEB's EC header. */
	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	/* Track the device-wide maximum erase counter under @wl_lock. */
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * serve_prot_queue - check if it is time to stop protecting PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from the
 * tail of the protection queue. These PEBs have been protected for long enough
 * and should be moved to the used tree.
 */
static void serve_prot_queue(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e, *tmp;
	int count;

	/*
	 * There may be several protected physical eraseblock to remove,
	 * process them all.
	 */
repeat:
	count = 0;
	spin_lock(&ubi->wl_lock);
	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
		dbg_wl("PEB %d EC %d protection over, move to used tree",
			e->pnum, e->ec);

		list_del(&e->u.list);
		wl_tree_add(e, &ubi->used);
		if (count++ > 32) {
			/*
			 * Let's be nice and avoid holding the spinlock for
			 * too long.
*/ spin_unlock(&ubi->wl_lock); cond_resched(); goto repeat; } } ubi->pq_head += 1; if (ubi->pq_head == UBI_PROT_QUEUE_LEN) ubi->pq_head = 0; ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN); spin_unlock(&ubi->wl_lock); } /** * __schedule_ubi_work - schedule a work. * @ubi: UBI device description object * @wrk: the work to schedule * * This function adds a work defined by @wrk to the tail of the pending works * list. Can only be used of ubi->work_sem is already held in read mode! */ static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) { spin_lock(&ubi->wl_lock); list_add_tail(&wrk->list, &ubi->works); ubi_assert(ubi->works_count >= 0); ubi->works_count += 1; if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi)) wake_up_process(ubi->bgt_thread); spin_unlock(&ubi->wl_lock); } /** * schedule_ubi_work - schedule a work. * @ubi: UBI device description object * @wrk: the work to schedule * * This function adds a work defined by @wrk to the tail of the pending works * list. */ static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) { down_read(&ubi->work_sem); __schedule_ubi_work(ubi, wrk); up_read(&ubi->work_sem); } static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, int cancel); #ifdef CONFIG_MTD_UBI_FASTMAP /** * ubi_is_erase_work - checks whether a work is erase work. * @wrk: The work object to be checked */ int ubi_is_erase_work(struct ubi_work *wrk) { return wrk->func == erase_worker; } #endif /** * schedule_erase - schedule an erase work. * @ubi: UBI device description object * @e: the WL entry of the physical eraseblock to erase * @vol_id: the volume ID that last used this PEB * @lnum: the last used logical eraseblock number for the PEB * @torture: if the physical eraseblock has to be tortured * * This function returns zero in case of success and a %-ENOMEM in case of * failure. 
*/ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int vol_id, int lnum, int torture) { struct ubi_work *wl_wrk; ubi_assert(e); ubi_assert(!ubi_is_fm_block(ubi, e->pnum)); dbg_wl("schedule erasure of PEB %d, EC %d, torture %d", e->pnum, e->ec, torture); wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); if (!wl_wrk) return -ENOMEM; wl_wrk->func = &erase_worker; wl_wrk->e = e; wl_wrk->vol_id = vol_id; wl_wrk->lnum = lnum; wl_wrk->torture = torture; schedule_ubi_work(ubi, wl_wrk); return 0; } /** * do_sync_erase - run the erase worker synchronously. * @ubi: UBI device description object * @e: the WL entry of the physical eraseblock to erase * @vol_id: the volume ID that last used this PEB * @lnum: the last used logical eraseblock number for the PEB * @torture: if the physical eraseblock has to be tortured * */ static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int vol_id, int lnum, int torture) { struct ubi_work *wl_wrk; dbg_wl("sync erase of PEB %i", e->pnum); wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); if (!wl_wrk) return -ENOMEM; wl_wrk->e = e; wl_wrk->vol_id = vol_id; wl_wrk->lnum = lnum; wl_wrk->torture = torture; return erase_worker(ubi, wl_wrk, 0); } #ifdef CONFIG_MTD_UBI_FASTMAP /** * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling * sub-system. * see: ubi_wl_put_peb() * * @ubi: UBI device description object * @fm_e: physical eraseblock to return * @lnum: the last used logical eraseblock number for the PEB * @torture: if this physical eraseblock has to be tortured */ int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e, int lnum, int torture) { struct ubi_wl_entry *e; int vol_id, pnum = fm_e->pnum; dbg_wl("PEB %d", pnum); ubi_assert(pnum >= 0); ubi_assert(pnum < ubi->peb_count); spin_lock(&ubi->wl_lock); e = ubi->lookuptbl[pnum]; /* This can happen if we recovered from a fastmap the very * first time and writing now a new one. 
In this case the wl system * has never seen any PEB used by the original fastmap. */ if (!e) { e = fm_e; ubi_assert(e->ec >= 0); ubi->lookuptbl[pnum] = e; } else { e->ec = fm_e->ec; kfree(fm_e); } spin_unlock(&ubi->wl_lock); vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID; return schedule_erase(ubi, e, vol_id, lnum, torture); } #endif /** * wear_leveling_worker - wear-leveling worker function. * @ubi: UBI device description object * @wrk: the work object * @cancel: non-zero if the worker has to free memory and exit * * This function copies a more worn out physical eraseblock to a less worn out * one. Returns zero in case of success and a negative error code in case of * failure. */ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, int cancel) { int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; int vol_id = -1, uninitialized_var(lnum); #ifdef CONFIG_MTD_UBI_FASTMAP int anchor = wrk->anchor; #endif struct ubi_wl_entry *e1, *e2; struct ubi_vid_hdr *vid_hdr; kfree(wrk); if (cancel) return 0; vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); if (!vid_hdr) return -ENOMEM; mutex_lock(&ubi->move_mutex); spin_lock(&ubi->wl_lock); ubi_assert(!ubi->move_from && !ubi->move_to); ubi_assert(!ubi->move_to_put); if (!ubi->free.rb_node || (!ubi->used.rb_node && !ubi->scrub.rb_node)) { /* * No free physical eraseblocks? Well, they must be waiting in * the queue to be erased. Cancel movement - it will be * triggered again when a free physical eraseblock appears. * * No used physical eraseblocks? They must be temporarily * protected from being moved. They will be moved to the * @ubi->used tree later and the wear-leveling will be * triggered again. 
*/ dbg_wl("cancel WL, a list is empty: free %d, used %d", !ubi->free.rb_node, !ubi->used.rb_node); goto out_cancel; } #ifdef CONFIG_MTD_UBI_FASTMAP /* Check whether we need to produce an anchor PEB */ if (!anchor) anchor = !anchor_pebs_avalible(&ubi->free); if (anchor) { e1 = find_anchor_wl_entry(&ubi->used); if (!e1) goto out_cancel; e2 = get_peb_for_wl(ubi); if (!e2) goto out_cancel; self_check_in_wl_tree(ubi, e1, &ubi->used); rb_erase(&e1->u.rb, &ubi->used); dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum); } else if (!ubi->scrub.rb_node) { #else if (!ubi->scrub.rb_node) { #endif /* * Now pick the least worn-out used physical eraseblock and a * highly worn-out free physical eraseblock. If the erase * counters differ much enough, start wear-leveling. */ e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); e2 = get_peb_for_wl(ubi); if (!e2) goto out_cancel; if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { dbg_wl("no WL needed: min used EC %d, max free EC %d", e1->ec, e2->ec); /* Give the unused PEB back */ wl_tree_add(e2, &ubi->free); ubi->free_count++; goto out_cancel; } self_check_in_wl_tree(ubi, e1, &ubi->used); rb_erase(&e1->u.rb, &ubi->used); dbg_wl("move PEB %d EC %d to PEB %d EC %d", e1->pnum, e1->ec, e2->pnum, e2->ec); } else { /* Perform scrubbing */ scrubbing = 1; e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb); e2 = get_peb_for_wl(ubi); if (!e2) goto out_cancel; self_check_in_wl_tree(ubi, e1, &ubi->scrub); rb_erase(&e1->u.rb, &ubi->scrub); dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); } ubi->move_from = e1; ubi->move_to = e2; spin_unlock(&ubi->wl_lock); /* * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum. * We so far do not know which logical eraseblock our physical * eraseblock (@e1) belongs to. We have to read the volume identifier * header first. * * Note, we are protected from this PEB being unmapped and erased. 
The * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB * which is being moved was unmapped. */ err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0); if (err && err != UBI_IO_BITFLIPS) { if (err == UBI_IO_FF) { /* * We are trying to move PEB without a VID header. UBI * always write VID headers shortly after the PEB was * given, so we have a situation when it has not yet * had a chance to write it, because it was preempted. * So add this PEB to the protection queue so far, * because presumably more data will be written there * (including the missing VID header), and then we'll * move it. */ dbg_wl("PEB %d has no VID header", e1->pnum); protect = 1; goto out_not_moved; } else if (err == UBI_IO_FF_BITFLIPS) { /* * The same situation as %UBI_IO_FF, but bit-flips were * detected. It is better to schedule this PEB for * scrubbing. */ dbg_wl("PEB %d has no VID header but has bit-flips", e1->pnum); scrubbing = 1; goto out_not_moved; } ubi_err(ubi->ubi_num, "error %d while reading VID header from PEB %d", err, e1->pnum); goto out_error; } vol_id = be32_to_cpu(vid_hdr->vol_id); lnum = be32_to_cpu(vid_hdr->lnum); err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); if (err) { if (err == MOVE_CANCEL_RACE) { /* * The LEB has not been moved because the volume is * being deleted or the PEB has been put meanwhile. We * should prevent this PEB from being selected for * wear-leveling movement again, so put it to the * protection queue. */ protect = 1; goto out_not_moved; } if (err == MOVE_RETRY) { scrubbing = 1; goto out_not_moved; } if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR || err == MOVE_TARGET_RD_ERR) { /* * Target PEB had bit-flips or write error - torture it. */ torture = 1; goto out_not_moved; } if (err == MOVE_SOURCE_RD_ERR) { /* * An error happened while reading the source PEB. Do * not switch to R/O mode in this case, and give the * upper layers a possibility to recover from this, * e.g. by unmapping corresponding LEB. 
Instead, just * put this PEB to the @ubi->erroneous list to prevent * UBI from trying to move it over and over again. */ if (ubi->erroneous_peb_count > ubi->max_erroneous) { ubi_err(ubi->ubi_num, "too many erroneous eraseblocks (%d)", ubi->erroneous_peb_count); goto out_error; } erroneous = 1; goto out_not_moved; } if (err < 0) goto out_error; ubi_assert(0); } /* The PEB has been successfully moved */ if (scrubbing) ubi_msg(ubi->ubi_num, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d", e1->pnum, vol_id, lnum, e2->pnum); ubi_free_vid_hdr(ubi, vid_hdr); spin_lock(&ubi->wl_lock); if (!ubi->move_to_put) { wl_tree_add(e2, &ubi->used); e2 = NULL; } ubi->move_from = ubi->move_to = NULL; ubi->move_to_put = ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); err = do_sync_erase(ubi, e1, vol_id, lnum, 0); if (err) { if (e2) kmem_cache_free(ubi_wl_entry_slab, e2); goto out_ro; } if (e2) { /* * Well, the target PEB was put meanwhile, schedule it for * erasure. */ dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase", e2->pnum, vol_id, lnum); err = do_sync_erase(ubi, e2, vol_id, lnum, 0); if (err) goto out_ro; } dbg_wl("done"); mutex_unlock(&ubi->move_mutex); return 0; /* * For some reasons the LEB was not moved, might be an error, might be * something else. @e1 was not changed, so return it back. @e2 might * have been changed, schedule it for erasure. 
*/ out_not_moved: if (vol_id != -1) dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)", e1->pnum, vol_id, lnum, e2->pnum, err); else dbg_wl("cancel moving PEB %d to PEB %d (%d)", e1->pnum, e2->pnum, err); spin_lock(&ubi->wl_lock); if (protect) prot_queue_add(ubi, e1); else if (erroneous) { wl_tree_add(e1, &ubi->erroneous); ubi->erroneous_peb_count += 1; } else if (scrubbing) wl_tree_add(e1, &ubi->scrub); else wl_tree_add(e1, &ubi->used); ubi_assert(!ubi->move_to_put); ubi->move_from = ubi->move_to = NULL; ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); ubi_free_vid_hdr(ubi, vid_hdr); err = do_sync_erase(ubi, e2, vol_id, lnum, torture); if (err) goto out_ro; mutex_unlock(&ubi->move_mutex); return 0; out_error: if (vol_id != -1) ubi_err(ubi->ubi_num, "error %d while moving PEB %d to PEB %d", err, e1->pnum, e2->pnum); else ubi_err(ubi->ubi_num, "error %d while moving PEB %d (LEB %d:%d) to PEB %d", err, e1->pnum, vol_id, lnum, e2->pnum); spin_lock(&ubi->wl_lock); ubi->move_from = ubi->move_to = NULL; ubi->move_to_put = ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); ubi_free_vid_hdr(ubi, vid_hdr); kmem_cache_free(ubi_wl_entry_slab, e1); kmem_cache_free(ubi_wl_entry_slab, e2); out_ro: ubi_ro_mode(ubi); mutex_unlock(&ubi->move_mutex); ubi_assert(err != 0); return err < 0 ? err : -EIO; out_cancel: ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); mutex_unlock(&ubi->move_mutex); ubi_free_vid_hdr(ubi, vid_hdr); return 0; } /** * ensure_wear_leveling - schedule wear-leveling if it is needed. * @ubi: UBI device description object * @nested: set to non-zero if this function is called from UBI worker * * This function checks if it is time to start wear-leveling and schedules it * if yes. This function returns zero in case of success and a negative error * code in case of failure. 
*/ static int ensure_wear_leveling(struct ubi_device *ubi, int nested) { int err = 0; struct ubi_wl_entry *e1; struct ubi_wl_entry *e2; struct ubi_work *wrk; spin_lock(&ubi->wl_lock); if (ubi->wl_scheduled) /* Wear-leveling is already in the work queue */ goto out_unlock; /* * If the ubi->scrub tree is not empty, scrubbing is needed, and the * the WL worker has to be scheduled anyway. */ if (!ubi->scrub.rb_node) { if (!ubi->used.rb_node || !ubi->free.rb_node) /* No physical eraseblocks - no deal */ goto out_unlock; /* * We schedule wear-leveling only if the difference between the * lowest erase counter of used physical eraseblocks and a high * erase counter of free physical eraseblocks is greater than * %UBI_WL_THRESHOLD. */ e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) goto out_unlock; dbg_wl("schedule wear-leveling"); } else dbg_wl("schedule scrubbing"); ubi->wl_scheduled = 1; spin_unlock(&ubi->wl_lock); wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); if (!wrk) { err = -ENOMEM; goto out_cancel; } wrk->anchor = 0; wrk->func = &wear_leveling_worker; if (nested) __schedule_ubi_work(ubi, wrk); else schedule_ubi_work(ubi, wrk); return err; out_cancel: spin_lock(&ubi->wl_lock); ubi->wl_scheduled = 0; out_unlock: spin_unlock(&ubi->wl_lock); return err; } #ifdef CONFIG_MTD_UBI_FASTMAP /** * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB. 
* @ubi: UBI device description object */ int ubi_ensure_anchor_pebs(struct ubi_device *ubi) { struct ubi_work *wrk; spin_lock(&ubi->wl_lock); if (ubi->wl_scheduled) { spin_unlock(&ubi->wl_lock); return 0; } ubi->wl_scheduled = 1; spin_unlock(&ubi->wl_lock); wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); if (!wrk) { spin_lock(&ubi->wl_lock); ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); return -ENOMEM; } wrk->anchor = 1; wrk->func = &wear_leveling_worker; schedule_ubi_work(ubi, wrk); return 0; } #endif /** * erase_worker - physical eraseblock erase worker function. * @ubi: UBI device description object * @wl_wrk: the work object * @cancel: non-zero if the worker has to free memory and exit * * This function erases a physical eraseblock and perform torture testing if * needed. It also takes care about marking the physical eraseblock bad if * needed. Returns zero in case of success and a negative error code in case of * failure. */ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, int cancel) { struct ubi_wl_entry *e = wl_wrk->e; int pnum = e->pnum; int vol_id = wl_wrk->vol_id; int lnum = wl_wrk->lnum; int err, available_consumed = 0; if (cancel) { dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec); kfree(wl_wrk); kmem_cache_free(ubi_wl_entry_slab, e); return 0; } dbg_wl("erase PEB %d EC %d LEB %d:%d", pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum); ubi_assert(!ubi_is_fm_block(ubi, e->pnum)); err = sync_erase(ubi, e, wl_wrk->torture); if (!err) { /* Fine, we've erased it successfully */ kfree(wl_wrk); spin_lock(&ubi->wl_lock); wl_tree_add(e, &ubi->free); ubi->free_count++; spin_unlock(&ubi->wl_lock); /* * One more erase operation has happened, take care about * protected physical eraseblocks. 
*/ serve_prot_queue(ubi); /* And take care about wear-leveling */ err = ensure_wear_leveling(ubi, 1); return err; } ubi_err(ubi->ubi_num, "failed to erase PEB %d, error %d", pnum, err); kfree(wl_wrk); if (err == -EINTR || err == -ENOMEM || err == -EAGAIN || err == -EBUSY) { int err1; /* Re-schedule the LEB for erasure */ err1 = schedule_erase(ubi, e, vol_id, lnum, 0); if (err1) { err = err1; goto out_ro; } return err; } kmem_cache_free(ubi_wl_entry_slab, e); if (err != -EIO) /* * If this is not %-EIO, we have no idea what to do. Scheduling * this physical eraseblock for erasure again would cause * errors again and again. Well, lets switch to R/O mode. */ goto out_ro; /* It is %-EIO, the PEB went bad */ if (!ubi->bad_allowed) { ubi_err(ubi->ubi_num, "bad physical eraseblock %d detected", pnum); goto out_ro; } spin_lock(&ubi->volumes_lock); if (ubi->beb_rsvd_pebs == 0) { if (ubi->avail_pebs == 0) { spin_unlock(&ubi->volumes_lock); ubi_err(ubi->ubi_num, "no reserved/available physical eraseblocks"); goto out_ro; } ubi->avail_pebs -= 1; available_consumed = 1; } spin_unlock(&ubi->volumes_lock); ubi_msg(ubi->ubi_num, "mark PEB %d as bad", pnum); err = ubi_io_mark_bad(ubi, pnum); if (err) goto out_ro; spin_lock(&ubi->volumes_lock); if (ubi->beb_rsvd_pebs > 0) { if (available_consumed) { /* * The amount of reserved PEBs increased since we last * checked. 
*/ ubi->avail_pebs += 1; available_consumed = 0; } ubi->beb_rsvd_pebs -= 1; } ubi->bad_peb_count += 1; ubi->good_peb_count -= 1; ubi_calculate_reserved(ubi); if (available_consumed) ubi_warn(ubi->ubi_num, "no PEBs in the reserved pool, used an available PEB"); else if (ubi->beb_rsvd_pebs) ubi_msg(ubi->ubi_num, "%d PEBs left in the reserve", ubi->beb_rsvd_pebs); else ubi_warn(ubi->ubi_num, "last PEB from the reserve was used"); spin_unlock(&ubi->volumes_lock); return err; out_ro: if (available_consumed) { spin_lock(&ubi->volumes_lock); ubi->avail_pebs += 1; spin_unlock(&ubi->volumes_lock); } ubi_ro_mode(ubi); return err; } /** * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system. * @ubi: UBI device description object * @vol_id: the volume ID that last used this PEB * @lnum: the last used logical eraseblock number for the PEB * @pnum: physical eraseblock to return * @torture: if this physical eraseblock has to be tortured * * This function is called to return physical eraseblock @pnum to the pool of * free physical eraseblocks. The @torture flag has to be set if an I/O error * occurred to this @pnum and it has to be tested. This function returns zero * in case of success, and a negative error code in case of failure. */ int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum, int pnum, int torture) { int err; struct ubi_wl_entry *e; dbg_wl("PEB %d", pnum); ubi_assert(pnum >= 0); ubi_assert(pnum < ubi->peb_count); retry: spin_lock(&ubi->wl_lock); e = ubi->lookuptbl[pnum]; if (e == ubi->move_from) { /* * User is putting the physical eraseblock which was selected to * be moved. It will be scheduled for erasure in the * wear-leveling worker. 
*/ dbg_wl("PEB %d is being moved, wait", pnum); spin_unlock(&ubi->wl_lock); /* Wait for the WL worker by taking the @ubi->move_mutex */ mutex_lock(&ubi->move_mutex); mutex_unlock(&ubi->move_mutex); goto retry; } else if (e == ubi->move_to) { /* * User is putting the physical eraseblock which was selected * as the target the data is moved to. It may happen if the EBA * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()' * but the WL sub-system has not put the PEB to the "used" tree * yet, but it is about to do this. So we just set a flag which * will tell the WL worker that the PEB is not needed anymore * and should be scheduled for erasure. */ dbg_wl("PEB %d is the target of data moving", pnum); ubi_assert(!ubi->move_to_put); ubi->move_to_put = 1; spin_unlock(&ubi->wl_lock); return 0; } else { if (ubi_in_wl_tree(e, &ubi->used)) { self_check_in_wl_tree(ubi, e, &ubi->used); rb_erase(&e->u.rb, &ubi->used); } else if (ubi_in_wl_tree(e, &ubi->scrub)) { self_check_in_wl_tree(ubi, e, &ubi->scrub); rb_erase(&e->u.rb, &ubi->scrub); } else if (ubi_in_wl_tree(e, &ubi->erroneous)) { self_check_in_wl_tree(ubi, e, &ubi->erroneous); rb_erase(&e->u.rb, &ubi->erroneous); ubi->erroneous_peb_count -= 1; ubi_assert(ubi->erroneous_peb_count >= 0); /* Erroneous PEBs should be tortured */ torture = 1; } else { err = prot_queue_del(ubi, e->pnum); if (err) { ubi_err(ubi->ubi_num, "PEB %d not found", pnum); ubi_ro_mode(ubi); spin_unlock(&ubi->wl_lock); return err; } } } spin_unlock(&ubi->wl_lock); err = schedule_erase(ubi, e, vol_id, lnum, torture); if (err) { spin_lock(&ubi->wl_lock); wl_tree_add(e, &ubi->used); spin_unlock(&ubi->wl_lock); } return err; } /** * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing. * @ubi: UBI device description object * @pnum: the physical eraseblock to schedule * * If a bit-flip in a physical eraseblock is detected, this physical eraseblock * needs scrubbing. 
This function schedules a physical eraseblock for * scrubbing which is done in background. This function returns zero in case of * success and a negative error code in case of failure. */ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum) { struct ubi_wl_entry *e; ubi_msg(ubi->ubi_num, "schedule PEB %d for scrubbing", pnum); retry: spin_lock(&ubi->wl_lock); e = ubi->lookuptbl[pnum]; if (e == ubi->move_from || ubi_in_wl_tree(e, &ubi->scrub) || ubi_in_wl_tree(e, &ubi->erroneous)) { spin_unlock(&ubi->wl_lock); return 0; } if (e == ubi->move_to) { /* * This physical eraseblock was used to move data to. The data * was moved but the PEB was not yet inserted to the proper * tree. We should just wait a little and let the WL worker * proceed. */ spin_unlock(&ubi->wl_lock); dbg_wl("the PEB %d is not in proper tree, retry", pnum); yield(); goto retry; } if (ubi_in_wl_tree(e, &ubi->used)) { self_check_in_wl_tree(ubi, e, &ubi->used); rb_erase(&e->u.rb, &ubi->used); } else { int err; err = prot_queue_del(ubi, e->pnum); if (err) { ubi_err(ubi->ubi_num, "PEB %d not found", pnum); ubi_ro_mode(ubi); spin_unlock(&ubi->wl_lock); return err; } } ubi_msg(ubi->ubi_num, "schedule PEB %d for scrubbing", pnum); wl_tree_add(e, &ubi->scrub); spin_unlock(&ubi->wl_lock); /* * Technically scrubbing is the same as wear-leveling, so it is done * by the WL worker. */ return ensure_wear_leveling(ubi, 0); } /** * ubi_wl_flush - flush all pending works. * @ubi: UBI device description object * @vol_id: the volume id to flush for * @lnum: the logical eraseblock number to flush for * * This function executes all pending works for a particular volume id / * logical eraseblock number pair. If either value is set to %UBI_ALL, then it * acts as a wildcard for all of the corresponding volume numbers or logical * eraseblock numbers. It returns zero in case of success and a negative error * code in case of failure. 
*/ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum) { int err = 0; int found = 1; /* * Erase while the pending works queue is not empty, but not more than * the number of currently pending works. */ dbg_wl("flush pending work for LEB %d:%d (%d pending works)", vol_id, lnum, ubi->works_count); while (found) { struct ubi_work *wrk, *tmp; found = 0; down_read(&ubi->work_sem); spin_lock(&ubi->wl_lock); list_for_each_entry_safe(wrk, tmp, &ubi->works, list) { if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) && (lnum == UBI_ALL || wrk->lnum == lnum)) { list_del(&wrk->list); ubi->works_count -= 1; ubi_assert(ubi->works_count >= 0); spin_unlock(&ubi->wl_lock); err = wrk->func(ubi, wrk, 0); if (err) { up_read(&ubi->work_sem); return err; } spin_lock(&ubi->wl_lock); found = 1; break; } } spin_unlock(&ubi->wl_lock); up_read(&ubi->work_sem); } /* * Make sure all the works which have been done in parallel are * finished. */ down_write(&ubi->work_sem); up_write(&ubi->work_sem); return err; } /** * tree_destroy - destroy an RB-tree. * @root: the root of the tree to destroy */ static void tree_destroy(struct rb_root *root) { struct rb_node *rb; struct ubi_wl_entry *e; rb = root->rb_node; while (rb) { if (rb->rb_left) rb = rb->rb_left; else if (rb->rb_right) rb = rb->rb_right; else { e = rb_entry(rb, struct ubi_wl_entry, u.rb); rb = rb_parent(rb); if (rb) { if (rb->rb_left == &e->u.rb) rb->rb_left = NULL; else rb->rb_right = NULL; } kmem_cache_free(ubi_wl_entry_slab, e); } } } /** * ubi_thread - UBI background thread. 
* @u: the UBI device description object pointer */ int ubi_thread(void *u) { int failures = 0; struct ubi_device *ubi = u; ubi_msg(ubi->ubi_num, "background thread \"%s\" started, PID %d", ubi->bgt_name, task_pid_nr(current)); set_freezable(); for (;;) { int err; if (kthread_should_stop()) { ubi_msg(ubi->ubi_num, "background thread \"%s\" should stop, PID %d", ubi->bgt_name, task_pid_nr(current)); break; } if (try_to_freeze()) continue; spin_lock(&ubi->wl_lock); if (list_empty(&ubi->works) || ubi->ro_mode || !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) { set_current_state(TASK_INTERRUPTIBLE); spin_unlock(&ubi->wl_lock); schedule(); continue; } spin_unlock(&ubi->wl_lock); err = do_work(ubi); if (err) { ubi_err(ubi->ubi_num, "%s: work failed with error code %d", ubi->bgt_name, err); if (failures++ > WL_MAX_FAILURES) { /* * Too many failures, disable the thread and * switch to read-only mode. */ ubi_msg(ubi->ubi_num, "%s: %d consecutive failures", ubi->bgt_name, WL_MAX_FAILURES); ubi_ro_mode(ubi); ubi->thread_enabled = 0; continue; } } else failures = 0; cond_resched(); } dbg_wl("background thread \"%s\" is killed", ubi->bgt_name); return 0; } /** * cancel_pending - cancel all pending works. * @ubi: UBI device description object */ static void cancel_pending(struct ubi_device *ubi) { while (!list_empty(&ubi->works)) { struct ubi_work *wrk; wrk = list_entry(ubi->works.next, struct ubi_work, list); list_del(&wrk->list); wrk->func(ubi, wrk, 1); ubi->works_count -= 1; ubi_assert(ubi->works_count >= 0); } } /** * ubi_wl_init - initialize the WL sub-system using attaching information. * @ubi: UBI device description object * @ai: attaching information * * This function returns zero in case of success, and a negative error code in * case of failure. 
*/ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) { int err, i, reserved_pebs, found_pebs = 0; struct rb_node *rb1, *rb2; struct ubi_ainf_volume *av; struct ubi_ainf_peb *aeb, *tmp; struct ubi_wl_entry *e; ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT; spin_lock_init(&ubi->wl_lock); mutex_init(&ubi->move_mutex); init_rwsem(&ubi->work_sem); ubi->max_ec = ai->max_ec; INIT_LIST_HEAD(&ubi->works); #ifdef CONFIG_MTD_UBI_FASTMAP INIT_WORK(&ubi->fm_work, update_fastmap_work_fn); #endif sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num); err = -ENOMEM; ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL); if (!ubi->lookuptbl) return err; for (i = 0; i < UBI_PROT_QUEUE_LEN; i++) INIT_LIST_HEAD(&ubi->pq[i]); ubi->pq_head = 0; list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) { cond_resched(); e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); if (!e) goto out_free; e->pnum = aeb->pnum; e->ec = aeb->ec; ubi_assert(!ubi_is_fm_block(ubi, e->pnum)); ubi->lookuptbl[e->pnum] = e; if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) { kmem_cache_free(ubi_wl_entry_slab, e); goto out_free; } found_pebs++; } ubi->free_count = 0; list_for_each_entry(aeb, &ai->free, u.list) { cond_resched(); e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); if (!e) goto out_free; e->pnum = aeb->pnum; e->ec = aeb->ec; ubi_assert(e->ec >= 0); ubi_assert(!ubi_is_fm_block(ubi, e->pnum)); wl_tree_add(e, &ubi->free); ubi->free_count++; ubi->lookuptbl[e->pnum] = e; found_pebs++; } ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) { cond_resched(); e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); if (!e) goto out_free; e->pnum = aeb->pnum; e->ec = aeb->ec; ubi->lookuptbl[e->pnum] = e; if (!aeb->scrub) { dbg_wl("add PEB %d EC %d to the used tree", e->pnum, e->ec); wl_tree_add(e, &ubi->used); } else { dbg_wl("add PEB %d EC %d to the scrub tree", e->pnum, e->ec); wl_tree_add(e, 
&ubi->scrub); } found_pebs++; } } dbg_wl("found %i PEBs", found_pebs); if (ubi->fm) ubi_assert(ubi->good_peb_count == \ found_pebs + ubi->fm->used_blocks); else ubi_assert(ubi->good_peb_count == found_pebs); reserved_pebs = WL_RESERVED_PEBS; #ifdef CONFIG_MTD_UBI_FASTMAP /* Reserve enough LEBs to store two fastmaps. */ reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2; #endif if (ubi->avail_pebs < reserved_pebs) { ubi_err(ubi->ubi_num, "no enough physical eraseblocks (%d, need %d)", ubi->avail_pebs, reserved_pebs); if (ubi->corr_peb_count) ubi_err(ubi->ubi_num, "%d PEBs are corrupted and not used", ubi->corr_peb_count); goto out_free; } ubi->avail_pebs -= reserved_pebs; ubi->rsvd_pebs += reserved_pebs; /* Schedule wear-leveling if needed */ err = ensure_wear_leveling(ubi, 0); if (err) goto out_free; return 0; out_free: cancel_pending(ubi); tree_destroy(&ubi->used); tree_destroy(&ubi->free); tree_destroy(&ubi->scrub); kfree(ubi->lookuptbl); return err; } /** * protection_queue_destroy - destroy the protection queue. * @ubi: UBI device description object */ static void protection_queue_destroy(struct ubi_device *ubi) { int i; struct ubi_wl_entry *e, *tmp; for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) { list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) { list_del(&e->u.list); kmem_cache_free(ubi_wl_entry_slab, e); } } } /** * ubi_wl_close - close the wear-leveling sub-system. * @ubi: UBI device description object */ void ubi_wl_close(struct ubi_device *ubi) { ubi_msg(ubi->ubi_num, "close the WL sub-system"); cancel_pending(ubi); protection_queue_destroy(ubi); tree_destroy(&ubi->used); tree_destroy(&ubi->erroneous); tree_destroy(&ubi->free); tree_destroy(&ubi->scrub); kfree(ubi->lookuptbl); } /** * self_check_ec - make sure that the erase counter of a PEB is correct. 
* @ubi: UBI device description object * @pnum: the physical eraseblock number to check * @ec: the erase counter to check * * This function returns zero if the erase counter of physical eraseblock @pnum * is equivalent to @ec, and a negative error code if not or if an error * occurred. */ static int self_check_ec(struct ubi_device *ubi, int pnum, int ec) { int err; long long read_ec; struct ubi_ec_hdr *ec_hdr; if (!ubi_dbg_chk_gen(ubi)) return 0; ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); if (!ec_hdr) return -ENOMEM; err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0); if (err && err != UBI_IO_BITFLIPS) { /* The header does not have to exist */ err = 0; goto out_free; } read_ec = be64_to_cpu(ec_hdr->ec); if (ec != read_ec && read_ec - ec > 1) { ubi_err(ubi->ubi_num, "self-check failed for PEB %d", pnum); ubi_err(ubi->ubi_num, "read EC is %lld, should be %d", read_ec, ec); dump_stack(); err = 1; } else err = 0; out_free: kfree(ec_hdr); return err; } /** * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree. * @ubi: UBI device description object * @e: the wear-leveling entry to check * @root: the root of the tree * * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it * is not. */ static int self_check_in_wl_tree(const struct ubi_device *ubi, struct ubi_wl_entry *e, struct rb_root *root) { if (!ubi_dbg_chk_gen(ubi)) return 0; if (ubi_in_wl_tree(e, root)) return 0; ubi_err(ubi->ubi_num, "self-check failed for PEB %d, EC %d, RB-tree %p ", e->pnum, e->ec, root); dump_stack(); return -EINVAL; } /** * self_check_in_pq - check if wear-leveling entry is in the protection * queue. * @ubi: UBI device description object * @e: the wear-leveling entry to check * * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not. 
*/ static int self_check_in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e) { struct ubi_wl_entry *p; int i; if (!ubi_dbg_chk_gen(ubi)) return 0; for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) list_for_each_entry(p, &ubi->pq[i], u.list) if (p == e) return 0; ubi_err(ubi->ubi_num, "self-check failed for PEB %d, EC %d, Protect queue", e->pnum, e->ec); dump_stack(); return -EINVAL; }
gpl-2.0
v1ron/linux-mainline
tools/build/feature/test-all.c
55
3643
/* * test-all.c: Try to build all the main testcases at once. * * A well-configured system will have all the prereqs installed, so we can speed * up auto-detection on such systems. */ /* * Quirk: Python and Perl headers cannot be in arbitrary places, so keep * these 3 testcases at the top: */ #define main main_test_libpython # include "test-libpython.c" #undef main #define main main_test_libpython_version # include "test-libpython-version.c" #undef main #define main main_test_libperl # include "test-libperl.c" #undef main #define main main_test_hello # include "test-hello.c" #undef main #define main main_test_libelf # include "test-libelf.c" #undef main #define main main_test_libelf_mmap # include "test-libelf-mmap.c" #undef main #define main main_test_glibc # include "test-glibc.c" #undef main #define main main_test_dwarf # include "test-dwarf.c" #undef main #define main main_test_dwarf_getlocations # include "test-dwarf_getlocations.c" #undef main #define main main_test_libelf_getphdrnum # include "test-libelf-getphdrnum.c" #undef main #define main main_test_libunwind # include "test-libunwind.c" #undef main #define main main_test_libaudit # include "test-libaudit.c" #undef main #define main main_test_libslang # include "test-libslang.c" #undef main #define main main_test_gtk2 # include "test-gtk2.c" #undef main #define main main_test_gtk2_infobar # include "test-gtk2-infobar.c" #undef main #define main main_test_libbfd # include "test-libbfd.c" #undef main #define main main_test_backtrace # include "test-backtrace.c" #undef main #define main main_test_libnuma # include "test-libnuma.c" #undef main #define main main_test_numa_num_possible_cpus # include "test-numa_num_possible_cpus.c" #undef main #define main main_test_timerfd # include "test-timerfd.c" #undef main #define main main_test_stackprotector_all # include "test-stackprotector-all.c" #undef main #define main main_test_libdw_dwarf_unwind # include "test-libdw-dwarf-unwind.c" #undef main #define main 
main_test_sync_compare_and_swap # include "test-sync-compare-and-swap.c" #undef main #define main main_test_zlib # include "test-zlib.c" #undef main #define main main_test_pthread_attr_setaffinity_np # include "test-pthread-attr-setaffinity-np.c" #undef main # if 0 /* * Disable libbabeltrace check for test-all, because the requested * library version is not released yet in most distributions. Will * reenable later. */ #define main main_test_libbabeltrace # include "test-libbabeltrace.c" #undef main #endif #define main main_test_lzma # include "test-lzma.c" #undef main #define main main_test_get_cpuid # include "test-get_cpuid.c" #undef main #define main main_test_bpf # include "test-bpf.c" #undef main #define main main_test_libcrypto # include "test-libcrypto.c" #undef main int main(int argc, char *argv[]) { main_test_libpython(); main_test_libpython_version(); main_test_libperl(); main_test_hello(); main_test_libelf(); main_test_libelf_mmap(); main_test_glibc(); main_test_dwarf(); main_test_dwarf_getlocations(); main_test_libelf_getphdrnum(); main_test_libunwind(); main_test_libaudit(); main_test_libslang(); main_test_gtk2(argc, argv); main_test_gtk2_infobar(argc, argv); main_test_libbfd(); main_test_backtrace(); main_test_libnuma(); main_test_numa_num_possible_cpus(); main_test_timerfd(); main_test_stackprotector_all(); main_test_libdw_dwarf_unwind(); main_test_sync_compare_and_swap(argc, argv); main_test_zlib(); main_test_pthread_attr_setaffinity_np(); main_test_lzma(); main_test_get_cpuid(); main_test_bpf(); main_test_libcrypto(); return 0; }
gpl-2.0
jdkernel/jdkernel_vigor_2.6.35
arch/arm/mach-msm/qdsp6v3/apr.c
55
17530
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/uaccess.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/delay.h> #include <linux/debugfs.h> #include <linux/platform_device.h> #include <linux/sysfs.h> #include <linux/device.h> #include <linux/slab.h> #include <mach/peripheral-loader.h> #include <mach/msm_smd.h> #include <mach/qdsp6v3/apr.h> #include <mach/subsystem_notif.h> #include <mach/subsystem_restart.h> #include "apr_tal.h" #include "dsp_debug.h" struct apr_q6 q6; struct apr_client client[APR_DEST_MAX][APR_CLIENT_MAX]; static atomic_t dsp_state; static atomic_t modem_state; static wait_queue_head_t dsp_wait; static wait_queue_head_t modem_wait; /* Subsystem restart: QDSP6 data, functions */ static struct workqueue_struct *apr_reset_workqueue; static void apr_reset_deregister(struct work_struct *work); struct apr_reset_work { void *handle; struct work_struct work; }; int apr_send_pkt(void *handle, uint32_t *buf) { struct apr_svc *svc = handle; struct apr_client *clnt; struct apr_hdr *hdr; uint16_t dest_id; uint16_t client_id; uint16_t w_len; unsigned long 
flags; if (!handle || !buf) { pr_aud_err("APR: Wrong parameters\n"); return -EINVAL; } if (svc->need_reset) { pr_aud_err("apr: send_pkt service need reset\n"); return -ENETRESET; } if ((svc->dest_id == APR_DEST_QDSP6) && (atomic_read(&dsp_state) == 0)) { pr_aud_err("apr: Still dsp is not Up\n"); return -ENETRESET; } else if ((svc->dest_id == APR_DEST_MODEM) && (atomic_read(&modem_state) == 0)) { pr_aud_err("apr: Still Modem is not Up\n"); return -ENETRESET; } spin_lock_irqsave(&svc->w_lock, flags); dest_id = svc->dest_id; client_id = svc->client_id; clnt = &client[dest_id][client_id]; if (!client[dest_id][client_id].handle) { pr_aud_err("APR: Still service is not yet opened\n"); spin_unlock_irqrestore(&svc->w_lock, flags); return -EINVAL; } hdr = (struct apr_hdr *)buf; hdr->src_domain = APR_DOMAIN_APPS; hdr->src_svc = svc->id; if (dest_id == APR_DEST_MODEM) hdr->dest_domain = APR_DOMAIN_MODEM; else if (dest_id == APR_DEST_QDSP6) hdr->dest_domain = APR_DOMAIN_ADSP; hdr->dest_svc = svc->id; w_len = apr_tal_write(clnt->handle, buf, hdr->pkt_size); if (w_len != hdr->pkt_size) pr_aud_err("Unable to write APR pkt successfully: %d\n", w_len); spin_unlock_irqrestore(&svc->w_lock, flags); return w_len; } static void apr_cb_func(void *buf, int len, void *priv) { struct apr_client_data data; struct apr_client *apr_client; struct apr_svc *c_svc; struct apr_hdr *hdr; uint16_t hdr_size; uint16_t msg_type; uint16_t ver; uint16_t src; uint16_t svc; uint16_t clnt; int i; int temp_port = 0; uint32_t *ptr; pr_debug("APR2: len = %d\n", len); ptr = buf; pr_debug("\n*****************\n"); for (i = 0; i < len/4; i++) pr_debug("%x ", ptr[i]); pr_debug("\n"); pr_debug("\n*****************\n"); if (!buf || len <= APR_HDR_SIZE) { pr_aud_err("APR: Improper apr pkt received:%p %d\n", buf, len); return; } hdr = buf; ver = hdr->hdr_field; ver = (ver & 0x000F); if (ver > APR_PKT_VER + 1) { pr_aud_err("APR: Wrong version: %d\n", ver); return; } hdr_size = hdr->hdr_field; hdr_size = ((hdr_size & 
0x00F0) >> 0x4) * 4; if (hdr_size < APR_HDR_SIZE) { pr_aud_err("APR: Wrong hdr size:%d\n", hdr_size); return; } if (hdr->pkt_size < APR_HDR_SIZE) { pr_aud_err("APR: Wrong paket size\n"); return; } msg_type = hdr->hdr_field; msg_type = (msg_type >> 0x08) & 0x0003; if (msg_type >= APR_MSG_TYPE_MAX && msg_type != APR_BASIC_RSP_RESULT) { pr_aud_err("APR: Wrong message type: %d\n", msg_type); return; } if (hdr->src_domain >= APR_DOMAIN_MAX || hdr->dest_domain >= APR_DOMAIN_MAX || hdr->src_svc >= APR_SVC_MAX || hdr->dest_svc >= APR_SVC_MAX) { pr_aud_err("APR: Wrong APR header\n"); return; } svc = hdr->dest_svc; if (hdr->src_domain == APR_DOMAIN_MODEM) { src = APR_DEST_MODEM; if (svc == APR_SVC_MVS || svc == APR_SVC_MVM || svc == APR_SVC_CVS || svc == APR_SVC_CVP || svc == APR_SVC_TEST_CLIENT) clnt = APR_CLIENT_VOICE; else { pr_aud_err("APR: Wrong svc :%d\n", svc); return; } } else if (hdr->src_domain == APR_DOMAIN_ADSP) { src = APR_DEST_QDSP6; if (svc == APR_SVC_AFE || svc == APR_SVC_ASM || svc == APR_SVC_VSM || svc == APR_SVC_VPM || svc == APR_SVC_ADM || svc == APR_SVC_ADSP_CORE || svc == APR_SVC_TEST_CLIENT || svc == APR_SVC_ADSP_MVM || svc == APR_SVC_ADSP_CVS || svc == APR_SVC_ADSP_CVP) clnt = APR_CLIENT_AUDIO; else { pr_aud_err("APR: Wrong svc :%d\n", svc); return; } } else { pr_aud_err("APR: Pkt from wrong source: %d\n", hdr->src_domain); return; } pr_debug("src =%d clnt = %d\n", src, clnt); apr_client = &client[src][clnt]; for (i = 0; i < APR_SVC_MAX; i++) if (apr_client->svc[i].id == svc) { pr_debug("%d\n", apr_client->svc[i].id); c_svc = &apr_client->svc[i]; break; } if (i == APR_SVC_MAX) { pr_aud_err("APR: service is not registered\n"); return; } pr_debug("svc_idx = %d\n", i); pr_debug("%x %x %x %p %p\n", c_svc->id, c_svc->dest_id, c_svc->client_id, c_svc->fn, c_svc->priv); data.payload_size = hdr->pkt_size - hdr_size; data.opcode = hdr->opcode; data.src = src; data.src_port = hdr->src_port; data.dest_port = hdr->dest_port; data.token = hdr->token; data.msg_type 
= msg_type; if (data.payload_size > 0) data.payload = (char *)hdr + hdr_size; temp_port = ((data.src_port >> 8) * 8) + (data.src_port & 0xFF); pr_debug("port = %d t_port = %d\n", data.src_port, temp_port); if (c_svc->port_cnt && c_svc->port_fn[temp_port]) c_svc->port_fn[temp_port](&data, c_svc->port_priv[temp_port]); else if (c_svc->fn) c_svc->fn(&data, c_svc->priv); else pr_aud_err("APR: Rxed a packet for NULL callback\n"); } struct apr_svc *apr_register(char *dest, char *svc_name, apr_fn svc_fn, uint32_t src_port, void *priv) { int client_id = 0; int svc_idx = 0; int svc_id = 0; int dest_id = 0; int temp_port = 0; struct apr_svc *svc = NULL; int rc = 0; if (!dest || !svc_name || !svc_fn) return NULL; if (!strcmp(dest, "ADSP")) dest_id = APR_DEST_QDSP6; else if (!strcmp(dest, "MODEM")) { dest_id = APR_DEST_MODEM; } else { pr_aud_err("APR: wrong destination\n"); goto done; } if ((dest_id == APR_DEST_QDSP6) && (atomic_read(&dsp_state) == 0)) { rc = wait_event_timeout(dsp_wait, (atomic_read(&dsp_state) == 1), 5*HZ); if (rc == 0) { pr_aud_err("apr: Still dsp is not Up\n"); return NULL; } } else if ((dest_id == APR_DEST_MODEM) && (atomic_read(&modem_state) == 0)) { rc = wait_event_timeout(modem_wait, (atomic_read(&modem_state) == 1), 5*HZ); if (rc == 0) { pr_aud_err("apr: Still Modem is not Up\n"); return NULL; } } if (!strcmp(svc_name, "AFE")) { client_id = APR_CLIENT_AUDIO; svc_idx = 0; svc_id = APR_SVC_AFE; } else if (!strcmp(svc_name, "ASM")) { client_id = APR_CLIENT_AUDIO; svc_idx = 1; svc_id = APR_SVC_ASM; } else if (!strcmp(svc_name, "ADM")) { client_id = APR_CLIENT_AUDIO; svc_idx = 2; svc_id = APR_SVC_ADM; } else if (!strcmp(svc_name, "CORE")) { client_id = APR_CLIENT_AUDIO; svc_idx = 3; svc_id = APR_SVC_ADSP_CORE; } else if (!strcmp(svc_name, "TEST")) { if (dest_id == APR_DEST_QDSP6) { client_id = APR_CLIENT_AUDIO; svc_idx = 4; } else { client_id = APR_CLIENT_VOICE; svc_idx = 7; } svc_id = APR_SVC_TEST_CLIENT; } else if (!strcmp(svc_name, "VSM")) { client_id = 
APR_CLIENT_VOICE; svc_idx = 0; svc_id = APR_SVC_VSM; } else if (!strcmp(svc_name, "VPM")) { client_id = APR_CLIENT_VOICE; svc_idx = 1; svc_id = APR_SVC_VPM; } else if (!strcmp(svc_name, "MVS")) { client_id = APR_CLIENT_VOICE; svc_idx = 2; svc_id = APR_SVC_MVS; } else if (!strcmp(svc_name, "MVM")) { if (dest_id == APR_DEST_MODEM) { client_id = APR_CLIENT_VOICE; svc_idx = 3; svc_id = APR_SVC_MVM; } else { client_id = APR_CLIENT_AUDIO; svc_idx = 5; svc_id = APR_SVC_ADSP_MVM; } } else if (!strcmp(svc_name, "CVS")) { if (dest_id == APR_DEST_MODEM) { client_id = APR_CLIENT_VOICE; svc_idx = 4; svc_id = APR_SVC_CVS; } else { client_id = APR_CLIENT_AUDIO; svc_idx = 6; svc_id = APR_SVC_ADSP_CVS; } } else if (!strcmp(svc_name, "CVP")) { if (dest_id == APR_DEST_MODEM) { client_id = APR_CLIENT_VOICE; svc_idx = 5; svc_id = APR_SVC_CVP; } else { client_id = APR_CLIENT_AUDIO; svc_idx = 7; svc_id = APR_SVC_ADSP_CVP; } } else if (!strcmp(svc_name, "SRD")) { client_id = APR_CLIENT_VOICE; svc_idx = 6; svc_id = APR_SVC_SRD; } else { pr_aud_err("APR: Wrong svc name\n"); goto done; } pr_debug("svc name = %s c_id = %d dest_id = %d\n", svc_name, client_id, dest_id); mutex_lock(&q6.lock); if (q6.state == APR_Q6_NOIMG) { q6.pil = pil_get("q6"); if (!q6.pil) { pr_aud_err("APR: Unable to load q6 image\n"); mutex_unlock(&q6.lock); return svc; } q6.state = APR_Q6_LOADED; } mutex_unlock(&q6.lock); mutex_lock(&client[dest_id][client_id].m_lock); if (!client[dest_id][client_id].handle) { client[dest_id][client_id].handle = apr_tal_open(client_id, dest_id, APR_DL_SMD, apr_cb_func, NULL); if (!client[dest_id][client_id].handle) { svc = NULL; pr_aud_err("APR: Unable to open handle\n"); mutex_unlock(&client[dest_id][client_id].m_lock); goto done; } } mutex_unlock(&client[dest_id][client_id].m_lock); svc = &client[dest_id][client_id].svc[svc_idx]; mutex_lock(&svc->m_lock); client[dest_id][client_id].id = client_id; if (svc->need_reset) { mutex_unlock(&svc->m_lock); pr_aud_err("APR: Service needs 
reset\n"); goto done; } svc->priv = priv; svc->id = svc_id; svc->dest_id = dest_id; svc->client_id = client_id; if (src_port != 0xFFFFFFFF) { temp_port = ((src_port >> 8) * 8) + (src_port & 0xFF); if (temp_port >= APR_MAX_PORTS) { mutex_unlock(&svc->m_lock); pr_aud_err("APR: illegal port ID %d\n", temp_port); svc = NULL; goto done; } pr_debug("port = %d t_port = %d\n", src_port, temp_port); if (!svc->port_cnt && !svc->svc_cnt) client[dest_id][client_id].svc_cnt++; svc->port_cnt++; svc->port_fn[temp_port] = svc_fn; svc->port_priv[temp_port] = priv; } else { if (!svc->fn) { if (!svc->port_cnt && !svc->svc_cnt) client[dest_id][client_id].svc_cnt++; svc->fn = svc_fn; if (svc->port_cnt) svc->svc_cnt++; } } mutex_unlock(&svc->m_lock); done: return svc; } static void apr_reset_deregister(struct work_struct *work) { struct apr_svc *handle = NULL; struct apr_reset_work *apr_reset = container_of(work, struct apr_reset_work, work); handle = apr_reset->handle; pr_debug("%s:handle[%p]\n", __func__, handle); apr_deregister(handle); kfree(apr_reset); msleep(5); } int apr_deregister(void *handle) { struct apr_svc *svc = handle; struct apr_client *clnt; uint16_t dest_id; uint16_t client_id; if (!handle) return -EINVAL; mutex_lock(&svc->m_lock); dest_id = svc->dest_id; client_id = svc->client_id; clnt = &client[dest_id][client_id]; if (svc->port_cnt > 0 || svc->svc_cnt > 0) { if (svc->port_cnt) svc->port_cnt--; else if (svc->svc_cnt) svc->svc_cnt--; if (!svc->port_cnt && !svc->svc_cnt) { client[dest_id][client_id].svc_cnt--; svc->need_reset = 0x0; } } else if (client[dest_id][client_id].svc_cnt > 0) { client[dest_id][client_id].svc_cnt--; if (!client[dest_id][client_id].svc_cnt) { svc->need_reset = 0x0; pr_debug("%s: service is reset %p\n", __func__, svc); } } if (!svc->port_cnt && !svc->svc_cnt) { svc->priv = NULL; svc->id = 0; svc->fn = NULL; svc->dest_id = 0; svc->client_id = 0; svc->need_reset = 0x0; } if (client[dest_id][client_id].handle && !client[dest_id][client_id].svc_cnt) 
{ apr_tal_close(client[dest_id][client_id].handle); client[dest_id][client_id].handle = NULL; } mutex_unlock(&svc->m_lock); return 0; } void apr_reset(void *handle) { struct apr_reset_work *apr_reset_worker = NULL; if (!handle) return; pr_debug("%s: handle[%p]\n", __func__, handle); apr_reset_worker = kzalloc(sizeof(struct apr_reset_work), GFP_ATOMIC); if (apr_reset_worker == NULL || apr_reset_workqueue == NULL) { pr_aud_err("%s: mem failure\n", __func__); return; } apr_reset_worker->handle = handle; INIT_WORK(&apr_reset_worker->work, apr_reset_deregister); queue_work(apr_reset_workqueue, &apr_reset_worker->work); } void change_q6_state(int state) { mutex_lock(&q6.lock); q6.state = state; mutex_unlock(&q6.lock); } int adsp_state(int state) { pr_aud_info("dsp state = %d\n", state); return 0; } /* Dispatch the Reset events to Modem and audio clients */ void dispatch_event(unsigned long code, unsigned short proc) { struct apr_client *apr_client; struct apr_client_data data; struct apr_svc *svc; uint16_t clnt; int i, j; data.opcode = RESET_EVENTS; data.reset_event = code; data.reset_proc = proc; clnt = APR_CLIENT_AUDIO; apr_client = &client[proc][clnt]; for (i = 0; i < APR_SVC_MAX; i++) { mutex_lock(&apr_client->svc[i].m_lock); if (apr_client->svc[i].fn) { apr_client->svc[i].need_reset = 0x1; apr_client->svc[i].fn(&data, apr_client->svc[i].priv); } if (apr_client->svc[i].port_cnt) { svc = &(apr_client->svc[i]); svc->need_reset = 0x1; for (j = 0; j < APR_MAX_PORTS; j++) if (svc->port_fn[j]) svc->port_fn[j](&data, svc->port_priv[j]); } mutex_unlock(&apr_client->svc[i].m_lock); } clnt = APR_CLIENT_VOICE; apr_client = &client[proc][clnt]; for (i = 0; i < APR_SVC_MAX; i++) { mutex_lock(&apr_client->svc[i].m_lock); if (apr_client->svc[i].fn) { apr_client->svc[i].need_reset = 0x1; apr_client->svc[i].fn(&data, apr_client->svc[i].priv); } if (apr_client->svc[i].port_cnt) { svc = &(apr_client->svc[i]); svc->need_reset = 0x1; for (j = 0; j < APR_MAX_PORTS; j++) if 
(svc->port_fn[j]) svc->port_fn[j](&data, svc->port_priv[j]); } mutex_unlock(&apr_client->svc[i].m_lock); } } static int modem_notifier_cb(struct notifier_block *this, unsigned long code, void *_cmd) { switch (code) { case SUBSYS_BEFORE_SHUTDOWN: pr_debug("M-Notify: Shutdown started\n"); atomic_set(&modem_state, 0); dispatch_event(code, APR_DEST_MODEM); break; case SUBSYS_AFTER_SHUTDOWN: pr_debug("M-Notify: Shutdown Completed\n"); break; case SUBSYS_BEFORE_POWERUP: pr_debug("M-notify: Bootup started\n"); break; case SUBSYS_AFTER_POWERUP: if (atomic_read(&modem_state) == 0) { atomic_set(&modem_state, 1); wake_up(&modem_wait); } pr_debug("M-Notify: Bootup Completed\n"); break; default: pr_aud_err("M-Notify: General: %lu\n", code); break; } return NOTIFY_DONE; } static struct notifier_block mnb = { .notifier_call = modem_notifier_cb, }; static int lpass_notifier_cb(struct notifier_block *this, unsigned long code, void *_cmd) { switch (code) { case SUBSYS_BEFORE_SHUTDOWN: pr_debug("L-Notify: Shutdown started\n"); atomic_set(&dsp_state, 0); dispatch_event(code, APR_DEST_QDSP6); break; case SUBSYS_AFTER_SHUTDOWN: pr_debug("L-Notify: Shutdown Completed\n"); break; case SUBSYS_BEFORE_POWERUP: pr_debug("L-notify: Bootup started\n"); break; case SUBSYS_AFTER_POWERUP: if (atomic_read(&dsp_state) == 0) { atomic_set(&dsp_state, 1); wake_up(&dsp_wait); } pr_debug("L-Notify: Bootup Completed\n"); break; default: pr_aud_err("L-Notify: Generel: %lu\n", code); break; } return NOTIFY_DONE; } static struct notifier_block lnb = { .notifier_call = lpass_notifier_cb, }; static int __init apr_init(void) { int i, j, k; pr_aud_info("apr_probe\n"); for (i = 0; i < APR_DEST_MAX; i++) for (j = 0; j < APR_CLIENT_MAX; j++) { mutex_init(&client[i][j].m_lock); for (k = 0; k < APR_SVC_MAX; k++) { mutex_init(&client[i][j].svc[k].m_lock); spin_lock_init(&client[i][j].svc[k].w_lock); } } mutex_init(&q6.lock); dsp_debug_register(adsp_state); apr_reset_workqueue = 
create_singlethread_workqueue("apr_driver"); if (!apr_reset_workqueue) return -ENOMEM; return 0; } device_initcall(apr_init); static int __init apr_late_init(void) { void *ret; init_waitqueue_head(&dsp_wait); init_waitqueue_head(&modem_wait); atomic_set(&dsp_state, 1); atomic_set(&modem_state, 1); ret = subsys_notif_register_notifier("modem", &mnb); pr_debug("subsys_register_notifier: ret1 = %p\n", ret); ret = subsys_notif_register_notifier("lpass", &lnb); pr_debug("subsys_register_notifier: ret2 = %p\n", ret); return 0; } late_initcall(apr_late_init);
gpl-2.0
openwrt-es/linux
fs/ufs/super.c
55
45273
// SPDX-License-Identifier: GPL-2.0-only /* * linux/fs/ufs/super.c * * Copyright (C) 1998 * Daniel Pirkl <daniel.pirkl@email.cz> * Charles University, Faculty of Mathematics and Physics */ /* Derived from * * linux/fs/ext2/super.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 */ /* * Inspired by * * linux/fs/ufs/super.c * * Copyright (C) 1996 * Adrian Rodriguez (adrian@franklins-tower.rutgers.edu) * Laboratory for Computer Science Research Computing Facility * Rutgers, The State University of New Jersey * * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) * * Kernel module support added on 96/04/26 by * Stefan Reinauer <stepan@home.culture.mipt.ru> * * Module usage counts added on 96/04/29 by * Gertjan van Wingerde <gwingerde@gmail.com> * * Clean swab support on 19970406 by * Francois-Rene Rideau <fare@tunes.org> * * 4.4BSD (FreeBSD) support added on February 1st 1998 by * Niels Kristian Bech Jensen <nkbj@image.dk> partially based * on code by Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>. * * NeXTstep support added on February 5th 1998 by * Niels Kristian Bech Jensen <nkbj@image.dk>. * * write support Daniel Pirkl <daniel.pirkl@email.cz> 1998 * * HP/UX hfs filesystem support added by * Martin K. 
Petersen <mkp@mkp.net>, August 1999 * * UFS2 (of FreeBSD 5.x) support added by * Niraj Kumar <niraj17@iitbombay.org>, Jan 2004 * * UFS2 write support added by * Evgeniy Dushistov <dushistov@mail.ru>, 2007 */ #include <linux/exportfs.h> #include <linux/module.h> #include <linux/bitops.h> #include <stdarg.h> #include <linux/uaccess.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <linux/init.h> #include <linux/parser.h> #include <linux/buffer_head.h> #include <linux/vfs.h> #include <linux/log2.h> #include <linux/mount.h> #include <linux/seq_file.h> #include <linux/iversion.h> #include "ufs_fs.h" #include "ufs.h" #include "swab.h" #include "util.h" static struct inode *ufs_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation) { struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; struct inode *inode; if (ino < UFS_ROOTINO || ino > (u64)uspi->s_ncg * uspi->s_ipg) return ERR_PTR(-ESTALE); inode = ufs_iget(sb, ino); if (IS_ERR(inode)) return ERR_CAST(inode); if (generation && inode->i_generation != generation) { iput(inode); return ERR_PTR(-ESTALE); } return inode; } static struct dentry *ufs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_dentry(sb, fid, fh_len, fh_type, ufs_nfs_get_inode); } static struct dentry *ufs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_parent(sb, fid, fh_len, fh_type, ufs_nfs_get_inode); } static struct dentry *ufs_get_parent(struct dentry *child) { struct qstr dot_dot = QSTR_INIT("..", 2); ino_t ino; ino = ufs_inode_by_name(d_inode(child), &dot_dot); if (!ino) return ERR_PTR(-ENOENT); return d_obtain_alias(ufs_iget(child->d_sb, ino)); } static const struct export_operations ufs_export_ops = { .fh_to_dentry = ufs_fh_to_dentry, .fh_to_parent = ufs_fh_to_parent, 
.get_parent = ufs_get_parent, }; #ifdef CONFIG_UFS_DEBUG /* * Print contents of ufs_super_block, useful for debugging */ static void ufs_print_super_stuff(struct super_block *sb, struct ufs_super_block_first *usb1, struct ufs_super_block_second *usb2, struct ufs_super_block_third *usb3) { u32 magic = fs32_to_cpu(sb, usb3->fs_magic); pr_debug("ufs_print_super_stuff\n"); pr_debug(" magic: 0x%x\n", magic); if (fs32_to_cpu(sb, usb3->fs_magic) == UFS2_MAGIC) { pr_debug(" fs_size: %llu\n", (unsigned long long) fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size)); pr_debug(" fs_dsize: %llu\n", (unsigned long long) fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize)); pr_debug(" bsize: %u\n", fs32_to_cpu(sb, usb1->fs_bsize)); pr_debug(" fsize: %u\n", fs32_to_cpu(sb, usb1->fs_fsize)); pr_debug(" fs_volname: %s\n", usb2->fs_un.fs_u2.fs_volname); pr_debug(" fs_sblockloc: %llu\n", (unsigned long long) fs64_to_cpu(sb, usb2->fs_un.fs_u2.fs_sblockloc)); pr_debug(" cs_ndir(No of dirs): %llu\n", (unsigned long long) fs64_to_cpu(sb, usb2->fs_un.fs_u2.cs_ndir)); pr_debug(" cs_nbfree(No of free blocks): %llu\n", (unsigned long long) fs64_to_cpu(sb, usb2->fs_un.fs_u2.cs_nbfree)); pr_info(" cs_nifree(Num of free inodes): %llu\n", (unsigned long long) fs64_to_cpu(sb, usb3->fs_un1.fs_u2.cs_nifree)); pr_info(" cs_nffree(Num of free frags): %llu\n", (unsigned long long) fs64_to_cpu(sb, usb3->fs_un1.fs_u2.cs_nffree)); pr_info(" fs_maxsymlinklen: %u\n", fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen)); } else { pr_debug(" sblkno: %u\n", fs32_to_cpu(sb, usb1->fs_sblkno)); pr_debug(" cblkno: %u\n", fs32_to_cpu(sb, usb1->fs_cblkno)); pr_debug(" iblkno: %u\n", fs32_to_cpu(sb, usb1->fs_iblkno)); pr_debug(" dblkno: %u\n", fs32_to_cpu(sb, usb1->fs_dblkno)); pr_debug(" cgoffset: %u\n", fs32_to_cpu(sb, usb1->fs_cgoffset)); pr_debug(" ~cgmask: 0x%x\n", ~fs32_to_cpu(sb, usb1->fs_cgmask)); pr_debug(" size: %u\n", fs32_to_cpu(sb, usb1->fs_size)); pr_debug(" dsize: %u\n", fs32_to_cpu(sb, usb1->fs_dsize)); pr_debug(" 
ncg: %u\n", fs32_to_cpu(sb, usb1->fs_ncg)); pr_debug(" bsize: %u\n", fs32_to_cpu(sb, usb1->fs_bsize)); pr_debug(" fsize: %u\n", fs32_to_cpu(sb, usb1->fs_fsize)); pr_debug(" frag: %u\n", fs32_to_cpu(sb, usb1->fs_frag)); pr_debug(" fragshift: %u\n", fs32_to_cpu(sb, usb1->fs_fragshift)); pr_debug(" ~fmask: %u\n", ~fs32_to_cpu(sb, usb1->fs_fmask)); pr_debug(" fshift: %u\n", fs32_to_cpu(sb, usb1->fs_fshift)); pr_debug(" sbsize: %u\n", fs32_to_cpu(sb, usb1->fs_sbsize)); pr_debug(" spc: %u\n", fs32_to_cpu(sb, usb1->fs_spc)); pr_debug(" cpg: %u\n", fs32_to_cpu(sb, usb1->fs_cpg)); pr_debug(" ipg: %u\n", fs32_to_cpu(sb, usb1->fs_ipg)); pr_debug(" fpg: %u\n", fs32_to_cpu(sb, usb1->fs_fpg)); pr_debug(" csaddr: %u\n", fs32_to_cpu(sb, usb1->fs_csaddr)); pr_debug(" cssize: %u\n", fs32_to_cpu(sb, usb1->fs_cssize)); pr_debug(" cgsize: %u\n", fs32_to_cpu(sb, usb1->fs_cgsize)); pr_debug(" fstodb: %u\n", fs32_to_cpu(sb, usb1->fs_fsbtodb)); pr_debug(" nrpos: %u\n", fs32_to_cpu(sb, usb3->fs_nrpos)); pr_debug(" ndir %u\n", fs32_to_cpu(sb, usb1->fs_cstotal.cs_ndir)); pr_debug(" nifree %u\n", fs32_to_cpu(sb, usb1->fs_cstotal.cs_nifree)); pr_debug(" nbfree %u\n", fs32_to_cpu(sb, usb1->fs_cstotal.cs_nbfree)); pr_debug(" nffree %u\n", fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree)); } pr_debug("\n"); } /* * Print contents of ufs_cylinder_group, useful for debugging */ static void ufs_print_cylinder_stuff(struct super_block *sb, struct ufs_cylinder_group *cg) { pr_debug("\nufs_print_cylinder_stuff\n"); pr_debug("size of ucg: %zu\n", sizeof(struct ufs_cylinder_group)); pr_debug(" magic: %x\n", fs32_to_cpu(sb, cg->cg_magic)); pr_debug(" time: %u\n", fs32_to_cpu(sb, cg->cg_time)); pr_debug(" cgx: %u\n", fs32_to_cpu(sb, cg->cg_cgx)); pr_debug(" ncyl: %u\n", fs16_to_cpu(sb, cg->cg_ncyl)); pr_debug(" niblk: %u\n", fs16_to_cpu(sb, cg->cg_niblk)); pr_debug(" ndblk: %u\n", fs32_to_cpu(sb, cg->cg_ndblk)); pr_debug(" cs_ndir: %u\n", fs32_to_cpu(sb, cg->cg_cs.cs_ndir)); pr_debug(" cs_nbfree: %u\n", 
fs32_to_cpu(sb, cg->cg_cs.cs_nbfree)); pr_debug(" cs_nifree: %u\n", fs32_to_cpu(sb, cg->cg_cs.cs_nifree)); pr_debug(" cs_nffree: %u\n", fs32_to_cpu(sb, cg->cg_cs.cs_nffree)); pr_debug(" rotor: %u\n", fs32_to_cpu(sb, cg->cg_rotor)); pr_debug(" frotor: %u\n", fs32_to_cpu(sb, cg->cg_frotor)); pr_debug(" irotor: %u\n", fs32_to_cpu(sb, cg->cg_irotor)); pr_debug(" frsum: %u, %u, %u, %u, %u, %u, %u, %u\n", fs32_to_cpu(sb, cg->cg_frsum[0]), fs32_to_cpu(sb, cg->cg_frsum[1]), fs32_to_cpu(sb, cg->cg_frsum[2]), fs32_to_cpu(sb, cg->cg_frsum[3]), fs32_to_cpu(sb, cg->cg_frsum[4]), fs32_to_cpu(sb, cg->cg_frsum[5]), fs32_to_cpu(sb, cg->cg_frsum[6]), fs32_to_cpu(sb, cg->cg_frsum[7])); pr_debug(" btotoff: %u\n", fs32_to_cpu(sb, cg->cg_btotoff)); pr_debug(" boff: %u\n", fs32_to_cpu(sb, cg->cg_boff)); pr_debug(" iuseoff: %u\n", fs32_to_cpu(sb, cg->cg_iusedoff)); pr_debug(" freeoff: %u\n", fs32_to_cpu(sb, cg->cg_freeoff)); pr_debug(" nextfreeoff: %u\n", fs32_to_cpu(sb, cg->cg_nextfreeoff)); pr_debug(" clustersumoff %u\n", fs32_to_cpu(sb, cg->cg_u.cg_44.cg_clustersumoff)); pr_debug(" clusteroff %u\n", fs32_to_cpu(sb, cg->cg_u.cg_44.cg_clusteroff)); pr_debug(" nclusterblks %u\n", fs32_to_cpu(sb, cg->cg_u.cg_44.cg_nclusterblks)); pr_debug("\n"); } #else # define ufs_print_super_stuff(sb, usb1, usb2, usb3) /**/ # define ufs_print_cylinder_stuff(sb, cg) /**/ #endif /* CONFIG_UFS_DEBUG */ static const struct super_operations ufs_super_ops; void ufs_error (struct super_block * sb, const char * function, const char * fmt, ...) 
{ struct ufs_sb_private_info * uspi; struct ufs_super_block_first * usb1; struct va_format vaf; va_list args; uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first(uspi); if (!sb_rdonly(sb)) { usb1->fs_clean = UFS_FSBAD; ubh_mark_buffer_dirty(USPI_UBH(uspi)); ufs_mark_sb_dirty(sb); sb->s_flags |= SB_RDONLY; } va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; switch (UFS_SB(sb)->s_mount_opt & UFS_MOUNT_ONERROR) { case UFS_MOUNT_ONERROR_PANIC: panic("panic (device %s): %s: %pV\n", sb->s_id, function, &vaf); case UFS_MOUNT_ONERROR_LOCK: case UFS_MOUNT_ONERROR_UMOUNT: case UFS_MOUNT_ONERROR_REPAIR: pr_crit("error (device %s): %s: %pV\n", sb->s_id, function, &vaf); } va_end(args); } void ufs_panic (struct super_block * sb, const char * function, const char * fmt, ...) { struct ufs_sb_private_info * uspi; struct ufs_super_block_first * usb1; struct va_format vaf; va_list args; uspi = UFS_SB(sb)->s_uspi; usb1 = ubh_get_usb_first(uspi); if (!sb_rdonly(sb)) { usb1->fs_clean = UFS_FSBAD; ubh_mark_buffer_dirty(USPI_UBH(uspi)); ufs_mark_sb_dirty(sb); } va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; sb->s_flags |= SB_RDONLY; pr_crit("panic (device %s): %s: %pV\n", sb->s_id, function, &vaf); va_end(args); } void ufs_warning (struct super_block * sb, const char * function, const char * fmt, ...) 
{ struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; pr_warn("(device %s): %s: %pV\n", sb->s_id, function, &vaf); va_end(args); } enum { Opt_type_old = UFS_MOUNT_UFSTYPE_OLD, Opt_type_sunx86 = UFS_MOUNT_UFSTYPE_SUNx86, Opt_type_sun = UFS_MOUNT_UFSTYPE_SUN, Opt_type_sunos = UFS_MOUNT_UFSTYPE_SUNOS, Opt_type_44bsd = UFS_MOUNT_UFSTYPE_44BSD, Opt_type_ufs2 = UFS_MOUNT_UFSTYPE_UFS2, Opt_type_hp = UFS_MOUNT_UFSTYPE_HP, Opt_type_nextstepcd = UFS_MOUNT_UFSTYPE_NEXTSTEP_CD, Opt_type_nextstep = UFS_MOUNT_UFSTYPE_NEXTSTEP, Opt_type_openstep = UFS_MOUNT_UFSTYPE_OPENSTEP, Opt_onerror_panic = UFS_MOUNT_ONERROR_PANIC, Opt_onerror_lock = UFS_MOUNT_ONERROR_LOCK, Opt_onerror_umount = UFS_MOUNT_ONERROR_UMOUNT, Opt_onerror_repair = UFS_MOUNT_ONERROR_REPAIR, Opt_err }; static const match_table_t tokens = { {Opt_type_old, "ufstype=old"}, {Opt_type_sunx86, "ufstype=sunx86"}, {Opt_type_sun, "ufstype=sun"}, {Opt_type_sunos, "ufstype=sunos"}, {Opt_type_44bsd, "ufstype=44bsd"}, {Opt_type_ufs2, "ufstype=ufs2"}, {Opt_type_ufs2, "ufstype=5xbsd"}, {Opt_type_hp, "ufstype=hp"}, {Opt_type_nextstepcd, "ufstype=nextstep-cd"}, {Opt_type_nextstep, "ufstype=nextstep"}, {Opt_type_openstep, "ufstype=openstep"}, /*end of possible ufs types */ {Opt_onerror_panic, "onerror=panic"}, {Opt_onerror_lock, "onerror=lock"}, {Opt_onerror_umount, "onerror=umount"}, {Opt_onerror_repair, "onerror=repair"}, {Opt_err, NULL} }; static int ufs_parse_options (char * options, unsigned * mount_options) { char * p; UFSD("ENTER\n"); if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { substring_t args[MAX_OPT_ARGS]; int token; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_type_old: ufs_clear_opt (*mount_options, UFSTYPE); ufs_set_opt (*mount_options, UFSTYPE_OLD); break; case Opt_type_sunx86: ufs_clear_opt (*mount_options, UFSTYPE); ufs_set_opt (*mount_options, UFSTYPE_SUNx86); break; case Opt_type_sun: ufs_clear_opt (*mount_options, 
UFSTYPE); ufs_set_opt (*mount_options, UFSTYPE_SUN); break; case Opt_type_sunos: ufs_clear_opt(*mount_options, UFSTYPE); ufs_set_opt(*mount_options, UFSTYPE_SUNOS); break; case Opt_type_44bsd: ufs_clear_opt (*mount_options, UFSTYPE); ufs_set_opt (*mount_options, UFSTYPE_44BSD); break; case Opt_type_ufs2: ufs_clear_opt(*mount_options, UFSTYPE); ufs_set_opt(*mount_options, UFSTYPE_UFS2); break; case Opt_type_hp: ufs_clear_opt (*mount_options, UFSTYPE); ufs_set_opt (*mount_options, UFSTYPE_HP); break; case Opt_type_nextstepcd: ufs_clear_opt (*mount_options, UFSTYPE); ufs_set_opt (*mount_options, UFSTYPE_NEXTSTEP_CD); break; case Opt_type_nextstep: ufs_clear_opt (*mount_options, UFSTYPE); ufs_set_opt (*mount_options, UFSTYPE_NEXTSTEP); break; case Opt_type_openstep: ufs_clear_opt (*mount_options, UFSTYPE); ufs_set_opt (*mount_options, UFSTYPE_OPENSTEP); break; case Opt_onerror_panic: ufs_clear_opt (*mount_options, ONERROR); ufs_set_opt (*mount_options, ONERROR_PANIC); break; case Opt_onerror_lock: ufs_clear_opt (*mount_options, ONERROR); ufs_set_opt (*mount_options, ONERROR_LOCK); break; case Opt_onerror_umount: ufs_clear_opt (*mount_options, ONERROR); ufs_set_opt (*mount_options, ONERROR_UMOUNT); break; case Opt_onerror_repair: pr_err("Unable to do repair on error, will lock lock instead\n"); ufs_clear_opt (*mount_options, ONERROR); ufs_set_opt (*mount_options, ONERROR_REPAIR); break; default: pr_err("Invalid option: \"%s\" or missing value\n", p); return 0; } } return 1; } /* * Different types of UFS hold fs_cstotal in different * places, and use different data structure for it. 
 * To make things simpler we just copy fs_cstotal to ufs_sb_private_info
 */
/*
 * Cache the filesystem-wide block/inode statistics in uspi->cs_total.
 * 44BSD superblocks with the new-style layout (fs_maxbsize == fs_bsize)
 * and UFS2 keep 64-bit counters split across the second and third
 * superblock pieces; every other flavour keeps 32-bit counters in the
 * first piece.
 */
static void ufs_setup_cstotal(struct super_block *sb)
{
	struct ufs_sb_info *sbi = UFS_SB(sb);
	struct ufs_sb_private_info *uspi = sbi->s_uspi;
	struct ufs_super_block_first *usb1;
	struct ufs_super_block_second *usb2;
	struct ufs_super_block_third *usb3;
	unsigned mtype = sbi->s_mount_opt & UFS_MOUNT_UFSTYPE;

	UFSD("ENTER, mtype=%u\n", mtype);
	usb1 = ubh_get_usb_first(uspi);
	usb2 = ubh_get_usb_second(uspi);
	usb3 = ubh_get_usb_third(uspi);

	if ((mtype == UFS_MOUNT_UFSTYPE_44BSD &&
	     (usb2->fs_un.fs_u2.fs_maxbsize == usb1->fs_bsize)) ||
	    mtype == UFS_MOUNT_UFSTYPE_UFS2) {
		/*we have statistic in different place, then usual*/
		uspi->cs_total.cs_ndir = fs64_to_cpu(sb, usb2->fs_un.fs_u2.cs_ndir);
		uspi->cs_total.cs_nbfree = fs64_to_cpu(sb, usb2->fs_un.fs_u2.cs_nbfree);
		uspi->cs_total.cs_nifree = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.cs_nifree);
		uspi->cs_total.cs_nffree = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.cs_nffree);
	} else {
		uspi->cs_total.cs_ndir = fs32_to_cpu(sb, usb1->fs_cstotal.cs_ndir);
		uspi->cs_total.cs_nbfree = fs32_to_cpu(sb, usb1->fs_cstotal.cs_nbfree);
		uspi->cs_total.cs_nifree = fs32_to_cpu(sb, usb1->fs_cstotal.cs_nifree);
		uspi->cs_total.cs_nffree = fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree);
	}
	UFSD("EXIT\n");
}

/*
 * Read on-disk structures associated with cylinder groups:
 * the csum summary area into a kmalloc'd buffer (sbi->s_csp), one
 * buffer_head per cylinder group (sbi->s_ucg[]), and the per-group
 * private-info cache slots (sbi->s_ucpi[]).  Returns 1 on success,
 * 0 on failure (after releasing whatever was acquired).
 */
static int ufs_read_cylinder_structures(struct super_block *sb)
{
	struct ufs_sb_info *sbi = UFS_SB(sb);
	struct ufs_sb_private_info *uspi = sbi->s_uspi;
	struct ufs_buffer_head * ubh;
	unsigned char * base, * space;
	unsigned size, blks, i;

	UFSD("ENTER\n");

	/*
	 * Read cs structures from (usually) first data block
	 * on the device.
	 */
	size = uspi->s_cssize;
	blks = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
	base = space = kmalloc(size, GFP_NOFS);
	if (!base)
		goto failed;
	sbi->s_csp = (struct ufs_csum *)space;
	/* copy the summary area one filesystem block at a time */
	for (i = 0; i < blks; i += uspi->s_fpb) {
		size = uspi->s_bsize;
		if (i + uspi->s_fpb > blks)
			size = (blks - i) * uspi->s_fsize;	/* short tail */

		ubh = ubh_bread(sb, uspi->s_csaddr + i, size);

		if (!ubh)
			goto failed;

		ubh_ubhcpymem (space, ubh, size);
		space += size;
		ubh_brelse (ubh);
		ubh = NULL;
	}

	/*
	 * Read cylinder group (we read only first fragment from block
	 * at this time) and prepare internal data structures for cg caching.
	 */
	sbi->s_ucg = kmalloc_array(uspi->s_ncg, sizeof(struct buffer_head *),
			GFP_NOFS);
	if (!sbi->s_ucg)
		goto failed;
	for (i = 0; i < uspi->s_ncg; i++)
		sbi->s_ucg[i] = NULL;
	for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) {
		sbi->s_ucpi[i] = NULL;
		sbi->s_cgno[i] = UFS_CGNO_EMPTY;
	}
	for (i = 0; i < uspi->s_ncg; i++) {
		UFSD("read cg %u\n", i);
		if (!(sbi->s_ucg[i] = sb_bread(sb, ufs_cgcmin(i))))
			goto failed;
		if (!ufs_cg_chkmagic (sb, (struct ufs_cylinder_group *) sbi->s_ucg[i]->b_data))
			goto failed;

		ufs_print_cylinder_stuff(sb, (struct ufs_cylinder_group *) sbi->s_ucg[i]->b_data);
	}
	for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) {
		if (!(sbi->s_ucpi[i] = kmalloc (sizeof(struct ufs_cg_private_info), GFP_NOFS)))
			goto failed;
		sbi->s_cgno[i] = UFS_CGNO_EMPTY;
	}
	sbi->s_cg_loaded = 0;
	UFSD("EXIT\n");
	return 1;

failed:
	/* NOTE(review): sbi->s_csp keeps pointing at the freed buffer here;
	 * presumably callers never touch it after a 0 return — confirm. */
	kfree (base);
	if (sbi->s_ucg) {
		for (i = 0; i < uspi->s_ncg; i++)
			if (sbi->s_ucg[i])
				brelse (sbi->s_ucg[i]);
		kfree (sbi->s_ucg);
		for (i = 0; i < UFS_MAX_GROUP_LOADED; i++)
			kfree (sbi->s_ucpi[i]);
	}
	UFSD("EXIT (FAILED)\n");
	return 0;
}

/*
 * Sync our internal copy of fs_cstotal with disk: the inverse of
 * ufs_setup_cstotal().  UFS2 writes only the 64-bit fields; new-style
 * 44BSD writes both the 64-bit and the legacy 32-bit fields; everyone
 * else writes only the 32-bit fields.  Marks the superblock buffer dirty.
 */
static void ufs_put_cstotal(struct super_block *sb)
{
	unsigned mtype = UFS_SB(sb)->s_mount_opt & UFS_MOUNT_UFSTYPE;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_super_block_first *usb1;
	struct ufs_super_block_second *usb2;
	struct ufs_super_block_third *usb3;

	UFSD("ENTER\n");
	usb1 = ubh_get_usb_first(uspi);
	usb2 = ubh_get_usb_second(uspi);
	usb3 = ubh_get_usb_third(uspi);

	if (mtype == UFS_MOUNT_UFSTYPE_UFS2) {
		/*we have statistic in different place, then usual*/
		usb2->fs_un.fs_u2.cs_ndir = cpu_to_fs64(sb, uspi->cs_total.cs_ndir);
		usb2->fs_un.fs_u2.cs_nbfree = cpu_to_fs64(sb, uspi->cs_total.cs_nbfree);
		usb3->fs_un1.fs_u2.cs_nifree = cpu_to_fs64(sb, uspi->cs_total.cs_nifree);
		usb3->fs_un1.fs_u2.cs_nffree = cpu_to_fs64(sb, uspi->cs_total.cs_nffree);
		goto out;
	}

	if (mtype == UFS_MOUNT_UFSTYPE_44BSD &&
	    (usb2->fs_un.fs_u2.fs_maxbsize == usb1->fs_bsize)) {
		/* store stats in both old and new places */
		usb2->fs_un.fs_u2.cs_ndir = cpu_to_fs64(sb, uspi->cs_total.cs_ndir);
		usb2->fs_un.fs_u2.cs_nbfree = cpu_to_fs64(sb, uspi->cs_total.cs_nbfree);
		usb3->fs_un1.fs_u2.cs_nifree = cpu_to_fs64(sb, uspi->cs_total.cs_nifree);
		usb3->fs_un1.fs_u2.cs_nffree = cpu_to_fs64(sb, uspi->cs_total.cs_nffree);
	}
	usb1->fs_cstotal.cs_ndir = cpu_to_fs32(sb, uspi->cs_total.cs_ndir);
	usb1->fs_cstotal.cs_nbfree = cpu_to_fs32(sb, uspi->cs_total.cs_nbfree);
	usb1->fs_cstotal.cs_nifree = cpu_to_fs32(sb, uspi->cs_total.cs_nifree);
	usb1->fs_cstotal.cs_nffree = cpu_to_fs32(sb, uspi->cs_total.cs_nffree);
out:
	ubh_mark_buffer_dirty(USPI_UBH(uspi));
	ufs_print_super_stuff(sb, usb1, usb2, usb3);
	UFSD("EXIT\n");
}

/**
 * ufs_put_super_internal() - put on-disk internal structures
 * @sb: pointer to super_block structure
 * Put on-disk structures associated with cylinder groups
 * and write them back to disk, also update cs_total on disk
 */
static void ufs_put_super_internal(struct super_block *sb)
{
	struct ufs_sb_info *sbi = UFS_SB(sb);
	struct ufs_sb_private_info *uspi = sbi->s_uspi;
	struct ufs_buffer_head * ubh;
	unsigned char * base, * space;
	unsigned blks, size, i;

	UFSD("ENTER\n");

	ufs_put_cstotal(sb);
	size = uspi->s_cssize;
	blks = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
	base = space = (char*) sbi->s_csp;
	/* write the cached csum summary area back, block by block */
	for (i = 0; i < blks; i += uspi->s_fpb) {
		size = uspi->s_bsize;
		if (i + uspi->s_fpb > blks)
			size = (blks - i) * uspi->s_fsize;

		/* NOTE(review): ubh_bread() result is used unchecked here —
		 * a read failure would dereference NULL; confirm upstream. */
		ubh = ubh_bread(sb, uspi->s_csaddr + i, size);

		ubh_memcpyubh (ubh, space, size);
		space += size;
		ubh_mark_buffer_uptodate (ubh, 1);
		ubh_mark_buffer_dirty (ubh);
		ubh_brelse (ubh);
	}
	for (i = 0; i < sbi->s_cg_loaded; i++) {
		ufs_put_cylinder (sb, i);
		kfree (sbi->s_ucpi[i]);
	}
	for (; i < UFS_MAX_GROUP_LOADED; i++)
		kfree (sbi->s_ucpi[i]);
	for (i = 0; i < uspi->s_ncg; i++)
		brelse (sbi->s_ucg[i]);
	kfree (sbi->s_ucg);
	kfree (base);

	UFSD("EXIT\n");
}

/*
 * Flush superblock state: stamp fs_time, refresh the Sun-style fs_state
 * checksum where applicable, and push cs_total to the dirty superblock
 * buffers.  The @wait argument is accepted for the ->sync_fs contract
 * but not used by this implementation.
 */
static int ufs_sync_fs(struct super_block *sb, int wait)
{
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_super_block_third * usb3;
	unsigned flags;

	mutex_lock(&UFS_SB(sb)->s_lock);

	UFSD("ENTER\n");

	flags = UFS_SB(sb)->s_flags;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	usb3 = ubh_get_usb_third(uspi);

	usb1->fs_time = ufs_get_seconds(sb);
	if ((flags & UFS_ST_MASK) == UFS_ST_SUN ||
	    (flags & UFS_ST_MASK) == UFS_ST_SUNOS ||
	    (flags & UFS_ST_MASK) == UFS_ST_SUNx86)
		ufs_set_fs_state(sb, usb1, usb3,
				UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time));
	ufs_put_cstotal(sb);

	UFSD("EXIT\n");
	mutex_unlock(&UFS_SB(sb)->s_lock);

	return 0;
}

/* Deferred-sync worker: clear the queued flag, then do a full sync. */
static void delayed_sync_fs(struct work_struct *work)
{
	struct ufs_sb_info *sbi;

	sbi = container_of(work, struct ufs_sb_info, sync_work.work);

	spin_lock(&sbi->work_lock);
	sbi->work_queued = 0;
	spin_unlock(&sbi->work_lock);

	ufs_sync_fs(sbi->sb, 1);
}

/*
 * Schedule a delayed superblock sync (coalesced: at most one pending
 * work item; delay derived from dirty_writeback_interval).
 */
void ufs_mark_sb_dirty(struct super_block *sb)
{
	struct ufs_sb_info *sbi = UFS_SB(sb);
	unsigned long delay;

	spin_lock(&sbi->work_lock);
	if (!sbi->work_queued) {
		delay = msecs_to_jiffies(dirty_writeback_interval * 10);
		queue_delayed_work(system_long_wq, &sbi->sync_work, delay);
		sbi->work_queued = 1;
	}
	spin_unlock(&sbi->work_lock);
}

/*
 * Unmount: write back cylinder-group state (rw mounts only), cancel any
 * pending delayed sync, and release the private superblock info.
 */
static void ufs_put_super(struct super_block *sb)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);

	UFSD("ENTER\n");

	if (!sb_rdonly(sb))
		ufs_put_super_internal(sb);
	cancel_delayed_work_sync(&sbi->sync_work);

	ubh_brelse_uspi (sbi->s_uspi);
	kfree (sbi->s_uspi);
kfree (sbi); sb->s_fs_info = NULL; UFSD("EXIT\n"); return; } static u64 ufs_max_bytes(struct super_block *sb) { struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; int bits = uspi->s_apbshift; u64 res; if (bits > 21) res = ~0ULL; else res = UFS_NDADDR + (1LL << bits) + (1LL << (2*bits)) + (1LL << (3*bits)); if (res >= (MAX_LFS_FILESIZE >> uspi->s_bshift)) return MAX_LFS_FILESIZE; return res << uspi->s_bshift; } static int ufs_fill_super(struct super_block *sb, void *data, int silent) { struct ufs_sb_info * sbi; struct ufs_sb_private_info * uspi; struct ufs_super_block_first * usb1; struct ufs_super_block_second * usb2; struct ufs_super_block_third * usb3; struct ufs_buffer_head * ubh; struct inode *inode; unsigned block_size, super_block_size; unsigned flags; unsigned super_block_offset; unsigned maxsymlen; int ret = -EINVAL; uspi = NULL; ubh = NULL; flags = 0; UFSD("ENTER\n"); #ifndef CONFIG_UFS_FS_WRITE if (!sb_rdonly(sb)) { pr_err("ufs was compiled with read-only support, can't be mounted as read-write\n"); return -EROFS; } #endif sbi = kzalloc(sizeof(struct ufs_sb_info), GFP_KERNEL); if (!sbi) goto failed_nomem; sb->s_fs_info = sbi; sbi->sb = sb; UFSD("flag %u\n", (int)(sb_rdonly(sb))); mutex_init(&sbi->s_lock); spin_lock_init(&sbi->work_lock); INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs); /* * Set default mount options * Parse mount options */ sbi->s_mount_opt = 0; ufs_set_opt (sbi->s_mount_opt, ONERROR_LOCK); if (!ufs_parse_options ((char *) data, &sbi->s_mount_opt)) { pr_err("wrong mount options\n"); goto failed; } if (!(sbi->s_mount_opt & UFS_MOUNT_UFSTYPE)) { if (!silent) pr_err("You didn't specify the type of your ufs filesystem\n\n" "mount -t ufs -o ufstype=" "sun|sunx86|44bsd|ufs2|5xbsd|old|hp|nextstep|nextstep-cd|openstep ...\n\n" ">>>WARNING<<< Wrong ufstype may corrupt your filesystem, " "default is ufstype=old\n"); ufs_set_opt (sbi->s_mount_opt, UFSTYPE_OLD); } uspi = kzalloc(sizeof(struct ufs_sb_private_info), GFP_KERNEL); sbi->s_uspi = 
uspi; if (!uspi) goto failed; uspi->s_dirblksize = UFS_SECTOR_SIZE; super_block_offset=UFS_SBLOCK; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_time_gran = NSEC_PER_SEC; sb->s_time_min = S32_MIN; sb->s_time_max = S32_MAX; switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) { case UFS_MOUNT_UFSTYPE_44BSD: UFSD("ufstype=44bsd\n"); uspi->s_fsize = block_size = 512; uspi->s_fmask = ~(512 - 1); uspi->s_fshift = 9; uspi->s_sbsize = super_block_size = 1536; uspi->s_sbbase = 0; flags |= UFS_DE_44BSD | UFS_UID_44BSD | UFS_ST_44BSD | UFS_CG_44BSD; break; case UFS_MOUNT_UFSTYPE_UFS2: UFSD("ufstype=ufs2\n"); super_block_offset=SBLOCK_UFS2; uspi->s_fsize = block_size = 512; uspi->s_fmask = ~(512 - 1); uspi->s_fshift = 9; uspi->s_sbsize = super_block_size = 1536; uspi->s_sbbase = 0; sb->s_time_gran = 1; sb->s_time_min = S64_MIN; sb->s_time_max = S64_MAX; flags |= UFS_TYPE_UFS2 | UFS_DE_44BSD | UFS_UID_44BSD | UFS_ST_44BSD | UFS_CG_44BSD; break; case UFS_MOUNT_UFSTYPE_SUN: UFSD("ufstype=sun\n"); uspi->s_fsize = block_size = 1024; uspi->s_fmask = ~(1024 - 1); uspi->s_fshift = 10; uspi->s_sbsize = super_block_size = 2048; uspi->s_sbbase = 0; uspi->s_maxsymlinklen = 0; /* Not supported on disk */ flags |= UFS_DE_OLD | UFS_UID_EFT | UFS_ST_SUN | UFS_CG_SUN; break; case UFS_MOUNT_UFSTYPE_SUNOS: UFSD("ufstype=sunos\n"); uspi->s_fsize = block_size = 1024; uspi->s_fmask = ~(1024 - 1); uspi->s_fshift = 10; uspi->s_sbsize = 2048; super_block_size = 2048; uspi->s_sbbase = 0; uspi->s_maxsymlinklen = 0; /* Not supported on disk */ flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_SUNOS | UFS_CG_SUN; break; case UFS_MOUNT_UFSTYPE_SUNx86: UFSD("ufstype=sunx86\n"); uspi->s_fsize = block_size = 1024; uspi->s_fmask = ~(1024 - 1); uspi->s_fshift = 10; uspi->s_sbsize = super_block_size = 2048; uspi->s_sbbase = 0; uspi->s_maxsymlinklen = 0; /* Not supported on disk */ flags |= UFS_DE_OLD | UFS_UID_EFT | UFS_ST_SUNx86 | UFS_CG_SUN; break; case UFS_MOUNT_UFSTYPE_OLD: UFSD("ufstype=old\n"); uspi->s_fsize = block_size = 
1024; uspi->s_fmask = ~(1024 - 1); uspi->s_fshift = 10; uspi->s_sbsize = super_block_size = 2048; uspi->s_sbbase = 0; flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD; if (!sb_rdonly(sb)) { if (!silent) pr_info("ufstype=old is supported read-only\n"); sb->s_flags |= SB_RDONLY; } break; case UFS_MOUNT_UFSTYPE_NEXTSTEP: UFSD("ufstype=nextstep\n"); uspi->s_fsize = block_size = 1024; uspi->s_fmask = ~(1024 - 1); uspi->s_fshift = 10; uspi->s_sbsize = super_block_size = 2048; uspi->s_sbbase = 0; uspi->s_dirblksize = 1024; flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD; if (!sb_rdonly(sb)) { if (!silent) pr_info("ufstype=nextstep is supported read-only\n"); sb->s_flags |= SB_RDONLY; } break; case UFS_MOUNT_UFSTYPE_NEXTSTEP_CD: UFSD("ufstype=nextstep-cd\n"); uspi->s_fsize = block_size = 2048; uspi->s_fmask = ~(2048 - 1); uspi->s_fshift = 11; uspi->s_sbsize = super_block_size = 2048; uspi->s_sbbase = 0; uspi->s_dirblksize = 1024; flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD; if (!sb_rdonly(sb)) { if (!silent) pr_info("ufstype=nextstep-cd is supported read-only\n"); sb->s_flags |= SB_RDONLY; } break; case UFS_MOUNT_UFSTYPE_OPENSTEP: UFSD("ufstype=openstep\n"); uspi->s_fsize = block_size = 1024; uspi->s_fmask = ~(1024 - 1); uspi->s_fshift = 10; uspi->s_sbsize = super_block_size = 2048; uspi->s_sbbase = 0; uspi->s_dirblksize = 1024; flags |= UFS_DE_44BSD | UFS_UID_44BSD | UFS_ST_44BSD | UFS_CG_44BSD; if (!sb_rdonly(sb)) { if (!silent) pr_info("ufstype=openstep is supported read-only\n"); sb->s_flags |= SB_RDONLY; } break; case UFS_MOUNT_UFSTYPE_HP: UFSD("ufstype=hp\n"); uspi->s_fsize = block_size = 1024; uspi->s_fmask = ~(1024 - 1); uspi->s_fshift = 10; uspi->s_sbsize = super_block_size = 2048; uspi->s_sbbase = 0; flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD; if (!sb_rdonly(sb)) { if (!silent) pr_info("ufstype=hp is supported read-only\n"); sb->s_flags |= SB_RDONLY; } break; default: if (!silent) pr_err("unknown 
ufstype\n"); goto failed; } again: if (!sb_set_blocksize(sb, block_size)) { pr_err("failed to set blocksize\n"); goto failed; } /* * read ufs super block from device */ ubh = ubh_bread_uspi(uspi, sb, uspi->s_sbbase + super_block_offset/block_size, super_block_size); if (!ubh) goto failed; usb1 = ubh_get_usb_first(uspi); usb2 = ubh_get_usb_second(uspi); usb3 = ubh_get_usb_third(uspi); /* Sort out mod used on SunOS 4.1.3 for fs_state */ uspi->s_postblformat = fs32_to_cpu(sb, usb3->fs_postblformat); if (((flags & UFS_ST_MASK) == UFS_ST_SUNOS) && (uspi->s_postblformat != UFS_42POSTBLFMT)) { flags &= ~UFS_ST_MASK; flags |= UFS_ST_SUN; } if ((flags & UFS_ST_MASK) == UFS_ST_44BSD && uspi->s_postblformat == UFS_42POSTBLFMT) { if (!silent) pr_err("this is not a 44bsd filesystem"); goto failed; } /* * Check ufs magic number */ sbi->s_bytesex = BYTESEX_LE; switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) { case UFS_MAGIC: case UFS_MAGIC_BW: case UFS2_MAGIC: case UFS_MAGIC_LFN: case UFS_MAGIC_FEA: case UFS_MAGIC_4GB: goto magic_found; } sbi->s_bytesex = BYTESEX_BE; switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) { case UFS_MAGIC: case UFS_MAGIC_BW: case UFS2_MAGIC: case UFS_MAGIC_LFN: case UFS_MAGIC_FEA: case UFS_MAGIC_4GB: goto magic_found; } if ((((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_NEXTSTEP) || ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_NEXTSTEP_CD) || ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_OPENSTEP)) && uspi->s_sbbase < 256) { ubh_brelse_uspi(uspi); ubh = NULL; uspi->s_sbbase += 8; goto again; } if (!silent) pr_err("%s(): bad magic number\n", __func__); goto failed; magic_found: /* * Check block and fragment sizes */ uspi->s_bsize = fs32_to_cpu(sb, usb1->fs_bsize); uspi->s_fsize = fs32_to_cpu(sb, usb1->fs_fsize); uspi->s_sbsize = fs32_to_cpu(sb, usb1->fs_sbsize); uspi->s_fmask = fs32_to_cpu(sb, usb1->fs_fmask); uspi->s_fshift = fs32_to_cpu(sb, usb1->fs_fshift); if 
(!is_power_of_2(uspi->s_fsize)) { pr_err("%s(): fragment size %u is not a power of 2\n", __func__, uspi->s_fsize); goto failed; } if (uspi->s_fsize < 512) { pr_err("%s(): fragment size %u is too small\n", __func__, uspi->s_fsize); goto failed; } if (uspi->s_fsize > 4096) { pr_err("%s(): fragment size %u is too large\n", __func__, uspi->s_fsize); goto failed; } if (!is_power_of_2(uspi->s_bsize)) { pr_err("%s(): block size %u is not a power of 2\n", __func__, uspi->s_bsize); goto failed; } if (uspi->s_bsize < 4096) { pr_err("%s(): block size %u is too small\n", __func__, uspi->s_bsize); goto failed; } if (uspi->s_bsize / uspi->s_fsize > 8) { pr_err("%s(): too many fragments per block (%u)\n", __func__, uspi->s_bsize / uspi->s_fsize); goto failed; } if (uspi->s_fsize != block_size || uspi->s_sbsize != super_block_size) { ubh_brelse_uspi(uspi); ubh = NULL; block_size = uspi->s_fsize; super_block_size = uspi->s_sbsize; UFSD("another value of block_size or super_block_size %u, %u\n", block_size, super_block_size); goto again; } sbi->s_flags = flags;/*after that line some functions use s_flags*/ ufs_print_super_stuff(sb, usb1, usb2, usb3); /* * Check, if file system was correctly unmounted. * If not, make it read only. 
*/ if (((flags & UFS_ST_MASK) == UFS_ST_44BSD) || ((flags & UFS_ST_MASK) == UFS_ST_OLD) || (((flags & UFS_ST_MASK) == UFS_ST_SUN || (flags & UFS_ST_MASK) == UFS_ST_SUNOS || (flags & UFS_ST_MASK) == UFS_ST_SUNx86) && (ufs_get_fs_state(sb, usb1, usb3) == (UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time))))) { switch(usb1->fs_clean) { case UFS_FSCLEAN: UFSD("fs is clean\n"); break; case UFS_FSSTABLE: UFSD("fs is stable\n"); break; case UFS_FSLOG: UFSD("fs is logging fs\n"); break; case UFS_FSOSF1: UFSD("fs is DEC OSF/1\n"); break; case UFS_FSACTIVE: pr_err("%s(): fs is active\n", __func__); sb->s_flags |= SB_RDONLY; break; case UFS_FSBAD: pr_err("%s(): fs is bad\n", __func__); sb->s_flags |= SB_RDONLY; break; default: pr_err("%s(): can't grok fs_clean 0x%x\n", __func__, usb1->fs_clean); sb->s_flags |= SB_RDONLY; break; } } else { pr_err("%s(): fs needs fsck\n", __func__); sb->s_flags |= SB_RDONLY; } /* * Read ufs_super_block into internal data structures */ sb->s_op = &ufs_super_ops; sb->s_export_op = &ufs_export_ops; sb->s_magic = fs32_to_cpu(sb, usb3->fs_magic); uspi->s_sblkno = fs32_to_cpu(sb, usb1->fs_sblkno); uspi->s_cblkno = fs32_to_cpu(sb, usb1->fs_cblkno); uspi->s_iblkno = fs32_to_cpu(sb, usb1->fs_iblkno); uspi->s_dblkno = fs32_to_cpu(sb, usb1->fs_dblkno); uspi->s_cgoffset = fs32_to_cpu(sb, usb1->fs_cgoffset); uspi->s_cgmask = fs32_to_cpu(sb, usb1->fs_cgmask); if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) { uspi->s_size = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size); uspi->s_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize); } else { uspi->s_size = fs32_to_cpu(sb, usb1->fs_size); uspi->s_dsize = fs32_to_cpu(sb, usb1->fs_dsize); } uspi->s_ncg = fs32_to_cpu(sb, usb1->fs_ncg); /* s_bsize already set */ /* s_fsize already set */ uspi->s_fpb = fs32_to_cpu(sb, usb1->fs_frag); uspi->s_minfree = fs32_to_cpu(sb, usb1->fs_minfree); uspi->s_bmask = fs32_to_cpu(sb, usb1->fs_bmask); uspi->s_fmask = fs32_to_cpu(sb, usb1->fs_fmask); uspi->s_bshift = fs32_to_cpu(sb, 
usb1->fs_bshift); uspi->s_fshift = fs32_to_cpu(sb, usb1->fs_fshift); UFSD("uspi->s_bshift = %d,uspi->s_fshift = %d", uspi->s_bshift, uspi->s_fshift); uspi->s_fpbshift = fs32_to_cpu(sb, usb1->fs_fragshift); uspi->s_fsbtodb = fs32_to_cpu(sb, usb1->fs_fsbtodb); /* s_sbsize already set */ uspi->s_csmask = fs32_to_cpu(sb, usb1->fs_csmask); uspi->s_csshift = fs32_to_cpu(sb, usb1->fs_csshift); uspi->s_nindir = fs32_to_cpu(sb, usb1->fs_nindir); uspi->s_inopb = fs32_to_cpu(sb, usb1->fs_inopb); uspi->s_nspf = fs32_to_cpu(sb, usb1->fs_nspf); uspi->s_npsect = ufs_get_fs_npsect(sb, usb1, usb3); uspi->s_interleave = fs32_to_cpu(sb, usb1->fs_interleave); uspi->s_trackskew = fs32_to_cpu(sb, usb1->fs_trackskew); if (uspi->fs_magic == UFS2_MAGIC) uspi->s_csaddr = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_csaddr); else uspi->s_csaddr = fs32_to_cpu(sb, usb1->fs_csaddr); uspi->s_cssize = fs32_to_cpu(sb, usb1->fs_cssize); uspi->s_cgsize = fs32_to_cpu(sb, usb1->fs_cgsize); uspi->s_ntrak = fs32_to_cpu(sb, usb1->fs_ntrak); uspi->s_nsect = fs32_to_cpu(sb, usb1->fs_nsect); uspi->s_spc = fs32_to_cpu(sb, usb1->fs_spc); uspi->s_ipg = fs32_to_cpu(sb, usb1->fs_ipg); uspi->s_fpg = fs32_to_cpu(sb, usb1->fs_fpg); uspi->s_cpc = fs32_to_cpu(sb, usb2->fs_un.fs_u1.fs_cpc); uspi->s_contigsumsize = fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_contigsumsize); uspi->s_qbmask = ufs_get_fs_qbmask(sb, usb3); uspi->s_qfmask = ufs_get_fs_qfmask(sb, usb3); uspi->s_nrpos = fs32_to_cpu(sb, usb3->fs_nrpos); uspi->s_postbloff = fs32_to_cpu(sb, usb3->fs_postbloff); uspi->s_rotbloff = fs32_to_cpu(sb, usb3->fs_rotbloff); uspi->s_root_blocks = mul_u64_u32_div(uspi->s_dsize, uspi->s_minfree, 100); if (uspi->s_minfree <= 5) { uspi->s_time_to_space = ~0ULL; uspi->s_space_to_time = 0; usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE); } else { uspi->s_time_to_space = (uspi->s_root_blocks / 2) + 1; uspi->s_space_to_time = mul_u64_u32_div(uspi->s_dsize, uspi->s_minfree - 2, 100) - 1; } /* * Compute another frequently used values */ 
uspi->s_fpbmask = uspi->s_fpb - 1; if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) uspi->s_apbshift = uspi->s_bshift - 3; else uspi->s_apbshift = uspi->s_bshift - 2; uspi->s_2apbshift = uspi->s_apbshift * 2; uspi->s_3apbshift = uspi->s_apbshift * 3; uspi->s_apb = 1 << uspi->s_apbshift; uspi->s_2apb = 1 << uspi->s_2apbshift; uspi->s_3apb = 1 << uspi->s_3apbshift; uspi->s_apbmask = uspi->s_apb - 1; uspi->s_nspfshift = uspi->s_fshift - UFS_SECTOR_BITS; uspi->s_nspb = uspi->s_nspf << uspi->s_fpbshift; uspi->s_inopf = uspi->s_inopb >> uspi->s_fpbshift; uspi->s_bpf = uspi->s_fsize << 3; uspi->s_bpfshift = uspi->s_fshift + 3; uspi->s_bpfmask = uspi->s_bpf - 1; if ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_44BSD || (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_UFS2) uspi->s_maxsymlinklen = fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen); if (uspi->fs_magic == UFS2_MAGIC) maxsymlen = 2 * 4 * (UFS_NDADDR + UFS_NINDIR); else maxsymlen = 4 * (UFS_NDADDR + UFS_NINDIR); if (uspi->s_maxsymlinklen > maxsymlen) { ufs_warning(sb, __func__, "ufs_read_super: excessive maximum " "fast symlink size (%u)\n", uspi->s_maxsymlinklen); uspi->s_maxsymlinklen = maxsymlen; } sb->s_maxbytes = ufs_max_bytes(sb); sb->s_max_links = UFS_LINK_MAX; inode = ufs_iget(sb, UFS_ROOTINO); if (IS_ERR(inode)) { ret = PTR_ERR(inode); goto failed; } sb->s_root = d_make_root(inode); if (!sb->s_root) { ret = -ENOMEM; goto failed; } ufs_setup_cstotal(sb); /* * Read cylinder group structures */ if (!sb_rdonly(sb)) if (!ufs_read_cylinder_structures(sb)) goto failed; UFSD("EXIT\n"); return 0; failed: if (ubh) ubh_brelse_uspi (uspi); kfree (uspi); kfree(sbi); sb->s_fs_info = NULL; UFSD("EXIT (FAILED)\n"); return ret; failed_nomem: UFSD("EXIT (NOMEM)\n"); return -ENOMEM; } static int ufs_remount (struct super_block *sb, int *mount_flags, char *data) { struct ufs_sb_private_info * uspi; struct ufs_super_block_first * usb1; struct ufs_super_block_third * usb3; unsigned new_mount_opt, 
		 ufstype;	/* completes: unsigned new_mount_opt, ufstype; */
	unsigned flags;

	sync_filesystem(sb);
	mutex_lock(&UFS_SB(sb)->s_lock);
	uspi = UFS_SB(sb)->s_uspi;
	flags = UFS_SB(sb)->s_flags;
	usb1 = ubh_get_usb_first(uspi);
	usb3 = ubh_get_usb_third(uspi);

	/*
	 * Allow the "check" option to be passed as a remount option.
	 * It is not possible to change ufstype option during remount
	 */
	ufstype = UFS_SB(sb)->s_mount_opt & UFS_MOUNT_UFSTYPE;
	new_mount_opt = 0;
	ufs_set_opt (new_mount_opt, ONERROR_LOCK);
	if (!ufs_parse_options (data, &new_mount_opt)) {
		mutex_unlock(&UFS_SB(sb)->s_lock);
		return -EINVAL;
	}
	if (!(new_mount_opt & UFS_MOUNT_UFSTYPE)) {
		/* no ufstype given on remount: keep the current one */
		new_mount_opt |= ufstype;
	} else if ((new_mount_opt & UFS_MOUNT_UFSTYPE) != ufstype) {
		pr_err("ufstype can't be changed during remount\n");
		mutex_unlock(&UFS_SB(sb)->s_lock);
		return -EINVAL;
	}

	/* ro/rw state unchanged: only the option bits need updating */
	if ((bool)(*mount_flags & SB_RDONLY) == sb_rdonly(sb)) {
		UFS_SB(sb)->s_mount_opt = new_mount_opt;
		mutex_unlock(&UFS_SB(sb)->s_lock);
		return 0;
	}

	/*
	 * fs was mouted as rw, remounting ro
	 */
	if (*mount_flags & SB_RDONLY) {
		ufs_put_super_internal(sb);
		usb1->fs_time = ufs_get_seconds(sb);
		if ((flags & UFS_ST_MASK) == UFS_ST_SUN
		  || (flags & UFS_ST_MASK) == UFS_ST_SUNOS
		  || (flags & UFS_ST_MASK) == UFS_ST_SUNx86)
			ufs_set_fs_state(sb, usb1, usb3,
				UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time));
		ubh_mark_buffer_dirty (USPI_UBH(uspi));
		sb->s_flags |= SB_RDONLY;
	} else {
	/*
	 * fs was mounted as ro, remounting rw
	 */
#ifndef CONFIG_UFS_FS_WRITE
		pr_err("ufs was compiled with read-only support, can't be mounted as read-write\n");
		mutex_unlock(&UFS_SB(sb)->s_lock);
		return -EINVAL;
#else
		if (ufstype != UFS_MOUNT_UFSTYPE_SUN &&
		    ufstype != UFS_MOUNT_UFSTYPE_SUNOS &&
		    ufstype != UFS_MOUNT_UFSTYPE_44BSD &&
		    ufstype != UFS_MOUNT_UFSTYPE_SUNx86 &&
		    ufstype != UFS_MOUNT_UFSTYPE_UFS2) {
			pr_err("this ufstype is read-only supported\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return -EINVAL;
		}
		if (!ufs_read_cylinder_structures(sb)) {
			pr_err("failed during remounting\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return -EPERM;
		}
		sb->s_flags &= ~SB_RDONLY;
#endif
	}
	UFS_SB(sb)->s_mount_opt = new_mount_opt;
	mutex_unlock(&UFS_SB(sb)->s_lock);
	return 0;
}

/*
 * Emit the effective ufstype= and onerror= options for /proc/mounts.
 * Relies on the ordering of the tokens[] table: the ufstype entries come
 * before Opt_onerror_panic, which in turn precede Opt_err, so each linear
 * scan stops at its group's sentinel.  BUG_ON fires if the stored option
 * value has no matching token (would indicate corrupted s_mount_opt).
 */
static int ufs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct ufs_sb_info *sbi = UFS_SB(root->d_sb);
	unsigned mval = sbi->s_mount_opt & UFS_MOUNT_UFSTYPE;
	const struct match_token *tp = tokens;

	while (tp->token != Opt_onerror_panic && tp->token != mval)
		++tp;
	BUG_ON(tp->token == Opt_onerror_panic);
	seq_printf(seq, ",%s", tp->pattern);

	/* mval is reused for the onerror group; scan continues from tp */
	mval = sbi->s_mount_opt & UFS_MOUNT_ONERROR;
	while (tp->token != Opt_err && tp->token != mval)
		++tp;
	BUG_ON(tp->token == Opt_err);
	seq_printf(seq, ",%s", tp->pattern);

	return 0;
}

/*
 * statfs(2): report totals from the cached cs_total statistics; f_bavail
 * subtracts the reserved (minfree) blocks so non-root sees less space.
 */
static int ufs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct ufs_sb_private_info *uspi= UFS_SB(sb)->s_uspi;
	unsigned  flags = UFS_SB(sb)->s_flags;
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

	mutex_lock(&UFS_SB(sb)->s_lock);

	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		buf->f_type = UFS2_MAGIC;
	else
		buf->f_type = UFS_MAGIC;

	buf->f_blocks = uspi->s_dsize;
	buf->f_bfree = ufs_freefrags(uspi);
	buf->f_ffree = uspi->cs_total.cs_nifree;
	buf->f_bsize = sb->s_blocksize;
	buf->f_bavail = (buf->f_bfree > uspi->s_root_blocks) ?
		(buf->f_bfree - uspi->s_root_blocks) : 0;
	buf->f_files = uspi->s_ncg * uspi->s_ipg;
	buf->f_namelen = UFS_MAXNAMLEN;
	buf->f_fsid = u64_to_fsid(id);

	mutex_unlock(&UFS_SB(sb)->s_lock);

	return 0;
}

static struct kmem_cache * ufs_inode_cachep;

/* Allocate a ufs in-core inode from the slab and init its locks. */
static struct inode *ufs_alloc_inode(struct super_block *sb)
{
	struct ufs_inode_info *ei;

	ei = kmem_cache_alloc(ufs_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	inode_set_iversion(&ei->vfs_inode, 1);
	seqlock_init(&ei->meta_lock);
	mutex_init(&ei->truncate_mutex);

	return &ei->vfs_inode;
}

/* RCU-deferred counterpart of ufs_alloc_inode(). */
static void ufs_free_in_core_inode(struct inode *inode)
{
	kmem_cache_free(ufs_inode_cachep, UFS_I(inode));
}

/* Slab constructor: runs once per slab object, not per allocation. */
static void init_once(void *foo)
{
	struct ufs_inode_info *ei = (struct ufs_inode_info *) foo;

	inode_init_once(&ei->vfs_inode);
}

/*
 * Create the inode slab; the usercopy whitelist covers only the inline
 * fast-symlink buffer (i_u1.i_symlink).
 */
static int __init init_inodecache(void)
{
	ufs_inode_cachep = kmem_cache_create_usercopy("ufs_inode_cache",
				sizeof(struct ufs_inode_info), 0,
				(SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
				 SLAB_ACCOUNT),
				offsetof(struct ufs_inode_info, i_u1.i_symlink),
				sizeof_field(struct ufs_inode_info,
					     i_u1.i_symlink),
				init_once);
	if (ufs_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ufs_inode_cachep);
}

static const struct super_operations ufs_super_ops = {
	.alloc_inode	= ufs_alloc_inode,
	.free_inode	= ufs_free_in_core_inode,
	.write_inode	= ufs_write_inode,
	.evict_inode	= ufs_evict_inode,
	.put_super	= ufs_put_super,
	.sync_fs	= ufs_sync_fs,
	.statfs		= ufs_statfs,
	.remount_fs	= ufs_remount,
	.show_options   = ufs_show_options,
};

/* Block-device mount entry point; defers to ufs_fill_super(). */
static struct dentry *ufs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, ufs_fill_super);
}

static struct file_system_type ufs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ufs",
	.mount		= ufs_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ufs");

/* Module init: inode cache first, then filesystem registration. */
static int __init init_ufs_fs(void)
{
	int err = init_inodecache();
	if (err)
		goto out1;
	err = register_filesystem(&ufs_fs_type);
	if (err)
		goto out;
	return 0;
out:
	destroy_inodecache();
out1:
	return err;
}

static void __exit exit_ufs_fs(void)
{
	unregister_filesystem(&ufs_fs_type);
	destroy_inodecache();
}

module_init(init_ufs_fs)
module_exit(exit_ufs_fs)
MODULE_LICENSE("GPL");
gpl-2.0
Ca1ne/Enoch-Sense-Kernel
arch/arm/mach-msm/radio_feedback.c
311
6026
/* arch/arm/mach-msm/radio_feedback.c
 *
 * Copyright (C) 2010 HTC Corporation.
 * Author: YaWen Su <YaWen_Su@htc.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/cpufreq.h>
#include <linux/uaccess.h>
#include <linux/mutex.h>
#include <mach/msm_smd.h>
#include <mach/msm_iomap.h>
#include <linux/fcntl.h>
#include "smd_private.h"
#include "acpuclock.h"

#define RADIO_FEEDBACK_IOCTL_MAGIC	'p'
#define RADIO_FEEDBACK_GET_CDLOG_INFO	_IOW(RADIO_FEEDBACK_IOCTL_MAGIC, 89, unsigned)

#ifdef CONFIG_RADIO_FEEDBACK8660
/* Command/response mailbox shared with the modem (layout fixed by AMSS). */
typedef struct {
	uint32_t cmdseq;
	uint32_t rspseq;
	uint32_t opcode;
	uint32_t reserve;
	uint32_t parameter[4];
	uint32_t response[4];
} htc_modem_request_type;

/*
 * Shared-memory parameter block at HTC_SMEM_PARAM_BASE_ADDR.
 * Field order and sizes define the on-SMEM layout agreed with the
 * modem firmware — do NOT reorder or resize anything here.
 */
typedef struct {
	/* ========= belows are App write ==================== */
	uint32_t	version;
	uint32_t	struct_size;
	uint32_t	htc_smem_ce_radio_dbg_flag;
	uint32_t	htc_smem_app_run_mode;
	uint32_t	htc_smem_test_flag;
	uint32_t	htc_smem_boot_reason;
	int32_t		htc_cable_status;
	uint8_t		reserve1[4];
	/* ========= belows are modem write ==================== */
	uint32_t	version_R;
	uint32_t	struct_size_R;
	uint32_t	htc_smem_erase_efs_flag;
	uint32_t	htc_smem_flight_mode_flag;
	uint8_t		htc_radio_version_addr[16];	/* modem fill it */
	uint8_t		htc_protocol_version_addr[16];	/* modem fill it */
	uint8_t		reserve2[16];
	/* ========= belows are shared ==================== */
	htc_modem_request_type	htc_modem_request; /* for error handling only */
	/* for eMMC feature */
	uint32_t	htc_emmc_magic_flag;
	uint32_t	htc_emmc_buff_addr;
	uint32_t	htc_emmc_buff_size;
	uint32_t	htc_emmc_config_offset;
	uint32_t	htc_emmc_efs_sync_status;
	uint32_t	htc_emmc_nv_calibrate_status;
	uint32_t	htc_emmc_is_dev_inited;
	uint32_t	htc_smem_user_time_offset;
	/* radio debug */
	/* Use 32 bytes to record the TCXO shutdown time statistics */
	uint32_t	htc_tcxo_off_time_total;
	uint32_t	htc_tcxo_off_cnt_total;
	uint32_t	htc_tcxo_off_time_pwrc_suspend;
	uint32_t	htc_tcxo_off_cnt_pwrc_suspend;
	uint32_t	htc_global_garbage_cnt;
	uint32_t	htc_mssahb_reset_status;
	uint32_t	htc_watchdog_status;
	uint32_t	htc_cdlog_start_addr_for_apps;
	uint32_t	htc_cdlog_max_size_for_apps;
	uint32_t	htc_ciq_flag;
} htc_smem_type;

#define HTC_SMEM_PARAM_BASE_ADDR	0x400F0000

/* Cached mapping of the SMEM parameter block; created lazily, kept for
 * the lifetime of the module (intentionally never iounmap'd). */
htc_smem_type *htc_smem_ram_addr;
#else
#define HTC_SMEM_PARAM_BASE_ADDR	0x004FC000
#define HTC_SMEM_PARAM_SIZE		0x30C
static uint32_t radio_feedback_addr;
#endif

/* cdlog window handed to userspace: physical start address + size */
struct msm_radio_feedback_config {
	uint32_t start_addr;
	uint32_t max_size;
};

struct mutex radio_feedback_lock;
struct msm_radio_feedback_config config;

#ifdef CONFIG_RADIO_FEEDBACK8660
/* Map (once) and return the SMEM parameter block, or NULL on failure. */
static htc_smem_type *radio_feedback_get_smem(void)
{
	if (htc_smem_ram_addr == NULL)
		htc_smem_ram_addr = (htc_smem_type *)ioremap(
				HTC_SMEM_PARAM_BASE_ADDR,
				sizeof(htc_smem_type));
	return htc_smem_ram_addr;
}
#endif

/*
 * radio_set_cable_status - publish the charger/cable type to the modem
 * via shared memory.  No-op on non-8660 targets.
 * Returns 0 on success, -ENOMEM if the SMEM block cannot be mapped
 * (the original code would have dereferenced NULL in that case).
 */
int radio_set_cable_status(int charger_type)
{
#ifdef CONFIG_RADIO_FEEDBACK8660
	htc_smem_type *smem = radio_feedback_get_smem();

	if (smem == NULL) {
		pr_err("%s: ioremap of smem param block failed\n", __func__);
		return -ENOMEM;
	}
	smem->htc_cable_status = charger_type;
	printk(KERN_INFO "[BATT] htc_cable_status:%d\n",
	       smem->htc_cable_status);
#endif
	return 0;
}

/*
 * ioctl: RADIO_FEEDBACK_GET_CDLOG_INFO fills and copies out the cdlog
 * window (start address + max size) read from shared memory.
 *
 * Fixes vs. the original:
 *  - non-8660 path used to ioremap() on every call and never iounmap(),
 *    leaking one vmalloc-space mapping per ioctl; now unmapped after use.
 *  - ioremap() results are checked before dereference.
 *  - config is updated under radio_feedback_lock (the mutex was
 *    initialized but never taken; presumably this was the intent).
 */
static long radio_feedback_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	int rc = 0;

	switch (cmd) {
	case RADIO_FEEDBACK_GET_CDLOG_INFO:
		mutex_lock(&radio_feedback_lock);
#ifdef CONFIG_RADIO_FEEDBACK8660
		if (radio_feedback_get_smem() == NULL) {
			rc = -ENOMEM;
			mutex_unlock(&radio_feedback_lock);
			break;
		}
		config.start_addr =
			htc_smem_ram_addr->htc_cdlog_start_addr_for_apps;
		config.max_size =
			htc_smem_ram_addr->htc_cdlog_max_size_for_apps;
#else
		radio_feedback_addr = (uint32_t)ioremap(
				HTC_SMEM_PARAM_BASE_ADDR,
				HTC_SMEM_PARAM_SIZE);
		if (!radio_feedback_addr) {
			rc = -ENOMEM;
			mutex_unlock(&radio_feedback_lock);
			break;
		}
		/* start addr(4 bytes): HTC_SMEM_PARAM_BASE_ADDR + 0x304 */
		memcpy(&config.start_addr,
		       (void *)(radio_feedback_addr + 0x304), 4);
		/* max size(4 bytes): HTC_SMEM_PARAM_BASE_ADDR + 0x308 */
		memcpy(&config.max_size,
		       (void *)(radio_feedback_addr + 0x308), 4);
		/* release the transient mapping (was leaked per call) */
		iounmap((void *)radio_feedback_addr);
		radio_feedback_addr = 0;
#endif
		printk(KERN_INFO "start addr: 0x%x, max_size: 0x%x\n",
		       config.start_addr, config.max_size);
		if (copy_to_user((void *)arg, &config, sizeof(config)))
			rc = -EFAULT;
		mutex_unlock(&radio_feedback_lock);
		break;
	default:
		rc = -EINVAL;
	}
	return rc;
}

/*
 * mmap: expose the cdlog region (as published via the ioctl above) to
 * userspace, read via an uncached IO mapping.  Rejects non-zero offsets
 * and requests larger than the advertised window; before the ioctl has
 * run, config.max_size is 0 so every request is rejected.
 */
static int radio_feedback_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pgoff;
	size_t size = vma->vm_end - vma->vm_start;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	if (size <= config.max_size)
		pgoff = config.start_addr >> PAGE_SHIFT;
	else
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_RESERVED;

	if (io_remap_pfn_range(vma, vma->vm_start, pgoff,
			       size, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}

static struct file_operations radio_feedback_fops = {
	.owner		= THIS_MODULE,
	.mmap		= radio_feedback_mmap,
	.unlocked_ioctl	= radio_feedback_ioctl,
};

static struct miscdevice radio_feedback_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "radio_feedback",
	.fops	= &radio_feedback_fops,
};

static int __init radio_feedback_init(void)
{
	int ret;

	ret = misc_register(&radio_feedback_misc);
	if (ret < 0) {
		pr_err("failed to register misc device!\n");
		return ret;
	}
	mutex_init(&radio_feedback_lock);
	return ret;
}

static void __exit radio_feedback_exit(void)
{
	int ret;

	ret = misc_deregister(&radio_feedback_misc);
	if (ret < 0)
		pr_err("failed to unregister misc device!\n");
}

module_init(radio_feedback_init);
module_exit(radio_feedback_exit);
gpl-2.0
aatjitra/7105u1
drivers/media/video/samsung/mali_r3p1_lsi/linux/mali_osk_profiling_internal.c
311
9332
/* * Copyright (C) 2010-2012 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. * * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "mali_kernel_common.h" #include "mali_osk.h" #include "mali_osk_mali.h" #include "mali_ukk.h" #include "mali_timestamp.h" #include "mali_osk_profiling.h" #include "mali_user_settings_db.h" typedef struct mali_profiling_entry { u64 timestamp; u32 event_id; u32 data[5]; } mali_profiling_entry; typedef enum mali_profiling_state { MALI_PROFILING_STATE_UNINITIALIZED, MALI_PROFILING_STATE_IDLE, MALI_PROFILING_STATE_RUNNING, MALI_PROFILING_STATE_RETURN, } mali_profiling_state; static _mali_osk_lock_t *lock = NULL; static mali_profiling_state prof_state = MALI_PROFILING_STATE_UNINITIALIZED; static mali_profiling_entry* profile_entries = NULL; static u32 profile_entry_count = 0; static _mali_osk_atomic_t profile_insert_index; static _mali_osk_atomic_t profile_entries_written; _mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start) { profile_entries = NULL; profile_entry_count = 0; _mali_osk_atomic_init(&profile_insert_index, 0); _mali_osk_atomic_init(&profile_entries_written, 0); lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_PROFILING); if (NULL == lock) { return _MALI_OSK_ERR_FAULT; } prof_state = MALI_PROFILING_STATE_IDLE; if (MALI_TRUE == auto_start) { u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* Use maximum buffer size */ mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE); if (_MALI_OSK_ERR_OK != _mali_osk_profiling_start(&limit)) { return 
_MALI_OSK_ERR_FAULT; } } return _MALI_OSK_ERR_OK; } void _mali_osk_profiling_term(void) { prof_state = MALI_PROFILING_STATE_UNINITIALIZED; /* wait for all elements to be completely inserted into array */ while (_mali_osk_atomic_read(&profile_insert_index) != _mali_osk_atomic_read(&profile_entries_written)) { /* do nothing */; } if (NULL != profile_entries) { _mali_osk_vfree(profile_entries); profile_entries = NULL; } if (NULL != lock) { _mali_osk_lock_term(lock); lock = NULL; } } inline _mali_osk_errcode_t _mali_osk_profiling_start(u32 * limit) { _mali_osk_errcode_t ret; mali_profiling_entry *new_profile_entries = _mali_osk_valloc(*limit * sizeof(mali_profiling_entry)); if(NULL == new_profile_entries) { return _MALI_OSK_ERR_NOMEM; } _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW); if (prof_state != MALI_PROFILING_STATE_IDLE) { _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW); _mali_osk_vfree(new_profile_entries); return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */ } if (*limit > MALI_PROFILING_MAX_BUFFER_ENTRIES) { *limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; } profile_entries = new_profile_entries; profile_entry_count = *limit; ret = _mali_timestamp_reset(); if (ret == _MALI_OSK_ERR_OK) { prof_state = MALI_PROFILING_STATE_RUNNING; } else { _mali_osk_vfree(profile_entries); profile_entries = NULL; } _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW); return ret; } inline void _mali_osk_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4) { if (prof_state == MALI_PROFILING_STATE_RUNNING) { u32 cur_index = (_mali_osk_atomic_inc_return(&profile_insert_index) - 1) % profile_entry_count; profile_entries[cur_index].timestamp = _mali_timestamp_get(); profile_entries[cur_index].event_id = event_id; profile_entries[cur_index].data[0] = data0; profile_entries[cur_index].data[1] = data1; profile_entries[cur_index].data[2] = data2; profile_entries[cur_index].data[3] = data3; 
profile_entries[cur_index].data[4] = data4; /* If event is "leave API function", add current memory usage to the event * as data point 4. This is used in timeline profiling to indicate how * much memory was used when leaving a function. */ if (event_id == (MALI_PROFILING_EVENT_TYPE_SINGLE|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_SINGLE_SW_LEAVE_API_FUNC)) { profile_entries[cur_index].data[4] = _mali_ukk_report_memory_usage(); } _mali_osk_atomic_inc(&profile_entries_written); } } inline void _mali_osk_profiling_report_hw_counter(u32 counter_id, u32 value) { /* Not implemented */ } void _mali_osk_profiling_report_sw_counters(u32 *counters) { /* Not implemented */ } inline _mali_osk_errcode_t _mali_osk_profiling_stop(u32 * count) { _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW); if (prof_state != MALI_PROFILING_STATE_RUNNING) { _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW); return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */ } /* go into return state (user to retreive events), no more events will be added after this */ prof_state = MALI_PROFILING_STATE_RETURN; _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW); /* wait for all elements to be completely inserted into array */ while (_mali_osk_atomic_read(&profile_insert_index) != _mali_osk_atomic_read(&profile_entries_written)) { /* do nothing */; } *count = _mali_osk_atomic_read(&profile_insert_index); if(*count>profile_entry_count) *count=profile_entry_count; return _MALI_OSK_ERR_OK; } inline u32 _mali_osk_profiling_get_count(void) { u32 retval = 0; _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW); if (prof_state == MALI_PROFILING_STATE_RETURN) { retval = _mali_osk_atomic_read(&profile_entries_written); if(retval>profile_entry_count) retval = profile_entry_count; } _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW); return retval; } inline _mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5]) { 
_mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW); if(index<profile_entry_count) { u32 idx = index; if(_mali_osk_atomic_read(&profile_insert_index)>=profile_entry_count) { idx = (index + _mali_osk_atomic_read(&profile_insert_index)) % profile_entry_count; } if (prof_state != MALI_PROFILING_STATE_RETURN) { _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW); return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */ } if (idx >= _mali_osk_atomic_read(&profile_entries_written)) { _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW); return _MALI_OSK_ERR_FAULT; } *timestamp = profile_entries[idx].timestamp; *event_id = profile_entries[idx].event_id; data[0] = profile_entries[idx].data[0]; data[1] = profile_entries[idx].data[1]; data[2] = profile_entries[idx].data[2]; data[3] = profile_entries[idx].data[3]; data[4] = profile_entries[idx].data[4]; } else { _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW); return _MALI_OSK_ERR_FAULT; } _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW); return _MALI_OSK_ERR_OK; } inline _mali_osk_errcode_t _mali_osk_profiling_clear(void) { _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW); if (prof_state != MALI_PROFILING_STATE_RETURN) { _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW); return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */ } prof_state = MALI_PROFILING_STATE_IDLE; profile_entry_count = 0; _mali_osk_atomic_init(&profile_insert_index, 0); _mali_osk_atomic_init(&profile_entries_written, 0); if (NULL != profile_entries) { _mali_osk_vfree(profile_entries); profile_entries = NULL; } _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW); return _MALI_OSK_ERR_OK; } mali_bool _mali_osk_profiling_is_recording(void) { return prof_state == MALI_PROFILING_STATE_RUNNING ? MALI_TRUE : MALI_FALSE; } mali_bool _mali_osk_profiling_have_recording(void) { return prof_state == MALI_PROFILING_STATE_RETURN ? 
MALI_TRUE : MALI_FALSE; } _mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args) { return _mali_osk_profiling_start(&args->limit); } _mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args) { /* Always add process and thread identificator in the first two data elements for events from user space */ _mali_osk_profiling_add_event(args->event_id, _mali_osk_get_pid(), _mali_osk_get_tid(), args->data[2], args->data[3], args->data[4]); return _MALI_OSK_ERR_OK; } _mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args) { return _mali_osk_profiling_stop(&args->count); } _mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args) { return _mali_osk_profiling_get_event(args->index, &args->timestamp, &args->event_id, args->data); } _mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args) { return _mali_osk_profiling_clear(); } _mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args) { _mali_osk_profiling_report_sw_counters(args->counters); return _MALI_OSK_ERR_OK; }
gpl-2.0
ASAZING/android_kernel_huawei_y210
drivers/mtd/maps/gpio-addr-flash.c
311
9036
/* * drivers/mtd/maps/gpio-addr-flash.c * * Handle the case where a flash device is mostly addressed using physical * line and supplemented by GPIOs. This way you can hook up say a 8MiB flash * to a 2MiB memory range and use the GPIOs to select a particular range. * * Copyright © 2000 Nicolas Pitre <nico@cam.org> * Copyright © 2005-2009 Analog Devices Inc. * * Enter bugs at http://blackfin.uclinux.org/ * * Licensed under the GPL-2 or later. */ #include <linux/gpio.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/types.h> #define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); }) #define DRIVER_NAME "gpio-addr-flash" #define PFX DRIVER_NAME ": " /** * struct async_state - keep GPIO flash state * @mtd: MTD state for this mapping * @map: MTD map state for this flash * @gpio_count: number of GPIOs used to address * @gpio_addrs: array of GPIOs to twiddle * @gpio_values: cached GPIO values * @win_size: dedicated memory size (if no GPIOs) */ struct async_state { struct mtd_info *mtd; struct map_info map; size_t gpio_count; unsigned *gpio_addrs; int *gpio_values; unsigned long win_size; }; #define gf_map_info_to_state(mi) ((struct async_state *)(mi)->map_priv_1) /** * gf_set_gpios() - set GPIO address lines to access specified flash offset * @state: GPIO flash state * @ofs: desired offset to access * * Rather than call the GPIO framework every time, cache the last-programmed * value. This speeds up sequential accesses (which are by far the most common * type). We rely on the GPIO framework to treat non-zero value as high so * that we don't have to normalize the bits. 
*/ static void gf_set_gpios(struct async_state *state, unsigned long ofs) { size_t i = 0; int value; ofs /= state->win_size; do { value = ofs & (1 << i); if (state->gpio_values[i] != value) { gpio_set_value(state->gpio_addrs[i], value); state->gpio_values[i] = value; } } while (++i < state->gpio_count); } /** * gf_read() - read a word at the specified offset * @map: MTD map state * @ofs: desired offset to read */ static map_word gf_read(struct map_info *map, unsigned long ofs) { struct async_state *state = gf_map_info_to_state(map); uint16_t word; map_word test; gf_set_gpios(state, ofs); word = readw(map->virt + (ofs % state->win_size)); test.x[0] = word; return test; } /** * gf_copy_from() - copy a chunk of data from the flash * @map: MTD map state * @to: memory to copy to * @from: flash offset to copy from * @len: how much to copy * * We rely on the MTD layer to chunk up copies such that a single request here * will not cross a window size. This allows us to only wiggle the GPIOs once * before falling back to a normal memcpy. Reading the higher layer code shows * that this is indeed the case, but add a BUG_ON() to future proof. 
*/ static void gf_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) { struct async_state *state = gf_map_info_to_state(map); gf_set_gpios(state, from); /* BUG if operation crosses the win_size */ BUG_ON(!((from + len) % state->win_size <= (from + len))); /* operation does not cross the win_size, so one shot it */ memcpy_fromio(to, map->virt + (from % state->win_size), len); } /** * gf_write() - write a word at the specified offset * @map: MTD map state * @ofs: desired offset to write */ static void gf_write(struct map_info *map, map_word d1, unsigned long ofs) { struct async_state *state = gf_map_info_to_state(map); uint16_t d; gf_set_gpios(state, ofs); d = d1.x[0]; writew(d, map->virt + (ofs % state->win_size)); } /** * gf_copy_to() - copy a chunk of data to the flash * @map: MTD map state * @to: flash offset to copy to * @from: memory to copy from * @len: how much to copy * * See gf_copy_from() caveat. */ static void gf_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) { struct async_state *state = gf_map_info_to_state(map); gf_set_gpios(state, to); /* BUG if operation crosses the win_size */ BUG_ON(!((to + len) % state->win_size <= (to + len))); /* operation does not cross the win_size, so one shot it */ memcpy_toio(map->virt + (to % state->win_size), from, len); } #ifdef CONFIG_MTD_PARTITIONS static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL }; #endif /** * gpio_flash_probe() - setup a mapping for a GPIO assisted flash * @pdev: platform device * * The platform resource layout expected looks something like: * struct mtd_partition partitions[] = { ... }; * struct physmap_flash_data flash_data = { ... }; * unsigned flash_gpios[] = { GPIO_XX, GPIO_XX, ... 
}; * struct resource flash_resource[] = { * { * .name = "cfi_probe", * .start = 0x20000000, * .end = 0x201fffff, * .flags = IORESOURCE_MEM, * }, { * .start = (unsigned long)flash_gpios, * .end = ARRAY_SIZE(flash_gpios), * .flags = IORESOURCE_IRQ, * } * }; * struct platform_device flash_device = { * .name = "gpio-addr-flash", * .dev = { .platform_data = &flash_data, }, * .num_resources = ARRAY_SIZE(flash_resource), * .resource = flash_resource, * ... * }; */ static int __devinit gpio_flash_probe(struct platform_device *pdev) { int ret; size_t i, arr_size; struct physmap_flash_data *pdata; struct resource *memory; struct resource *gpios; struct async_state *state; pdata = pdev->dev.platform_data; memory = platform_get_resource(pdev, IORESOURCE_MEM, 0); gpios = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!memory || !gpios || !gpios->end) return -EINVAL; arr_size = sizeof(int) * gpios->end; state = kzalloc(sizeof(*state) + arr_size, GFP_KERNEL); if (!state) return -ENOMEM; /* * We cast start/end to known types in the boards file, so cast * away their pointer types here to the known types (gpios->xxx). 
*/ state->gpio_count = gpios->end; state->gpio_addrs = (void *)(unsigned long)gpios->start; state->gpio_values = (void *)(state + 1); state->win_size = resource_size(memory); memset(state->gpio_values, 0xff, arr_size); state->map.name = DRIVER_NAME; state->map.read = gf_read; state->map.copy_from = gf_copy_from; state->map.write = gf_write; state->map.copy_to = gf_copy_to; state->map.bankwidth = pdata->width; state->map.size = state->win_size * (1 << state->gpio_count); state->map.virt = ioremap_nocache(memory->start, state->map.size); state->map.phys = NO_XIP; state->map.map_priv_1 = (unsigned long)state; platform_set_drvdata(pdev, state); i = 0; do { if (gpio_request(state->gpio_addrs[i], DRIVER_NAME)) { pr_devinit(KERN_ERR PFX "failed to request gpio %d\n", state->gpio_addrs[i]); while (i--) gpio_free(state->gpio_addrs[i]); kfree(state); return -EBUSY; } gpio_direction_output(state->gpio_addrs[i], 0); } while (++i < state->gpio_count); pr_devinit(KERN_NOTICE PFX "probing %d-bit flash bus\n", state->map.bankwidth * 8); state->mtd = do_map_probe(memory->name, &state->map); if (!state->mtd) { for (i = 0; i < state->gpio_count; ++i) gpio_free(state->gpio_addrs[i]); kfree(state); return -ENXIO; } #ifdef CONFIG_MTD_PARTITIONS ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0); if (ret > 0) { pr_devinit(KERN_NOTICE PFX "Using commandline partition definition\n"); add_mtd_partitions(state->mtd, pdata->parts, ret); kfree(pdata->parts); } else if (pdata->nr_parts) { pr_devinit(KERN_NOTICE PFX "Using board partition definition\n"); add_mtd_partitions(state->mtd, pdata->parts, pdata->nr_parts); } else #endif { pr_devinit(KERN_NOTICE PFX "no partition info available, registering whole flash at once\n"); add_mtd_device(state->mtd); } return 0; } static int __devexit gpio_flash_remove(struct platform_device *pdev) { struct async_state *state = platform_get_drvdata(pdev); size_t i = 0; do { gpio_free(state->gpio_addrs[i]); } while (++i < 
state->gpio_count); #ifdef CONFIG_MTD_PARTITIONS del_mtd_partitions(state->mtd); #endif map_destroy(state->mtd); kfree(state); return 0; } static struct platform_driver gpio_flash_driver = { .probe = gpio_flash_probe, .remove = __devexit_p(gpio_flash_remove), .driver = { .name = DRIVER_NAME, }, }; static int __init gpio_flash_init(void) { return platform_driver_register(&gpio_flash_driver); } module_init(gpio_flash_init); static void __exit gpio_flash_exit(void) { platform_driver_unregister(&gpio_flash_driver); } module_exit(gpio_flash_exit); MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>"); MODULE_DESCRIPTION("MTD map driver for flashes addressed physically and with gpios"); MODULE_LICENSE("GPL");
gpl-2.0
RidaShamasneh/nethunter_kernel_g5
security/integrity/ima/ima_main.c
311
9784
/* * Copyright (C) 2005,2006,2007,2008 IBM Corporation * * Authors: * Reiner Sailer <sailer@watson.ibm.com> * Serge Hallyn <serue@us.ibm.com> * Kylene Hall <kylene@us.ibm.com> * Mimi Zohar <zohar@us.ibm.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. * * File: ima_main.c * implements the IMA hooks: ima_bprm_check, ima_file_mmap, * and ima_file_check. */ #include <linux/module.h> #include <linux/file.h> #include <linux/binfmts.h> #include <linux/mount.h> #include <linux/mman.h> #include <linux/slab.h> #include <linux/xattr.h> #include <linux/ima.h> #include <crypto/hash_info.h> #include "ima.h" int ima_initialized; #ifdef CONFIG_IMA_APPRAISE int ima_appraise = IMA_APPRAISE_ENFORCE; #else int ima_appraise; #endif int ima_hash_algo = HASH_ALGO_SHA1; static int hash_setup_done; static int __init hash_setup(char *str) { struct ima_template_desc *template_desc = ima_template_desc_current(); int i; if (hash_setup_done) return 1; if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) { if (strncmp(str, "sha1", 4) == 0) ima_hash_algo = HASH_ALGO_SHA1; else if (strncmp(str, "md5", 3) == 0) ima_hash_algo = HASH_ALGO_MD5; goto out; } for (i = 0; i < HASH_ALGO__LAST; i++) { if (strcmp(str, hash_algo_name[i]) == 0) { ima_hash_algo = i; break; } } out: hash_setup_done = 1; return 1; } __setup("ima_hash=", hash_setup); /* * ima_rdwr_violation_check * * Only invalidate the PCR for measured files: * - Opening a file for write when already open for read, * results in a time of measure, time of use (ToMToU) error. * - Opening a file for read when already open for write, * could result in a file measurement error. 
* */ static void ima_rdwr_violation_check(struct file *file, struct integrity_iint_cache *iint, int must_measure, char **pathbuf, const char **pathname) { struct inode *inode = file_inode(file); fmode_t mode = file->f_mode; bool send_tomtou = false, send_writers = false; if (mode & FMODE_WRITE) { if (atomic_read(&inode->i_readcount) && IS_IMA(inode)) { if (!iint) iint = integrity_iint_find(inode); /* IMA_MEASURE is set from reader side */ if (iint && (iint->flags & IMA_MEASURE)) send_tomtou = true; } } else { if ((atomic_read(&inode->i_writecount) > 0) && must_measure) send_writers = true; } if (!send_tomtou && !send_writers) return; *pathname = ima_d_path(&file->f_path, pathbuf); if (send_tomtou) ima_add_violation(file, *pathname, "invalid_pcr", "ToMToU"); if (send_writers) ima_add_violation(file, *pathname, "invalid_pcr", "open_writers"); } static void ima_check_last_writer(struct integrity_iint_cache *iint, struct inode *inode, struct file *file) { fmode_t mode = file->f_mode; if (!(mode & FMODE_WRITE)) return; mutex_lock(&inode->i_mutex); if (atomic_read(&inode->i_writecount) == 1) { if ((iint->version != inode->i_version) || (iint->flags & IMA_NEW_FILE)) { iint->flags &= ~(IMA_DONE_MASK | IMA_NEW_FILE); if (iint->flags & IMA_APPRAISE) ima_update_xattr(iint, file); } } mutex_unlock(&inode->i_mutex); } /** * ima_file_free - called on __fput() * @file: pointer to file structure being freed * * Flag files that changed, based on i_version */ void ima_file_free(struct file *file) { struct inode *inode = file_inode(file); struct integrity_iint_cache *iint; if (!iint_initialized || !S_ISREG(inode->i_mode)) return; iint = integrity_iint_find(inode); if (!iint) return; ima_check_last_writer(iint, inode, file); } static int process_measurement(struct file *file, int mask, int function, int opened) { struct inode *inode = file_inode(file); struct integrity_iint_cache *iint = NULL; struct ima_template_desc *template_desc; char *pathbuf = NULL; const char *pathname = NULL; 
int rc = -ENOMEM, action, must_appraise; struct evm_ima_xattr_data *xattr_value = NULL, **xattr_ptr = NULL; int xattr_len = 0; bool violation_check; if (!ima_policy_flag || !S_ISREG(inode->i_mode)) return 0; /* Return an IMA_MEASURE, IMA_APPRAISE, IMA_AUDIT action * bitmask based on the appraise/audit/measurement policy. * Included is the appraise submask. */ action = ima_get_action(inode, mask, function); violation_check = ((function == FILE_CHECK || function == MMAP_CHECK) && (ima_policy_flag & IMA_MEASURE)); if (!action && !violation_check) return 0; must_appraise = action & IMA_APPRAISE; /* Is the appraise rule hook specific? */ if (action & IMA_FILE_APPRAISE) function = FILE_CHECK; mutex_lock(&inode->i_mutex); if (action) { iint = integrity_inode_get(inode); if (!iint) goto out; } if (violation_check) { ima_rdwr_violation_check(file, iint, action & IMA_MEASURE, &pathbuf, &pathname); if (!action) { rc = 0; goto out_free; } } /* Determine if already appraised/measured based on bitmask * (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED, * IMA_AUDIT, IMA_AUDITED) */ iint->flags |= action; action &= IMA_DO_MASK; action &= ~((iint->flags & IMA_DONE_MASK) >> 1); /* Nothing to do, just return existing appraised status */ if (!action) { if (must_appraise) rc = ima_get_cache_status(iint, function); goto out_digsig; } template_desc = ima_template_desc_current(); if ((action & IMA_APPRAISE_SUBMASK) || strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) != 0) xattr_ptr = &xattr_value; rc = ima_collect_measurement(iint, file, xattr_ptr, &xattr_len); if (rc != 0) { if (file->f_flags & O_DIRECT) rc = (iint->flags & IMA_PERMIT_DIRECTIO) ? 
0 : -EACCES; goto out_digsig; } if (!pathname) /* ima_rdwr_violation possibly pre-fetched */ pathname = ima_d_path(&file->f_path, &pathbuf); if (action & IMA_MEASURE) ima_store_measurement(iint, file, pathname, xattr_value, xattr_len); if (action & IMA_APPRAISE_SUBMASK) rc = ima_appraise_measurement(function, iint, file, pathname, xattr_value, xattr_len, opened); if (action & IMA_AUDIT) ima_audit_measurement(iint, pathname); out_digsig: if ((mask & MAY_WRITE) && (iint->flags & IMA_DIGSIG)) rc = -EACCES; kfree(xattr_value); out_free: kfree(pathbuf); out: mutex_unlock(&inode->i_mutex); if ((rc && must_appraise) && (ima_appraise & IMA_APPRAISE_ENFORCE)) return -EACCES; return 0; } /** * ima_file_mmap - based on policy, collect/store measurement. * @file: pointer to the file to be measured (May be NULL) * @prot: contains the protection that will be applied by the kernel. * * Measure files being mmapped executable based on the ima_must_measure() * policy decision. * * On success return 0. On integrity appraisal error, assuming the file * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. */ int ima_file_mmap(struct file *file, unsigned long prot) { if (file && (prot & PROT_EXEC)) return process_measurement(file, MAY_EXEC, MMAP_CHECK, 0); return 0; } /** * ima_bprm_check - based on policy, collect/store measurement. * @bprm: contains the linux_binprm structure * * The OS protects against an executable file, already open for write, * from being executed in deny_write_access() and an executable file, * already open for execute, from being modified in get_write_access(). * So we can be certain that what we verify and measure here is actually * what is being executed. * * On success return 0. On integrity appraisal error, assuming the file * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. 
*/ int ima_bprm_check(struct linux_binprm *bprm) { return process_measurement(bprm->file, MAY_EXEC, BPRM_CHECK, 0); } /** * ima_path_check - based on policy, collect/store measurement. * @file: pointer to the file to be measured * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE * * Measure files based on the ima_must_measure() policy decision. * * On success return 0. On integrity appraisal error, assuming the file * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. */ int ima_file_check(struct file *file, int mask, int opened) { return process_measurement(file, mask & (MAY_READ | MAY_WRITE | MAY_EXEC), FILE_CHECK, opened); } EXPORT_SYMBOL_GPL(ima_file_check); /** * ima_module_check - based on policy, collect/store/appraise measurement. * @file: pointer to the file to be measured/appraised * * Measure/appraise kernel modules based on policy. * * On success return 0. On integrity appraisal error, assuming the file * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. */ int ima_module_check(struct file *file) { if (!file) { #ifndef CONFIG_MODULE_SIG_FORCE if ((ima_appraise & IMA_APPRAISE_MODULES) && (ima_appraise & IMA_APPRAISE_ENFORCE)) return -EACCES; /* INTEGRITY_UNKNOWN */ #endif return 0; /* We rely on module signature checking */ } return process_measurement(file, MAY_EXEC, MODULE_CHECK, 0); } int ima_fw_from_file(struct file *file, char *buf, size_t size) { if (!file) { if ((ima_appraise & IMA_APPRAISE_FIRMWARE) && (ima_appraise & IMA_APPRAISE_ENFORCE)) return -EACCES; /* INTEGRITY_UNKNOWN */ return 0; } return process_measurement(file, MAY_EXEC, FIRMWARE_CHECK, 0); } static int __init init_ima(void) { int error; hash_setup(CONFIG_IMA_DEFAULT_HASH); error = ima_init(); if (!error) { ima_initialized = 1; ima_update_policy_flag(); } return error; } late_initcall(init_ima); /* Start IMA after the TPM is available */ MODULE_DESCRIPTION("Integrity Measurement Architecture"); MODULE_LICENSE("GPL");
gpl-2.0
HCDRJacob/htc-kernel-wildfire-old
drivers/usb/gadget/config.c
567
5771
/* * usb/gadget/config.c -- simplify building config descriptors * * Copyright (C) 2003 David Brownell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/string.h> #include <linux/device.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> /** * usb_descriptor_fillbuf - fill buffer with descriptors * @buf: Buffer to be filled * @buflen: Size of buf * @src: Array of descriptor pointers, terminated by null pointer. * * Copies descriptors into the buffer, returning the length or a * negative error code if they can't all be copied. Useful when * assembling descriptors for an associated set of interfaces used * as part of configuring a composite device; or in other cases where * sets of descriptors need to be marshaled. 
*/ int usb_descriptor_fillbuf(void *buf, unsigned buflen, const struct usb_descriptor_header **src) { u8 *dest = buf; if (!src) return -EINVAL; /* fill buffer from src[] until null descriptor ptr */ for (; NULL != *src; src++) { unsigned len = (*src)->bLength; if (len > buflen) return -EINVAL; memcpy(dest, *src, len); buflen -= len; dest += len; } return dest - (u8 *)buf; } /** * usb_gadget_config_buf - builts a complete configuration descriptor * @config: Header for the descriptor, including characteristics such * as power requirements and number of interfaces. * @desc: Null-terminated vector of pointers to the descriptors (interface, * endpoint, etc) defining all functions in this device configuration. * @buf: Buffer for the resulting configuration descriptor. * @length: Length of buffer. If this is not big enough to hold the * entire configuration descriptor, an error code will be returned. * * This copies descriptors into the response buffer, building a descriptor * for that configuration. It returns the buffer length or a negative * status code. The config.wTotalLength field is set to match the length * of the result, but other descriptor fields (including power usage and * interface count) must be set by the caller. * * Gadget drivers could use this when constructing a config descriptor * in response to USB_REQ_GET_DESCRIPTOR. They will need to patch the * resulting bDescriptorType value if USB_DT_OTHER_SPEED_CONFIG is needed. */ int usb_gadget_config_buf( const struct usb_config_descriptor *config, void *buf, unsigned length, const struct usb_descriptor_header **desc ) { struct usb_config_descriptor *cp = buf; int len; /* config descriptor first */ if (length < USB_DT_CONFIG_SIZE || !desc) return -EINVAL; *cp = *config; /* then interface/endpoint/class/vendor/... 
*/ len = usb_descriptor_fillbuf(USB_DT_CONFIG_SIZE + (u8*)buf, length - USB_DT_CONFIG_SIZE, desc); if (len < 0) return len; len += USB_DT_CONFIG_SIZE; if (len > 0xffff) return -EINVAL; /* patch up the config descriptor */ cp->bLength = USB_DT_CONFIG_SIZE; cp->bDescriptorType = USB_DT_CONFIG; cp->wTotalLength = cpu_to_le16(len); cp->bmAttributes |= USB_CONFIG_ATT_ONE; return len; } /** * usb_copy_descriptors - copy a vector of USB descriptors * @src: null-terminated vector to copy * Context: initialization code, which may sleep * * This makes a copy of a vector of USB descriptors. Its primary use * is to support usb_function objects which can have multiple copies, * each needing different descriptors. Functions may have static * tables of descriptors, which are used as templates and customized * with identifiers (for interfaces, strings, endpoints, and more) * as needed by a given function instance. */ struct usb_descriptor_header **__init usb_copy_descriptors(struct usb_descriptor_header **src) { struct usb_descriptor_header **tmp; unsigned bytes; unsigned n_desc; void *mem; struct usb_descriptor_header **ret; /* count descriptors and their sizes; then add vector size */ for (bytes = 0, n_desc = 0, tmp = src; *tmp; tmp++, n_desc++) bytes += (*tmp)->bLength; bytes += (n_desc + 1) * sizeof(*tmp); mem = kmalloc(bytes, GFP_KERNEL); if (!mem) return NULL; /* fill in pointers starting at "tmp", * to descriptors copied starting at "mem"; * and return "ret" */ tmp = mem; ret = mem; mem += (n_desc + 1) * sizeof(*tmp); while (*src) { memcpy(mem, *src, (*src)->bLength); *tmp = mem; tmp++; mem += (*src)->bLength; src++; } *tmp = NULL; return ret; } /** * usb_find_endpoint - find a copy of an endpoint descriptor * @src: original vector of descriptors * @copy: copy of @src * @match: endpoint descriptor found in @src * * This returns the copy of the @match descriptor made for @copy. 
Its * intended use is to help remembering the endpoint descriptor to use * when enabling a given endpoint. */ struct usb_endpoint_descriptor *__init usb_find_endpoint( struct usb_descriptor_header **src, struct usb_descriptor_header **copy, struct usb_endpoint_descriptor *match ) { while (*src) { if (*src == (void *) match) return (void *)*copy; src++; copy++; } return NULL; }
gpl-2.0
EnJens/android_kernel_asus_grouper
sound/pci/asihpi/hpifunc.c
567
71471
#include "hpi_internal.h" #include "hpimsginit.h" #include "hpidebug.h" struct hpi_handle { unsigned int obj_index:12; unsigned int obj_type:4; unsigned int adapter_index:14; unsigned int spare:1; unsigned int read_only:1; }; union handle_word { struct hpi_handle h; u32 w; }; u32 hpi_indexes_to_handle(const char c_object, const u16 adapter_index, const u16 object_index) { union handle_word handle; handle.h.adapter_index = adapter_index; handle.h.spare = 0; handle.h.read_only = 0; handle.h.obj_type = c_object; handle.h.obj_index = object_index; return handle.w; } static u16 hpi_handle_indexes(const u32 h, u16 *p1, u16 *p2) { union handle_word uhandle; if (!h) return HPI_ERROR_INVALID_HANDLE; uhandle.w = h; *p1 = (u16)uhandle.h.adapter_index; if (p2) *p2 = (u16)uhandle.h.obj_index; return 0; } void hpi_handle_to_indexes(const u32 handle, u16 *pw_adapter_index, u16 *pw_object_index) { hpi_handle_indexes(handle, pw_adapter_index, pw_object_index); } char hpi_handle_object(const u32 handle) { union handle_word uhandle; uhandle.w = handle; return (char)uhandle.h.obj_type; } void hpi_format_to_msg(struct hpi_msg_format *pMF, const struct hpi_format *pF) { pMF->sample_rate = pF->sample_rate; pMF->bit_rate = pF->bit_rate; pMF->attributes = pF->attributes; pMF->channels = pF->channels; pMF->format = pF->format; } static void hpi_msg_to_format(struct hpi_format *pF, struct hpi_msg_format *pMF) { pF->sample_rate = pMF->sample_rate; pF->bit_rate = pMF->bit_rate; pF->attributes = pMF->attributes; pF->channels = pMF->channels; pF->format = pMF->format; pF->mode_legacy = 0; pF->unused = 0; } void hpi_stream_response_to_legacy(struct hpi_stream_res *pSR) { pSR->u.legacy_stream_info.auxiliary_data_available = pSR->u.stream_info.auxiliary_data_available; pSR->u.legacy_stream_info.state = pSR->u.stream_info.state; } static inline void hpi_send_recvV1(struct hpi_message_header *m, struct hpi_response_header *r) { hpi_send_recv((struct hpi_message *)m, (struct hpi_response *)r); } u16 
hpi_subsys_get_version_ex(u32 *pversion_ex) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_GET_VERSION); hpi_send_recv(&hm, &hr); *pversion_ex = hr.u.s.data; return hr.error; } u16 hpi_subsys_get_num_adapters(int *pn_num_adapters) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_GET_NUM_ADAPTERS); hpi_send_recv(&hm, &hr); *pn_num_adapters = (int)hr.u.s.num_adapters; return hr.error; } u16 hpi_subsys_get_adapter(int iterator, u32 *padapter_index, u16 *pw_adapter_type) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_GET_ADAPTER); hm.obj_index = (u16)iterator; hpi_send_recv(&hm, &hr); *padapter_index = (int)hr.u.s.adapter_index; *pw_adapter_type = hr.u.s.adapter_type; return hr.error; } u16 hpi_adapter_open(u16 adapter_index) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER, HPI_ADAPTER_OPEN); hm.adapter_index = adapter_index; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_adapter_close(u16 adapter_index) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER, HPI_ADAPTER_CLOSE); hm.adapter_index = adapter_index; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_adapter_set_mode(u16 adapter_index, u32 adapter_mode) { return hpi_adapter_set_mode_ex(adapter_index, adapter_mode, HPI_ADAPTER_MODE_SET); } u16 hpi_adapter_set_mode_ex(u16 adapter_index, u32 adapter_mode, u16 query_or_set) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER, HPI_ADAPTER_SET_MODE); hm.adapter_index = adapter_index; hm.u.ax.mode.adapter_mode = adapter_mode; hm.u.ax.mode.query_or_set = query_or_set; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_adapter_get_mode(u16 adapter_index, u32 *padapter_mode) { struct hpi_message hm; struct 
hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER, HPI_ADAPTER_GET_MODE); hm.adapter_index = adapter_index; hpi_send_recv(&hm, &hr); if (padapter_mode) *padapter_mode = hr.u.ax.mode.adapter_mode; return hr.error; } u16 hpi_adapter_get_info(u16 adapter_index, u16 *pw_num_outstreams, u16 *pw_num_instreams, u16 *pw_version, u32 *pserial_number, u16 *pw_adapter_type) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER, HPI_ADAPTER_GET_INFO); hm.adapter_index = adapter_index; hpi_send_recv(&hm, &hr); *pw_adapter_type = hr.u.ax.info.adapter_type; *pw_num_outstreams = hr.u.ax.info.num_outstreams; *pw_num_instreams = hr.u.ax.info.num_instreams; *pw_version = hr.u.ax.info.version; *pserial_number = hr.u.ax.info.serial_number; return hr.error; } u16 hpi_adapter_get_module_by_index(u16 adapter_index, u16 module_index, u16 *pw_num_outputs, u16 *pw_num_inputs, u16 *pw_version, u32 *pserial_number, u16 *pw_module_type, u32 *ph_module) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER, HPI_ADAPTER_MODULE_INFO); hm.adapter_index = adapter_index; hm.u.ax.module_info.index = module_index; hpi_send_recv(&hm, &hr); *pw_module_type = hr.u.ax.info.adapter_type; *pw_num_outputs = hr.u.ax.info.num_outstreams; *pw_num_inputs = hr.u.ax.info.num_instreams; *pw_version = hr.u.ax.info.version; *pserial_number = hr.u.ax.info.serial_number; *ph_module = 0; return hr.error; } u16 hpi_adapter_set_property(u16 adapter_index, u16 property, u16 parameter1, u16 parameter2) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER, HPI_ADAPTER_SET_PROPERTY); hm.adapter_index = adapter_index; hm.u.ax.property_set.property = property; hm.u.ax.property_set.parameter1 = parameter1; hm.u.ax.property_set.parameter2 = parameter2; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_adapter_get_property(u16 adapter_index, u16 property, u16 
*pw_parameter1, u16 *pw_parameter2) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER, HPI_ADAPTER_GET_PROPERTY); hm.adapter_index = adapter_index; hm.u.ax.property_set.property = property; hpi_send_recv(&hm, &hr); if (!hr.error) { if (pw_parameter1) *pw_parameter1 = hr.u.ax.property_get.parameter1; if (pw_parameter2) *pw_parameter2 = hr.u.ax.property_get.parameter2; } return hr.error; } u16 hpi_adapter_enumerate_property(u16 adapter_index, u16 index, u16 what_to_enumerate, u16 property_index, u32 *psetting) { return 0; } u16 hpi_format_create(struct hpi_format *p_format, u16 channels, u16 format, u32 sample_rate, u32 bit_rate, u32 attributes) { u16 err = 0; struct hpi_msg_format fmt; switch (channels) { case 1: case 2: case 4: case 6: case 8: case 16: break; default: err = HPI_ERROR_INVALID_CHANNELS; return err; } fmt.channels = channels; switch (format) { case HPI_FORMAT_PCM16_SIGNED: case HPI_FORMAT_PCM24_SIGNED: case HPI_FORMAT_PCM32_SIGNED: case HPI_FORMAT_PCM32_FLOAT: case HPI_FORMAT_PCM16_BIGENDIAN: case HPI_FORMAT_PCM8_UNSIGNED: case HPI_FORMAT_MPEG_L1: case HPI_FORMAT_MPEG_L2: case HPI_FORMAT_MPEG_L3: case HPI_FORMAT_DOLBY_AC2: case HPI_FORMAT_AA_TAGIT1_HITS: case HPI_FORMAT_AA_TAGIT1_INSERTS: case HPI_FORMAT_RAW_BITSTREAM: case HPI_FORMAT_AA_TAGIT1_HITS_EX1: case HPI_FORMAT_OEM1: case HPI_FORMAT_OEM2: break; default: err = HPI_ERROR_INVALID_FORMAT; return err; } fmt.format = format; if (sample_rate < 8000L) { err = HPI_ERROR_INCOMPATIBLE_SAMPLERATE; sample_rate = 8000L; } if (sample_rate > 200000L) { err = HPI_ERROR_INCOMPATIBLE_SAMPLERATE; sample_rate = 200000L; } fmt.sample_rate = sample_rate; switch (format) { case HPI_FORMAT_MPEG_L1: case HPI_FORMAT_MPEG_L2: case HPI_FORMAT_MPEG_L3: fmt.bit_rate = bit_rate; break; case HPI_FORMAT_PCM16_SIGNED: case HPI_FORMAT_PCM16_BIGENDIAN: fmt.bit_rate = channels * sample_rate * 2; break; case HPI_FORMAT_PCM32_SIGNED: case HPI_FORMAT_PCM32_FLOAT: fmt.bit_rate = 
channels * sample_rate * 4; break; case HPI_FORMAT_PCM8_UNSIGNED: fmt.bit_rate = channels * sample_rate; break; default: fmt.bit_rate = 0; } switch (format) { case HPI_FORMAT_MPEG_L2: if ((channels == 1) && (attributes != HPI_MPEG_MODE_DEFAULT)) { attributes = HPI_MPEG_MODE_DEFAULT; err = HPI_ERROR_INVALID_FORMAT; } else if (attributes > HPI_MPEG_MODE_DUALCHANNEL) { attributes = HPI_MPEG_MODE_DEFAULT; err = HPI_ERROR_INVALID_FORMAT; } fmt.attributes = attributes; break; default: fmt.attributes = attributes; } hpi_msg_to_format(p_format, &fmt); return err; } u16 hpi_stream_estimate_buffer_size(struct hpi_format *p_format, u32 host_polling_rate_in_milli_seconds, u32 *recommended_buffer_size) { u32 bytes_per_second; u32 size; u16 channels; struct hpi_format *pF = p_format; channels = pF->channels; switch (pF->format) { case HPI_FORMAT_PCM16_BIGENDIAN: case HPI_FORMAT_PCM16_SIGNED: bytes_per_second = pF->sample_rate * 2L * channels; break; case HPI_FORMAT_PCM24_SIGNED: bytes_per_second = pF->sample_rate * 3L * channels; break; case HPI_FORMAT_PCM32_SIGNED: case HPI_FORMAT_PCM32_FLOAT: bytes_per_second = pF->sample_rate * 4L * channels; break; case HPI_FORMAT_PCM8_UNSIGNED: bytes_per_second = pF->sample_rate * 1L * channels; break; case HPI_FORMAT_MPEG_L1: case HPI_FORMAT_MPEG_L2: case HPI_FORMAT_MPEG_L3: bytes_per_second = pF->bit_rate / 8L; break; case HPI_FORMAT_DOLBY_AC2: bytes_per_second = 256000L / 8L; break; default: return HPI_ERROR_INVALID_FORMAT; } size = (bytes_per_second * host_polling_rate_in_milli_seconds * 2) / 1000L; *recommended_buffer_size = roundup_pow_of_two(((size + 4095L) & ~4095L)); return 0; } u16 hpi_outstream_open(u16 adapter_index, u16 outstream_index, u32 *ph_outstream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_OPEN); hm.adapter_index = adapter_index; hm.obj_index = outstream_index; hpi_send_recv(&hm, &hr); if (hr.error == 0) *ph_outstream = 
hpi_indexes_to_handle(HPI_OBJ_OSTREAM, adapter_index, outstream_index); else *ph_outstream = 0; return hr.error; } u16 hpi_outstream_close(u32 h_outstream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_HOSTBUFFER_FREE); if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_GROUP_RESET); hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index); hpi_send_recv(&hm, &hr); hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_CLOSE); hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index); hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_outstream_get_info_ex(u32 h_outstream, u16 *pw_state, u32 *pbuffer_size, u32 *pdata_to_play, u32 *psamples_played, u32 *pauxiliary_data_to_play) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_GET_INFO); if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); if (pw_state) *pw_state = hr.u.d.u.stream_info.state; if (pbuffer_size) *pbuffer_size = hr.u.d.u.stream_info.buffer_size; if (pdata_to_play) *pdata_to_play = hr.u.d.u.stream_info.data_available; if (psamples_played) *psamples_played = hr.u.d.u.stream_info.samples_transferred; if (pauxiliary_data_to_play) *pauxiliary_data_to_play = hr.u.d.u.stream_info.auxiliary_data_available; return hr.error; } u16 hpi_outstream_write_buf(u32 h_outstream, const u8 *pb_data, u32 bytes_to_write, const struct hpi_format *p_format) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_WRITE); if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hm.u.d.u.data.pb_data = (u8 *)pb_data; hm.u.d.u.data.data_size = 
bytes_to_write; hpi_format_to_msg(&hm.u.d.u.data.format, p_format); hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_outstream_start(u32 h_outstream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_START); if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_outstream_wait_start(u32 h_outstream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_WAIT_START); if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_outstream_stop(u32 h_outstream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_STOP); if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_outstream_sinegen(u32 h_outstream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_SINEGEN); if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_outstream_reset(u32 h_outstream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_RESET); if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_outstream_query_format(u32 h_outstream, struct hpi_format *p_format) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_QUERY_FORMAT); if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; 
hpi_format_to_msg(&hm.u.d.u.data.format, p_format); hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_outstream_set_format(u32 h_outstream, struct hpi_format *p_format) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_SET_FORMAT); if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_format_to_msg(&hm.u.d.u.data.format, p_format); hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_outstream_set_velocity(u32 h_outstream, short velocity) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_SET_VELOCITY); if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hm.u.d.u.velocity = velocity; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_outstream_set_punch_in_out(u32 h_outstream, u32 punch_in_sample, u32 punch_out_sample) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_SET_PUNCHINOUT); if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hm.u.d.u.pio.punch_in_sample = punch_in_sample; hm.u.d.u.pio.punch_out_sample = punch_out_sample; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_outstream_ancillary_reset(u32 h_outstream, u16 mode) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_ANC_RESET); if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hm.u.d.u.data.format.channels = mode; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_outstream_ancillary_get_info(u32 h_outstream, u32 *pframes_available) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_ANC_GET_INFO); if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index)) return 
HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); if (hr.error == 0) { if (pframes_available) *pframes_available = hr.u.d.u.stream_info.data_available / sizeof(struct hpi_anc_frame); } return hr.error; } u16 hpi_outstream_ancillary_read(u32 h_outstream, struct hpi_anc_frame *p_anc_frame_buffer, u32 anc_frame_buffer_size_in_bytes, u32 number_of_ancillary_frames_to_read) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_ANC_READ); if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hm.u.d.u.data.pb_data = (u8 *)p_anc_frame_buffer; hm.u.d.u.data.data_size = number_of_ancillary_frames_to_read * sizeof(struct hpi_anc_frame); if (hm.u.d.u.data.data_size <= anc_frame_buffer_size_in_bytes) hpi_send_recv(&hm, &hr); else hr.error = HPI_ERROR_INVALID_DATASIZE; return hr.error; } u16 hpi_outstream_set_time_scale(u32 h_outstream, u32 time_scale) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_SET_TIMESCALE); if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hm.u.d.u.time_scale = time_scale; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_outstream_host_buffer_allocate(u32 h_outstream, u32 size_in_bytes) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_HOSTBUFFER_ALLOC); if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hm.u.d.u.data.data_size = size_in_bytes; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_outstream_host_buffer_get_info(u32 h_outstream, u8 **pp_buffer, struct hpi_hostbuffer_status **pp_status) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_HOSTBUFFER_GET_INFO); if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index)) return 
HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); if (hr.error == 0) { if (pp_buffer) *pp_buffer = hr.u.d.u.hostbuffer_info.p_buffer; if (pp_status) *pp_status = hr.u.d.u.hostbuffer_info.p_status; } return hr.error; } u16 hpi_outstream_host_buffer_free(u32 h_outstream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_HOSTBUFFER_FREE); if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_outstream_group_add(u32 h_outstream, u32 h_stream) { struct hpi_message hm; struct hpi_response hr; u16 adapter; char c_obj_type; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_GROUP_ADD); if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; if (hpi_handle_indexes(h_stream, &adapter, &hm.u.d.u.stream.stream_index)) return HPI_ERROR_INVALID_HANDLE; c_obj_type = hpi_handle_object(h_stream); switch (c_obj_type) { case HPI_OBJ_OSTREAM: case HPI_OBJ_ISTREAM: hm.u.d.u.stream.object_type = c_obj_type; break; default: return HPI_ERROR_INVALID_OBJ; } if (adapter != hm.adapter_index) return HPI_ERROR_NO_INTERADAPTER_GROUPS; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_outstream_group_get_map(u32 h_outstream, u32 *poutstream_map, u32 *pinstream_map) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_GROUP_GETMAP); if (hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); if (poutstream_map) *poutstream_map = hr.u.d.u.group_info.outstream_group_map; if (pinstream_map) *pinstream_map = hr.u.d.u.group_info.instream_group_map; return hr.error; } u16 hpi_outstream_group_reset(u32 h_outstream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM, HPI_OSTREAM_GROUP_RESET); if 
(hpi_handle_indexes(h_outstream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_open(u16 adapter_index, u16 instream_index, u32 *ph_instream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_OPEN); hm.adapter_index = adapter_index; hm.obj_index = instream_index; hpi_send_recv(&hm, &hr); if (hr.error == 0) *ph_instream = hpi_indexes_to_handle(HPI_OBJ_ISTREAM, adapter_index, instream_index); else *ph_instream = 0; return hr.error; } u16 hpi_instream_close(u32 h_instream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_HOSTBUFFER_FREE); if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_GROUP_RESET); hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index); hpi_send_recv(&hm, &hr); hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_CLOSE); hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index); hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_query_format(u32 h_instream, const struct hpi_format *p_format) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_QUERY_FORMAT); if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_format_to_msg(&hm.u.d.u.data.format, p_format); hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_set_format(u32 h_instream, const struct hpi_format *p_format) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_SET_FORMAT); if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_format_to_msg(&hm.u.d.u.data.format, 
p_format); hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_read_buf(u32 h_instream, u8 *pb_data, u32 bytes_to_read) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_READ); if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hm.u.d.u.data.data_size = bytes_to_read; hm.u.d.u.data.pb_data = pb_data; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_start(u32 h_instream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_START); if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_wait_start(u32 h_instream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_WAIT_START); if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_stop(u32 h_instream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_STOP); if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_reset(u32 h_instream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_RESET); if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_get_info_ex(u32 h_instream, u16 *pw_state, u32 *pbuffer_size, u32 *pdata_recorded, u32 *psamples_recorded, u32 *pauxiliary_data_recorded) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_GET_INFO); if 
(hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); if (pw_state) *pw_state = hr.u.d.u.stream_info.state; if (pbuffer_size) *pbuffer_size = hr.u.d.u.stream_info.buffer_size; if (pdata_recorded) *pdata_recorded = hr.u.d.u.stream_info.data_available; if (psamples_recorded) *psamples_recorded = hr.u.d.u.stream_info.samples_transferred; if (pauxiliary_data_recorded) *pauxiliary_data_recorded = hr.u.d.u.stream_info.auxiliary_data_available; return hr.error; } u16 hpi_instream_ancillary_reset(u32 h_instream, u16 bytes_per_frame, u16 mode, u16 alignment, u16 idle_bit) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_ANC_RESET); if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hm.u.d.u.data.format.attributes = bytes_per_frame; hm.u.d.u.data.format.format = (mode << 8) | (alignment & 0xff); hm.u.d.u.data.format.channels = idle_bit; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_ancillary_get_info(u32 h_instream, u32 *pframe_space) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_ANC_GET_INFO); if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); if (pframe_space) *pframe_space = (hr.u.d.u.stream_info.buffer_size - hr.u.d.u.stream_info.data_available) / sizeof(struct hpi_anc_frame); return hr.error; } u16 hpi_instream_ancillary_write(u32 h_instream, const struct hpi_anc_frame *p_anc_frame_buffer, u32 anc_frame_buffer_size_in_bytes, u32 number_of_ancillary_frames_to_write) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_ANC_WRITE); if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hm.u.d.u.data.pb_data = (u8 
*)p_anc_frame_buffer; hm.u.d.u.data.data_size = number_of_ancillary_frames_to_write * sizeof(struct hpi_anc_frame); if (hm.u.d.u.data.data_size <= anc_frame_buffer_size_in_bytes) hpi_send_recv(&hm, &hr); else hr.error = HPI_ERROR_INVALID_DATASIZE; return hr.error; } u16 hpi_instream_host_buffer_allocate(u32 h_instream, u32 size_in_bytes) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_HOSTBUFFER_ALLOC); if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hm.u.d.u.data.data_size = size_in_bytes; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_host_buffer_get_info(u32 h_instream, u8 **pp_buffer, struct hpi_hostbuffer_status **pp_status) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_HOSTBUFFER_GET_INFO); if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); if (hr.error == 0) { if (pp_buffer) *pp_buffer = hr.u.d.u.hostbuffer_info.p_buffer; if (pp_status) *pp_status = hr.u.d.u.hostbuffer_info.p_status; } return hr.error; } u16 hpi_instream_host_buffer_free(u32 h_instream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_HOSTBUFFER_FREE); if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_group_add(u32 h_instream, u32 h_stream) { struct hpi_message hm; struct hpi_response hr; u16 adapter; char c_obj_type; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_GROUP_ADD); hr.error = 0; if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; if (hpi_handle_indexes(h_stream, &adapter, &hm.u.d.u.stream.stream_index)) return HPI_ERROR_INVALID_HANDLE; c_obj_type = 
hpi_handle_object(h_stream); switch (c_obj_type) { case HPI_OBJ_OSTREAM: case HPI_OBJ_ISTREAM: hm.u.d.u.stream.object_type = c_obj_type; break; default: return HPI_ERROR_INVALID_OBJ; } if (adapter != hm.adapter_index) return HPI_ERROR_NO_INTERADAPTER_GROUPS; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_instream_group_get_map(u32 h_instream, u32 *poutstream_map, u32 *pinstream_map) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_HOSTBUFFER_FREE); if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); if (poutstream_map) *poutstream_map = hr.u.d.u.group_info.outstream_group_map; if (pinstream_map) *pinstream_map = hr.u.d.u.group_info.instream_group_map; return hr.error; } u16 hpi_instream_group_reset(u32 h_instream) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM, HPI_ISTREAM_GROUP_RESET); if (hpi_handle_indexes(h_instream, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_mixer_open(u16 adapter_index, u32 *ph_mixer) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN); hm.adapter_index = adapter_index; hpi_send_recv(&hm, &hr); if (hr.error == 0) *ph_mixer = hpi_indexes_to_handle(HPI_OBJ_MIXER, adapter_index, 0); else *ph_mixer = 0; return hr.error; } u16 hpi_mixer_close(u32 h_mixer) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_CLOSE); if (hpi_handle_indexes(h_mixer, &hm.adapter_index, NULL)) return HPI_ERROR_INVALID_HANDLE; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_mixer_get_control(u32 h_mixer, u16 src_node_type, u16 src_node_type_index, u16 dst_node_type, u16 dst_node_type_index, u16 control_type, u32 *ph_control) { struct hpi_message hm; struct hpi_response hr; 
hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_GET_CONTROL); if (hpi_handle_indexes(h_mixer, &hm.adapter_index, NULL)) return HPI_ERROR_INVALID_HANDLE; hm.u.m.node_type1 = src_node_type; hm.u.m.node_index1 = src_node_type_index; hm.u.m.node_type2 = dst_node_type; hm.u.m.node_index2 = dst_node_type_index; hm.u.m.control_type = control_type; hpi_send_recv(&hm, &hr); if (hr.error == 0) *ph_control = hpi_indexes_to_handle(HPI_OBJ_CONTROL, hm.adapter_index, hr.u.m.control_index); else *ph_control = 0; return hr.error; } u16 hpi_mixer_get_control_by_index(u32 h_mixer, u16 control_index, u16 *pw_src_node_type, u16 *pw_src_node_index, u16 *pw_dst_node_type, u16 *pw_dst_node_index, u16 *pw_control_type, u32 *ph_control) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_GET_CONTROL_BY_INDEX); if (hpi_handle_indexes(h_mixer, &hm.adapter_index, NULL)) return HPI_ERROR_INVALID_HANDLE; hm.u.m.control_index = control_index; hpi_send_recv(&hm, &hr); if (pw_src_node_type) { *pw_src_node_type = hr.u.m.src_node_type + HPI_SOURCENODE_NONE; *pw_src_node_index = hr.u.m.src_node_index; *pw_dst_node_type = hr.u.m.dst_node_type + HPI_DESTNODE_NONE; *pw_dst_node_index = hr.u.m.dst_node_index; } if (pw_control_type) *pw_control_type = hr.u.m.control_index; if (ph_control) { if (hr.error == 0) *ph_control = hpi_indexes_to_handle(HPI_OBJ_CONTROL, hm.adapter_index, control_index); else *ph_control = 0; } return hr.error; } u16 hpi_mixer_store(u32 h_mixer, enum HPI_MIXER_STORE_COMMAND command, u16 index) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_STORE); if (hpi_handle_indexes(h_mixer, &hm.adapter_index, NULL)) return HPI_ERROR_INVALID_HANDLE; hm.u.mx.store.command = command; hm.u.mx.store.index = index; hpi_send_recv(&hm, &hr); return hr.error; } static u16 hpi_control_param_set(const u32 h_control, const u16 attrib, const u32 param1, const u32 param2) { 
	/* (body of hpi_control_param_set) */
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_SET_STATE);
	if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
		return HPI_ERROR_INVALID_HANDLE;
	hm.u.c.attribute = attrib;
	hm.u.c.param1 = param1;
	hm.u.c.param2 = param2;
	hpi_send_recv(&hm, &hr);
	return hr.error;
}

/* Generic helper: set a control attribute with two log (short) values. */
static u16 hpi_control_log_set2(u32 h_control, u16 attrib, short sv0,
	short sv1)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_SET_STATE);
	if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
		return HPI_ERROR_INVALID_HANDLE;
	hm.u.c.attribute = attrib;
	hm.u.c.an_log_value[0] = sv0;
	hm.u.c.an_log_value[1] = sv1;
	hpi_send_recv(&hm, &hr);
	return hr.error;
}

/* Generic helper: query a control attribute.  param1/param2 are inputs;
 * *pparam1 is mandatory output, *pparam2 optional. */
static u16 hpi_control_param_get(const u32 h_control, const u16 attrib,
	u32 param1, u32 param2, u32 *pparam1, u32 *pparam2)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_GET_STATE);
	if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
		return HPI_ERROR_INVALID_HANDLE;
	hm.u.c.attribute = attrib;
	hm.u.c.param1 = param1;
	hm.u.c.param2 = param2;
	hpi_send_recv(&hm, &hr);
	*pparam1 = hr.u.c.param1;
	if (pparam2)
		*pparam2 = hr.u.c.param2;
	return hr.error;
}

/* Convenience wrappers: fetch one or two parameters with zero inputs. */
#define hpi_control_param1_get(h, a, p1) \
	hpi_control_param_get(h, a, 0, 0, p1, NULL)
#define hpi_control_param2_get(h, a, p1, p2) \
	hpi_control_param_get(h, a, 0, 0, p1, p2)

/* Generic helper: read one or two log (short) values for an attribute.
 * *sv0 is mandatory, *sv1 optional. */
static u16 hpi_control_log_get2(u32 h_control, u16 attrib, short *sv0,
	short *sv1)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_GET_STATE);
	if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
		return HPI_ERROR_INVALID_HANDLE;
	hm.u.c.attribute = attrib;

	hpi_send_recv(&hm, &hr);
	*sv0 = hr.u.c.an_log_value[0];
	if (sv1)
		*sv1 = hr.u.c.an_log_value[1];
	return hr.error;
}

/* Generic helper: query control capabilities (HPI_CONTROL_GET_INFO);
 * signature continues on the next collapsed source line. */
static u16 hpi_control_query(const u32 h_control,
	const u16 attrib, const u32 index, const u32 param, u32 *psetting)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_GET_INFO);
	if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
		return HPI_ERROR_INVALID_HANDLE;
	hm.u.c.attribute = attrib;
	hm.u.c.param1 = index;
	hm.u.c.param2 = param;

	hpi_send_recv(&hm, &hr);
	*psetting = hr.u.c.param1;

	return hr.error;
}

/* Fetch a NUL-terminated string attribute 8 characters per message.
 * string_length must be 1..256; the buffer is always terminated, and
 * HPI_ERROR_INVALID_CONTROL_VALUE is returned if the device-side string
 * would not fit. */
static u16 hpi_control_get_string(const u32 h_control, const u16 attribute,
	char *psz_string, const u32 string_length)
{
	unsigned int sub_string_index = 0, j = 0;
	char c = 0;
	unsigned int n = 0;
	u16 err = 0;

	if ((string_length < 1) || (string_length > 256))
		return HPI_ERROR_INVALID_CONTROL_VALUE;
	for (sub_string_index = 0; sub_string_index < string_length;
		sub_string_index += 8) {
		struct hpi_message hm;
		struct hpi_response hr;

		hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
			HPI_CONTROL_GET_STATE);
		if (hpi_handle_indexes(h_control, &hm.adapter_index,
				&hm.obj_index))
			return HPI_ERROR_INVALID_HANDLE;
		hm.u.c.attribute = attribute;
		hm.u.c.param1 = sub_string_index;
		hm.u.c.param2 = 0;
		hpi_send_recv(&hm, &hr);

		/* First chunk tells us the total remaining length; bail if
		 * the caller's buffer cannot hold it. */
		if (sub_string_index == 0
			&& (hr.u.cu.chars8.remaining_chars + 8) >
			string_length)
			return HPI_ERROR_INVALID_CONTROL_VALUE;

		if (hr.error) {
			err = hr.error;
			break;
		}
		/* Copy up to 8 chars, stopping at NUL or buffer end. */
		for (j = 0; j < 8; j++) {
			c = hr.u.cu.chars8.sz_data[j];
			psz_string[sub_string_index + j] = c;
			n++;
			if (n >= string_length) {
				psz_string[string_length - 1] = 0;
				err = HPI_ERROR_INVALID_CONTROL_VALUE;
				break;
			}
			if (c == 0)
				break;
		}
		/* Force termination when the device reports no more chars
		 * but none of the copied ones was a NUL. */
		if ((hr.u.cu.chars8.remaining_chars == 0)
			&& ((sub_string_index + j) < string_length)
			&& (c != 0)) {
			c = 0;
			psz_string[sub_string_index + j] = c;
		}
		if (c == 0)
			break;
	}
	return err;
}

/* Enumerate supported AES/EBU receiver formats by index. */
u16 hpi_aesebu_receiver_query_format(const u32 h_aes_rx, const u32 index,
	u16 *pw_format)
{
	u32 qr;
	u16 err;

	err = hpi_control_query(h_aes_rx, HPI_AESEBURX_FORMAT, index, 0,
		&qr);
	*pw_format = (u16)qr;
	return err;
}

/* Set the AES/EBU receiver format (continues on next source line). */
u16 hpi_aesebu_receiver_set_format(u32 h_control,
	u16 format)
{
	/* (continuation of hpi_aesebu_receiver_set_format) */
	return hpi_control_param_set(h_control, HPI_AESEBURX_FORMAT, format,
		0);
}

/* Read back the AES/EBU receiver format. */
u16 hpi_aesebu_receiver_get_format(u32 h_control, u16 *pw_format)
{
	u16 err;
	u32 param;

	err = hpi_control_param1_get(h_control, HPI_AESEBURX_FORMAT, &param);
	if (!err && pw_format)
		*pw_format = (u16)param;

	return err;
}

/* Read the sample rate detected by the AES/EBU receiver. */
u16 hpi_aesebu_receiver_get_sample_rate(u32 h_control, u32 *psample_rate)
{
	return hpi_control_param1_get(h_control, HPI_AESEBURX_SAMPLERATE,
		psample_rate);
}

/* Read one AES/EBU receiver user-data word at the given index. */
u16 hpi_aesebu_receiver_get_user_data(u32 h_control, u16 index, u16 *pw_data)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_GET_STATE);
	if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
		return HPI_ERROR_INVALID_HANDLE;
	hm.u.c.attribute = HPI_AESEBURX_USERDATA;
	hm.u.c.param1 = index;

	hpi_send_recv(&hm, &hr);

	if (pw_data)
		*pw_data = (u16)hr.u.c.param2;
	return hr.error;
}

/* Read one AES/EBU receiver channel-status byte at the given index. */
u16 hpi_aesebu_receiver_get_channel_status(u32 h_control, u16 index,
	u16 *pw_data)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_GET_STATE);
	if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
		return HPI_ERROR_INVALID_HANDLE;
	hm.u.c.attribute = HPI_AESEBURX_CHANNELSTATUS;
	hm.u.c.param1 = index;

	hpi_send_recv(&hm, &hr);

	if (pw_data)
		*pw_data = (u16)hr.u.c.param2;
	return hr.error;
}

/* Read the AES/EBU receiver error-status bits. */
u16 hpi_aesebu_receiver_get_error_status(u32 h_control, u16 *pw_error_data)
{
	u32 error_data = 0;
	u16 err = 0;

	err = hpi_control_param1_get(h_control, HPI_AESEBURX_ERRORSTATUS,
		&error_data);
	if (pw_error_data)
		*pw_error_data = (u16)error_data;
	return err;
}

/* Set the AES/EBU transmitter sample rate. */
u16 hpi_aesebu_transmitter_set_sample_rate(u32 h_control, u32 sample_rate)
{
	return hpi_control_param_set(h_control, HPI_AESEBUTX_SAMPLERATE,
		sample_rate, 0);
}

/* Set one AES/EBU transmitter user-data word at the given index. */
u16 hpi_aesebu_transmitter_set_user_data(u32 h_control, u16 index, u16 data)
{
	return hpi_control_param_set(h_control, HPI_AESEBUTX_USERDATA, index,
		data);
}

u16
hpi_aesebu_transmitter_set_channel_status(u32 h_control, u16 index, u16 data)
{
	/* Set one AES/EBU transmitter channel-status byte. */
	return hpi_control_param_set(h_control, HPI_AESEBUTX_CHANNELSTATUS,
		index, data);
}

/* Not implemented: reading back transmitter channel status always fails
 * with HPI_ERROR_INVALID_OPERATION. */
u16 hpi_aesebu_transmitter_get_channel_status(u32 h_control, u16 index,
	u16 *pw_data)
{
	return HPI_ERROR_INVALID_OPERATION;
}

/* Enumerate supported AES/EBU transmitter formats by index. */
u16 hpi_aesebu_transmitter_query_format(const u32 h_aes_tx, const u32 index,
	u16 *pw_format)
{
	u32 qr;
	u16 err;

	err = hpi_control_query(h_aes_tx, HPI_AESEBUTX_FORMAT, index, 0,
		&qr);
	*pw_format = (u16)qr;
	return err;
}

/* Set the AES/EBU transmitter output format. */
u16 hpi_aesebu_transmitter_set_format(u32 h_control, u16 output_format)
{
	return hpi_control_param_set(h_control, HPI_AESEBUTX_FORMAT,
		output_format, 0);
}

/* Read back the AES/EBU transmitter output format. */
u16 hpi_aesebu_transmitter_get_format(u32 h_control, u16 *pw_output_format)
{
	u16 err;
	u32 param;

	err = hpi_control_param1_get(h_control, HPI_AESEBUTX_FORMAT, &param);
	if (!err && pw_output_format)
		*pw_output_format = (u16)param;

	return err;
}

/* Set the bitstream clock edge type. */
u16 hpi_bitstream_set_clock_edge(u32 h_control, u16 edge_type)
{
	return hpi_control_param_set(h_control, HPI_BITSTREAM_CLOCK_EDGE,
		edge_type, 0);
}

/* Set the bitstream data polarity. */
u16 hpi_bitstream_set_data_polarity(u32 h_control, u16 polarity)
{
	return hpi_control_param_set(h_control, HPI_BITSTREAM_DATA_POLARITY,
		polarity, 0);
}

/* Read bitstream activity: clock activity in param1, data in param2. */
u16 hpi_bitstream_get_activity(u32 h_control, u16 *pw_clk_activity,
	u16 *pw_data_activity)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_GET_STATE);
	if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
		return HPI_ERROR_INVALID_HANDLE;
	hm.u.c.attribute = HPI_BITSTREAM_ACTIVITY;
	hpi_send_recv(&hm, &hr);
	if (pw_clk_activity)
		*pw_clk_activity = (u16)hr.u.c.param1;
	if (pw_data_activity)
		*pw_data_activity = (u16)hr.u.c.param2;
	return hr.error;
}

/* Enumerate supported channel modes by index. */
u16 hpi_channel_mode_query_mode(const u32 h_mode, const u32 index,
	u16 *pw_mode)
{
	u32 qr;
	u16 err;

	err = hpi_control_query(h_mode, HPI_CHANNEL_MODE_MODE, index, 0,
		&qr);
	*pw_mode = (u16)qr;
	return err;
}

/* Set the channel mode (continues on next source line). */
u16 hpi_channel_mode_set(u32 h_control, u16 mode)
{
	/* (continuation of hpi_channel_mode_set) */
	return hpi_control_param_set(h_control, HPI_CHANNEL_MODE_MODE, mode,
		0);
}

/* Read back the channel mode. */
u16 hpi_channel_mode_get(u32 h_control, u16 *mode)
{
	u32 mode32 = 0;
	u16 err = hpi_control_param1_get(h_control, HPI_CHANNEL_MODE_MODE,
		&mode32);
	if (mode)
		*mode = (u16)mode32;
	return err;
}

/* Write byte_count bytes to a CobraNet HMI address.  Fails if the data
 * does not fit the message's inline byte buffer. */
u16 hpi_cobranet_hmi_write(u32 h_control, u32 hmi_address, u32 byte_count,
	u8 *pb_data)
{
	struct hpi_msg_cobranet_hmiwrite hm;
	struct hpi_response_header hr;

	hpi_init_message_responseV1(&hm.h, sizeof(hm), &hr, sizeof(hr),
		HPI_OBJ_CONTROL, HPI_CONTROL_SET_STATE);

	if (hpi_handle_indexes(h_control, &hm.h.adapter_index,
			&hm.h.obj_index))
		return HPI_ERROR_INVALID_HANDLE;

	if (byte_count > sizeof(hm.bytes))
		return HPI_ERROR_MESSAGE_BUFFER_TOO_SMALL;

	hm.p.attribute = HPI_COBRANET_SET;
	hm.p.byte_count = byte_count;
	hm.p.hmi_address = hmi_address;
	memcpy(hm.bytes, pb_data, byte_count);
	/* Shrink the message size to the actual payload. */
	hm.h.size = (u16)(sizeof(hm.h) + sizeof(hm.p) + byte_count);

	hpi_send_recvV1(&hm.h, &hr);
	return hr.error;
}

/* Read up to max_byte_count bytes from a CobraNet HMI address.
 * *pbyte_count receives the count reported by the device. */
u16 hpi_cobranet_hmi_read(u32 h_control, u32 hmi_address, u32 max_byte_count,
	u32 *pbyte_count, u8 *pb_data)
{
	struct hpi_msg_cobranet_hmiread hm;
	struct hpi_res_cobranet_hmiread hr;

	hpi_init_message_responseV1(&hm.h, sizeof(hm), &hr.h, sizeof(hr),
		HPI_OBJ_CONTROL, HPI_CONTROL_GET_STATE);

	if (hpi_handle_indexes(h_control, &hm.h.adapter_index,
			&hm.h.obj_index))
		return HPI_ERROR_INVALID_HANDLE;

	if (max_byte_count > sizeof(hr.bytes))
		return HPI_ERROR_RESPONSE_BUFFER_TOO_SMALL;

	hm.p.attribute = HPI_COBRANET_GET;
	hm.p.byte_count = max_byte_count;
	hm.p.hmi_address = hmi_address;

	hpi_send_recvV1(&hm.h, &hr.h);

	if (!hr.h.error && pb_data) {
		/* Never trust the device-reported count beyond the buffer. */
		if (hr.byte_count > sizeof(hr.bytes))
			return HPI_ERROR_RESPONSE_BUFFER_TOO_SMALL;

		*pbyte_count = hr.byte_count;

		if (hr.byte_count < max_byte_count)
			max_byte_count = *pbyte_count;

		memcpy(pb_data, hr.bytes, max_byte_count);
	}
	return hr.h.error;
}

/* Read CobraNet HMI status plus readable/writeable sizes (each output
 * pointer optional); body continues on the next source line. */
u16 hpi_cobranet_hmi_get_status(u32 h_control, u32 *pstatus,
	u32 *preadable_size, u32 *pwriteable_size)
{
	struct hpi_message hm;
	struct hpi_response hr;
	/* (continuation of hpi_cobranet_hmi_get_status) */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_GET_STATE);
	if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
		return HPI_ERROR_INVALID_HANDLE;

	hm.u.c.attribute = HPI_COBRANET_GET_STATUS;

	hpi_send_recv(&hm, &hr);
	if (!hr.error) {
		if (pstatus)
			*pstatus = hr.u.cu.cobranet.status.status;
		if (preadable_size)
			*preadable_size =
				hr.u.cu.cobranet.status.readable_size;
		if (pwriteable_size)
			*pwriteable_size =
				hr.u.cu.cobranet.status.writeable_size;
	}
	return hr.error;
}

/* Read the current CobraNet IP address, byte-swapped within each 16-bit
 * half of the word.  On error the output is forced to 0.
 * NOTE(review): iP is swapped before err is checked; on failure it reads
 * an uninitialized local before being overwritten — harmless but worth
 * tidying. */
u16 hpi_cobranet_get_ip_address(u32 h_control, u32 *pdw_ip_address)
{
	u32 byte_count;
	u32 iP;
	u16 err;

	err = hpi_cobranet_hmi_read(h_control,
		HPI_COBRANET_HMI_cobra_ip_mon_currentIP, 4, &byte_count,
		(u8 *)&iP);

	*pdw_ip_address =
		((iP & 0xff000000) >> 8) | ((iP & 0x00ff0000) << 8) |
		((iP & 0x0000ff00) >> 8) | ((iP & 0x000000ff) << 8);

	if (err)
		*pdw_ip_address = 0;

	return err;
}

/* Set the current CobraNet IP address (inverse byte swap of the getter). */
u16 hpi_cobranet_set_ip_address(u32 h_control, u32 dw_ip_address)
{
	u32 iP;
	u16 err;

	iP = ((dw_ip_address & 0xff000000) >> 8) |
		((dw_ip_address & 0x00ff0000) << 8) |
		((dw_ip_address & 0x0000ff00) >> 8) |
		((dw_ip_address & 0x000000ff) << 8);

	err = hpi_cobranet_hmi_write(h_control,
		HPI_COBRANET_HMI_cobra_ip_mon_currentIP, 4, (u8 *)&iP);

	return err;
}

/* Read the static (configured) CobraNet IP address; 0 on error. */
u16 hpi_cobranet_get_static_ip_address(u32 h_control, u32 *pdw_ip_address)
{
	u32 byte_count;
	u32 iP;
	u16 err;

	err = hpi_cobranet_hmi_read(h_control,
		HPI_COBRANET_HMI_cobra_ip_mon_staticIP, 4, &byte_count,
		(u8 *)&iP);

	*pdw_ip_address =
		((iP & 0xff000000) >> 8) | ((iP & 0x00ff0000) << 8) |
		((iP & 0x0000ff00) >> 8) | ((iP & 0x000000ff) << 8);

	if (err)
		*pdw_ip_address = 0;

	return err;
}

/* Set the static CobraNet IP address. */
u16 hpi_cobranet_set_static_ip_address(u32 h_control, u32 dw_ip_address)
{
	u32 iP;
	u16 err;

	iP = ((dw_ip_address & 0xff000000) >> 8) |
		((dw_ip_address & 0x00ff0000) << 8) |
		((dw_ip_address & 0x0000ff00) >> 8) |
		((dw_ip_address & 0x000000ff) << 8);

	err = hpi_cobranet_hmi_write(h_control,
		HPI_COBRANET_HMI_cobra_ip_mon_staticIP, 4, (u8 *)&iP);

	return err;
}

u16
hpi_cobranet_get_macaddress(u32 h_control, u32 *p_mac_msbs, u32 *p_mac_lsbs)
{
	/* Read the CobraNet MAC address as two HMI words (each halfword
	 * byte-swapped); both outputs are zeroed on failure. */
	u32 byte_count;
	u16 err;
	u32 mac;

	err = hpi_cobranet_hmi_read(h_control,
		HPI_COBRANET_HMI_cobra_if_phy_address, 4, &byte_count,
		(u8 *)&mac);

	if (!err) {
		*p_mac_msbs =
			((mac & 0xff000000) >> 8) | ((mac & 0x00ff0000) << 8)
			| ((mac & 0x0000ff00) >> 8) |
			((mac & 0x000000ff) << 8);

		err = hpi_cobranet_hmi_read(h_control,
			HPI_COBRANET_HMI_cobra_if_phy_address + 1, 4,
			&byte_count, (u8 *)&mac);
	}

	if (!err) {
		*p_mac_lsbs =
			((mac & 0xff000000) >> 8) | ((mac & 0x00ff0000) << 8)
			| ((mac & 0x0000ff00) >> 8) |
			((mac & 0x000000ff) << 8);
	} else {
		*p_mac_msbs = 0;
		*p_mac_lsbs = 0;
	}

	return err;
}

/* Enable or disable the compander. */
u16 hpi_compander_set_enable(u32 h_control, u32 enable)
{
	return hpi_control_param_set(h_control, HPI_GENERIC_ENABLE, enable,
		0);
}

/* Read the compander enable state. */
u16 hpi_compander_get_enable(u32 h_control, u32 *enable)
{
	return hpi_control_param1_get(h_control, HPI_GENERIC_ENABLE, enable);
}

/* Set compander makeup gain in hundredths of a dB. */
u16 hpi_compander_set_makeup_gain(u32 h_control, short makeup_gain0_01dB)
{
	return hpi_control_log_set2(h_control, HPI_COMPANDER_MAKEUPGAIN,
		makeup_gain0_01dB, 0);
}

/* Read compander makeup gain in hundredths of a dB. */
u16 hpi_compander_get_makeup_gain(u32 h_control, short *makeup_gain0_01dB)
{
	return hpi_control_log_get2(h_control, HPI_COMPANDER_MAKEUPGAIN,
		makeup_gain0_01dB, NULL);
}

/* Set attack time constant for the indexed compander stage. */
u16 hpi_compander_set_attack_time_constant(u32 h_control, unsigned int index,
	u32 attack)
{
	return hpi_control_param_set(h_control, HPI_COMPANDER_ATTACK, attack,
		index);
}

/* Read attack time constant for the indexed compander stage. */
u16 hpi_compander_get_attack_time_constant(u32 h_control, unsigned int index,
	u32 *attack)
{
	return hpi_control_param_get(h_control, HPI_COMPANDER_ATTACK, 0,
		index, attack, NULL);
}

/* Set decay time constant for the indexed compander stage. */
u16 hpi_compander_set_decay_time_constant(u32 h_control, unsigned int index,
	u32 decay)
{
	return hpi_control_param_set(h_control, HPI_COMPANDER_DECAY, decay,
		index);
}

/* Read decay time constant for the indexed compander stage. */
u16 hpi_compander_get_decay_time_constant(u32 h_control, unsigned int index,
	u32 *decay)
{
	return hpi_control_param_get(h_control, HPI_COMPANDER_DECAY, 0,
		index, decay, NULL);
}

u16
hpi_compander_set_threshold(u32 h_control, unsigned int index,
	short threshold0_01dB)
{
	/* Set the indexed compander threshold in hundredths of a dB. */
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_SET_STATE);
	if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
		return HPI_ERROR_INVALID_HANDLE;
	hm.u.c.attribute = HPI_COMPANDER_THRESHOLD;
	hm.u.c.param2 = index;
	hm.u.c.an_log_value[0] = threshold0_01dB;

	hpi_send_recv(&hm, &hr);
	return hr.error;
}

/* Read the indexed compander threshold in hundredths of a dB. */
u16 hpi_compander_get_threshold(u32 h_control, unsigned int index,
	short *threshold0_01dB)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_GET_STATE);
	if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
		return HPI_ERROR_INVALID_HANDLE;
	hm.u.c.attribute = HPI_COMPANDER_THRESHOLD;
	hm.u.c.param2 = index;

	hpi_send_recv(&hm, &hr);
	*threshold0_01dB = hr.u.c.an_log_value[0];

	return hr.error;
}

/* Set the indexed compander ratio (x100). */
u16 hpi_compander_set_ratio(u32 h_control, u32 index, u32 ratio100)
{
	return hpi_control_param_set(h_control, HPI_COMPANDER_RATIO,
		ratio100, index);
}

/* Read the indexed compander ratio (x100). */
u16 hpi_compander_get_ratio(u32 h_control, u32 index, u32 *ratio100)
{
	return hpi_control_param_get(h_control, HPI_COMPANDER_RATIO, 0,
		index, ratio100, NULL);
}

/* Query min/max/step of a level control in hundredths of a dB.  Each
 * output pointer is optional; all report 0 on error. */
u16 hpi_level_query_range(u32 h_control, short *min_gain_01dB,
	short *max_gain_01dB, short *step_gain_01dB)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_GET_STATE);
	if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
		return HPI_ERROR_INVALID_HANDLE;
	hm.u.c.attribute = HPI_LEVEL_RANGE;

	hpi_send_recv(&hm, &hr);
	if (hr.error) {
		hr.u.c.an_log_value[0] = 0;
		hr.u.c.an_log_value[1] = 0;
		hr.u.c.param1 = 0;
	}
	if (min_gain_01dB)
		*min_gain_01dB = hr.u.c.an_log_value[0];
	if (max_gain_01dB)
		*max_gain_01dB = hr.u.c.an_log_value[1];
	if (step_gain_01dB)
		*step_gain_01dB = (short)hr.u.c.param1;
	return hr.error;
}

u16 hpi_level_set_gain(u32 h_control, short
an_gain0_01dB[HPI_MAX_CHANNELS])
{
	/* (continuation of hpi_level_set_gain) Set a stereo level control's
	 * gain pair in hundredths of a dB. */
	return hpi_control_log_set2(h_control, HPI_LEVEL_GAIN,
		an_gain0_01dB[0], an_gain0_01dB[1]);
}

/* Read a stereo level control's gain pair in hundredths of a dB. */
u16 hpi_level_get_gain(u32 h_control,
	short an_gain0_01dB[HPI_MAX_CHANNELS])
{
	return hpi_control_log_get2(h_control, HPI_LEVEL_GAIN,
		&an_gain0_01dB[0], &an_gain0_01dB[1]);
}

/* Query the number of channels reported by a meter control. */
u16 hpi_meter_query_channels(const u32 h_meter, u32 *p_channels)
{
	return hpi_control_query(h_meter, HPI_METER_NUM_CHANNELS, 0, 0,
		p_channels);
}

/* Read per-channel peak levels; on error all channels are forced to
 * HPI_METER_MINIMUM.
 * Fix: removed the redundant self-assignment "hm.obj_index =
 * hm.obj_index;" that the original carried here — a pure no-op. */
u16 hpi_meter_get_peak(u32 h_control, short an_peakdB[HPI_MAX_CHANNELS])
{
	short i = 0;

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_GET_STATE);
	if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
		return HPI_ERROR_INVALID_HANDLE;
	hm.u.c.attribute = HPI_METER_PEAK;

	hpi_send_recv(&hm, &hr);

	if (!hr.error)
		memcpy(an_peakdB, hr.u.c.an_log_value,
			sizeof(short) * HPI_MAX_CHANNELS);
	else
		for (i = 0; i < HPI_MAX_CHANNELS; i++)
			an_peakdB[i] = HPI_METER_MINIMUM;
	return hr.error;
}

/* Read per-channel RMS levels; on error all channels are forced to
 * HPI_METER_MINIMUM. */
u16 hpi_meter_get_rms(u32 h_control, short an_rmsdB[HPI_MAX_CHANNELS])
{
	short i = 0;

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_GET_STATE);
	if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
		return HPI_ERROR_INVALID_HANDLE;
	hm.u.c.attribute = HPI_METER_RMS;

	hpi_send_recv(&hm, &hr);

	if (!hr.error)
		memcpy(an_rmsdB, hr.u.c.an_log_value,
			sizeof(short) * HPI_MAX_CHANNELS);
	else
		for (i = 0; i < HPI_MAX_CHANNELS; i++)
			an_rmsdB[i] = HPI_METER_MINIMUM;

	return hr.error;
}

/* Set RMS meter ballistics (attack/decay). */
u16 hpi_meter_set_rms_ballistics(u32 h_control, u16 attack, u16 decay)
{
	return hpi_control_param_set(h_control, HPI_METER_RMS_BALLISTICS,
		attack, decay);
}

/* Read RMS meter ballistics; continues on the next source line. */
u16 hpi_meter_get_rms_ballistics(u32 h_control, u16 *pn_attack,
	u16 *pn_decay)
{
	u32 attack;
	u32 decay;
	u16 error;

	error = hpi_control_param2_get(h_control, HPI_METER_RMS_BALLISTICS,
		&attack, &decay);

	if (pn_attack)
		*pn_attack = (unsigned
short)attack;	/* (completes "(unsigned short)attack" from prior line) */
	if (pn_decay)
		*pn_decay = (unsigned short)decay;
	return error;
}

/* Set peak meter ballistics (attack/decay). */
u16 hpi_meter_set_peak_ballistics(u32 h_control, u16 attack, u16 decay)
{
	return hpi_control_param_set(h_control, HPI_METER_PEAK_BALLISTICS,
		attack, decay);
}

/* Read peak meter ballistics.
 * NOTE(review): casts here are (short) whereas the RMS getter above uses
 * (unsigned short) for the same u16 outputs — same bits stored either
 * way, but the inconsistency is worth unifying. */
u16 hpi_meter_get_peak_ballistics(u32 h_control, u16 *pn_attack,
	u16 *pn_decay)
{
	u32 attack;
	u32 decay;
	u16 error;

	error = hpi_control_param2_get(h_control, HPI_METER_PEAK_BALLISTICS,
		&attack, &decay);

	if (pn_attack)
		*pn_attack = (short)attack;
	if (pn_decay)
		*pn_decay = (short)decay;
	return error;
}

/* Switch microphone phantom power on or off. */
u16 hpi_microphone_set_phantom_power(u32 h_control, u16 on_off)
{
	return hpi_control_param_set(h_control, HPI_MICROPHONE_PHANTOM_POWER,
		(u32)on_off, 0);
}

/* Read the microphone phantom power state. */
u16 hpi_microphone_get_phantom_power(u32 h_control, u16 *pw_on_off)
{
	u16 error = 0;
	u32 on_off = 0;

	error = hpi_control_param1_get(h_control,
		HPI_MICROPHONE_PHANTOM_POWER, &on_off);
	if (pw_on_off)
		*pw_on_off = (u16)on_off;
	return error;
}

/* Route a multiplexer to the given source node (type + index). */
u16 hpi_multiplexer_set_source(u32 h_control, u16 source_node_type,
	u16 source_node_index)
{
	return hpi_control_param_set(h_control, HPI_MULTIPLEXER_SOURCE,
		source_node_type, source_node_index);
}

/* Read back the multiplexer's current source node. */
u16 hpi_multiplexer_get_source(u32 h_control, u16 *source_node_type,
	u16 *source_node_index)
{
	u32 node, index;
	u16 err = hpi_control_param2_get(h_control, HPI_MULTIPLEXER_SOURCE,
		&node, &index);
	if (source_node_type)
		*source_node_type = (u16)node;
	if (source_node_index)
		*source_node_index = (u16)index;
	return err;
}

/* Enumerate selectable multiplexer sources by index; continues on the
 * next source line. */
u16 hpi_multiplexer_query_source(u32 h_control, u16 index,
	u16 *source_node_type, u16 *source_node_index)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_GET_STATE);
	if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
		return HPI_ERROR_INVALID_HANDLE;
	hm.u.c.attribute = HPI_MULTIPLEXER_QUERYSOURCE;
	hm.u.c.param1 = index;

	hpi_send_recv(&hm, &hr);

	if (source_node_type)
		*source_node_type = (u16)hr.u.c.param1;
	if (source_node_index)
		*source_node_index =
(u16)hr.u.c.param2;	/* (completes hpi_multiplexer_query_source) */
	return hr.error;
}

/* Read parametric EQ info: on/off in param1, number of bands in param2. */
u16 hpi_parametric_eq_get_info(u32 h_control, u16 *pw_number_of_bands,
	u16 *pw_on_off)
{
	u32 oB = 0;
	u32 oO = 0;
	u16 error = 0;

	error = hpi_control_param2_get(h_control, HPI_EQUALIZER_NUM_FILTERS,
		&oO, &oB);
	if (pw_number_of_bands)
		*pw_number_of_bands = (u16)oB;
	if (pw_on_off)
		*pw_on_off = (u16)oO;
	return error;
}

/* Switch the parametric EQ on or off. */
u16 hpi_parametric_eq_set_state(u32 h_control, u16 on_off)
{
	return hpi_control_param_set(h_control, HPI_EQUALIZER_NUM_FILTERS,
		on_off, 0);
}

/* Read one EQ band: frequency (param1), type (high 16 bits of param2),
 * Q*100 and gain in the log-value pair.  All outputs optional. */
u16 hpi_parametric_eq_get_band(u32 h_control, u16 index, u16 *pn_type,
	u32 *pfrequency_hz, short *pnQ100, short *pn_gain0_01dB)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_GET_STATE);
	if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
		return HPI_ERROR_INVALID_HANDLE;
	hm.u.c.attribute = HPI_EQUALIZER_FILTER;
	hm.u.c.param2 = index;

	hpi_send_recv(&hm, &hr);

	if (pfrequency_hz)
		*pfrequency_hz = hr.u.c.param1;
	if (pn_type)
		*pn_type = (u16)(hr.u.c.param2 >> 16);
	if (pnQ100)
		*pnQ100 = hr.u.c.an_log_value[1];
	if (pn_gain0_01dB)
		*pn_gain0_01dB = hr.u.c.an_log_value[0];

	return hr.error;
}

/* Program one EQ band: index in the low 16 bits of param2, type in the
 * high 16 bits; gain and Q*100 in the log-value pair. */
u16 hpi_parametric_eq_set_band(u32 h_control, u16 index, u16 type,
	u32 frequency_hz, short q100, short gain0_01dB)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_SET_STATE);
	if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
		return HPI_ERROR_INVALID_HANDLE;

	hm.u.c.param1 = frequency_hz;
	hm.u.c.param2 = (index & 0xFFFFL) + ((u32)type << 16);
	hm.u.c.an_log_value[0] = gain0_01dB;
	hm.u.c.an_log_value[1] = q100;
	hm.u.c.attribute = HPI_EQUALIZER_FILTER;

	hpi_send_recv(&hm, &hr);

	return hr.error;
}

/* Read the five biquad coefficients of an EQ band; continues on the
 * next source line. */
u16 hpi_parametric_eq_get_coeffs(u32 h_control, u16 index, short coeffs[5])
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_GET_STATE);
	if (hpi_handle_indexes(h_control,
	/* (continuation of hpi_parametric_eq_get_coeffs) */
	&hm.adapter_index, &hm.obj_index))
		return HPI_ERROR_INVALID_HANDLE;
	hm.u.c.attribute = HPI_EQUALIZER_COEFFICIENTS;
	hm.u.c.param2 = index;

	hpi_send_recv(&hm, &hr);

	/* Five coefficients are packed across the two log values and the
	 * halves of param1 plus param2. */
	coeffs[0] = (short)hr.u.c.an_log_value[0];
	coeffs[1] = (short)hr.u.c.an_log_value[1];
	coeffs[2] = (short)hr.u.c.param1;
	coeffs[3] = (short)(hr.u.c.param1 >> 16);
	coeffs[4] = (short)hr.u.c.param2;

	return hr.error;
}

/* Enumerate selectable sample clock sources by index. */
u16 hpi_sample_clock_query_source(const u32 h_clock, const u32 index,
	u16 *pw_source)
{
	u32 qr;
	u16 err;

	err = hpi_control_query(h_clock, HPI_SAMPLECLOCK_SOURCE, index, 0,
		&qr);
	*pw_source = (u16)qr;
	return err;
}

/* Select the sample clock source. */
u16 hpi_sample_clock_set_source(u32 h_control, u16 source)
{
	return hpi_control_param_set(h_control, HPI_SAMPLECLOCK_SOURCE,
		source, 0);
}

/* Read the current sample clock source (output only on success). */
u16 hpi_sample_clock_get_source(u32 h_control, u16 *pw_source)
{
	u16 err = 0;
	u32 source = 0;

	err = hpi_control_param1_get(h_control, HPI_SAMPLECLOCK_SOURCE,
		&source);
	if (!err)
		if (pw_source)
			*pw_source = (u16)source;
	return err;
}

/* Enumerate source indexes available for a given clock source. */
u16 hpi_sample_clock_query_source_index(const u32 h_clock, const u32 index,
	const u32 source, u16 *pw_source_index)
{
	u32 qr;
	u16 err;

	err = hpi_control_query(h_clock, HPI_SAMPLECLOCK_SOURCE_INDEX, index,
		source, &qr);
	*pw_source_index = (u16)qr;
	return err;
}

/* Select the sample clock source index. */
u16 hpi_sample_clock_set_source_index(u32 h_control, u16 source_index)
{
	return hpi_control_param_set(h_control, HPI_SAMPLECLOCK_SOURCE_INDEX,
		source_index, 0);
}

/* Read the current sample clock source index (output only on success). */
u16 hpi_sample_clock_get_source_index(u32 h_control, u16 *pw_source_index)
{
	u16 err = 0;
	u32 source_index = 0;

	err = hpi_control_param1_get(h_control, HPI_SAMPLECLOCK_SOURCE_INDEX,
		&source_index);
	if (!err)
		if (pw_source_index)
			*pw_source_index = (u16)source_index;
	return err;
}

/* Enumerate supported local sample rates by index. */
u16 hpi_sample_clock_query_local_rate(const u32 h_clock, const u32 index,
	u32 *prate)
{
	u16 err;
	err = hpi_control_query(h_clock, HPI_SAMPLECLOCK_LOCAL_SAMPLERATE,
		index, 0, prate);

	return err;
}

/* Set the local sample rate (continues on next source line). */
u16 hpi_sample_clock_set_local_rate(u32 h_control, u32 sample_rate)
{
	return hpi_control_param_set(h_control,
	/* (continuation of hpi_sample_clock_set_local_rate) */
	HPI_SAMPLECLOCK_LOCAL_SAMPLERATE, sample_rate, 0);
}

/* Read the local sample rate (output only on success). */
u16 hpi_sample_clock_get_local_rate(u32 h_control, u32 *psample_rate)
{
	u16 err = 0;
	u32 sample_rate = 0;

	err = hpi_control_param1_get(h_control,
		HPI_SAMPLECLOCK_LOCAL_SAMPLERATE, &sample_rate);
	if (!err)
		if (psample_rate)
			*psample_rate = sample_rate;
	return err;
}

/* Read the actual (possibly external) sample rate. */
u16 hpi_sample_clock_get_sample_rate(u32 h_control, u32 *psample_rate)
{
	u16 err = 0;
	u32 sample_rate = 0;

	err = hpi_control_param1_get(h_control, HPI_SAMPLECLOCK_SAMPLERATE,
		&sample_rate);
	if (!err)
		if (psample_rate)
			*psample_rate = sample_rate;
	return err;
}

/* Enable or disable automatic clock source selection. */
u16 hpi_sample_clock_set_auto(u32 h_control, u32 enable)
{
	return hpi_control_param_set(h_control, HPI_SAMPLECLOCK_AUTO, enable,
		0);
}

/* Read the automatic clock selection state. */
u16 hpi_sample_clock_get_auto(u32 h_control, u32 *penable)
{
	return hpi_control_param1_get(h_control, HPI_SAMPLECLOCK_AUTO,
		penable);
}

/* Lock or unlock the local clock rate. */
u16 hpi_sample_clock_set_local_rate_lock(u32 h_control, u32 lock)
{
	return hpi_control_param_set(h_control, HPI_SAMPLECLOCK_LOCAL_LOCK,
		lock, 0);
}

/* Read the local clock rate lock state. */
u16 hpi_sample_clock_get_local_rate_lock(u32 h_control, u32 *plock)
{
	return hpi_control_param1_get(h_control, HPI_SAMPLECLOCK_LOCAL_LOCK,
		plock);
}

/* Read the indexed tone detector frequency. */
u16 hpi_tone_detector_get_frequency(u32 h_control, u32 index, u32 *frequency)
{
	return hpi_control_param_get(h_control, HPI_TONEDETECTOR_FREQUENCY,
		index, 0, frequency, NULL);
}

/* Read the tone detector state bitmap. */
u16 hpi_tone_detector_get_state(u32 h_control, u32 *state)
{
	return hpi_control_param1_get(h_control, HPI_TONEDETECTOR_STATE,
		state);
}

/* Enable or disable the tone detector. */
u16 hpi_tone_detector_set_enable(u32 h_control, u32 enable)
{
	return hpi_control_param_set(h_control, HPI_GENERIC_ENABLE, enable,
		0);
}

/* Read the tone detector enable state. */
u16 hpi_tone_detector_get_enable(u32 h_control, u32 *enable)
{
	return hpi_control_param1_get(h_control, HPI_GENERIC_ENABLE, enable);
}

/* Enable or disable tone detector event generation. */
u16 hpi_tone_detector_set_event_enable(u32 h_control, u32 event_enable)
{
	return hpi_control_param_set(h_control, HPI_GENERIC_EVENT_ENABLE,
		(u32)event_enable, 0);
}

/* Read tone detector event-enable (continues on next source line). */
u16 hpi_tone_detector_get_event_enable(u32 h_control, u32 *event_enable)
{
	return
	/* (continuation of hpi_tone_detector_get_event_enable) */
	hpi_control_param1_get(h_control, HPI_GENERIC_EVENT_ENABLE,
		event_enable);
}

/* Set the tone detector threshold. */
u16 hpi_tone_detector_set_threshold(u32 h_control, int threshold)
{
	return hpi_control_param_set(h_control, HPI_TONEDETECTOR_THRESHOLD,
		(u32)threshold, 0);
}

/* Read the tone detector threshold. */
u16 hpi_tone_detector_get_threshold(u32 h_control, int *threshold)
{
	return hpi_control_param1_get(h_control, HPI_TONEDETECTOR_THRESHOLD,
		(u32 *)threshold);
}

/* Read the silence detector state bitmap. */
u16 hpi_silence_detector_get_state(u32 h_control, u32 *state)
{
	return hpi_control_param1_get(h_control, HPI_SILENCEDETECTOR_STATE,
		state);
}

/* Enable or disable the silence detector. */
u16 hpi_silence_detector_set_enable(u32 h_control, u32 enable)
{
	return hpi_control_param_set(h_control, HPI_GENERIC_ENABLE, enable,
		0);
}

/* Read the silence detector enable state. */
u16 hpi_silence_detector_get_enable(u32 h_control, u32 *enable)
{
	return hpi_control_param1_get(h_control, HPI_GENERIC_ENABLE, enable);
}

/* Enable or disable silence detector event generation. */
u16 hpi_silence_detector_set_event_enable(u32 h_control, u32 event_enable)
{
	return hpi_control_param_set(h_control, HPI_GENERIC_EVENT_ENABLE,
		event_enable, 0);
}

/* Read silence detector event-enable. */
u16 hpi_silence_detector_get_event_enable(u32 h_control, u32 *event_enable)
{
	return hpi_control_param1_get(h_control, HPI_GENERIC_EVENT_ENABLE,
		event_enable);
}

/* Set the silence detector delay. */
u16 hpi_silence_detector_set_delay(u32 h_control, u32 delay)
{
	return hpi_control_param_set(h_control, HPI_SILENCEDETECTOR_DELAY,
		delay, 0);
}

/* Read the silence detector delay. */
u16 hpi_silence_detector_get_delay(u32 h_control, u32 *delay)
{
	return hpi_control_param1_get(h_control, HPI_SILENCEDETECTOR_DELAY,
		delay);
}

/* Set the silence detector threshold. */
u16 hpi_silence_detector_set_threshold(u32 h_control, int threshold)
{
	return hpi_control_param_set(h_control,
		HPI_SILENCEDETECTOR_THRESHOLD, threshold, 0);
}

/* Read the silence detector threshold. */
u16 hpi_silence_detector_get_threshold(u32 h_control, int *threshold)
{
	return hpi_control_param1_get(h_control,
		HPI_SILENCEDETECTOR_THRESHOLD, (u32 *)threshold);
}

/* Enumerate supported tuner bands by index. */
u16 hpi_tuner_query_band(const u32 h_tuner, const u32 index, u16 *pw_band)
{
	u32 qr;
	u16 err;

	err = hpi_control_query(h_tuner, HPI_TUNER_BAND, index, 0, &qr);
	*pw_band = (u16)qr;
	return err;
}

/* Set the tuner band (continues on next source line). */
u16 hpi_tuner_set_band(u32 h_control, u16 band)
{
	return
	/* (continuation of hpi_tuner_set_band) */
	hpi_control_param_set(h_control, HPI_TUNER_BAND, band, 0);
}

/* Read back the current tuner band. */
u16 hpi_tuner_get_band(u32 h_control, u16 *pw_band)
{
	u32 band = 0;
	u16 error = 0;

	error = hpi_control_param1_get(h_control, HPI_TUNER_BAND, &band);
	if (pw_band)
		*pw_band = (u16)band;
	return error;
}

/* Enumerate supported frequencies for a band by index. */
u16 hpi_tuner_query_frequency(const u32 h_tuner, const u32 index,
	const u16 band, u32 *pfreq)
{
	return hpi_control_query(h_tuner, HPI_TUNER_FREQ, index, band,
		pfreq);
}

/* Set the tuner frequency in kHz. */
u16 hpi_tuner_set_frequency(u32 h_control, u32 freq_ink_hz)
{
	return hpi_control_param_set(h_control, HPI_TUNER_FREQ, freq_ink_hz,
		0);
}

/* Read the tuner frequency in kHz. */
u16 hpi_tuner_get_frequency(u32 h_control, u32 *pw_freq_ink_hz)
{
	return hpi_control_param1_get(h_control, HPI_TUNER_FREQ,
		pw_freq_ink_hz);
}

/* Enumerate supported tuner gain settings by index.
 * NOTE(review): this queries attribute HPI_TUNER_BAND, not
 * HPI_TUNER_GAIN — looks like a copy/paste slip from
 * hpi_tuner_query_band above; confirm against the HPI spec before
 * changing, since callers may rely on current behaviour. */
u16 hpi_tuner_query_gain(const u32 h_tuner, const u32 index, u16 *pw_gain)
{
	u32 qr;
	u16 err;

	err = hpi_control_query(h_tuner, HPI_TUNER_BAND, index, 0, &qr);
	*pw_gain = (u16)qr;
	return err;
}

/* Set the tuner gain. */
u16 hpi_tuner_set_gain(u32 h_control, short gain)
{
	return hpi_control_param_set(h_control, HPI_TUNER_GAIN, gain, 0);
}

/* Read the tuner gain (stored via a u16 cast into a short output). */
u16 hpi_tuner_get_gain(u32 h_control, short *pn_gain)
{
	u32 gain = 0;
	u16 error = 0;

	error = hpi_control_param1_get(h_control, HPI_TUNER_GAIN, &gain);
	if (pn_gain)
		*pn_gain = (u16)gain;
	return error;
}

/* Read the averaged RF level. */
u16 hpi_tuner_get_rf_level(u32 h_control, short *pw_level)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_GET_STATE);
	if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
		return HPI_ERROR_INVALID_HANDLE;
	/* Attribute set through the cu union member for tuner messages. */
	hm.u.cu.attribute = HPI_TUNER_LEVEL_AVG;
	hpi_send_recv(&hm, &hr);
	if (pw_level)
		*pw_level = hr.u.cu.tuner.s_level;
	return hr.error;
}

/* Read the raw RF level (continues on next source line). */
u16 hpi_tuner_get_raw_rf_level(u32 h_control, short *pw_level)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_GET_STATE);
	if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
		return HPI_ERROR_INVALID_HANDLE;
	hm.u.cu.attribute = HPI_TUNER_LEVEL_RAW;
	hpi_send_recv(&hm,
		/* (continuation of hpi_tuner_get_raw_rf_level) */
		&hr);
	if (pw_level)
		*pw_level = hr.u.cu.tuner.s_level;
	return hr.error;
}

/* Enumerate supported deemphasis settings for a band by index. */
u16 hpi_tuner_query_deemphasis(const u32 h_tuner, const u32 index,
	const u16 band, u32 *pdeemphasis)
{
	return hpi_control_query(h_tuner, HPI_TUNER_DEEMPHASIS, index, band,
		pdeemphasis);
}

/* Set the tuner deemphasis. */
u16 hpi_tuner_set_deemphasis(u32 h_control, u32 deemphasis)
{
	return hpi_control_param_set(h_control, HPI_TUNER_DEEMPHASIS,
		deemphasis, 0);
}

/* Read the tuner deemphasis. */
u16 hpi_tuner_get_deemphasis(u32 h_control, u32 *pdeemphasis)
{
	return hpi_control_param1_get(h_control, HPI_TUNER_DEEMPHASIS,
		pdeemphasis);
}

/* Query the bitmap of available tuner programs. */
u16 hpi_tuner_query_program(const u32 h_tuner, u32 *pbitmap_program)
{
	return hpi_control_query(h_tuner, HPI_TUNER_PROGRAM, 0, 0,
		pbitmap_program);
}

/* Select a tuner program. */
u16 hpi_tuner_set_program(u32 h_control, u32 program)
{
	return hpi_control_param_set(h_control, HPI_TUNER_PROGRAM, program,
		0);
}

/* Read the current tuner program. */
u16 hpi_tuner_get_program(u32 h_control, u32 *pprogram)
{
	return hpi_control_param1_get(h_control, HPI_TUNER_PROGRAM,
		pprogram);
}

/* Fetch the HD Radio DSP firmware version string. */
u16 hpi_tuner_get_hd_radio_dsp_version(u32 h_control, char *psz_dsp_version,
	const u32 string_size)
{
	return hpi_control_get_string(h_control,
		HPI_TUNER_HDRADIO_DSP_VERSION, psz_dsp_version, string_size);
}

/* Fetch the HD Radio SDK version string. */
u16 hpi_tuner_get_hd_radio_sdk_version(u32 h_control, char *psz_sdk_version,
	const u32 string_size)
{
	return hpi_control_get_string(h_control,
		HPI_TUNER_HDRADIO_SDK_VERSION, psz_sdk_version, string_size);
}

/* Read tuner status: mask in the high 16 bits, flags in the low 16.
 * Both outputs are written only when pw_status is non-NULL. */
u16 hpi_tuner_get_status(u32 h_control, u16 *pw_status_mask, u16 *pw_status)
{
	u32 status = 0;
	u16 error = 0;

	error = hpi_control_param1_get(h_control, HPI_TUNER_STATUS, &status);
	if (pw_status) {
		if (!error) {
			*pw_status_mask = (u16)(status >> 16);
			*pw_status = (u16)(status & 0xFFFF);
		} else {
			*pw_status_mask = 0;
			*pw_status = 0;
		}
	}
	return error;
}

/* Set a tuner mode value. */
u16 hpi_tuner_set_mode(u32 h_control, u32 mode, u32 value)
{
	return hpi_control_param_set(h_control, HPI_TUNER_MODE, mode, value);
}

/* Read a tuner mode value (continues on next source line). */
u16 hpi_tuner_get_mode(u32 h_control, u32 mode, u32 *pn_value)
{
	return hpi_control_param_get(h_control, HPI_TUNER_MODE, mode, 0,
		pn_value,
		/* (completes hpi_tuner_get_mode) */
		NULL);
}

/* Read HD Radio signal quality. */
u16 hpi_tuner_get_hd_radio_signal_quality(u32 h_control, u32 *pquality)
{
	return hpi_control_param1_get(h_control,
		HPI_TUNER_HDRADIO_SIGNAL_QUALITY, pquality);
}

/* Read the HD Radio analog/digital blend setting. */
u16 hpi_tuner_get_hd_radio_signal_blend(u32 h_control, u32 *pblend)
{
	return hpi_control_param1_get(h_control, HPI_TUNER_HDRADIO_BLEND,
		pblend);
}

/* Set the HD Radio analog/digital blend setting. */
u16 hpi_tuner_set_hd_radio_signal_blend(u32 h_control, const u32 blend)
{
	return hpi_control_param_set(h_control, HPI_TUNER_HDRADIO_BLEND,
		blend, 0);
}

/* Read one RDS data block (two data words plus block error rate) into
 * the caller's buffer, which must hold at least 12 bytes.
 * NOTE(review): the u32 stores assume p_data is 4-byte aligned —
 * confirm all callers pass an aligned buffer. */
u16 hpi_tuner_get_rds(u32 h_control, char *p_data)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL,
		HPI_CONTROL_GET_STATE);
	if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index))
		return HPI_ERROR_INVALID_HANDLE;
	hm.u.c.attribute = HPI_TUNER_RDS;
	hpi_send_recv(&hm, &hr);
	if (p_data) {
		*(u32 *)&p_data[0] = hr.u.cu.tuner.rds.data[0];
		*(u32 *)&p_data[4] = hr.u.cu.tuner.rds.data[1];
		*(u32 *)&p_data[8] = hr.u.cu.tuner.rds.bLER;
	}
	return hr.error;
}

/* PAD (program-associated data) string getters, all via the generic
 * string fetch helper. */
u16 hpi_pad_get_channel_name(u32 h_control, char *psz_string,
	const u32 data_length)
{
	return hpi_control_get_string(h_control, HPI_PAD_CHANNEL_NAME,
		psz_string, data_length);
}

u16 hpi_pad_get_artist(u32 h_control, char *psz_string,
	const u32 data_length)
{
	return hpi_control_get_string(h_control, HPI_PAD_ARTIST, psz_string,
		data_length);
}

u16 hpi_pad_get_title(u32 h_control, char *psz_string,
	const u32 data_length)
{
	return hpi_control_get_string(h_control, HPI_PAD_TITLE, psz_string,
		data_length);
}

u16 hpi_pad_get_comment(u32 h_control, char *psz_string,
	const u32 data_length)
{
	return hpi_control_get_string(h_control, HPI_PAD_COMMENT, psz_string,
		data_length);
}

/* Read the PAD program type code. */
u16 hpi_pad_get_program_type(u32 h_control, u32 *ppTY)
{
	return hpi_control_param1_get(h_control, HPI_PAD_PROGRAM_TYPE, ppTY);
}

/* Read the PAD RDS program identifier. */
u16 hpi_pad_get_rdsPI(u32 h_control, u32 *ppI)
{
	return hpi_control_param1_get(h_control, HPI_PAD_PROGRAM_ID, ppI);
}

/* Query the number of channels of a volume control (continues on the
 * next source line). */
u16 hpi_volume_query_channels(const u32 h_volume, u32 *p_channels)
{
	return
hpi_control_query(h_volume, HPI_VOLUME_NUM_CHANNELS, 0, 0, p_channels); } u16 hpi_volume_set_gain(u32 h_control, short an_log_gain[HPI_MAX_CHANNELS] ) { return hpi_control_log_set2(h_control, HPI_VOLUME_GAIN, an_log_gain[0], an_log_gain[1]); } u16 hpi_volume_get_gain(u32 h_control, short an_log_gain[HPI_MAX_CHANNELS] ) { return hpi_control_log_get2(h_control, HPI_VOLUME_GAIN, &an_log_gain[0], &an_log_gain[1]); } u16 hpi_volume_set_mute(u32 h_control, u32 mute) { return hpi_control_param_set(h_control, HPI_VOLUME_MUTE, mute, 0); } u16 hpi_volume_get_mute(u32 h_control, u32 *mute) { return hpi_control_param1_get(h_control, HPI_VOLUME_MUTE, mute); } u16 hpi_volume_query_range(u32 h_control, short *min_gain_01dB, short *max_gain_01dB, short *step_gain_01dB) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL, HPI_CONTROL_GET_STATE); if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hm.u.c.attribute = HPI_VOLUME_RANGE; hpi_send_recv(&hm, &hr); if (hr.error) { hr.u.c.an_log_value[0] = 0; hr.u.c.an_log_value[1] = 0; hr.u.c.param1 = 0; } if (min_gain_01dB) *min_gain_01dB = hr.u.c.an_log_value[0]; if (max_gain_01dB) *max_gain_01dB = hr.u.c.an_log_value[1]; if (step_gain_01dB) *step_gain_01dB = (short)hr.u.c.param1; return hr.error; } u16 hpi_volume_auto_fade_profile(u32 h_control, short an_stop_gain0_01dB[HPI_MAX_CHANNELS], u32 duration_ms, u16 profile) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL, HPI_CONTROL_SET_STATE); if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; memcpy(hm.u.c.an_log_value, an_stop_gain0_01dB, sizeof(short) * HPI_MAX_CHANNELS); hm.u.c.attribute = HPI_VOLUME_AUTOFADE; hm.u.c.param1 = duration_ms; hm.u.c.param2 = profile; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_volume_auto_fade(u32 h_control, short 
an_stop_gain0_01dB[HPI_MAX_CHANNELS], u32 duration_ms) { return hpi_volume_auto_fade_profile(h_control, an_stop_gain0_01dB, duration_ms, HPI_VOLUME_AUTOFADE_LOG); } u16 hpi_vox_set_threshold(u32 h_control, short an_gain0_01dB) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL, HPI_CONTROL_SET_STATE); if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hm.u.c.attribute = HPI_VOX_THRESHOLD; hm.u.c.an_log_value[0] = an_gain0_01dB; hpi_send_recv(&hm, &hr); return hr.error; } u16 hpi_vox_get_threshold(u32 h_control, short *an_gain0_01dB) { struct hpi_message hm; struct hpi_response hr; hpi_init_message_response(&hm, &hr, HPI_OBJ_CONTROL, HPI_CONTROL_GET_STATE); if (hpi_handle_indexes(h_control, &hm.adapter_index, &hm.obj_index)) return HPI_ERROR_INVALID_HANDLE; hm.u.c.attribute = HPI_VOX_THRESHOLD; hpi_send_recv(&hm, &hr); *an_gain0_01dB = hr.u.c.an_log_value[0]; return hr.error; }
gpl-2.0
1nv4d3r5/linux
drivers/firmware/efi/vars.c
1335
27525
/*
 * Originally from efivars.c
 *
 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/capability.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/smp.h>
#include <linux/efi.h>
#include <linux/sysfs.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/ucs2_string.h>

/* Private pointer to registered efivars */
static struct efivars *__efivars;

/* Cleared when buggy firmware (duplicate variable names) is detected. */
static bool efivar_wq_enabled = true;

DECLARE_WORK(efivar_work, NULL);
EXPORT_SYMBOL_GPL(efivar_work);

/*
 * Walk the EFI device path in @buffer and check that it is well formed:
 * each node's length must keep us inside the buffer, and the path must be
 * terminated by an end-of-entire-path node.
 */
static bool
validate_device_path(struct efi_variable *var, int match, u8 *buffer,
		     unsigned long len)
{
	struct efi_generic_dev_path *node;
	int offset = 0;

	node = (struct efi_generic_dev_path *)buffer;

	if (len < sizeof(*node))
		return false;

	while (offset <= len - sizeof(*node) &&
	       node->length >= sizeof(*node) &&
	       node->length <= len - offset) {
		offset += node->length;

		if ((node->type == EFI_DEV_END_PATH ||
		     node->type == EFI_DEV_END_PATH2) &&
		    node->sub_type == EFI_DEV_END_ENTIRE)
			return true;

		node = (struct efi_generic_dev_path *)(buffer + offset);
	}

	/*
	 * If we're here then either node->length pointed past the end
	 * of the buffer or we reached the end of the buffer without
	 * finding a device path end node.
	 */
	return false;
}

static bool
validate_boot_order(struct efi_variable *var, int match, u8 *buffer,
		    unsigned long len)
{
	/* An array of 16-bit integers */
	if ((len % 2) != 0)
		return false;

	return true;
}

/*
 * Validate a Boot####/Driver#### load option: a 4-byte attribute word, a
 * 16-bit file path list length, a NUL-terminated UCS-2 description, then
 * the device path itself.
 */
static bool
validate_load_option(struct efi_variable *var, int match, u8 *buffer,
		     unsigned long len)
{
	u16 filepathlength;
	int i, desclength = 0, namelen;

	namelen = ucs2_strnlen(var->VariableName, sizeof(var->VariableName));

	/* Either "Boot" or "Driver" followed by four digits of hex */
	for (i = match; i < match + 4; i++) {
		if (var->VariableName[i] > 127 ||
		    hex_to_bin(var->VariableName[i] & 0xff) < 0)
			return true;
	}

	/* Reject it if there's 4 digits of hex and then further content */
	if (namelen > match + 4)
		return false;

	/* A valid entry must be at least 8 bytes */
	if (len < 8)
		return false;

	filepathlength = buffer[4] | buffer[5] << 8;

	/*
	 * There's no stored length for the description, so it has to be
	 * found by hand
	 */
	desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;

	/* Each boot entry must have a descriptor */
	if (!desclength)
		return false;

	/*
	 * If the sum of the length of the description, the claimed filepath
	 * length and the original header are greater than the length of the
	 * variable, it's malformed
	 */
	if ((desclength + filepathlength + 6) > len)
		return false;

	/*
	 * And, finally, check the filepath
	 */
	return validate_device_path(var, match, buffer + desclength + 6,
				    filepathlength);
}

static bool
validate_uint16(struct efi_variable *var, int match, u8 *buffer,
		unsigned long len)
{
	/* A single 16-bit integer */
	if (len != 2)
		return false;

	return true;
}

/* Accept only a NUL-terminated, 7-bit ASCII byte string. */
static bool
validate_ascii_string(struct efi_variable *var, int match, u8 *buffer,
		      unsigned long len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (buffer[i] > 127)
			return false;

		if (buffer[i] == 0)
			return true;
	}

	return false;
}

struct variable_validate {
	char *name;
	bool (*validate)(struct efi_variable *var, int match, u8 *data,
			 unsigned long len);
};

/* Table of well-known variable names and their payload validators.
 * A trailing '*' in a name is a wildcard (e.g. "Boot0001"). */
static const struct variable_validate variable_validate[] = {
	{ "BootNext", validate_uint16 },
	{ "BootOrder", validate_boot_order },
	{ "DriverOrder", validate_boot_order },
	{ "Boot*", validate_load_option },
	{ "Driver*", validate_load_option },
	{ "ConIn", validate_device_path },
	{ "ConInDev", validate_device_path },
	{ "ConOut", validate_device_path },
	{ "ConOutDev", validate_device_path },
	{ "ErrOut", validate_device_path },
	{ "ErrOutDev", validate_device_path },
	{ "Timeout", validate_uint16 },
	{ "Lang", validate_ascii_string },
	{ "PlatformLang", validate_ascii_string },
	{ "", NULL },
};

/*
 * Validate @data for the variable named in @var against the table above.
 * Variables that don't match any known name are accepted (return true).
 */
bool
efivar_validate(struct efi_variable *var, u8 *data, unsigned long len)
{
	int i;
	u16 *unicode_name = var->VariableName;

	for (i = 0; variable_validate[i].validate != NULL; i++) {
		const char *name = variable_validate[i].name;
		int match;

		for (match = 0; ; match++) {
			char c = name[match];
			u16 u = unicode_name[match];

			/* All special variables are plain ascii */
			if (u > 127)
				return true;

			/* Wildcard in the matching name means we've matched */
			if (c == '*')
				return variable_validate[i].validate(var,
							match, data, len);

			/* Case sensitive match */
			if (c != u)
				break;

			/* Reached the end of the string while matching */
			if (!c)
				return variable_validate[i].validate(var,
							match, data, len);
		}
	}

	return true;
}
EXPORT_SYMBOL_GPL(efivar_validate);

/* Ask the firmware whether @size more bytes fit in the variable store. */
static efi_status_t
check_var_size(u32 attributes, unsigned long size)
{
	const struct efivar_operations *fops = __efivars->ops;

	if (!fops->query_variable_store)
		return EFI_UNSUPPORTED;

	return fops->query_variable_store(attributes, size);
}

/* Map an EFI status code to a negative kernel errno. */
static int efi_status_to_err(efi_status_t status)
{
	int err;

	switch (status) {
	case EFI_SUCCESS:
		err = 0;
		break;
	case EFI_INVALID_PARAMETER:
		err = -EINVAL;
		break;
	case EFI_OUT_OF_RESOURCES:
		err = -ENOSPC;
		break;
	case EFI_DEVICE_ERROR:
		err = -EIO;
		break;
	case EFI_WRITE_PROTECTED:
		err = -EROFS;
		break;
	case EFI_SECURITY_VIOLATION:
		err = -EACCES;
		break;
	case EFI_NOT_FOUND:
		err = -ENOENT;
		break;
	default:
		err = -EINVAL;
	}

	return err;
}

/* True if a (name, vendor) pair already exists on @head. */
static bool variable_is_present(efi_char16_t *variable_name,
				efi_guid_t *vendor, struct list_head *head)
{
	struct efivar_entry *entry, *n;
	unsigned long strsize1, strsize2;
	bool found = false;

	strsize1 = ucs2_strsize(variable_name, 1024);
	list_for_each_entry_safe(entry, n, head, list) {
		strsize2 = ucs2_strsize(entry->var.VariableName, 1024);
		if (strsize1 == strsize2 &&
		    !memcmp(variable_name, &(entry->var.VariableName),
			    strsize2) &&
		    !efi_guidcmp(entry->var.VendorGuid, *vendor)) {
			found = true;
			break;
		}
	}

	return found;
}

/*
 * Returns the size of variable_name, in bytes, including the
 * terminating NULL character, or variable_name_size if no NULL
 * character is found among the first variable_name_size bytes.
 */
static unsigned long var_name_strnsize(efi_char16_t *variable_name,
				       unsigned long variable_name_size)
{
	unsigned long len;
	efi_char16_t c;

	/*
	 * The variable name is, by definition, a NULL-terminated
	 * string, so make absolutely sure that variable_name_size is
	 * the value we expect it to be. If not, return the real size.
	 */
	for (len = 2; len <= variable_name_size; len += sizeof(c)) {
		c = variable_name[(len / sizeof(c)) - 1];
		if (!c)
			break;
	}

	return min(len, variable_name_size);
}

/*
 * Print a warning when duplicate EFI variables are encountered and
 * disable the sysfs workqueue since the firmware is buggy.
 */
static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid,
			     unsigned long len16)
{
	size_t i, len8 = len16 / sizeof(efi_char16_t);
	char *s8;

	/*
	 * Disable the workqueue since the algorithm it uses for
	 * detecting new variables won't work with this buggy
	 * implementation of GetNextVariableName().
	 */
	efivar_wq_enabled = false;

	/* Crude UCS-2 -> ASCII narrowing, for the warning text only. */
	s8 = kzalloc(len8, GFP_KERNEL);
	if (!s8)
		return;

	for (i = 0; i < len8; i++)
		s8[i] = s16[i];

	printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
	       s8, vendor_guid);
	kfree(s8);
}

/**
 * efivar_init - build the initial list of EFI variables
 * @func: callback function to invoke for every variable
 * @data: function-specific data to pass to @func
 * @atomic: do we need to execute the @func-loop atomically?
 * @duplicates: error if we encounter duplicates on @head?
 * @head: initialised head of variable list
 *
 * Get every EFI variable from the firmware and invoke @func. @func
 * should call efivar_entry_add() to build the list of variables.
 *
 * Returns 0 on success, or a kernel error code on failure.
 */
int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
		void *data, bool atomic, bool duplicates,
		struct list_head *head)
{
	const struct efivar_operations *ops = __efivars->ops;
	unsigned long variable_name_size = 1024;
	efi_char16_t *variable_name;
	efi_status_t status;
	efi_guid_t vendor_guid;
	int err = 0;

	variable_name = kzalloc(variable_name_size, GFP_KERNEL);
	if (!variable_name) {
		printk(KERN_ERR "efivars: Memory allocation failed.\n");
		return -ENOMEM;
	}

	spin_lock_irq(&__efivars->lock);

	/*
	 * Per EFI spec, the maximum storage allocated for both
	 * the variable name and variable data is 1024 bytes.
	 */

	do {
		variable_name_size = 1024;

		status = ops->get_next_variable(&variable_name_size,
						variable_name,
						&vendor_guid);
		switch (status) {
		case EFI_SUCCESS:
			/* Drop the lock around @func unless @atomic. */
			if (!atomic)
				spin_unlock_irq(&__efivars->lock);

			variable_name_size = var_name_strnsize(variable_name,
							       variable_name_size);

			/*
			 * Some firmware implementations return the
			 * same variable name on multiple calls to
			 * get_next_variable(). Terminate the loop
			 * immediately as there is no guarantee that
			 * we'll ever see a different variable name,
			 * and may end up looping here forever.
			 */
			if (duplicates &&
			    variable_is_present(variable_name, &vendor_guid,
						head)) {
				dup_variable_bug(variable_name, &vendor_guid,
						 variable_name_size);
				if (!atomic)
					spin_lock_irq(&__efivars->lock);

				status = EFI_NOT_FOUND;
				break;
			}

			err = func(variable_name, vendor_guid,
				   variable_name_size, data);
			if (err)
				status = EFI_NOT_FOUND;

			if (!atomic)
				spin_lock_irq(&__efivars->lock);

			break;
		case EFI_NOT_FOUND:
			/* Enumeration finished. */
			break;
		default:
			printk(KERN_WARNING "efivars: get_next_variable: status=%lx\n",
			       status);
			status = EFI_NOT_FOUND;
			break;
		}

	} while (status != EFI_NOT_FOUND);

	spin_unlock_irq(&__efivars->lock);

	kfree(variable_name);

	return err;
}
EXPORT_SYMBOL_GPL(efivar_init);

/**
 * efivar_entry_add - add entry to variable list
 * @entry: entry to add to list
 * @head: list head
 */
void efivar_entry_add(struct efivar_entry *entry, struct list_head *head)
{
	spin_lock_irq(&__efivars->lock);
	list_add(&entry->list, head);
	spin_unlock_irq(&__efivars->lock);
}
EXPORT_SYMBOL_GPL(efivar_entry_add);

/**
 * efivar_entry_remove - remove entry from variable list
 * @entry: entry to remove from list
 */
void efivar_entry_remove(struct efivar_entry *entry)
{
	spin_lock_irq(&__efivars->lock);
	list_del(&entry->list);
	spin_unlock_irq(&__efivars->lock);
}
EXPORT_SYMBOL_GPL(efivar_entry_remove);

/*
 * efivar_entry_list_del_unlock - remove entry from variable list
 * @entry: entry to remove
 *
 * Remove @entry from the variable list and release the list lock.
 *
 * NOTE: slightly weird locking semantics here - we expect to be
 * called with the efivars lock already held, and we release it before
 * returning. This is because this function is usually called after
 * set_variable() while the lock is still held.
 */
static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
{
	WARN_ON(!spin_is_locked(&__efivars->lock));

	list_del(&entry->list);
	spin_unlock_irq(&__efivars->lock);
}

/**
 * __efivar_entry_delete - delete an EFI variable
 * @entry: entry containing EFI variable to delete
 *
 * Delete the variable from the firmware but leave @entry on the
 * variable list.
 *
 * This function differs from efivar_entry_delete() because it does
 * not remove @entry from the variable list. Also, it is safe to be
 * called from within a efivar_entry_iter_begin() and
 * efivar_entry_iter_end() region, unlike efivar_entry_delete().
 *
 * Returns 0 on success, or a converted EFI status code if
 * set_variable() fails.
 */
int __efivar_entry_delete(struct efivar_entry *entry)
{
	const struct efivar_operations *ops = __efivars->ops;
	efi_status_t status;

	WARN_ON(!spin_is_locked(&__efivars->lock));

	/* Zero attributes + zero size deletes the variable per UEFI spec. */
	status = ops->set_variable(entry->var.VariableName,
				   &entry->var.VendorGuid, 0, 0, NULL);

	return efi_status_to_err(status);
}
EXPORT_SYMBOL_GPL(__efivar_entry_delete);

/**
 * efivar_entry_delete - delete variable and remove entry from list
 * @entry: entry containing variable to delete
 *
 * Delete the variable from the firmware and remove @entry from the
 * variable list. It is the caller's responsibility to free @entry
 * once we return.
 *
 * Returns 0 on success, or a converted EFI status code if
 * set_variable() fails.
 */
int efivar_entry_delete(struct efivar_entry *entry)
{
	const struct efivar_operations *ops = __efivars->ops;
	efi_status_t status;

	spin_lock_irq(&__efivars->lock);
	status = ops->set_variable(entry->var.VariableName,
				   &entry->var.VendorGuid, 0, 0, NULL);
	if (!(status == EFI_SUCCESS || status == EFI_NOT_FOUND)) {
		spin_unlock_irq(&__efivars->lock);
		return efi_status_to_err(status);
	}

	/* Also drops the lock taken above. */
	efivar_entry_list_del_unlock(entry);
	return 0;
}
EXPORT_SYMBOL_GPL(efivar_entry_delete);

/**
 * efivar_entry_set - call set_variable()
 * @entry: entry containing the EFI variable to write
 * @attributes: variable attributes
 * @size: size of @data buffer
 * @data: buffer containing variable data
 * @head: head of variable list
 *
 * Calls set_variable() for an EFI variable. If creating a new EFI
 * variable, this function is usually followed by efivar_entry_add().
 *
 * Before writing the variable, the remaining EFI variable storage
 * space is checked to ensure there is enough room available.
 *
 * If @head is not NULL a lookup is performed to determine whether
 * the entry is already on the list.
 *
 * Returns 0 on success, -EEXIST if a lookup is performed and the entry
 * already exists on the list, or a converted EFI status code if
 * set_variable() fails.
 */
int efivar_entry_set(struct efivar_entry *entry, u32 attributes,
		     unsigned long size, void *data, struct list_head *head)
{
	const struct efivar_operations *ops = __efivars->ops;
	efi_status_t status;
	efi_char16_t *name = entry->var.VariableName;
	efi_guid_t vendor = entry->var.VendorGuid;

	spin_lock_irq(&__efivars->lock);

	if (head && efivar_entry_find(name, vendor, head, false)) {
		spin_unlock_irq(&__efivars->lock);
		return -EEXIST;
	}

	status = check_var_size(attributes, size + ucs2_strsize(name, 1024));
	if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED)
		status = ops->set_variable(name, &vendor,
					   attributes, size, data);

	spin_unlock_irq(&__efivars->lock);

	return efi_status_to_err(status);
}
EXPORT_SYMBOL_GPL(efivar_entry_set);

/**
 * efivar_entry_set_safe - call set_variable() if enough space in firmware
 * @name: buffer containing the variable name
 * @vendor: variable vendor guid
 * @attributes: variable attributes
 * @block: can we block in this context?
 * @size: size of @data buffer
 * @data: buffer containing variable data
 *
 * Ensures there is enough free storage in the firmware for this variable, and
 * if so, calls set_variable(). If creating a new EFI variable, this function
 * is usually followed by efivar_entry_add().
 *
 * Returns 0 on success, -ENOSPC if the firmware does not have enough
 * space for set_variable() to succeed, or a converted EFI status code
 * if set_variable() fails.
 */
int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
			  bool block, unsigned long size, void *data)
{
	const struct efivar_operations *ops = __efivars->ops;
	unsigned long flags;
	efi_status_t status;

	if (!ops->query_variable_store)
		return -ENOSYS;

	/* In non-blocking contexts, bail out rather than spin. */
	if (!block) {
		if (!spin_trylock_irqsave(&__efivars->lock, flags))
			return -EBUSY;
	} else {
		spin_lock_irqsave(&__efivars->lock, flags);
	}

	status = check_var_size(attributes, size + ucs2_strsize(name, 1024));
	if (status != EFI_SUCCESS) {
		spin_unlock_irqrestore(&__efivars->lock, flags);
		return -ENOSPC;
	}

	status = ops->set_variable(name, &vendor, attributes, size, data);

	spin_unlock_irqrestore(&__efivars->lock, flags);

	return efi_status_to_err(status);
}
EXPORT_SYMBOL_GPL(efivar_entry_set_safe);

/**
 * efivar_entry_find - search for an entry
 * @name: the EFI variable name
 * @guid: the EFI variable vendor's guid
 * @head: head of the variable list
 * @remove: should we remove the entry from the list?
 *
 * Search for an entry on the variable list that has the EFI variable
 * name @name and vendor guid @guid. If an entry is found on the list
 * and @remove is true, the entry is removed from the list.
 *
 * The caller MUST call efivar_entry_iter_begin() and
 * efivar_entry_iter_end() before and after the invocation of this
 * function, respectively.
 *
 * Returns the entry if found on the list, %NULL otherwise.
 */
struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
				       struct list_head *head, bool remove)
{
	struct efivar_entry *entry, *n;
	int strsize1, strsize2;
	bool found = false;

	WARN_ON(!spin_is_locked(&__efivars->lock));

	list_for_each_entry_safe(entry, n, head, list) {
		strsize1 = ucs2_strsize(name, 1024);
		strsize2 = ucs2_strsize(entry->var.VariableName, 1024);
		if (strsize1 == strsize2 &&
		    !memcmp(name, &(entry->var.VariableName), strsize1) &&
		    !efi_guidcmp(guid, entry->var.VendorGuid)) {
			found = true;
			break;
		}
	}

	if (!found)
		return NULL;

	if (remove)
		list_del(&entry->list);

	return entry;
}
EXPORT_SYMBOL_GPL(efivar_entry_find);

/**
 * efivar_entry_size - obtain the size of a variable
 * @entry: entry for this variable
 * @size: location to store the variable's size
 */
int efivar_entry_size(struct efivar_entry *entry, unsigned long *size)
{
	const struct efivar_operations *ops = __efivars->ops;
	efi_status_t status;

	*size = 0;

	spin_lock_irq(&__efivars->lock);
	/* NULL data buffer: firmware reports the size via EFI_BUFFER_TOO_SMALL */
	status = ops->get_variable(entry->var.VariableName,
				   &entry->var.VendorGuid, NULL, size, NULL);
	spin_unlock_irq(&__efivars->lock);

	if (status != EFI_BUFFER_TOO_SMALL)
		return efi_status_to_err(status);

	return 0;
}
EXPORT_SYMBOL_GPL(efivar_entry_size);

/**
 * __efivar_entry_get - call get_variable()
 * @entry: read data for this variable
 * @attributes: variable attributes
 * @size: size of @data buffer
 * @data: buffer to store variable data
 *
 * The caller MUST call efivar_entry_iter_begin() and
 * efivar_entry_iter_end() before and after the invocation of this
 * function, respectively.
 */
int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
		       unsigned long *size, void *data)
{
	const struct efivar_operations *ops = __efivars->ops;
	efi_status_t status;

	WARN_ON(!spin_is_locked(&__efivars->lock));

	status = ops->get_variable(entry->var.VariableName,
				   &entry->var.VendorGuid,
				   attributes, size, data);

	return efi_status_to_err(status);
}
EXPORT_SYMBOL_GPL(__efivar_entry_get);

/**
 * efivar_entry_get - call get_variable()
 * @entry: read data for this variable
 * @attributes: variable attributes
 * @size: size of @data buffer
 * @data: buffer to store variable data
 */
int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
		     unsigned long *size, void *data)
{
	const struct efivar_operations *ops = __efivars->ops;
	efi_status_t status;

	spin_lock_irq(&__efivars->lock);
	status = ops->get_variable(entry->var.VariableName,
				   &entry->var.VendorGuid,
				   attributes, size, data);
	spin_unlock_irq(&__efivars->lock);

	return efi_status_to_err(status);
}
EXPORT_SYMBOL_GPL(efivar_entry_get);

/**
 * efivar_entry_set_get_size - call set_variable() and get new size (atomic)
 * @entry: entry containing variable to set and get
 * @attributes: attributes of variable to be written
 * @size: size of data buffer
 * @data: buffer containing data to write
 * @set: did the set_variable() call succeed?
 *
 * This is a pretty special (complex) function. See efivarfs_file_write().
 *
 * Atomically call set_variable() for @entry and if the call is
 * successful, return the new size of the variable from get_variable()
 * in @size. The success of set_variable() is indicated by @set.
 *
 * Returns 0 on success, -EINVAL if the variable data is invalid,
 * -ENOSPC if the firmware does not have enough available space, or a
 * converted EFI status code if either of set_variable() or
 * get_variable() fail.
 *
 * If the EFI variable does not exist when calling set_variable()
 * (EFI_NOT_FOUND), @entry is removed from the variable list.
 */
int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
			      unsigned long *size, void *data, bool *set)
{
	const struct efivar_operations *ops = __efivars->ops;
	efi_char16_t *name = entry->var.VariableName;
	efi_guid_t *vendor = &entry->var.VendorGuid;
	efi_status_t status;
	int err;

	*set = false;

	if (efivar_validate(&entry->var, data, *size) == false)
		return -EINVAL;

	/*
	 * The lock here protects the get_variable call, the conditional
	 * set_variable call, and removal of the variable from the efivars
	 * list (in the case of an authenticated delete).
	 */
	spin_lock_irq(&__efivars->lock);

	/*
	 * Ensure that the available space hasn't shrunk below the safe level
	 */
	status = check_var_size(attributes, *size + ucs2_strsize(name, 1024));
	if (status != EFI_SUCCESS) {
		if (status != EFI_UNSUPPORTED) {
			err = efi_status_to_err(status);
			goto out;
		}

		if (*size > 65536) {
			err = -ENOSPC;
			goto out;
		}
	}

	status = ops->set_variable(name, vendor, attributes, *size, data);
	if (status != EFI_SUCCESS) {
		err = efi_status_to_err(status);
		goto out;
	}

	*set = true;

	/*
	 * Writing to the variable may have caused a change in size (which
	 * could either be an append or an overwrite), or the variable to be
	 * deleted. Perform a GetVariable() so we can tell what actually
	 * happened.
	 */
	*size = 0;
	status = ops->get_variable(entry->var.VariableName,
				   &entry->var.VendorGuid,
				   NULL, size, NULL);

	if (status == EFI_NOT_FOUND)
		efivar_entry_list_del_unlock(entry);
	else
		spin_unlock_irq(&__efivars->lock);

	if (status && status != EFI_BUFFER_TOO_SMALL)
		return efi_status_to_err(status);

	return 0;

out:
	spin_unlock_irq(&__efivars->lock);
	return err;

}
EXPORT_SYMBOL_GPL(efivar_entry_set_get_size);

/**
 * efivar_entry_iter_begin - begin iterating the variable list
 *
 * Lock the variable list to prevent entry insertion and removal until
 * efivar_entry_iter_end() is called. This function is usually used in
 * conjunction with __efivar_entry_iter() or efivar_entry_iter().
 */
void efivar_entry_iter_begin(void)
{
	spin_lock_irq(&__efivars->lock);
}
EXPORT_SYMBOL_GPL(efivar_entry_iter_begin);

/**
 * efivar_entry_iter_end - finish iterating the variable list
 *
 * Unlock the variable list and allow modifications to the list again.
 */
void efivar_entry_iter_end(void)
{
	spin_unlock_irq(&__efivars->lock);
}
EXPORT_SYMBOL_GPL(efivar_entry_iter_end);

/**
 * __efivar_entry_iter - iterate over variable list
 * @func: callback function
 * @head: head of the variable list
 * @data: function-specific data to pass to callback
 * @prev: entry to begin iterating from
 *
 * Iterate over the list of EFI variables and call @func with every
 * entry on the list. It is safe for @func to remove entries in the
 * list via efivar_entry_delete().
 *
 * You MUST call efivar_enter_iter_begin() before this function, and
 * efivar_entry_iter_end() afterwards.
 *
 * It is possible to begin iteration from an arbitrary entry within
 * the list by passing @prev. @prev is updated on return to point to
 * the last entry passed to @func. To begin iterating from the
 * beginning of the list @prev must be %NULL.
 *
 * The restrictions for @func are the same as documented for
 * efivar_entry_iter().
 */
int __efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
			struct list_head *head, void *data,
			struct efivar_entry **prev)
{
	struct efivar_entry *entry, *n;
	int err = 0;

	if (!prev || !*prev) {
		list_for_each_entry_safe(entry, n, head, list) {
			err = func(entry, data);
			if (err)
				break;
		}

		if (prev)
			*prev = entry;

		return err;
	}

	list_for_each_entry_safe_continue((*prev), n, head, list) {
		err = func(*prev, data);
		if (err)
			break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__efivar_entry_iter);

/**
 * efivar_entry_iter - iterate over variable list
 * @func: callback function
 * @head: head of variable list
 * @data: function-specific data to pass to callback
 *
 * Iterate over the list of EFI variables and call @func with every
 * entry on the list. It is safe for @func to remove entries in the
 * list via efivar_entry_delete() while iterating.
 *
 * Some notes for the callback function:
 * - a non-zero return value indicates an error and terminates the loop
 * - @func is called from atomic context
 */
int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
		      struct list_head *head, void *data)
{
	int err = 0;

	efivar_entry_iter_begin();
	err = __efivar_entry_iter(func, head, data, NULL);
	efivar_entry_iter_end();

	return err;
}
EXPORT_SYMBOL_GPL(efivar_entry_iter);

/**
 * efivars_kobject - get the kobject for the registered efivars
 *
 * If efivars_register() has not been called we return NULL,
 * otherwise return the kobject used at registration time.
 */
struct kobject *efivars_kobject(void)
{
	if (!__efivars)
		return NULL;

	return __efivars->kobject;
}
EXPORT_SYMBOL_GPL(efivars_kobject);

/**
 * efivar_run_worker - schedule the efivar worker thread
 */
void efivar_run_worker(void)
{
	if (efivar_wq_enabled)
		schedule_work(&efivar_work);
}
EXPORT_SYMBOL_GPL(efivar_run_worker);

/**
 * efivars_register - register an efivars
 * @efivars: efivars to register
 * @ops: efivars operations
 * @kobject: @efivars-specific kobject
 *
 * Only a single efivars can be registered at any time.
 */
int efivars_register(struct efivars *efivars,
		     const struct efivar_operations *ops,
		     struct kobject *kobject)
{
	spin_lock_init(&efivars->lock);
	efivars->ops = ops;
	efivars->kobject = kobject;

	__efivars = efivars;

	return 0;
}
EXPORT_SYMBOL_GPL(efivars_register);

/**
 * efivars_unregister - unregister an efivars
 * @efivars: efivars to unregister
 *
 * The caller must have already removed every entry from the list,
 * failure to do so is an error.
 */
int efivars_unregister(struct efivars *efivars)
{
	int rv;

	if (!__efivars) {
		printk(KERN_ERR "efivars not registered\n");
		rv = -EINVAL;
		goto out;
	}

	if (__efivars != efivars) {
		rv = -EINVAL;
		goto out;
	}

	__efivars = NULL;

	rv = 0;
out:
	return rv;
}
EXPORT_SYMBOL_GPL(efivars_unregister);
gpl-2.0
quadcores/cbs_4.2.4
drivers/net/wireless/wl3501_cs.c
1591
54896
/* * WL3501 Wireless LAN PCMCIA Card Driver for Linux * Written originally for Linux 2.0.30 by Fox Chen, mhchen@golf.ccl.itri.org.tw * Ported to 2.2, 2.4 & 2.5 by Arnaldo Carvalho de Melo <acme@conectiva.com.br> * Wireless extensions in 2.4 by Gustavo Niemeyer <niemeyer@conectiva.com> * * References used by Fox Chen while writing the original driver for 2.0.30: * * 1. WL24xx packet drivers (tooasm.asm) * 2. Access Point Firmware Interface Specification for IEEE 802.11 SUTRO * 3. IEEE 802.11 * 4. Linux network driver (/usr/src/linux/drivers/net) * 5. ISA card driver - wl24.c * 6. Linux PCMCIA skeleton driver - skeleton.c * 7. Linux PCMCIA 3c589 network driver - 3c589_cs.c * * Tested with WL2400 firmware 1.2, Linux 2.0.30, and pcmcia-cs-2.9.12 * 1. Performance: about 165 Kbytes/sec in TCP/IP with Ad-Hoc mode. * rsh 192.168.1.3 "dd if=/dev/zero bs=1k count=1000" > /dev/null * (Specification 2M bits/sec. is about 250 Kbytes/sec., but we must deduct * ETHER/IP/UDP/TCP header, and acknowledgement overhead) * * Tested with Planet AP in 2.4.17, 184 Kbytes/s in UDP in Infrastructure mode, * 173 Kbytes/s in TCP. 
* * Tested with Planet AP in 2.5.73-bk, 216 Kbytes/s in Infrastructure mode * with a SMP machine (dual pentium 100), using pktgen, 432 pps (pkt_size = 60) */ #include <linux/delay.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/fcntl.h> #include <linux/if_arp.h> #include <linux/ioport.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/wireless.h> #include <net/cfg80211.h> #include <net/iw_handler.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> #include <asm/io.h> #include <asm/uaccess.h> #include "wl3501.h" #ifndef __i386__ #define slow_down_io() #endif /* For rough constant delay */ #define WL3501_NOPLOOP(n) { int x = 0; while (x++ < n) slow_down_io(); } #define wl3501_outb(a, b) { outb(a, b); slow_down_io(); } #define wl3501_outb_p(a, b) { outb_p(a, b); slow_down_io(); } #define wl3501_outsb(a, b, c) { outsb(a, b, c); slow_down_io(); } #define WL3501_RELEASE_TIMEOUT (25 * HZ) #define WL3501_MAX_ADHOC_TRIES 16 #define WL3501_RESUME 0 #define WL3501_SUSPEND 1 static int wl3501_config(struct pcmcia_device *link); static void wl3501_release(struct pcmcia_device *link); static const struct { int reg_domain; int min, max, deflt; } iw_channel_table[] = { { .reg_domain = IW_REG_DOMAIN_FCC, .min = 1, .max = 11, .deflt = 1, }, { .reg_domain = IW_REG_DOMAIN_DOC, .min = 1, .max = 11, .deflt = 1, }, { .reg_domain = IW_REG_DOMAIN_ETSI, .min = 1, .max = 13, .deflt = 1, }, { .reg_domain = IW_REG_DOMAIN_SPAIN, .min = 10, .max = 11, .deflt = 10, }, { .reg_domain = IW_REG_DOMAIN_FRANCE, .min = 10, .max = 13, .deflt = 10, }, { .reg_domain = IW_REG_DOMAIN_MKK, .min = 14, .max = 14, .deflt = 14, }, { .reg_domain = IW_REG_DOMAIN_MKK1, .min = 1, .max = 14, .deflt = 1, }, { .reg_domain = IW_REG_DOMAIN_ISRAEL, .min = 3, .max = 9, .deflt = 9, }, }; /** * 
iw_valid_channel - validate channel in regulatory domain
 * @reg_domain - regulatory domain
 * @channel - channel to validate
 *
 * Returns 0 if invalid in the specified regulatory domain, non-zero if valid.
 */
static int iw_valid_channel(int reg_domain, int channel)
{
	int i, rc = 0;

	for (i = 0; i < ARRAY_SIZE(iw_channel_table); i++)
		if (reg_domain == iw_channel_table[i].reg_domain) {
			rc = channel >= iw_channel_table[i].min &&
			     channel <= iw_channel_table[i].max;
			break;
		}
	return rc;
}

/**
 * iw_default_channel - get default channel for a regulatory domain
 * @reg_domain - regulatory domain
 *
 * Returns the default channel for a regulatory domain
 */
static int iw_default_channel(int reg_domain)
{
	int i, rc = 1;	/* channel 1 is the fallback for unknown domains */

	for (i = 0; i < ARRAY_SIZE(iw_channel_table); i++)
		if (reg_domain == iw_channel_table[i].reg_domain) {
			rc = iw_channel_table[i].deflt;
			break;
		}
	return rc;
}

/* Fill a management information element with the given id, length, payload. */
static void iw_set_mgmt_info_element(enum iw_mgmt_info_element_ids id,
				     struct iw_mgmt_info_element *el,
				     void *value, int len)
{
	el->id  = id;
	el->len = len;
	memcpy(el->data, value, len);
}

/* Copy one management information element into another. */
static void iw_copy_mgmt_info_element(struct iw_mgmt_info_element *to,
				      struct iw_mgmt_info_element *from)
{
	iw_set_mgmt_info_element(from->id, to, from->data, from->len);
}

/* Select which card SRAM page subsequent indirect accesses will address. */
static inline void wl3501_switch_page(struct wl3501_card *this, u8 page)
{
	wl3501_outb(page, this->base_addr + WL3501_NIC_BSS);
}

/*
 * Get Ethernet MAC address.
 *
 * WARNING: We switch to FPAGE0 and switch back again.
 * Make sure there is no other WL function being called by the ISR.
 */
static int wl3501_get_flash_mac_addr(struct wl3501_card *this)
{
	int base_addr = this->base_addr;

	/* get MAC addr */
	wl3501_outb(WL3501_BSS_FPAGE3, base_addr + WL3501_NIC_BSS); /* BSS */
	wl3501_outb(0x00, base_addr + WL3501_NIC_LMAL);	/* LMAL */
	wl3501_outb(0x40, base_addr + WL3501_NIC_LMAH);	/* LMAH */

	/* wait for reading EEPROM */
	WL3501_NOPLOOP(100);
	this->mac_addr[0] = inb(base_addr + WL3501_NIC_IODPA);
	WL3501_NOPLOOP(100);
	this->mac_addr[1] = inb(base_addr + WL3501_NIC_IODPA);
	WL3501_NOPLOOP(100);
	this->mac_addr[2] = inb(base_addr + WL3501_NIC_IODPA);
	WL3501_NOPLOOP(100);
	this->mac_addr[3] = inb(base_addr + WL3501_NIC_IODPA);
	WL3501_NOPLOOP(100);
	this->mac_addr[4] = inb(base_addr + WL3501_NIC_IODPA);
	WL3501_NOPLOOP(100);
	this->mac_addr[5] = inb(base_addr + WL3501_NIC_IODPA);
	WL3501_NOPLOOP(100);
	/* the byte after the MAC address holds the regulatory domain */
	this->reg_domain = inb(base_addr + WL3501_NIC_IODPA);
	WL3501_NOPLOOP(100);
	wl3501_outb(WL3501_BSS_FPAGE0, base_addr + WL3501_NIC_BSS);
	wl3501_outb(0x04, base_addr + WL3501_NIC_LMAL);
	wl3501_outb(0x40, base_addr + WL3501_NIC_LMAH);
	WL3501_NOPLOOP(100);
	this->version[0] = inb(base_addr + WL3501_NIC_IODPA);
	WL3501_NOPLOOP(100);
	this->version[1] = inb(base_addr + WL3501_NIC_IODPA);
	/* switch to SRAM Page 0 (for safety) */
	wl3501_switch_page(this, WL3501_BSS_SPAGE0);

	/* The MAC addr should be 00:60:... */
	return this->mac_addr[0] == 0x00 && this->mac_addr[1] == 0x60;
}

/**
 * wl3501_set_to_wla - Move 'size' bytes from PC to card
 * @dest: Card addressing space
 * @src: PC addressing space
 * @size: Bytes to move
 *
 * Move 'size' bytes from PC to card. (Shouldn't be interrupted)
 */
static void wl3501_set_to_wla(struct wl3501_card *this, u16 dest, void *src,
			      int size)
{
	/* switch to SRAM Page 0; bit 15 of the address selects page 1 */
	wl3501_switch_page(this, (dest & 0x8000) ?
WL3501_BSS_SPAGE1 : WL3501_BSS_SPAGE0);
	/* set LMAL and LMAH */
	wl3501_outb(dest & 0xff, this->base_addr + WL3501_NIC_LMAL);
	wl3501_outb(((dest >> 8) & 0x7f), this->base_addr + WL3501_NIC_LMAH);

	/* rep out to Port A */
	wl3501_outsb(this->base_addr + WL3501_NIC_IODPA, src, size);
}

/**
 * wl3501_get_from_wla - Move 'size' bytes from card to PC
 * @src: Card addressing space
 * @dest: PC addressing space
 * @size: Bytes to move
 *
 * Move 'size' bytes from card to PC. (Shouldn't be interrupted)
 */
static void wl3501_get_from_wla(struct wl3501_card *this, u16 src, void *dest,
				int size)
{
	/* switch to SRAM Page 0; bit 15 of the address selects page 1 */
	wl3501_switch_page(this, (src & 0x8000) ? WL3501_BSS_SPAGE1 :
			   WL3501_BSS_SPAGE0);
	/* set LMAL and LMAH */
	wl3501_outb(src & 0xff, this->base_addr + WL3501_NIC_LMAL);
	wl3501_outb((src >> 8) & 0x7f, this->base_addr + WL3501_NIC_LMAH);

	/* rep get from Port A */
	insb(this->base_addr + WL3501_NIC_IODPA, dest, size);
}

/*
 * Get/Allocate a free Tx Data Buffer
 *
 *  *--------------*-----------------*----------------------------------*
 *  |    PLCP      |    MAC Header   |  DST  SRC         Data ...       |
 *  |  (24 bytes)  |    (30 bytes)   |  (6)  (6)   (Ethernet Row Data)  |
 *  *--------------*-----------------*----------------------------------*
 *  \               \- IEEE 802.11 -/ \-------------- len --------------/
 *   \-struct wl3501_80211_tx_hdr--/   \-------- Ethernet Frame -------/
 *
 * Return = Position in Card
 */
static u16 wl3501_get_tx_buffer(struct wl3501_card *this, u16 len)
{
	u16 next, blk_cnt = 0, zero = 0;
	/*
	 * NOTE(review): tx buffers are chained card blocks carrying 254
	 * payload bytes each, linked by a leading 2-byte next pointer --
	 * see wl3501_free_tx_buffer() and wl3501_init_firmware().
	 */
	u16 full_len = sizeof(struct wl3501_80211_tx_hdr) + len;
	u16 ret = 0;

	if (full_len > this->tx_buffer_cnt * 254)
		goto out;	/* not enough free blocks in total */
	ret = this->tx_buffer_head;
	while (full_len) {
		if (full_len < 254)
			full_len = 0;
		else
			full_len -= 254;
		wl3501_get_from_wla(this, this->tx_buffer_head, &next,
				    sizeof(next));
		if (!full_len)	/* last block: terminate the chain */
			wl3501_set_to_wla(this, this->tx_buffer_head, &zero,
					  sizeof(zero));
		this->tx_buffer_head = next;
		blk_cnt++;
		/* if buffer is not enough */
		if (!next && full_len) {
			/* roll back: put the head where we started */
			this->tx_buffer_head = ret;
			ret = 0;
			goto out;
		}
	}
	this->tx_buffer_cnt -= blk_cnt;
out:
	return ret;
}

/*
 * Free an allocated Tx Buffer. ptr must be correct position.
*/ static void wl3501_free_tx_buffer(struct wl3501_card *this, u16 ptr) { /* check if all space is not free */ if (!this->tx_buffer_head) this->tx_buffer_head = ptr; else wl3501_set_to_wla(this, this->tx_buffer_tail, &ptr, sizeof(ptr)); while (ptr) { u16 next; this->tx_buffer_cnt++; wl3501_get_from_wla(this, ptr, &next, sizeof(next)); this->tx_buffer_tail = ptr; ptr = next; } } static int wl3501_esbq_req_test(struct wl3501_card *this) { u8 tmp = 0; wl3501_get_from_wla(this, this->esbq_req_head + 3, &tmp, sizeof(tmp)); return tmp & 0x80; } static void wl3501_esbq_req(struct wl3501_card *this, u16 *ptr) { u16 tmp = 0; wl3501_set_to_wla(this, this->esbq_req_head, ptr, 2); wl3501_set_to_wla(this, this->esbq_req_head + 2, &tmp, sizeof(tmp)); this->esbq_req_head += 4; if (this->esbq_req_head >= this->esbq_req_end) this->esbq_req_head = this->esbq_req_start; } static int wl3501_esbq_exec(struct wl3501_card *this, void *sig, int sig_size) { int rc = -EIO; if (wl3501_esbq_req_test(this)) { u16 ptr = wl3501_get_tx_buffer(this, sig_size); if (ptr) { wl3501_set_to_wla(this, ptr, sig, sig_size); wl3501_esbq_req(this, &ptr); rc = 0; } } return rc; } static int wl3501_get_mib_value(struct wl3501_card *this, u8 index, void *bf, int size) { struct wl3501_get_req sig = { .sig_id = WL3501_SIG_GET_REQ, .mib_attrib = index, }; unsigned long flags; int rc = -EIO; spin_lock_irqsave(&this->lock, flags); if (wl3501_esbq_req_test(this)) { u16 ptr = wl3501_get_tx_buffer(this, sizeof(sig)); if (ptr) { wl3501_set_to_wla(this, ptr, &sig, sizeof(sig)); wl3501_esbq_req(this, &ptr); this->sig_get_confirm.mib_status = 255; spin_unlock_irqrestore(&this->lock, flags); rc = wait_event_interruptible(this->wait, this->sig_get_confirm.mib_status != 255); if (!rc) memcpy(bf, this->sig_get_confirm.mib_value, size); goto out; } } spin_unlock_irqrestore(&this->lock, flags); out: return rc; } static int wl3501_pwr_mgmt(struct wl3501_card *this, int suspend) { struct wl3501_pwr_mgmt_req sig = { .sig_id = 
WL3501_SIG_PWR_MGMT_REQ, .pwr_save = suspend, .wake_up = !suspend, .receive_dtims = 10, }; unsigned long flags; int rc = -EIO; spin_lock_irqsave(&this->lock, flags); if (wl3501_esbq_req_test(this)) { u16 ptr = wl3501_get_tx_buffer(this, sizeof(sig)); if (ptr) { wl3501_set_to_wla(this, ptr, &sig, sizeof(sig)); wl3501_esbq_req(this, &ptr); this->sig_pwr_mgmt_confirm.status = 255; spin_unlock_irqrestore(&this->lock, flags); rc = wait_event_interruptible(this->wait, this->sig_pwr_mgmt_confirm.status != 255); printk(KERN_INFO "%s: %s status=%d\n", __func__, suspend ? "suspend" : "resume", this->sig_pwr_mgmt_confirm.status); goto out; } } spin_unlock_irqrestore(&this->lock, flags); out: return rc; } /** * wl3501_send_pkt - Send a packet. * @this - card * * Send a packet. * * data = Ethernet raw frame. (e.g. data[0] - data[5] is Dest MAC Addr, * data[6] - data[11] is Src MAC Addr) * Ref: IEEE 802.11 */ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len) { u16 bf, sig_bf, next, tmplen, pktlen; struct wl3501_md_req sig = { .sig_id = WL3501_SIG_MD_REQ, }; u8 *pdata = (char *)data; int rc = -EIO; if (wl3501_esbq_req_test(this)) { sig_bf = wl3501_get_tx_buffer(this, sizeof(sig)); rc = -ENOMEM; if (!sig_bf) /* No free buffer available */ goto out; bf = wl3501_get_tx_buffer(this, len + 26 + 24); if (!bf) { /* No free buffer available */ wl3501_free_tx_buffer(this, sig_bf); goto out; } rc = 0; memcpy(&sig.daddr[0], pdata, 12); pktlen = len - 12; pdata += 12; sig.data = bf; if (((*pdata) * 256 + (*(pdata + 1))) > 1500) { u8 addr4[ETH_ALEN] = { [0] = 0xAA, [1] = 0xAA, [2] = 0x03, [4] = 0x00, }; wl3501_set_to_wla(this, bf + 2 + offsetof(struct wl3501_tx_hdr, addr4), addr4, sizeof(addr4)); sig.size = pktlen + 24 + 4 + 6; if (pktlen > (254 - sizeof(struct wl3501_tx_hdr))) { tmplen = 254 - sizeof(struct wl3501_tx_hdr); pktlen -= tmplen; } else { tmplen = pktlen; pktlen = 0; } wl3501_set_to_wla(this, bf + 2 + sizeof(struct wl3501_tx_hdr), pdata, tmplen); pdata += 
tmplen; wl3501_get_from_wla(this, bf, &next, sizeof(next)); bf = next; } else { sig.size = pktlen + 24 + 4 - 2; pdata += 2; pktlen -= 2; if (pktlen > (254 - sizeof(struct wl3501_tx_hdr) + 6)) { tmplen = 254 - sizeof(struct wl3501_tx_hdr) + 6; pktlen -= tmplen; } else { tmplen = pktlen; pktlen = 0; } wl3501_set_to_wla(this, bf + 2 + offsetof(struct wl3501_tx_hdr, addr4), pdata, tmplen); pdata += tmplen; wl3501_get_from_wla(this, bf, &next, sizeof(next)); bf = next; } while (pktlen > 0) { if (pktlen > 254) { tmplen = 254; pktlen -= 254; } else { tmplen = pktlen; pktlen = 0; } wl3501_set_to_wla(this, bf + 2, pdata, tmplen); pdata += tmplen; wl3501_get_from_wla(this, bf, &next, sizeof(next)); bf = next; } wl3501_set_to_wla(this, sig_bf, &sig, sizeof(sig)); wl3501_esbq_req(this, &sig_bf); } out: return rc; } static int wl3501_mgmt_resync(struct wl3501_card *this) { struct wl3501_resync_req sig = { .sig_id = WL3501_SIG_RESYNC_REQ, }; return wl3501_esbq_exec(this, &sig, sizeof(sig)); } static inline int wl3501_fw_bss_type(struct wl3501_card *this) { return this->net_type == IW_MODE_INFRA ? WL3501_NET_TYPE_INFRA : WL3501_NET_TYPE_ADHOC; } static inline int wl3501_fw_cap_info(struct wl3501_card *this) { return this->net_type == IW_MODE_INFRA ? 
WL3501_MGMT_CAPABILITY_ESS :
	       WL3501_MGMT_CAPABILITY_IBSS;
}

/* Kick off an active scan, dwelling chan_time on each channel. */
static int wl3501_mgmt_scan(struct wl3501_card *this, u16 chan_time)
{
	struct wl3501_scan_req sig = {
		.sig_id		= WL3501_SIG_SCAN_REQ,
		.scan_type	= WL3501_SCAN_TYPE_ACTIVE,
		.probe_delay	= 0x10,
		.min_chan_time	= chan_time,
		.max_chan_time	= chan_time,
		.bss_type	= wl3501_fw_bss_type(this),
	};

	this->bss_cnt = this->join_sta_bss = 0;
	return wl3501_esbq_exec(this, &sig, sizeof(sig));
}

/* Try to join the scanned BSS stored at index @stas. */
static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
{
	struct wl3501_join_req sig = {
		.sig_id	 = WL3501_SIG_JOIN_REQ,
		.timeout = 10,
		.ds_pset = {
			.el = {
				.id  = IW_MGMT_INFO_ELEMENT_DS_PARAMETER_SET,
				.len = 1,
			},
			.chan = this->chan,
		},
	};

	/*
	 * NOTE(review): raw 72-byte copy starting at beacon_period --
	 * presumably the beacon_period..ssid region shared by the join
	 * request and the scanned BSS record; verify against wl3501.h
	 * before touching this constant.
	 */
	memcpy(&sig.beacon_period, &this->bss_set[stas].beacon_period, 72);
	return wl3501_esbq_exec(this, &sig, sizeof(sig));
}

/* Start our own (Ad-Hoc) BSS once joining scanned ones has failed. */
static int wl3501_mgmt_start(struct wl3501_card *this)
{
	struct wl3501_start_req sig = {
		.sig_id		= WL3501_SIG_START_REQ,
		.beacon_period	= 400,
		.dtim_period	= 1,
		.ds_pset = {
			.el = {
				.id  = IW_MGMT_INFO_ELEMENT_DS_PARAMETER_SET,
				.len = 1,
			},
			.chan = this->chan,
		},
		.bss_basic_rset = {
			.el = {
				.id  = IW_MGMT_INFO_ELEMENT_SUPPORTED_RATES,
				.len = 2,
			},
			.data_rate_labels = {
				[0] = IW_MGMT_RATE_LABEL_MANDATORY |
				      IW_MGMT_RATE_LABEL_1MBIT,
				[1] = IW_MGMT_RATE_LABEL_MANDATORY |
				      IW_MGMT_RATE_LABEL_2MBIT,
			},
		},
		.operational_rset = {
			.el = {
				.id  = IW_MGMT_INFO_ELEMENT_SUPPORTED_RATES,
				.len = 2,
			},
			.data_rate_labels = {
				[0] = IW_MGMT_RATE_LABEL_MANDATORY |
				      IW_MGMT_RATE_LABEL_1MBIT,
				[1] = IW_MGMT_RATE_LABEL_MANDATORY |
				      IW_MGMT_RATE_LABEL_2MBIT,
			},
		},
		.ibss_pset = {
			.el = {
				.id  = IW_MGMT_INFO_ELEMENT_IBSS_PARAMETER_SET,
				.len = 2,
			},
			.atim_window = 10,
		},
		.bss_type = wl3501_fw_bss_type(this),
		.cap_info = wl3501_fw_cap_info(this),
	};

	iw_copy_mgmt_info_element(&sig.ssid.el, &this->essid.el);
	iw_copy_mgmt_info_element(&this->keep_essid.el, &this->essid.el);
	return wl3501_esbq_exec(this, &sig, sizeof(sig));
}

/*
 * Handle SCAN_CONFIRM: record a newly seen matching BSS, or -- on scan
 * timeout -- walk the collected BSS list trying to join each entry,
 * falling back to starting our own BSS in Ad-Hoc mode.
 */
static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
{
	u16 i = 0;
	int matchflag = 0;
	struct wl3501_scan_confirm sig;

	pr_debug("entry");
	wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
	if (sig.status == WL3501_STATUS_SUCCESS) {
		pr_debug("success");
		if ((this->net_type == IW_MODE_INFRA &&
		     (sig.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
		    (this->net_type == IW_MODE_ADHOC &&
		     (sig.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
		    this->net_type == IW_MODE_AUTO) {
			/* an empty ESSID or the literal "ANY" matches all */
			if (!this->essid.el.len)
				matchflag = 1;
			else if (this->essid.el.len == 3 &&
				 !memcmp(this->essid.essid, "ANY", 3))
				matchflag = 1;
			else if (this->essid.el.len != sig.ssid.el.len)
				matchflag = 0;
			else if (memcmp(this->essid.essid, sig.ssid.essid,
					this->essid.el.len))
				matchflag = 0;
			else
				matchflag = 1;
			if (matchflag) {
				/* skip BSSIDs we have already recorded */
				for (i = 0; i < this->bss_cnt; i++) {
					if (ether_addr_equal_unaligned(this->bss_set[i].bssid,
								       sig.bssid)) {
						matchflag = 0;
						break;
					}
				}
			}
			/*
			 * NOTE(review): the magic bound 20 and the 73-byte
			 * raw copy presumably match the bss_set[] array size
			 * and the beacon_period..ssid record layout -- verify
			 * against wl3501.h before changing either constant.
			 */
			if (matchflag && (i < 20)) {
				memcpy(&this->bss_set[i].beacon_period,
				       &sig.beacon_period, 73);
				this->bss_cnt++;
				this->rssi = sig.rssi;
			}
		}
	} else if (sig.status == WL3501_STATUS_TIMEOUT) {
		pr_debug("timeout");
		this->join_sta_bss = 0;
		for (i = this->join_sta_bss; i < this->bss_cnt; i++)
			if (!wl3501_mgmt_join(this, i))
				break;
		this->join_sta_bss = i;
		if (this->join_sta_bss == this->bss_cnt) {
			if (this->net_type == IW_MODE_INFRA)
				wl3501_mgmt_scan(this, 100);
			else {
				this->adhoc_times++;
				if (this->adhoc_times > WL3501_MAX_ADHOC_TRIES)
					wl3501_mgmt_start(this);
				else
					wl3501_mgmt_scan(this, 100);
			}
		}
	}
}

/**
 * wl3501_block_interrupt - Mask interrupt from SUTRO
 * @this - card
 *
 * Mask interrupt from SUTRO. (i.e.
SUTRO cannot interrupt the HOST) * Return: 1 if interrupt is originally enabled */ static int wl3501_block_interrupt(struct wl3501_card *this) { u8 old = inb(this->base_addr + WL3501_NIC_GCR); u8 new = old & (~(WL3501_GCR_ECINT | WL3501_GCR_INT2EC | WL3501_GCR_ENECINT)); wl3501_outb(new, this->base_addr + WL3501_NIC_GCR); return old & WL3501_GCR_ENECINT; } /** * wl3501_unblock_interrupt - Enable interrupt from SUTRO * @this - card * * Enable interrupt from SUTRO. (i.e. SUTRO can interrupt the HOST) * Return: 1 if interrupt is originally enabled */ static int wl3501_unblock_interrupt(struct wl3501_card *this) { u8 old = inb(this->base_addr + WL3501_NIC_GCR); u8 new = (old & ~(WL3501_GCR_ECINT | WL3501_GCR_INT2EC)) | WL3501_GCR_ENECINT; wl3501_outb(new, this->base_addr + WL3501_NIC_GCR); return old & WL3501_GCR_ENECINT; } /** * wl3501_receive - Receive data from Receive Queue. * * Receive data from Receive Queue. * * @this: card * @bf: address of host * @size: size of buffer. */ static u16 wl3501_receive(struct wl3501_card *this, u8 *bf, u16 size) { u16 next_addr, next_addr1; u8 *data = bf + 12; size -= 12; wl3501_get_from_wla(this, this->start_seg + 2, &next_addr, sizeof(next_addr)); if (size > WL3501_BLKSZ - sizeof(struct wl3501_rx_hdr)) { wl3501_get_from_wla(this, this->start_seg + sizeof(struct wl3501_rx_hdr), data, WL3501_BLKSZ - sizeof(struct wl3501_rx_hdr)); size -= WL3501_BLKSZ - sizeof(struct wl3501_rx_hdr); data += WL3501_BLKSZ - sizeof(struct wl3501_rx_hdr); } else { wl3501_get_from_wla(this, this->start_seg + sizeof(struct wl3501_rx_hdr), data, size); size = 0; } while (size > 0) { if (size > WL3501_BLKSZ - 5) { wl3501_get_from_wla(this, next_addr + 5, data, WL3501_BLKSZ - 5); size -= WL3501_BLKSZ - 5; data += WL3501_BLKSZ - 5; wl3501_get_from_wla(this, next_addr + 2, &next_addr1, sizeof(next_addr1)); next_addr = next_addr1; } else { wl3501_get_from_wla(this, next_addr + 5, data, size); size = 0; } } return 0; } static void wl3501_esbq_req_free(struct 
wl3501_card *this) { u8 tmp; u16 addr; if (this->esbq_req_head == this->esbq_req_tail) goto out; wl3501_get_from_wla(this, this->esbq_req_tail + 3, &tmp, sizeof(tmp)); if (!(tmp & 0x80)) goto out; wl3501_get_from_wla(this, this->esbq_req_tail, &addr, sizeof(addr)); wl3501_free_tx_buffer(this, addr); this->esbq_req_tail += 4; if (this->esbq_req_tail >= this->esbq_req_end) this->esbq_req_tail = this->esbq_req_start; out: return; } static int wl3501_esbq_confirm(struct wl3501_card *this) { u8 tmp; wl3501_get_from_wla(this, this->esbq_confirm + 3, &tmp, sizeof(tmp)); return tmp & 0x80; } static void wl3501_online(struct net_device *dev) { struct wl3501_card *this = netdev_priv(dev); printk(KERN_INFO "%s: Wireless LAN online. BSSID: %pM\n", dev->name, this->bssid); netif_wake_queue(dev); } static void wl3501_esbq_confirm_done(struct wl3501_card *this) { u8 tmp = 0; wl3501_set_to_wla(this, this->esbq_confirm + 3, &tmp, sizeof(tmp)); this->esbq_confirm += 4; if (this->esbq_confirm >= this->esbq_confirm_end) this->esbq_confirm = this->esbq_confirm_start; } static int wl3501_mgmt_auth(struct wl3501_card *this) { struct wl3501_auth_req sig = { .sig_id = WL3501_SIG_AUTH_REQ, .type = WL3501_SYS_TYPE_OPEN, .timeout = 1000, }; pr_debug("entry"); memcpy(sig.mac_addr, this->bssid, ETH_ALEN); return wl3501_esbq_exec(this, &sig, sizeof(sig)); } static int wl3501_mgmt_association(struct wl3501_card *this) { struct wl3501_assoc_req sig = { .sig_id = WL3501_SIG_ASSOC_REQ, .timeout = 1000, .listen_interval = 5, .cap_info = this->cap_info, }; pr_debug("entry"); memcpy(sig.mac_addr, this->bssid, ETH_ALEN); return wl3501_esbq_exec(this, &sig, sizeof(sig)); } static void wl3501_mgmt_join_confirm(struct net_device *dev, u16 addr) { struct wl3501_card *this = netdev_priv(dev); struct wl3501_join_confirm sig; pr_debug("entry"); wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); if (sig.status == WL3501_STATUS_SUCCESS) { if (this->net_type == IW_MODE_INFRA) { if (this->join_sta_bss < 
this->bss_cnt) {
				const int i = this->join_sta_bss;

				memcpy(this->bssid,
				       this->bss_set[i].bssid, ETH_ALEN);
				this->chan = this->bss_set[i].ds_pset.chan;
				iw_copy_mgmt_info_element(&this->keep_essid.el,
						     &this->bss_set[i].ssid.el);
				wl3501_mgmt_auth(this);
			}
		} else {
			const int i = this->join_sta_bss;

			memcpy(&this->bssid, &this->bss_set[i].bssid, ETH_ALEN);
			this->chan = this->bss_set[i].ds_pset.chan;
			iw_copy_mgmt_info_element(&this->keep_essid.el,
						  &this->bss_set[i].ssid.el);
			wl3501_online(dev);
		}
	} else {
		int i;

		/* join failed: advance to the next candidate BSS */
		this->join_sta_bss++;
		for (i = this->join_sta_bss; i < this->bss_cnt; i++)
			if (!wl3501_mgmt_join(this, i))
				break;
		this->join_sta_bss = i;
		if (this->join_sta_bss == this->bss_cnt) {
			if (this->net_type == IW_MODE_INFRA)
				wl3501_mgmt_scan(this, 100);
			else {
				this->adhoc_times++;
				if (this->adhoc_times > WL3501_MAX_ADHOC_TRIES)
					wl3501_mgmt_start(this);
				else
					wl3501_mgmt_scan(this, 100);
			}
		}
	}
}

/* ALARM/DEAUTH/DISASSOC: stop tx and ask the firmware to resync. */
static inline void wl3501_alarm_interrupt(struct net_device *dev,
					  struct wl3501_card *this)
{
	if (this->net_type == IW_MODE_INFRA) {
		printk(KERN_INFO "Wireless LAN offline\n");
		netif_stop_queue(dev);
		wl3501_mgmt_resync(this);
	}
}

/* MD_CONFIRM: tx done -- free the tx buffer chain, restart the queue. */
static inline void wl3501_md_confirm_interrupt(struct net_device *dev,
					       struct wl3501_card *this,
					       u16 addr)
{
	struct wl3501_md_confirm sig;

	pr_debug("entry");
	wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
	wl3501_free_tx_buffer(this, sig.data);
	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
}

/* MD_IND: a frame arrived -- pull it out of card memory into an skb. */
static inline void wl3501_md_ind_interrupt(struct net_device *dev,
					   struct wl3501_card *this, u16 addr)
{
	struct wl3501_md_ind sig;
	struct sk_buff *skb;
	u8 rssi, addr4[ETH_ALEN];
	u16 pkt_len;

	wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
	this->start_seg = sig.data;
	wl3501_get_from_wla(this,
			    sig.data + offsetof(struct wl3501_rx_hdr, rssi),
			    &rssi, sizeof(rssi));
	/* scale raw 0..63 RSSI to 0..~98; out-of-range reads saturate */
	this->rssi = rssi <= 63 ? (rssi * 100) / 64 : 255;
	wl3501_get_from_wla(this,
			    sig.data + offsetof(struct wl3501_rx_hdr, addr4),
			    &addr4, sizeof(addr4));
	/* only SNAP-encapsulated (AA AA 03 .. 00) frames are handled */
	if (!(addr4[0] == 0xAA && addr4[1] == 0xAA &&
	      addr4[2] == 0x03 && addr4[4] == 0x00)) {
		/*
		 * NOTE(review): "Insupported" typo is user-visible; kept
		 * as-is in this documentation-only pass.
		 */
		printk(KERN_INFO "Insupported packet type!\n");
		return;
	}
	pkt_len = sig.size + 12 - 24 - 4 - 6;
	skb = dev_alloc_skb(pkt_len + 5);
	if (!skb) {
		printk(KERN_WARNING "%s: Can't alloc a sk_buff of size %d.\n",
		       dev->name, pkt_len);
		dev->stats.rx_dropped++;
	} else {
		skb->dev = dev;
		skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */
		skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12);
		wl3501_receive(this, skb->data, pkt_len);
		skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_rx(skb);
	}
}

/* Copy a GET/PWR_MGMT confirm into sig_get_confirm and wake the waiter. */
static inline void wl3501_get_confirm_interrupt(struct wl3501_card *this,
						u16 addr, void *sig, int size)
{
	pr_debug("entry");
	wl3501_get_from_wla(this, addr, &this->sig_get_confirm,
			    sizeof(this->sig_get_confirm));
	wake_up(&this->wait);
}

/* START_CONFIRM: our own BSS is up -- allow transmission. */
static inline void wl3501_start_confirm_interrupt(struct net_device *dev,
						  struct wl3501_card *this,
						  u16 addr)
{
	struct wl3501_start_confirm sig;

	pr_debug("entry");
	wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
	if (sig.status == WL3501_STATUS_SUCCESS)
		netif_wake_queue(dev);
}

/* ASSOC_CONFIRM: association succeeded -- go online. */
static inline void wl3501_assoc_confirm_interrupt(struct net_device *dev,
						  u16 addr)
{
	struct wl3501_card *this = netdev_priv(dev);
	struct wl3501_assoc_confirm sig;

	pr_debug("entry");
	wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
	if (sig.status == WL3501_STATUS_SUCCESS)
		wl3501_online(dev);
}

/* AUTH_CONFIRM: on success associate, otherwise resync and retry. */
static inline void wl3501_auth_confirm_interrupt(struct wl3501_card *this,
						 u16 addr)
{
	struct wl3501_auth_confirm sig;

	pr_debug("entry");
	wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
	if (sig.status == WL3501_STATUS_SUCCESS)
		wl3501_mgmt_association(this);
	else
		wl3501_mgmt_resync(this);
}

/* Drain the ESBQ confirm ring, dispatching each signal to its handler. */
static inline void wl3501_rx_interrupt(struct net_device *dev)
{
	int morepkts;
	u16 addr;
u8 sig_id;
	struct wl3501_card *this = netdev_priv(dev);

	pr_debug("entry");
loop:
	morepkts = 0;
	if (!wl3501_esbq_confirm(this))
		goto free;
	wl3501_get_from_wla(this, this->esbq_confirm, &addr, sizeof(addr));
	wl3501_get_from_wla(this, addr + 2, &sig_id, sizeof(sig_id));

	switch (sig_id) {
	case WL3501_SIG_DEAUTH_IND:
	case WL3501_SIG_DISASSOC_IND:
	case WL3501_SIG_ALARM:
		wl3501_alarm_interrupt(dev, this);
		break;
	case WL3501_SIG_MD_CONFIRM:
		wl3501_md_confirm_interrupt(dev, this, addr);
		break;
	case WL3501_SIG_MD_IND:
		wl3501_md_ind_interrupt(dev, this, addr);
		break;
	case WL3501_SIG_GET_CONFIRM:
		wl3501_get_confirm_interrupt(this, addr,
					     &this->sig_get_confirm,
					     sizeof(this->sig_get_confirm));
		break;
	case WL3501_SIG_PWR_MGMT_CONFIRM:
		wl3501_get_confirm_interrupt(this, addr,
					     &this->sig_pwr_mgmt_confirm,
					     sizeof(this->sig_pwr_mgmt_confirm));
		break;
	case WL3501_SIG_START_CONFIRM:
		wl3501_start_confirm_interrupt(dev, this, addr);
		break;
	case WL3501_SIG_SCAN_CONFIRM:
		wl3501_mgmt_scan_confirm(this, addr);
		break;
	case WL3501_SIG_JOIN_CONFIRM:
		wl3501_mgmt_join_confirm(dev, addr);
		break;
	case WL3501_SIG_ASSOC_CONFIRM:
		wl3501_assoc_confirm_interrupt(dev, addr);
		break;
	case WL3501_SIG_AUTH_CONFIRM:
		wl3501_auth_confirm_interrupt(this, addr);
		break;
	case WL3501_SIG_RESYNC_CONFIRM:
		wl3501_mgmt_resync(this); /* FIXME: should be resync_confirm */
		break;
	}
	wl3501_esbq_confirm_done(this);
	morepkts = 1;
	/* free request if necessary */
free:
	wl3501_esbq_req_free(this);
	if (morepkts)
		goto loop;
}

/* Acknowledge the card's interrupt by writing ECINT to GCR. */
static inline void wl3501_ack_interrupt(struct wl3501_card *this)
{
	wl3501_outb(WL3501_GCR_ECINT, this->base_addr + WL3501_NIC_GCR);
}

/**
 * wl3501_interrupt - Hardware interrupt from card.
 * @irq - Interrupt number
 * @dev_id - net_device
 *
 * We must acknowledge the interrupt as soon as possible, and block the
 * interrupt from the same card immediately to prevent re-entry.
 *
 * Before accessing the Control_Status_Block, we must lock SUTRO first.
 * On the other hand, to prevent SUTRO from malfunctioning, we must
 * unlock the SUTRO as soon as possible.
 */
static irqreturn_t wl3501_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct wl3501_card *this;

	this = netdev_priv(dev);
	spin_lock(&this->lock);
	wl3501_ack_interrupt(this);
	wl3501_block_interrupt(this);
	wl3501_rx_interrupt(dev);
	wl3501_unblock_interrupt(this);
	spin_unlock(&this->lock);

	return IRQ_HANDLED;
}

/* Hard-reset the card core and poll for the firmware self-test 'W'. */
static int wl3501_reset_board(struct wl3501_card *this)
{
	u8 tmp = 0;
	int i, rc = 0;

	/* Coreset */
	wl3501_outb_p(WL3501_GCR_CORESET, this->base_addr + WL3501_NIC_GCR);
	wl3501_outb_p(0, this->base_addr + WL3501_NIC_GCR);
	wl3501_outb_p(WL3501_GCR_CORESET, this->base_addr + WL3501_NIC_GCR);

	/* Reset SRAM 0x480 to zero */
	wl3501_set_to_wla(this, 0x480, &tmp, sizeof(tmp));

	/* Start up */
	wl3501_outb_p(0, this->base_addr + WL3501_NIC_GCR);

	WL3501_NOPLOOP(1024 * 50);

	wl3501_unblock_interrupt(this);	/* acme: was commented */

	/* Polling Self_Test_Status */
	for (i = 0; i < 10000; i++) {
		wl3501_get_from_wla(this, 0x480, &tmp, sizeof(tmp));
		if (tmp == 'W') {
			/* firmware complete all test successfully */
			tmp = 'A';	/* acknowledge the self test */
			wl3501_set_to_wla(this, 0x480, &tmp, sizeof(tmp));
			goto out;
		}
		WL3501_NOPLOOP(10);
	}
	printk(KERN_WARNING "%s: failed to reset the board!\n", __func__);
	rc = -ENODEV;
out:
	return rc;
}

/*
 * Reset the board, read card name/firmware date, then set up the ESBQ
 * rings and the linked list of free tx buffer blocks from card parameters.
 */
static int wl3501_init_firmware(struct wl3501_card *this)
{
	u16 ptr, next;
	int rc = wl3501_reset_board(this);

	if (rc)
		goto fail;
	this->card_name[0] = '\0';
	wl3501_get_from_wla(this, 0x1a00,
			    this->card_name, sizeof(this->card_name));
	this->card_name[sizeof(this->card_name) - 1] = '\0';
	this->firmware_date[0] = '\0';
	wl3501_get_from_wla(this, 0x1a40,
			    this->firmware_date, sizeof(this->firmware_date));
	this->firmware_date[sizeof(this->firmware_date) - 1] = '\0';
	/* Switch to SRAM Page 0 */
	wl3501_switch_page(this, WL3501_BSS_SPAGE0);
	/* Read parameter from card */
	wl3501_get_from_wla(this, 0x482, &this->esbq_req_start, 2);
	wl3501_get_from_wla(this, 0x486,
&this->esbq_req_end, 2); wl3501_get_from_wla(this, 0x488, &this->esbq_confirm_start, 2); wl3501_get_from_wla(this, 0x48c, &this->esbq_confirm_end, 2); wl3501_get_from_wla(this, 0x48e, &this->tx_buffer_head, 2); wl3501_get_from_wla(this, 0x492, &this->tx_buffer_size, 2); this->esbq_req_tail = this->esbq_req_head = this->esbq_req_start; this->esbq_req_end += this->esbq_req_start; this->esbq_confirm = this->esbq_confirm_start; this->esbq_confirm_end += this->esbq_confirm_start; /* Initial Tx Buffer */ this->tx_buffer_cnt = 1; ptr = this->tx_buffer_head; next = ptr + WL3501_BLKSZ; while ((next - this->tx_buffer_head) < this->tx_buffer_size) { this->tx_buffer_cnt++; wl3501_set_to_wla(this, ptr, &next, sizeof(next)); ptr = next; next = ptr + WL3501_BLKSZ; } rc = 0; next = 0; wl3501_set_to_wla(this, ptr, &next, sizeof(next)); this->tx_buffer_tail = ptr; out: return rc; fail: printk(KERN_WARNING "%s: failed!\n", __func__); goto out; } static int wl3501_close(struct net_device *dev) { struct wl3501_card *this = netdev_priv(dev); int rc = -ENODEV; unsigned long flags; struct pcmcia_device *link; link = this->p_dev; spin_lock_irqsave(&this->lock, flags); link->open--; /* Stop wl3501_hard_start_xmit() from now on */ netif_stop_queue(dev); wl3501_ack_interrupt(this); /* Mask interrupts from the SUTRO */ wl3501_block_interrupt(this); rc = 0; printk(KERN_INFO "%s: WL3501 closed\n", dev->name); spin_unlock_irqrestore(&this->lock, flags); return rc; } /** * wl3501_reset - Reset the SUTRO. * @dev - network device * * It is almost the same as wl3501_open(). In fact, we may just wl3501_close() * and wl3501_open() again, but I wouldn't like to free_irq() when the driver * is running. It seems to be dangerous. 
*/ static int wl3501_reset(struct net_device *dev) { struct wl3501_card *this = netdev_priv(dev); int rc = -ENODEV; wl3501_block_interrupt(this); if (wl3501_init_firmware(this)) { printk(KERN_WARNING "%s: Can't initialize Firmware!\n", dev->name); /* Free IRQ, and mark IRQ as unused */ free_irq(dev->irq, dev); goto out; } /* * Queue has to be started only when the Card is Started */ netif_stop_queue(dev); this->adhoc_times = 0; wl3501_ack_interrupt(this); wl3501_unblock_interrupt(this); wl3501_mgmt_scan(this, 100); pr_debug("%s: device reset", dev->name); rc = 0; out: return rc; } static void wl3501_tx_timeout(struct net_device *dev) { struct wl3501_card *this = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; unsigned long flags; int rc; stats->tx_errors++; spin_lock_irqsave(&this->lock, flags); rc = wl3501_reset(dev); spin_unlock_irqrestore(&this->lock, flags); if (rc) printk(KERN_ERR "%s: Error %d resetting card on Tx timeout!\n", dev->name, rc); else { dev->trans_start = jiffies; /* prevent tx timeout */ netif_wake_queue(dev); } } /* * Return : 0 - OK * 1 - Could not transmit (dev_queue_xmit will queue it) * and try to sent it later */ static netdev_tx_t wl3501_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { int enabled, rc; struct wl3501_card *this = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&this->lock, flags); enabled = wl3501_block_interrupt(this); rc = wl3501_send_pkt(this, skb->data, skb->len); if (enabled) wl3501_unblock_interrupt(this); if (rc) { ++dev->stats.tx_dropped; netif_stop_queue(dev); } else { ++dev->stats.tx_packets; dev->stats.tx_bytes += skb->len; kfree_skb(skb); if (this->tx_buffer_cnt < 2) netif_stop_queue(dev); } spin_unlock_irqrestore(&this->lock, flags); return NETDEV_TX_OK; } static int wl3501_open(struct net_device *dev) { int rc = -ENODEV; struct wl3501_card *this = netdev_priv(dev); unsigned long flags; struct pcmcia_device *link; link = this->p_dev; spin_lock_irqsave(&this->lock, flags); 
if (!pcmcia_dev_present(link))
		goto out;
	netif_device_attach(dev);
	link->open++;

	/* Initial WL3501 firmware */
	pr_debug("%s: Initialize WL3501 firmware...", dev->name);
	if (wl3501_init_firmware(this))
		goto fail;
	/* Initial device variables */
	this->adhoc_times = 0;
	/* Acknowledge Interrupt, for cleaning last state */
	wl3501_ack_interrupt(this);

	/* Enable interrupt from card after all */
	wl3501_unblock_interrupt(this);
	wl3501_mgmt_scan(this, 100);
	rc = 0;
	pr_debug("%s: WL3501 opened", dev->name);
	printk(KERN_INFO "%s: Card Name: %s\n"
			 "%s: Firmware Date: %s\n",
			 dev->name, this->card_name,
			 dev->name, this->firmware_date);
out:
	spin_unlock_irqrestore(&this->lock, flags);
	return rc;
fail:
	printk(KERN_WARNING "%s: Can't initialize firmware!\n", dev->name);
	goto out;
}

/* Collect error counters from the firmware MIB into iw_statistics. */
static struct iw_statistics *wl3501_get_wireless_stats(struct net_device *dev)
{
	struct wl3501_card *this = netdev_priv(dev);
	struct iw_statistics *wstats = &this->wstats;
	u32 value; /* size checked: it is u32 */

	memset(wstats, 0, sizeof(*wstats));
	wstats->status = netif_running(dev);
	if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_WEP_ICV_ERROR_COUNT,
				  &value, sizeof(value)))
		wstats->discard.code += value;
	if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_WEP_UNDECRYPTABLE_COUNT,
				  &value, sizeof(value)))
		wstats->discard.code += value;
	if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_WEP_EXCLUDED_COUNT,
				  &value, sizeof(value)))
		wstats->discard.code += value;
	if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_RETRY_COUNT,
				  &value, sizeof(value)))
		wstats->discard.retries = value;
	if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_FAILED_COUNT,
				  &value, sizeof(value)))
		wstats->discard.misc += value;
	if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_RTS_FAILURE_COUNT,
				  &value, sizeof(value)))
		wstats->discard.misc += value;
	if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_ACK_FAILURE_COUNT,
				  &value, sizeof(value)))
		wstats->discard.misc += value;
	if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_FRAME_DUPLICATE_COUNT,
				  &value, sizeof(value)))
		wstats->discard.misc += value;
	return wstats;
}

/**
 * wl3501_detach - deletes a driver "instance"
 * @link - FILL_IN
 *
 * This deletes a driver "instance". The device is de-registered with Card
 * Services. If it has been released, all local data structures are freed.
 * Otherwise, the structures will be freed when the device is released.
 */
static void wl3501_detach(struct pcmcia_device *link)
{
	struct net_device *dev = link->priv;

	/* If the device is currently configured and active, we won't actually
	 * delete it yet.  Instead, it is marked so that when the release()
	 * function is called, that will trigger a proper detach(). */

	while (link->open > 0)
		wl3501_close(dev);

	netif_device_detach(dev);
	wl3501_release(link);

	unregister_netdev(dev);

	if (link->priv)
		free_netdev(link->priv);
}

/* SIOCGIWNAME: report the protocol name. */
static int wl3501_get_name(struct net_device *dev, struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	strlcpy(wrqu->name, "IEEE 802.11-DS", sizeof(wrqu->name));
	return 0;
}

/* SIOCSIWFREQ: set the channel (validated against the reg domain) + reset. */
static int wl3501_set_freq(struct net_device *dev, struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct wl3501_card *this = netdev_priv(dev);
	int channel = wrqu->freq.m;
	int rc = -EINVAL;

	if (iw_valid_channel(this->reg_domain, channel)) {
		this->chan = channel;
		rc = wl3501_reset(dev);
	}
	return rc;
}

/* SIOCGIWFREQ: report the current channel as a frequency. */
static int wl3501_get_freq(struct net_device *dev, struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct wl3501_card *this = netdev_priv(dev);

	wrqu->freq.m = 100000 *
		       ieee80211_channel_to_frequency(this->chan,
						      IEEE80211_BAND_2GHZ);
	wrqu->freq.e = 1;
	return 0;
}

/* SIOCSIWMODE: accept infra/adhoc/auto and reset into the new mode. */
static int wl3501_set_mode(struct net_device *dev, struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	int rc = -EINVAL;

	if (wrqu->mode == IW_MODE_INFRA ||
	    wrqu->mode == IW_MODE_ADHOC ||
	    wrqu->mode == IW_MODE_AUTO) {
		struct wl3501_card *this = netdev_priv(dev);

		this->net_type = wrqu->mode;
		rc = wl3501_reset(dev);
	}
	return rc;
}

static int wl3501_get_mode(struct net_device
*dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct wl3501_card *this = netdev_priv(dev); wrqu->mode = this->net_type; return 0; } static int wl3501_get_sens(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct wl3501_card *this = netdev_priv(dev); wrqu->sens.value = this->rssi; wrqu->sens.disabled = !wrqu->sens.value; wrqu->sens.fixed = 1; return 0; } static int wl3501_get_range(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_range *range = (struct iw_range *)extra; /* Set the length (very important for backward compatibility) */ wrqu->data.length = sizeof(*range); /* Set all the info we don't care or don't know about to zero */ memset(range, 0, sizeof(*range)); /* Set the Wireless Extension versions */ range->we_version_compiled = WIRELESS_EXT; range->we_version_source = 1; range->throughput = 2 * 1000 * 1000; /* ~2 Mb/s */ /* FIXME: study the code to fill in more fields... */ return 0; } static int wl3501_set_wap(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct wl3501_card *this = netdev_priv(dev); int rc = -EINVAL; /* FIXME: we support other ARPHRDs...*/ if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) goto out; if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data)) { /* FIXME: rescan? */ } else memcpy(this->bssid, wrqu->ap_addr.sa_data, ETH_ALEN); /* FIXME: rescan? deassoc & scan? 
*/ rc = 0; out: return rc; } static int wl3501_get_wap(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct wl3501_card *this = netdev_priv(dev); wrqu->ap_addr.sa_family = ARPHRD_ETHER; memcpy(wrqu->ap_addr.sa_data, this->bssid, ETH_ALEN); return 0; } static int wl3501_set_scan(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* * FIXME: trigger scanning with a reset, yes, I'm lazy */ return wl3501_reset(dev); } static int wl3501_get_scan(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct wl3501_card *this = netdev_priv(dev); int i; char *current_ev = extra; struct iw_event iwe; for (i = 0; i < this->bss_cnt; ++i) { iwe.cmd = SIOCGIWAP; iwe.u.ap_addr.sa_family = ARPHRD_ETHER; memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].bssid, ETH_ALEN); current_ev = iwe_stream_add_event(info, current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_ADDR_LEN); iwe.cmd = SIOCGIWESSID; iwe.u.data.flags = 1; iwe.u.data.length = this->bss_set[i].ssid.el.len; current_ev = iwe_stream_add_point(info, current_ev, extra + IW_SCAN_MAX_DATA, &iwe, this->bss_set[i].ssid.essid); iwe.cmd = SIOCGIWMODE; iwe.u.mode = this->bss_set[i].bss_type; current_ev = iwe_stream_add_event(info, current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_UINT_LEN); iwe.cmd = SIOCGIWFREQ; iwe.u.freq.m = this->bss_set[i].ds_pset.chan; iwe.u.freq.e = 0; current_ev = iwe_stream_add_event(info, current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_FREQ_LEN); iwe.cmd = SIOCGIWENCODE; if (this->bss_set[i].cap_info & WL3501_MGMT_CAPABILITY_PRIVACY) iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; else iwe.u.data.flags = IW_ENCODE_DISABLED; iwe.u.data.length = 0; current_ev = iwe_stream_add_point(info, current_ev, extra + IW_SCAN_MAX_DATA, &iwe, NULL); } /* Length of data */ wrqu->data.length = (current_ev - extra); wrqu->data.flags = 0; /* FIXME: set properly these flags */ return 0; } static 
int wl3501_set_essid(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct wl3501_card *this = netdev_priv(dev); if (wrqu->data.flags) { iw_set_mgmt_info_element(IW_MGMT_INFO_ELEMENT_SSID, &this->essid.el, extra, wrqu->data.length); } else { /* We accept any ESSID */ iw_set_mgmt_info_element(IW_MGMT_INFO_ELEMENT_SSID, &this->essid.el, "ANY", 3); } return wl3501_reset(dev); } static int wl3501_get_essid(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct wl3501_card *this = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&this->lock, flags); wrqu->essid.flags = 1; wrqu->essid.length = this->essid.el.len; memcpy(extra, this->essid.essid, this->essid.el.len); spin_unlock_irqrestore(&this->lock, flags); return 0; } static int wl3501_set_nick(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct wl3501_card *this = netdev_priv(dev); if (wrqu->data.length > sizeof(this->nick)) return -E2BIG; strlcpy(this->nick, extra, wrqu->data.length); return 0; } static int wl3501_get_nick(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct wl3501_card *this = netdev_priv(dev); strlcpy(extra, this->nick, 32); wrqu->data.length = strlen(extra); return 0; } static int wl3501_get_rate(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* * FIXME: have to see from where to get this info, perhaps this card * works at 1 Mbit/s too... for now leave at 2 Mbit/s that is the most * common with the Planet Access Points. 
-acme */ wrqu->bitrate.value = 2000000; wrqu->bitrate.fixed = 1; return 0; } static int wl3501_get_rts_threshold(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { u16 threshold; /* size checked: it is u16 */ struct wl3501_card *this = netdev_priv(dev); int rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_RTS_THRESHOLD, &threshold, sizeof(threshold)); if (!rc) { wrqu->rts.value = threshold; wrqu->rts.disabled = threshold >= 2347; wrqu->rts.fixed = 1; } return rc; } static int wl3501_get_frag_threshold(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { u16 threshold; /* size checked: it is u16 */ struct wl3501_card *this = netdev_priv(dev); int rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_FRAG_THRESHOLD, &threshold, sizeof(threshold)); if (!rc) { wrqu->frag.value = threshold; wrqu->frag.disabled = threshold >= 2346; wrqu->frag.fixed = 1; } return rc; } static int wl3501_get_txpow(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { u16 txpow; struct wl3501_card *this = netdev_priv(dev); int rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_CURRENT_TX_PWR_LEVEL, &txpow, sizeof(txpow)); if (!rc) { wrqu->txpower.value = txpow; wrqu->txpower.disabled = 0; /* * From the MIB values I think this can be configurable, * as it lists several tx power levels -acme */ wrqu->txpower.fixed = 0; wrqu->txpower.flags = IW_TXPOW_MWATT; } return rc; } static int wl3501_get_retry(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { u8 retry; /* size checked: it is u8 */ struct wl3501_card *this = netdev_priv(dev); int rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_LONG_RETRY_LIMIT, &retry, sizeof(retry)); if (rc) goto out; if (wrqu->retry.flags & IW_RETRY_LONG) { wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG; goto set_value; } rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_SHORT_RETRY_LIMIT, &retry, sizeof(retry)); if 
(rc) goto out; wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT; set_value: wrqu->retry.value = retry; wrqu->retry.disabled = 0; out: return rc; } static int wl3501_get_encode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { u8 implemented, restricted, keys[100], len_keys, tocopy; struct wl3501_card *this = netdev_priv(dev); int rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_PRIV_OPT_IMPLEMENTED, &implemented, sizeof(implemented)); if (rc) goto out; if (!implemented) { wrqu->encoding.flags = IW_ENCODE_DISABLED; goto out; } rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_EXCLUDE_UNENCRYPTED, &restricted, sizeof(restricted)); if (rc) goto out; wrqu->encoding.flags = restricted ? IW_ENCODE_RESTRICTED : IW_ENCODE_OPEN; rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_WEP_KEY_MAPPINGS_LEN, &len_keys, sizeof(len_keys)); if (rc) goto out; rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_WEP_KEY_MAPPINGS, keys, len_keys); if (rc) goto out; tocopy = min_t(u16, len_keys, wrqu->encoding.length); tocopy = min_t(u8, tocopy, 100); wrqu->encoding.length = tocopy; memcpy(extra, keys, tocopy); out: return rc; } static int wl3501_get_power(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { u8 pwr_state; struct wl3501_card *this = netdev_priv(dev); int rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_CURRENT_PWR_STATE, &pwr_state, sizeof(pwr_state)); if (rc) goto out; wrqu->power.disabled = !pwr_state; wrqu->power.flags = IW_POWER_ON; out: return rc; } static const iw_handler wl3501_handler[] = { IW_HANDLER(SIOCGIWNAME, wl3501_get_name), IW_HANDLER(SIOCSIWFREQ, wl3501_set_freq), IW_HANDLER(SIOCGIWFREQ, wl3501_get_freq), IW_HANDLER(SIOCSIWMODE, wl3501_set_mode), IW_HANDLER(SIOCGIWMODE, wl3501_get_mode), IW_HANDLER(SIOCGIWSENS, wl3501_get_sens), IW_HANDLER(SIOCGIWRANGE, wl3501_get_range), IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy), IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy), IW_HANDLER(SIOCSIWTHRSPY, 
iw_handler_set_thrspy), IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy), IW_HANDLER(SIOCSIWAP, wl3501_set_wap), IW_HANDLER(SIOCGIWAP, wl3501_get_wap), IW_HANDLER(SIOCSIWSCAN, wl3501_set_scan), IW_HANDLER(SIOCGIWSCAN, wl3501_get_scan), IW_HANDLER(SIOCSIWESSID, wl3501_set_essid), IW_HANDLER(SIOCGIWESSID, wl3501_get_essid), IW_HANDLER(SIOCSIWNICKN, wl3501_set_nick), IW_HANDLER(SIOCGIWNICKN, wl3501_get_nick), IW_HANDLER(SIOCGIWRATE, wl3501_get_rate), IW_HANDLER(SIOCGIWRTS, wl3501_get_rts_threshold), IW_HANDLER(SIOCGIWFRAG, wl3501_get_frag_threshold), IW_HANDLER(SIOCGIWTXPOW, wl3501_get_txpow), IW_HANDLER(SIOCGIWRETRY, wl3501_get_retry), IW_HANDLER(SIOCGIWENCODE, wl3501_get_encode), IW_HANDLER(SIOCGIWPOWER, wl3501_get_power), }; static const struct iw_handler_def wl3501_handler_def = { .num_standard = ARRAY_SIZE(wl3501_handler), .standard = (iw_handler *)wl3501_handler, .get_wireless_stats = wl3501_get_wireless_stats, }; static const struct net_device_ops wl3501_netdev_ops = { .ndo_open = wl3501_open, .ndo_stop = wl3501_close, .ndo_start_xmit = wl3501_hard_start_xmit, .ndo_tx_timeout = wl3501_tx_timeout, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int wl3501_probe(struct pcmcia_device *p_dev) { struct net_device *dev; struct wl3501_card *this; /* The io structure describes IO port mapping */ p_dev->resource[0]->end = 16; p_dev->resource[0]->flags = IO_DATA_PATH_WIDTH_8; /* General socket configuration */ p_dev->config_flags = CONF_ENABLE_IRQ; p_dev->config_index = 1; dev = alloc_etherdev(sizeof(struct wl3501_card)); if (!dev) goto out_link; dev->netdev_ops = &wl3501_netdev_ops; dev->watchdog_timeo = 5 * HZ; this = netdev_priv(dev); this->wireless_data.spy_data = &this->spy_data; this->p_dev = p_dev; dev->wireless_data = &this->wireless_data; dev->wireless_handlers = &wl3501_handler_def; netif_stop_queue(dev); p_dev->priv = dev; return wl3501_config(p_dev); out_link: return -ENOMEM; } static 
int wl3501_config(struct pcmcia_device *link) { struct net_device *dev = link->priv; int i = 0, j, ret; struct wl3501_card *this; /* Try allocating IO ports. This tries a few fixed addresses. If you * want, you can also read the card's config table to pick addresses -- * see the serial driver for an example. */ link->io_lines = 5; for (j = 0x280; j < 0x400; j += 0x20) { /* The '^0x300' is so that we probe 0x300-0x3ff first, then * 0x200-0x2ff, and so on, because this seems safer */ link->resource[0]->start = j; link->resource[1]->start = link->resource[0]->start + 0x10; i = pcmcia_request_io(link); if (i == 0) break; } if (i != 0) goto failed; /* Now allocate an interrupt line. Note that this does not actually * assign a handler to the interrupt. */ ret = pcmcia_request_irq(link, wl3501_interrupt); if (ret) goto failed; ret = pcmcia_enable_device(link); if (ret) goto failed; dev->irq = link->irq; dev->base_addr = link->resource[0]->start; SET_NETDEV_DEV(dev, &link->dev); if (register_netdev(dev)) { printk(KERN_NOTICE "wl3501_cs: register_netdev() failed\n"); goto failed; } this = netdev_priv(dev); this->base_addr = dev->base_addr; if (!wl3501_get_flash_mac_addr(this)) { printk(KERN_WARNING "%s: Can't read MAC addr in flash ROM?\n", dev->name); unregister_netdev(dev); goto failed; } for (i = 0; i < 6; i++) dev->dev_addr[i] = ((char *)&this->mac_addr)[i]; /* print probe information */ printk(KERN_INFO "%s: wl3501 @ 0x%3.3x, IRQ %d, " "MAC addr in flash ROM:%pM\n", dev->name, this->base_addr, (int)dev->irq, dev->dev_addr); /* * Initialize card parameters - added by jss */ this->net_type = IW_MODE_INFRA; this->bss_cnt = 0; this->join_sta_bss = 0; this->adhoc_times = 0; iw_set_mgmt_info_element(IW_MGMT_INFO_ELEMENT_SSID, &this->essid.el, "ANY", 3); this->card_name[0] = '\0'; this->firmware_date[0] = '\0'; this->rssi = 255; this->chan = iw_default_channel(this->reg_domain); strlcpy(this->nick, "Planet WL3501", sizeof(this->nick)); spin_lock_init(&this->lock); 
init_waitqueue_head(&this->wait); netif_start_queue(dev); return 0; failed: wl3501_release(link); return -ENODEV; } static void wl3501_release(struct pcmcia_device *link) { pcmcia_disable_device(link); } static int wl3501_suspend(struct pcmcia_device *link) { struct net_device *dev = link->priv; wl3501_pwr_mgmt(netdev_priv(dev), WL3501_SUSPEND); if (link->open) netif_device_detach(dev); return 0; } static int wl3501_resume(struct pcmcia_device *link) { struct net_device *dev = link->priv; wl3501_pwr_mgmt(netdev_priv(dev), WL3501_RESUME); if (link->open) { wl3501_reset(dev); netif_device_attach(dev); } return 0; } static const struct pcmcia_device_id wl3501_ids[] = { PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0001), PCMCIA_DEVICE_NULL }; MODULE_DEVICE_TABLE(pcmcia, wl3501_ids); static struct pcmcia_driver wl3501_driver = { .owner = THIS_MODULE, .name = "wl3501_cs", .probe = wl3501_probe, .remove = wl3501_detach, .id_table = wl3501_ids, .suspend = wl3501_suspend, .resume = wl3501_resume, }; module_pcmcia_driver(wl3501_driver); MODULE_AUTHOR("Fox Chen <mhchen@golf.ccl.itri.org.tw>, " "Arnaldo Carvalho de Melo <acme@conectiva.com.br>," "Gustavo Niemeyer <niemeyer@conectiva.com>"); MODULE_DESCRIPTION("Planet wl3501 wireless driver"); MODULE_LICENSE("GPL");
gpl-2.0
lijinc/linux-source-3.11.0
kernel/context_tracking.c
2103
5588
/*
 * Context tracking: Probe on high level context boundaries such as kernel
 * and userspace. This includes syscalls and exceptions entry/exit.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in userspace.
 *
 *  Started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 *
 */

#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/export.h>

/*
 * Per-CPU tracking state. With CONFIG_CONTEXT_TRACKING_FORCE the probes
 * are active on every CPU from boot; otherwise .active stays false until
 * something (e.g. full dynticks) enables tracking for that CPU.
 */
DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
#ifdef CONFIG_CONTEXT_TRACKING_FORCE
	.active = true,
#endif
};

/**
 * user_enter - Inform the context tracking that the CPU is going to
 *              enter userspace mode.
 *
 * This function must be called right before we switch from the kernel
 * to userspace, when it's guaranteed the remaining kernel instructions
 * to execute won't use any RCU read side critical section because this
 * function sets RCU in extended quiescent state.
 */
void user_enter(void)
{
	unsigned long flags;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to that nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	/* Kernel threads aren't supposed to go to userspace */
	WARN_ON_ONCE(!current->mm);

	/* IRQs off: the state test and update below must be atomic per-CPU. */
	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.active) &&
	    __this_cpu_read(context_tracking.state) != IN_USER) {
		/*
		 * At this stage, only low level arch entry code remains and
		 * then we'll run in userspace. We can assume there won't be
		 * any RCU read-side critical section until the next call to
		 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
		 * on the tick.
		 */
		vtime_user_enter(current);
		rcu_user_enter();
		__this_cpu_write(context_tracking.state, IN_USER);
	}
	local_irq_restore(flags);
}

#ifdef CONFIG_PREEMPT
/**
 * preempt_schedule_context - preempt_schedule called by tracing
 *
 * The tracing infrastructure uses preempt_enable_notrace to prevent
 * recursion and tracing preempt enabling caused by the tracing
 * infrastructure itself. But as tracing can happen in areas coming
 * from userspace or just about to enter userspace, a preempt enable
 * can occur before user_exit() is called. This will cause the scheduler
 * to be called when the system is still in usermode.
 *
 * To prevent this, the preempt_enable_notrace will use this function
 * instead of preempt_schedule() to exit user context if needed before
 * calling the scheduler.
 */
void __sched notrace preempt_schedule_context(void)
{
	struct thread_info *ti = current_thread_info();
	enum ctx_state prev_ctx;

	if (likely(ti->preempt_count || irqs_disabled()))
		return;

	/*
	 * Need to disable preemption in case user_exit() is traced
	 * and the tracer calls preempt_enable_notrace() causing
	 * an infinite recursion.
	 */
	preempt_disable_notrace();
	prev_ctx = exception_enter();
	preempt_enable_no_resched_notrace();

	preempt_schedule();

	/* Restore the previous context state symmetrically with the above. */
	preempt_disable_notrace();
	exception_exit(prev_ctx);
	preempt_enable_notrace();
}
EXPORT_SYMBOL_GPL(preempt_schedule_context);
#endif /* CONFIG_PREEMPT */

/**
 * user_exit - Inform the context tracking that the CPU is
 *             exiting userspace mode and entering the kernel.
 *
 * This function must be called after we entered the kernel from userspace
 * before any use of RCU read side critical section. This potentially include
 * any high level kernel code like syscalls, exceptions, signal handling, etc...
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void user_exit(void)
{
	unsigned long flags;

	/* Same IRQ-nesting concern as in user_enter(): rcu_irq_*() covers it. */
	if (in_interrupt())
		return;

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.state) == IN_USER) {
		/*
		 * We are going to run code that may use RCU. Inform
		 * RCU core about that (ie: we may need the tick again).
		 */
		rcu_user_exit();
		vtime_user_exit(current);
		__this_cpu_write(context_tracking.state, IN_KERNEL);
	}
	local_irq_restore(flags);
}

/*
 * guest_enter - account the switch into guest mode, either via the
 * vtime accounting path (when enabled) or the plain guest-enter hook.
 */
void guest_enter(void)
{
	if (vtime_accounting_enabled())
		vtime_guest_enter(current);
	else
		__guest_enter();
}
EXPORT_SYMBOL_GPL(guest_enter);

/*
 * guest_exit - mirror of guest_enter() for leaving guest mode.
 */
void guest_exit(void)
{
	if (vtime_accounting_enabled())
		vtime_guest_exit(current);
	else
		__guest_exit();
}
EXPORT_SYMBOL_GPL(guest_exit);

/**
 * context_tracking_task_switch - context switch the syscall callbacks
 * @prev: the task that is being switched out
 * @next: the task that is being switched in
 *
 * The context tracking uses the syscall slow path to implement its user-kernel
 * boundaries probes on syscalls. This way it doesn't impact the syscall fast
 * path on CPUs that don't do context tracking.
 *
 * But we need to clear the flag on the previous task because it may later
 * migrate to some CPU that doesn't do the context tracking. As such the TIF
 * flag may not be desired there.
 */
void context_tracking_task_switch(struct task_struct *prev,
			     struct task_struct *next)
{
	if (__this_cpu_read(context_tracking.active)) {
		clear_tsk_thread_flag(prev, TIF_NOHZ);
		set_tsk_thread_flag(next, TIF_NOHZ);
	}
}
gpl-2.0
linux4hach/linux-at91
drivers/pcmcia/sa1100_assabet.c
4407
2520
/* * drivers/pcmcia/sa1100_assabet.c * * PCMCIA implementation routines for Assabet * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/init.h> #include <linux/gpio.h> #include <asm/mach-types.h> #include <mach/assabet.h> #include "sa1100_generic.h" static int assabet_pcmcia_hw_init(struct soc_pcmcia_socket *skt) { skt->stat[SOC_STAT_CD].gpio = ASSABET_GPIO_CF_CD; skt->stat[SOC_STAT_CD].name = "CF CD"; skt->stat[SOC_STAT_BVD1].gpio = ASSABET_GPIO_CF_BVD1; skt->stat[SOC_STAT_BVD1].name = "CF BVD1"; skt->stat[SOC_STAT_BVD2].gpio = ASSABET_GPIO_CF_BVD2; skt->stat[SOC_STAT_BVD2].name = "CF BVD2"; skt->stat[SOC_STAT_RDY].gpio = ASSABET_GPIO_CF_IRQ; skt->stat[SOC_STAT_RDY].name = "CF RDY"; return 0; } static void assabet_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) { state->vs_3v = 1; /* Can only apply 3.3V on Assabet. */ state->vs_Xv = 0; } static int assabet_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) { unsigned int mask; switch (state->Vcc) { case 0: mask = 0; break; case 50: printk(KERN_WARNING "%s(): CS asked for 5V, applying 3.3V...\n", __func__); case 33: /* Can only apply 3.3V to the CF slot. */ mask = ASSABET_BCR_CF_PWR; break; default: printk(KERN_ERR "%s(): unrecognized Vcc %u\n", __func__, state->Vcc); return -1; } /* Silently ignore Vpp, speaker enable. */ if (state->flags & SS_RESET) mask |= ASSABET_BCR_CF_RST; if (!(state->flags & SS_OUTPUT_ENA)) mask |= ASSABET_BCR_CF_BUS_OFF; ASSABET_BCR_frob(ASSABET_BCR_CF_RST | ASSABET_BCR_CF_PWR | ASSABET_BCR_CF_BUS_OFF, mask); return 0; } /* * Disable card status IRQs on suspend. */ static void assabet_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt) { /* * Tristate the CF bus signals. Also assert CF * reset as per user guide page 4-11. 
*/ ASSABET_BCR_set(ASSABET_BCR_CF_BUS_OFF | ASSABET_BCR_CF_RST); } static struct pcmcia_low_level assabet_pcmcia_ops = { .owner = THIS_MODULE, .hw_init = assabet_pcmcia_hw_init, .socket_state = assabet_pcmcia_socket_state, .configure_socket = assabet_pcmcia_configure_socket, .socket_suspend = assabet_pcmcia_socket_suspend, }; int pcmcia_assabet_init(struct device *dev) { int ret = -ENODEV; if (machine_is_assabet() && !machine_has_neponset()) ret = sa11xx_drv_pcmcia_probe(dev, &assabet_pcmcia_ops, 1, 1); return ret; }
gpl-2.0
TeamRegular/android_kernel_samsung_exynos3470
drivers/acpi/acpica/dsobject.c
4919
24843
/****************************************************************************** * * Module Name: dsobject - Dispatcher object management routines * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acparser.h" #include "amlcode.h" #include "acdispat.h" #include "acnamesp.h" #include "acinterp.h" #define _COMPONENT ACPI_DISPATCHER ACPI_MODULE_NAME("dsobject") /* Local prototypes */ static acpi_status acpi_ds_build_internal_object(struct acpi_walk_state *walk_state, union acpi_parse_object *op, union acpi_operand_object **obj_desc_ptr); #ifndef ACPI_NO_METHOD_EXECUTION /******************************************************************************* * * FUNCTION: acpi_ds_build_internal_object * * PARAMETERS: walk_state - Current walk state * Op - Parser object to be translated * obj_desc_ptr - Where the ACPI internal object is returned * * RETURN: Status * * DESCRIPTION: Translate a parser Op object to the equivalent namespace object * Simple objects are any objects other than a package object! * ******************************************************************************/ static acpi_status acpi_ds_build_internal_object(struct acpi_walk_state *walk_state, union acpi_parse_object *op, union acpi_operand_object **obj_desc_ptr) { union acpi_operand_object *obj_desc; acpi_status status; acpi_object_type type; ACPI_FUNCTION_TRACE(ds_build_internal_object); *obj_desc_ptr = NULL; if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) { /* * This is a named object reference. If this name was * previously looked up in the namespace, it was stored in this op. 
* Otherwise, go ahead and look it up now */ if (!op->common.node) { status = acpi_ns_lookup(walk_state->scope_info, op->common.value.string, ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE, NULL, ACPI_CAST_INDIRECT_PTR(struct acpi_namespace_node, &(op-> common. node))); if (ACPI_FAILURE(status)) { /* Check if we are resolving a named reference within a package */ if ((status == AE_NOT_FOUND) && (acpi_gbl_enable_interpreter_slack) && ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) || (op->common.parent->common.aml_opcode == AML_VAR_PACKAGE_OP))) { /* * We didn't find the target and we are populating elements * of a package - ignore if slack enabled. Some ASL code * contains dangling invalid references in packages and * expects that no exception will be issued. Leave the * element as a null element. It cannot be used, but it * can be overwritten by subsequent ASL code - this is * typically the case. */ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Ignoring unresolved reference in package [%4.4s]\n", walk_state-> scope_info->scope. node->name.ascii)); return_ACPI_STATUS(AE_OK); } else { ACPI_ERROR_NAMESPACE(op->common.value. string, status); } return_ACPI_STATUS(status); } } /* Special object resolution for elements of a package */ if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) || (op->common.parent->common.aml_opcode == AML_VAR_PACKAGE_OP)) { /* * Attempt to resolve the node to a value before we insert it into * the package. If this is a reference to a common data type, * resolve it immediately. According to the ACPI spec, package * elements can only be "data objects" or method references. * Attempt to resolve to an Integer, Buffer, String or Package. * If cannot, return the named reference (for things like Devices, * Methods, etc.) Buffer Fields and Fields will resolve to simple * objects (int/buf/str/pkg). * * NOTE: References to things like Devices, Methods, Mutexes, etc. * will remain as named references. 
This behavior is not described * in the ACPI spec, but it appears to be an oversight. */ obj_desc = ACPI_CAST_PTR(union acpi_operand_object, op->common.node); status = acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR (struct acpi_namespace_node, &obj_desc), walk_state); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* * Special handling for Alias objects. We need to setup the type * and the Op->Common.Node to point to the Alias target. Note, * Alias has at most one level of indirection internally. */ type = op->common.node->type; if (type == ACPI_TYPE_LOCAL_ALIAS) { type = obj_desc->common.type; op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, op->common.node->object); } switch (type) { /* * For these types, we need the actual node, not the subobject. * However, the subobject did not get an extra reference count above. * * TBD: should ex_resolve_node_to_value be changed to fix this? */ case ACPI_TYPE_DEVICE: case ACPI_TYPE_THERMAL: acpi_ut_add_reference(op->common.node->object); /*lint -fallthrough */ /* * For these types, we need the actual node, not the subobject. * The subobject got an extra reference count in ex_resolve_node_to_value. */ case ACPI_TYPE_MUTEX: case ACPI_TYPE_METHOD: case ACPI_TYPE_POWER: case ACPI_TYPE_PROCESSOR: case ACPI_TYPE_EVENT: case ACPI_TYPE_REGION: /* We will create a reference object for these types below */ break; default: /* * All other types - the node was resolved to an actual * object, we are done. 
*/
				goto exit;
			}
		}
	}

	/* Create and init a new internal ACPI object */

	obj_desc = acpi_ut_create_internal_object((acpi_ps_get_opcode_info
						   (op->common.aml_opcode))->
						  object_type);
	if (!obj_desc) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	status =
	    acpi_ds_init_object_from_op(walk_state, op, op->common.aml_opcode,
					&obj_desc);
	if (ACPI_FAILURE(status)) {
		acpi_ut_remove_reference(obj_desc);
		return_ACPI_STATUS(status);
	}

      exit:
	*obj_desc_ptr = obj_desc;
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_build_internal_buffer_obj
 *
 * PARAMETERS:  walk_state      - Current walk state
 *              Op              - Parser object to be translated
 *              buffer_length   - Length of the buffer
 *              obj_desc_ptr    - Where the ACPI internal object is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Translate a parser Op package object to the equivalent
 *              namespace object
 *
 ******************************************************************************/

acpi_status
acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
				  union acpi_parse_object *op,
				  u32 buffer_length,
				  union acpi_operand_object **obj_desc_ptr)
{
	union acpi_parse_object *arg;
	union acpi_operand_object *obj_desc;
	union acpi_parse_object *byte_list;
	u32 byte_list_length = 0;

	ACPI_FUNCTION_TRACE(ds_build_internal_buffer_obj);

	/*
	 * If we are evaluating a Named buffer object "Name (xxxx, Buffer)".
	 * The buffer object already exists (from the NS node), otherwise it must
	 * be created.
	 */
	obj_desc = *obj_desc_ptr;
	if (!obj_desc) {

		/* Create a new buffer object */

		obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
		*obj_desc_ptr = obj_desc;
		if (!obj_desc) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}
	}

	/*
	 * Second arg is the buffer data (optional) byte_list can be either
	 * individual bytes or a string initializer. In either case, a
	 * byte_list appears in the AML.
	 */
	arg = op->common.value.arg;	/* skip first arg */

	byte_list = arg->named.next;
	if (byte_list) {
		if (byte_list->common.aml_opcode != AML_INT_BYTELIST_OP) {
			ACPI_ERROR((AE_INFO,
				    "Expecting bytelist, found AML opcode 0x%X in op %p",
				    byte_list->common.aml_opcode, byte_list));

			acpi_ut_remove_reference(obj_desc);
			/*
			 * NOTE(review): bare return (AE_TYPE), unlike the other
			 * exits which use return_ACPI_STATUS - confirm whether
			 * the trace-exit macro was omitted intentionally.
			 */
			return (AE_TYPE);
		}

		byte_list_length = (u32) byte_list->common.value.integer;
	}

	/*
	 * The buffer length (number of bytes) will be the larger of:
	 * 1) The specified buffer length and
	 * 2) The length of the initializer byte list
	 */
	obj_desc->buffer.length = buffer_length;
	if (byte_list_length > buffer_length) {
		obj_desc->buffer.length = byte_list_length;
	}

	/* Allocate the buffer */

	if (obj_desc->buffer.length == 0) {
		obj_desc->buffer.pointer = NULL;
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "Buffer defined with zero length in AML, creating\n"));
	} else {
		/* Zeroed allocation: pads beyond byte_list_length stay 0 */
		obj_desc->buffer.pointer =
		    ACPI_ALLOCATE_ZEROED(obj_desc->buffer.length);
		if (!obj_desc->buffer.pointer) {
			acpi_ut_delete_object_desc(obj_desc);
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		/* Initialize buffer from the byte_list (if present) */

		if (byte_list) {
			ACPI_MEMCPY(obj_desc->buffer.pointer,
				    byte_list->named.data, byte_list_length);
		}
	}

	obj_desc->buffer.flags |= AOPOBJ_DATA_VALID;
	op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_build_internal_package_obj
 *
 * PARAMETERS:  walk_state      - Current walk state
 *              Op              - Parser object to be translated
 *              element_count   - Number of elements in the package - this is
 *                                the num_elements argument to Package()
 *              obj_desc_ptr    - Where the ACPI internal object is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Translate a parser Op package object to the equivalent
 *              namespace object
 *
 * NOTE: The number of elements in the package will be always be the num_elements
 * count, regardless of the number of elements in the package list. If
 * num_elements is smaller, only that many package list elements are used.
 * if num_elements is larger, the Package object is padded out with
 * objects of type Uninitialized (as per ACPI spec.)
 *
 * Even though the ASL compilers do not allow num_elements to be smaller
 * than the Package list length (for the fixed length package opcode), some
 * BIOS code modifies the AML on the fly to adjust the num_elements, and
 * this code compensates for that. This also provides compatibility with
 * other AML interpreters.
 *
 ******************************************************************************/

acpi_status
acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
				   union acpi_parse_object *op,
				   u32 element_count,
				   union acpi_operand_object **obj_desc_ptr)
{
	union acpi_parse_object *arg;
	union acpi_parse_object *parent;
	union acpi_operand_object *obj_desc = NULL;
	acpi_status status = AE_OK;
	unsigned i;		/* Running element count; also reused to measure truncated tail */
	u16 index;
	u16 reference_count;

	ACPI_FUNCTION_TRACE(ds_build_internal_package_obj);

	/* Find the parent of a possibly nested package */

	parent = op->common.parent;
	while ((parent->common.aml_opcode == AML_PACKAGE_OP) ||
	       (parent->common.aml_opcode == AML_VAR_PACKAGE_OP)) {
		parent = parent->common.parent;
	}

	/*
	 * If we are evaluating a Named package object "Name (xxxx, Package)",
	 * the package object already exists, otherwise it must be created.
	 */
	obj_desc = *obj_desc_ptr;
	if (!obj_desc) {
		obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE);
		*obj_desc_ptr = obj_desc;
		if (!obj_desc) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		obj_desc->package.node = parent->common.node;
	}

	/*
	 * Allocate the element array (array of pointers to the individual
	 * objects) based on the num_elements parameter. Add an extra pointer slot
	 * so that the list is always null terminated.
	 */
	obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
							   element_count +
							   1) * sizeof(void *));

	if (!obj_desc->package.elements) {
		acpi_ut_delete_object_desc(obj_desc);
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	obj_desc->package.count = element_count;

	/*
	 * Initialize the elements of the package, up to the num_elements count.
	 * Package is automatically padded with uninitialized (NULL) elements
	 * if num_elements is greater than the package list length. Likewise,
	 * Package is truncated if num_elements is less than the list length.
	 */
	arg = op->common.value.arg;
	arg = arg->common.next;	/* First arg is the num_elements operand; skip it */
	for (i = 0; arg && (i < element_count); i++) {
		if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
			if (arg->common.node->type == ACPI_TYPE_METHOD) {
				/*
				 * A method reference "looks" to the parser to be a method
				 * invocation, so we special case it here
				 */
				arg->common.aml_opcode = AML_INT_NAMEPATH_OP;
				status =
				    acpi_ds_build_internal_object(walk_state,
								  arg,
								  &obj_desc->
								  package.
								  elements[i]);
			} else {
				/* This package element is already built, just get it */

				obj_desc->package.elements[i] =
				    ACPI_CAST_PTR(union acpi_operand_object,
						  arg->common.node);
			}
		} else {
			status = acpi_ds_build_internal_object(walk_state, arg,
							       &obj_desc->
							       package.
							       elements[i]);
		}

		if (*obj_desc_ptr) {

			/* Existing package, get existing reference count */

			reference_count =
			    (*obj_desc_ptr)->common.reference_count;
			if (reference_count > 1) {

				/* Make new element ref count match original ref count */

				for (index = 0; index < (reference_count - 1);
				     index++) {
					acpi_ut_add_reference((obj_desc->
							       package.
							       elements[i]));
				}
			}
		}

		arg = arg->common.next;
	}

	/* Check for match between num_elements and actual length of package_list */

	if (arg) {
		/*
		 * num_elements was exhausted, but there are remaining elements in the
		 * package_list. Truncate the package to num_elements.
		 *
		 * Note: technically, this is an error, from ACPI spec: "It is an error
		 * for NumElements to be less than the number of elements in the
		 * PackageList". However, we just print a message and
		 * no exception is returned. This provides Windows compatibility. Some
		 * BIOSs will alter the num_elements on the fly, creating this type
		 * of ill-formed package object.
		 */
		while (arg) {
			/*
			 * We must delete any package elements that were created earlier
			 * and are not going to be used because of the package truncation.
			 */
			if (arg->common.node) {
				acpi_ut_remove_reference(ACPI_CAST_PTR
							 (union
							  acpi_operand_object,
							  arg->common.node));
				arg->common.node = NULL;
			}

			/* Find out how many elements there really are */

			i++;
			arg = arg->common.next;
		}

		ACPI_INFO((AE_INFO,
			   "Actual Package length (%u) is larger than NumElements field (%u), truncated\n",
			   i, element_count));
	} else if (i < element_count) {
		/*
		 * Arg list (elements) was exhausted, but we did not reach num_elements count.
		 * Note: this is not an error, the package is padded out with NULLs.
		 */
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Package List length (%u) smaller than NumElements count (%u), padded with null elements\n",
				  i, element_count));
	}

	obj_desc->package.flags |= AOPOBJ_DATA_VALID;
	op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_create_node
 *
 * PARAMETERS:  walk_state      - Current walk state
 *              Node            - NS Node to be initialized
 *              Op              - Parser object to be translated
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create the object to be associated with a namespace node
 *
 ******************************************************************************/

acpi_status
acpi_ds_create_node(struct acpi_walk_state *walk_state,
		    struct acpi_namespace_node *node,
		    union acpi_parse_object *op)
{
	acpi_status status;
	union acpi_operand_object *obj_desc;

	ACPI_FUNCTION_TRACE_PTR(ds_create_node, op);

	/*
	 * Because of the execution pass through the non-control-method
	 * parts of the table, we can arrive here twice. Only init
	 * the named object node the first time through
	 */
	if (acpi_ns_get_attached_object(node)) {
		return_ACPI_STATUS(AE_OK);
	}

	if (!op->common.value.arg) {

		/* No arguments, there is nothing to do */

		return_ACPI_STATUS(AE_OK);
	}

	/* Build an internal object for the argument(s) */

	status = acpi_ds_build_internal_object(walk_state, op->common.value.arg,
					       &obj_desc);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Re-type the object according to its argument */

	node->type = obj_desc->common.type;

	/* Attach obj to node */

	status = acpi_ns_attach_object(node, obj_desc, node->type);

	/* Remove local reference to the object (namespace now owns it) */

	acpi_ut_remove_reference(obj_desc);
	return_ACPI_STATUS(status);
}

#endif				/* ACPI_NO_METHOD_EXECUTION */

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_init_object_from_op
 *
 * PARAMETERS:  walk_state      - Current walk state
 *              Op              - Parser op used to init the internal object
 *              Opcode          - AML opcode associated with the object
 *              ret_obj_desc    - Namespace object to be initialized
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize a namespace object from a parser Op and its
 *              associated arguments. The namespace object is a more compact
 *              representation of the Op and its arguments.
*
 ******************************************************************************/

acpi_status
acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
			    union acpi_parse_object *op,
			    u16 opcode, union acpi_operand_object **ret_obj_desc)
{
	const struct acpi_opcode_info *op_info;
	union acpi_operand_object *obj_desc;
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ds_init_object_from_op);

	obj_desc = *ret_obj_desc;
	op_info = acpi_ps_get_opcode_info(opcode);
	if (op_info->class == AML_CLASS_UNKNOWN) {

		/* Unknown opcode */

		return_ACPI_STATUS(AE_TYPE);
	}

	/* Perform per-object initialization */

	switch (obj_desc->common.type) {
	case ACPI_TYPE_BUFFER:
		/*
		 * Defer evaluation of Buffer term_arg operand. Only the raw
		 * AML start/length are recorded here; the byte data is not
		 * parsed at this point.
		 */
		obj_desc->buffer.node =
		    ACPI_CAST_PTR(struct acpi_namespace_node,
				  walk_state->operands[0]);
		obj_desc->buffer.aml_start = op->named.data;
		obj_desc->buffer.aml_length = op->named.length;
		break;

	case ACPI_TYPE_PACKAGE:
		/*
		 * Defer evaluation of Package term_arg operand; record raw
		 * AML location only, same as the Buffer case above.
		 */
		obj_desc->package.node =
		    ACPI_CAST_PTR(struct acpi_namespace_node,
				  walk_state->operands[0]);
		obj_desc->package.aml_start = op->named.data;
		obj_desc->package.aml_length = op->named.length;
		break;

	case ACPI_TYPE_INTEGER:
		switch (op_info->type) {
		case AML_TYPE_CONSTANT:
			/*
			 * Resolve AML Constants here - AND ONLY HERE!
			 * All constants are integers.
			 * We mark the integer with a flag that indicates that it started
			 * life as a constant -- so that stores to constants will perform
			 * as expected (noop). zero_op is used as a placeholder for optional
			 * target operands.
			 */
			obj_desc->common.flags = AOPOBJ_AML_CONSTANT;

			switch (opcode) {
			case AML_ZERO_OP:

				obj_desc->integer.value = 0;
				break;

			case AML_ONE_OP:

				obj_desc->integer.value = 1;
				break;

			case AML_ONES_OP:

				obj_desc->integer.value = ACPI_UINT64_MAX;

				/* Truncate value if we are executing from a 32-bit ACPI table */

#ifndef ACPI_NO_METHOD_EXECUTION
				acpi_ex_truncate_for32bit_table(obj_desc);
#endif
				break;

			case AML_REVISION_OP:

				obj_desc->integer.value = ACPI_CA_VERSION;
				break;

			default:

				ACPI_ERROR((AE_INFO,
					    "Unknown constant opcode 0x%X",
					    opcode));
				status = AE_AML_OPERAND_TYPE;
				break;
			}
			break;

		case AML_TYPE_LITERAL:

			obj_desc->integer.value = op->common.value.integer;

#ifndef ACPI_NO_METHOD_EXECUTION
			acpi_ex_truncate_for32bit_table(obj_desc);
#endif
			break;

		default:

			ACPI_ERROR((AE_INFO, "Unknown Integer type 0x%X",
				    op_info->type));
			status = AE_AML_OPERAND_TYPE;
			break;
		}
		break;

	case ACPI_TYPE_STRING:

		obj_desc->string.pointer = op->common.value.string;
		obj_desc->string.length =
		    (u32) ACPI_STRLEN(op->common.value.string);

		/*
		 * The string is contained in the ACPI table, don't ever try
		 * to delete it
		 */
		obj_desc->common.flags |= AOPOBJ_STATIC_POINTER;
		break;

	case ACPI_TYPE_METHOD:
		break;

	case ACPI_TYPE_LOCAL_REFERENCE:

		switch (op_info->type) {
		case AML_TYPE_LOCAL_VARIABLE:

			/* Local ID (0-7) is (AML opcode - base AML_LOCAL_OP) */

			obj_desc->reference.value =
			    ((u32)opcode) - AML_LOCAL_OP;
			obj_desc->reference.class = ACPI_REFCLASS_LOCAL;

#ifndef ACPI_NO_METHOD_EXECUTION
			status =
			    acpi_ds_method_data_get_node(ACPI_REFCLASS_LOCAL,
							 obj_desc->reference.
							 value, walk_state,
							 ACPI_CAST_INDIRECT_PTR
							 (struct
							  acpi_namespace_node,
							  &obj_desc->reference.
							  object));
#endif
			break;

		case AML_TYPE_METHOD_ARGUMENT:

			/* Arg ID (0-6) is (AML opcode - base AML_ARG_OP) */

			obj_desc->reference.value = ((u32)opcode) - AML_ARG_OP;
			obj_desc->reference.class = ACPI_REFCLASS_ARG;

#ifndef ACPI_NO_METHOD_EXECUTION
			status = acpi_ds_method_data_get_node(ACPI_REFCLASS_ARG,
							      obj_desc->
							      reference.value,
							      walk_state,
							      ACPI_CAST_INDIRECT_PTR
							      (struct
							       acpi_namespace_node,
							       &obj_desc->
							       reference.
							       object));
#endif
			break;

		default:	/* Object name or Debug object */

			switch (op->common.aml_opcode) {
			case AML_INT_NAMEPATH_OP:

				/* Node was saved in Op */

				obj_desc->reference.node = op->common.node;
				obj_desc->reference.object =
				    op->common.node->object;
				obj_desc->reference.class = ACPI_REFCLASS_NAME;
				break;

			case AML_DEBUG_OP:

				obj_desc->reference.class = ACPI_REFCLASS_DEBUG;
				break;

			default:

				ACPI_ERROR((AE_INFO,
					    "Unimplemented reference type for AML opcode: 0x%4.4X",
					    opcode));
				return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
			}
			break;
		}
		break;

	default:

		ACPI_ERROR((AE_INFO, "Unimplemented data type: 0x%X",
			    obj_desc->common.type));

		status = AE_AML_OPERAND_TYPE;
		break;
	}

	return_ACPI_STATUS(status);
}
gpl-2.0
omega-roms/I9500_Stock_Kernel_KK_4.4.2
drivers/acpi/acpica/dswstate.c
4919
21428
/****************************************************************************** * * Module Name: dswstate - Dispatcher parse tree walk management routines * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acparser.h"
#include "acdispat.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_DISPATCHER
ACPI_MODULE_NAME("dswstate")

/* Local prototypes */
static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *ws);
static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *ws);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_result_pop
 *
 * PARAMETERS:  Object              - Where to return the popped object
 *              walk_state          - Current Walk state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Pop an object off the top of this walk's result stack
 *
 ******************************************************************************/

acpi_status
acpi_ds_result_pop(union acpi_operand_object **object,
		   struct acpi_walk_state *walk_state)
{
	u32 index;
	union acpi_generic_state *state;
	acpi_status status;

	ACPI_FUNCTION_NAME(ds_result_pop);

	state = walk_state->results;

	/* Incorrect state of result stack */

	if (state && !walk_state->result_count) {
		ACPI_ERROR((AE_INFO, "No results on result stack"));
		return (AE_AML_INTERNAL);
	}

	if (!state && walk_state->result_count) {
		ACPI_ERROR((AE_INFO, "No result state for result stack"));
		return (AE_AML_INTERNAL);
	}

	/* Empty result stack */

	if (!state) {
		ACPI_ERROR((AE_INFO, "Result stack is empty! State=%p",
			    walk_state));
		return (AE_AML_NO_RETURN_VALUE);
	}

	/* Return object of the top element and clean that top element result stack */

	walk_state->result_count--;
	/*
	 * Result stack is segmented into frames of ACPI_RESULTS_FRAME_OBJ_NUM
	 * slots; index is the position within the current (top) frame.
	 */
	index = (u32)walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;

	*object = state->results.obj_desc[index];
	if (!*object) {
		ACPI_ERROR((AE_INFO,
			    "No result objects on result stack, State=%p",
			    walk_state));
		return (AE_AML_NO_RETURN_VALUE);
	}

	state->results.obj_desc[index] = NULL;
	if (index == 0) {
		/* Frame is now empty - release it */
		status = acpi_ds_result_stack_pop(walk_state);
		if (ACPI_FAILURE(status)) {
			return (status);
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Obj=%p [%s] Index=%X State=%p Num=%X\n", *object,
			  acpi_ut_get_object_type_name(*object),
			  index, walk_state, walk_state->result_count));

	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_result_push
 *
 * PARAMETERS:  Object              - Where to return the popped object
 *              walk_state          - Current Walk state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Push an object onto the current result stack
 *
 ******************************************************************************/

acpi_status
acpi_ds_result_push(union acpi_operand_object * object,
		    struct acpi_walk_state * walk_state)
{
	union acpi_generic_state *state;
	acpi_status status;
	u32 index;

	ACPI_FUNCTION_NAME(ds_result_push);

	if (walk_state->result_count > walk_state->result_size) {
		ACPI_ERROR((AE_INFO, "Result stack is full"));
		return (AE_AML_INTERNAL);
	} else if (walk_state->result_count == walk_state->result_size) {

		/* Extend the result stack */

		status = acpi_ds_result_stack_push(walk_state);
		if (ACPI_FAILURE(status)) {
			ACPI_ERROR((AE_INFO,
				    "Failed to extend the result stack"));
			return (status);
		}
	}

	if (!(walk_state->result_count < walk_state->result_size)) {
		ACPI_ERROR((AE_INFO, "No free elements in result stack"));
		return (AE_AML_INTERNAL);
	}

	state = walk_state->results;
	if (!state) {
		ACPI_ERROR((AE_INFO, "No result stack frame during push"));
		return (AE_AML_INTERNAL);
	}

	if (!object) {
		ACPI_ERROR((AE_INFO,
			    "Null Object! Obj=%p State=%p Num=%u",
			    object, walk_state, walk_state->result_count));
		return (AE_BAD_PARAMETER);
	}

	/* Assign the address of object to the top free element of result stack */

	index = (u32)walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
	state->results.obj_desc[index] = object;
	walk_state->result_count++;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p [%s] State=%p Num=%X Cur=%X\n",
			  object,
			  acpi_ut_get_object_type_name((union
							acpi_operand_object *)
						       object), walk_state,
			  walk_state->result_count,
			  walk_state->current_result));

	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_result_stack_push
 *
 * PARAMETERS:  walk_state          - Current Walk state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Push an object onto the walk_state result stack
 *
 ******************************************************************************/

static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *walk_state)
{
	union acpi_generic_state *state;

	ACPI_FUNCTION_NAME(ds_result_stack_push);

	/* Check for stack overflow */

	if (((u32) walk_state->result_size + ACPI_RESULTS_FRAME_OBJ_NUM) >
	    ACPI_RESULTS_OBJ_NUM_MAX) {
		ACPI_ERROR((AE_INFO, "Result stack overflow: State=%p Num=%u",
			    walk_state, walk_state->result_size));
		return (AE_STACK_OVERFLOW);
	}

	state = acpi_ut_create_generic_state();
	if (!state) {
		return (AE_NO_MEMORY);
	}

	state->common.descriptor_type = ACPI_DESC_TYPE_STATE_RESULT;
	acpi_ut_push_generic_state(&walk_state->results, state);

	/* Increase the length of the result stack by the length of frame */

	walk_state->result_size += ACPI_RESULTS_FRAME_OBJ_NUM;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Results=%p State=%p\n",
			  state, walk_state));

	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_result_stack_pop
 *
 * PARAMETERS:  walk_state          - Current Walk state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Pop an object off of the walk_state result stack
 *
 ******************************************************************************/

static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *walk_state)
{
	union acpi_generic_state *state;

	ACPI_FUNCTION_NAME(ds_result_stack_pop);

	/* Check for stack underflow */

	if (walk_state->results == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "Result stack underflow - State=%p\n",
				  walk_state));
		return (AE_AML_NO_OPERAND);
	}

	if (walk_state->result_size < ACPI_RESULTS_FRAME_OBJ_NUM) {
		ACPI_ERROR((AE_INFO, "Insufficient result stack size"));
		return (AE_AML_INTERNAL);
	}

	state = acpi_ut_pop_generic_state(&walk_state->results);
	acpi_ut_delete_generic_state(state);

	/* Decrease the length of result stack by the length of frame */

	walk_state->result_size -= ACPI_RESULTS_FRAME_OBJ_NUM;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Result=%p RemainingResults=%X State=%p\n",
			  state, walk_state->result_count, walk_state));

	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_obj_stack_push
 *
 * PARAMETERS:  Object              - Object to push
 *              walk_state          - Current Walk state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Push an object onto this walk's object/operand stack
 *
 ******************************************************************************/

acpi_status
acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
{
	ACPI_FUNCTION_NAME(ds_obj_stack_push);

	/* Check for stack overflow */

	if (walk_state->num_operands >= ACPI_OBJ_NUM_OPERANDS) {
		ACPI_ERROR((AE_INFO,
			    "Object stack overflow! Obj=%p State=%p #Ops=%u",
			    object, walk_state, walk_state->num_operands));
		return (AE_STACK_OVERFLOW);
	}

	/* Put the object onto the stack */

	walk_state->operands[walk_state->operand_index] = object;
	walk_state->num_operands++;

	/* For the usual order of filling the operand stack */

	walk_state->operand_index++;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p [%s] State=%p #Ops=%X\n",
			  object,
			  acpi_ut_get_object_type_name((union
							acpi_operand_object *)
						       object), walk_state,
			  walk_state->num_operands));

	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_obj_stack_pop
 *
 * PARAMETERS:  pop_count           - Number of objects/entries to pop
 *              walk_state          - Current Walk state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Pop this walk's object stack. Objects on the stack are NOT
 *              deleted by this routine.
 *
 ******************************************************************************/

acpi_status
acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
{
	u32 i;

	ACPI_FUNCTION_NAME(ds_obj_stack_pop);

	for (i = 0; i < pop_count; i++) {

		/* Check for stack underflow */

		if (walk_state->num_operands == 0) {
			ACPI_ERROR((AE_INFO,
				    "Object stack underflow! Count=%X State=%p #Ops=%u",
				    pop_count, walk_state,
				    walk_state->num_operands));
			return (AE_STACK_UNDERFLOW);
		}

		/* Just set the stack entry to null */

		walk_state->num_operands--;
		walk_state->operands[walk_state->num_operands] = NULL;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%u\n",
			  pop_count, walk_state, walk_state->num_operands));

	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_obj_stack_pop_and_delete
 *
 * PARAMETERS:  pop_count           - Number of objects/entries to pop
 *              walk_state          - Current Walk state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Pop this walk's object stack and delete each object that is
 *              popped off.
 *
 ******************************************************************************/

void
acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
				 struct acpi_walk_state *walk_state)
{
	s32 i;
	union acpi_operand_object *obj_desc;

	ACPI_FUNCTION_NAME(ds_obj_stack_pop_and_delete);

	if (pop_count == 0) {
		return;
	}

	/*
	 * Walk downward from entry (pop_count - 1) to 0; stops early (silently)
	 * if the operand stack runs out.
	 * NOTE(review): indexes operands[i] directly rather than the current
	 * stack top (num_operands - 1) - assumes callers always pop from a
	 * stack whose top is at pop_count - 1; confirm against callers.
	 */
	for (i = (s32) pop_count - 1; i >= 0; i--) {
		if (walk_state->num_operands == 0) {
			return;
		}

		/* Pop the stack and delete an object if present in this stack entry */

		walk_state->num_operands--;
		obj_desc = walk_state->operands[i];
		if (obj_desc) {
			acpi_ut_remove_reference(walk_state->operands[i]);
			walk_state->operands[i] = NULL;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%X\n",
			  pop_count, walk_state, walk_state->num_operands));
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_get_current_walk_state
 *
 * PARAMETERS:  Thread          - Get current active state for this Thread
 *
 * RETURN:      Pointer to the current walk state
 *
 * DESCRIPTION: Get the walk state that is at the head of the list (the "current"
 *              walk state.)
 *
 ******************************************************************************/

struct acpi_walk_state *acpi_ds_get_current_walk_state(struct acpi_thread_state
						       *thread)
{
	ACPI_FUNCTION_NAME(ds_get_current_walk_state);

	if (!thread) {
		return (NULL);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Current WalkState %p\n",
			  thread->walk_state_list));

	return (thread->walk_state_list);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_push_walk_state
 *
 * PARAMETERS:  walk_state      - State to push
 *              Thread          - Thread state object
 *
 * RETURN:      None
 *
 * DESCRIPTION: Place the Thread state at the head of the state list
 *
 ******************************************************************************/

void
acpi_ds_push_walk_state(struct acpi_walk_state *walk_state,
			struct acpi_thread_state *thread)
{
	ACPI_FUNCTION_TRACE(ds_push_walk_state);

	walk_state->next = thread->walk_state_list;
	thread->walk_state_list = walk_state;

	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_pop_walk_state
 *
 * PARAMETERS:  Thread      - Current thread state
 *
 * RETURN:      A walk_state object popped from the thread's stack
 *
 * DESCRIPTION: Remove and return the walkstate object that is at the head of
 *              the walk stack for the given walk list. NULL indicates that
 *              the list is empty.
 *
 ******************************************************************************/

struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread)
{
	struct acpi_walk_state *walk_state;

	ACPI_FUNCTION_TRACE(ds_pop_walk_state);

	walk_state = thread->walk_state_list;

	if (walk_state) {

		/* Next walk state becomes the current walk state */

		thread->walk_state_list = walk_state->next;

		/*
		 * Don't clear the NEXT field, this serves as an indicator
		 * that there is a parent WALK STATE
		 * Do Not: walk_state->Next = NULL;
		 */
	}

	return_PTR(walk_state);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_create_walk_state
 *
 * PARAMETERS:  owner_id        - ID for object creation
 *              Origin          - Starting point for this walk
 *              method_desc     - Method object
 *              Thread          - Current thread state
 *
 * RETURN:      Pointer to the new walk state.
 *
 * DESCRIPTION: Allocate and initialize a new walk state. The current walk
 *              state is set to this new state.
 *
 ******************************************************************************/

struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id,
						  union acpi_parse_object
						  *origin,
						  union acpi_operand_object
						  *method_desc,
						  struct acpi_thread_state
						  *thread)
{
	struct acpi_walk_state *walk_state;

	ACPI_FUNCTION_TRACE(ds_create_walk_state);

	walk_state = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_walk_state));
	if (!walk_state) {
		return_PTR(NULL);	/* Caller must check for NULL */
	}

	walk_state->descriptor_type = ACPI_DESC_TYPE_WALK;
	walk_state->method_desc = method_desc;
	walk_state->owner_id = owner_id;
	walk_state->origin = origin;
	walk_state->thread = thread;

	walk_state->parser_state.start_op = origin;

	/* Init the method args/local */

#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY))
	acpi_ds_method_data_init(walk_state);
#endif

	/* Put the new state at the head of the walk list */

	if (thread) {
		acpi_ds_push_walk_state(walk_state, thread);
	}

	return_PTR(walk_state);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_init_aml_walk
 *
 * PARAMETERS:  walk_state      - New state to be initialized
 *              Op              - Current parse op
 *              method_node     - Control method NS node, if any
 *              aml_start       - Start of AML
 *              aml_length      - Length of AML
 *              Info            - Method info block (params, etc.)
 *              pass_number     - 1, 2, or 3
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize a walk state for a pass 1 or 2 parse tree walk
 *
 ******************************************************************************/

acpi_status
acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
		      union acpi_parse_object *op,
		      struct acpi_namespace_node *method_node,
		      u8 * aml_start,
		      u32 aml_length,
		      struct acpi_evaluate_info *info, u8 pass_number)
{
	acpi_status status;
	/* Alias for the embedded parser state within walk_state */
	struct acpi_parse_state *parser_state = &walk_state->parser_state;
	union acpi_parse_object *extra_op;

	ACPI_FUNCTION_TRACE(ds_init_aml_walk);

	/* Current pointer and start both begin at aml_start; end = pkg_end */

	walk_state->parser_state.aml =
	    walk_state->parser_state.aml_start = aml_start;
	walk_state->parser_state.aml_end =
	    walk_state->parser_state.pkg_end = aml_start + aml_length;

	/* The next_op of the next_walk will be the beginning of the method */

	walk_state->next_op = NULL;
	walk_state->pass_number = pass_number;

	if (info) {
		walk_state->params = info->parameters;
		walk_state->caller_return_desc = &info->return_object;
	}

	status = acpi_ps_init_scope(&walk_state->parser_state, op);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	if (method_node) {
		walk_state->parser_state.start_node = method_node;
		walk_state->walk_type = ACPI_WALK_METHOD;
		walk_state->method_node = method_node;
		walk_state->method_desc =
		    acpi_ns_get_attached_object(method_node);

		/* Push start scope on scope stack and make it current  */

		status =
		    acpi_ds_scope_stack_push(method_node, ACPI_TYPE_METHOD,
					     walk_state);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}

		/* Init the method arguments */

		status = acpi_ds_method_data_init_args(walk_state->params,
						       ACPI_METHOD_NUM_ARGS,
						       walk_state);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	} else {
		/*
		 * Setup the current scope.
		 * Find a Named Op that has a namespace node associated with it.
		 * search upwards from this Op. Current scope is the first
		 * Op with a namespace node.
		 */
		extra_op = parser_state->start_op;
		while (extra_op && !extra_op->common.node) {
			extra_op = extra_op->common.parent;
		}

		if (!extra_op) {
			parser_state->start_node = NULL;
		} else {
			parser_state->start_node = extra_op->common.node;
		}

		if (parser_state->start_node) {

			/* Push start scope on scope stack and make it current  */

			status =
			    acpi_ds_scope_stack_push(parser_state->start_node,
						     parser_state->start_node->
						     type, walk_state);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}
		}
	}

	status = acpi_ds_init_callbacks(walk_state, pass_number);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_delete_walk_state
 *
 * PARAMETERS:  walk_state      - State to delete
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Delete a walk state including all internal data structures
 *
 ******************************************************************************/

void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state)
{
	union acpi_generic_state *state;

	ACPI_FUNCTION_TRACE_PTR(ds_delete_walk_state, walk_state);

	if (!walk_state) {
		return;
	}

	if (walk_state->descriptor_type != ACPI_DESC_TYPE_WALK) {
		ACPI_ERROR((AE_INFO, "%p is not a valid walk state",
			    walk_state));
		return;
	}

	/* There should not be any open scopes */

	if (walk_state->parser_state.scope) {
		ACPI_ERROR((AE_INFO, "%p walk still has a scope list",
			    walk_state));
		acpi_ps_cleanup_scope(&walk_state->parser_state);
	}

	/* Always must free any linked control states */

	while (walk_state->control_state) {
		state = walk_state->control_state;
		walk_state->control_state = state->common.next;

		acpi_ut_delete_generic_state(state);
	}

	/* Always must free any linked parse states */

	while (walk_state->scope_info) {
		state = walk_state->scope_info;
		walk_state->scope_info = state->common.next;

		acpi_ut_delete_generic_state(state);
	}

	/* Always must free any stacked result states */

	while (walk_state->results) {
		state = walk_state->results;
		walk_state->results = state->common.next;

		acpi_ut_delete_generic_state(state);
	}

	ACPI_FREE(walk_state);
	return_VOID;
}
gpl-2.0
AOKP/kernel_samsung_manta
drivers/acpi/acpica/dscontrol.c
4919
11508
/******************************************************************************
 *
 * Module Name: dscontrol - Support for execution control opcodes -
 *                          if/else/while/return
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2012, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "amlcode.h"
#include "acdispat.h"
#include "acinterp.h"

#define _COMPONENT          ACPI_DISPATCHER
ACPI_MODULE_NAME("dscontrol")

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_exec_begin_control_op
 *
 * PARAMETERS:  walk_state      - Current state of the AML parse/execute walk
 *              op              - The control Op (If/Else/While/Return)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Handles all control ops encountered during control method
 *              execution. Called when the interpreter *enters* a control
 *              opcode; pushes a new control state for If/While so that these
 *              constructs can be arbitrarily nested.
 *
 ******************************************************************************/
acpi_status
acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
			      union acpi_parse_object *op)
{
	acpi_status status = AE_OK;
	union acpi_generic_state *control_state;

	ACPI_FUNCTION_NAME(ds_exec_begin_control_op);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p Opcode=%2.2X State=%p\n",
			  op, op->common.aml_opcode, walk_state));

	switch (op->common.aml_opcode) {
	case AML_WHILE_OP:
		/*
		 * If this is an additional iteration of a while loop, continue.
		 * There is no need to allocate a new control state.
		 * (Detected by comparing the saved predicate start address with
		 * the current AML pointer, one byte behind the opcode.)
		 */
		if (walk_state->control_state) {
			if (walk_state->control_state->control.
			    aml_predicate_start ==
			    (walk_state->parser_state.aml - 1)) {

				/* Reset the state to start-of-loop */

				walk_state->control_state->common.state =
				    ACPI_CONTROL_CONDITIONAL_EXECUTING;
				break;
			}
		}

		/*lint -fallthrough */

	case AML_IF_OP:
		/*
		 * IF/WHILE: Create a new control state to manage these
		 * constructs. We need to manage these as a stack, in order
		 * to handle nesting.
		 */
		control_state = acpi_ut_create_control_state();
		if (!control_state) {
			status = AE_NO_MEMORY;
			break;
		}
		/*
		 * Save a pointer to the predicate for multiple executions
		 * of a loop
		 */
		control_state->control.aml_predicate_start =
		    walk_state->parser_state.aml - 1;
		control_state->control.package_end =
		    walk_state->parser_state.pkg_end;
		control_state->control.opcode = op->common.aml_opcode;

		/* Push the control state on this walk's control stack */

		acpi_ut_push_generic_state(&walk_state->control_state,
					   control_state);
		break;

	case AML_ELSE_OP:

		/* Predicate is in the state object */
		/* If predicate is true, the IF was executed, ignore ELSE part */

		if (walk_state->last_predicate) {
			status = AE_CTRL_TRUE;
		}

		break;

	case AML_RETURN_OP:

		break;

	default:
		break;
	}

	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_exec_end_control_op
 *
 * PARAMETERS:  walk_state      - Current state of the AML parse/execute walk
 *              op              - The control Op (If/Else/While/Return/...)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Handles all control ops encountered during control method
 *              execution. Called when the interpreter *leaves* a control
 *              opcode; pops/frees the matching control state and converts
 *              the result into a control exception code (AE_CTRL_*) where
 *              needed (loop continuation, break/continue, method return).
 *
 ******************************************************************************/
acpi_status
acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
			    union acpi_parse_object * op)
{
	acpi_status status = AE_OK;
	union acpi_generic_state *control_state;

	ACPI_FUNCTION_NAME(ds_exec_end_control_op);

	switch (op->common.aml_opcode) {
	case AML_IF_OP:

		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[IF_OP] Op=%p\n", op));

		/*
		 * Save the result of the predicate in case there is an
		 * ELSE to come (read back in acpi_ds_exec_begin_control_op)
		 */
		walk_state->last_predicate =
		    (u8)walk_state->control_state->common.value;

		/*
		 * Pop the control state that was created at the start
		 * of the IF and free it
		 */
		control_state =
		    acpi_ut_pop_generic_state(&walk_state->control_state);
		acpi_ut_delete_generic_state(control_state);
		break;

	case AML_ELSE_OP:

		break;

	case AML_WHILE_OP:

		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[WHILE_OP] Op=%p\n", op));

		control_state = walk_state->control_state;
		if (control_state->common.value) {

			/* Predicate was true, the body of the loop was just executed */

			/*
			 * This loop counter mechanism allows the interpreter to escape
			 * possibly infinite loops. This can occur in poorly written AML
			 * when the hardware does not respond within a while loop and the
			 * loop does not implement a timeout.
			 */
			control_state->control.loop_count++;
			if (control_state->control.loop_count >
			    ACPI_MAX_LOOP_ITERATIONS) {
				status = AE_AML_INFINITE_LOOP;
				break;
			}

			/*
			 * Go back and evaluate the predicate and maybe execute the loop
			 * another time (AE_CTRL_PENDING plus aml_last_while tell the
			 * dispatcher where to branch back to)
			 */
			status = AE_CTRL_PENDING;
			walk_state->aml_last_while =
			    control_state->control.aml_predicate_start;
			break;
		}

		/* Predicate was false, terminate this while loop */

		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
				  "[WHILE_OP] termination! Op=%p\n", op));

		/* Pop this control state and free it */

		control_state =
		    acpi_ut_pop_generic_state(&walk_state->control_state);
		acpi_ut_delete_generic_state(control_state);
		break;

	case AML_RETURN_OP:

		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
				  "[RETURN_OP] Op=%p Arg=%p\n", op,
				  op->common.value.arg));

		/*
		 * One optional operand -- the return value
		 * It can be either an immediate operand or a result that
		 * has been bubbled up the tree
		 */
		if (op->common.value.arg) {

			/* Since we have a real Return(), delete any implicit return */

			acpi_ds_clear_implicit_return(walk_state);

			/* Return statement has an immediate operand */

			status =
			    acpi_ds_create_operands(walk_state,
						    op->common.value.arg);
			if (ACPI_FAILURE(status)) {
				return (status);
			}

			/*
			 * If value being returned is a Reference (such as
			 * an arg or local), resolve it now because it may
			 * cease to exist at the end of the method.
			 */
			status =
			    acpi_ex_resolve_to_value(&walk_state->operands[0],
						     walk_state);
			if (ACPI_FAILURE(status)) {
				return (status);
			}

			/*
			 * Get the return value and save as the last result
			 * value. This is the only place where walk_state->return_desc
			 * is set to anything other than zero!
			 */
			walk_state->return_desc = walk_state->operands[0];
		} else if (walk_state->result_count) {

			/* Since we have a real Return(), delete any implicit return */

			acpi_ds_clear_implicit_return(walk_state);

			/*
			 * The return value has come from a previous calculation.
			 *
			 * If value being returned is a Reference (such as
			 * an arg or local), resolve it now because it may
			 * cease to exist at the end of the method.
			 *
			 * Allow references created by the Index operator to return
			 * unchanged.
			 */
			if ((ACPI_GET_DESCRIPTOR_TYPE
			     (walk_state->results->results.obj_desc[0]) ==
			     ACPI_DESC_TYPE_OPERAND)
			    && ((walk_state->results->results.obj_desc[0])->
				common.type == ACPI_TYPE_LOCAL_REFERENCE)
			    && ((walk_state->results->results.obj_desc[0])->
				reference.class != ACPI_REFCLASS_INDEX)) {
				status =
				    acpi_ex_resolve_to_value(&walk_state->
							     results->results.
							     obj_desc[0],
							     walk_state);
				if (ACPI_FAILURE(status)) {
					return (status);
				}
			}

			walk_state->return_desc =
			    walk_state->results->results.obj_desc[0];
		} else {
			/* No return operand */

			if (walk_state->num_operands) {
				acpi_ut_remove_reference(walk_state->
							 operands[0]);
			}

			walk_state->operands[0] = NULL;
			walk_state->num_operands = 0;
			walk_state->return_desc = NULL;
		}

		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
				  "Completed RETURN_OP State=%p, RetVal=%p\n",
				  walk_state, walk_state->return_desc));

		/* End the control method execution right now */

		status = AE_CTRL_TERMINATE;
		break;

	case AML_NOOP_OP:

		/* Just do nothing! */

		break;

	case AML_BREAK_POINT_OP:
		/*
		 * Set the single-step flag. This will cause the debugger (if present)
		 * to break to the console within the AML debugger at the start of the
		 * next AML instruction.
		 */
		ACPI_DEBUGGER_EXEC(acpi_gbl_cm_single_step = TRUE);
		ACPI_DEBUGGER_EXEC(acpi_os_printf
				   ("**break** Executed AML BreakPoint opcode\n"));

		/* Call to the OSL in case OS wants a piece of the action */

		status = acpi_os_signal(ACPI_SIGNAL_BREAKPOINT,
					"Executed AML Breakpoint opcode");
		break;

	case AML_BREAK_OP:
	case AML_CONTINUE_OP:	/* ACPI 2.0 */

		/* Pop and delete control states until we find a while */

		while (walk_state->control_state &&
		       (walk_state->control_state->control.opcode !=
			AML_WHILE_OP)) {
			control_state =
			    acpi_ut_pop_generic_state(&walk_state->
						      control_state);
			acpi_ut_delete_generic_state(control_state);
		}

		/* No while found? (Break/Continue outside any loop is an AML error) */

		if (!walk_state->control_state) {
			return (AE_AML_NO_WHILE);
		}

		/* Was: walk_state->aml_last_while = walk_state->control_state->Control.aml_predicate_start; */

		walk_state->aml_last_while =
		    walk_state->control_state->control.package_end;

		/* Return status depending on opcode */

		if (op->common.aml_opcode == AML_BREAK_OP) {
			status = AE_CTRL_BREAK;
		} else {
			status = AE_CTRL_CONTINUE;
		}

		break;

	default:

		ACPI_ERROR((AE_INFO, "Unknown control opcode=0x%X Op=%p",
			    op->common.aml_opcode, op));

		status = AE_AML_BAD_OPCODE;
		break;
	}

	return (status);
}
gpl-2.0
smac0628/htc_gpe_51
drivers/acpi/acpica/psxface.c
4919
11333
/******************************************************************************
 *
 * Module Name: psxface - Parser external interfaces
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2012, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acparser.h"
#include "acdispat.h"
#include "acinterp.h"
#include "actables.h"

#define _COMPONENT          ACPI_PARSER
ACPI_MODULE_NAME("psxface")

/* Local Prototypes */
static void acpi_ps_start_trace(struct acpi_evaluate_info *info);

static void acpi_ps_stop_trace(struct acpi_evaluate_info *info);

static void
acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action);

/*******************************************************************************
 *
 * FUNCTION:    acpi_debug_trace
 *
 * PARAMETERS:  method_name     - Valid ACPI name string
 *              debug_level     - Optional level mask. 0 to use default
 *              debug_layer     - Optional layer mask. 0 to use default
 *              Flags           - bit 1: one shot(1) or persistent(0)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: External interface to enable debug tracing during control
 *              method execution
 *
 ******************************************************************************/
acpi_status
acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags)
{
	acpi_status status;

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* TBDs: Validate name, allow full path or just nameseg */
	/* NOTE: only the first 4 bytes (one nameseg) of Name are used */

	acpi_gbl_trace_method_name = *ACPI_CAST_PTR(u32, name);
	acpi_gbl_trace_flags = flags;

	if (debug_level) {
		acpi_gbl_trace_dbg_level = debug_level;
	}
	if (debug_layer) {
		acpi_gbl_trace_dbg_layer = debug_layer;
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_start_trace
 *
 * PARAMETERS:  Info            - Method info struct
 *
 * RETURN:      None
 *
 * DESCRIPTION: Start control method execution trace. If the method being
 *              entered matches the globally requested trace name, the global
 *              debug level/layer are temporarily raised (originals saved).
 *
 ******************************************************************************/
static void acpi_ps_start_trace(struct acpi_evaluate_info *info)
{
	acpi_status status;

	ACPI_FUNCTION_ENTRY();

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return;
	}

	/* Trace only the one method that was requested */

	if ((!acpi_gbl_trace_method_name) ||
	    (acpi_gbl_trace_method_name != info->resolved_node->name.integer)) {
		goto exit;
	}

	/* Save current settings, then enable maximum (or requested) output */

	acpi_gbl_original_dbg_level = acpi_dbg_level;
	acpi_gbl_original_dbg_layer = acpi_dbg_layer;

	acpi_dbg_level = 0x00FFFFFF;
	acpi_dbg_layer = ACPI_UINT32_MAX;

	if (acpi_gbl_trace_dbg_level) {
		acpi_dbg_level = acpi_gbl_trace_dbg_level;
	}
	if (acpi_gbl_trace_dbg_layer) {
		acpi_dbg_layer = acpi_gbl_trace_dbg_layer;
	}

exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_stop_trace
 *
 * PARAMETERS:  Info            - Method info struct
 *
 * RETURN:      None
 *
 * DESCRIPTION: Stop control method execution trace. Restores the debug
 *              level/layer saved by acpi_ps_start_trace and disables further
 *              tracing if the one-shot flag is set.
 *
 ******************************************************************************/
static void acpi_ps_stop_trace(struct acpi_evaluate_info *info)
{
	acpi_status status;

	ACPI_FUNCTION_ENTRY();

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return;
	}

	if ((!acpi_gbl_trace_method_name) ||
	    (acpi_gbl_trace_method_name != info->resolved_node->name.integer)) {
		goto exit;
	}

	/* Disable further tracing if type is one-shot */

	if (acpi_gbl_trace_flags & 1) {
		acpi_gbl_trace_method_name = 0;
		acpi_gbl_trace_dbg_level = 0;
		acpi_gbl_trace_dbg_layer = 0;
	}

	acpi_dbg_level = acpi_gbl_original_dbg_level;
	acpi_dbg_layer = acpi_gbl_original_dbg_layer;

exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_execute_method
 *
 * PARAMETERS:  Info            - Method info block, contains:
 *                  Node            - Method Node to execute
 *                  obj_desc        - Method object
 *                  Parameters      - List of parameters to pass to the method,
 *                                    terminated by NULL. Params itself may be
 *                                    NULL if no parameters are being passed.
 *                  return_object   - Where to put method's return value (if
 *                                    any). If NULL, no value is returned.
 *                  pass_number     - Parse or execute pass
 *
 * RETURN:      Status. AE_CTRL_RETURN_VALUE indicates success WITH a return
 *              object in info->return_object.
 *
 * DESCRIPTION: Execute a control method
 *
 ******************************************************************************/
acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
{
	acpi_status status;
	union acpi_parse_object *op;
	struct acpi_walk_state *walk_state;

	ACPI_FUNCTION_TRACE(ps_execute_method);

	/* Quick validation of DSDT header */

	acpi_tb_check_dsdt_header();

	/* Validate the Info and method Node */

	if (!info || !info->resolved_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	/* Init for new method, wait on concurrency semaphore */

	status =
	    acpi_ds_begin_method_execution(info->resolved_node, info->obj_desc,
					   NULL);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * The caller "owns" the parameters, so give each one an extra reference
	 */
	acpi_ps_update_parameter_list(info, REF_INCREMENT);

	/* Begin tracing if requested */

	acpi_ps_start_trace(info);

	/*
	 * Execute the method. Performs parse simultaneously
	 */
	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
			  "**** Begin Method Parse/Execute [%4.4s] **** Node=%p Obj=%p\n",
			  info->resolved_node->name.ascii, info->resolved_node,
			  info->obj_desc));

	/* Create and init a Root Node */

	op = acpi_ps_create_scope_op();
	if (!op) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	/* Create and initialize a new walk state */

	info->pass_number = ACPI_IMODE_EXECUTE;
	walk_state =
	    acpi_ds_create_walk_state(info->obj_desc->method.owner_id, NULL,
				      NULL, NULL);
	if (!walk_state) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	status = acpi_ds_init_aml_walk(walk_state, op, info->resolved_node,
				       info->obj_desc->method.aml_start,
				       info->obj_desc->method.aml_length, info,
				       info->pass_number);
	if (ACPI_FAILURE(status)) {
		acpi_ds_delete_walk_state(walk_state);
		goto cleanup;
	}

	if (info->obj_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL) {
		walk_state->parse_flags |= ACPI_PARSE_MODULE_LEVEL;
	}

	/* Invoke an internal method if necessary (bypasses the AML parser) */

	if (info->obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
		status =
		    info->obj_desc->method.dispatch.implementation(walk_state);
		info->return_object = walk_state->return_desc;

		/* Cleanup states */

		acpi_ds_scope_stack_clear(walk_state);
		acpi_ps_cleanup_scope(&walk_state->parser_state);
		acpi_ds_terminate_control_method(walk_state->method_desc,
						 walk_state);
		acpi_ds_delete_walk_state(walk_state);
		goto cleanup;
	}

	/*
	 * Start method evaluation with an implicit return of zero.
	 * This is done for Windows compatibility.
	 */
	if (acpi_gbl_enable_interpreter_slack) {
		walk_state->implicit_return_obj =
		    acpi_ut_create_integer_object((u64) 0);
		if (!walk_state->implicit_return_obj) {
			status = AE_NO_MEMORY;
			acpi_ds_delete_walk_state(walk_state);
			goto cleanup;
		}
	}

	/* Parse the AML */

	status = acpi_ps_parse_aml(walk_state);

	/* walk_state was deleted by parse_aml */

cleanup:
	acpi_ps_delete_parse_tree(op);

	/* End optional tracing */

	acpi_ps_stop_trace(info);

	/* Take away the extra reference that we gave the parameters above */

	acpi_ps_update_parameter_list(info, REF_DECREMENT);

	/* Exit now if error above */

	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * If the method has returned an object, signal this to the caller with
	 * a control exception code
	 */
	if (info->return_object) {
		ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
				  "Method returned ObjDesc=%p\n",
				  info->return_object));
		ACPI_DUMP_STACK_ENTRY(info->return_object);

		status = AE_CTRL_RETURN_VALUE;
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_update_parameter_list
 *
 * PARAMETERS:  Info            - See struct acpi_evaluate_info
 *                                (Used: parameter_type and Parameters)
 *              Action          - Add or Remove reference
 *
 * RETURN:      None
 *
 * DESCRIPTION: Update reference count on all method parameter objects
 *
 ******************************************************************************/
static void
acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action)
{
	u32 i;

	if (info->parameters) {

		/* Update reference count for each parameter */

		for (i = 0; info->parameters[i]; i++) {

			/* Ignore errors, just do them all */

			(void)acpi_ut_update_object_reference(info->
							      parameters[i],
							      action);
		}
	}
}
gpl-2.0
AOSParadox/kernel_msm
drivers/acpi/acpica/hwvalid.c
4919
10510
/******************************************************************************
 *
 * Module Name: hwvalid - I/O request validation
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2012, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"

#define _COMPONENT          ACPI_HARDWARE
ACPI_MODULE_NAME("hwvalid")

/* Local prototypes */
static acpi_status
acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width);

/*
 * Protected I/O ports. Some ports are always illegal, and some are
 * conditionally illegal. This table must remain ordered by port address.
 *
 * The table is used to implement the Microsoft port access rules that
 * first appeared in Windows XP. Some ports are always illegal, and some
 * ports are only illegal if the BIOS calls _OSI with a win_xP string or
 * later (meaning that the BIOS itelf is post-XP.)
 *
 * This provides ACPICA with the desired port protections and
 * Microsoft compatibility.
 *
 * Description of port entries:
 *  DMA:   DMA controller
 *  PIC0:  Programmable Interrupt Controller (8259_a)
 *  PIT1:  System Timer 1
 *  PIT2:  System Timer 2 failsafe
 *  RTC:   Real-time clock
 *  CMOS:  Extended CMOS
 *  DMA1:  DMA 1 page registers
 *  DMA1L: DMA 1 Ch 0 low page
 *  DMA2:  DMA 2 page registers
 *  DMA2L: DMA 2 low page refresh
 *  ARBC:  Arbitration control
 *  SETUP: Reserved system board setup
 *  POS:   POS channel select
 *  PIC1:  Cascaded PIC
 *  IDMA:  ISA DMA
 *  ELCR:  PIC edge/level registers
 *  PCI:   PCI configuration space
 */
static const struct acpi_port_info acpi_protected_ports[] = {
	{"DMA", 0x0000, 0x000F, ACPI_OSI_WIN_XP},
	{"PIC0", 0x0020, 0x0021, ACPI_ALWAYS_ILLEGAL},
	{"PIT1", 0x0040, 0x0043, ACPI_OSI_WIN_XP},
	{"PIT2", 0x0048, 0x004B, ACPI_OSI_WIN_XP},
	{"RTC", 0x0070, 0x0071, ACPI_OSI_WIN_XP},
	{"CMOS", 0x0074, 0x0076, ACPI_OSI_WIN_XP},
	{"DMA1", 0x0081, 0x0083, ACPI_OSI_WIN_XP},
	{"DMA1L", 0x0087, 0x0087, ACPI_OSI_WIN_XP},
	{"DMA2", 0x0089, 0x008B, ACPI_OSI_WIN_XP},
	{"DMA2L", 0x008F, 0x008F, ACPI_OSI_WIN_XP},
	{"ARBC", 0x0090, 0x0091, ACPI_OSI_WIN_XP},
	{"SETUP", 0x0093, 0x0094, ACPI_OSI_WIN_XP},
	{"POS", 0x0096, 0x0097, ACPI_OSI_WIN_XP},
	{"PIC1", 0x00A0, 0x00A1, ACPI_ALWAYS_ILLEGAL},
	{"IDMA", 0x00C0, 0x00DF, ACPI_OSI_WIN_XP},
	{"ELCR", 0x04D0, 0x04D1, ACPI_ALWAYS_ILLEGAL},
	{"PCI", 0x0CF8, 0x0CFF, ACPI_OSI_WIN_XP}
};

#define ACPI_PORT_INFO_ENTRIES  ACPI_ARRAY_LENGTH (acpi_protected_ports)

/******************************************************************************
 *
 * FUNCTION:    acpi_hw_validate_io_request
 *
 * PARAMETERS:  Address            Address of I/O port/register
 *              bit_width          Number of bits (8,16,32)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Validates an I/O request (address/length). Certain ports are
 *              always illegal and some ports are only illegal depending on
 *              the requests the BIOS AML code makes to the predefined
 *              _OSI method.
 *
 ******************************************************************************/
static acpi_status
acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
{
	u32 i;
	u32 byte_width;
	acpi_io_address last_address;
	const struct acpi_port_info *port_info;

	ACPI_FUNCTION_TRACE(hw_validate_io_request);

	/* Supported widths are 8/16/32 */

	if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) {
		ACPI_ERROR((AE_INFO, "Bad BitWidth parameter: %8.8X",
			    bit_width));
		/*
		 * Fix: use return_ACPI_STATUS so this exit balances the
		 * ACPI_FUNCTION_TRACE entry above, like every other exit
		 * from this function (a bare return leaves the debug
		 * function-trace nesting unbalanced).
		 */
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	port_info = acpi_protected_ports;
	byte_width = ACPI_DIV_8(bit_width);
	last_address = address + byte_width - 1;

	ACPI_DEBUG_PRINT((ACPI_DB_IO, "Address %p LastAddress %p Length %X",
			  ACPI_CAST_PTR(void, address), ACPI_CAST_PTR(void,
								      last_address),
			  byte_width));

	/* Maximum 16-bit address in I/O space */

	if (last_address > ACPI_UINT16_MAX) {
		ACPI_ERROR((AE_INFO,
			    "Illegal I/O port address/length above 64K: %p/0x%X",
			    ACPI_CAST_PTR(void, address), byte_width));
		return_ACPI_STATUS(AE_LIMIT);
	}

	/* Exit if requested address is not within the protected port table */

	if (address > acpi_protected_ports[ACPI_PORT_INFO_ENTRIES - 1].end) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Check request against the list of protected I/O ports */

	for (i = 0; i < ACPI_PORT_INFO_ENTRIES; i++, port_info++) {
		/*
		 * Check if the requested address range will write to a reserved
		 * port. Four cases to consider:
		 *
		 * 1) Address range is contained completely in the port address range
		 * 2) Address range overlaps port range at the port range start
		 * 3) Address range overlaps port range at the port range end
		 * 4) Address range completely encompasses the port range
		 */
		if ((address <= port_info->end)
		    && (last_address >= port_info->start)) {

			/* Port illegality may depend on the _OSI calls made by the BIOS */

			if (acpi_gbl_osi_data >= port_info->osi_dependency) {
				ACPI_DEBUG_PRINT((ACPI_DB_IO,
						  "Denied AML access to port 0x%p/%X (%s 0x%.4X-0x%.4X)",
						  ACPI_CAST_PTR(void, address),
						  byte_width, port_info->name,
						  port_info->start,
						  port_info->end));

				return_ACPI_STATUS(AE_AML_ILLEGAL_ADDRESS);
			}
		}

		/* Finished if address range ends before the end of this port */

		if (last_address <= port_info->end) {
			break;
		}
	}

	return_ACPI_STATUS(AE_OK);
}

/******************************************************************************
 *
 * FUNCTION:    acpi_hw_read_port
 *
 * PARAMETERS:  Address             Address of I/O port/register to read
 *              Value               Where value is placed
 *              Width               Number of bits
 *
 * RETURN:      Status and value read from port
 *
 * DESCRIPTION: Read data from an I/O port or register. This is a front-end
 *              to acpi_os_read_port that performs validation on both the port
 *              address and the length.
 *
 *****************************************************************************/
acpi_status acpi_hw_read_port(acpi_io_address address, u32 *value, u32 width)
{
	acpi_status status;
	u32 one_byte;
	u32 i;

	/* Truncate address to 16 bits if requested */

	if (acpi_gbl_truncate_io_addresses) {
		address &= ACPI_UINT16_MAX;
	}

	/* Validate the entire request and perform the I/O */

	status = acpi_hw_validate_io_request(address, width);
	if (ACPI_SUCCESS(status)) {
		status = acpi_os_read_port(address, value, width);
		return status;
	}

	if (status != AE_AML_ILLEGAL_ADDRESS) {
		return status;
	}

	/*
	 * There has been a protection violation within the request. Fall
	 * back to byte granularity port I/O and ignore the failing bytes.
	 * This provides Windows compatibility. Denied bytes read as zero
	 * (the accumulator starts at 0 and denied bytes are skipped).
	 */
	for (i = 0, *value = 0; i < width; i += 8) {

		/* Validate and read one byte */

		if (acpi_hw_validate_io_request(address, 8) == AE_OK) {
			status = acpi_os_read_port(address, &one_byte, 8);
			if (ACPI_FAILURE(status)) {
				return status;
			}

			*value |= (one_byte << i);
		}

		address++;
	}

	return AE_OK;
}

/******************************************************************************
 *
 * FUNCTION:    acpi_hw_write_port
 *
 * PARAMETERS:  Address             Address of I/O port/register to write
 *              Value               Value to write
 *              Width               Number of bits
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Write data to an I/O port or register. This is a front-end
 *              to acpi_os_write_port that performs validation on both the port
 *              address and the length.
 *
 *****************************************************************************/
acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width)
{
	acpi_status status;
	u32 i;

	/* Truncate address to 16 bits if requested */

	if (acpi_gbl_truncate_io_addresses) {
		address &= ACPI_UINT16_MAX;
	}

	/* Validate the entire request and perform the I/O */

	status = acpi_hw_validate_io_request(address, width);
	if (ACPI_SUCCESS(status)) {
		status = acpi_os_write_port(address, value, width);
		return status;
	}

	if (status != AE_AML_ILLEGAL_ADDRESS) {
		return status;
	}

	/*
	 * There has been a protection violation within the request. Fall
	 * back to byte granularity port I/O and ignore the failing bytes.
	 * This provides Windows compatibility. Writes to denied bytes are
	 * silently dropped.
	 */
	for (i = 0; i < width; i += 8) {

		/* Validate and write one byte */

		if (acpi_hw_validate_io_request(address, 8) == AE_OK) {
			status =
			    acpi_os_write_port(address, (value >> i) & 0xFF, 8);
			if (ACPI_FAILURE(status)) {
				return status;
			}
		}

		address++;
	}

	return AE_OK;
}
UnORoms/SebastianFM-kernel
drivers/acpi/acpica/utstate.c
4919
10325
/******************************************************************************* * * Module Name: utstate - state object support procedures * ******************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utstate") /******************************************************************************* * * FUNCTION: acpi_ut_create_pkg_state_and_push * * PARAMETERS: Object - Object to be added to the new state * Action - Increment/Decrement * state_list - List the state will be added to * * RETURN: Status * * DESCRIPTION: Create a new state and push it * ******************************************************************************/ acpi_status acpi_ut_create_pkg_state_and_push(void *internal_object, void *external_object, u16 index, union acpi_generic_state **state_list) { union acpi_generic_state *state; ACPI_FUNCTION_ENTRY(); state = acpi_ut_create_pkg_state(internal_object, external_object, index); if (!state) { return (AE_NO_MEMORY); } acpi_ut_push_generic_state(state_list, state); return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ut_push_generic_state * * PARAMETERS: list_head - Head of the state stack * State - State object to push * * RETURN: None * * DESCRIPTION: Push a state object onto a state stack * ******************************************************************************/ void acpi_ut_push_generic_state(union acpi_generic_state **list_head, union acpi_generic_state *state) { ACPI_FUNCTION_TRACE(ut_push_generic_state); /* Push the state object onto the front of the list (stack) */ state->common.next = 
*list_head; *list_head = state; return_VOID; } /******************************************************************************* * * FUNCTION: acpi_ut_pop_generic_state * * PARAMETERS: list_head - Head of the state stack * * RETURN: The popped state object * * DESCRIPTION: Pop a state object from a state stack * ******************************************************************************/ union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state **list_head) { union acpi_generic_state *state; ACPI_FUNCTION_TRACE(ut_pop_generic_state); /* Remove the state object at the head of the list (stack) */ state = *list_head; if (state) { /* Update the list head */ *list_head = state->common.next; } return_PTR(state); } /******************************************************************************* * * FUNCTION: acpi_ut_create_generic_state * * PARAMETERS: None * * RETURN: The new state object. NULL on failure. * * DESCRIPTION: Create a generic state object. Attempt to obtain one from * the global state cache; If none available, create a new one. * ******************************************************************************/ union acpi_generic_state *acpi_ut_create_generic_state(void) { union acpi_generic_state *state; ACPI_FUNCTION_ENTRY(); state = acpi_os_acquire_object(acpi_gbl_state_cache); if (state) { /* Initialize */ memset(state, 0, sizeof(union acpi_generic_state)); state->common.descriptor_type = ACPI_DESC_TYPE_STATE; } return (state); } /******************************************************************************* * * FUNCTION: acpi_ut_create_thread_state * * PARAMETERS: None * * RETURN: New Thread State. 
NULL on failure * * DESCRIPTION: Create a "Thread State" - a flavor of the generic state used * to track per-thread info during method execution * ******************************************************************************/ struct acpi_thread_state *acpi_ut_create_thread_state(void) { union acpi_generic_state *state; ACPI_FUNCTION_TRACE(ut_create_thread_state); /* Create the generic state object */ state = acpi_ut_create_generic_state(); if (!state) { return_PTR(NULL); } /* Init fields specific to the update struct */ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_THREAD; state->thread.thread_id = acpi_os_get_thread_id(); /* Check for invalid thread ID - zero is very bad, it will break things */ if (!state->thread.thread_id) { ACPI_ERROR((AE_INFO, "Invalid zero ID from AcpiOsGetThreadId")); state->thread.thread_id = (acpi_thread_id) 1; } return_PTR((struct acpi_thread_state *)state); } /******************************************************************************* * * FUNCTION: acpi_ut_create_update_state * * PARAMETERS: Object - Initial Object to be installed in the state * Action - Update action to be performed * * RETURN: New state object, null on failure * * DESCRIPTION: Create an "Update State" - a flavor of the generic state used * to update reference counts and delete complex objects such * as packages. 
* ******************************************************************************/ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object *object, u16 action) { union acpi_generic_state *state; ACPI_FUNCTION_TRACE_PTR(ut_create_update_state, object); /* Create the generic state object */ state = acpi_ut_create_generic_state(); if (!state) { return_PTR(NULL); } /* Init fields specific to the update struct */ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_UPDATE; state->update.object = object; state->update.value = action; return_PTR(state); } /******************************************************************************* * * FUNCTION: acpi_ut_create_pkg_state * * PARAMETERS: Object - Initial Object to be installed in the state * Action - Update action to be performed * * RETURN: New state object, null on failure * * DESCRIPTION: Create a "Package State" * ******************************************************************************/ union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object, void *external_object, u16 index) { union acpi_generic_state *state; ACPI_FUNCTION_TRACE_PTR(ut_create_pkg_state, internal_object); /* Create the generic state object */ state = acpi_ut_create_generic_state(); if (!state) { return_PTR(NULL); } /* Init fields specific to the update struct */ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_PACKAGE; state->pkg.source_object = (union acpi_operand_object *)internal_object; state->pkg.dest_object = external_object; state->pkg.index = index; state->pkg.num_packages = 1; return_PTR(state); } /******************************************************************************* * * FUNCTION: acpi_ut_create_control_state * * PARAMETERS: None * * RETURN: New state object, null on failure * * DESCRIPTION: Create a "Control State" - a flavor of the generic state used * to support nested IF/WHILE constructs in the AML. 
* ******************************************************************************/ union acpi_generic_state *acpi_ut_create_control_state(void) { union acpi_generic_state *state; ACPI_FUNCTION_TRACE(ut_create_control_state); /* Create the generic state object */ state = acpi_ut_create_generic_state(); if (!state) { return_PTR(NULL); } /* Init fields specific to the control struct */ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_CONTROL; state->common.state = ACPI_CONTROL_CONDITIONAL_EXECUTING; return_PTR(state); } /******************************************************************************* * * FUNCTION: acpi_ut_delete_generic_state * * PARAMETERS: State - The state object to be deleted * * RETURN: None * * DESCRIPTION: Release a state object to the state cache. NULL state objects * are ignored. * ******************************************************************************/ void acpi_ut_delete_generic_state(union acpi_generic_state *state) { ACPI_FUNCTION_TRACE(ut_delete_generic_state); /* Ignore null state */ if (state) { (void)acpi_os_release_object(acpi_gbl_state_cache, state); } return_VOID; }
gpl-2.0
AlmightyMegadeth00/kernel_oneplus_msm8974
fs/9p/fid.c
5175
7287
/* * V9FS FID Management * * Copyright (C) 2007 by Latchesar Ionkov <lucho@ionkov.net> * Copyright (C) 2005, 2006 by Eric Van Hensbergen <ericvh@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to: * Free Software Foundation * 51 Franklin Street, Fifth Floor * Boston, MA 02111-1301 USA * */ #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/idr.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include "v9fs.h" #include "v9fs_vfs.h" #include "fid.h" /** * v9fs_fid_add - add a fid to a dentry * @dentry: dentry that the fid is being added to * @fid: fid to add * */ int v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid) { struct v9fs_dentry *dent; p9_debug(P9_DEBUG_VFS, "fid %d dentry %s\n", fid->fid, dentry->d_name.name); dent = dentry->d_fsdata; if (!dent) { dent = kmalloc(sizeof(struct v9fs_dentry), GFP_KERNEL); if (!dent) return -ENOMEM; spin_lock_init(&dent->lock); INIT_LIST_HEAD(&dent->fidlist); dentry->d_fsdata = dent; } spin_lock(&dent->lock); list_add(&fid->dlist, &dent->fidlist); spin_unlock(&dent->lock); return 0; } /** * v9fs_fid_find - retrieve a fid that belongs to the specified uid * @dentry: dentry to look for fid in * @uid: return fid that belongs to the specified user * @any: if non-zero, return any fid associated with the dentry * */ static struct p9_fid *v9fs_fid_find(struct dentry *dentry, u32 uid, int any) { struct v9fs_dentry *dent; struct p9_fid *fid, *ret; 
p9_debug(P9_DEBUG_VFS, " dentry: %s (%p) uid %d any %d\n", dentry->d_name.name, dentry, uid, any); dent = (struct v9fs_dentry *) dentry->d_fsdata; ret = NULL; if (dent) { spin_lock(&dent->lock); list_for_each_entry(fid, &dent->fidlist, dlist) { if (any || fid->uid == uid) { ret = fid; break; } } spin_unlock(&dent->lock); } return ret; } /* * We need to hold v9ses->rename_sem as long as we hold references * to returned path array. Array element contain pointers to * dentry names. */ static int build_path_from_dentry(struct v9fs_session_info *v9ses, struct dentry *dentry, char ***names) { int n = 0, i; char **wnames; struct dentry *ds; for (ds = dentry; !IS_ROOT(ds); ds = ds->d_parent) n++; wnames = kmalloc(sizeof(char *) * n, GFP_KERNEL); if (!wnames) goto err_out; for (ds = dentry, i = (n-1); i >= 0; i--, ds = ds->d_parent) wnames[i] = (char *)ds->d_name.name; *names = wnames; return n; err_out: return -ENOMEM; } static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry, uid_t uid, int any) { struct dentry *ds; char **wnames, *uname; int i, n, l, clone, access; struct v9fs_session_info *v9ses; struct p9_fid *fid, *old_fid = NULL; v9ses = v9fs_dentry2v9ses(dentry); access = v9ses->flags & V9FS_ACCESS_MASK; fid = v9fs_fid_find(dentry, uid, any); if (fid) return fid; /* * we don't have a matching fid. To do a TWALK we need * parent fid. We need to prevent rename when we want to * look at the parent. 
*/ down_read(&v9ses->rename_sem); ds = dentry->d_parent; fid = v9fs_fid_find(ds, uid, any); if (fid) { /* Found the parent fid do a lookup with that */ fid = p9_client_walk(fid, 1, (char **)&dentry->d_name.name, 1); goto fid_out; } up_read(&v9ses->rename_sem); /* start from the root and try to do a lookup */ fid = v9fs_fid_find(dentry->d_sb->s_root, uid, any); if (!fid) { /* the user is not attached to the fs yet */ if (access == V9FS_ACCESS_SINGLE) return ERR_PTR(-EPERM); if (v9fs_proto_dotu(v9ses) || v9fs_proto_dotl(v9ses)) uname = NULL; else uname = v9ses->uname; fid = p9_client_attach(v9ses->clnt, NULL, uname, uid, v9ses->aname); if (IS_ERR(fid)) return fid; v9fs_fid_add(dentry->d_sb->s_root, fid); } /* If we are root ourself just return that */ if (dentry->d_sb->s_root == dentry) return fid; /* * Do a multipath walk with attached root. * When walking parent we need to make sure we * don't have a parallel rename happening */ down_read(&v9ses->rename_sem); n = build_path_from_dentry(v9ses, dentry, &wnames); if (n < 0) { fid = ERR_PTR(n); goto err_out; } clone = 1; i = 0; while (i < n) { l = min(n - i, P9_MAXWELEM); /* * We need to hold rename lock when doing a multipath * walk to ensure none of the patch component change */ fid = p9_client_walk(fid, l, &wnames[i], clone); if (IS_ERR(fid)) { if (old_fid) { /* * If we fail, clunk fid which are mapping * to path component and not the last component * of the path. */ p9_client_clunk(old_fid); } kfree(wnames); goto err_out; } old_fid = fid; i += l; clone = 0; } kfree(wnames); fid_out: if (!IS_ERR(fid)) v9fs_fid_add(dentry, fid); err_out: up_read(&v9ses->rename_sem); return fid; } /** * v9fs_fid_lookup - lookup for a fid, try to walk if not found * @dentry: dentry to look for fid in * * Look for a fid in the specified dentry for the current user. * If no fid is found, try to create one walking from a fid from the parent * dentry (if it has one), or the root dentry. 
If the user haven't accessed * the fs yet, attach now and walk from the root. */ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry) { uid_t uid; int any, access; struct v9fs_session_info *v9ses; v9ses = v9fs_dentry2v9ses(dentry); access = v9ses->flags & V9FS_ACCESS_MASK; switch (access) { case V9FS_ACCESS_SINGLE: case V9FS_ACCESS_USER: case V9FS_ACCESS_CLIENT: uid = current_fsuid(); any = 0; break; case V9FS_ACCESS_ANY: uid = v9ses->uid; any = 1; break; default: uid = ~0; any = 0; break; } return v9fs_fid_lookup_with_uid(dentry, uid, any); } struct p9_fid *v9fs_fid_clone(struct dentry *dentry) { struct p9_fid *fid, *ret; fid = v9fs_fid_lookup(dentry); if (IS_ERR(fid)) return fid; ret = p9_client_walk(fid, 0, NULL, 1); return ret; } static struct p9_fid *v9fs_fid_clone_with_uid(struct dentry *dentry, uid_t uid) { struct p9_fid *fid, *ret; fid = v9fs_fid_lookup_with_uid(dentry, uid, 0); if (IS_ERR(fid)) return fid; ret = p9_client_walk(fid, 0, NULL, 1); return ret; } struct p9_fid *v9fs_writeback_fid(struct dentry *dentry) { int err; struct p9_fid *fid; fid = v9fs_fid_clone_with_uid(dentry, 0); if (IS_ERR(fid)) goto error_out; /* * writeback fid will only be used to write back the * dirty pages. We always request for the open fid in read-write * mode so that a partial page write which result in page * read can work. */ err = p9_client_open(fid, O_RDWR); if (err < 0) { p9_client_clunk(fid); fid = ERR_PTR(err); goto error_out; } error_out: return fid; }
gpl-2.0
widz4rd/WIDzard-A850K
tools/perf/util/debugfs.c
5175
2557
#include "util.h" #include "debugfs.h" #include "cache.h" #include <linux/kernel.h> #include <sys/mount.h> static int debugfs_premounted; char debugfs_mountpoint[PATH_MAX + 1] = "/sys/kernel/debug"; char tracing_events_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing/events"; static const char *debugfs_known_mountpoints[] = { "/sys/kernel/debug/", "/debug/", 0, }; static int debugfs_found; /* find the path to the mounted debugfs */ const char *debugfs_find_mountpoint(void) { const char **ptr; char type[100]; FILE *fp; if (debugfs_found) return (const char *) debugfs_mountpoint; ptr = debugfs_known_mountpoints; while (*ptr) { if (debugfs_valid_mountpoint(*ptr) == 0) { debugfs_found = 1; strcpy(debugfs_mountpoint, *ptr); return debugfs_mountpoint; } ptr++; } /* give up and parse /proc/mounts */ fp = fopen("/proc/mounts", "r"); if (fp == NULL) return NULL; while (fscanf(fp, "%*s %" STR(PATH_MAX) "s %99s %*s %*d %*d\n", debugfs_mountpoint, type) == 2) { if (strcmp(type, "debugfs") == 0) break; } fclose(fp); if (strcmp(type, "debugfs") != 0) return NULL; debugfs_found = 1; return debugfs_mountpoint; } /* verify that a mountpoint is actually a debugfs instance */ int debugfs_valid_mountpoint(const char *debugfs) { struct statfs st_fs; if (statfs(debugfs, &st_fs) < 0) return -ENOENT; else if (st_fs.f_type != (long) DEBUGFS_MAGIC) return -ENOENT; return 0; } static void debugfs_set_tracing_events_path(const char *mountpoint) { snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s", mountpoint, "tracing/events"); } /* mount the debugfs somewhere if it's not mounted */ char *debugfs_mount(const char *mountpoint) { /* see if it's already mounted */ if (debugfs_find_mountpoint()) { debugfs_premounted = 1; goto out; } /* if not mounted and no argument */ if (mountpoint == NULL) { /* see if environment variable set */ mountpoint = getenv(PERF_DEBUGFS_ENVIRONMENT); /* if no environment variable, use default */ if (mountpoint == NULL) mountpoint = "/sys/kernel/debug"; } 
if (mount(NULL, mountpoint, "debugfs", 0, NULL) < 0) return NULL; /* save the mountpoint */ debugfs_found = 1; strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint)); out: debugfs_set_tracing_events_path(debugfs_mountpoint); return debugfs_mountpoint; } void debugfs_set_path(const char *mountpoint) { snprintf(debugfs_mountpoint, sizeof(debugfs_mountpoint), "%s", mountpoint); debugfs_set_tracing_events_path(mountpoint); }
gpl-2.0
cameron581/kernel
net/irda/irqueue.c
5431
23284
/********************************************************************* * * Filename: irqueue.c * Version: 0.3 * Description: General queue implementation * Status: Experimental. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Tue Jun 9 13:29:31 1998 * Modified at: Sun Dec 12 13:48:22 1999 * Modified by: Dag Brattli <dagb@cs.uit.no> * Modified at: Thu Jan 4 14:29:10 CET 2001 * Modified by: Marc Zyngier <mzyngier@freesurf.fr> * * Copyright (C) 1998-1999, Aage Kvalnes <aage@cs.uit.no> * Copyright (C) 1998, Dag Brattli, * All Rights Reserved. * * This code is taken from the Vortex Operating System written by Aage * Kvalnes. Aage has agreed that this code can use the GPL licence, * although he does not use that licence in his own code. * * This copyright does however _not_ include the ELF hash() function * which I currently don't know which licence or copyright it * has. Please inform me if you know. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * ********************************************************************/ /* * NOTE : * There are various problems with this package : * o the hash function for ints is pathetic (but could be changed) * o locking is sometime suspicious (especially during enumeration) * o most users have only a few elements (== overhead) * o most users never use search, so don't benefit from hashing * Problem already fixed : * o not 64 bit compliant (most users do hashv = (int) self) * o hashbin_remove() is broken => use hashbin_remove_this() * I think most users would be better served by a simple linked list * (like include/linux/list.h) with a global spinlock per list. 
* Jean II */ /* * Notes on the concurrent access to hashbin and other SMP issues * ------------------------------------------------------------- * Hashbins are very often in the IrDA stack a global repository of * information, and therefore used in a very asynchronous manner following * various events (driver calls, timers, user calls...). * Therefore, very often it is highly important to consider the * management of concurrent access to the hashbin and how to guarantee the * consistency of the operations on it. * * First, we need to define the objective of locking : * 1) Protect user data (content pointed by the hashbin) * 2) Protect hashbin structure itself (linked list in each bin) * * OLD LOCKING * ----------- * * The previous locking strategy, either HB_LOCAL or HB_GLOBAL were * both inadequate in *both* aspect. * o HB_GLOBAL was using a spinlock for each bin (local locking). * o HB_LOCAL was disabling irq on *all* CPUs, so use a single * global semaphore. * The problems were : * A) Global irq disabling is no longer supported by the kernel * B) No protection for the hashbin struct global data * o hashbin_delete() * o hb_current * C) No protection for user data in some cases * * A) HB_LOCAL use global irq disabling, so doesn't work on kernel * 2.5.X. Even when it is supported (kernel 2.4.X and earlier), its * performance is not satisfactory on SMP setups. Most hashbins were * HB_LOCAL, so (A) definitely need fixing. * B) HB_LOCAL could be modified to fix (B). However, because HB_GLOBAL * lock only the individual bins, it will never be able to lock the * global data, so can't do (B). * C) Some functions return pointer to data that is still in the * hashbin : * o hashbin_find() * o hashbin_get_first() * o hashbin_get_next() * As the data is still in the hashbin, it may be changed or free'd * while the caller is examinimg the data. In those case, locking can't * be done within the hashbin, but must include use of the data within * the caller. 
* The caller can easily do this with HB_LOCAL (just disable irqs). * However, this is impossible with HB_GLOBAL because the caller has no * way to know the proper bin, so don't know which spinlock to use. * * Quick summary : can no longer use HB_LOCAL, and HB_GLOBAL is * fundamentally broken and will never work. * * NEW LOCKING * ----------- * * To fix those problems, I've introduce a few changes in the * hashbin locking : * 1) New HB_LOCK scheme * 2) hashbin->hb_spinlock * 3) New hashbin usage policy * * HB_LOCK : * ------- * HB_LOCK is a locking scheme intermediate between the old HB_LOCAL * and HB_GLOBAL. It uses a single spinlock to protect the whole content * of the hashbin. As it is a single spinlock, it can protect the global * data of the hashbin and not only the bins themselves. * HB_LOCK can only protect some of the hashbin calls, so it only lock * call that can be made 100% safe and leave other call unprotected. * HB_LOCK in theory is slower than HB_GLOBAL, but as the hashbin * content is always small contention is not high, so it doesn't matter * much. HB_LOCK is probably faster than HB_LOCAL. * * hashbin->hb_spinlock : * -------------------- * The spinlock that HB_LOCK uses is available for caller, so that * the caller can protect unprotected calls (see below). * If the caller want to do entirely its own locking (HB_NOLOCK), he * can do so and may use safely this spinlock. 
* Locking is done like this : * spin_lock_irqsave(&hashbin->hb_spinlock, flags); * Releasing the lock : * spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); * * Safe & Protected calls : * ---------------------- * The following calls are safe or protected via HB_LOCK : * o hashbin_new() -> safe * o hashbin_delete() * o hashbin_insert() * o hashbin_remove_first() * o hashbin_remove() * o hashbin_remove_this() * o HASHBIN_GET_SIZE() -> atomic * * The following calls only protect the hashbin itself : * o hashbin_lock_find() * o hashbin_find_next() * * Unprotected calls : * ----------------- * The following calls need to be protected by the caller : * o hashbin_find() * o hashbin_get_first() * o hashbin_get_next() * * Locking Policy : * -------------- * If the hashbin is used only in a single thread of execution * (explicitly or implicitely), you can use HB_NOLOCK * If the calling module already provide concurrent access protection, * you may use HB_NOLOCK. * * In all other cases, you need to use HB_LOCK and lock the hashbin * every time before calling one of the unprotected calls. You also must * use the pointer returned by the unprotected call within the locked * region. * * Extra care for enumeration : * -------------------------- * hashbin_get_first() and hashbin_get_next() use the hashbin to * store the current position, in hb_current. * As long as the hashbin remains locked, this is safe. If you unlock * the hashbin, the current position may change if anybody else modify * or enumerate the hashbin. * Summary : do the full enumeration while locked. * * Alternatively, you may use hashbin_find_next(). But, this will * be slower, is more complex to use and doesn't protect the hashbin * content. So, care is needed here as well. * * Other issues : * ------------ * I believe that we are overdoing it by using spin_lock_irqsave() * and we should use only spin_lock_bh() or similar. But, I don't have * the balls to try it out. 
* Don't believe that because hashbin are now (somewhat) SMP safe * that the rest of the code is. Higher layers tend to be safest, * but LAP and LMP would need some serious dedicated love. * * Jean II */ #include <linux/module.h> #include <linux/slab.h> #include <net/irda/irda.h> #include <net/irda/irqueue.h> /************************ QUEUE SUBROUTINES ************************/ /* * Hashbin */ #define GET_HASHBIN(x) ( x & HASHBIN_MASK ) /* * Function hash (name) * * This function hash the input string 'name' using the ELF hash * function for strings. */ static __u32 hash( const char* name) { __u32 h = 0; __u32 g; while(*name) { h = (h<<4) + *name++; if ((g = (h & 0xf0000000))) h ^=g>>24; h &=~g; } return h; } /* * Function enqueue_first (queue, proc) * * Insert item first in queue. * */ static void enqueue_first(irda_queue_t **queue, irda_queue_t* element) { IRDA_DEBUG( 4, "%s()\n", __func__); /* * Check if queue is empty. */ if ( *queue == NULL ) { /* * Queue is empty. Insert one element into the queue. */ element->q_next = element->q_prev = *queue = element; } else { /* * Queue is not empty. Insert element into front of queue. */ element->q_next = (*queue); (*queue)->q_prev->q_next = element; element->q_prev = (*queue)->q_prev; (*queue)->q_prev = element; (*queue) = element; } } /* * Function dequeue (queue) * * Remove first entry in queue * */ static irda_queue_t *dequeue_first(irda_queue_t **queue) { irda_queue_t *ret; IRDA_DEBUG( 4, "dequeue_first()\n"); /* * Set return value */ ret = *queue; if ( *queue == NULL ) { /* * Queue was empty. */ } else if ( (*queue)->q_next == *queue ) { /* * Queue only contained a single element. It will now be * empty. */ *queue = NULL; } else { /* * Queue contained several element. Remove the first one. */ (*queue)->q_prev->q_next = (*queue)->q_next; (*queue)->q_next->q_prev = (*queue)->q_prev; *queue = (*queue)->q_next; } /* * Return the removed entry (or NULL of queue was empty). 
*/ return ret; } /* * Function dequeue_general (queue, element) * * */ static irda_queue_t *dequeue_general(irda_queue_t **queue, irda_queue_t* element) { irda_queue_t *ret; IRDA_DEBUG( 4, "dequeue_general()\n"); /* * Set return value */ ret = *queue; if ( *queue == NULL ) { /* * Queue was empty. */ } else if ( (*queue)->q_next == *queue ) { /* * Queue only contained a single element. It will now be * empty. */ *queue = NULL; } else { /* * Remove specific element. */ element->q_prev->q_next = element->q_next; element->q_next->q_prev = element->q_prev; if ( (*queue) == element) (*queue) = element->q_next; } /* * Return the removed entry (or NULL of queue was empty). */ return ret; } /************************ HASHBIN MANAGEMENT ************************/ /* * Function hashbin_create ( type, name ) * * Create hashbin! * */ hashbin_t *hashbin_new(int type) { hashbin_t* hashbin; /* * Allocate new hashbin */ hashbin = kzalloc(sizeof(*hashbin), GFP_ATOMIC); if (!hashbin) return NULL; /* * Initialize structure */ hashbin->hb_type = type; hashbin->magic = HB_MAGIC; //hashbin->hb_current = NULL; /* Make sure all spinlock's are unlocked */ if ( hashbin->hb_type & HB_LOCK ) { spin_lock_init(&hashbin->hb_spinlock); } return hashbin; } EXPORT_SYMBOL(hashbin_new); /* * Function hashbin_delete (hashbin, free_func) * * Destroy hashbin, the free_func can be a user supplied special routine * for deallocating this structure if it's complex. If not the user can * just supply kfree, which should take care of the job. 
*/ #ifdef CONFIG_LOCKDEP static int hashbin_lock_depth = 0; #endif int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func) { irda_queue_t* queue; unsigned long flags = 0; int i; IRDA_ASSERT(hashbin != NULL, return -1;); IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;); /* Synchronize */ if ( hashbin->hb_type & HB_LOCK ) { spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags, hashbin_lock_depth++); } /* * Free the entries in the hashbin, TODO: use hashbin_clear when * it has been shown to work */ for (i = 0; i < HASHBIN_SIZE; i ++ ) { queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]); while (queue ) { if (free_func) (*free_func)(queue); queue = dequeue_first( (irda_queue_t**) &hashbin->hb_queue[i]); } } /* Cleanup local data */ hashbin->hb_current = NULL; hashbin->magic = ~HB_MAGIC; /* Release lock */ if ( hashbin->hb_type & HB_LOCK) { spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); #ifdef CONFIG_LOCKDEP hashbin_lock_depth--; #endif } /* * Free the hashbin structure */ kfree(hashbin); return 0; } EXPORT_SYMBOL(hashbin_delete); /********************* HASHBIN LIST OPERATIONS *********************/ /* * Function hashbin_insert (hashbin, entry, name) * * Insert an entry into the hashbin * */ void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv, const char* name) { unsigned long flags = 0; int bin; IRDA_DEBUG( 4, "%s()\n", __func__); IRDA_ASSERT( hashbin != NULL, return;); IRDA_ASSERT( hashbin->magic == HB_MAGIC, return;); /* * Locate hashbin */ if ( name ) hashv = hash( name ); bin = GET_HASHBIN( hashv ); /* Synchronize */ if ( hashbin->hb_type & HB_LOCK ) { spin_lock_irqsave(&hashbin->hb_spinlock, flags); } /* Default is no-lock */ /* * Store name and key */ entry->q_hash = hashv; if ( name ) strlcpy( entry->q_name, name, sizeof(entry->q_name)); /* * Insert new entry first */ enqueue_first( (irda_queue_t**) &hashbin->hb_queue[ bin ], entry); hashbin->hb_size++; /* Release lock */ if ( hashbin->hb_type & HB_LOCK ) { 
spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); } /* Default is no-lock */ } EXPORT_SYMBOL(hashbin_insert); /* * Function hashbin_remove_first (hashbin) * * Remove first entry of the hashbin * * Note : this function no longer use hashbin_remove(), but does things * similar to hashbin_remove_this(), so can be considered safe. * Jean II */ void *hashbin_remove_first( hashbin_t *hashbin) { unsigned long flags = 0; irda_queue_t *entry = NULL; /* Synchronize */ if ( hashbin->hb_type & HB_LOCK ) { spin_lock_irqsave(&hashbin->hb_spinlock, flags); } /* Default is no-lock */ entry = hashbin_get_first( hashbin); if ( entry != NULL) { int bin; long hashv; /* * Locate hashbin */ hashv = entry->q_hash; bin = GET_HASHBIN( hashv ); /* * Dequeue the entry... */ dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ], (irda_queue_t*) entry ); hashbin->hb_size--; entry->q_next = NULL; entry->q_prev = NULL; /* * Check if this item is the currently selected item, and in * that case we must reset hb_current */ if ( entry == hashbin->hb_current) hashbin->hb_current = NULL; } /* Release lock */ if ( hashbin->hb_type & HB_LOCK ) { spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); } /* Default is no-lock */ return entry; } /* * Function hashbin_remove (hashbin, hashv, name) * * Remove entry with the given name * * The use of this function is highly discouraged, because the whole * concept behind hashbin_remove() is broken. In many cases, it's not * possible to guarantee the unicity of the index (either hashv or name), * leading to removing the WRONG entry. * The only simple safe use is : * hashbin_remove(hasbin, (int) self, NULL); * In other case, you must think hard to guarantee unicity of the index. 
* Jean II */ void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name) { int bin, found = FALSE; unsigned long flags = 0; irda_queue_t* entry; IRDA_DEBUG( 4, "%s()\n", __func__); IRDA_ASSERT( hashbin != NULL, return NULL;); IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); /* * Locate hashbin */ if ( name ) hashv = hash( name ); bin = GET_HASHBIN( hashv ); /* Synchronize */ if ( hashbin->hb_type & HB_LOCK ) { spin_lock_irqsave(&hashbin->hb_spinlock, flags); } /* Default is no-lock */ /* * Search for entry */ entry = hashbin->hb_queue[ bin ]; if ( entry ) { do { /* * Check for key */ if ( entry->q_hash == hashv ) { /* * Name compare too? */ if ( name ) { if ( strcmp( entry->q_name, name) == 0) { found = TRUE; break; } } else { found = TRUE; break; } } entry = entry->q_next; } while ( entry != hashbin->hb_queue[ bin ] ); } /* * If entry was found, dequeue it */ if ( found ) { dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ], (irda_queue_t*) entry ); hashbin->hb_size--; /* * Check if this item is the currently selected item, and in * that case we must reset hb_current */ if ( entry == hashbin->hb_current) hashbin->hb_current = NULL; } /* Release lock */ if ( hashbin->hb_type & HB_LOCK ) { spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); } /* Default is no-lock */ /* Return */ if ( found ) return entry; else return NULL; } EXPORT_SYMBOL(hashbin_remove); /* * Function hashbin_remove_this (hashbin, entry) * * Remove entry with the given name * * In some cases, the user of hashbin can't guarantee the unicity * of either the hashv or name. * In those cases, using the above function is guaranteed to cause troubles, * so we use this one instead... 
* And by the way, it's also faster, because we skip the search phase ;-) */ void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry) { unsigned long flags = 0; int bin; long hashv; IRDA_DEBUG( 4, "%s()\n", __func__); IRDA_ASSERT( hashbin != NULL, return NULL;); IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); IRDA_ASSERT( entry != NULL, return NULL;); /* Synchronize */ if ( hashbin->hb_type & HB_LOCK ) { spin_lock_irqsave(&hashbin->hb_spinlock, flags); } /* Default is no-lock */ /* Check if valid and not already removed... */ if((entry->q_next == NULL) || (entry->q_prev == NULL)) { entry = NULL; goto out; } /* * Locate hashbin */ hashv = entry->q_hash; bin = GET_HASHBIN( hashv ); /* * Dequeue the entry... */ dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ], (irda_queue_t*) entry ); hashbin->hb_size--; entry->q_next = NULL; entry->q_prev = NULL; /* * Check if this item is the currently selected item, and in * that case we must reset hb_current */ if ( entry == hashbin->hb_current) hashbin->hb_current = NULL; out: /* Release lock */ if ( hashbin->hb_type & HB_LOCK ) { spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); } /* Default is no-lock */ return entry; } EXPORT_SYMBOL(hashbin_remove_this); /*********************** HASHBIN ENUMERATION ***********************/ /* * Function hashbin_common_find (hashbin, hashv, name) * * Find item with the given hashv or name * */ void* hashbin_find( hashbin_t* hashbin, long hashv, const char* name ) { int bin; irda_queue_t* entry; IRDA_DEBUG( 4, "hashbin_find()\n"); IRDA_ASSERT( hashbin != NULL, return NULL;); IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); /* * Locate hashbin */ if ( name ) hashv = hash( name ); bin = GET_HASHBIN( hashv ); /* * Search for entry */ entry = hashbin->hb_queue[ bin]; if ( entry ) { do { /* * Check for key */ if ( entry->q_hash == hashv ) { /* * Name compare too? 
*/ if ( name ) { if ( strcmp( entry->q_name, name ) == 0 ) { return entry; } } else { return entry; } } entry = entry->q_next; } while ( entry != hashbin->hb_queue[ bin ] ); } return NULL; } EXPORT_SYMBOL(hashbin_find); /* * Function hashbin_lock_find (hashbin, hashv, name) * * Find item with the given hashv or name * * Same, but with spinlock protection... * I call it safe, but it's only safe with respect to the hashbin, not its * content. - Jean II */ void* hashbin_lock_find( hashbin_t* hashbin, long hashv, const char* name ) { unsigned long flags = 0; irda_queue_t* entry; /* Synchronize */ spin_lock_irqsave(&hashbin->hb_spinlock, flags); /* * Search for entry */ entry = hashbin_find(hashbin, hashv, name); /* Release lock */ spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); return entry; } EXPORT_SYMBOL(hashbin_lock_find); /* * Function hashbin_find (hashbin, hashv, name, pnext) * * Find an item with the given hashv or name, and its successor * * This function allow to do concurrent enumerations without the * need to lock over the whole session, because the caller keep the * context of the search. On the other hand, it might fail and return * NULL if the entry is removed. - Jean II */ void* hashbin_find_next( hashbin_t* hashbin, long hashv, const char* name, void ** pnext) { unsigned long flags = 0; irda_queue_t* entry; /* Synchronize */ spin_lock_irqsave(&hashbin->hb_spinlock, flags); /* * Search for current entry * This allow to check if the current item is still in the * hashbin or has been removed. */ entry = hashbin_find(hashbin, hashv, name); /* * Trick hashbin_get_next() to return what we want */ if(entry) { hashbin->hb_current = entry; *pnext = hashbin_get_next( hashbin ); } else *pnext = NULL; /* Release lock */ spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); return entry; } /* * Function hashbin_get_first (hashbin) * * Get a pointer to first element in hashbin, this function must be * called before any calls to hashbin_get_next()! 
* */ irda_queue_t *hashbin_get_first( hashbin_t* hashbin) { irda_queue_t *entry; int i; IRDA_ASSERT( hashbin != NULL, return NULL;); IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); if ( hashbin == NULL) return NULL; for ( i = 0; i < HASHBIN_SIZE; i ++ ) { entry = hashbin->hb_queue[ i]; if ( entry) { hashbin->hb_current = entry; return entry; } } /* * Did not find any item in hashbin */ return NULL; } EXPORT_SYMBOL(hashbin_get_first); /* * Function hashbin_get_next (hashbin) * * Get next item in hashbin. A series of hashbin_get_next() calls must * be started by a call to hashbin_get_first(). The function returns * NULL when all items have been traversed * * The context of the search is stored within the hashbin, so you must * protect yourself from concurrent enumerations. - Jean II */ irda_queue_t *hashbin_get_next( hashbin_t *hashbin) { irda_queue_t* entry; int bin; int i; IRDA_ASSERT( hashbin != NULL, return NULL;); IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); if ( hashbin->hb_current == NULL) { IRDA_ASSERT( hashbin->hb_current != NULL, return NULL;); return NULL; } entry = hashbin->hb_current->q_next; bin = GET_HASHBIN( entry->q_hash); /* * Make sure that we are not back at the beginning of the queue * again */ if ( entry != hashbin->hb_queue[ bin ]) { hashbin->hb_current = entry; return entry; } /* * Check that this is not the last queue in hashbin */ if ( bin >= HASHBIN_SIZE) return NULL; /* * Move to next queue in hashbin */ bin++; for ( i = bin; i < HASHBIN_SIZE; i++ ) { entry = hashbin->hb_queue[ i]; if ( entry) { hashbin->hb_current = entry; return entry; } } return NULL; } EXPORT_SYMBOL(hashbin_get_next);
gpl-2.0
valir/android_kernel_samsung_chagalllte
drivers/staging/tidspbridge/core/ue_deh.c
7991
6802
/* * ue_deh.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * Implements upper edge DSP exception handling (DEH) functions. * * Copyright (C) 2005-2006 Texas Instruments, Inc. * Copyright (C) 2010 Felipe Contreras * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <plat/dmtimer.h> #include <dspbridge/dbdefs.h> #include <dspbridge/dspdeh.h> #include <dspbridge/dev.h> #include "_tiomap.h" #include "_deh.h" #include <dspbridge/io_sm.h> #include <dspbridge/drv.h> #include <dspbridge/wdt.h> static u32 fault_addr; static void mmu_fault_dpc(unsigned long data) { struct deh_mgr *deh = (void *)data; if (!deh) return; bridge_deh_notify(deh, DSP_MMUFAULT, 0); } static irqreturn_t mmu_fault_isr(int irq, void *data) { struct deh_mgr *deh = data; struct cfg_hostres *resources; u32 event; if (!deh) return IRQ_HANDLED; resources = deh->bridge_context->resources; if (!resources) { dev_dbg(bridge, "%s: Failed to get Host Resources\n", __func__); return IRQ_HANDLED; } hw_mmu_event_status(resources->dmmu_base, &event); if (event == HW_MMU_TRANSLATION_FAULT) { hw_mmu_fault_addr_read(resources->dmmu_base, &fault_addr); dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__, event, fault_addr); /* * Schedule a DPC directly. In the future, it may be * necessary to check if DSP MMU fault is intended for * Bridge. 
*/ tasklet_schedule(&deh->dpc_tasklet); /* Disable the MMU events, else once we clear it will * start to raise INTs again */ hw_mmu_event_disable(resources->dmmu_base, HW_MMU_TRANSLATION_FAULT); } else { hw_mmu_event_disable(resources->dmmu_base, HW_MMU_ALL_INTERRUPTS); } return IRQ_HANDLED; } int bridge_deh_create(struct deh_mgr **ret_deh, struct dev_object *hdev_obj) { int status; struct deh_mgr *deh; struct bridge_dev_context *hbridge_context = NULL; /* Message manager will be created when a file is loaded, since * size of message buffer in shared memory is configurable in * the base image. */ /* Get Bridge context info. */ dev_get_bridge_context(hdev_obj, &hbridge_context); /* Allocate IO manager object: */ deh = kzalloc(sizeof(*deh), GFP_KERNEL); if (!deh) { status = -ENOMEM; goto err; } /* Create an NTFY object to manage notifications */ deh->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL); if (!deh->ntfy_obj) { status = -ENOMEM; goto err; } ntfy_init(deh->ntfy_obj); /* Create a MMUfault DPC */ tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh); /* Fill in context structure */ deh->bridge_context = hbridge_context; /* Install ISR function for DSP MMU fault */ status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0, "DspBridge\tiommu fault", deh); if (status < 0) goto err; *ret_deh = deh; return 0; err: bridge_deh_destroy(deh); *ret_deh = NULL; return status; } int bridge_deh_destroy(struct deh_mgr *deh) { if (!deh) return -EFAULT; /* If notification object exists, delete it */ if (deh->ntfy_obj) { ntfy_delete(deh->ntfy_obj); kfree(deh->ntfy_obj); } /* Disable DSP MMU fault */ free_irq(INT_DSP_MMU_IRQ, deh); /* Free DPC object */ tasklet_kill(&deh->dpc_tasklet); /* Deallocate the DEH manager object */ kfree(deh); return 0; } int bridge_deh_register_notify(struct deh_mgr *deh, u32 event_mask, u32 notify_type, struct dsp_notification *hnotification) { if (!deh) return -EFAULT; if (event_mask) return ntfy_register(deh->ntfy_obj, hnotification, 
event_mask, notify_type); else return ntfy_unregister(deh->ntfy_obj, hnotification); } #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE static void mmu_fault_print_stack(struct bridge_dev_context *dev_context) { struct cfg_hostres *resources; struct hw_mmu_map_attrs_t map_attrs = { .endianism = HW_LITTLE_ENDIAN, .element_size = HW_ELEM_SIZE16BIT, .mixed_size = HW_MMU_CPUES, }; void *dummy_va_addr; resources = dev_context->resources; dummy_va_addr = (void*)__get_free_page(GFP_ATOMIC); /* * Before acking the MMU fault, let's make sure MMU can only * access entry #0. Then add a new entry so that the DSP OS * can continue in order to dump the stack. */ hw_mmu_twl_disable(resources->dmmu_base); hw_mmu_tlb_flush_all(resources->dmmu_base); hw_mmu_tlb_add(resources->dmmu_base, virt_to_phys(dummy_va_addr), fault_addr, HW_PAGE_SIZE4KB, 1, &map_attrs, HW_SET, HW_SET); dsp_clk_enable(DSP_CLK_GPT8); dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe); /* Clear MMU interrupt */ hw_mmu_event_ack(resources->dmmu_base, HW_MMU_TRANSLATION_FAULT); dump_dsp_stack(dev_context); dsp_clk_disable(DSP_CLK_GPT8); hw_mmu_disable(resources->dmmu_base); free_page((unsigned long)dummy_va_addr); } #endif static inline const char *event_to_string(int event) { switch (event) { case DSP_SYSERROR: return "DSP_SYSERROR"; break; case DSP_MMUFAULT: return "DSP_MMUFAULT"; break; case DSP_PWRERROR: return "DSP_PWRERROR"; break; case DSP_WDTOVERFLOW: return "DSP_WDTOVERFLOW"; break; default: return "unkown event"; break; } } void bridge_deh_notify(struct deh_mgr *deh, int event, int info) { struct bridge_dev_context *dev_context; const char *str = event_to_string(event); if (!deh) return; dev_dbg(bridge, "%s: device exception", __func__); dev_context = deh->bridge_context; switch (event) { case DSP_SYSERROR: dev_err(bridge, "%s: %s, info=0x%x", __func__, str, info); #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE dump_dl_modules(dev_context); dump_dsp_stack(dev_context); #endif break; case DSP_MMUFAULT: dev_err(bridge, "%s: %s, 
addr=0x%x", __func__, str, fault_addr); #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE print_dsp_trace_buffer(dev_context); dump_dl_modules(dev_context); mmu_fault_print_stack(dev_context); #endif break; default: dev_err(bridge, "%s: %s", __func__, str); break; } /* Filter subsequent notifications when an error occurs */ if (dev_context->brd_state != BRD_ERROR) { ntfy_notify(deh->ntfy_obj, event); #ifdef CONFIG_TIDSPBRIDGE_RECOVERY bridge_recover_schedule(); #endif } /* Set the Board state as ERROR */ dev_context->brd_state = BRD_ERROR; /* Disable all the clocks that were enabled by DSP */ dsp_clock_disable_all(dev_context->dsp_per_clks); /* * Avoid the subsequent WDT if it happens once, * also if fatal error occurs. */ dsp_wdt_enable(false); }
gpl-2.0
Ravike14/Ravike14-kernel
drivers/net/wireless/atmel_pci.c
9015
2535
/*** -*- linux-c -*- ********************************************************** Driver for Atmel at76c502 at76c504 and at76c506 wireless cards. Copyright 2004 Simon Kelley. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This software is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Atmel wireless lan drivers; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ******************************************************************************/ #include <linux/pci.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/netdevice.h> #include "atmel.h" MODULE_AUTHOR("Simon Kelley"); MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards."); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("Atmel at76c506 PCI wireless cards"); static DEFINE_PCI_DEVICE_TABLE(card_ids) = { { 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID }, { 0, } }; MODULE_DEVICE_TABLE(pci, card_ids); static int atmel_pci_probe(struct pci_dev *, const struct pci_device_id *); static void atmel_pci_remove(struct pci_dev *); static struct pci_driver atmel_driver = { .name = "atmel", .id_table = card_ids, .probe = atmel_pci_probe, .remove = __devexit_p(atmel_pci_remove), }; static int __devinit atmel_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pent) { struct net_device *dev; if (pci_enable_device(pdev)) return -ENODEV; pci_set_master(pdev); dev = init_atmel_card(pdev->irq, pdev->resource[1].start, ATMEL_FW_TYPE_506, &pdev->dev, NULL, NULL); if (!dev) return 
-ENODEV; pci_set_drvdata(pdev, dev); return 0; } static void __devexit atmel_pci_remove(struct pci_dev *pdev) { stop_atmel_card(pci_get_drvdata(pdev)); } static int __init atmel_init_module(void) { return pci_register_driver(&atmel_driver); } static void __exit atmel_cleanup_module(void) { pci_unregister_driver(&atmel_driver); } module_init(atmel_init_module); module_exit(atmel_cleanup_module);
gpl-2.0
bas-t/linux_media
drivers/auxdisplay/cfag12864b.c
12855
8374
/* * Filename: cfag12864b.c * Version: 0.1.0 * Description: cfag12864b LCD driver * License: GPLv2 * Depends: ks0108 * * Author: Copyright (C) Miguel Ojeda Sandonis * Date: 2006-10-31 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/cdev.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/jiffies.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/workqueue.h> #include <linux/ks0108.h> #include <linux/cfag12864b.h> #define CFAG12864B_NAME "cfag12864b" /* * Module Parameters */ static unsigned int cfag12864b_rate = CONFIG_CFAG12864B_RATE; module_param(cfag12864b_rate, uint, S_IRUGO); MODULE_PARM_DESC(cfag12864b_rate, "Refresh rate (hertz)"); unsigned int cfag12864b_getrate(void) { return cfag12864b_rate; } /* * cfag12864b Commands * * E = Enable signal * Every time E switch from low to high, * cfag12864b/ks0108 reads the command/data. * * CS1 = First ks0108controller. * If high, the first ks0108 controller receives commands/data. * * CS2 = Second ks0108 controller * If high, the second ks0108 controller receives commands/data. * * DI = Data/Instruction * If low, cfag12864b will expect commands. * If high, cfag12864b will expect data. 
* */ #define bit(n) (((unsigned char)1)<<(n)) #define CFAG12864B_BIT_E (0) #define CFAG12864B_BIT_CS1 (2) #define CFAG12864B_BIT_CS2 (1) #define CFAG12864B_BIT_DI (3) static unsigned char cfag12864b_state; static void cfag12864b_set(void) { ks0108_writecontrol(cfag12864b_state); } static void cfag12864b_setbit(unsigned char state, unsigned char n) { if (state) cfag12864b_state |= bit(n); else cfag12864b_state &= ~bit(n); } static void cfag12864b_e(unsigned char state) { cfag12864b_setbit(state, CFAG12864B_BIT_E); cfag12864b_set(); } static void cfag12864b_cs1(unsigned char state) { cfag12864b_setbit(state, CFAG12864B_BIT_CS1); } static void cfag12864b_cs2(unsigned char state) { cfag12864b_setbit(state, CFAG12864B_BIT_CS2); } static void cfag12864b_di(unsigned char state) { cfag12864b_setbit(state, CFAG12864B_BIT_DI); } static void cfag12864b_setcontrollers(unsigned char first, unsigned char second) { if (first) cfag12864b_cs1(0); else cfag12864b_cs1(1); if (second) cfag12864b_cs2(0); else cfag12864b_cs2(1); } static void cfag12864b_controller(unsigned char which) { if (which == 0) cfag12864b_setcontrollers(1, 0); else if (which == 1) cfag12864b_setcontrollers(0, 1); } static void cfag12864b_displaystate(unsigned char state) { cfag12864b_di(0); cfag12864b_e(1); ks0108_displaystate(state); cfag12864b_e(0); } static void cfag12864b_address(unsigned char address) { cfag12864b_di(0); cfag12864b_e(1); ks0108_address(address); cfag12864b_e(0); } static void cfag12864b_page(unsigned char page) { cfag12864b_di(0); cfag12864b_e(1); ks0108_page(page); cfag12864b_e(0); } static void cfag12864b_startline(unsigned char startline) { cfag12864b_di(0); cfag12864b_e(1); ks0108_startline(startline); cfag12864b_e(0); } static void cfag12864b_writebyte(unsigned char byte) { cfag12864b_di(1); cfag12864b_e(1); ks0108_writedata(byte); cfag12864b_e(0); } static void cfag12864b_nop(void) { cfag12864b_startline(0); } /* * cfag12864b Internal Commands */ static void cfag12864b_on(void) { 
cfag12864b_setcontrollers(1, 1); cfag12864b_displaystate(1); } static void cfag12864b_off(void) { cfag12864b_setcontrollers(1, 1); cfag12864b_displaystate(0); } static void cfag12864b_clear(void) { unsigned char i, j; cfag12864b_setcontrollers(1, 1); for (i = 0; i < CFAG12864B_PAGES; i++) { cfag12864b_page(i); cfag12864b_address(0); for (j = 0; j < CFAG12864B_ADDRESSES; j++) cfag12864b_writebyte(0); } } /* * Update work */ unsigned char *cfag12864b_buffer; static unsigned char *cfag12864b_cache; static DEFINE_MUTEX(cfag12864b_mutex); static unsigned char cfag12864b_updating; static void cfag12864b_update(struct work_struct *delayed_work); static struct workqueue_struct *cfag12864b_workqueue; static DECLARE_DELAYED_WORK(cfag12864b_work, cfag12864b_update); static void cfag12864b_queue(void) { queue_delayed_work(cfag12864b_workqueue, &cfag12864b_work, HZ / cfag12864b_rate); } unsigned char cfag12864b_enable(void) { unsigned char ret; mutex_lock(&cfag12864b_mutex); if (!cfag12864b_updating) { cfag12864b_updating = 1; cfag12864b_queue(); ret = 0; } else ret = 1; mutex_unlock(&cfag12864b_mutex); return ret; } void cfag12864b_disable(void) { mutex_lock(&cfag12864b_mutex); if (cfag12864b_updating) { cfag12864b_updating = 0; cancel_delayed_work(&cfag12864b_work); flush_workqueue(cfag12864b_workqueue); } mutex_unlock(&cfag12864b_mutex); } unsigned char cfag12864b_isenabled(void) { return cfag12864b_updating; } static void cfag12864b_update(struct work_struct *work) { unsigned char c; unsigned short i, j, k, b; if (memcmp(cfag12864b_cache, cfag12864b_buffer, CFAG12864B_SIZE)) { for (i = 0; i < CFAG12864B_CONTROLLERS; i++) { cfag12864b_controller(i); cfag12864b_nop(); for (j = 0; j < CFAG12864B_PAGES; j++) { cfag12864b_page(j); cfag12864b_nop(); cfag12864b_address(0); cfag12864b_nop(); for (k = 0; k < CFAG12864B_ADDRESSES; k++) { for (c = 0, b = 0; b < 8; b++) if (cfag12864b_buffer [i * CFAG12864B_ADDRESSES / 8 + k / 8 + (j * 8 + b) * CFAG12864B_WIDTH / 8] & bit(k % 8)) c |= 
bit(b); cfag12864b_writebyte(c); } } } memcpy(cfag12864b_cache, cfag12864b_buffer, CFAG12864B_SIZE); } if (cfag12864b_updating) cfag12864b_queue(); } /* * cfag12864b Exported Symbols */ EXPORT_SYMBOL_GPL(cfag12864b_buffer); EXPORT_SYMBOL_GPL(cfag12864b_getrate); EXPORT_SYMBOL_GPL(cfag12864b_enable); EXPORT_SYMBOL_GPL(cfag12864b_disable); EXPORT_SYMBOL_GPL(cfag12864b_isenabled); /* * Is the module inited? */ static unsigned char cfag12864b_inited; unsigned char cfag12864b_isinited(void) { return cfag12864b_inited; } EXPORT_SYMBOL_GPL(cfag12864b_isinited); /* * Module Init & Exit */ static int __init cfag12864b_init(void) { int ret = -EINVAL; /* ks0108_init() must be called first */ if (!ks0108_isinited()) { printk(KERN_ERR CFAG12864B_NAME ": ERROR: " "ks0108 is not initialized\n"); goto none; } BUILD_BUG_ON(PAGE_SIZE < CFAG12864B_SIZE); cfag12864b_buffer = (unsigned char *) get_zeroed_page(GFP_KERNEL); if (cfag12864b_buffer == NULL) { printk(KERN_ERR CFAG12864B_NAME ": ERROR: " "can't get a free page\n"); ret = -ENOMEM; goto none; } cfag12864b_cache = kmalloc(sizeof(unsigned char) * CFAG12864B_SIZE, GFP_KERNEL); if (cfag12864b_cache == NULL) { printk(KERN_ERR CFAG12864B_NAME ": ERROR: " "can't alloc cache buffer (%i bytes)\n", CFAG12864B_SIZE); ret = -ENOMEM; goto bufferalloced; } cfag12864b_workqueue = create_singlethread_workqueue(CFAG12864B_NAME); if (cfag12864b_workqueue == NULL) goto cachealloced; cfag12864b_clear(); cfag12864b_on(); cfag12864b_inited = 1; return 0; cachealloced: kfree(cfag12864b_cache); bufferalloced: free_page((unsigned long) cfag12864b_buffer); none: return ret; } static void __exit cfag12864b_exit(void) { cfag12864b_disable(); cfag12864b_off(); destroy_workqueue(cfag12864b_workqueue); kfree(cfag12864b_cache); free_page((unsigned long) cfag12864b_buffer); } module_init(cfag12864b_init); module_exit(cfag12864b_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>"); 
MODULE_DESCRIPTION("cfag12864b LCD driver");
gpl-2.0
gizero/linux-wallya-2.6.33-rc4-psp03.20.00.14
drivers/net/xen-netfront.c
56
45808
/* * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/if_ether.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/moduleparam.h> #include <linux/mm.h> #include <net/ip.h> #include <xen/xen.h> #include <xen/xenbus.h> #include <xen/events.h> #include <xen/page.h> #include <xen/grant_table.h> #include <xen/interface/io/netif.h> #include <xen/interface/memory.h> #include <xen/interface/grant_table.h> static const struct ethtool_ops xennet_ethtool_ops; struct netfront_cb { struct page *page; unsigned offset; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) #define RX_COPY_THRESHOLD 256 #define GRANT_INVALID_REF 0 #define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE) #define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE) #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) struct netfront_info { struct list_head list; struct net_device *netdev; struct napi_struct napi; unsigned int evtchn; struct xenbus_device *xbdev; spinlock_t tx_lock; struct xen_netif_tx_front_ring tx; int tx_ring_ref; /* * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries * are linked from tx_skb_freelist through skb_entry.link. * * NB. Freelist index entries are always going to be less than * PAGE_OFFSET, whereas pointers to skbs will always be equal or * greater than PAGE_OFFSET: we use this property to distinguish * them. */ union skb_entry { struct sk_buff *skb; unsigned long link; } tx_skbs[NET_TX_RING_SIZE]; grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; unsigned tx_skb_freelist; spinlock_t rx_lock ____cacheline_aligned_in_smp; struct xen_netif_rx_front_ring rx; int rx_ring_ref; /* Receive-ring batched refills. 
*/ #define RX_MIN_TARGET 8 #define RX_DFL_MIN_TARGET 64 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) unsigned rx_min_target, rx_max_target, rx_target; struct sk_buff_head rx_batch; struct timer_list rx_refill_timer; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; }; struct netfront_rx_info { struct xen_netif_rx_response rx; struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; static void skb_entry_set_link(union skb_entry *list, unsigned short id) { list->link = id; } static int skb_entry_is_link(const union skb_entry *list) { BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link)); return ((unsigned long)list->skb < PAGE_OFFSET); } /* * Access macros for acquiring freeing slots in tx_skbs[]. */ static void add_id_to_freelist(unsigned *head, union skb_entry *list, unsigned short id) { skb_entry_set_link(&list[id], *head); *head = id; } static unsigned short get_id_from_freelist(unsigned *head, union skb_entry *list) { unsigned int id = *head; *head = list[id].link; return id; } static int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); struct sk_buff *skb = np->rx_skbs[i]; np->rx_skbs[i] = NULL; return skb; } static grant_ref_t xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = np->grant_rx_ref[i]; np->grant_rx_ref[i] = GRANT_INVALID_REF; return ref; } #ifdef CONFIG_SYSFS static int xennet_sysfs_addif(struct net_device *netdev); static void xennet_sysfs_delif(struct net_device *netdev); #else /* !CONFIG_SYSFS */ #define xennet_sysfs_addif(dev) (0) #define xennet_sysfs_delif(dev) do { } while (0) #endif static int xennet_can_sg(struct net_device *dev) { return 
dev->features & NETIF_F_SG; } static void rx_refill_timeout(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netfront_info *np = netdev_priv(dev); napi_schedule(&np->napi); } static int netfront_tx_slot_available(struct netfront_info *np) { return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); } static void xennet_maybe_wake_tx(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); if (unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev))) netif_wake_queue(dev); } static void xennet_alloc_rx_buffers(struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct page *page; int i, batch_target, notify; RING_IDX req_prod = np->rx.req_prod_pvt; grant_ref_t ref; unsigned long pfn; void *vaddr; struct xen_netif_rx_request *req; if (unlikely(!netif_carrier_ok(dev))) return; /* * Allocate skbuffs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, so should reduce the chance of failed allocation requests * both for ourself and for other kernel subsystems. */ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto no_skb; /* Align ip header to a 16 bytes boundary */ skb_reserve(skb, NET_IP_ALIGN); page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); no_skb: /* Any skbuffs queued for refill? Force them out. */ if (i != 0) goto refill; /* Could not allocate any skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); break; } skb_shinfo(skb)->frags[0].page = page; skb_shinfo(skb)->nr_frags = 1; __skb_queue_tail(&np->rx_batch, skb); } /* Is the batch large enough to be worthwhile? 
*/ if (i < (np->rx_target/2)) { if (req_prod > np->rx.sring->req_prod) goto push; return; } /* Adjust our fill target if we risked running out of buffers. */ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && ((np->rx_target *= 2) > np->rx_max_target)) np->rx_target = np->rx_max_target; refill: for (i = 0; ; i++) { skb = __skb_dequeue(&np->rx_batch); if (skb == NULL) break; skb->dev = dev; id = xennet_rxidx(req_prod + i); BUG_ON(np->rx_skbs[id]); np->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference(&np->gref_rx_head); BUG_ON((signed short)ref < 0); np->grant_rx_ref[id] = ref; pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page); vaddr = page_address(skb_shinfo(skb)->frags[0].page); req = RING_GET_REQUEST(&np->rx, req_prod + i); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, pfn_to_mfn(pfn), 0); req->id = id; req->gref = ref; } wmb(); /* barrier so backend seens requests */ /* Above is a suitable barrier to ensure backend will see requests. */ np->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); if (notify) notify_remote_via_irq(np->netdev->irq); } static int xennet_open(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); napi_enable(&np->napi); spin_lock_bh(&np->rx_lock); if (netif_carrier_ok(dev)) { xennet_alloc_rx_buffers(dev); np->rx.sring->rsp_event = np->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) napi_schedule(&np->napi); } spin_unlock_bh(&np->rx_lock); netif_start_queue(dev); return 0; } static void xennet_tx_buf_gc(struct net_device *dev) { RING_IDX cons, prod; unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; BUG_ON(!netif_carrier_ok(dev)); do { prod = np->tx.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'. 
*/ for (cons = np->tx.rsp_cons; cons != prod; cons++) { struct xen_netif_tx_response *txrsp; txrsp = RING_GET_RESPONSE(&np->tx, cons); if (txrsp->status == NETIF_RSP_NULL) continue; id = txrsp->id; skb = np->tx_skbs[id].skb; if (unlikely(gnttab_query_foreign_access( np->grant_tx_ref[id]) != 0)) { printk(KERN_ALERT "xennet_tx_buf_gc: warning " "-- grant still in use by backend " "domain.\n"); BUG(); } gnttab_end_foreign_access_ref( np->grant_tx_ref[id], GNTMAP_readonly); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[id]); np->grant_tx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id); dev_kfree_skb_irq(skb); } np->tx.rsp_cons = prod; /* * Set a new event, then check for race with update of tx_cons. * Note that it is essential to schedule a callback, no matter * how few buffers are pending. Even if there is space in the * transmit ring, higher layers may be blocked because too much * data is outstanding: in such cases notification from Xen is * likely to be the only kick that we'll get. */ np->tx.sring->rsp_event = prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; mb(); /* update shared area */ } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); xennet_maybe_wake_tx(dev); } static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, struct xen_netif_tx_request *tx) { struct netfront_info *np = netdev_priv(dev); char *data = skb->data; unsigned long mfn; RING_IDX prod = np->tx.req_prod_pvt; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned int id; grant_ref_t ref; int i; /* While the header overlaps a page boundary (including being larger than a page), split it it into page-sized chunks. 
*/ while (len > PAGE_SIZE - offset) { tx->size = PAGE_SIZE - offset; tx->flags |= NETTXF_more_data; len -= tx->size; data += tx->size; offset = 0; id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); np->tx_skbs[id].skb = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; } /* Grant backend access to each skb fragment page. */ for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; tx->flags |= NETTXF_more_data; id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); np->tx_skbs[id].skb = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = pfn_to_mfn(page_to_pfn(frag->page)); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = frag->page_offset; tx->size = frag->size; tx->flags = 0; } np->tx.req_prod_pvt = prod; } static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct xen_netif_tx_request *tx; struct xen_netif_extra_info *extra; char *data = skb->data; RING_IDX i; grant_ref_t ref; unsigned long mfn; int notify; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); frags += DIV_ROUND_UP(offset + len, PAGE_SIZE); if (unlikely(frags > MAX_SKB_FRAGS + 1)) { printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", frags); dump_stack(); goto drop; } spin_lock_irq(&np->tx_lock); if (unlikely(!netif_carrier_ok(dev) || (frags > 1 && !xennet_can_sg(dev)) || netif_needs_gso(dev, skb))) { 
spin_unlock_irq(&np->tx_lock); goto drop; } i = np->tx.req_prod_pvt; id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); np->tx_skbs[id].skb = skb; tx = RING_GET_REQUEST(&np->tx, i); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; extra = NULL; tx->flags = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */ tx->flags |= NETTXF_csum_blank | NETTXF_data_validated; else if (skb->ip_summed == CHECKSUM_UNNECESSARY) /* remote but checksummed. */ tx->flags |= NETTXF_data_validated; if (skb_shinfo(skb)->gso_size) { struct xen_netif_extra_info *gso; gso = (struct xen_netif_extra_info *) RING_GET_REQUEST(&np->tx, ++i); if (extra) extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; else tx->flags |= NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; extra = gso; } np->tx.req_prod_pvt = i + 1; xennet_make_frags(skb, dev, tx); tx->size = skb->len; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); if (notify) notify_remote_via_irq(np->netdev->irq); dev->stats.tx_bytes += skb->len; dev->stats.tx_packets++; /* Note: It is not safe to access skb after xennet_tx_buf_gc()! 
*/ xennet_tx_buf_gc(dev); if (!netfront_tx_slot_available(np)) netif_stop_queue(dev); spin_unlock_irq(&np->tx_lock); return NETDEV_TX_OK; drop: dev->stats.tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; } static int xennet_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_stop_queue(np->netdev); napi_disable(&np->napi); return 0; } static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, grant_ref_t ref) { int new = xennet_rxidx(np->rx.req_prod_pvt); BUG_ON(np->rx_skbs[new]); np->rx_skbs[new] = skb; np->grant_rx_ref[new] = ref; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; np->rx.req_prod_pvt++; } static int xennet_get_extras(struct netfront_info *np, struct xen_netif_extra_info *extras, RING_IDX rp) { struct xen_netif_extra_info *extra; struct device *dev = &np->netdev->dev; RING_IDX cons = np->rx.rsp_cons; int err = 0; do { struct sk_buff *skb; grant_ref_t ref; if (unlikely(cons + 1 == rp)) { if (net_ratelimit()) dev_warn(dev, "Missing extra info\n"); err = -EBADR; break; } extra = (struct xen_netif_extra_info *) RING_GET_RESPONSE(&np->rx, ++cons); if (unlikely(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) dev_warn(dev, "Invalid extra type: %d\n", extra->type); err = -EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } skb = xennet_get_rx_skb(np, cons); ref = xennet_get_rx_ref(np, cons); xennet_move_rx_slot(np, skb, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); np->rx.rsp_cons = cons; return err; } static int xennet_get_responses(struct netfront_info *np, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list) { struct xen_netif_rx_response *rx = &rinfo->rx; struct xen_netif_extra_info *extras = rinfo->extras; struct device *dev = &np->netdev->dev; RING_IDX cons = np->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(np, cons); grant_ref_t ref 
= xennet_get_rx_ref(np, cons); int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); int frags = 1; int err = 0; unsigned long ret; if (rx->flags & NETRXF_extra_info) { err = xennet_get_extras(np, extras, rp); cons = np->rx.rsp_cons; } for (;;) { if (unlikely(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { if (net_ratelimit()) dev_warn(dev, "rx->offset: %x, size: %u\n", rx->offset, rx->status); xennet_move_rx_slot(np, skb, ref); err = -EINVAL; goto next; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backed. */ if (ref == GRANT_INVALID_REF) { if (net_ratelimit()) dev_warn(dev, "Bad rx response id %d.\n", rx->id); err = -EINVAL; goto next; } ret = gnttab_end_foreign_access_ref(ref, 0); BUG_ON(!ret); gnttab_release_grant_reference(&np->gref_rx_head, ref); __skb_queue_tail(list, skb); next: if (!(rx->flags & NETRXF_more_data)) break; if (cons + frags == rp) { if (net_ratelimit()) dev_warn(dev, "Need more frags\n"); err = -ENOENT; break; } rx = RING_GET_RESPONSE(&np->rx, cons + frags); skb = xennet_get_rx_skb(np, cons + frags); ref = xennet_get_rx_ref(np, cons + frags); frags++; } if (unlikely(frags > max)) { if (net_ratelimit()) dev_warn(dev, "Too many frags\n"); err = -E2BIG; } if (unlikely(err)) np->rx.rsp_cons = cons + frags; return err; } static int xennet_set_skb_gso(struct sk_buff *skb, struct xen_netif_extra_info *gso) { if (!gso->u.gso.size) { if (net_ratelimit()) printk(KERN_WARNING "GSO size must not be zero.\n"); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. */ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { if (net_ratelimit()) printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type); return -EINVAL; } skb_shinfo(skb)->gso_size = gso->u.gso.size; skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; /* Header must be checked, and gso_segs computed. 
*/ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; skb_shinfo(skb)->gso_segs = 0; return 0; } static RING_IDX xennet_fill_frags(struct netfront_info *np, struct sk_buff *skb, struct sk_buff_head *list) { struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; RING_IDX cons = np->rx.rsp_cons; skb_frag_t *frag = shinfo->frags + nr_frags; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { struct xen_netif_rx_response *rx = RING_GET_RESPONSE(&np->rx, ++cons); frag->page = skb_shinfo(nskb)->frags[0].page; frag->page_offset = rx->offset; frag->size = rx->status; skb->data_len += rx->status; skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); frag++; nr_frags++; } shinfo->nr_frags = nr_frags; return cons; } static int skb_checksum_setup(struct sk_buff *skb) { struct iphdr *iph; unsigned char *th; int err = -EPROTO; if (skb->protocol != htons(ETH_P_IP)) goto out; iph = (void *)skb->data; th = skb->data + 4 * iph->ihl; if (th >= skb_tail_pointer(skb)) goto out; skb->csum_start = th - skb->head; switch (iph->protocol) { case IPPROTO_TCP: skb->csum_offset = offsetof(struct tcphdr, check); break; case IPPROTO_UDP: skb->csum_offset = offsetof(struct udphdr, check); break; default: if (net_ratelimit()) printk(KERN_ERR "Attempting to checksum a non-" "TCP/UDP packet, dropping a protocol" " %d packet", iph->protocol); goto out; } if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb)) goto out; err = 0; out: return err; } static int handle_incoming_queue(struct net_device *dev, struct sk_buff_head *rxq) { int packets_dropped = 0; struct sk_buff *skb; while ((skb = __skb_dequeue(rxq)) != NULL) { struct page *page = NETFRONT_SKB_CB(skb)->page; void *vaddr = page_address(page); unsigned offset = NETFRONT_SKB_CB(skb)->offset; memcpy(skb->data, vaddr + offset, skb_headlen(skb)); if (page != skb_shinfo(skb)->frags[0].page) __free_page(page); /* Ethernet work: Delayed to here as it peeks the header. 
*/ skb->protocol = eth_type_trans(skb, dev); if (skb->ip_summed == CHECKSUM_PARTIAL) { if (skb_checksum_setup(skb)) { kfree_skb(skb); packets_dropped++; dev->stats.rx_errors++; continue; } } dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; /* Pass it up. */ netif_receive_skb(skb); } return packets_dropped; } static int xennet_poll(struct napi_struct *napi, int budget) { struct netfront_info *np = container_of(napi, struct netfront_info, napi); struct net_device *dev = np->netdev; struct sk_buff *skb; struct netfront_rx_info rinfo; struct xen_netif_rx_response *rx = &rinfo.rx; struct xen_netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; int work_done; struct sk_buff_head rxq; struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; unsigned int len; int err; spin_lock(&np->rx_lock); skb_queue_head_init(&rxq); skb_queue_head_init(&errq); skb_queue_head_init(&tmpq); rp = np->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ i = np->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(np, &rinfo, rp, &tmpq); if (unlikely(err)) { err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); dev->stats.rx_errors++; i = np->rx.rsp_cons; continue; } skb = __skb_dequeue(&tmpq); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct xen_netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); np->rx.rsp_cons += skb_queue_len(&tmpq); goto err; } } NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page; NETFRONT_SKB_CB(skb)->offset = rx->offset; len = rx->status; if (len > RX_COPY_THRESHOLD) len = RX_COPY_THRESHOLD; skb_put(skb, len); if (rx->status > len) { skb_shinfo(skb)->frags[0].page_offset = rx->offset + len; skb_shinfo(skb)->frags[0].size = rx->status - len; skb->data_len 
= rx->status - len; } else { skb_shinfo(skb)->frags[0].page = NULL; skb_shinfo(skb)->nr_frags = 0; } i = xennet_fill_frags(np, skb, &tmpq); /* * Truesize approximates the size of true data plus * any supervisor overheads. Adding hypervisor * overheads has been shown to significantly reduce * achievable bandwidth with the default receive * buffer size. It is therefore not wise to account * for it here. * * After alloc_skb(RX_COPY_THRESHOLD), truesize is set * to RX_COPY_THRESHOLD + the supervisor * overheads. Here, we add the size of the data pulled * in xennet_fill_frags(). * * We also adjust for any unused space in the main * data area by subtracting (RX_COPY_THRESHOLD - * len). This is especially important with drivers * which split incoming packets into header and data, * using only 66 bytes of the main data area (see the * e1000 driver for example.) On such systems, * without this last adjustement, our achievable * receive throughout using the standard receive * buffer size was cut by 25%(!!!). */ skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); skb->len += skb->data_len; if (rx->flags & NETRXF_csum_blank) skb->ip_summed = CHECKSUM_PARTIAL; else if (rx->flags & NETRXF_data_validated) skb->ip_summed = CHECKSUM_UNNECESSARY; __skb_queue_tail(&rxq, skb); np->rx.rsp_cons = ++i; work_done++; } __skb_queue_purge(&errq); work_done -= handle_incoming_queue(dev, &rxq); /* If we get a callback with very few responses, reduce fill target. */ /* NB. Note exponential increase, linear decrease. 
*/ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target)) np->rx_target = np->rx_min_target; xennet_alloc_rx_buffers(dev); if (work_done < budget) { int more_to_do = 0; local_irq_save(flags); RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); if (!more_to_do) __napi_complete(napi); local_irq_restore(flags); } spin_unlock(&np->rx_lock); return work_done; } static int xennet_change_mtu(struct net_device *dev, int mtu) { int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN; if (mtu > max) return -EINVAL; dev->mtu = mtu; return 0; } static void xennet_release_tx_bufs(struct netfront_info *np) { struct sk_buff *skb; int i; for (i = 0; i < NET_TX_RING_SIZE; i++) { /* Skip over entries which are actually freelist references */ if (skb_entry_is_link(&np->tx_skbs[i])) continue; skb = np->tx_skbs[i].skb; gnttab_end_foreign_access_ref(np->grant_tx_ref[i], GNTMAP_readonly); gnttab_release_grant_reference(&np->gref_tx_head, np->grant_tx_ref[i]); np->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i); dev_kfree_skb_irq(skb); } } static void xennet_release_rx_bufs(struct netfront_info *np) { struct mmu_update *mmu = np->rx_mmu; struct multicall_entry *mcl = np->rx_mcl; struct sk_buff_head free_list; struct sk_buff *skb; unsigned long mfn; int xfer = 0, noxfer = 0, unused = 0; int id, ref; dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n", __func__); return; skb_queue_head_init(&free_list); spin_lock_bh(&np->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { ref = np->grant_rx_ref[id]; if (ref == GRANT_INVALID_REF) { unused++; continue; } skb = np->rx_skbs[id]; mfn = gnttab_end_foreign_transfer_ref(ref); gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[id] = GRANT_INVALID_REF; if (0 == mfn) { skb_shinfo(skb)->nr_frags = 0; dev_kfree_skb(skb); noxfer++; continue; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* 
Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); MULTI_update_va_mapping(mcl, (unsigned long)vaddr, mfn_pte(mfn, PAGE_KERNEL), 0); mcl++; mmu->ptr = ((u64)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; mmu++; set_phys_to_machine(pfn, mfn); } __skb_queue_tail(&free_list, skb); xfer++; } dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n", __func__, xfer, noxfer, unused); if (xfer) { if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Do all the remapping work and M2P updates. */ MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu, NULL, DOMID_SELF); mcl++; HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl); } } __skb_queue_purge(&free_list); spin_unlock_bh(&np->rx_lock); } static void xennet_uninit(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); xennet_release_tx_bufs(np); xennet_release_rx_bufs(np); gnttab_free_grant_references(np->gref_tx_head); gnttab_free_grant_references(np->gref_rx_head); } static const struct net_device_ops xennet_netdev_ops = { .ndo_open = xennet_open, .ndo_uninit = xennet_uninit, .ndo_stop = xennet_close, .ndo_start_xmit = xennet_start_xmit, .ndo_change_mtu = xennet_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev) { int i, err; struct net_device *netdev; struct netfront_info *np; netdev = alloc_etherdev(sizeof(struct netfront_info)); if (!netdev) { printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __func__); return ERR_PTR(-ENOMEM); } np = netdev_priv(netdev); np->xbdev = dev; spin_lock_init(&np->tx_lock); spin_lock_init(&np->rx_lock); skb_queue_head_init(&np->rx_batch); np->rx_target = RX_DFL_MIN_TARGET; np->rx_min_target = RX_DFL_MIN_TARGET; np->rx_max_target = RX_MAX_TARGET; init_timer(&np->rx_refill_timer); np->rx_refill_timer.data = (unsigned long)netdev; 
np->rx_refill_timer.function = rx_refill_timeout; /* Initialise tx_skbs as a free chain containing every entry. */ np->tx_skb_freelist = 0; for (i = 0; i < NET_TX_RING_SIZE; i++) { skb_entry_set_link(&np->tx_skbs[i], i+1); np->grant_tx_ref[i] = GRANT_INVALID_REF; } /* Clear out rx_skbs */ for (i = 0; i < NET_RX_RING_SIZE; i++) { np->rx_skbs[i] = NULL; np->grant_rx_ref[i] = GRANT_INVALID_REF; } /* A grant for every tx ring slot */ if (gnttab_alloc_grant_references(TX_MAX_TARGET, &np->gref_tx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n"); err = -ENOMEM; goto exit; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references(RX_MAX_TARGET, &np->gref_rx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n"); err = -ENOMEM; goto exit_free_tx; } netdev->netdev_ops = &xennet_netdev_ops; netif_napi_add(netdev, &np->napi, xennet_poll, 64); netdev->features = NETIF_F_IP_CSUM; SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops); SET_NETDEV_DEV(netdev, &dev->dev); np->netdev = netdev; netif_carrier_off(netdev); return netdev; exit_free_tx: gnttab_free_grant_references(np->gref_tx_head); exit: free_netdev(netdev); return ERR_PTR(err); } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. 
*/ static int __devinit netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err; struct net_device *netdev; struct netfront_info *info; netdev = xennet_create_dev(dev); if (IS_ERR(netdev)) { err = PTR_ERR(netdev); xenbus_dev_fatal(dev, err, "creating netdev"); return err; } info = netdev_priv(netdev); dev_set_drvdata(&dev->dev, info); err = register_netdev(info->netdev); if (err) { printk(KERN_WARNING "%s: register_netdev err=%d\n", __func__, err); goto fail; } err = xennet_sysfs_addif(info->netdev); if (err) { unregister_netdev(info->netdev); printk(KERN_WARNING "%s: add sysfs failed err=%d\n", __func__, err); goto fail; } return 0; fail: free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); return err; } static void xennet_end_access(int ref, void *page) { /* This frees the page as a side-effect */ if (ref != GRANT_INVALID_REF) gnttab_end_foreign_access(ref, 0, (unsigned long)page); } static void xennet_disconnect_backend(struct netfront_info *info) { /* Stop old i/f to prevent errors whilst we rebuild the state. */ spin_lock_bh(&info->rx_lock); spin_lock_irq(&info->tx_lock); netif_carrier_off(info->netdev); spin_unlock_irq(&info->tx_lock); spin_unlock_bh(&info->rx_lock); if (info->netdev->irq) unbind_from_irqhandler(info->netdev->irq, info->netdev); info->evtchn = info->netdev->irq = 0; /* End access and free the pages */ xennet_end_access(info->tx_ring_ref, info->tx.sring); xennet_end_access(info->rx_ring_ref, info->rx.sring); info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->tx.sring = NULL; info->rx.sring = NULL; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. 
*/ static int netfront_resume(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); dev_dbg(&dev->dev, "%s\n", dev->nodename); xennet_disconnect_backend(info); return 0; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) { char *s, *e, *macstr; int i; macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); if (IS_ERR(macstr)) return PTR_ERR(macstr); for (i = 0; i < ETH_ALEN; i++) { mac[i] = simple_strtoul(s, &e, 16); if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { kfree(macstr); return -ENOENT; } s = e+1; } kfree(macstr); return 0; } static irqreturn_t xennet_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct netfront_info *np = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&np->tx_lock, flags); if (likely(netif_carrier_ok(dev))) { xennet_tx_buf_gc(dev); /* Under tx_lock: protects access to rx shared-ring indexes. */ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) napi_schedule(&np->napi); } spin_unlock_irqrestore(&np->tx_lock, flags); return IRQ_HANDLED; } static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info) { struct xen_netif_tx_sring *txs; struct xen_netif_rx_sring *rxs; int err; struct net_device *netdev = info->netdev; info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->rx.sring = NULL; info->tx.sring = NULL; netdev->irq = 0; err = xen_net_read_mac(dev, netdev->dev_addr); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto fail; } txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!txs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating tx ring page"); goto fail; } SHARED_RING_INIT(txs); FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(txs)); if (err < 0) { free_page((unsigned long)txs); goto fail; } info->tx_ring_ref = err; rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!rxs) { 
err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating rx ring page"); goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); if (err < 0) { free_page((unsigned long)rxs); goto fail; } info->rx_ring_ref = err; err = xenbus_alloc_evtchn(dev, &info->evtchn); if (err) goto fail; err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt, IRQF_SAMPLE_RANDOM, netdev->name, netdev); if (err < 0) goto fail; netdev->irq = err; return 0; fail: return err; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct netfront_info *info) { const char *message; struct xenbus_transaction xbt; int err; /* Create shared ring, alloc event channel. */ err = setup_netfront(dev, info); if (err) goto out; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_ring; } err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u", info->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u", info->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", info->evtchn); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); if (err) { message = "writing feature-sg"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } err = 
xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_ring; } return 0; abort_transaction: xenbus_transaction_end(xbt, 1); xenbus_dev_fatal(dev, err, "%s", message); destroy_ring: xennet_disconnect_backend(info); out: return err; } static int xennet_set_sg(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } else if (dev->mtu > ETH_DATA_LEN) dev->mtu = ETH_DATA_LEN; return ethtool_op_set_sg(dev, data); } static int xennet_set_tso(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv4", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } return ethtool_op_set_tso(dev, data); } static void xennet_set_features(struct net_device *dev) { /* Turn off all GSO bits except ROBUST. */ dev->features &= ~NETIF_F_GSO_MASK; dev->features |= NETIF_F_GSO_ROBUST; xennet_set_sg(dev, 0); /* We need checksum offload to enable scatter/gather and TSO. */ if (!(dev->features & NETIF_F_IP_CSUM)) return; if (!xennet_set_sg(dev, 1)) xennet_set_tso(dev, 1); } static int xennet_connect(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); int i, requeue_idx, err; struct sk_buff *skb; grant_ref_t ref; struct xen_netif_rx_request *req; unsigned int feature_rx_copy; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-copy", "%u", &feature_rx_copy); if (err != 1) feature_rx_copy = 0; if (!feature_rx_copy) { dev_info(&dev->dev, "backend does not support copying receive path\n"); return -ENODEV; } err = talk_to_backend(np->xbdev, np); if (err) return err; xennet_set_features(dev); spin_lock_bh(&np->rx_lock); spin_lock_irq(&np->tx_lock); /* Step 1: Discard all pending TX packet fragments. 
*/ xennet_release_tx_bufs(np); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { if (!np->rx_skbs[i]) continue; skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); req = RING_GET_REQUEST(&np->rx, requeue_idx); gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, pfn_to_mfn(page_to_pfn(skb_shinfo(skb)-> frags->page)), 0); req->gref = ref; req->id = requeue_idx; requeue_idx++; } np->rx.req_prod_pvt = requeue_idx; /* * Step 3: All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. */ netif_carrier_on(np->netdev); notify_remote_via_irq(np->netdev->irq); xennet_tx_buf_gc(dev); xennet_alloc_rx_buffers(dev); spin_unlock_irq(&np->tx_lock); spin_unlock_bh(&np->rx_lock); return 0; } /** * Callback received when the backend's state changes. 
*/ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct netfront_info *np = dev_get_drvdata(&dev->dev); struct net_device *netdev = np->netdev; dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateConnected: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateInitWait: if (dev->state != XenbusStateInitialising) break; if (xennet_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateClosing: xenbus_frontend_closed(dev); break; } } static const struct ethtool_ops xennet_ethtool_ops = { .set_tx_csum = ethtool_op_set_tx_csum, .set_sg = xennet_set_sg, .set_tso = xennet_set_tso, .get_link = ethtool_op_get_link, }; #ifdef CONFIG_SYSFS static ssize_t show_rxbuf_min(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *info = netdev_priv(netdev); return sprintf(buf, "%u\n", info->rx_min_target); } static ssize_t store_rxbuf_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target > np->rx_max_target) np->rx_max_target = target; np->rx_min_target = target; if (target > np->rx_target) np->rx_target = target; xennet_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_max(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *info = netdev_priv(netdev); return 
sprintf(buf, "%u\n", info->rx_max_target); } static ssize_t store_rxbuf_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target < np->rx_min_target) np->rx_min_target = target; np->rx_max_target = target; if (target < np->rx_target) np->rx_target = target; xennet_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_cur(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *info = netdev_priv(netdev); return sprintf(buf, "%u\n", info->rx_target); } static struct device_attribute xennet_attrs[] = { __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), }; static int xennet_sysfs_addif(struct net_device *netdev) { int i; int err; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { err = device_create_file(&netdev->dev, &xennet_attrs[i]); if (err) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(&netdev->dev, &xennet_attrs[i]); return err; } static void xennet_sysfs_delif(struct net_device *netdev) { int i; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) device_remove_file(&netdev->dev, &xennet_attrs[i]); } #endif /* CONFIG_SYSFS */ static struct xenbus_device_id netfront_ids[] = { { "vif" }, { "" } }; static int __devexit xennet_remove(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); dev_dbg(&dev->dev, "%s\n", dev->nodename); unregister_netdev(info->netdev); 
xennet_disconnect_backend(info); del_timer_sync(&info->rx_refill_timer); xennet_sysfs_delif(info->netdev); free_netdev(info->netdev); return 0; } static struct xenbus_driver netfront_driver = { .name = "vif", .owner = THIS_MODULE, .ids = netfront_ids, .probe = netfront_probe, .remove = __devexit_p(xennet_remove), .resume = netfront_resume, .otherend_changed = backend_changed, }; static int __init netif_init(void) { if (!xen_domain()) return -ENODEV; if (xen_initial_domain()) return 0; printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n"); return xenbus_register_frontend(&netfront_driver); } module_init(netif_init); static void __exit netif_exit(void) { if (xen_initial_domain()) return; xenbus_unregister_driver(&netfront_driver); } module_exit(netif_exit); MODULE_DESCRIPTION("Xen virtual network device frontend"); MODULE_LICENSE("GPL"); MODULE_ALIAS("xen:vif"); MODULE_ALIAS("xennet");
gpl-2.0
Metaluim/linux-rtws-2.6.36-d13
arch/arm/mach-mmp/jasper.c
56
3255
/*
 *  linux/arch/arm/mach-mmp/jasper.c
 *
 *  Support for the Marvell Jasper Development Platform.
 *
 *  Copyright (C) 2009-2010 Marvell International Ltd.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/max8649.h>
#include <linux/mfd/max8925.h>
#include <linux/interrupt.h>

#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/addr-map.h>
#include <mach/mfp-mmp2.h>
#include <mach/mmp2.h>

#include "common.h"

/* Board-level IRQ space: 48 extra IRQs reserved above the SoC's range. */
#define JASPER_NR_IRQS	(IRQ_BOARD_START + 48)

/*
 * Multi-function pin configuration for the board: UART1/UART3 consoles,
 * the 16-bit DFI (NAND) data/control bus, and the PMIC interrupt line
 * (falling-edge wakeup in low-power mode).
 */
static unsigned long jasper_pin_config[] __initdata = {
	/* UART1 */
	GPIO29_UART1_RXD,
	GPIO30_UART1_TXD,

	/* UART3 */
	GPIO51_UART3_RXD,
	GPIO52_UART3_TXD,

	/* DFI */
	GPIO168_DFI_D0,
	GPIO167_DFI_D1,
	GPIO166_DFI_D2,
	GPIO165_DFI_D3,
	GPIO107_DFI_D4,
	GPIO106_DFI_D5,
	GPIO105_DFI_D6,
	GPIO104_DFI_D7,
	GPIO111_DFI_D8,
	GPIO164_DFI_D9,
	GPIO163_DFI_D10,
	GPIO162_DFI_D11,
	GPIO161_DFI_D12,
	GPIO110_DFI_D13,
	GPIO109_DFI_D14,
	GPIO108_DFI_D15,
	GPIO143_ND_nCS0,
	GPIO144_ND_nCS1,
	GPIO147_ND_nWE,
	GPIO148_ND_nRE,
	GPIO150_ND_ALE,
	GPIO149_ND_CLE,
	GPIO112_ND_RDY0,
	GPIO160_ND_RDY1,

	/* PMIC */
	PMIC_PMIC_INT | MFP_LPM_EDGE_FALL,
};

/* The MAX8649 buck regulator supplies the CPU core voltage rail. */
static struct regulator_consumer_supply max8649_supply[] = {
	REGULATOR_SUPPLY("vcc_core", NULL),
};

/*
 * vcc_core constraints: 1.15 V - 1.28 V, voltage changes allowed for
 * cpufreq-style scaling, rail must stay on at all times.
 */
static struct regulator_init_data max8649_init_data = {
	.constraints	= {
		.name		= "vcc_core range",
		.min_uV		= 1150000,
		.max_uV		= 1280000,
		.always_on	= 1,
		.boot_on	= 1,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
	},
	.num_consumer_supplies	= 1,
	.consumer_supplies	= &max8649_supply[0],
};

static struct max8649_platform_data jasper_max8649_info = {
	.mode		= 2,	/* VID1 = 1, VID0 = 0 */
	.extclk		= 0,
	.ramp_timing	= MAX8649_RAMP_32MV,
	.regulator	= &max8649_init_data,
};

/* Backlight driven with a single LED string. */
static struct max8925_backlight_pdata jasper_backlight_data = {
	.dual_string	= 0,
};

static struct max8925_power_pdata jasper_power_data = {
	.batt_detect		= 0,	/* can't detect battery by ID pin */
	.topoff_threshold	= MAX8925_TOPOFF_THR_10PER,
	.fast_charge		= MAX8925_FCHG_1000MA,
};

/* MAX8925 PMIC aggregate platform data; its IRQs start the board range. */
static struct max8925_platform_data jasper_max8925_info = {
	.backlight		= &jasper_backlight_data,
	.power			= &jasper_power_data,
	.irq_base		= IRQ_BOARD_START,
};

/* Devices on TWSI (I2C) bus 1: the MAX8649 regulator and MAX8925 PMIC. */
static struct i2c_board_info jasper_twsi1_info[] = {
	[0] = {
		.type		= "max8649",
		.addr		= 0x60,
		.platform_data	= &jasper_max8649_info,
	},
	[1] = {
		.type		= "max8925",
		.addr		= 0x3c,
		.irq		= IRQ_MMP2_PMIC,
		.platform_data	= &jasper_max8925_info,
	},
};

/*
 * Board init: program the pin mux, register the on-chip UART and TWSI
 * devices, and tell the regulator core that all constraints are listed
 * (so unclaimed supplies can be disabled).
 */
static void __init jasper_init(void)
{
	mfp_config(ARRAY_AND_SIZE(jasper_pin_config));

	/* on-chip devices */
	mmp2_add_uart(1);
	mmp2_add_uart(3);
	mmp2_add_twsi(1, NULL, ARRAY_AND_SIZE(jasper_twsi1_info));

	regulator_has_full_constraints();
}

MACHINE_START(MARVELL_JASPER, "Jasper Development Platform")
	.map_io		= mmp_map_io,
	.nr_irqs	= JASPER_NR_IRQS,
	.init_irq	= mmp2_init_irq,
	.timer		= &mmp2_timer,
	.init_machine	= jasper_init,
MACHINE_END
gpl-2.0
davidmueller13/L900_3.9_Experiment
arch/arm/mach-omap2/gpmc-nand.c
56
4013
/*
 * gpmc-nand.c
 *
 * Copyright (C) 2009 Texas Instruments
 * Vimal Singh <vimalsingh@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/mtd/nand.h>
#include <linux/platform_data/mtd-nand-omap2.h>

#include <asm/mach/flash.h>

#include "gpmc.h"
#include "soc.h"
#include "gpmc-nand.h"

/* minimum size for IO mapping */
#define	NAND_IO_SIZE	4

/*
 * Resources for the "omap2-nand" platform device. The MEM resource is
 * filled in with the GPMC chip-select region at init time; the two IRQ
 * resources are the GPMC FIFO-event and terminal-count interrupts.
 */
static struct resource gpmc_nand_resource[] = {
	{
		.flags		= IORESOURCE_MEM,
	},
	{
		.flags		= IORESOURCE_IRQ,
	},
	{
		.flags		= IORESOURCE_IRQ,
	},
};

static struct platform_device gpmc_nand_device = {
	.name		= "omap2-nand",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(gpmc_nand_resource),
	.resource	= gpmc_nand_resource,
};

/*
 * Translate the caller-supplied GPMC timings into a gpmc_timings struct
 * and program them, plus device size/type and write-protect, on the NAND
 * chip-select. Returns 0 on success or the gpmc_cs_set_timings() error.
 */
static int omap2_nand_gpmc_retime(
				struct omap_nand_platform_data *gpmc_nand_data,
				struct gpmc_timings *gpmc_t)
{
	struct gpmc_timings t;
	int err;

	memset(&t, 0, sizeof(t));
	t.sync_clk = gpmc_t->sync_clk;
	t.cs_on = gpmc_t->cs_on;
	t.adv_on = gpmc_t->adv_on;

	/* Read */
	t.adv_rd_off = gpmc_t->adv_rd_off;
	/* OE asserts together with ADV on reads. */
	t.oe_on = t.adv_on;
	t.access = gpmc_t->access;
	t.oe_off = gpmc_t->oe_off;
	t.cs_rd_off = gpmc_t->cs_rd_off;
	t.rd_cycle = gpmc_t->rd_cycle;

	/* Write */
	t.adv_wr_off = gpmc_t->adv_wr_off;
	t.we_on = t.oe_on;
	if (cpu_is_omap34xx()) {
		/* Extra write timing fields only exist on OMAP34xx. */
		t.wr_data_mux_bus = gpmc_t->wr_data_mux_bus;
		t.wr_access = gpmc_t->wr_access;
	}
	t.we_off = gpmc_t->we_off;
	t.cs_wr_off = gpmc_t->cs_wr_off;
	t.wr_cycle = gpmc_t->wr_cycle;

	/* Configure GPMC: bus width, device type NAND, write-protect off. */
	if (gpmc_nand_data->devsize == NAND_BUSWIDTH_16)
		gpmc_cs_configure(gpmc_nand_data->cs, GPMC_CONFIG_DEV_SIZE, 1);
	else
		gpmc_cs_configure(gpmc_nand_data->cs, GPMC_CONFIG_DEV_SIZE, 0);
	gpmc_cs_configure(gpmc_nand_data->cs,
			GPMC_CONFIG_DEV_TYPE, GPMC_DEVICETYPE_NAND);
	gpmc_cs_configure(gpmc_nand_data->cs, GPMC_CONFIG_WP, 0);
	err = gpmc_cs_set_timings(gpmc_nand_data->cs, &t);
	if (err)
		return err;

	return 0;
}

/*
 * Report whether the requested hardware BCH ECC scheme is usable on the
 * running SoC. Returns 1 (capable) or 0 (not capable, with an error log).
 */
static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
{
	/* support only OMAP3 class */
	if (!cpu_is_omap34xx() && !soc_is_am33xx()) {
		pr_err("BCH ecc is not supported on this CPU\n");
		return 0;
	}

	/*
	 * For now, assume 4-bit mode is only supported on OMAP3630 ES1.x, x>=1
	 * and AM33xx derivates. Other chips may be added if confirmed to work.
	 */
	if ((ecc_opt == OMAP_ECC_BCH4_CODE_HW) &&
			(!cpu_is_omap3630() || (GET_OMAP_REVISION() == 0)) &&
			(!soc_is_am33xx())) {
		pr_err("BCH 4-bit mode is not supported on this CPU\n");
		return 0;
	}

	return 1;
}

/*
 * Claim a GPMC chip-select for NAND, program timings (if supplied),
 * hook up the GPMC IRQs and register the "omap2-nand" platform device.
 *
 * NOTE(review): the early "return err" paths after gpmc_cs_request()
 * succeeds (retime failure, BCH-capability failure) do not release the
 * chip-select via gpmc_cs_free() — looks like a CS-region leak on those
 * error paths; confirm against the GPMC driver's ownership rules.
 */
int gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data,
		   struct gpmc_timings *gpmc_t)
{
	int err	= 0;
	struct device *dev = &gpmc_nand_device.dev;

	gpmc_nand_device.dev.platform_data = gpmc_nand_data;

	err = gpmc_cs_request(gpmc_nand_data->cs, NAND_IO_SIZE,
				(unsigned long *)&gpmc_nand_resource[0].start);
	if (err < 0) {
		dev_err(dev, "Cannot request GPMC CS\n");
		return err;
	}

	gpmc_nand_resource[0].end = gpmc_nand_resource[0].start +
							NAND_IO_SIZE - 1;

	gpmc_nand_resource[1].start =
				gpmc_get_client_irq(GPMC_IRQ_FIFOEVENTENABLE);
	gpmc_nand_resource[2].start =
				gpmc_get_client_irq(GPMC_IRQ_COUNT_EVENT);

	if (gpmc_t) {
		err = omap2_nand_gpmc_retime(gpmc_nand_data, gpmc_t);
		if (err < 0) {
			dev_err(dev, "Unable to set gpmc timings: %d\n", err);
			return err;
		}
	}

	/* Enable RD PIN Monitoring Reg */
	if (gpmc_nand_data->dev_ready) {
		gpmc_cs_configure(gpmc_nand_data->cs, GPMC_CONFIG_RDY_BSY, 1);
	}

	gpmc_update_nand_reg(&gpmc_nand_data->reg, gpmc_nand_data->cs);

	if (!gpmc_hwecc_bch_capable(gpmc_nand_data->ecc_opt))
		return -EINVAL;

	err = platform_device_register(&gpmc_nand_device);
	if (err < 0) {
		dev_err(dev, "Unable to register NAND device\n");
		goto out_free_cs;
	}

	return 0;

out_free_cs:
	gpmc_cs_free(gpmc_nand_data->cs);

	return err;
}
gpl-2.0
androidaosp/kernel-msm
drivers/pwm/pwm-imx.c
312
8874
/*
 * simple driver for PWM (Pulse Width Modulator) controller
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from pxa PWM driver by eric miao <eric.miao@marvell.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/pwm.h>
#include <linux/of.h>
#include <linux/of_device.h>

/* i.MX1 and i.MX21 share the same PWM function block: */

#define MX1_PWMC	0x00   /* PWM Control Register */
#define MX1_PWMS	0x04   /* PWM Sample Register */
#define MX1_PWMP	0x08   /* PWM Period Register */

#define MX1_PWMC_EN	(1 << 4)

/* i.MX27, i.MX31, i.MX35 share the same PWM function block: */

#define MX3_PWMCR	0x00    /* PWM Control Register */
#define MX3_PWMSR	0x04    /* PWM Status Register */
#define MX3_PWMSAR	0x0C    /* PWM Sample Register */
#define MX3_PWMPR	0x10    /* PWM Period Register */
#define MX3_PWMCR_PRESCALER(x)	((((x) - 1) & 0xFFF) << 4)
#define MX3_PWMCR_DOZEEN	(1 << 24)
#define MX3_PWMCR_WAITEN	(1 << 23)
#define MX3_PWMCR_DBGEN		(1 << 22)
#define MX3_PWMCR_CLKSRC_IPG_HIGH	(2 << 16)
#define MX3_PWMCR_CLKSRC_IPG		(1 << 16)
#define MX3_PWMCR_SWR		(1 << 3)
#define MX3_PWMCR_EN		(1 << 0)
#define MX3_PWMSR_FIFOAV_4WORDS	0x4
#define MX3_PWMSR_FIFOAV_MASK	0x7

#define MX3_PWM_SWR_LOOP	5

/*
 * Per-device state. config/set_enable are chosen at probe time from the
 * OF match data so one driver serves both the v1 (i.MX1/21) and
 * v2 (i.MX27+) register layouts.
 */
struct imx_chip {
	struct clk	*clk_per;
	struct clk	*clk_ipg;

	void __iomem	*mmio_base;

	struct pwm_chip	chip;

	int (*config)(struct pwm_chip *chip,
		struct pwm_device *pwm, int duty_ns, int period_ns);
	void (*set_enable)(struct pwm_chip *chip, bool enable);
};

#define to_imx_chip(chip)	container_of(chip, struct imx_chip, chip)

/*
 * v1 config: keep the bootloader-programmed period and only scale the
 * sample register to the requested duty/period ratio (see comment below).
 */
static int imx_pwm_config_v1(struct pwm_chip *chip,
		struct pwm_device *pwm, int duty_ns, int period_ns)
{
	struct imx_chip *imx = to_imx_chip(chip);

	/*
	 * The PWM subsystem allows for exact frequencies. However,
	 * I cannot connect a scope on my device to the PWM line and
	 * thus cannot provide the program the PWM controller
	 * exactly. Instead, I'm relying on the fact that the
	 * Bootloader (u-boot or WinCE+haret) has programmed the PWM
	 * function group already. So I'll just modify the PWM sample
	 * register to follow the ratio of duty_ns vs. period_ns
	 * accordingly.
	 *
	 * This is good enough for programming the brightness of
	 * the LCD backlight.
	 *
	 * The real implementation would divide PERCLK[0] first by
	 * both the prescaler (/1 .. /128) and then by CLKSEL
	 * (/2 .. /16).
	 */
	u32 max = readl(imx->mmio_base + MX1_PWMP);
	u32 p = max * duty_ns / period_ns;
	writel(max - p, imx->mmio_base + MX1_PWMS);

	return 0;
}

/* v1 enable/disable: read-modify-write the EN bit in the control reg. */
static void imx_pwm_set_enable_v1(struct pwm_chip *chip, bool enable)
{
	struct imx_chip *imx = to_imx_chip(chip);
	u32 val;

	val = readl(imx->mmio_base + MX1_PWMC);

	if (enable)
		val |= MX1_PWMC_EN;
	else
		val &= ~MX1_PWMC_EN;

	writel(val, imx->mmio_base + MX1_PWMC);
}

/*
 * v2 config: compute prescaler, period and duty counts from clk_per and
 * program PWMSAR/PWMPR/PWMCR. Handles the 4-word sample FIFO carefully
 * (see the comment in the body) to avoid overflowing it.
 */
static int imx_pwm_config_v2(struct pwm_chip *chip,
		struct pwm_device *pwm, int duty_ns, int period_ns)
{
	struct imx_chip *imx = to_imx_chip(chip);
	struct device *dev = chip->dev;
	unsigned long long c;
	unsigned long period_cycles, duty_cycles, prescale;
	unsigned int period_ms;
	bool enable = test_bit(PWMF_ENABLED, &pwm->flags);
	int wait_count = 0, fifoav;
	u32 cr, sr;

	/*
	 * i.MX PWMv2 has a 4-word sample FIFO.
	 * In order to avoid FIFO overflow issue, we do software reset
	 * to clear all sample FIFO if the controller is disabled or
	 * wait for a full PWM cycle to get a relinquished FIFO slot
	 * when the controller is enabled and the FIFO is fully loaded.
	 */
	if (enable) {
		sr = readl(imx->mmio_base + MX3_PWMSR);
		fifoav = sr & MX3_PWMSR_FIFOAV_MASK;
		if (fifoav == MX3_PWMSR_FIFOAV_4WORDS) {
			/* FIFO full: sleep one period to free a slot. */
			period_ms = DIV_ROUND_UP(pwm->period,
						 NSEC_PER_MSEC);
			msleep(period_ms);

			sr = readl(imx->mmio_base + MX3_PWMSR);
			if (fifoav == (sr & MX3_PWMSR_FIFOAV_MASK))
				dev_warn(dev, "there is no free FIFO slot\n");
		}
	} else {
		/* Controller off: software-reset to flush the FIFO. */
		writel(MX3_PWMCR_SWR, imx->mmio_base + MX3_PWMCR);
		do {
			usleep_range(200, 1000);
			cr = readl(imx->mmio_base + MX3_PWMCR);
		} while ((cr & MX3_PWMCR_SWR) &&
			 (wait_count++ < MX3_PWM_SWR_LOOP));

		if (cr & MX3_PWMCR_SWR)
			dev_warn(dev, "software reset timeout\n");
	}

	/* Convert period_ns to clock cycles, then pick a prescaler that
	 * fits the 16-bit period register. */
	c = clk_get_rate(imx->clk_per);
	c = c * period_ns;
	do_div(c, 1000000000);
	period_cycles = c;

	prescale = period_cycles / 0x10000 + 1;

	period_cycles /= prescale;
	c = (unsigned long long)period_cycles * duty_ns;
	do_div(c, period_ns);
	duty_cycles = c;

	/*
	 * according to imx pwm RM, the real period value should be
	 * PERIOD value in PWMPR plus 2.
	 */
	if (period_cycles > 2)
		period_cycles -= 2;
	else
		period_cycles = 0;

	writel(duty_cycles, imx->mmio_base + MX3_PWMSAR);
	writel(period_cycles, imx->mmio_base + MX3_PWMPR);

	/* Keep running in doze/wait/debug modes; clock from high-freq IPG. */
	cr = MX3_PWMCR_PRESCALER(prescale) |
		MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
		MX3_PWMCR_DBGEN | MX3_PWMCR_CLKSRC_IPG_HIGH;

	if (enable)
		cr |= MX3_PWMCR_EN;

	writel(cr, imx->mmio_base + MX3_PWMCR);

	return 0;
}

/* v2 enable/disable: read-modify-write the EN bit in the control reg. */
static void imx_pwm_set_enable_v2(struct pwm_chip *chip, bool enable)
{
	struct imx_chip *imx = to_imx_chip(chip);
	u32 val;

	val = readl(imx->mmio_base + MX3_PWMCR);

	if (enable)
		val |= MX3_PWMCR_EN;
	else
		val &= ~MX3_PWMCR_EN;

	writel(val, imx->mmio_base + MX3_PWMCR);
}

/*
 * pwm_ops .config: enable the ipg (register access) clock around the
 * version-specific config callback.
 */
static int imx_pwm_config(struct pwm_chip *chip,
		struct pwm_device *pwm, int duty_ns, int period_ns)
{
	struct imx_chip *imx = to_imx_chip(chip);
	int ret;

	ret = clk_prepare_enable(imx->clk_ipg);
	if (ret)
		return ret;

	ret = imx->config(chip, pwm, duty_ns, period_ns);

	clk_disable_unprepare(imx->clk_ipg);

	return ret;
}

/*
 * pwm_ops .enable: turn on the per (counter) clock, then set EN.
 * The clock stays on until imx_pwm_disable() balances it.
 */
static int imx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
	struct imx_chip *imx = to_imx_chip(chip);
	int ret;

	ret = clk_prepare_enable(imx->clk_per);
	if (ret)
		return ret;

	imx->set_enable(chip, true);

	return 0;
}

/* pwm_ops .disable: clear EN, then drop the per clock. */
static void imx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
	struct imx_chip *imx = to_imx_chip(chip);

	imx->set_enable(chip, false);

	clk_disable_unprepare(imx->clk_per);
}

static struct pwm_ops imx_pwm_ops = {
	.enable = imx_pwm_enable,
	.disable = imx_pwm_disable,
	.config = imx_pwm_config,
	.owner = THIS_MODULE,
};

/* Per-compatible callbacks selecting the v1 or v2 register layout. */
struct imx_pwm_data {
	int (*config)(struct pwm_chip *chip,
		struct pwm_device *pwm, int duty_ns, int period_ns);
	void (*set_enable)(struct pwm_chip *chip, bool enable);
};

static struct imx_pwm_data imx_pwm_data_v1 = {
	.config = imx_pwm_config_v1,
	.set_enable = imx_pwm_set_enable_v1,
};

static struct imx_pwm_data imx_pwm_data_v2 = {
	.config = imx_pwm_config_v2,
	.set_enable = imx_pwm_set_enable_v2,
};

static const struct of_device_id imx_pwm_dt_ids[] = {
	{ .compatible = "fsl,imx1-pwm", .data = &imx_pwm_data_v1, },
	{ .compatible = "fsl,imx27-pwm", .data = &imx_pwm_data_v2, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_pwm_dt_ids);

/*
 * Probe: look up the OF match data, grab the "per" and "ipg" clocks,
 * map the register window and register a single-channel pwm_chip.
 * All resources are devm-managed, so remove() only unregisters the chip.
 */
static int imx_pwm_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(imx_pwm_dt_ids, &pdev->dev);
	const struct imx_pwm_data *data;
	struct imx_chip *imx;
	struct resource *r;
	int ret = 0;

	if (!of_id)
		return -ENODEV;

	imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL);
	if (imx == NULL)
		return -ENOMEM;

	imx->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(imx->clk_per)) {
		dev_err(&pdev->dev, "getting per clock failed with %ld\n",
				PTR_ERR(imx->clk_per));
		return PTR_ERR(imx->clk_per);
	}

	imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imx->clk_ipg)) {
		dev_err(&pdev->dev, "getting ipg clock failed with %ld\n",
				PTR_ERR(imx->clk_ipg));
		return PTR_ERR(imx->clk_ipg);
	}

	imx->chip.ops = &imx_pwm_ops;
	imx->chip.dev = &pdev->dev;
	imx->chip.base = -1;
	imx->chip.npwm = 1;
	imx->chip.can_sleep = true;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	imx->mmio_base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(imx->mmio_base))
		return PTR_ERR(imx->mmio_base);

	data = of_id->data;
	imx->config = data->config;
	imx->set_enable = data->set_enable;

	ret = pwmchip_add(&imx->chip);
	if (ret < 0)
		return ret;

	platform_set_drvdata(pdev, imx);
	return 0;
}

static int imx_pwm_remove(struct platform_device *pdev)
{
	struct imx_chip *imx;

	imx = platform_get_drvdata(pdev);
	if (imx == NULL)
		return -ENODEV;

	return pwmchip_remove(&imx->chip);
}

static struct platform_driver imx_pwm_driver = {
	.driver		= {
		.name	= "imx-pwm",
		.owner	= THIS_MODULE,
		.of_match_table = imx_pwm_dt_ids,
	},
	.probe		= imx_pwm_probe,
	.remove		= imx_pwm_remove,
};

module_platform_driver(imx_pwm_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
gpl-2.0
arunthomas/linux
net/netfilter/nf_conntrack_proto_dccp.c
824
29451
/* * DCCP connection tracking protocol helper * * Copyright (c) 2005, 2006, 2008 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/spinlock.h> #include <linux/skbuff.h> #include <linux/dccp.h> #include <linux/slab.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <linux/netfilter/nfnetlink_conntrack.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_ecache.h> #include <net/netfilter/nf_log.h> /* Timeouts are based on values from RFC4340: * * - REQUEST: * * 8.1.2. Client Request * * A client MAY give up on its DCCP-Requests after some time * (3 minutes, for example). * * - RESPOND: * * 8.1.3. Server Response * * It MAY also leave the RESPOND state for CLOSED after a timeout of * not less than 4MSL (8 minutes); * * - PARTOPEN: * * 8.1.5. Handshake Completion * * If the client remains in PARTOPEN for more than 4MSL (8 minutes), * it SHOULD reset the connection with Reset Code 2, "Aborted". * * - OPEN: * * The DCCP timestamp overflows after 11.9 hours. If the connection * stays idle this long the sequence number won't be recognized * as valid anymore. * * - CLOSEREQ/CLOSING: * * 8.3. Termination * * The retransmission timer should initially be set to go off in two * round-trip times and should back off to not less than once every * 64 seconds ... * * - TIMEWAIT: * * 4.3. States * * A server or client socket remains in this state for 2MSL (4 minutes) * after the connection has been town down, ... 
*/ #define DCCP_MSL (2 * 60 * HZ) static const char * const dccp_state_names[] = { [CT_DCCP_NONE] = "NONE", [CT_DCCP_REQUEST] = "REQUEST", [CT_DCCP_RESPOND] = "RESPOND", [CT_DCCP_PARTOPEN] = "PARTOPEN", [CT_DCCP_OPEN] = "OPEN", [CT_DCCP_CLOSEREQ] = "CLOSEREQ", [CT_DCCP_CLOSING] = "CLOSING", [CT_DCCP_TIMEWAIT] = "TIMEWAIT", [CT_DCCP_IGNORE] = "IGNORE", [CT_DCCP_INVALID] = "INVALID", }; #define sNO CT_DCCP_NONE #define sRQ CT_DCCP_REQUEST #define sRS CT_DCCP_RESPOND #define sPO CT_DCCP_PARTOPEN #define sOP CT_DCCP_OPEN #define sCR CT_DCCP_CLOSEREQ #define sCG CT_DCCP_CLOSING #define sTW CT_DCCP_TIMEWAIT #define sIG CT_DCCP_IGNORE #define sIV CT_DCCP_INVALID /* * DCCP state transition table * * The assumption is the same as for TCP tracking: * * We are the man in the middle. All the packets go through us but might * get lost in transit to the destination. It is assumed that the destination * can't receive segments we haven't seen. * * The following states exist: * * NONE: Initial state, expecting Request * REQUEST: Request seen, waiting for Response from server * RESPOND: Response from server seen, waiting for Ack from client * PARTOPEN: Ack after Response seen, waiting for packet other than Response, * Reset or Sync from server * OPEN: Packet other than Response, Reset or Sync seen * CLOSEREQ: CloseReq from server seen, expecting Close from client * CLOSING: Close seen, expecting Reset * TIMEWAIT: Reset seen * IGNORE: Not determinable whether packet is valid * * Some states exist only on one side of the connection: REQUEST, RESPOND, * PARTOPEN, CLOSEREQ. For the other side these states are equivalent to * the one it was in before. * * Packets are marked as ignored (sIG) if we don't know if they're valid * (for example a reincarnation of a connection we didn't notice is dead * already) and the server may send back a connection closing Reset or a * Response. They're also used for Sync/SyncAck packets, which we don't * care about. 
*/ static const u_int8_t dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] = { [CT_DCCP_ROLE_CLIENT] = { [DCCP_PKT_REQUEST] = { /* * sNO -> sRQ Regular Request * sRQ -> sRQ Retransmitted Request or reincarnation * sRS -> sRS Retransmitted Request (apparently Response * got lost after we saw it) or reincarnation * sPO -> sIG Ignore, conntrack might be out of sync * sOP -> sIG Ignore, conntrack might be out of sync * sCR -> sIG Ignore, conntrack might be out of sync * sCG -> sIG Ignore, conntrack might be out of sync * sTW -> sRQ Reincarnation * * sNO, sRQ, sRS, sPO. sOP, sCR, sCG, sTW, */ sRQ, sRQ, sRS, sIG, sIG, sIG, sIG, sRQ, }, [DCCP_PKT_RESPONSE] = { /* * sNO -> sIV Invalid * sRQ -> sIG Ignore, might be response to ignored Request * sRS -> sIG Ignore, might be response to ignored Request * sPO -> sIG Ignore, might be response to ignored Request * sOP -> sIG Ignore, might be response to ignored Request * sCR -> sIG Ignore, might be response to ignored Request * sCG -> sIG Ignore, might be response to ignored Request * sTW -> sIV Invalid, reincarnation in reverse direction * goes through sRQ * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIV, }, [DCCP_PKT_ACK] = { /* * sNO -> sIV No connection * sRQ -> sIV No connection * sRS -> sPO Ack for Response, move to PARTOPEN (8.1.5.) * sPO -> sPO Retransmitted Ack for Response, remain in PARTOPEN * sOP -> sOP Regular ACK, remain in OPEN * sCR -> sCR Ack in CLOSEREQ MAY be processed (8.3.) * sCG -> sCG Ack in CLOSING MAY be processed (8.3.) * sTW -> sIV * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIV, sPO, sPO, sOP, sCR, sCG, sIV }, [DCCP_PKT_DATA] = { /* * sNO -> sIV No connection * sRQ -> sIV No connection * sRS -> sIV No connection * sPO -> sIV MUST use DataAck in PARTOPEN state (8.1.5.) * sOP -> sOP Regular Data packet * sCR -> sCR Data in CLOSEREQ MAY be processed (8.3.) * sCG -> sCG Data in CLOSING MAY be processed (8.3.) 
* sTW -> sIV * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIV, sIV, sIV, sOP, sCR, sCG, sIV, }, [DCCP_PKT_DATAACK] = { /* * sNO -> sIV No connection * sRQ -> sIV No connection * sRS -> sPO Ack for Response, move to PARTOPEN (8.1.5.) * sPO -> sPO Remain in PARTOPEN state * sOP -> sOP Regular DataAck packet in OPEN state * sCR -> sCR DataAck in CLOSEREQ MAY be processed (8.3.) * sCG -> sCG DataAck in CLOSING MAY be processed (8.3.) * sTW -> sIV * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIV, sPO, sPO, sOP, sCR, sCG, sIV }, [DCCP_PKT_CLOSEREQ] = { /* * CLOSEREQ may only be sent by the server. * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }, [DCCP_PKT_CLOSE] = { /* * sNO -> sIV No connection * sRQ -> sIV No connection * sRS -> sIV No connection * sPO -> sCG Client-initiated close * sOP -> sCG Client-initiated close * sCR -> sCG Close in response to CloseReq (8.3.) * sCG -> sCG Retransmit * sTW -> sIV Late retransmit, already in TIME_WAIT * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIV, sIV, sCG, sCG, sCG, sIV, sIV }, [DCCP_PKT_RESET] = { /* * sNO -> sIV No connection * sRQ -> sTW Sync received or timeout, SHOULD send Reset (8.1.1.) * sRS -> sTW Response received without Request * sPO -> sTW Timeout, SHOULD send Reset (8.1.5.) 
* sOP -> sTW Connection reset * sCR -> sTW Connection reset * sCG -> sTW Connection reset * sTW -> sIG Ignore (don't refresh timer) * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sTW, sTW, sTW, sTW, sTW, sTW, sIG }, [DCCP_PKT_SYNC] = { /* * We currently ignore Sync packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, [DCCP_PKT_SYNCACK] = { /* * We currently ignore SyncAck packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, }, [CT_DCCP_ROLE_SERVER] = { [DCCP_PKT_REQUEST] = { /* * sNO -> sIV Invalid * sRQ -> sIG Ignore, conntrack might be out of sync * sRS -> sIG Ignore, conntrack might be out of sync * sPO -> sIG Ignore, conntrack might be out of sync * sOP -> sIG Ignore, conntrack might be out of sync * sCR -> sIG Ignore, conntrack might be out of sync * sCG -> sIG Ignore, conntrack might be out of sync * sTW -> sRQ Reincarnation, must reverse roles * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIG, sIG, sIG, sIG, sIG, sIG, sRQ }, [DCCP_PKT_RESPONSE] = { /* * sNO -> sIV Response without Request * sRQ -> sRS Response to clients Request * sRS -> sRS Retransmitted Response (8.1.3. SHOULD NOT) * sPO -> sIG Response to an ignored Request or late retransmit * sOP -> sIG Ignore, might be response to ignored Request * sCR -> sIG Ignore, might be response to ignored Request * sCG -> sIG Ignore, might be response to ignored Request * sTW -> sIV Invalid, Request from client in sTW moves to sRQ * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sRS, sRS, sIG, sIG, sIG, sIG, sIV }, [DCCP_PKT_ACK] = { /* * sNO -> sIV No connection * sRQ -> sIV No connection * sRS -> sIV No connection * sPO -> sOP Enter OPEN state (8.1.5.) * sOP -> sOP Regular Ack in OPEN state * sCR -> sIV Waiting for Close from client * sCG -> sCG Ack in CLOSING MAY be processed (8.3.) 
* sTW -> sIV * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIV, sIV, sOP, sOP, sIV, sCG, sIV }, [DCCP_PKT_DATA] = { /* * sNO -> sIV No connection * sRQ -> sIV No connection * sRS -> sIV No connection * sPO -> sOP Enter OPEN state (8.1.5.) * sOP -> sOP Regular Data packet in OPEN state * sCR -> sIV Waiting for Close from client * sCG -> sCG Data in CLOSING MAY be processed (8.3.) * sTW -> sIV * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIV, sIV, sOP, sOP, sIV, sCG, sIV }, [DCCP_PKT_DATAACK] = { /* * sNO -> sIV No connection * sRQ -> sIV No connection * sRS -> sIV No connection * sPO -> sOP Enter OPEN state (8.1.5.) * sOP -> sOP Regular DataAck in OPEN state * sCR -> sIV Waiting for Close from client * sCG -> sCG Data in CLOSING MAY be processed (8.3.) * sTW -> sIV * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIV, sIV, sOP, sOP, sIV, sCG, sIV }, [DCCP_PKT_CLOSEREQ] = { /* * sNO -> sIV No connection * sRQ -> sIV No connection * sRS -> sIV No connection * sPO -> sOP -> sCR Move directly to CLOSEREQ (8.1.5.) * sOP -> sCR CloseReq in OPEN state * sCR -> sCR Retransmit * sCG -> sCR Simultaneous close, client sends another Close * sTW -> sIV Already closed * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIV, sIV, sCR, sCR, sCR, sCR, sIV }, [DCCP_PKT_CLOSE] = { /* * sNO -> sIV No connection * sRQ -> sIV No connection * sRS -> sIV No connection * sPO -> sOP -> sCG Move direcly to CLOSING * sOP -> sCG Move to CLOSING * sCR -> sIV Close after CloseReq is invalid * sCG -> sCG Retransmit * sTW -> sIV Already closed * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIV, sIV, sIV, sCG, sCG, sIV, sCG, sIV }, [DCCP_PKT_RESET] = { /* * sNO -> sIV No connection * sRQ -> sTW Reset in response to Request * sRS -> sTW Timeout, SHOULD send Reset (8.1.3.) * sPO -> sTW Timeout, SHOULD send Reset (8.1.3.) 
* sOP -> sTW * sCR -> sTW * sCG -> sTW * sTW -> sIG Ignore (don't refresh timer) * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW, sTW */ sIV, sTW, sTW, sTW, sTW, sTW, sTW, sTW, sIG }, [DCCP_PKT_SYNC] = { /* * We currently ignore Sync packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, [DCCP_PKT_SYNCACK] = { /* * We currently ignore SyncAck packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, }, }; /* this module per-net specifics */ static int dccp_net_id __read_mostly; struct dccp_net { struct nf_proto_net pn; int dccp_loose; unsigned int dccp_timeout[CT_DCCP_MAX + 1]; }; static inline struct dccp_net *dccp_pernet(struct net *net) { return net_generic(net, dccp_net_id); } static bool dccp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, struct nf_conntrack_tuple *tuple) { struct dccp_hdr _hdr, *dh; dh = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); if (dh == NULL) return false; tuple->src.u.dccp.port = dh->dccph_sport; tuple->dst.u.dccp.port = dh->dccph_dport; return true; } static bool dccp_invert_tuple(struct nf_conntrack_tuple *inv, const struct nf_conntrack_tuple *tuple) { inv->src.u.dccp.port = tuple->dst.u.dccp.port; inv->dst.u.dccp.port = tuple->src.u.dccp.port; return true; } static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, unsigned int *timeouts) { struct net *net = nf_ct_net(ct); struct dccp_net *dn; struct dccp_hdr _dh, *dh; const char *msg; u_int8_t state; dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh); BUG_ON(dh == NULL); state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE]; switch (state) { default: dn = dccp_pernet(net); if (dn->dccp_loose == 0) { msg = "nf_ct_dccp: not picking up existing connection "; goto out_invalid; } case CT_DCCP_REQUEST: break; case CT_DCCP_INVALID: msg = "nf_ct_dccp: invalid state transition "; goto out_invalid; } 
ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT; ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER; ct->proto.dccp.state = CT_DCCP_NONE; ct->proto.dccp.last_pkt = DCCP_PKT_REQUEST; ct->proto.dccp.last_dir = IP_CT_DIR_ORIGINAL; ct->proto.dccp.handshake_seq = 0; return true; out_invalid: if (LOG_INVALID(net, IPPROTO_DCCP)) nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, "%s", msg); return false; } static u64 dccp_ack_seq(const struct dccp_hdr *dh) { const struct dccp_hdr_ack_bits *dhack; dhack = (void *)dh + __dccp_basic_hdr_len(dh); return ((u64)ntohs(dhack->dccph_ack_nr_high) << 32) + ntohl(dhack->dccph_ack_nr_low); } static unsigned int *dccp_get_timeouts(struct net *net) { return dccp_pernet(net)->dccp_timeout; } static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, u_int8_t pf, unsigned int hooknum, unsigned int *timeouts) { struct net *net = nf_ct_net(ct); enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); struct dccp_hdr _dh, *dh; u_int8_t type, old_state, new_state; enum ct_dccp_roles role; dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh); BUG_ON(dh == NULL); type = dh->dccph_type; if (type == DCCP_PKT_RESET && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { /* Tear down connection immediately if only reply is a RESET */ nf_ct_kill_acct(ct, ctinfo, skb); return NF_ACCEPT; } spin_lock_bh(&ct->lock); role = ct->proto.dccp.role[dir]; old_state = ct->proto.dccp.state; new_state = dccp_state_table[role][type][old_state]; switch (new_state) { case CT_DCCP_REQUEST: if (old_state == CT_DCCP_TIMEWAIT && role == CT_DCCP_ROLE_SERVER) { /* Reincarnation in the reverse direction: reopen and * reverse client/server roles. 
*/ ct->proto.dccp.role[dir] = CT_DCCP_ROLE_CLIENT; ct->proto.dccp.role[!dir] = CT_DCCP_ROLE_SERVER; } break; case CT_DCCP_RESPOND: if (old_state == CT_DCCP_REQUEST) ct->proto.dccp.handshake_seq = dccp_hdr_seq(dh); break; case CT_DCCP_PARTOPEN: if (old_state == CT_DCCP_RESPOND && type == DCCP_PKT_ACK && dccp_ack_seq(dh) == ct->proto.dccp.handshake_seq) set_bit(IPS_ASSURED_BIT, &ct->status); break; case CT_DCCP_IGNORE: /* * Connection tracking might be out of sync, so we ignore * packets that might establish a new connection and resync * if the server responds with a valid Response. */ if (ct->proto.dccp.last_dir == !dir && ct->proto.dccp.last_pkt == DCCP_PKT_REQUEST && type == DCCP_PKT_RESPONSE) { ct->proto.dccp.role[!dir] = CT_DCCP_ROLE_CLIENT; ct->proto.dccp.role[dir] = CT_DCCP_ROLE_SERVER; ct->proto.dccp.handshake_seq = dccp_hdr_seq(dh); new_state = CT_DCCP_RESPOND; break; } ct->proto.dccp.last_dir = dir; ct->proto.dccp.last_pkt = type; spin_unlock_bh(&ct->lock); if (LOG_INVALID(net, IPPROTO_DCCP)) nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "nf_ct_dccp: invalid packet ignored "); return NF_ACCEPT; case CT_DCCP_INVALID: spin_unlock_bh(&ct->lock); if (LOG_INVALID(net, IPPROTO_DCCP)) nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "nf_ct_dccp: invalid state transition "); return -NF_ACCEPT; } ct->proto.dccp.last_dir = dir; ct->proto.dccp.last_pkt = type; ct->proto.dccp.state = new_state; spin_unlock_bh(&ct->lock); if (new_state != old_state) nf_conntrack_event_cache(IPCT_PROTOINFO, ct); nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]); return NF_ACCEPT; } static int dccp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) { struct dccp_hdr _dh, *dh; unsigned int dccp_len = skb->len - dataoff; unsigned int cscov; const char *msg; dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh); if (dh == NULL) { msg = "nf_ct_dccp: short packet "; goto 
out_invalid; } if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) || dh->dccph_doff * 4 > dccp_len) { msg = "nf_ct_dccp: truncated/malformed packet "; goto out_invalid; } cscov = dccp_len; if (dh->dccph_cscov) { cscov = (dh->dccph_cscov - 1) * 4; if (cscov > dccp_len) { msg = "nf_ct_dccp: bad checksum coverage "; goto out_invalid; } } if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_DCCP, pf)) { msg = "nf_ct_dccp: bad checksum "; goto out_invalid; } if (dh->dccph_type >= DCCP_PKT_INVALID) { msg = "nf_ct_dccp: reserved packet type "; goto out_invalid; } return NF_ACCEPT; out_invalid: if (LOG_INVALID(net, IPPROTO_DCCP)) nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg); return -NF_ACCEPT; } static void dccp_print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple) { seq_printf(s, "sport=%hu dport=%hu ", ntohs(tuple->src.u.dccp.port), ntohs(tuple->dst.u.dccp.port)); } static void dccp_print_conntrack(struct seq_file *s, struct nf_conn *ct) { seq_printf(s, "%s ", dccp_state_names[ct->proto.dccp.state]); } #if IS_ENABLED(CONFIG_NF_CT_NETLINK) static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla, struct nf_conn *ct) { struct nlattr *nest_parms; spin_lock_bh(&ct->lock); nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP | NLA_F_NESTED); if (!nest_parms) goto nla_put_failure; if (nla_put_u8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state) || nla_put_u8(skb, CTA_PROTOINFO_DCCP_ROLE, ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]) || nla_put_be64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ, cpu_to_be64(ct->proto.dccp.handshake_seq))) goto nla_put_failure; nla_nest_end(skb, nest_parms); spin_unlock_bh(&ct->lock); return 0; nla_put_failure: spin_unlock_bh(&ct->lock); return -1; } static const struct nla_policy dccp_nla_policy[CTA_PROTOINFO_DCCP_MAX + 1] = { [CTA_PROTOINFO_DCCP_STATE] = { .type = NLA_U8 }, [CTA_PROTOINFO_DCCP_ROLE] = { .type = NLA_U8 }, 
[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ] = { .type = NLA_U64 }, }; static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct) { struct nlattr *attr = cda[CTA_PROTOINFO_DCCP]; struct nlattr *tb[CTA_PROTOINFO_DCCP_MAX + 1]; int err; if (!attr) return 0; err = nla_parse_nested(tb, CTA_PROTOINFO_DCCP_MAX, attr, dccp_nla_policy); if (err < 0) return err; if (!tb[CTA_PROTOINFO_DCCP_STATE] || !tb[CTA_PROTOINFO_DCCP_ROLE] || nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) > CT_DCCP_ROLE_MAX || nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]) >= CT_DCCP_IGNORE) { return -EINVAL; } spin_lock_bh(&ct->lock); ct->proto.dccp.state = nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]); if (nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) == CT_DCCP_ROLE_CLIENT) { ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT; ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER; } else { ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_SERVER; ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_CLIENT; } if (tb[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ]) { ct->proto.dccp.handshake_seq = be64_to_cpu(nla_get_be64(tb[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ])); } spin_unlock_bh(&ct->lock); return 0; } static int dccp_nlattr_size(void) { return nla_total_size(0) /* CTA_PROTOINFO_DCCP */ + nla_policy_len(dccp_nla_policy, CTA_PROTOINFO_DCCP_MAX + 1); } #endif #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_cttimeout.h> static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[], struct net *net, void *data) { struct dccp_net *dn = dccp_pernet(net); unsigned int *timeouts = data; int i; /* set default DCCP timeouts. */ for (i=0; i<CT_DCCP_MAX; i++) timeouts[i] = dn->dccp_timeout[i]; /* there's a 1:1 mapping between attributes and protocol states. 
*/ for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++) { if (tb[i]) { timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ; } } return 0; } static int dccp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeouts = data; int i; for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++) { if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ))) goto nla_put_failure; } return 0; nla_put_failure: return -ENOSPC; } static const struct nla_policy dccp_timeout_nla_policy[CTA_TIMEOUT_DCCP_MAX+1] = { [CTA_TIMEOUT_DCCP_REQUEST] = { .type = NLA_U32 }, [CTA_TIMEOUT_DCCP_RESPOND] = { .type = NLA_U32 }, [CTA_TIMEOUT_DCCP_PARTOPEN] = { .type = NLA_U32 }, [CTA_TIMEOUT_DCCP_OPEN] = { .type = NLA_U32 }, [CTA_TIMEOUT_DCCP_CLOSEREQ] = { .type = NLA_U32 }, [CTA_TIMEOUT_DCCP_CLOSING] = { .type = NLA_U32 }, [CTA_TIMEOUT_DCCP_TIMEWAIT] = { .type = NLA_U32 }, }; #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ #ifdef CONFIG_SYSCTL /* template, data assigned later */ static struct ctl_table dccp_sysctl_table[] = { { .procname = "nf_conntrack_dccp_timeout_request", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_dccp_timeout_respond", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_dccp_timeout_partopen", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_dccp_timeout_open", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_dccp_timeout_closereq", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_dccp_timeout_closing", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_dccp_timeout_timewait", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = 
proc_dointvec_jiffies, }, { .procname = "nf_conntrack_dccp_loose", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { } }; #endif /* CONFIG_SYSCTL */ static int dccp_kmemdup_sysctl_table(struct net *net, struct nf_proto_net *pn, struct dccp_net *dn) { #ifdef CONFIG_SYSCTL if (pn->ctl_table) return 0; pn->ctl_table = kmemdup(dccp_sysctl_table, sizeof(dccp_sysctl_table), GFP_KERNEL); if (!pn->ctl_table) return -ENOMEM; pn->ctl_table[0].data = &dn->dccp_timeout[CT_DCCP_REQUEST]; pn->ctl_table[1].data = &dn->dccp_timeout[CT_DCCP_RESPOND]; pn->ctl_table[2].data = &dn->dccp_timeout[CT_DCCP_PARTOPEN]; pn->ctl_table[3].data = &dn->dccp_timeout[CT_DCCP_OPEN]; pn->ctl_table[4].data = &dn->dccp_timeout[CT_DCCP_CLOSEREQ]; pn->ctl_table[5].data = &dn->dccp_timeout[CT_DCCP_CLOSING]; pn->ctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT]; pn->ctl_table[7].data = &dn->dccp_loose; /* Don't export sysctls to unprivileged users */ if (net->user_ns != &init_user_ns) pn->ctl_table[0].procname = NULL; #endif return 0; } static int dccp_init_net(struct net *net, u_int16_t proto) { struct dccp_net *dn = dccp_pernet(net); struct nf_proto_net *pn = &dn->pn; if (!pn->users) { /* default values */ dn->dccp_loose = 1; dn->dccp_timeout[CT_DCCP_REQUEST] = 2 * DCCP_MSL; dn->dccp_timeout[CT_DCCP_RESPOND] = 4 * DCCP_MSL; dn->dccp_timeout[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL; dn->dccp_timeout[CT_DCCP_OPEN] = 12 * 3600 * HZ; dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ; dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ; dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL; } return dccp_kmemdup_sysctl_table(net, pn, dn); } static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = { .l3proto = AF_INET, .l4proto = IPPROTO_DCCP, .name = "dccp", .pkt_to_tuple = dccp_pkt_to_tuple, .invert_tuple = dccp_invert_tuple, .new = dccp_new, .packet = dccp_packet, .get_timeouts = dccp_get_timeouts, .error = dccp_error, .print_tuple = dccp_print_tuple, .print_conntrack = dccp_print_conntrack, #if 
IS_ENABLED(CONFIG_NF_CT_NETLINK) .to_nlattr = dccp_to_nlattr, .nlattr_size = dccp_nlattr_size, .from_nlattr = nlattr_to_dccp, .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nla_policy = nf_ct_port_nla_policy, #endif #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) .ctnl_timeout = { .nlattr_to_obj = dccp_timeout_nlattr_to_obj, .obj_to_nlattr = dccp_timeout_obj_to_nlattr, .nlattr_max = CTA_TIMEOUT_DCCP_MAX, .obj_size = sizeof(unsigned int) * CT_DCCP_MAX, .nla_policy = dccp_timeout_nla_policy, }, #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ .net_id = &dccp_net_id, .init_net = dccp_init_net, }; static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = { .l3proto = AF_INET6, .l4proto = IPPROTO_DCCP, .name = "dccp", .pkt_to_tuple = dccp_pkt_to_tuple, .invert_tuple = dccp_invert_tuple, .new = dccp_new, .packet = dccp_packet, .get_timeouts = dccp_get_timeouts, .error = dccp_error, .print_tuple = dccp_print_tuple, .print_conntrack = dccp_print_conntrack, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) .to_nlattr = dccp_to_nlattr, .nlattr_size = dccp_nlattr_size, .from_nlattr = nlattr_to_dccp, .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nla_policy = nf_ct_port_nla_policy, #endif #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) .ctnl_timeout = { .nlattr_to_obj = dccp_timeout_nlattr_to_obj, .obj_to_nlattr = dccp_timeout_obj_to_nlattr, .nlattr_max = CTA_TIMEOUT_DCCP_MAX, .obj_size = sizeof(unsigned int) * CT_DCCP_MAX, .nla_policy = dccp_timeout_nla_policy, }, #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ .net_id = &dccp_net_id, .init_net = dccp_init_net, }; static __net_init int dccp_net_init(struct net *net) { int ret = 0; ret = nf_ct_l4proto_pernet_register(net, &dccp_proto4); if (ret < 0) { pr_err("nf_conntrack_dccp4: pernet registration failed.\n"); goto out; } ret = 
nf_ct_l4proto_pernet_register(net, &dccp_proto6); if (ret < 0) { pr_err("nf_conntrack_dccp6: pernet registration failed.\n"); goto cleanup_dccp4; } return 0; cleanup_dccp4: nf_ct_l4proto_pernet_unregister(net, &dccp_proto4); out: return ret; } static __net_exit void dccp_net_exit(struct net *net) { nf_ct_l4proto_pernet_unregister(net, &dccp_proto6); nf_ct_l4proto_pernet_unregister(net, &dccp_proto4); } static struct pernet_operations dccp_net_ops = { .init = dccp_net_init, .exit = dccp_net_exit, .id = &dccp_net_id, .size = sizeof(struct dccp_net), }; static int __init nf_conntrack_proto_dccp_init(void) { int ret; ret = register_pernet_subsys(&dccp_net_ops); if (ret < 0) goto out_pernet; ret = nf_ct_l4proto_register(&dccp_proto4); if (ret < 0) goto out_dccp4; ret = nf_ct_l4proto_register(&dccp_proto6); if (ret < 0) goto out_dccp6; return 0; out_dccp6: nf_ct_l4proto_unregister(&dccp_proto4); out_dccp4: unregister_pernet_subsys(&dccp_net_ops); out_pernet: return ret; } static void __exit nf_conntrack_proto_dccp_fini(void) { nf_ct_l4proto_unregister(&dccp_proto6); nf_ct_l4proto_unregister(&dccp_proto4); unregister_pernet_subsys(&dccp_net_ops); } module_init(nf_conntrack_proto_dccp_init); module_exit(nf_conntrack_proto_dccp_fini); MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); MODULE_DESCRIPTION("DCCP connection tracking protocol helper"); MODULE_LICENSE("GPL");
gpl-2.0
glewarne/testing
drivers/cpufreq/pcc-cpufreq.c
824
15808
/* * pcc-cpufreq.c - Processor Clocking Control firmware cpufreq interface * * Copyright (C) 2009 Red Hat, Matthew Garrett <mjg@redhat.com> * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. * Nagananda Chumbalkar <nagananda.chumbalkar@hp.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or NON * INFRINGEMENT. See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
* * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/smp.h> #include <linux/sched.h> #include <linux/cpufreq.h> #include <linux/compiler.h> #include <linux/slab.h> #include <linux/acpi.h> #include <linux/io.h> #include <linux/spinlock.h> #include <linux/uaccess.h> #include <acpi/processor.h> #define PCC_VERSION "1.10.00" #define POLL_LOOPS 300 #define CMD_COMPLETE 0x1 #define CMD_GET_FREQ 0x0 #define CMD_SET_FREQ 0x1 #define BUF_SZ 4 struct pcc_register_resource { u8 descriptor; u16 length; u8 space_id; u8 bit_width; u8 bit_offset; u8 access_size; u64 address; } __attribute__ ((packed)); struct pcc_memory_resource { u8 descriptor; u16 length; u8 space_id; u8 resource_usage; u8 type_specific; u64 granularity; u64 minimum; u64 maximum; u64 translation_offset; u64 address_length; } __attribute__ ((packed)); static struct cpufreq_driver pcc_cpufreq_driver; struct pcc_header { u32 signature; u16 length; u8 major; u8 minor; u32 features; u16 command; u16 status; u32 latency; u32 minimum_time; u32 maximum_time; u32 nominal; u32 throttled_frequency; u32 minimum_frequency; }; static void __iomem *pcch_virt_addr; static struct pcc_header __iomem *pcch_hdr; static DEFINE_SPINLOCK(pcc_lock); static struct acpi_generic_address doorbell; static u64 doorbell_preserve; static u64 doorbell_write; static u8 OSC_UUID[16] = {0x9F, 0x2C, 0x9B, 0x63, 0x91, 0x70, 0x1f, 0x49, 0xBB, 0x4F, 0xA5, 0x98, 0x2F, 0xA1, 0xB5, 0x46}; struct pcc_cpu { u32 input_offset; u32 output_offset; }; static struct pcc_cpu __percpu *pcc_cpu_info; static int pcc_cpufreq_verify(struct cpufreq_policy *policy) { cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); return 0; } static inline void pcc_cmd(void) { u64 doorbell_value; int i; acpi_read(&doorbell_value, &doorbell); acpi_write((doorbell_value & doorbell_preserve) | doorbell_write, &doorbell); for (i 
= 0; i < POLL_LOOPS; i++) { if (ioread16(&pcch_hdr->status) & CMD_COMPLETE) break; } } static inline void pcc_clear_mapping(void) { if (pcch_virt_addr) iounmap(pcch_virt_addr); pcch_virt_addr = NULL; } static unsigned int pcc_get_freq(unsigned int cpu) { struct pcc_cpu *pcc_cpu_data; unsigned int curr_freq; unsigned int freq_limit; u16 status; u32 input_buffer; u32 output_buffer; spin_lock(&pcc_lock); pr_debug("get: get_freq for CPU %d\n", cpu); pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); input_buffer = 0x1; iowrite32(input_buffer, (pcch_virt_addr + pcc_cpu_data->input_offset)); iowrite16(CMD_GET_FREQ, &pcch_hdr->command); pcc_cmd(); output_buffer = ioread32(pcch_virt_addr + pcc_cpu_data->output_offset); /* Clear the input buffer - we are done with the current command */ memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ); status = ioread16(&pcch_hdr->status); if (status != CMD_COMPLETE) { pr_debug("get: FAILED: for CPU %d, status is %d\n", cpu, status); goto cmd_incomplete; } iowrite16(0, &pcch_hdr->status); curr_freq = (((ioread32(&pcch_hdr->nominal) * (output_buffer & 0xff)) / 100) * 1000); pr_debug("get: SUCCESS: (virtual) output_offset for cpu %d is " "0x%p, contains a value of: 0x%x. 
Speed is: %d MHz\n", cpu, (pcch_virt_addr + pcc_cpu_data->output_offset), output_buffer, curr_freq); freq_limit = (output_buffer >> 8) & 0xff; if (freq_limit != 0xff) { pr_debug("get: frequency for cpu %d is being temporarily" " capped at %d\n", cpu, curr_freq); } spin_unlock(&pcc_lock); return curr_freq; cmd_incomplete: iowrite16(0, &pcch_hdr->status); spin_unlock(&pcc_lock); return 0; } static int pcc_cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { struct pcc_cpu *pcc_cpu_data; struct cpufreq_freqs freqs; u16 status; u32 input_buffer; int cpu; spin_lock(&pcc_lock); cpu = policy->cpu; pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); pr_debug("target: CPU %d should go to target freq: %d " "(virtual) input_offset is 0x%p\n", cpu, target_freq, (pcch_virt_addr + pcc_cpu_data->input_offset)); freqs.new = target_freq; cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); input_buffer = 0x1 | (((target_freq * 100) / (ioread32(&pcch_hdr->nominal) * 1000)) << 8); iowrite32(input_buffer, (pcch_virt_addr + pcc_cpu_data->input_offset)); iowrite16(CMD_SET_FREQ, &pcch_hdr->command); pcc_cmd(); /* Clear the input buffer - we are done with the current command */ memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ); status = ioread16(&pcch_hdr->status); if (status != CMD_COMPLETE) { pr_debug("target: FAILED for cpu %d, with status: 0x%x\n", cpu, status); goto cmd_incomplete; } iowrite16(0, &pcch_hdr->status); cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu); spin_unlock(&pcc_lock); return 0; cmd_incomplete: iowrite16(0, &pcch_hdr->status); spin_unlock(&pcc_lock); return -EINVAL; } static int pcc_get_offset(int cpu) { acpi_status status; struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *pccp, *offset; struct pcc_cpu *pcc_cpu_data; struct acpi_processor *pr; int ret = 0; pr = per_cpu(processors, cpu); pcc_cpu_data = 
per_cpu_ptr(pcc_cpu_info, cpu); if (!pr) return -ENODEV; status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer); if (ACPI_FAILURE(status)) return -ENODEV; pccp = buffer.pointer; if (!pccp || pccp->type != ACPI_TYPE_PACKAGE) { ret = -ENODEV; goto out_free; }; offset = &(pccp->package.elements[0]); if (!offset || offset->type != ACPI_TYPE_INTEGER) { ret = -ENODEV; goto out_free; } pcc_cpu_data->input_offset = offset->integer.value; offset = &(pccp->package.elements[1]); if (!offset || offset->type != ACPI_TYPE_INTEGER) { ret = -ENODEV; goto out_free; } pcc_cpu_data->output_offset = offset->integer.value; memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ); memset_io((pcch_virt_addr + pcc_cpu_data->output_offset), 0, BUF_SZ); pr_debug("pcc_get_offset: for CPU %d: pcc_cpu_data " "input_offset: 0x%x, pcc_cpu_data output_offset: 0x%x\n", cpu, pcc_cpu_data->input_offset, pcc_cpu_data->output_offset); out_free: kfree(buffer.pointer); return ret; } static int __init pcc_cpufreq_do_osc(acpi_handle *handle) { acpi_status status; struct acpi_object_list input; struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object in_params[4]; union acpi_object *out_obj; u32 capabilities[2]; u32 errors; u32 supported; int ret = 0; input.count = 4; input.pointer = in_params; in_params[0].type = ACPI_TYPE_BUFFER; in_params[0].buffer.length = 16; in_params[0].buffer.pointer = OSC_UUID; in_params[1].type = ACPI_TYPE_INTEGER; in_params[1].integer.value = 1; in_params[2].type = ACPI_TYPE_INTEGER; in_params[2].integer.value = 2; in_params[3].type = ACPI_TYPE_BUFFER; in_params[3].buffer.length = 8; in_params[3].buffer.pointer = (u8 *)&capabilities; capabilities[0] = OSC_QUERY_ENABLE; capabilities[1] = 0x1; status = acpi_evaluate_object(*handle, "_OSC", &input, &output); if (ACPI_FAILURE(status)) return -ENODEV; if (!output.length) return -ENODEV; out_obj = output.pointer; if (out_obj->type != ACPI_TYPE_BUFFER) { ret = -ENODEV; goto out_free; } errors = 
*((u32 *)out_obj->buffer.pointer) & ~(1 << 0); if (errors) { ret = -ENODEV; goto out_free; } supported = *((u32 *)(out_obj->buffer.pointer + 4)); if (!(supported & 0x1)) { ret = -ENODEV; goto out_free; } kfree(output.pointer); capabilities[0] = 0x0; capabilities[1] = 0x1; status = acpi_evaluate_object(*handle, "_OSC", &input, &output); if (ACPI_FAILURE(status)) return -ENODEV; if (!output.length) return -ENODEV; out_obj = output.pointer; if (out_obj->type != ACPI_TYPE_BUFFER) { ret = -ENODEV; goto out_free; } errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0); if (errors) { ret = -ENODEV; goto out_free; } supported = *((u32 *)(out_obj->buffer.pointer + 4)); if (!(supported & 0x1)) { ret = -ENODEV; goto out_free; } out_free: kfree(output.pointer); return ret; } static int __init pcc_cpufreq_probe(void) { acpi_status status; struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; struct pcc_memory_resource *mem_resource; struct pcc_register_resource *reg_resource; union acpi_object *out_obj, *member; acpi_handle handle, osc_handle, pcch_handle; int ret = 0; status = acpi_get_handle(NULL, "\\_SB", &handle); if (ACPI_FAILURE(status)) return -ENODEV; status = acpi_get_handle(handle, "PCCH", &pcch_handle); if (ACPI_FAILURE(status)) return -ENODEV; status = acpi_get_handle(handle, "_OSC", &osc_handle); if (ACPI_SUCCESS(status)) { ret = pcc_cpufreq_do_osc(&osc_handle); if (ret) pr_debug("probe: _OSC evaluation did not succeed\n"); /* Firmware's use of _OSC is optional */ ret = 0; } status = acpi_evaluate_object(handle, "PCCH", NULL, &output); if (ACPI_FAILURE(status)) return -ENODEV; out_obj = output.pointer; if (out_obj->type != ACPI_TYPE_PACKAGE) { ret = -ENODEV; goto out_free; } member = &out_obj->package.elements[0]; if (member->type != ACPI_TYPE_BUFFER) { ret = -ENODEV; goto out_free; } mem_resource = (struct pcc_memory_resource *)member->buffer.pointer; pr_debug("probe: mem_resource descriptor: 0x%x," " length: %d, space_id: %d, resource_usage: %d," " 
type_specific: %d, granularity: 0x%llx," " minimum: 0x%llx, maximum: 0x%llx," " translation_offset: 0x%llx, address_length: 0x%llx\n", mem_resource->descriptor, mem_resource->length, mem_resource->space_id, mem_resource->resource_usage, mem_resource->type_specific, mem_resource->granularity, mem_resource->minimum, mem_resource->maximum, mem_resource->translation_offset, mem_resource->address_length); if (mem_resource->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) { ret = -ENODEV; goto out_free; } pcch_virt_addr = ioremap_nocache(mem_resource->minimum, mem_resource->address_length); if (pcch_virt_addr == NULL) { pr_debug("probe: could not map shared mem region\n"); ret = -ENOMEM; goto out_free; } pcch_hdr = pcch_virt_addr; pr_debug("probe: PCCH header (virtual) addr: 0x%p\n", pcch_hdr); pr_debug("probe: PCCH header is at physical address: 0x%llx," " signature: 0x%x, length: %d bytes, major: %d, minor: %d," " supported features: 0x%x, command field: 0x%x," " status field: 0x%x, nominal latency: %d us\n", mem_resource->minimum, ioread32(&pcch_hdr->signature), ioread16(&pcch_hdr->length), ioread8(&pcch_hdr->major), ioread8(&pcch_hdr->minor), ioread32(&pcch_hdr->features), ioread16(&pcch_hdr->command), ioread16(&pcch_hdr->status), ioread32(&pcch_hdr->latency)); pr_debug("probe: min time between commands: %d us," " max time between commands: %d us," " nominal CPU frequency: %d MHz," " minimum CPU frequency: %d MHz," " minimum CPU frequency without throttling: %d MHz\n", ioread32(&pcch_hdr->minimum_time), ioread32(&pcch_hdr->maximum_time), ioread32(&pcch_hdr->nominal), ioread32(&pcch_hdr->throttled_frequency), ioread32(&pcch_hdr->minimum_frequency)); member = &out_obj->package.elements[1]; if (member->type != ACPI_TYPE_BUFFER) { ret = -ENODEV; goto pcch_free; } reg_resource = (struct pcc_register_resource *)member->buffer.pointer; doorbell.space_id = reg_resource->space_id; doorbell.bit_width = reg_resource->bit_width; doorbell.bit_offset = reg_resource->bit_offset; 
doorbell.access_width = 64; doorbell.address = reg_resource->address; pr_debug("probe: doorbell: space_id is %d, bit_width is %d, " "bit_offset is %d, access_width is %d, address is 0x%llx\n", doorbell.space_id, doorbell.bit_width, doorbell.bit_offset, doorbell.access_width, reg_resource->address); member = &out_obj->package.elements[2]; if (member->type != ACPI_TYPE_INTEGER) { ret = -ENODEV; goto pcch_free; } doorbell_preserve = member->integer.value; member = &out_obj->package.elements[3]; if (member->type != ACPI_TYPE_INTEGER) { ret = -ENODEV; goto pcch_free; } doorbell_write = member->integer.value; pr_debug("probe: doorbell_preserve: 0x%llx," " doorbell_write: 0x%llx\n", doorbell_preserve, doorbell_write); pcc_cpu_info = alloc_percpu(struct pcc_cpu); if (!pcc_cpu_info) { ret = -ENOMEM; goto pcch_free; } printk(KERN_DEBUG "pcc-cpufreq: (v%s) driver loaded with frequency" " limits: %d MHz, %d MHz\n", PCC_VERSION, ioread32(&pcch_hdr->minimum_frequency), ioread32(&pcch_hdr->nominal)); kfree(output.pointer); return ret; pcch_free: pcc_clear_mapping(); out_free: kfree(output.pointer); return ret; } static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy) { unsigned int cpu = policy->cpu; unsigned int result = 0; if (!pcch_virt_addr) { result = -1; goto out; } result = pcc_get_offset(cpu); if (result) { pr_debug("init: PCCP evaluation failed\n"); goto out; } policy->max = policy->cpuinfo.max_freq = ioread32(&pcch_hdr->nominal) * 1000; policy->min = policy->cpuinfo.min_freq = ioread32(&pcch_hdr->minimum_frequency) * 1000; policy->cur = pcc_get_freq(cpu); if (!policy->cur) { pr_debug("init: Unable to get current CPU frequency\n"); result = -EINVAL; goto out; } pr_debug("init: policy->max is %d, policy->min is %d\n", policy->max, policy->min); out: return result; } static int pcc_cpufreq_cpu_exit(struct cpufreq_policy *policy) { return 0; } static struct cpufreq_driver pcc_cpufreq_driver = { .flags = CPUFREQ_CONST_LOOPS, .get = pcc_get_freq, .verify = 
pcc_cpufreq_verify, .target = pcc_cpufreq_target, .init = pcc_cpufreq_cpu_init, .exit = pcc_cpufreq_cpu_exit, .name = "pcc-cpufreq", .owner = THIS_MODULE, }; static int __init pcc_cpufreq_init(void) { int ret; if (acpi_disabled) return 0; ret = pcc_cpufreq_probe(); if (ret) { pr_debug("pcc_cpufreq_init: PCCH evaluation failed\n"); return ret; } ret = cpufreq_register_driver(&pcc_cpufreq_driver); return ret; } static void __exit pcc_cpufreq_exit(void) { cpufreq_unregister_driver(&pcc_cpufreq_driver); pcc_clear_mapping(); free_percpu(pcc_cpu_info); } MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar"); MODULE_VERSION(PCC_VERSION); MODULE_DESCRIPTION("Processor Clocking Control interface driver"); MODULE_LICENSE("GPL"); late_initcall(pcc_cpufreq_init); module_exit(pcc_cpufreq_exit);
gpl-2.0
hiikezoe/android_kernel_huawei_204hw
drivers/regulator/core.c
824
96792
/* * core.c -- Voltage/Current Regulator framework. * * Copyright 2007, 2008 Wolfson Microelectronics PLC. * Copyright 2008 SlimLogic Ltd. * * Author: Liam Girdwood <lrg@slimlogic.co.uk> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/debugfs.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/async.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/suspend.h> #include <linux/delay.h> #include <linux/of.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/regulator/of_regulator.h> #include <linux/regulator/consumer.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/module.h> #define CREATE_TRACE_POINTS #include <trace/events/regulator.h> #include "dummy.h" #define rdev_crit(rdev, fmt, ...) \ pr_crit("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) #define rdev_err(rdev, fmt, ...) \ pr_err("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) #define rdev_warn(rdev, fmt, ...) \ pr_warn("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) #define rdev_info(rdev, fmt, ...) \ pr_info("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) #define rdev_dbg(rdev, fmt, ...) \ pr_debug("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) static DEFINE_MUTEX(regulator_list_mutex); static LIST_HEAD(regulator_list); static LIST_HEAD(regulator_map_list); static bool has_full_constraints; static bool board_wants_dummy_regulator; static int suppress_info_printing; static struct dentry *debugfs_root; /* * struct regulator_map * * Used to provide symbolic supply names to devices. 
*/ struct regulator_map { struct list_head list; const char *dev_name; /* The dev_name() for the consumer */ const char *supply; struct regulator_dev *regulator; }; /* * struct regulator * * One for each consumer device. */ struct regulator { struct device *dev; struct list_head list; int uA_load; int min_uV; int max_uV; int enabled; char *supply_name; struct device_attribute dev_attr; struct regulator_dev *rdev; struct dentry *debugfs; }; static int _regulator_is_enabled(struct regulator_dev *rdev); static int _regulator_disable(struct regulator_dev *rdev); static int _regulator_get_voltage(struct regulator_dev *rdev); static int _regulator_get_current_limit(struct regulator_dev *rdev); static unsigned int _regulator_get_mode(struct regulator_dev *rdev); static void _notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data); static int _regulator_do_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV); static struct regulator *create_regulator(struct regulator_dev *rdev, struct device *dev, const char *supply_name); static const char *rdev_get_name(struct regulator_dev *rdev) { if (rdev->constraints && rdev->constraints->name) return rdev->constraints->name; else if (rdev->desc->name) return rdev->desc->name; else return ""; } /* gets the regulator for a given consumer device */ static struct regulator *get_device_regulator(struct device *dev) { struct regulator *regulator = NULL; struct regulator_dev *rdev; mutex_lock(&regulator_list_mutex); list_for_each_entry(rdev, &regulator_list, list) { mutex_lock(&rdev->mutex); list_for_each_entry(regulator, &rdev->consumer_list, list) { if (regulator->dev == dev) { mutex_unlock(&rdev->mutex); mutex_unlock(&regulator_list_mutex); return regulator; } } mutex_unlock(&rdev->mutex); } mutex_unlock(&regulator_list_mutex); return NULL; } /** * of_get_regulator - get a regulator device node based on supply name * @dev: Device pointer for the consumer (of regulator) device * @supply: regulator 
supply name * * Extract the regulator device node corresponding to the supply name. * retruns the device node corresponding to the regulator if found, else * returns NULL. */ static struct device_node *of_get_regulator(struct device *dev, const char *supply) { struct device_node *regnode = NULL; char prop_name[32]; /* 32 is max size of property name */ dev_dbg(dev, "Looking up %s-supply from device tree\n", supply); snprintf(prop_name, 32, "%s-supply", supply); regnode = of_parse_phandle(dev->of_node, prop_name, 0); if (!regnode) { dev_dbg(dev, "Looking up %s property in node %s failed", prop_name, dev->of_node->full_name); return NULL; } return regnode; } /* Platform voltage constraint check */ static int regulator_check_voltage(struct regulator_dev *rdev, int *min_uV, int *max_uV) { BUG_ON(*min_uV > *max_uV); if (!rdev->constraints) { rdev_err(rdev, "no constraints\n"); return -ENODEV; } if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) { rdev_err(rdev, "operation not allowed\n"); return -EPERM; } /* check if requested voltage range actually overlaps the constraints */ if (*max_uV < rdev->constraints->min_uV || *min_uV > rdev->constraints->max_uV) { rdev_err(rdev, "requested voltage range [%d, %d] does not fit " "within constraints: [%d, %d]\n", *min_uV, *max_uV, rdev->constraints->min_uV, rdev->constraints->max_uV); return -EINVAL; } if (*max_uV > rdev->constraints->max_uV) *max_uV = rdev->constraints->max_uV; if (*min_uV < rdev->constraints->min_uV) *min_uV = rdev->constraints->min_uV; if (*min_uV > *max_uV) { rdev_err(rdev, "unsupportable voltage range: %d-%duV\n", *min_uV, *max_uV); return -EINVAL; } return 0; } /* Make sure we select a voltage that suits the needs of all * regulator consumers */ static int regulator_check_consumers(struct regulator_dev *rdev, int *min_uV, int *max_uV) { struct regulator *regulator; int init_min_uV = *min_uV; int init_max_uV = *max_uV; list_for_each_entry(regulator, &rdev->consumer_list, list) { /* * Assume 
consumers that didn't say anything are OK * with anything in the constraint range. */ if (!regulator->min_uV && !regulator->max_uV) continue; if (init_max_uV < regulator->min_uV || init_min_uV > regulator->max_uV) rdev_err(rdev, "requested voltage range [%d, %d] does " "not fit within previously voted range: " "[%d, %d]\n", init_min_uV, init_max_uV, regulator->min_uV, regulator->max_uV); if (*max_uV > regulator->max_uV) *max_uV = regulator->max_uV; if (*min_uV < regulator->min_uV) *min_uV = regulator->min_uV; } if (*min_uV > *max_uV) return -EINVAL; return 0; } /* current constraint check */ static int regulator_check_current_limit(struct regulator_dev *rdev, int *min_uA, int *max_uA) { BUG_ON(*min_uA > *max_uA); if (!rdev->constraints) { rdev_err(rdev, "no constraints\n"); return -ENODEV; } if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_CURRENT)) { rdev_err(rdev, "operation not allowed\n"); return -EPERM; } if (*max_uA > rdev->constraints->max_uA) *max_uA = rdev->constraints->max_uA; if (*min_uA < rdev->constraints->min_uA) *min_uA = rdev->constraints->min_uA; if (*min_uA > *max_uA) { rdev_err(rdev, "unsupportable current range: %d-%duA\n", *min_uA, *max_uA); return -EINVAL; } return 0; } /* operating mode constraint check */ static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode) { switch (*mode) { case REGULATOR_MODE_FAST: case REGULATOR_MODE_NORMAL: case REGULATOR_MODE_IDLE: case REGULATOR_MODE_STANDBY: break; default: rdev_err(rdev, "invalid mode %x specified\n", *mode); return -EINVAL; } if (!rdev->constraints) { rdev_err(rdev, "no constraints\n"); return -ENODEV; } if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_MODE)) { rdev_err(rdev, "operation not allowed\n"); return -EPERM; } /* The modes are bitmasks, the most power hungry modes having * the lowest values. If the requested mode isn't supported * try higher modes. 
*/ while (*mode) { if (rdev->constraints->valid_modes_mask & *mode) return 0; *mode /= 2; } return -EINVAL; } /* dynamic regulator mode switching constraint check */ static int regulator_check_drms(struct regulator_dev *rdev) { if (!rdev->constraints) { rdev_dbg(rdev, "no constraints\n"); return -ENODEV; } if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) { rdev_dbg(rdev, "operation not allowed\n"); return -EPERM; } return 0; } static ssize_t device_requested_uA_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator *regulator; regulator = get_device_regulator(dev); if (regulator == NULL) return 0; return sprintf(buf, "%d\n", regulator->uA_load); } static ssize_t regulator_uV_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); ssize_t ret; mutex_lock(&rdev->mutex); ret = sprintf(buf, "%d\n", _regulator_get_voltage(rdev)); mutex_unlock(&rdev->mutex); return ret; } static DEVICE_ATTR(microvolts, 0444, regulator_uV_show, NULL); static ssize_t regulator_uA_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return sprintf(buf, "%d\n", _regulator_get_current_limit(rdev)); } static DEVICE_ATTR(microamps, 0444, regulator_uA_show, NULL); static ssize_t regulator_name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return sprintf(buf, "%s\n", rdev_get_name(rdev)); } static ssize_t regulator_print_opmode(char *buf, int mode) { switch (mode) { case REGULATOR_MODE_FAST: return sprintf(buf, "fast\n"); case REGULATOR_MODE_NORMAL: return sprintf(buf, "normal\n"); case REGULATOR_MODE_IDLE: return sprintf(buf, "idle\n"); case REGULATOR_MODE_STANDBY: return sprintf(buf, "standby\n"); } return sprintf(buf, "unknown\n"); } static ssize_t regulator_opmode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct 
regulator_dev *rdev = dev_get_drvdata(dev); return regulator_print_opmode(buf, _regulator_get_mode(rdev)); } static DEVICE_ATTR(opmode, 0444, regulator_opmode_show, NULL); static ssize_t regulator_print_state(char *buf, int state) { if (state > 0) return sprintf(buf, "enabled\n"); else if (state == 0) return sprintf(buf, "disabled\n"); else return sprintf(buf, "unknown\n"); } static ssize_t regulator_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); ssize_t ret; mutex_lock(&rdev->mutex); ret = regulator_print_state(buf, _regulator_is_enabled(rdev)); mutex_unlock(&rdev->mutex); return ret; } static DEVICE_ATTR(state, 0444, regulator_state_show, NULL); static ssize_t regulator_status_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); int status; char *label; status = rdev->desc->ops->get_status(rdev); if (status < 0) return status; switch (status) { case REGULATOR_STATUS_OFF: label = "off"; break; case REGULATOR_STATUS_ON: label = "on"; break; case REGULATOR_STATUS_ERROR: label = "error"; break; case REGULATOR_STATUS_FAST: label = "fast"; break; case REGULATOR_STATUS_NORMAL: label = "normal"; break; case REGULATOR_STATUS_IDLE: label = "idle"; break; case REGULATOR_STATUS_STANDBY: label = "standby"; break; default: return -ERANGE; } return sprintf(buf, "%s\n", label); } static DEVICE_ATTR(status, 0444, regulator_status_show, NULL); static ssize_t regulator_min_uA_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); if (!rdev->constraints) return sprintf(buf, "constraint not defined\n"); return sprintf(buf, "%d\n", rdev->constraints->min_uA); } static DEVICE_ATTR(min_microamps, 0444, regulator_min_uA_show, NULL); static ssize_t regulator_max_uA_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = 
dev_get_drvdata(dev); if (!rdev->constraints) return sprintf(buf, "constraint not defined\n"); return sprintf(buf, "%d\n", rdev->constraints->max_uA); } static DEVICE_ATTR(max_microamps, 0444, regulator_max_uA_show, NULL); static ssize_t regulator_min_uV_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); if (!rdev->constraints) return sprintf(buf, "constraint not defined\n"); return sprintf(buf, "%d\n", rdev->constraints->min_uV); } static DEVICE_ATTR(min_microvolts, 0444, regulator_min_uV_show, NULL); static ssize_t regulator_max_uV_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); if (!rdev->constraints) return sprintf(buf, "constraint not defined\n"); return sprintf(buf, "%d\n", rdev->constraints->max_uV); } static DEVICE_ATTR(max_microvolts, 0444, regulator_max_uV_show, NULL); static ssize_t regulator_total_uA_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); struct regulator *regulator; int uA = 0; mutex_lock(&rdev->mutex); list_for_each_entry(regulator, &rdev->consumer_list, list) uA += regulator->uA_load; mutex_unlock(&rdev->mutex); return sprintf(buf, "%d\n", uA); } static DEVICE_ATTR(requested_microamps, 0444, regulator_total_uA_show, NULL); static ssize_t regulator_num_users_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return sprintf(buf, "%d\n", rdev->use_count); } static ssize_t regulator_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); switch (rdev->desc->type) { case REGULATOR_VOLTAGE: return sprintf(buf, "voltage\n"); case REGULATOR_CURRENT: return sprintf(buf, "current\n"); } return sprintf(buf, "unknown\n"); } static ssize_t regulator_suspend_mem_uV_show(struct device *dev, struct 
device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return sprintf(buf, "%d\n", rdev->constraints->state_mem.uV); } static DEVICE_ATTR(suspend_mem_microvolts, 0444, regulator_suspend_mem_uV_show, NULL); static ssize_t regulator_suspend_disk_uV_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return sprintf(buf, "%d\n", rdev->constraints->state_disk.uV); } static DEVICE_ATTR(suspend_disk_microvolts, 0444, regulator_suspend_disk_uV_show, NULL); static ssize_t regulator_suspend_standby_uV_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return sprintf(buf, "%d\n", rdev->constraints->state_standby.uV); } static DEVICE_ATTR(suspend_standby_microvolts, 0444, regulator_suspend_standby_uV_show, NULL); static ssize_t regulator_suspend_mem_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return regulator_print_opmode(buf, rdev->constraints->state_mem.mode); } static DEVICE_ATTR(suspend_mem_mode, 0444, regulator_suspend_mem_mode_show, NULL); static ssize_t regulator_suspend_disk_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return regulator_print_opmode(buf, rdev->constraints->state_disk.mode); } static DEVICE_ATTR(suspend_disk_mode, 0444, regulator_suspend_disk_mode_show, NULL); static ssize_t regulator_suspend_standby_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return regulator_print_opmode(buf, rdev->constraints->state_standby.mode); } static DEVICE_ATTR(suspend_standby_mode, 0444, regulator_suspend_standby_mode_show, NULL); static ssize_t regulator_suspend_mem_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = 
dev_get_drvdata(dev); return regulator_print_state(buf, rdev->constraints->state_mem.enabled); } static DEVICE_ATTR(suspend_mem_state, 0444, regulator_suspend_mem_state_show, NULL); static ssize_t regulator_suspend_disk_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return regulator_print_state(buf, rdev->constraints->state_disk.enabled); } static DEVICE_ATTR(suspend_disk_state, 0444, regulator_suspend_disk_state_show, NULL); static ssize_t regulator_suspend_standby_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return regulator_print_state(buf, rdev->constraints->state_standby.enabled); } static DEVICE_ATTR(suspend_standby_state, 0444, regulator_suspend_standby_state_show, NULL); /* * These are the only attributes are present for all regulators. * Other attributes are a function of regulator functionality. */ static struct device_attribute regulator_dev_attrs[] = { __ATTR(name, 0444, regulator_name_show, NULL), __ATTR(num_users, 0444, regulator_num_users_show, NULL), __ATTR(type, 0444, regulator_type_show, NULL), __ATTR_NULL, }; static void regulator_dev_release(struct device *dev) { struct regulator_dev *rdev = dev_get_drvdata(dev); kfree(rdev); } static struct class regulator_class = { .name = "regulator", .dev_release = regulator_dev_release, .dev_attrs = regulator_dev_attrs, }; /* Calculate the new optimum regulator operating mode based on the new total * consumer load. 
All locks held by caller */ static void drms_uA_update(struct regulator_dev *rdev) { struct regulator *sibling; int current_uA = 0, output_uV, input_uV, err; unsigned int regulator_curr_mode, mode; err = regulator_check_drms(rdev); if (err < 0 || !rdev->desc->ops->get_optimum_mode || (!rdev->desc->ops->get_voltage && !rdev->desc->ops->get_voltage_sel) || !rdev->desc->ops->set_mode) return; /* get output voltage */ output_uV = _regulator_get_voltage(rdev); if (output_uV <= 0) return; /* get input voltage */ input_uV = 0; if (rdev->supply) input_uV = _regulator_get_voltage(rdev); if (input_uV <= 0) input_uV = rdev->constraints->input_uV; if (input_uV <= 0) return; /* calc total requested load */ list_for_each_entry(sibling, &rdev->consumer_list, list) current_uA += sibling->uA_load; /* now get the optimum mode for our new total regulator load */ mode = rdev->desc->ops->get_optimum_mode(rdev, input_uV, output_uV, current_uA); /* check the new mode is allowed */ err = regulator_mode_constrain(rdev, &mode); /* return if the same mode is requested */ if (rdev->desc->ops->get_mode) { regulator_curr_mode = rdev->desc->ops->get_mode(rdev); if (regulator_curr_mode == mode) return; } else return; if (err == 0) rdev->desc->ops->set_mode(rdev, mode); } static int suspend_set_state(struct regulator_dev *rdev, struct regulator_state *rstate) { int ret = 0; bool can_set_state; can_set_state = rdev->desc->ops->set_suspend_enable && rdev->desc->ops->set_suspend_disable; /* If we have no suspend mode configration don't set anything; * only warn if the driver actually makes the suspend mode * configurable. 
*/ if (!rstate->enabled && !rstate->disabled) { if (can_set_state) rdev_warn(rdev, "No configuration\n"); return 0; } if (rstate->enabled && rstate->disabled) { rdev_err(rdev, "invalid configuration\n"); return -EINVAL; } if (!can_set_state) { rdev_err(rdev, "no way to set suspend state\n"); return -EINVAL; } if (rstate->enabled) ret = rdev->desc->ops->set_suspend_enable(rdev); else ret = rdev->desc->ops->set_suspend_disable(rdev); if (ret < 0) { rdev_err(rdev, "failed to enabled/disable\n"); return ret; } if (rdev->desc->ops->set_suspend_voltage && rstate->uV > 0) { ret = rdev->desc->ops->set_suspend_voltage(rdev, rstate->uV); if (ret < 0) { rdev_err(rdev, "failed to set voltage\n"); return ret; } } if (rdev->desc->ops->set_suspend_mode && rstate->mode > 0) { ret = rdev->desc->ops->set_suspend_mode(rdev, rstate->mode); if (ret < 0) { rdev_err(rdev, "failed to set mode\n"); return ret; } } return ret; } /* locks held by caller */ static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state) { if (!rdev->constraints) return -EINVAL; switch (state) { case PM_SUSPEND_STANDBY: return suspend_set_state(rdev, &rdev->constraints->state_standby); case PM_SUSPEND_MEM: return suspend_set_state(rdev, &rdev->constraints->state_mem); case PM_SUSPEND_MAX: return suspend_set_state(rdev, &rdev->constraints->state_disk); default: return -EINVAL; } } static void print_constraints(struct regulator_dev *rdev) { struct regulation_constraints *constraints = rdev->constraints; char buf[80] = ""; int count = 0; int ret; if (constraints->min_uV && constraints->max_uV) { if (constraints->min_uV == constraints->max_uV) count += sprintf(buf + count, "%d mV ", constraints->min_uV / 1000); else count += sprintf(buf + count, "%d <--> %d mV ", constraints->min_uV / 1000, constraints->max_uV / 1000); } if (!constraints->min_uV || constraints->min_uV != constraints->max_uV) { ret = _regulator_get_voltage(rdev); if (ret > 0) count += sprintf(buf + count, "at %d mV ", ret / 1000); } 
if (constraints->uV_offset) count += sprintf(buf, "%dmV offset ", constraints->uV_offset / 1000); if (constraints->min_uA && constraints->max_uA) { if (constraints->min_uA == constraints->max_uA) count += sprintf(buf + count, "%d mA ", constraints->min_uA / 1000); else count += sprintf(buf + count, "%d <--> %d mA ", constraints->min_uA / 1000, constraints->max_uA / 1000); } if (!constraints->min_uA || constraints->min_uA != constraints->max_uA) { ret = _regulator_get_current_limit(rdev); if (ret > 0) count += sprintf(buf + count, "at %d mA ", ret / 1000); } if (constraints->valid_modes_mask & REGULATOR_MODE_FAST) count += sprintf(buf + count, "fast "); if (constraints->valid_modes_mask & REGULATOR_MODE_NORMAL) count += sprintf(buf + count, "normal "); if (constraints->valid_modes_mask & REGULATOR_MODE_IDLE) count += sprintf(buf + count, "idle "); if (constraints->valid_modes_mask & REGULATOR_MODE_STANDBY) count += sprintf(buf + count, "standby"); rdev_info(rdev, "%s\n", buf); if ((constraints->min_uV != constraints->max_uV) && !(constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) rdev_warn(rdev, "Voltage range but no REGULATOR_CHANGE_VOLTAGE\n"); } static int machine_constraints_voltage(struct regulator_dev *rdev, struct regulation_constraints *constraints) { struct regulator_ops *ops = rdev->desc->ops; int ret; /* do we need to apply the constraint voltage */ if (rdev->constraints->apply_uV && rdev->constraints->min_uV == rdev->constraints->max_uV) { ret = _regulator_do_set_voltage(rdev, rdev->constraints->min_uV, rdev->constraints->max_uV); if (ret < 0) { rdev_err(rdev, "failed to apply %duV constraint\n", rdev->constraints->min_uV); return ret; } } /* constrain machine-level voltage specs to fit * the actual range supported by this regulator. 
*/ if (ops->list_voltage && rdev->desc->n_voltages) { int count = rdev->desc->n_voltages; int i; int min_uV = INT_MAX; int max_uV = INT_MIN; int cmin = constraints->min_uV; int cmax = constraints->max_uV; /* it's safe to autoconfigure fixed-voltage supplies and the constraints are used by list_voltage. */ if (count == 1 && !cmin) { cmin = 1; cmax = INT_MAX; constraints->min_uV = cmin; constraints->max_uV = cmax; } /* voltage constraints are optional */ if ((cmin == 0) && (cmax == 0)) return 0; /* else require explicit machine-level constraints */ if (cmin <= 0 || cmax <= 0 || cmax < cmin) { rdev_err(rdev, "invalid voltage constraints\n"); return -EINVAL; } /* initial: [cmin..cmax] valid, [min_uV..max_uV] not */ for (i = 0; i < count; i++) { int value; value = ops->list_voltage(rdev, i); if (value <= 0) continue; /* maybe adjust [min_uV..max_uV] */ if (value >= cmin && value < min_uV) min_uV = value; if (value <= cmax && value > max_uV) max_uV = value; } /* final: [min_uV..max_uV] valid iff constraints valid */ if (max_uV < min_uV) { rdev_err(rdev, "unsupportable voltage constraints\n"); return -EINVAL; } /* use regulator's subset of machine constraints */ if (constraints->min_uV < min_uV) { rdev_dbg(rdev, "override min_uV, %d -> %d\n", constraints->min_uV, min_uV); constraints->min_uV = min_uV; } if (constraints->max_uV > max_uV) { rdev_dbg(rdev, "override max_uV, %d -> %d\n", constraints->max_uV, max_uV); constraints->max_uV = max_uV; } } return 0; } /** * set_machine_constraints - sets regulator constraints * @rdev: regulator source * @constraints: constraints to apply * * Allows platform initialisation code to define and constrain * regulator circuits e.g. valid voltage/current ranges, etc. NOTE: * Constraints *must* be set by platform code in order for some * regulator operations to proceed i.e. set_voltage, set_current_limit, * set_mode. 
*/ static int set_machine_constraints(struct regulator_dev *rdev, const struct regulation_constraints *constraints) { int ret = 0; struct regulator_ops *ops = rdev->desc->ops; if (constraints) rdev->constraints = kmemdup(constraints, sizeof(*constraints), GFP_KERNEL); else rdev->constraints = kzalloc(sizeof(*constraints), GFP_KERNEL); if (!rdev->constraints) return -ENOMEM; ret = machine_constraints_voltage(rdev, rdev->constraints); if (ret != 0) goto out; /* do we need to setup our suspend state */ if (rdev->constraints->initial_state) { ret = suspend_prepare(rdev, rdev->constraints->initial_state); if (ret < 0) { rdev_err(rdev, "failed to set suspend state\n"); goto out; } } if (rdev->constraints->initial_mode) { if (!ops->set_mode) { rdev_err(rdev, "no set_mode operation\n"); ret = -EINVAL; goto out; } ret = ops->set_mode(rdev, rdev->constraints->initial_mode); if (ret < 0) { rdev_err(rdev, "failed to set initial mode: %d\n", ret); goto out; } } /* If the constraints say the regulator should be on at this point * and we have control then make sure it is enabled. */ if ((rdev->constraints->always_on || rdev->constraints->boot_on) && ops->enable) { ret = ops->enable(rdev); if (ret < 0) { rdev_err(rdev, "failed to enable\n"); goto out; } } if (!suppress_info_printing) print_constraints(rdev); return 0; out: kfree(rdev->constraints); rdev->constraints = NULL; return ret; } /** * set_supply - set regulator supply regulator * @rdev: regulator name * @supply_rdev: supply regulator name * * Called by platform initialisation code to set the supply regulator for this * regulator. This ensures that a regulators supply will also be enabled by the * core if it's child is enabled. 
 */
static int set_supply(struct regulator_dev *rdev,
	struct regulator_dev *supply_rdev)
{
	int err;

	if (!suppress_info_printing)
		rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));

	/* The supply is tracked as an ordinary consumer of supply_rdev,
	 * registered against this regulator's own struct device, so the
	 * normal enable/disable refcounting applies to the chain.
	 */
	rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
	if (rdev->supply == NULL) {
		err = -ENOMEM;
		return err;
	}

	return 0;
}

/**
 * set_consumer_device_supply - Bind a regulator to a symbolic supply
 * @rdev: regulator source
 * @consumer_dev_name: dev_name() string for device supply applies to
 * @supply: symbolic name for supply
 *
 * Allows platform initialisation code to map physical regulator
 * sources to symbolic names for supplies for use by devices.  Devices
 * should use these symbolic names to request regulators, avoiding the
 * need to provide board-specific regulator names as platform data.
 *
 * Returns 0 on success, -EINVAL for a NULL supply name, -EBUSY if the
 * (device, supply) pair is already bound to another regulator, or
 * -ENOMEM on allocation failure.
 */
static int set_consumer_device_supply(struct regulator_dev *rdev,
				      const char *consumer_dev_name,
				      const char *supply)
{
	struct regulator_map *node;
	int has_dev;

	if (supply == NULL)
		return -EINVAL;

	/* A NULL dev name means a system-wide (deviceless) mapping */
	if (consumer_dev_name != NULL)
		has_dev = 1;
	else
		has_dev = 0;

	/* Reject duplicates: only one regulator may back a given
	 * (dev_name, supply) pair, including the deviceless case where
	 * both dev names are NULL.
	 */
	list_for_each_entry(node, &regulator_map_list, list) {
		if (node->dev_name && consumer_dev_name) {
			if (strcmp(node->dev_name, consumer_dev_name) != 0)
				continue;
		} else if (node->dev_name || consumer_dev_name) {
			/* exactly one side is deviceless: not a match */
			continue;
		}

		if (strcmp(node->supply, supply) != 0)
			continue;

		pr_debug("%s: %s/%s is '%s' supply; fail %s/%s\n",
			 consumer_dev_name,
			 dev_name(&node->regulator->dev),
			 node->regulator->desc->name,
			 supply,
			 dev_name(&rdev->dev), rdev_get_name(rdev));
		return -EBUSY;
	}

	node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL);
	if (node == NULL)
		return -ENOMEM;

	node->regulator = rdev;
	/* supply is assumed to be a string with static lifetime (platform
	 * data); only dev_name is duplicated — TODO confirm against callers
	 */
	node->supply = supply;

	if (has_dev) {
		node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL);
		if (node->dev_name == NULL) {
			kfree(node);
			return -ENOMEM;
		}
	}

	list_add(&node->list, &regulator_map_list);
	return 0;
}

/* Remove every consumer mapping that points at rdev (unregister path). */
static void unset_regulator_supplies(struct regulator_dev *rdev)
{
	struct regulator_map *node, *n;

	list_for_each_entry_safe(node,
n, &regulator_map_list, list) { if (rdev == node->regulator) { list_del(&node->list); kfree(node->dev_name); kfree(node); } } } #define REG_STR_SIZE 64 static struct regulator *create_regulator(struct regulator_dev *rdev, struct device *dev, const char *supply_name) { struct regulator *regulator; char buf[REG_STR_SIZE]; int err, size; regulator = kzalloc(sizeof(*regulator), GFP_KERNEL); if (regulator == NULL) return NULL; mutex_lock(&rdev->mutex); regulator->rdev = rdev; list_add(&regulator->list, &rdev->consumer_list); if (dev) { /* create a 'requested_microamps_name' sysfs entry */ size = scnprintf(buf, REG_STR_SIZE, "microamps_requested_%s-%s", dev_name(dev), supply_name); if (size >= REG_STR_SIZE) goto overflow_err; regulator->dev = dev; sysfs_attr_init(&regulator->dev_attr.attr); regulator->dev_attr.attr.name = kstrdup(buf, GFP_KERNEL); if (regulator->dev_attr.attr.name == NULL) goto attr_name_err; regulator->dev_attr.attr.mode = 0444; regulator->dev_attr.show = device_requested_uA_show; err = device_create_file(dev, &regulator->dev_attr); if (err < 0) { rdev_warn(rdev, "could not add regulator_dev requested microamps sysfs entry\n"); goto attr_name_err; } /* also add a link to the device sysfs entry */ size = scnprintf(buf, REG_STR_SIZE, "%s-%s", dev->kobj.name, supply_name); if (size >= REG_STR_SIZE) goto attr_err; regulator->supply_name = kstrdup(buf, GFP_KERNEL); if (regulator->supply_name == NULL) goto attr_err; err = sysfs_create_link(&rdev->dev.kobj, &dev->kobj, buf); if (err) { rdev_warn(rdev, "could not add device link %s err %d\n", dev->kobj.name, err); goto link_name_err; } } else { regulator->supply_name = kstrdup(supply_name, GFP_KERNEL); if (regulator->supply_name == NULL) goto attr_err; } regulator->debugfs = debugfs_create_dir(regulator->supply_name, rdev->debugfs); if (!regulator->debugfs) { rdev_warn(rdev, "Failed to create debugfs directory\n"); } else { debugfs_create_u32("uA_load", 0444, regulator->debugfs, &regulator->uA_load); 
debugfs_create_u32("min_uV", 0444, regulator->debugfs, &regulator->min_uV); debugfs_create_u32("max_uV", 0444, regulator->debugfs, &regulator->max_uV); } mutex_unlock(&rdev->mutex); return regulator; link_name_err: kfree(regulator->supply_name); attr_err: device_remove_file(regulator->dev, &regulator->dev_attr); attr_name_err: kfree(regulator->dev_attr.attr.name); overflow_err: list_del(&regulator->list); kfree(regulator); mutex_unlock(&rdev->mutex); return NULL; } static int _regulator_get_enable_time(struct regulator_dev *rdev) { if (!rdev->desc->ops->enable_time) return 0; return rdev->desc->ops->enable_time(rdev); } static struct regulator_dev *regulator_dev_lookup(struct device *dev, const char *supply) { struct regulator_dev *r; struct device_node *node; /* first do a dt based lookup */ if (dev && dev->of_node) { node = of_get_regulator(dev, supply); if (node) list_for_each_entry(r, &regulator_list, list) if (r->dev.parent && node == r->dev.of_node) return r; } /* if not found, try doing it non-dt way */ list_for_each_entry(r, &regulator_list, list) if (strcmp(rdev_get_name(r), supply) == 0) return r; return NULL; } /* Internal regulator request function */ static struct regulator *_regulator_get(struct device *dev, const char *id, int exclusive) { struct regulator_dev *rdev; struct regulator_map *map; struct regulator *regulator = ERR_PTR(-EPROBE_DEFER); const char *devname = NULL; int ret; if (id == NULL) { pr_err("get() with no identifier\n"); return regulator; } if (dev) devname = dev_name(dev); mutex_lock(&regulator_list_mutex); rdev = regulator_dev_lookup(dev, id); if (rdev) goto found; list_for_each_entry(map, &regulator_map_list, list) { /* If the mapping has a device set up it must match */ if (map->dev_name && (!devname || strcmp(map->dev_name, devname))) continue; if (strcmp(map->supply, id) == 0) { rdev = map->regulator; goto found; } } if (board_wants_dummy_regulator) { rdev = dummy_regulator_rdev; goto found; } #ifdef CONFIG_REGULATOR_DUMMY if 
(!devname) devname = "deviceless"; /* If the board didn't flag that it was fully constrained then * substitute in a dummy regulator so consumers can continue. */ if (!has_full_constraints) { pr_warn("%s supply %s not found, using dummy regulator\n", devname, id); rdev = dummy_regulator_rdev; goto found; } #endif mutex_unlock(&regulator_list_mutex); return regulator; found: if (rdev->exclusive) { regulator = ERR_PTR(-EPERM); goto out; } if (exclusive && rdev->open_count) { regulator = ERR_PTR(-EBUSY); goto out; } if (!try_module_get(rdev->owner)) goto out; regulator = create_regulator(rdev, dev, id); if (regulator == NULL) { regulator = ERR_PTR(-ENOMEM); module_put(rdev->owner); goto out; } rdev->open_count++; if (exclusive) { rdev->exclusive = 1; ret = _regulator_is_enabled(rdev); if (ret > 0) rdev->use_count = 1; else rdev->use_count = 0; } out: mutex_unlock(&regulator_list_mutex); return regulator; } /** * regulator_get - lookup and obtain a reference to a regulator. * @dev: device for regulator "consumer" * @id: Supply name or regulator ID. * * Returns a struct regulator corresponding to the regulator producer, * or IS_ERR() condition containing errno. * * Use of supply names configured via regulator_set_device_supply() is * strongly encouraged. It is recommended that the supply name used * should match the name used for the supply and/or the relevant * device pins in the datasheet. */ struct regulator *regulator_get(struct device *dev, const char *id) { return _regulator_get(dev, id, 0); } EXPORT_SYMBOL_GPL(regulator_get); static void devm_regulator_release(struct device *dev, void *res) { regulator_put(*(struct regulator **)res); } /** * devm_regulator_get - Resource managed regulator_get() * @dev: device for regulator "consumer" * @id: Supply name or regulator ID. * * Managed regulator_get(). Regulators returned from this function are * automatically regulator_put() on driver detach. See regulator_get() for more * information. 
*/ struct regulator *devm_regulator_get(struct device *dev, const char *id) { struct regulator **ptr, *regulator; ptr = devres_alloc(devm_regulator_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return ERR_PTR(-ENOMEM); regulator = regulator_get(dev, id); if (!IS_ERR(regulator)) { *ptr = regulator; devres_add(dev, ptr); } else { devres_free(ptr); } return regulator; } EXPORT_SYMBOL_GPL(devm_regulator_get); /** * regulator_get_exclusive - obtain exclusive access to a regulator. * @dev: device for regulator "consumer" * @id: Supply name or regulator ID. * * Returns a struct regulator corresponding to the regulator producer, * or IS_ERR() condition containing errno. Other consumers will be * unable to obtain this reference is held and the use count for the * regulator will be initialised to reflect the current state of the * regulator. * * This is intended for use by consumers which cannot tolerate shared * use of the regulator such as those which need to force the * regulator off for correct operation of the hardware they are * controlling. * * Use of supply names configured via regulator_set_device_supply() is * strongly encouraged. It is recommended that the supply name used * should match the name used for the supply and/or the relevant * device pins in the datasheet. */ struct regulator *regulator_get_exclusive(struct device *dev, const char *id) { return _regulator_get(dev, id, 1); } EXPORT_SYMBOL_GPL(regulator_get_exclusive); /** * regulator_put - "free" the regulator source * @regulator: regulator source * * Note: drivers must ensure that all regulator_enable calls made on this * regulator source are balanced by regulator_disable calls prior to calling * this function. 
*/ void regulator_put(struct regulator *regulator) { struct regulator_dev *rdev; if (regulator == NULL || IS_ERR(regulator)) return; mutex_lock(&regulator_list_mutex); rdev = regulator->rdev; debugfs_remove_recursive(regulator->debugfs); /* remove any sysfs entries */ if (regulator->dev) { sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); device_remove_file(regulator->dev, &regulator->dev_attr); kfree(regulator->dev_attr.attr.name); } kfree(regulator->supply_name); list_del(&regulator->list); kfree(regulator); rdev->open_count--; rdev->exclusive = 0; module_put(rdev->owner); mutex_unlock(&regulator_list_mutex); } EXPORT_SYMBOL_GPL(regulator_put); static int devm_regulator_match(struct device *dev, void *res, void *data) { struct regulator **r = res; if (!r || !*r) { WARN_ON(!r || !*r); return 0; } return *r == data; } /** * devm_regulator_put - Resource managed regulator_put() * @regulator: regulator to free * * Deallocate a regulator allocated with devm_regulator_get(). Normally * this function will not need to be called and the resource management * code will ensure that the resource is freed. 
*/ void devm_regulator_put(struct regulator *regulator) { int rc; rc = devres_destroy(regulator->dev, devm_regulator_release, devm_regulator_match, regulator); if (rc == 0) regulator_put(regulator); else WARN_ON(rc); } EXPORT_SYMBOL_GPL(devm_regulator_put); static int _regulator_can_change_status(struct regulator_dev *rdev) { if (!rdev->constraints) return 0; if (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_STATUS) return 1; else return 0; } /* locks held by regulator_enable() */ static int _regulator_enable(struct regulator_dev *rdev) { int ret, delay; /* check voltage and requested load before enabling */ if (rdev->constraints && (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) drms_uA_update(rdev); if (rdev->use_count == 0) { /* The regulator may on if it's not switchable or left on */ ret = _regulator_is_enabled(rdev); if (ret == -EINVAL || ret == 0) { if (!_regulator_can_change_status(rdev)) return -EPERM; if (!rdev->desc->ops->enable) return -EINVAL; /* Query before enabling in case configuration * dependent. */ ret = _regulator_get_enable_time(rdev); if (ret >= 0) { delay = ret; } else { rdev_warn(rdev, "enable_time() failed: %d\n", ret); delay = 0; } trace_regulator_enable(rdev_get_name(rdev)); /* Allow the regulator to ramp; it would be useful * to extend this for bulk operations so that the * regulators can ramp together. 
*/ ret = rdev->desc->ops->enable(rdev); if (ret < 0) return ret; trace_regulator_enable_delay(rdev_get_name(rdev)); if (delay >= 1000) { mdelay(delay / 1000); udelay(delay % 1000); } else if (delay) { udelay(delay); } trace_regulator_enable_complete(rdev_get_name(rdev)); } else if (ret < 0) { rdev_err(rdev, "is_enabled() failed: %d\n", ret); return ret; } /* Fallthrough on positive return values - already enabled */ } rdev->use_count++; return 0; } /** * regulator_enable - enable regulator output * @regulator: regulator source * * Request that the regulator be enabled with the regulator output at * the predefined voltage or current value. Calls to regulator_enable() * must be balanced with calls to regulator_disable(). * * NOTE: the output value can be set by other drivers, boot loader or may be * hardwired in the regulator. */ int regulator_enable(struct regulator *regulator) { struct regulator_dev *rdev = regulator->rdev; int ret = 0; if (rdev->supply) { ret = regulator_enable(rdev->supply); if (ret != 0) return ret; } mutex_lock(&rdev->mutex); ret = _regulator_enable(rdev); if (ret == 0) regulator->enabled++; mutex_unlock(&rdev->mutex); if (ret != 0 && rdev->supply) regulator_disable(rdev->supply); return ret; } EXPORT_SYMBOL_GPL(regulator_enable); /* locks held by regulator_disable() */ static int _regulator_disable(struct regulator_dev *rdev) { int ret = 0; if (WARN(rdev->use_count <= 0, "unbalanced disables for %s\n", rdev_get_name(rdev))) return -EIO; /* are we the last user and permitted to disable ? 
*/ if (rdev->use_count == 1 && (rdev->constraints && !rdev->constraints->always_on)) { /* we are last user */ if (_regulator_can_change_status(rdev) && rdev->desc->ops->disable) { trace_regulator_disable(rdev_get_name(rdev)); ret = rdev->desc->ops->disable(rdev); if (ret < 0) { rdev_err(rdev, "failed to disable\n"); return ret; } trace_regulator_disable_complete(rdev_get_name(rdev)); _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE, NULL); } rdev->use_count = 0; } else if (rdev->use_count > 1) { if (rdev->constraints && (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) drms_uA_update(rdev); rdev->use_count--; } return ret; } /** * regulator_disable - disable regulator output * @regulator: regulator source * * Disable the regulator output voltage or current. Calls to * regulator_enable() must be balanced with calls to * regulator_disable(). * * NOTE: this will only disable the regulator output if no other consumer * devices have it enabled, the regulator device supports disabling and * machine constraints permit this operation. */ int regulator_disable(struct regulator *regulator) { struct regulator_dev *rdev = regulator->rdev; int ret = 0; mutex_lock(&rdev->mutex); ret = _regulator_disable(rdev); if (ret == 0) regulator->enabled--; mutex_unlock(&rdev->mutex); if (ret == 0 && rdev->supply) regulator_disable(rdev->supply); return ret; } EXPORT_SYMBOL_GPL(regulator_disable); /* locks held by regulator_force_disable() */ static int _regulator_force_disable(struct regulator_dev *rdev) { int ret = 0; /* force disable */ if (rdev->desc->ops->disable) { /* ah well, who wants to live forever... 
*/ ret = rdev->desc->ops->disable(rdev); if (ret < 0) { rdev_err(rdev, "failed to force disable\n"); return ret; } /* notify other consumers that power has been forced off */ _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE | REGULATOR_EVENT_DISABLE, NULL); } return ret; } /** * regulator_force_disable - force disable regulator output * @regulator: regulator source * * Forcibly disable the regulator output voltage or current. * NOTE: this *will* disable the regulator output even if other consumer * devices have it enabled. This should be used for situations when device * damage will likely occur if the regulator is not disabled (e.g. over temp). */ int regulator_force_disable(struct regulator *regulator) { struct regulator_dev *rdev = regulator->rdev; int ret; mutex_lock(&rdev->mutex); regulator->uA_load = 0; ret = _regulator_force_disable(regulator->rdev); mutex_unlock(&rdev->mutex); if (rdev->supply) while (rdev->open_count--) regulator_disable(rdev->supply); return ret; } EXPORT_SYMBOL_GPL(regulator_force_disable); static void regulator_disable_work(struct work_struct *work) { struct regulator_dev *rdev = container_of(work, struct regulator_dev, disable_work.work); int count, i, ret; mutex_lock(&rdev->mutex); BUG_ON(!rdev->deferred_disables); count = rdev->deferred_disables; rdev->deferred_disables = 0; for (i = 0; i < count; i++) { ret = _regulator_disable(rdev); if (ret != 0) rdev_err(rdev, "Deferred disable failed: %d\n", ret); } mutex_unlock(&rdev->mutex); if (rdev->supply) { for (i = 0; i < count; i++) { ret = regulator_disable(rdev->supply); if (ret != 0) { rdev_err(rdev, "Supply disable failed: %d\n", ret); } } } } /** * regulator_disable_deferred - disable regulator output with delay * @regulator: regulator source * @ms: miliseconds until the regulator is disabled * * Execute regulator_disable() on the regulator after a delay. This * is intended for use with devices that require some time to quiesce. 
* * NOTE: this will only disable the regulator output if no other consumer * devices have it enabled, the regulator device supports disabling and * machine constraints permit this operation. */ int regulator_disable_deferred(struct regulator *regulator, int ms) { struct regulator_dev *rdev = regulator->rdev; int ret; mutex_lock(&rdev->mutex); rdev->deferred_disables++; mutex_unlock(&rdev->mutex); ret = schedule_delayed_work(&rdev->disable_work, msecs_to_jiffies(ms)); if (ret < 0) return ret; else return 0; } EXPORT_SYMBOL_GPL(regulator_disable_deferred); static int _regulator_is_enabled(struct regulator_dev *rdev) { /* If we don't know then assume that the regulator is always on */ if (!rdev->desc->ops->is_enabled) return 1; return rdev->desc->ops->is_enabled(rdev); } /** * regulator_is_enabled - is the regulator output enabled * @regulator: regulator source * * Returns positive if the regulator driver backing the source/client * has requested that the device be enabled, zero if it hasn't, else a * negative errno code. * * Note that the device backing this regulator handle can have multiple * users, so it might be enabled even if regulator_enable() was never * called for this particular source. */ int regulator_is_enabled(struct regulator *regulator) { int ret; mutex_lock(&regulator->rdev->mutex); ret = _regulator_is_enabled(regulator->rdev); mutex_unlock(&regulator->rdev->mutex); return ret; } EXPORT_SYMBOL_GPL(regulator_is_enabled); /** * regulator_count_voltages - count regulator_list_voltage() selectors * @regulator: regulator source * * Returns number of selectors, or negative errno. Selectors are * numbered starting at zero, and typically correspond to bitfields * in hardware registers. */ int regulator_count_voltages(struct regulator *regulator) { struct regulator_dev *rdev = regulator->rdev; return rdev->desc->n_voltages ? 
: -EINVAL;
}
EXPORT_SYMBOL_GPL(regulator_count_voltages);

/**
 * regulator_list_voltage - enumerate supported voltages
 * @regulator: regulator source
 * @selector: identify voltage to list
 * Context: can sleep
 *
 * Returns a voltage that can be passed to @regulator_set_voltage(),
 * zero if this selector code can't be used on this system, or a
 * negative errno.
 */
int regulator_list_voltage(struct regulator *regulator, unsigned selector)
{
	struct regulator_dev	*rdev = regulator->rdev;
	struct regulator_ops	*ops = rdev->desc->ops;
	int			ret;

	if (!ops->list_voltage || selector >= rdev->desc->n_voltages)
		return -EINVAL;

	mutex_lock(&rdev->mutex);
	ret = ops->list_voltage(rdev, selector);
	mutex_unlock(&rdev->mutex);

	if (ret > 0) {
		/* Voltages outside the machine constraints are reported as
		 * unusable (0) rather than exposed to consumers.
		 * NOTE(review): rdev->constraints is dereferenced without a
		 * NULL check here, unlike other paths in this file that
		 * guard with "rdev->constraints &&" — presumably every
		 * registered regulator has constraints by this point;
		 * confirm against set_machine_constraints(). */
		if (ret < rdev->constraints->min_uV)
			ret = 0;
		else if (ret > rdev->constraints->max_uV)
			ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(regulator_list_voltage);

/**
 * regulator_is_supported_voltage - check if a voltage range can be supported
 *
 * @regulator: Regulator to check.
 * @min_uV: Minimum required voltage in uV.
 * @max_uV: Maximum required voltage in uV.
 *
 * Returns a boolean or a negative error code.
*/ int regulator_is_supported_voltage(struct regulator *regulator, int min_uV, int max_uV) { int i, voltages, ret; ret = regulator_count_voltages(regulator); if (ret < 0) return ret; voltages = ret; for (i = 0; i < voltages; i++) { ret = regulator_list_voltage(regulator, i); if (ret >= min_uV && ret <= max_uV) return 1; } return 0; } EXPORT_SYMBOL_GPL(regulator_is_supported_voltage); static int _regulator_do_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) { int ret; int delay = 0; unsigned int selector; trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV); min_uV += rdev->constraints->uV_offset; max_uV += rdev->constraints->uV_offset; if (rdev->desc->ops->set_voltage) { ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV, &selector); if (rdev->desc->ops->list_voltage) selector = rdev->desc->ops->list_voltage(rdev, selector); else selector = -1; } else if (rdev->desc->ops->set_voltage_sel) { int best_val = INT_MAX; int i; selector = 0; /* Find the smallest voltage that falls within the specified * range. */ for (i = 0; i < rdev->desc->n_voltages; i++) { ret = rdev->desc->ops->list_voltage(rdev, i); if (ret < 0) continue; if (ret < best_val && ret >= min_uV && ret <= max_uV) { best_val = ret; selector = i; } } /* * If we can't obtain the old selector there is not enough * info to call set_voltage_time_sel(). 
*/ if (rdev->desc->ops->set_voltage_time_sel && rdev->desc->ops->get_voltage_sel) { unsigned int old_selector = 0; ret = rdev->desc->ops->get_voltage_sel(rdev); if (ret < 0) return ret; old_selector = ret; ret = rdev->desc->ops->set_voltage_time_sel(rdev, old_selector, selector); if (ret < 0) rdev_warn(rdev, "set_voltage_time_sel() failed: %d\n", ret); else delay = ret; } if (best_val != INT_MAX) { ret = rdev->desc->ops->set_voltage_sel(rdev, selector); selector = best_val; } else { ret = -EINVAL; } } else { ret = -EINVAL; } /* Insert any necessary delays */ if (delay >= 1000) { mdelay(delay / 1000); udelay(delay % 1000); } else if (delay) { udelay(delay); } if (ret == 0) _notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE, NULL); trace_regulator_set_voltage_complete(rdev_get_name(rdev), selector); return ret; } /** * regulator_set_voltage - set regulator output voltage * @regulator: regulator source * @min_uV: Minimum required voltage in uV * @max_uV: Maximum acceptable voltage in uV * * Sets a voltage regulator to the desired output voltage. This can be set * during any regulator state. IOW, regulator can be disabled or enabled. * * If the regulator is enabled then the voltage will change to the new value * immediately otherwise if the regulator is disabled the regulator will * output at the new voltage when enabled. * * NOTE: If the regulator is shared between several devices then the lowest * request voltage that meets the system constraints will be used. * Regulator system constraints must be set for this regulator before * calling this function otherwise this call will fail. */ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV) { struct regulator_dev *rdev = regulator->rdev; int prev_min_uV, prev_max_uV; int ret = 0; mutex_lock(&rdev->mutex); /* If we're setting the same range as last time the change * should be a noop (some cpufreq implementations use the same * voltage for multiple frequencies, for example). 
*/ if (regulator->min_uV == min_uV && regulator->max_uV == max_uV) goto out; /* sanity check */ if (!rdev->desc->ops->set_voltage && !rdev->desc->ops->set_voltage_sel) { ret = -EINVAL; goto out; } /* constraints check */ ret = regulator_check_voltage(rdev, &min_uV, &max_uV); if (ret < 0) goto out; prev_min_uV = regulator->min_uV; prev_max_uV = regulator->max_uV; regulator->min_uV = min_uV; regulator->max_uV = max_uV; ret = regulator_check_consumers(rdev, &min_uV, &max_uV); if (ret < 0) { regulator->min_uV = prev_min_uV; regulator->max_uV = prev_max_uV; goto out; } ret = _regulator_do_set_voltage(rdev, min_uV, max_uV); out: mutex_unlock(&rdev->mutex); return ret; } EXPORT_SYMBOL_GPL(regulator_set_voltage); /** * regulator_set_voltage_time - get raise/fall time * @regulator: regulator source * @old_uV: starting voltage in microvolts * @new_uV: target voltage in microvolts * * Provided with the starting and ending voltage, this function attempts to * calculate the time in microseconds required to rise or fall to this new * voltage. 
 */
int regulator_set_voltage_time(struct regulator *regulator,
			       int old_uV, int new_uV)
{
	struct regulator_dev	*rdev = regulator->rdev;
	struct regulator_ops	*ops = rdev->desc->ops;
	int old_sel = -1;
	int new_sel = -1;
	int voltage;
	int i;

	/* Currently requires operations to do this */
	if (!ops->list_voltage || !ops->set_voltage_time_sel
	    || !rdev->desc->n_voltages)
		return -EINVAL;

	/* Map both voltages back to selectors so the driver's
	 * selector-based ramp-time hook can be used. */
	for (i = 0; i < rdev->desc->n_voltages; i++) {
		/* We only look for exact voltage matches here */
		voltage = regulator_list_voltage(regulator, i);
		if (voltage < 0)
			return -EINVAL;
		if (voltage == 0)
			/* selector unusable on this system; skip it */
			continue;
		if (voltage == old_uV)
			old_sel = i;
		if (voltage == new_uV)
			new_sel = i;
	}

	/* both endpoints must correspond to listable selectors */
	if (old_sel < 0 || new_sel < 0)
		return -EINVAL;

	return ops->set_voltage_time_sel(rdev, old_sel, new_sel);
}
EXPORT_SYMBOL_GPL(regulator_set_voltage_time);

/**
 * regulator_sync_voltage - re-apply last regulator output voltage
 * @regulator: regulator source
 *
 * Re-apply the last configured voltage.  This is intended to be used
 * where some external control source the consumer is cooperating with
 * has caused the configured voltage to change.
 */
int regulator_sync_voltage(struct regulator *regulator)
{
	struct regulator_dev *rdev = regulator->rdev;
	int ret, min_uV, max_uV;

	mutex_lock(&rdev->mutex);

	/* sanity check: need at least one voltage-setting op */
	if (!rdev->desc->ops->set_voltage &&
	    !rdev->desc->ops->set_voltage_sel) {
		ret = -EINVAL;
		goto out;
	}

	/* This is only going to work if we've had a voltage configured. */
	if (!regulator->min_uV && !regulator->max_uV) {
		ret = -EINVAL;
		goto out;
	}

	min_uV = regulator->min_uV;
	max_uV = regulator->max_uV;

	/* This should be a paranoia check...
*/ ret = regulator_check_voltage(rdev, &min_uV, &max_uV); if (ret < 0) goto out; ret = regulator_check_consumers(rdev, &min_uV, &max_uV); if (ret < 0) goto out; ret = _regulator_do_set_voltage(rdev, min_uV, max_uV); out: mutex_unlock(&rdev->mutex); return ret; } EXPORT_SYMBOL_GPL(regulator_sync_voltage); static int _regulator_get_voltage(struct regulator_dev *rdev) { int sel, ret; if (rdev->desc->ops->get_voltage_sel) { sel = rdev->desc->ops->get_voltage_sel(rdev); if (sel < 0) return sel; ret = rdev->desc->ops->list_voltage(rdev, sel); } else if (rdev->desc->ops->get_voltage) { ret = rdev->desc->ops->get_voltage(rdev); } else { return -EINVAL; } if (ret < 0) return ret; return ret - rdev->constraints->uV_offset; } /** * regulator_get_voltage - get regulator output voltage * @regulator: regulator source * * This returns the current regulator voltage in uV. * * NOTE: If the regulator is disabled it will return the voltage value. This * function should not be used to determine regulator state. */ int regulator_get_voltage(struct regulator *regulator) { int ret; mutex_lock(&regulator->rdev->mutex); ret = _regulator_get_voltage(regulator->rdev); mutex_unlock(&regulator->rdev->mutex); return ret; } EXPORT_SYMBOL_GPL(regulator_get_voltage); /** * regulator_set_current_limit - set regulator output current limit * @regulator: regulator source * @min_uA: Minimuum supported current in uA * @max_uA: Maximum supported current in uA * * Sets current sink to the desired output current. This can be set during * any regulator state. IOW, regulator can be disabled or enabled. * * If the regulator is enabled then the current will change to the new value * immediately otherwise if the regulator is disabled the regulator will * output at the new current when enabled. * * NOTE: Regulator system constraints must be set for this regulator before * calling this function otherwise this call will fail. 
 */
int regulator_set_current_limit(struct regulator *regulator,
			       int min_uA, int max_uA)
{
	struct regulator_dev *rdev = regulator->rdev;
	int ret;

	mutex_lock(&rdev->mutex);

	/* sanity check */
	if (!rdev->desc->ops->set_current_limit) {
		ret = -EINVAL;
		goto out;
	}

	/* constraints check */
	ret = regulator_check_current_limit(rdev, &min_uA, &max_uA);
	if (ret < 0)
		goto out;

	ret = rdev->desc->ops->set_current_limit(rdev, min_uA, max_uA);
out:
	mutex_unlock(&rdev->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(regulator_set_current_limit);

/* Locked helper: returns the configured current limit in uA, or a
 * negative errno if the driver provides no get_current_limit() op. */
static int _regulator_get_current_limit(struct regulator_dev *rdev)
{
	int ret;

	mutex_lock(&rdev->mutex);

	/* sanity check */
	if (!rdev->desc->ops->get_current_limit) {
		ret = -EINVAL;
		goto out;
	}

	ret = rdev->desc->ops->get_current_limit(rdev);
out:
	mutex_unlock(&rdev->mutex);
	return ret;
}

/**
 * regulator_get_current_limit - get regulator output current
 * @regulator: regulator source
 *
 * This returns the current supplied by the specified current sink in uA.
 *
 * NOTE: If the regulator is disabled it will return the current value. This
 * function should not be used to determine regulator state.
 */
int regulator_get_current_limit(struct regulator *regulator)
{
	return _regulator_get_current_limit(regulator->rdev);
}
EXPORT_SYMBOL_GPL(regulator_get_current_limit);

/**
 * regulator_set_mode - set regulator operating mode
 * @regulator: regulator source
 * @mode: operating mode - one of the REGULATOR_MODE constants
 *
 * Set regulator operating mode to increase regulator efficiency or improve
 * regulation performance.
 *
 * NOTE: Regulator system constraints must be set for this regulator before
 * calling this function otherwise this call will fail.
*/ int regulator_set_mode(struct regulator *regulator, unsigned int mode) { struct regulator_dev *rdev = regulator->rdev; int ret; int regulator_curr_mode; mutex_lock(&rdev->mutex); /* sanity check */ if (!rdev->desc->ops->set_mode) { ret = -EINVAL; goto out; } /* return if the same mode is requested */ if (rdev->desc->ops->get_mode) { regulator_curr_mode = rdev->desc->ops->get_mode(rdev); if (regulator_curr_mode == mode) { ret = 0; goto out; } } /* constraints check */ ret = regulator_mode_constrain(rdev, &mode); if (ret < 0) goto out; ret = rdev->desc->ops->set_mode(rdev, mode); out: mutex_unlock(&rdev->mutex); return ret; } EXPORT_SYMBOL_GPL(regulator_set_mode); static unsigned int _regulator_get_mode(struct regulator_dev *rdev) { int ret; mutex_lock(&rdev->mutex); /* sanity check */ if (!rdev->desc->ops->get_mode) { ret = -EINVAL; goto out; } ret = rdev->desc->ops->get_mode(rdev); out: mutex_unlock(&rdev->mutex); return ret; } /** * regulator_get_mode - get regulator operating mode * @regulator: regulator source * * Get the current regulator operating mode. */ unsigned int regulator_get_mode(struct regulator *regulator) { return _regulator_get_mode(regulator->rdev); } EXPORT_SYMBOL_GPL(regulator_get_mode); /** * regulator_set_optimum_mode - set regulator optimum operating mode * @regulator: regulator source * @uA_load: load current * * Notifies the regulator core of a new device load. This is then used by * DRMS (if enabled by constraints) to set the most efficient regulator * operating mode for the new regulator loading. * * Consumer devices notify their supply regulator of the maximum power * they will require (can be taken from device datasheet in the power * consumption tables) when they change operational status and hence power * state. Examples of operational state changes that can affect power * consumption are :- * * o Device is opened / closed. * o Device I/O is about to begin or has just finished. * o Device is idling in between work. 
* * This information is also exported via sysfs to userspace. * * DRMS will sum the total requested load on the regulator and change * to the most efficient operating mode if platform constraints allow. * * Returns the new regulator mode or error. */ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load) { struct regulator_dev *rdev = regulator->rdev; struct regulator *consumer; int ret, output_uV, input_uV = 0, total_uA_load = 0; unsigned int mode; if (rdev->supply) input_uV = regulator_get_voltage(rdev->supply); mutex_lock(&rdev->mutex); /* * first check to see if we can set modes at all, otherwise just * tell the consumer everything is OK. */ regulator->uA_load = uA_load; ret = regulator_check_drms(rdev); if (ret < 0) { ret = 0; goto out; } if (!rdev->desc->ops->get_optimum_mode) goto out; /* * we can actually do this so any errors are indicators of * potential real failure. */ ret = -EINVAL; /* get output voltage */ output_uV = _regulator_get_voltage(rdev); if (output_uV <= 0) { rdev_err(rdev, "invalid output voltage found\n"); goto out; } /* No supply? 
Use constraint voltage */ if (input_uV <= 0) input_uV = rdev->constraints->input_uV; if (input_uV <= 0) { rdev_err(rdev, "invalid input voltage found\n"); goto out; } /* calc total requested load for this regulator */ list_for_each_entry(consumer, &rdev->consumer_list, list) total_uA_load += consumer->uA_load; mode = rdev->desc->ops->get_optimum_mode(rdev, input_uV, output_uV, total_uA_load); ret = regulator_mode_constrain(rdev, &mode); if (ret < 0) { rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n", total_uA_load, input_uV, output_uV); goto out; } ret = rdev->desc->ops->set_mode(rdev, mode); if (ret < 0) { rdev_err(rdev, "failed to set optimum mode %x\n", mode); goto out; } ret = mode; out: mutex_unlock(&rdev->mutex); return ret; } EXPORT_SYMBOL_GPL(regulator_set_optimum_mode); /** * regulator_register_notifier - register regulator event notifier * @regulator: regulator source * @nb: notifier block * * Register notifier block to receive regulator events. */ int regulator_register_notifier(struct regulator *regulator, struct notifier_block *nb) { return blocking_notifier_chain_register(&regulator->rdev->notifier, nb); } EXPORT_SYMBOL_GPL(regulator_register_notifier); /** * regulator_unregister_notifier - unregister regulator event notifier * @regulator: regulator source * @nb: notifier block * * Unregister regulator event notifier block. */ int regulator_unregister_notifier(struct regulator *regulator, struct notifier_block *nb) { return blocking_notifier_chain_unregister(&regulator->rdev->notifier, nb); } EXPORT_SYMBOL_GPL(regulator_unregister_notifier); /* notify regulator consumers and downstream regulator consumers. * Note mutex must be held by caller. 
*/ static void _notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data) { /* call rdev chain first */ blocking_notifier_call_chain(&rdev->notifier, event, NULL); } /** * regulator_bulk_get - get multiple regulator consumers * * @dev: Device to supply * @num_consumers: Number of consumers to register * @consumers: Configuration of consumers; clients are stored here. * * @return 0 on success, an errno on failure. * * This helper function allows drivers to get several regulator * consumers in one operation. If any of the regulators cannot be * acquired then any regulators that were allocated will be freed * before returning to the caller. */ int regulator_bulk_get(struct device *dev, int num_consumers, struct regulator_bulk_data *consumers) { int i; int ret; for (i = 0; i < num_consumers; i++) consumers[i].consumer = NULL; for (i = 0; i < num_consumers; i++) { consumers[i].consumer = regulator_get(dev, consumers[i].supply); if (IS_ERR(consumers[i].consumer)) { ret = PTR_ERR(consumers[i].consumer); dev_err(dev, "Failed to get supply '%s': %d\n", consumers[i].supply, ret); consumers[i].consumer = NULL; goto err; } } return 0; err: while (--i >= 0) regulator_put(consumers[i].consumer); return ret; } EXPORT_SYMBOL_GPL(regulator_bulk_get); /** * devm_regulator_bulk_get - managed get multiple regulator consumers * * @dev: Device to supply * @num_consumers: Number of consumers to register * @consumers: Configuration of consumers; clients are stored here. * * @return 0 on success, an errno on failure. * * This helper function allows drivers to get several regulator * consumers in one operation with management, the regulators will * automatically be freed when the device is unbound. If any of the * regulators cannot be acquired then any regulators that were * allocated will be freed before returning to the caller. 
 */
int devm_regulator_bulk_get(struct device *dev, int num_consumers,
			    struct regulator_bulk_data *consumers)
{
	int i;
	int ret;

	/* start from a clean slate so the error path below can tell which
	 * consumers were actually acquired */
	for (i = 0; i < num_consumers; i++)
		consumers[i].consumer = NULL;

	for (i = 0; i < num_consumers; i++) {
		consumers[i].consumer = devm_regulator_get(dev,
							   consumers[i].supply);
		if (IS_ERR(consumers[i].consumer)) {
			ret = PTR_ERR(consumers[i].consumer);
			dev_err(dev, "Failed to get supply '%s': %d\n",
				consumers[i].supply, ret);
			consumers[i].consumer = NULL;
			goto err;
		}
	}

	return 0;

err:
	/* release everything acquired before the failure; the first NULL
	 * consumer marks where acquisition stopped */
	for (i = 0; i < num_consumers && consumers[i].consumer; i++)
		devm_regulator_put(consumers[i].consumer);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_regulator_bulk_get);

/* Async worker: enable one bulk consumer and record the result in ->ret
 * for regulator_bulk_enable() to inspect after synchronization. */
static void regulator_bulk_enable_async(void *data, async_cookie_t cookie)
{
	struct regulator_bulk_data *bulk = data;

	bulk->ret = regulator_enable(bulk->consumer);
}

/**
 * regulator_bulk_enable - enable multiple regulator consumers
 *
 * @num_consumers: Number of consumers
 * @consumers:     Consumer data; clients are stored here.
 * @return         0 on success, an errno on failure
 *
 * This convenience API allows consumers to enable multiple regulator
 * clients in a single API call.  If any consumers cannot be enabled
 * then any others that were enabled will be disabled again prior to
 * return.
*/ int regulator_bulk_enable(int num_consumers, struct regulator_bulk_data *consumers) { LIST_HEAD(async_domain); int i; int ret = 0; for (i = 0; i < num_consumers; i++) async_schedule_domain(regulator_bulk_enable_async, &consumers[i], &async_domain); async_synchronize_full_domain(&async_domain); /* If any consumer failed we need to unwind any that succeeded */ for (i = 0; i < num_consumers; i++) { if (consumers[i].ret != 0) { ret = consumers[i].ret; goto err; } } return 0; err: pr_err("Failed to enable %s: %d\n", consumers[i].supply, ret); while (--i >= 0) regulator_disable(consumers[i].consumer); return ret; } EXPORT_SYMBOL_GPL(regulator_bulk_enable); /** * regulator_bulk_set_voltage - set voltage for multiple regulator consumers * * @num_consumers: Number of consumers * @consumers: Consumer data; clients are stored here. * @return 0 on success, an errno on failure * * This convenience API allows the voted voltage ranges of multiple regulator * clients to be set in a single API call. If any consumers cannot have their * voltages set, this function returns WITHOUT withdrawing votes for any * consumers that have already been set. */ int regulator_bulk_set_voltage(int num_consumers, struct regulator_bulk_data *consumers) { int i; int rc; for (i = 0; i < num_consumers; i++) { if (!consumers[i].min_uV && !consumers[i].max_uV) continue; rc = regulator_set_voltage(consumers[i].consumer, consumers[i].min_uV, consumers[i].max_uV); if (rc) goto err; } return 0; err: pr_err("Failed to set voltage for %s: %d\n", consumers[i].supply, rc); return rc; } EXPORT_SYMBOL_GPL(regulator_bulk_set_voltage); /** * regulator_bulk_disable - disable multiple regulator consumers * * @num_consumers: Number of consumers * @consumers: Consumer data; clients are stored here. * @return 0 on success, an errno on failure * * This convenience API allows consumers to disable multiple regulator * clients in a single API call. 
If any consumers cannot be disabled * then any others that were disabled will be enabled again prior to * return. */ int regulator_bulk_disable(int num_consumers, struct regulator_bulk_data *consumers) { int i; int ret; for (i = num_consumers - 1; i >= 0; --i) { ret = regulator_disable(consumers[i].consumer); if (ret != 0) goto err; } return 0; err: pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret); for (++i; i < num_consumers; ++i) regulator_enable(consumers[i].consumer); return ret; } EXPORT_SYMBOL_GPL(regulator_bulk_disable); /** * regulator_bulk_force_disable - force disable multiple regulator consumers * * @num_consumers: Number of consumers * @consumers: Consumer data; clients are stored here. * @return 0 on success, an errno on failure * * This convenience API allows consumers to forcibly disable multiple regulator * clients in a single API call. * NOTE: This should be used for situations when device damage will * likely occur if the regulators are not disabled (e.g. over temp). * Although regulator_force_disable function call for some consumers can * return error numbers, the function is called for all consumers. */ int regulator_bulk_force_disable(int num_consumers, struct regulator_bulk_data *consumers) { int i; int ret; for (i = 0; i < num_consumers; i++) consumers[i].ret = regulator_force_disable(consumers[i].consumer); for (i = 0; i < num_consumers; i++) { if (consumers[i].ret != 0) { ret = consumers[i].ret; goto out; } } return 0; out: return ret; } EXPORT_SYMBOL_GPL(regulator_bulk_force_disable); /** * regulator_bulk_free - free multiple regulator consumers * * @num_consumers: Number of consumers * @consumers: Consumer data; clients are stored here. * * This convenience API allows consumers to free multiple regulator * clients in a single API call. 
*/ void regulator_bulk_free(int num_consumers, struct regulator_bulk_data *consumers) { int i; for (i = 0; i < num_consumers; i++) { regulator_put(consumers[i].consumer); consumers[i].consumer = NULL; } } EXPORT_SYMBOL_GPL(regulator_bulk_free); /** * regulator_notifier_call_chain - call regulator event notifier * @rdev: regulator source * @event: notifier block * @data: callback-specific data. * * Called by regulator drivers to notify clients a regulator event has * occurred. We also notify regulator clients downstream. * Note lock must be held by caller. */ int regulator_notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data) { _notifier_call_chain(rdev, event, data); return NOTIFY_DONE; } EXPORT_SYMBOL_GPL(regulator_notifier_call_chain); /** * regulator_mode_to_status - convert a regulator mode into a status * * @mode: Mode to convert * * Convert a regulator mode into a status. */ int regulator_mode_to_status(unsigned int mode) { switch (mode) { case REGULATOR_MODE_FAST: return REGULATOR_STATUS_FAST; case REGULATOR_MODE_NORMAL: return REGULATOR_STATUS_NORMAL; case REGULATOR_MODE_IDLE: return REGULATOR_STATUS_IDLE; case REGULATOR_STATUS_STANDBY: return REGULATOR_STATUS_STANDBY; default: return 0; } } EXPORT_SYMBOL_GPL(regulator_mode_to_status); /* * To avoid cluttering sysfs (and memory) with useless state, only * create attributes that can be meaningfully displayed. 
*/ static int add_regulator_attributes(struct regulator_dev *rdev) { struct device *dev = &rdev->dev; struct regulator_ops *ops = rdev->desc->ops; int status = 0; /* some attributes need specific methods to be displayed */ if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) || (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0)) { status = device_create_file(dev, &dev_attr_microvolts); if (status < 0) return status; } if (ops->get_current_limit) { status = device_create_file(dev, &dev_attr_microamps); if (status < 0) return status; } if (ops->get_mode) { status = device_create_file(dev, &dev_attr_opmode); if (status < 0) return status; } if (ops->is_enabled) { status = device_create_file(dev, &dev_attr_state); if (status < 0) return status; } if (ops->get_status) { status = device_create_file(dev, &dev_attr_status); if (status < 0) return status; } /* some attributes are type-specific */ if (rdev->desc->type == REGULATOR_CURRENT) { status = device_create_file(dev, &dev_attr_requested_microamps); if (status < 0) return status; } /* all the other attributes exist to support constraints; * don't show them if there are no constraints, or if the * relevant supporting methods are missing. 
*/ if (!rdev->constraints) return status; /* constraints need specific supporting methods */ if (ops->set_voltage || ops->set_voltage_sel) { status = device_create_file(dev, &dev_attr_min_microvolts); if (status < 0) return status; status = device_create_file(dev, &dev_attr_max_microvolts); if (status < 0) return status; } if (ops->set_current_limit) { status = device_create_file(dev, &dev_attr_min_microamps); if (status < 0) return status; status = device_create_file(dev, &dev_attr_max_microamps); if (status < 0) return status; } /* suspend mode constraints need multiple supporting methods */ if (!(ops->set_suspend_enable && ops->set_suspend_disable)) return status; status = device_create_file(dev, &dev_attr_suspend_standby_state); if (status < 0) return status; status = device_create_file(dev, &dev_attr_suspend_mem_state); if (status < 0) return status; status = device_create_file(dev, &dev_attr_suspend_disk_state); if (status < 0) return status; if (ops->set_suspend_voltage) { status = device_create_file(dev, &dev_attr_suspend_standby_microvolts); if (status < 0) return status; status = device_create_file(dev, &dev_attr_suspend_mem_microvolts); if (status < 0) return status; status = device_create_file(dev, &dev_attr_suspend_disk_microvolts); if (status < 0) return status; } if (ops->set_suspend_mode) { status = device_create_file(dev, &dev_attr_suspend_standby_mode); if (status < 0) return status; status = device_create_file(dev, &dev_attr_suspend_mem_mode); if (status < 0) return status; status = device_create_file(dev, &dev_attr_suspend_disk_mode); if (status < 0) return status; } return status; } #ifdef CONFIG_DEBUG_FS #define MAX_DEBUG_BUF_LEN 50 static DEFINE_MUTEX(debug_buf_mutex); static char debug_buf[MAX_DEBUG_BUF_LEN]; static int reg_debug_enable_set(void *data, u64 val) { int err_info; if (IS_ERR(data) || data == NULL) { pr_err("Function Input Error %ld\n", PTR_ERR(data)); return -ENOMEM; } if (val) err_info = regulator_enable(data); else err_info = 
regulator_disable(data); return err_info; } static int reg_debug_enable_get(void *data, u64 *val) { if (IS_ERR(data) || data == NULL) { pr_err("Function Input Error %ld\n", PTR_ERR(data)); return -ENOMEM; } *val = regulator_is_enabled(data); return 0; } DEFINE_SIMPLE_ATTRIBUTE(reg_enable_fops, reg_debug_enable_get, reg_debug_enable_set, "%llu\n"); static int reg_debug_fdisable_set(void *data, u64 val) { int err_info; if (IS_ERR(data) || data == NULL) { pr_err("Function Input Error %ld\n", PTR_ERR(data)); return -ENOMEM; } if (val > 0) err_info = regulator_force_disable(data); else err_info = 0; return err_info; } DEFINE_SIMPLE_ATTRIBUTE(reg_fdisable_fops, reg_debug_enable_get, reg_debug_fdisable_set, "%llu\n"); static ssize_t reg_debug_volt_set(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { int err_info, filled; int min, max = -1; if (IS_ERR(file) || file == NULL) { pr_err("Function Input Error %ld\n", PTR_ERR(file)); return -ENOMEM; } if (count < MAX_DEBUG_BUF_LEN) { mutex_lock(&debug_buf_mutex); if (copy_from_user(debug_buf, (void __user *) buf, count)) return -EFAULT; debug_buf[count] = '\0'; filled = sscanf(debug_buf, "%d %d", &min, &max); mutex_unlock(&debug_buf_mutex); /* check that user entered two numbers */ if (filled < 2 || min < 0 || max < min) { pr_info("Error, correct format: 'echo \"min max\"" " > voltage"); return -ENOMEM; } else { err_info = regulator_set_voltage(file->private_data, min, max); } } else { pr_err("Error-Input voltage pair" " string exceeds maximum buffer length"); return -ENOMEM; } return count; } static ssize_t reg_debug_volt_get(struct file *file, char __user *buf, size_t count, loff_t *ppos) { int voltage, output, rc; if (IS_ERR(file) || file == NULL) { pr_err("Function Input Error %ld\n", PTR_ERR(file)); return -ENOMEM; } voltage = regulator_get_voltage(file->private_data); mutex_lock(&debug_buf_mutex); output = snprintf(debug_buf, MAX_DEBUG_BUF_LEN-1, "%d\n", voltage); rc = simple_read_from_buffer((void 
__user *) buf, output, ppos, (void *) debug_buf, output); mutex_unlock(&debug_buf_mutex); return rc; } static int reg_debug_volt_open(struct inode *inode, struct file *file) { if (IS_ERR(file) || file == NULL) { pr_err("Function Input Error %ld\n", PTR_ERR(file)); return -ENOMEM; } file->private_data = inode->i_private; return 0; } static const struct file_operations reg_volt_fops = { .write = reg_debug_volt_set, .open = reg_debug_volt_open, .read = reg_debug_volt_get, }; static int reg_debug_mode_set(void *data, u64 val) { int err_info; if (IS_ERR(data) || data == NULL) { pr_err("Function Input Error %ld\n", PTR_ERR(data)); return -ENOMEM; } err_info = regulator_set_mode(data, (unsigned int)val); return err_info; } static int reg_debug_mode_get(void *data, u64 *val) { int err_info; if (IS_ERR(data) || data == NULL) { pr_err("Function Input Error %ld\n", PTR_ERR(data)); return -ENOMEM; } err_info = regulator_get_mode(data); if (err_info < 0) { pr_err("Regulator_get_mode returned an error!\n"); return -ENOMEM; } else { *val = err_info; return 0; } } DEFINE_SIMPLE_ATTRIBUTE(reg_mode_fops, reg_debug_mode_get, reg_debug_mode_set, "%llu\n"); static int reg_debug_optimum_mode_set(void *data, u64 val) { int err_info; if (IS_ERR(data) || data == NULL) { pr_err("Function Input Error %ld\n", PTR_ERR(data)); return -ENOMEM; } err_info = regulator_set_optimum_mode(data, (unsigned int)val); if (err_info < 0) { pr_err("Regulator_set_optimum_mode returned an error!\n"); return err_info; } return 0; } DEFINE_SIMPLE_ATTRIBUTE(reg_optimum_mode_fops, reg_debug_mode_get, reg_debug_optimum_mode_set, "%llu\n"); static int reg_debug_consumers_show(struct seq_file *m, void *v) { struct regulator_dev *rdev = m->private; struct regulator *reg; char *supply_name; if (!rdev) { pr_err("regulator device missing"); return -EINVAL; } mutex_lock(&rdev->mutex); /* Print a header if there are consumers. 
*/ if (rdev->open_count) seq_printf(m, "Device-Supply " "EN Min_uV Max_uV load_uA\n"); list_for_each_entry(reg, &rdev->consumer_list, list) { if (reg->supply_name) supply_name = reg->supply_name; else supply_name = "(null)-(null)"; seq_printf(m, "%-32s %c %8d %8d %8d\n", supply_name, (reg->enabled ? 'Y' : 'N'), reg->min_uV, reg->max_uV, reg->uA_load); } mutex_unlock(&rdev->mutex); return 0; } static int reg_debug_consumers_open(struct inode *inode, struct file *file) { return single_open(file, reg_debug_consumers_show, inode->i_private); } static const struct file_operations reg_consumers_fops = { .owner = THIS_MODULE, .open = reg_debug_consumers_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void rdev_init_debugfs(struct regulator_dev *rdev) { struct dentry *err_ptr = NULL; struct regulator *reg; struct regulator_ops *reg_ops; mode_t mode; if (IS_ERR(rdev) || rdev == NULL || IS_ERR(debugfs_root) || debugfs_root == NULL) { pr_err("Error-Bad Function Input\n"); goto error; } rdev->debugfs = debugfs_create_dir(rdev_get_name(rdev), debugfs_root); if (IS_ERR(rdev->debugfs) || !rdev->debugfs) { rdev_warn(rdev, "Failed to create debugfs directory\n"); rdev->debugfs = NULL; goto error; } debugfs_create_u32("use_count", 0444, rdev->debugfs, &rdev->use_count); debugfs_create_u32("open_count", 0444, rdev->debugfs, &rdev->open_count); debugfs_create_file("consumers", 0444, rdev->debugfs, rdev, &reg_consumers_fops); reg = regulator_get(NULL, rdev->desc->name); if (IS_ERR(reg) || reg == NULL) { pr_err("Error-Bad Function Input\n"); goto error; } reg_ops = rdev->desc->ops; mode = S_IRUGO | S_IWUSR; /* Enabled File */ if (mode) err_ptr = debugfs_create_file("enable", mode, rdev->debugfs, reg, &reg_enable_fops); if (IS_ERR(err_ptr)) { pr_err("Error-Could not create enable file\n"); debugfs_remove_recursive(rdev->debugfs); goto error; } mode = 0; /* Force-Disable File */ if (reg_ops->is_enabled) mode |= S_IRUGO; if (reg_ops->enable || 
reg_ops->disable) mode |= S_IWUSR; if (mode) err_ptr = debugfs_create_file("force_disable", mode, rdev->debugfs, reg, &reg_fdisable_fops); if (IS_ERR(err_ptr)) { pr_err("Error-Could not create force_disable file\n"); debugfs_remove_recursive(rdev->debugfs); goto error; } mode = 0; /* Voltage File */ if (reg_ops->get_voltage) mode |= S_IRUGO; if (reg_ops->set_voltage) mode |= S_IWUSR; if (mode) err_ptr = debugfs_create_file("voltage", mode, rdev->debugfs, reg, &reg_volt_fops); if (IS_ERR(err_ptr)) { pr_err("Error-Could not create voltage file\n"); debugfs_remove_recursive(rdev->debugfs); goto error; } mode = 0; /* Mode File */ if (reg_ops->get_mode) mode |= S_IRUGO; if (reg_ops->set_mode) mode |= S_IWUSR; if (mode) err_ptr = debugfs_create_file("mode", mode, rdev->debugfs, reg, &reg_mode_fops); if (IS_ERR(err_ptr)) { pr_err("Error-Could not create mode file\n"); debugfs_remove_recursive(rdev->debugfs); goto error; } mode = 0; /* Optimum Mode File */ if (reg_ops->get_mode) mode |= S_IRUGO; if (reg_ops->set_mode) mode |= S_IWUSR; if (mode) err_ptr = debugfs_create_file("optimum_mode", mode, rdev->debugfs, reg, &reg_optimum_mode_fops); if (IS_ERR(err_ptr)) { pr_err("Error-Could not create optimum_mode file\n"); debugfs_remove_recursive(rdev->debugfs); goto error; } error: return; } #else static inline void rdev_init_debugfs(struct regulator_dev *rdev) { return; } #endif /** * regulator_register - register regulator * @regulator_desc: regulator to register * @dev: struct device for the regulator * @init_data: platform provided init data, passed through by driver * @driver_data: private regulator data * @of_node: OpenFirmware node to parse for device tree bindings (may be * NULL). * * Called by regulator drivers to register a regulator. * Returns 0 on success. 
*/ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, struct device *dev, const struct regulator_init_data *init_data, void *driver_data, struct device_node *of_node) { const struct regulation_constraints *constraints = NULL; static atomic_t regulator_no = ATOMIC_INIT(0); struct regulator_dev *rdev; int ret, i; const char *supply = NULL; if (regulator_desc == NULL) return ERR_PTR(-EINVAL); if (regulator_desc->name == NULL || regulator_desc->ops == NULL) return ERR_PTR(-EINVAL); if (regulator_desc->type != REGULATOR_VOLTAGE && regulator_desc->type != REGULATOR_CURRENT) return ERR_PTR(-EINVAL); /* Only one of each should be implemented */ WARN_ON(regulator_desc->ops->get_voltage && regulator_desc->ops->get_voltage_sel); WARN_ON(regulator_desc->ops->set_voltage && regulator_desc->ops->set_voltage_sel); /* If we're using selectors we must implement list_voltage. */ if (regulator_desc->ops->get_voltage_sel && !regulator_desc->ops->list_voltage) { return ERR_PTR(-EINVAL); } if (regulator_desc->ops->set_voltage_sel && !regulator_desc->ops->list_voltage) { return ERR_PTR(-EINVAL); } rdev = kzalloc(sizeof(struct regulator_dev), GFP_KERNEL); if (rdev == NULL) return ERR_PTR(-ENOMEM); mutex_lock(&regulator_list_mutex); mutex_init(&rdev->mutex); rdev->reg_data = driver_data; rdev->owner = regulator_desc->owner; rdev->desc = regulator_desc; INIT_LIST_HEAD(&rdev->consumer_list); INIT_LIST_HEAD(&rdev->list); BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier); INIT_DELAYED_WORK(&rdev->disable_work, regulator_disable_work); /* preform any regulator specific init */ if (init_data && init_data->regulator_init) { ret = init_data->regulator_init(rdev->reg_data); if (ret < 0) goto clean; } /* register with sysfs */ rdev->dev.class = &regulator_class; rdev->dev.of_node = of_node; rdev->dev.parent = dev; dev_set_name(&rdev->dev, "regulator.%d", atomic_inc_return(&regulator_no) - 1); ret = device_register(&rdev->dev); if (ret != 0) { put_device(&rdev->dev); goto 
clean; } dev_set_drvdata(&rdev->dev, rdev); /* set regulator constraints */ if (init_data) constraints = &init_data->constraints; ret = set_machine_constraints(rdev, constraints); if (ret < 0) goto scrub; /* add attributes supported by this regulator */ ret = add_regulator_attributes(rdev); if (ret < 0) goto scrub; if (init_data && init_data->supply_regulator) supply = init_data->supply_regulator; else if (regulator_desc->supply_name) supply = regulator_desc->supply_name; if (supply) { struct regulator_dev *r; r = regulator_dev_lookup(dev, supply); if (!r) { dev_err(dev, "Failed to find supply %s\n", supply); ret = -EPROBE_DEFER; goto scrub; } ret = set_supply(rdev, r); if (ret < 0) goto scrub; /* Enable supply if rail is enabled */ if (rdev->desc->ops->is_enabled && rdev->desc->ops->is_enabled(rdev)) { ret = regulator_enable(rdev->supply); if (ret < 0) goto scrub; } } /* add consumers devices */ if (init_data) { for (i = 0; i < init_data->num_consumer_supplies; i++) { ret = set_consumer_device_supply(rdev, init_data->consumer_supplies[i].dev_name, init_data->consumer_supplies[i].supply); if (ret < 0) { dev_err(dev, "Failed to set supply %s\n", init_data->consumer_supplies[i].supply); goto unset_supplies; } } } list_add(&rdev->list, &regulator_list); mutex_unlock(&regulator_list_mutex); rdev_init_debugfs(rdev); return rdev; out: mutex_unlock(&regulator_list_mutex); return rdev; unset_supplies: unset_regulator_supplies(rdev); scrub: kfree(rdev->constraints); device_unregister(&rdev->dev); /* device core frees rdev */ rdev = ERR_PTR(ret); goto out; clean: kfree(rdev); rdev = ERR_PTR(ret); goto out; } EXPORT_SYMBOL_GPL(regulator_register); /** * regulator_unregister - unregister regulator * @rdev: regulator to unregister * * Called by regulator drivers to unregister a regulator. 
*/ void regulator_unregister(struct regulator_dev *rdev) { if (rdev == NULL) return; if (rdev->supply) regulator_put(rdev->supply); mutex_lock(&regulator_list_mutex); debugfs_remove_recursive(rdev->debugfs); flush_work_sync(&rdev->disable_work.work); WARN_ON(rdev->open_count); unset_regulator_supplies(rdev); list_del(&rdev->list); kfree(rdev->constraints); device_unregister(&rdev->dev); mutex_unlock(&regulator_list_mutex); } EXPORT_SYMBOL_GPL(regulator_unregister); /** * regulator_suspend_prepare - prepare regulators for system wide suspend * @state: system suspend state * * Configure each regulator with it's suspend operating parameters for state. * This will usually be called by machine suspend code prior to supending. */ int regulator_suspend_prepare(suspend_state_t state) { struct regulator_dev *rdev; int ret = 0; /* ON is handled by regulator active state */ if (state == PM_SUSPEND_ON) return -EINVAL; mutex_lock(&regulator_list_mutex); list_for_each_entry(rdev, &regulator_list, list) { mutex_lock(&rdev->mutex); ret = suspend_prepare(rdev, state); mutex_unlock(&rdev->mutex); if (ret < 0) { rdev_err(rdev, "failed to prepare\n"); goto out; } } out: mutex_unlock(&regulator_list_mutex); return ret; } EXPORT_SYMBOL_GPL(regulator_suspend_prepare); /** * regulator_suspend_finish - resume regulators from system wide suspend * * Turn on regulators that might be turned off by regulator_suspend_prepare * and that should be turned on according to the regulators properties. 
*/ int regulator_suspend_finish(void) { struct regulator_dev *rdev; int ret = 0, error; mutex_lock(&regulator_list_mutex); list_for_each_entry(rdev, &regulator_list, list) { struct regulator_ops *ops = rdev->desc->ops; mutex_lock(&rdev->mutex); if ((rdev->use_count > 0 || rdev->constraints->always_on) && ops->enable) { error = ops->enable(rdev); if (error) ret = error; } else { if (!has_full_constraints) goto unlock; if (!ops->disable) goto unlock; if (ops->is_enabled && !ops->is_enabled(rdev)) goto unlock; error = ops->disable(rdev); if (error) ret = error; } unlock: mutex_unlock(&rdev->mutex); } mutex_unlock(&regulator_list_mutex); return ret; } EXPORT_SYMBOL_GPL(regulator_suspend_finish); /** * regulator_has_full_constraints - the system has fully specified constraints * * Calling this function will cause the regulator API to disable all * regulators which have a zero use count and don't have an always_on * constraint in a late_initcall. * * The intention is that this will become the default behaviour in a * future kernel release so users are encouraged to use this facility * now. */ void regulator_has_full_constraints(void) { has_full_constraints = 1; } EXPORT_SYMBOL_GPL(regulator_has_full_constraints); /** * regulator_use_dummy_regulator - Provide a dummy regulator when none is found * * Calling this function will cause the regulator API to provide a * dummy regulator to consumers if no physical regulator is found, * allowing most consumers to proceed as though a regulator were * configured. This allows systems such as those with software * controllable regulators for the CPU core only to be brought up more * readily. */ void regulator_use_dummy_regulator(void) { board_wants_dummy_regulator = true; } EXPORT_SYMBOL_GPL(regulator_use_dummy_regulator); /** * regulator_suppress_info_printing - disable printing of info messages * * The regulator framework calls print_constraints() when a regulator is * registered. 
It also prints a disable message for each unused regulator in * regulator_init_complete(). * * Calling this function ensures that such messages do not end up in the * log. */ void regulator_suppress_info_printing(void) { suppress_info_printing = 1; } EXPORT_SYMBOL_GPL(regulator_suppress_info_printing); /** * rdev_get_drvdata - get rdev regulator driver data * @rdev: regulator * * Get rdev regulator driver private data. This call can be used in the * regulator driver context. */ void *rdev_get_drvdata(struct regulator_dev *rdev) { return rdev->reg_data; } EXPORT_SYMBOL_GPL(rdev_get_drvdata); /** * regulator_get_drvdata - get regulator driver data * @regulator: regulator * * Get regulator driver private data. This call can be used in the consumer * driver context when non API regulator specific functions need to be called. */ void *regulator_get_drvdata(struct regulator *regulator) { return regulator->rdev->reg_data; } EXPORT_SYMBOL_GPL(regulator_get_drvdata); /** * regulator_set_drvdata - set regulator driver data * @regulator: regulator * @data: data */ void regulator_set_drvdata(struct regulator *regulator, void *data) { regulator->rdev->reg_data = data; } EXPORT_SYMBOL_GPL(regulator_set_drvdata); /** * regulator_get_id - get regulator ID * @rdev: regulator */ int rdev_get_id(struct regulator_dev *rdev) { return rdev->desc->id; } EXPORT_SYMBOL_GPL(rdev_get_id); struct device *rdev_get_dev(struct regulator_dev *rdev) { return &rdev->dev; } EXPORT_SYMBOL_GPL(rdev_get_dev); void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data) { return reg_init_data->driver_data; } EXPORT_SYMBOL_GPL(regulator_get_init_drvdata); #ifdef CONFIG_DEBUG_FS static ssize_t supply_map_read_file(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL); ssize_t len, ret = 0; struct regulator_map *map; if (!buf) return -ENOMEM; list_for_each_entry(map, &regulator_map_list, list) { len = snprintf(buf + ret, PAGE_SIZE 
- ret, "%s -> %s.%s\n", rdev_get_name(map->regulator), map->dev_name, map->supply); if (len >= 0) ret += len; if (ret > PAGE_SIZE) { ret = PAGE_SIZE; break; } } ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); kfree(buf); return ret; } #endif static const struct file_operations supply_map_fops = { #ifdef CONFIG_DEBUG_FS .read = supply_map_read_file, .llseek = default_llseek, #endif }; static int __init regulator_init(void) { int ret; ret = class_register(&regulator_class); debugfs_root = debugfs_create_dir("regulator", NULL); if (!debugfs_root) pr_warn("regulator: Failed to create debugfs directory\n"); debugfs_create_file("supply_map", 0444, debugfs_root, NULL, &supply_map_fops); regulator_dummy_init(); return ret; } /* init early to allow our consumers to complete system booting */ core_initcall(regulator_init); static int __init regulator_init_complete(void) { struct regulator_dev *rdev; struct regulator_ops *ops; struct regulation_constraints *c; int enabled, ret; mutex_lock(&regulator_list_mutex); /* If we have a full configuration then disable any regulators * which are not in use or always_on. This will become the * default behaviour in the future. */ list_for_each_entry(rdev, &regulator_list, list) { ops = rdev->desc->ops; c = rdev->constraints; if (!ops->disable || (c && c->always_on)) continue; mutex_lock(&rdev->mutex); if (rdev->use_count) goto unlock; /* If we can't read the status assume it's on. */ if (ops->is_enabled) enabled = ops->is_enabled(rdev); else enabled = 1; if (!enabled) goto unlock; if (has_full_constraints) { /* We log since this may kill the system if it * goes wrong. */ if (!suppress_info_printing) rdev_info(rdev, "disabling\n"); ret = ops->disable(rdev); if (ret != 0) { rdev_err(rdev, "couldn't disable: %d\n", ret); } } else { /* The intention is that in future we will * assume that full constraints are provided * so warn even if we aren't going to do * anything here. 
*/ if (!suppress_info_printing) rdev_warn(rdev, "incomplete constraints, " "leaving on\n"); } unlock: mutex_unlock(&rdev->mutex); } mutex_unlock(&regulator_list_mutex); return 0; } late_initcall(regulator_init_complete);
gpl-2.0
anshulsahni/linux
arch/mn10300/kernel/mn10300-watchdog.c
1848
4914
/* MN10300 Watchdog timer
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 * - Derived from arch/i386/kernel/nmi.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/nmi.h>
#include <asm/processor.h>
#include <linux/atomic.h>
#include <asm/intctl-regs.h>
#include <asm/rtc-regs.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/gdb-stub.h>
#include <proc/clock.h>

/* Serializes the lockup report so overlapping CPUs don't interleave output. */
static DEFINE_SPINLOCK(watchdog_print_lock);
/* Non-zero when the watchdog was enabled via the "watchdog=" boot option. */
static unsigned int watchdog;
/* Watchdog NMI tick rate in Hz, derived from the hardware prescaler. */
static unsigned int watchdog_hz = 1;
/* Per-CPU count of consecutive watchdog ticks with no IRQ progress. */
unsigned int watchdog_alert_counter[NR_CPUS];

EXPORT_SYMBOL(touch_nmi_watchdog);

/*
 * the best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its timer makes IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * since NMIs dont listen to _any_ locks, we have to be extremely
 * careful not to rely on unsafe variables. The printk might lock
 * up though, so we have to break up any console locks first ...
 * [when there will be more tty-related locks, break them up
 * here too!]
 */
/* Last-seen per-CPU IRQ counts, compared against on each watchdog NMI. */
static unsigned int last_irq_sums[NR_CPUS];

/*
 * Sanity-check that the watchdog NMI is actually firing: snapshot the NMI
 * count, spin with IRQs enabled for ~10 watchdog ticks, and verify the
 * count advanced.  Returns 0 on success, -1 if the watchdog looks stuck.
 */
int __init check_watchdog(void)
{
	irq_cpustat_t tmp[1];

	printk(KERN_INFO "Testing Watchdog... ");

	memcpy(tmp, irq_stat, sizeof(tmp));

	local_irq_enable();
	mdelay((10 * 1000) / watchdog_hz);	/* wait 10 ticks */
	local_irq_disable();

	/* Expect more than 5 NMIs in that window; otherwise report stuck. */
	if (nmi_count(0) - tmp[0].__nmi_count <= 5) {
		printk(KERN_WARNING "CPU#%d: Watchdog appears to be stuck!\n",
		       0);
		return -1;
	}

	printk(KERN_INFO "OK.\n");

	/* now that we know it works we can reduce NMI frequency to something
	 * more reasonable; makes a difference in some configs */
	watchdog_hz = 1;

	return 0;
}

/*
 * "watchdog=" boot-option parser.  Only "watchdog=1" enables the watchdog;
 * any other value is ignored.  Installs the WDT exception stub, programs the
 * prescaler, and computes the resulting tick rate in watchdog_hz.
 */
static int __init setup_watchdog(char *str)
{
	unsigned tmp;
	int opt;
	u8 ctr;

	get_option(&str, &opt);
	if (opt != 1)
		return 0;

	watchdog = opt;
	if (watchdog) {
		set_intr_stub(EXCEP_WDT, watchdog_handler);
		ctr = WDCTR_WDCK_65536th;
		/* Reset the counter, then latch the clock-select; the dummy
		 * read flushes the write (register writes need a read-back). */
		WDCTR = WDCTR_WDRST | ctr;
		WDCTR = ctr;
		tmp = WDCTR;

		/* Convert the prescaler period to Hz, rounding to nearest. */
		tmp = __muldiv64u(1 << (16 + ctr * 2), 1000000,
				  MN10300_WDCLK);
		tmp = 1000000000 / tmp;
		watchdog_hz = (tmp + 500) / 1000;
	}

	return 1;
}

__setup("watchdog=", setup_watchdog);

/*
 * Start the watchdog counter (if enabled at boot) and run the self-test.
 * Each WDCTR write is followed by a read-back to flush it to the device.
 */
void __init watchdog_go(void)
{
	u8 wdt;

	if (watchdog) {
		printk(KERN_INFO "Watchdog: running at %uHz\n", watchdog_hz);
		wdt = WDCTR & ~WDCTR_WDCNE;
		WDCTR = wdt | WDCTR_WDRST;
		wdt = WDCTR;
		WDCTR = wdt | WDCTR_WDCNE;
		wdt = WDCTR;

		check_watchdog();
	}
}

#ifdef CONFIG_SMP
/* SMP helper: dump this CPU's registers (invoked via cross-CPU NMI call). */
static void watchdog_dump_register(void *dummy)
{
	printk(KERN_ERR "--- Register Dump (CPU%d) ---\n", CPUID);
	show_registers(current_frame());
}
#endif

/*
 * Watchdog NMI handler.  Stops the counter, acknowledges the NMI, then
 * checks every online CPU's IRQ count for forward progress.  A CPU whose
 * count has not changed for 5 seconds' worth of ticks is reported as locked
 * up (with register dumps) and the task is killed.  Finally the watchdog
 * counter is reset and re-enabled.
 */
asmlinkage
void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
{
	/*
	 * Since current-> is always on the stack, and we always switch
	 * the stack NMI-atomically, it's safe to use smp_processor_id().
	 */
	int sum, cpu;
	int irq = NMIIRQ;
	u8 wdt, tmp;

	/* Stop the watchdog while we run; read-backs flush the writes. */
	wdt = WDCTR & ~WDCTR_WDCNE;
	WDCTR = wdt;
	tmp = WDCTR;
	NMICR = NMICR_WDIF;

	nmi_count(smp_processor_id())++;
	kstat_incr_irq_this_cpu(irq);

	for_each_online_cpu(cpu) {

		sum = irq_stat[cpu].__irq_count;

		/* Skip the lockup check while the GDB stub is single-stepping
		 * (the stepped CPU legitimately takes no IRQs). */
		if ((last_irq_sums[cpu] == sum)
#if defined(CONFIG_GDBSTUB) && defined(CONFIG_SMP)
		    && !(CHK_GDBSTUB_BUSY()
			 || atomic_read(&cpu_doing_single_step))
#endif
		    ) {
			/*
			 * Ayiee, looks like this CPU is stuck ...
			 * wait a few IRQs (5 seconds) before doing the oops ...
			 */
			watchdog_alert_counter[cpu]++;
			if (watchdog_alert_counter[cpu] == 5 * watchdog_hz) {
				spin_lock(&watchdog_print_lock);
				/*
				 * We are in trouble anyway, lets at least try
				 * to get a message out.
				 */
				bust_spinlocks(1);
				printk(KERN_ERR
				       "NMI Watchdog detected LOCKUP on CPU%d,"
				       " pc %08lx, registers:\n",
				       cpu, regs->pc);
#ifdef CONFIG_SMP
				printk(KERN_ERR
				       "--- Register Dump (CPU%d) ---\n",
				       CPUID);
#endif
				show_registers(regs);
#ifdef CONFIG_SMP
				smp_nmi_call_function(watchdog_dump_register,
						      NULL, 1);
#endif
				printk(KERN_NOTICE "console shuts up ...\n");
				console_silent();
				spin_unlock(&watchdog_print_lock);
				bust_spinlocks(0);
#ifdef CONFIG_GDBSTUB
				if (CHK_GDBSTUB_BUSY_AND_ACTIVE())
					gdbstub_exception(regs, excep);
				else
					gdbstub_intercept(regs, excep);
#endif
				do_exit(SIGSEGV);
			}
		} else {
			/* CPU made progress: reset its alert state. */
			last_irq_sums[cpu] = sum;
			watchdog_alert_counter[cpu] = 0;
		}
	}

	/* Restart the watchdog counter for the next interval. */
	WDCTR = wdt | WDCTR_WDRST;
	tmp = WDCTR;
	WDCTR = wdt | WDCTR_WDCNE;
	tmp = WDCTR;
}
gpl-2.0
Shabbypenguin/Jellybean_kernel
arch/arm/mach-msm/board-msm8x60.c
2360
2613
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/irq.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/hardware/gic.h> #include <mach/board.h> #include <mach/msm_iomap.h> static void __init msm8x60_map_io(void) { msm_map_msm8x60_io(); } static void __init msm8x60_init_irq(void) { unsigned int i; gic_init(0, GIC_PPI_START, MSM_QGIC_DIST_BASE, (void *)MSM_QGIC_CPU_BASE); /* Edge trigger PPIs except AVS_SVICINT and AVS_SVICINTSWDONE */ writel(0xFFFFD7FF, MSM_QGIC_DIST_BASE + GIC_DIST_CONFIG + 4); /* RUMI does not adhere to GIC spec by enabling STIs by default. * Enable/clear is supposed to be RO for STIs, but is RW on RUMI. */ if (!machine_is_msm8x60_sim()) writel(0x0000FFFF, MSM_QGIC_DIST_BASE + GIC_DIST_ENABLE_SET); /* FIXME: Not installing AVS_SVICINT and AVS_SVICINTSWDONE yet * as they are configured as level, which does not play nice with * handle_percpu_irq. 
*/ for (i = GIC_PPI_START; i < GIC_SPI_START; i++) { if (i != AVS_SVICINT && i != AVS_SVICINTSWDONE) irq_set_handler(i, handle_percpu_irq); } } static void __init msm8x60_init(void) { } MACHINE_START(MSM8X60_RUMI3, "QCT MSM8X60 RUMI3") .map_io = msm8x60_map_io, .init_irq = msm8x60_init_irq, .init_machine = msm8x60_init, .timer = &msm_timer, MACHINE_END MACHINE_START(MSM8X60_SURF, "QCT MSM8X60 SURF") .map_io = msm8x60_map_io, .init_irq = msm8x60_init_irq, .init_machine = msm8x60_init, .timer = &msm_timer, MACHINE_END MACHINE_START(MSM8X60_SIM, "QCT MSM8X60 SIMULATOR") .map_io = msm8x60_map_io, .init_irq = msm8x60_init_irq, .init_machine = msm8x60_init, .timer = &msm_timer, MACHINE_END MACHINE_START(MSM8X60_FFA, "QCT MSM8X60 FFA") .map_io = msm8x60_map_io, .init_irq = msm8x60_init_irq, .init_machine = msm8x60_init, .timer = &msm_timer, MACHINE_END
gpl-2.0
EPDCenterSpain/bq-DC-v1
arch/arm/mach-omap2/omap_phy_internal.c
2616
6355
/* * This file configures the internal USB PHY in OMAP4430. Used * with TWL6030 transceiver and MUSB on OMAP4430. * * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Author: Hema HK <hemahk@ti.com> * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/types.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/err.h> #include <linux/usb.h> #include <plat/usb.h> #include "control.h" /* OMAP control module register for UTMI PHY */ #define CONTROL_DEV_CONF 0x300 #define PHY_PD 0x1 #define USBOTGHS_CONTROL 0x33c #define AVALID BIT(0) #define BVALID BIT(1) #define VBUSVALID BIT(2) #define SESSEND BIT(3) #define IDDIG BIT(4) static struct clk *phyclk, *clk48m, *clk32k; static void __iomem *ctrl_base; static int usbotghs_control; int omap4430_phy_init(struct device *dev) { ctrl_base = ioremap(OMAP443X_SCM_BASE, SZ_1K); if (!ctrl_base) { pr_err("control module ioremap failed\n"); return -ENOMEM; } /* Power down the phy */ __raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF); if (!dev) { iounmap(ctrl_base); return 0; } phyclk = clk_get(dev, "ocp2scp_usb_phy_ick"); if (IS_ERR(phyclk)) { dev_err(dev, "cannot clk_get ocp2scp_usb_phy_ick\n"); iounmap(ctrl_base); return PTR_ERR(phyclk); } clk48m = clk_get(dev, "ocp2scp_usb_phy_phy_48m"); if (IS_ERR(clk48m)) { dev_err(dev, "cannot 
clk_get ocp2scp_usb_phy_phy_48m\n"); clk_put(phyclk); iounmap(ctrl_base); return PTR_ERR(clk48m); } clk32k = clk_get(dev, "usb_phy_cm_clk32k"); if (IS_ERR(clk32k)) { dev_err(dev, "cannot clk_get usb_phy_cm_clk32k\n"); clk_put(phyclk); clk_put(clk48m); iounmap(ctrl_base); return PTR_ERR(clk32k); } return 0; } int omap4430_phy_set_clk(struct device *dev, int on) { static int state; if (on && !state) { /* Enable the phy clocks */ clk_enable(phyclk); clk_enable(clk48m); clk_enable(clk32k); state = 1; } else if (state) { /* Disable the phy clocks */ clk_disable(phyclk); clk_disable(clk48m); clk_disable(clk32k); state = 0; } return 0; } int omap4430_phy_power(struct device *dev, int ID, int on) { if (on) { if (ID) /* enable VBUS valid, IDDIG groung */ __raw_writel(AVALID | VBUSVALID, ctrl_base + USBOTGHS_CONTROL); else /* * Enable VBUS Valid, AValid and IDDIG * high impedance */ __raw_writel(IDDIG | AVALID | VBUSVALID, ctrl_base + USBOTGHS_CONTROL); } else { /* Enable session END and IDIG to high impedance. 
*/ __raw_writel(SESSEND | IDDIG, ctrl_base + USBOTGHS_CONTROL); } return 0; } int omap4430_phy_suspend(struct device *dev, int suspend) { if (suspend) { /* Disable the clocks */ omap4430_phy_set_clk(dev, 0); /* Power down the phy */ __raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF); /* save the context */ usbotghs_control = __raw_readl(ctrl_base + USBOTGHS_CONTROL); } else { /* Enable the internel phy clcoks */ omap4430_phy_set_clk(dev, 1); /* power on the phy */ if (__raw_readl(ctrl_base + CONTROL_DEV_CONF) & PHY_PD) { __raw_writel(~PHY_PD, ctrl_base + CONTROL_DEV_CONF); mdelay(200); } /* restore the context */ __raw_writel(usbotghs_control, ctrl_base + USBOTGHS_CONTROL); } return 0; } int omap4430_phy_exit(struct device *dev) { if (ctrl_base) iounmap(ctrl_base); if (phyclk) clk_put(phyclk); if (clk48m) clk_put(clk48m); if (clk32k) clk_put(clk32k); return 0; } void am35x_musb_reset(void) { u32 regval; /* Reset the musb interface */ regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET); regval |= AM35XX_USBOTGSS_SW_RST; omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET); regval &= ~AM35XX_USBOTGSS_SW_RST; omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET); regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET); } void am35x_musb_phy_power(u8 on) { unsigned long timeout = jiffies + msecs_to_jiffies(100); u32 devconf2; if (on) { /* * Start the on-chip PHY and its PLL. */ devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2); devconf2 &= ~(CONF2_RESET | CONF2_PHYPWRDN | CONF2_OTGPWRDN); devconf2 |= CONF2_PHY_PLLON; omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2); pr_info(KERN_INFO "Waiting for PHY clock good...\n"); while (!(omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2) & CONF2_PHYCLKGD)) { cpu_relax(); if (time_after(jiffies, timeout)) { pr_err(KERN_ERR "musb PHY clock good timed out\n"); break; } } } else { /* * Power down the on-chip PHY. 
*/ devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2); devconf2 &= ~CONF2_PHY_PLLON; devconf2 |= CONF2_PHYPWRDN | CONF2_OTGPWRDN; omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2); } } void am35x_musb_clear_irq(void) { u32 regval; regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR); regval |= AM35XX_USBOTGSS_INT_CLR; omap_ctrl_writel(regval, AM35XX_CONTROL_LVL_INTR_CLEAR); regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR); } void am35x_set_mode(u8 musb_mode) { u32 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2); devconf2 &= ~CONF2_OTGMODE; switch (musb_mode) { #ifdef CONFIG_USB_MUSB_HDRC_HCD case MUSB_HOST: /* Force VBUS valid, ID = 0 */ devconf2 |= CONF2_FORCE_HOST; break; #endif #ifdef CONFIG_USB_GADGET_MUSB_HDRC case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */ devconf2 |= CONF2_FORCE_DEVICE; break; #endif #ifdef CONFIG_USB_MUSB_OTG case MUSB_OTG: /* Don't override the VBUS/ID comparators */ devconf2 |= CONF2_NO_OVERRIDE; break; #endif default: pr_info(KERN_INFO "Unsupported mode %u\n", musb_mode); } omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2); }
gpl-2.0
Evervolv/android_kernel_hp_tenderloin
fs/reiserfs/stree.c
3896
65952
/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

/*
 * Written by Anatoly P. Pinchuk pap@namesys.botik.ru
 * Programm System Institute
 * Pereslavl-Zalessky Russia
 */

/*
 * This file contains functions dealing with S+tree
 *
 * B_IS_IN_TREE
 * copy_item_head
 * comp_short_keys
 * comp_keys
 * comp_short_le_keys
 * le_key2cpu_key
 * comp_le_keys
 * bin_search
 * get_lkey
 * get_rkey
 * key_in_buffer
 * decrement_bcount
 * reiserfs_check_path
 * pathrelse_and_restore
 * pathrelse
 * search_by_key_reada
 * search_by_key
 * search_for_position_by_key
 * comp_items
 * prepare_for_direct_item
 * prepare_for_direntry_item
 * prepare_for_delete_or_cut
 * calc_deleted_bytes_number
 * init_tb_struct
 * padd_item
 * reiserfs_delete_item
 * reiserfs_delete_solid_item
 * reiserfs_delete_object
 * maybe_indirect_to_direct
 * indirect_to_direct_roll_back
 * reiserfs_cut_from_item
 * truncate_directory
 * reiserfs_do_truncate
 * reiserfs_paste_into_item
 * reiserfs_insert_item
 */

#include <linux/time.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include "reiserfs.h"
#include <linux/buffer_head.h>
#include <linux/quotaops.h>

/* Does the buffer contain a disk block which is in the tree.
   A node that has been cut out of the tree carries FREE_LEVEL. */
inline int B_IS_IN_TREE(const struct buffer_head *bh)
{

	RFALSE(B_LEVEL(bh) > MAX_HEIGHT,
	       "PAP-1010: block (%b) has too big level (%z)", bh, bh);

	return (B_LEVEL(bh) != FREE_LEVEL);
}

//
// to get item head in le form
//
inline void copy_item_head(struct item_head *to,
			   const struct item_head *from)
{
	memcpy(to, from, IH_SIZE);
}

/* k1 is pointer to on-disk structure which is stored in little-endian
   form. k2 is pointer to cpu variable. For key of items of the same
   object this returns 0.
   Returns: -1 if key1 < key2
   0 if key1 == key2
   1 if key1 > key2 */
inline int comp_short_keys(const struct reiserfs_key *le_key,
			   const struct cpu_key *cpu_key)
{
	__u32 n;

	/* compare directory id first, then object id */
	n = le32_to_cpu(le_key->k_dir_id);
	if (n < cpu_key->on_disk_key.k_dir_id)
		return -1;
	if (n > cpu_key->on_disk_key.k_dir_id)
		return 1;
	n = le32_to_cpu(le_key->k_objectid);
	if (n < cpu_key->on_disk_key.k_objectid)
		return -1;
	if (n > cpu_key->on_disk_key.k_objectid)
		return 1;
	return 0;
}

/* k1 is pointer to on-disk structure which is stored in little-endian
   form. k2 is pointer to cpu variable.
   Compare keys using all 4 key fields.
   Returns: -1 if key1 < key2 0
   if key1 = key2 1 if key1 > key2 */
static inline int comp_keys(const struct reiserfs_key *le_key,
			    const struct cpu_key *cpu_key)
{
	int retval;

	retval = comp_short_keys(le_key, cpu_key);
	if (retval)
		return retval;

	if (le_key_k_offset(le_key_version(le_key), le_key) <
	    cpu_key_k_offset(cpu_key))
		return -1;
	if (le_key_k_offset(le_key_version(le_key), le_key) >
	    cpu_key_k_offset(cpu_key))
		return 1;

	/* a 3-component cpu key ignores the type field */
	if (cpu_key->key_length == 3)
		return 0;

	/* this part is needed only when tail conversion is in progress */
	if (le_key_k_type(le_key_version(le_key), le_key) <
	    cpu_key_k_type(cpu_key))
		return -1;

	if (le_key_k_type(le_key_version(le_key), le_key) >
	    cpu_key_k_type(cpu_key))
		return 1;

	return 0;
}

/* Compare only dir_id and objectid of two little-endian on-disk keys
   (REISERFS_SHORT_KEY_LEN 32-bit words). */
inline int comp_short_le_keys(const struct reiserfs_key *key1,
			      const struct reiserfs_key *key2)
{
	__u32 *k1_u32, *k2_u32;
	int key_length = REISERFS_SHORT_KEY_LEN;

	k1_u32 = (__u32 *) key1;
	k2_u32 = (__u32 *) key2;
	for (; key_length--; ++k1_u32, ++k2_u32) {
		if (le32_to_cpu(*k1_u32) < le32_to_cpu(*k2_u32))
			return -1;
		if (le32_to_cpu(*k1_u32) > le32_to_cpu(*k2_u32))
			return 1;
	}
	return 0;
}

/* Convert a little-endian on-disk key to a cpu key, decoding offset
   and type according to the key's format version. */
inline void le_key2cpu_key(struct cpu_key *to, const struct reiserfs_key *from)
{
	int version;
	to->on_disk_key.k_dir_id = le32_to_cpu(from->k_dir_id);
	to->on_disk_key.k_objectid = le32_to_cpu(from->k_objectid);

	// find out version of the key
	version = le_key_version(from);
	to->version = version;
	to->on_disk_key.k_offset = le_key_k_offset(version, from);
	to->on_disk_key.k_type = le_key_k_type(version, from);
}

// this does not say which one is bigger, it only returns 1 if keys
// are not equal, 0 otherwise
inline int comp_le_keys(const struct reiserfs_key *k1,
			const struct reiserfs_key *k2)
{
	return memcmp(k1, k2, sizeof(struct reiserfs_key));
}

/**************************************************************************
 *  Binary search toolkit function                                        *
 *  Search for an item in the array by the item key                       *
 *  Returns:    1 if found,  0 if not found;                              *
 *        *pos = number of the searched element if found, else the        *
 *        number of the first element that is larger than key.            *
 **************************************************************************/
/* For those not familiar with binary search: lbound is the leftmost item
   that it could be, rbound the rightmost item that it could be.  We examine
   the item halfway between lbound and rbound, and that tells us either that
   we can increase lbound, or decrease rbound, or that we have found it, or
   if lbound <= rbound that there are no possible items, and we have not
   found it. With each examination we cut the number of possible items it
   could be by one more than half rounded down, or we find it. */
static inline int bin_search(const void *key,	/* Key to search for. */
			     const void *base,	/* First item in the array. */
			     int num,	/* Number of items in the array. */
			     int width,	/* Item size in the array.
					   searched. Lest the reader be
					   confused, note that this is crafted
					   as a general function, and when it
					   is applied specifically to the array
					   of item headers in a node, width
					   is actually the item header size not
					   the item size. */
			     int *pos	/* Number of the searched for element. */
    )
{
	int rbound, lbound, j;

	for (j = ((rbound = num - 1) + (lbound = 0)) / 2;
	     lbound <= rbound; j = (rbound + lbound) / 2)
		switch (comp_keys
			((struct reiserfs_key *)((char *)base + j * width),
			 (struct cpu_key *)key)) {
		case -1:
			lbound = j + 1;
			continue;
		case 1:
			rbound = j - 1;
			continue;
		case 0:
			*pos = j;
			return ITEM_FOUND;	/* Key found in the array. */
		}

	/* bin_search did not find given key, it returns position of key,
	   that is minimal and greater than the given one. */
	*pos = lbound;
	return ITEM_NOT_FOUND;
}

/* Minimal possible key. It is never in the tree. */
const struct reiserfs_key MIN_KEY = { 0, 0, {{0, 0},} };

/* Maximal possible key. It is never in the tree. */
static const struct reiserfs_key MAX_KEY = {
	__constant_cpu_to_le32(0xffffffff),
	__constant_cpu_to_le32(0xffffffff),
	{{__constant_cpu_to_le32(0xffffffff),
	  __constant_cpu_to_le32(0xffffffff)},}
};

/* Get delimiting key of the buffer by looking for it in the buffers in the
   path, starting from the bottom of the path, and going upwards.  We must
   check the path's validity at each step.  If the key is not in the path,
   there is no delimiting key in the tree (buffer is first or last buffer
   in tree), and in this case we return a special key, either MIN_KEY or
   MAX_KEY. */
static inline const struct reiserfs_key *get_lkey(const struct treepath *chk_path,
						  const struct super_block *sb)
{
	int position, path_offset = chk_path->path_length;
	struct buffer_head *parent;

	RFALSE(path_offset < FIRST_PATH_ELEMENT_OFFSET,
	       "PAP-5010: invalid offset in the path");

	/* While not higher in path than first element. */
	while (path_offset-- > FIRST_PATH_ELEMENT_OFFSET) {

		RFALSE(!buffer_uptodate
		       (PATH_OFFSET_PBUFFER(chk_path, path_offset)),
		       "PAP-5020: parent is not uptodate");

		/* Parent at the path is not in the tree now. */
		if (!B_IS_IN_TREE
		    (parent =
		     PATH_OFFSET_PBUFFER(chk_path, path_offset)))
			return &MAX_KEY;
		/* Check whether position in the parent is correct. */
		if ((position =
		     PATH_OFFSET_POSITION(chk_path,
					  path_offset)) >
		    B_NR_ITEMS(parent))
			return &MAX_KEY;
		/* Check whether parent at the path really points to the child. */
		if (B_N_CHILD_NUM(parent, position) !=
		    PATH_OFFSET_PBUFFER(chk_path, path_offset + 1)->b_blocknr)
			return &MAX_KEY;
		/* Return delimiting key if position in the parent is not equal to zero. */
		if (position)
			return B_N_PDELIM_KEY(parent, position - 1);
	}
	/* Return MIN_KEY if we are in the root of the buffer tree. */
	if (PATH_OFFSET_PBUFFER(chk_path, FIRST_PATH_ELEMENT_OFFSET)->
	    b_blocknr == SB_ROOT_BLOCK(sb))
		return &MIN_KEY;
	return &MAX_KEY;
}

/* Get delimiting key of the buffer at the path and its right neighbor. */
inline const struct reiserfs_key *get_rkey(const struct treepath *chk_path,
					   const struct super_block *sb)
{
	int position, path_offset = chk_path->path_length;
	struct buffer_head *parent;

	RFALSE(path_offset < FIRST_PATH_ELEMENT_OFFSET,
	       "PAP-5030: invalid offset in the path");

	while (path_offset-- > FIRST_PATH_ELEMENT_OFFSET) {

		RFALSE(!buffer_uptodate
		       (PATH_OFFSET_PBUFFER(chk_path, path_offset)),
		       "PAP-5040: parent is not uptodate");

		/* Parent at the path is not in the tree now. */
		if (!B_IS_IN_TREE
		    (parent =
		     PATH_OFFSET_PBUFFER(chk_path, path_offset)))
			return &MIN_KEY;
		/* Check whether position in the parent is correct. */
		if ((position =
		     PATH_OFFSET_POSITION(chk_path,
					  path_offset)) >
		    B_NR_ITEMS(parent))
			return &MIN_KEY;
		/* Check whether parent at the path really points to the child. */
		if (B_N_CHILD_NUM(parent, position) !=
		    PATH_OFFSET_PBUFFER(chk_path, path_offset + 1)->b_blocknr)
			return &MIN_KEY;
		/* Return delimiting key if position in the parent is not the last one. */
		if (position != B_NR_ITEMS(parent))
			return B_N_PDELIM_KEY(parent, position);
	}
	/* Return MAX_KEY if we are in the root of the buffer tree. */
	if (PATH_OFFSET_PBUFFER(chk_path, FIRST_PATH_ELEMENT_OFFSET)->
	    b_blocknr == SB_ROOT_BLOCK(sb))
		return &MAX_KEY;
	return &MIN_KEY;
}

/* Check whether a key is contained in the tree rooted from a buffer at a path. */
/* This works by looking at the left and right delimiting keys for the buffer
   in the last path_element in the path.  These delimiting keys are stored
   at least one level above that buffer in the tree. If the buffer is the
   first or last node in the tree order then one of the delimiting keys may
   be absent, and in this case get_lkey and get_rkey return a special key
   which is MIN_KEY or MAX_KEY. */
static inline int key_in_buffer(struct treepath *chk_path,	/* Path which should be checked. */
				const struct cpu_key *key,	/* Key which should be checked. */
				struct super_block *sb
    )
{

	RFALSE(!key || chk_path->path_length < FIRST_PATH_ELEMENT_OFFSET
	       || chk_path->path_length > MAX_HEIGHT,
	       "PAP-5050: pointer to the key(%p) is NULL or invalid path length(%d)",
	       key, chk_path->path_length);
	RFALSE(!PATH_PLAST_BUFFER(chk_path)->b_bdev,
	       "PAP-5060: device must not be NODEV");

	if (comp_keys(get_lkey(chk_path, sb), key) == 1)
		/* left delimiting key is bigger, that the key we look for */
		return 0;
	/*  if ( comp_keys(key, get_rkey(chk_path, sb)) != -1 ) */
	if (comp_keys(get_rkey(chk_path, sb), key) != 1)
		/* key must be less than right delimiting key */
		return 0;
	return 1;
}

/* Debug check: a path must have been released (path_length reset)
   before it is discarded or reused. */
int reiserfs_check_path(struct treepath *p)
{
	RFALSE(p->path_length != ILLEGAL_PATH_ELEMENT_OFFSET,
	       "path not properly relsed");
	return 0;
}

/* Drop the reference to each buffer in a path and restore
 * dirty bits clean when preparing the buffer for the log.
 * This version should only be called from fix_nodes() */
void pathrelse_and_restore(struct super_block *sb,
			   struct treepath *search_path)
{
	int path_offset = search_path->path_length;

	RFALSE(path_offset < ILLEGAL_PATH_ELEMENT_OFFSET,
	       "clm-4000: invalid path offset");

	while (path_offset > ILLEGAL_PATH_ELEMENT_OFFSET) {
		struct buffer_head *bh;
		bh = PATH_OFFSET_PBUFFER(search_path, path_offset--);
		reiserfs_restore_prepared_buffer(sb, bh);
		brelse(bh);
	}
	/* mark the path empty so reiserfs_check_path() is satisfied */
	search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
}

/* Drop the reference to each buffer in a path */
void pathrelse(struct treepath *search_path)
{
	int path_offset = search_path->path_length;

	RFALSE(path_offset < ILLEGAL_PATH_ELEMENT_OFFSET,
	       "PAP-5090: invalid path offset");

	while (path_offset > ILLEGAL_PATH_ELEMENT_OFFSET)
		brelse(PATH_OFFSET_PBUFFER(search_path, path_offset--));

	search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
}

/* returns 1 if buf looks like a valid leaf node, 0 otherwise;
   sanity-checks the block head, free space and the item head table */
static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
{
	struct block_head *blkh;
	struct item_head *ih;
	int used_space;
	int prev_location;
	int i;
	int nr;

	blkh = (struct block_head *)buf;
	if (blkh_level(blkh) != DISK_LEAF_NODE_LEVEL) {
		reiserfs_warning(NULL, "reiserfs-5080",
				 "this should be caught earlier");
		return 0;
	}

	nr = blkh_nr_item(blkh);
	if (nr < 1 || nr > ((blocksize - BLKH_SIZE) / (IH_SIZE + MIN_ITEM_LEN))) {
		/* item number is too big or too small */
		reiserfs_warning(NULL, "reiserfs-5081",
				 "nr_item seems wrong: %z", bh);
		return 0;
	}
	ih = (struct item_head *)(buf + BLKH_SIZE) + nr - 1;
	used_space = BLKH_SIZE + IH_SIZE * nr + (blocksize - ih_location(ih));
	if (used_space != blocksize - blkh_free_space(blkh)) {
		/* free space does not match to calculated amount of use space */
		reiserfs_warning(NULL, "reiserfs-5082",
				 "free space seems wrong: %z", bh);
		return 0;
	}
	// FIXME: it is_leaf will hit performance too much - we may have
	// return 1 here

	/* check tables of item heads */
	ih = (struct item_head *)(buf + BLKH_SIZE);
	prev_location = blocksize;
	for (i = 0; i < nr; i++, ih++) {
		if (le_ih_k_type(ih) == TYPE_ANY) {
			reiserfs_warning(NULL, "reiserfs-5083",
					 "wrong item type for item %h",
					 ih);
			return 0;
		}
		if (ih_location(ih) >= blocksize
		    || ih_location(ih) < IH_SIZE * nr) {
			reiserfs_warning(NULL, "reiserfs-5084",
					 "item location seems wrong: %h",
					 ih);
			return 0;
		}
		if (ih_item_len(ih) < 1
		    || ih_item_len(ih) > MAX_ITEM_LEN(blocksize)) {
			reiserfs_warning(NULL, "reiserfs-5085",
					 "item length seems wrong: %h",
					 ih);
			return 0;
		}
		/* item bodies are packed back-to-back from the end of the block */
		if (prev_location - ih_location(ih) != ih_item_len(ih)) {
			reiserfs_warning(NULL, "reiserfs-5086",
					 "item location seems wrong "
					 "(second one): %h", ih);
			return 0;
		}
		prev_location = ih_location(ih);
	}

	// one may imagine much more checks
	return 1;
}

/* returns 1 if buf looks like an internal node, 0 otherwise */
static int is_internal(char *buf, int blocksize, struct buffer_head *bh)
{
	struct block_head *blkh;
	int nr;
	int used_space;

	blkh = (struct block_head *)buf;
	nr = blkh_level(blkh);
	if (nr <= DISK_LEAF_NODE_LEVEL || nr > MAX_HEIGHT) {
		/* this level is not possible for internal nodes */
		reiserfs_warning(NULL, "reiserfs-5087",
				 "this should be caught earlier");
		return 0;
	}

	nr = blkh_nr_item(blkh);
	if (nr > (blocksize - BLKH_SIZE - DC_SIZE) / (KEY_SIZE + DC_SIZE)) {
		/* for internal which is not root we might check min number of keys */
		reiserfs_warning(NULL, "reiserfs-5088",
				 "number of key seems wrong: %z", bh);
		return 0;
	}

	/* an internal node with nr keys has nr+1 child pointers */
	used_space = BLKH_SIZE + KEY_SIZE * nr + DC_SIZE * (nr + 1);
	if (used_space != blocksize - blkh_free_space(blkh)) {
		reiserfs_warning(NULL, "reiserfs-5089",
				 "free space seems wrong: %z", bh);
		return 0;
	}
	// one may imagine much more checks
	return 1;
}

// make sure that bh contains formatted node of reiserfs tree of
// 'level'-th level
static int is_tree_node(struct buffer_head *bh, int level)
{
	if (B_LEVEL(bh) != level) {
		reiserfs_warning(NULL, "reiserfs-5090", "node level %d does "
				 "not match to the expected one %d",
				 B_LEVEL(bh), level);
		return 0;
	}
	if (level == DISK_LEAF_NODE_LEVEL)
		return is_leaf(bh->b_data, bh->b_size, bh);

	return is_internal(bh->b_data, bh->b_size, bh);
}

#define SEARCH_BY_KEY_READA 16

/*
 * The function is NOT SCHEDULE-SAFE!
 * It might unlock the write lock if we needed to wait for a block
 * to be read. Note that in this case it won't recover the lock to avoid
 * high contention resulting from too much lock requests, especially
 * the caller (search_by_key) will perform other schedule-unsafe
 * operations just after calling this function.
 *
 * @return true if we have unlocked
 */
static bool search_by_key_reada(struct super_block *s,
				struct buffer_head **bh,
				b_blocknr_t *b, int num)
{
	int i, j;
	bool unlocked = false;

	for (i = 0; i < num; i++) {
		bh[i] = sb_getblk(s, b[i]);
	}
	/*
	 * We are going to read some blocks on which we
	 * have a reference. It's safe, though we might be
	 * reading blocks concurrently changed if we release
	 * the lock. But it's still fine because we check later
	 * if the tree changed
	 */
	for (j = 0; j < i; j++) {
		/*
		 * note, this needs attention if we are getting rid of the BKL
		 * you have to make sure the prepared bit isn't set on this buffer
		 */
		if (!buffer_uptodate(bh[j])) {
			if (!unlocked) {
				reiserfs_write_unlock(s);
				unlocked = true;
			}
			ll_rw_block(READA, 1, bh + j);
		}
		brelse(bh[j]);
	}
	return unlocked;
}

/**************************************************************************
 * Algorithm   SearchByKey                                                *
 *             look for item in the Disk S+Tree by its key                *
 * Input:  sb   -  super block                                            *
 *         key  - pointer to the key to search                            *
 * Output: ITEM_FOUND, ITEM_NOT_FOUND or IO_ERROR                         *
 *         search_path - path from the root to the needed leaf            *
 **************************************************************************/

/* This function fills up the path from the root to the leaf as it descends
   the tree looking for the key.  It uses reiserfs_bread to try to find
   buffers in the cache given their block number.  If it does not find
   them in the cache it reads them from disk.
   For each node search_by_key finds using reiserfs_bread it then uses
   bin_search to look through that node.  bin_search will find the
   position of the block_number of the next node if it is looking
   through an internal node.  If it is looking through a leaf node
   bin_search will find the position of the item which has key either
   equal to given key, or which is the maximal key less than the given
   key.  search_by_key returns a path that must be checked for the
   correctness of the top of the path but need not be checked for the
   correctness of the bottom of the path */
/* The function is NOT SCHEDULE-SAFE! */
int search_by_key(struct super_block *sb, const struct cpu_key *key,	/* Key to search. */
		  struct treepath *search_path,/* This structure was
						   allocated and initialized
						   by the calling
						   function. It is filled up
						   by this function.  */
		  int stop_level	/* How far down the tree to search. To
					   stop at leaf level - set to
					   DISK_LEAF_NODE_LEVEL */
    )
{
	b_blocknr_t block_number;
	int expected_level;
	struct buffer_head *bh;
	struct path_element *last_element;
	int node_level, retval;
	int right_neighbor_of_leaf_node;
	int fs_gen;
	struct buffer_head *reada_bh[SEARCH_BY_KEY_READA];
	b_blocknr_t reada_blocks[SEARCH_BY_KEY_READA];
	int reada_count = 0;

#ifdef CONFIG_REISERFS_CHECK
	int repeat_counter = 0;
#endif

	PROC_INFO_INC(sb, search_by_key);

	/* As we add each node to a path we increase its count.  This means
	   that we must be careful to release all nodes in a path before we
	   either discard the path struct or re-use the path struct, as we
	   do here. */

	pathrelse(search_path);

	right_neighbor_of_leaf_node = 0;

	/* With each iteration of this loop we search through the items in the
	   current node, and calculate the next current node(next path element)
	   for the next iteration of this loop.. */
	block_number = SB_ROOT_BLOCK(sb);
	expected_level = -1;
	while (1) {

#ifdef CONFIG_REISERFS_CHECK
		if (!(++repeat_counter % 50000))
			reiserfs_warning(sb, "PAP-5100",
					 "%s: there were %d iterations of "
					 "while loop looking for key %K",
					 current->comm, repeat_counter,
					 key);
#endif

		/* prep path to have another element added to it. */
		last_element =
		    PATH_OFFSET_PELEMENT(search_path,
					 ++search_path->path_length);
		fs_gen = get_generation(sb);

		/* Read the next tree node, and set the last element in the
		   path to have a pointer to it. */
		if ((bh = last_element->pe_buffer =
		     sb_getblk(sb, block_number))) {
			bool unlocked = false;

			if (!buffer_uptodate(bh) && reada_count > 1)
				/* may unlock the write lock */
				unlocked = search_by_key_reada(sb, reada_bh,
						    reada_blocks, reada_count);
			/*
			 * If we haven't already unlocked the write lock,
			 * then we need to do that here before reading
			 * the current block
			 */
			if (!buffer_uptodate(bh) && !unlocked) {
				reiserfs_write_unlock(sb);
				unlocked = true;
			}
			ll_rw_block(READ, 1, &bh);
			wait_on_buffer(bh);

			if (unlocked)
				reiserfs_write_lock(sb);
			if (!buffer_uptodate(bh))
				goto io_error;
		} else {
		      io_error:
			search_path->path_length--;
			pathrelse(search_path);
			return IO_ERROR;
		}
		reada_count = 0;
		if (expected_level == -1)
			expected_level = SB_TREE_HEIGHT(sb);
		expected_level--;

		/* It is possible that schedule occurred. We must check whether
		   the key to search is still in the tree rooted from the
		   current buffer. If not then repeat search from the root. */
		if (fs_changed(fs_gen, sb) &&
		    (!B_IS_IN_TREE(bh) ||
		     B_LEVEL(bh) != expected_level ||
		     !key_in_buffer(search_path, key, sb))) {
			PROC_INFO_INC(sb, search_by_key_fs_changed);
			PROC_INFO_INC(sb, search_by_key_restarted);
			PROC_INFO_INC(sb,
				      sbk_restarted[expected_level - 1]);
			pathrelse(search_path);

			/* Get the root block number so that we can repeat the
			   search starting from the root. */
			block_number = SB_ROOT_BLOCK(sb);
			expected_level = -1;
			right_neighbor_of_leaf_node = 0;

			/* repeat search from the root */
			continue;
		}

		/* only check that the key is in the buffer if key is not
		   equal to the MAX_KEY. Latter case is only possible in
		   "finish_unfinished()" processing during mount. */
		RFALSE(comp_keys(&MAX_KEY, key) &&
		       !key_in_buffer(search_path, key, sb),
		       "PAP-5130: key is not in the buffer");
#ifdef CONFIG_REISERFS_CHECK
		if (REISERFS_SB(sb)->cur_tb) {
			print_cur_tb("5140");
			reiserfs_panic(sb, "PAP-5140",
				       "schedule occurred in do_balance!");
		}
#endif

		// make sure, that the node contents look like a node of
		// certain level
		if (!is_tree_node(bh, expected_level)) {
			reiserfs_error(sb, "vs-5150",
				       "invalid format found in block %ld. "
				       "Fsck?", bh->b_blocknr);
			pathrelse(search_path);
			return IO_ERROR;
		}

		/* ok, we have acquired next formatted node in the tree */
		node_level = B_LEVEL(bh);

		PROC_INFO_BH_STAT(sb, bh, node_level - 1);

		RFALSE(node_level < stop_level,
		       "vs-5152: tree level (%d) is less than stop level (%d)",
		       node_level, stop_level);

		/* width differs: internal nodes hold bare keys, leaves hold
		   full item headers */
		retval = bin_search(key, B_N_PITEM_HEAD(bh, 0),
				      B_NR_ITEMS(bh),
				      (node_level ==
				       DISK_LEAF_NODE_LEVEL) ? IH_SIZE :
				      KEY_SIZE,
				      &(last_element->pe_position));
		if (node_level == stop_level) {
			return retval;
		}

		/* we are not in the stop level */
		if (retval == ITEM_FOUND)
			/* item has been found, so we choose the pointer which
			   is to the right of the found one */
			last_element->pe_position++;

		/* if item was not found we choose the position which is to
		   the left of the found item. This requires no code,
		   bin_search did it already. */

		/* So we have chosen a position in the current node which is
		   an internal node.  Now we calculate child block number by
		   position in the node. */
		block_number =
		    B_N_CHILD_NUM(bh, last_element->pe_position);

		/* if we are going to read leaf nodes, try for read ahead as well */
		if ((search_path->reada & PATH_READA) &&
		    node_level == DISK_LEAF_NODE_LEVEL + 1) {
			int pos = last_element->pe_position;
			int limit = B_NR_ITEMS(bh);
			struct reiserfs_key *le_key;

			if (search_path->reada & PATH_READA_BACK)
				limit = 0;
			while (reada_count < SEARCH_BY_KEY_READA) {
				if (pos == limit)
					break;
				reada_blocks[reada_count++] =
				    B_N_CHILD_NUM(bh, pos);
				if (search_path->reada & PATH_READA_BACK)
					pos--;
				else
					pos++;

				/*
				 * check to make sure we're in the same object
				 */
				le_key = B_N_PDELIM_KEY(bh, pos);
				if (le32_to_cpu(le_key->k_objectid) !=
				    key->on_disk_key.k_objectid) {
					break;
				}
			}
		}
	}
}

/* Form the path to an item and position in this item which contains
   file byte defined by key. If there is no such item
   corresponding to the key, we point the path to the item with
   maximal key less than key, and *pos_in_item is set to one
   past the last entry/byte in the item.  If searching for entry in a
   directory item, and it is not found, *pos_in_item is
   set to one entry more than the entry with maximal key which is less
   than the sought key.

   Note that if there is no entry in this same node which is one more,
   then we point to an imaginary entry.  for direct items, the
   position is in units of bytes, for indirect items the position is
   in units of blocknr entries, for directory items the position is in
   units of directory entries.  */
/* The function is NOT SCHEDULE-SAFE! */
int search_for_position_by_key(struct super_block *sb,	/* Pointer to the super block.  */
			       const struct cpu_key *p_cpu_key,	/* Key to search (cpu variable) */
			       struct treepath *search_path	/* Filled up by this function.  */
    )
{
	struct item_head *p_le_ih;	/* pointer to on-disk structure */
	int blk_size;
	loff_t item_offset, offset;
	struct reiserfs_dir_entry de;
	int retval;

	/* If searching for directory entry. */
	if (is_direntry_cpu_key(p_cpu_key))
		return search_by_entry_key(sb, p_cpu_key, search_path,
					   &de);

	/* If not searching for directory entry. */

	/* If item is found. */
	retval = search_item(sb, p_cpu_key, search_path);
	if (retval == IO_ERROR)
		return retval;
	if (retval == ITEM_FOUND) {

		RFALSE(!ih_item_len
		       (B_N_PITEM_HEAD
			(PATH_PLAST_BUFFER(search_path),
			 PATH_LAST_POSITION(search_path))),
		       "PAP-5165: item length equals zero");

		pos_in_item(search_path) = 0;
		return POSITION_FOUND;
	}

	RFALSE(!PATH_LAST_POSITION(search_path),
	       "PAP-5170: position equals zero");

	/* Item is not found. Set path to the previous item. */
	p_le_ih =
	    B_N_PITEM_HEAD(PATH_PLAST_BUFFER(search_path),
			   --PATH_LAST_POSITION(search_path));
	blk_size = sb->s_blocksize;

	if (comp_short_keys(&(p_le_ih->ih_key), p_cpu_key)) {
		/* previous item belongs to a different object */
		return FILE_NOT_FOUND;
	}
	// FIXME: quite ugly this far

	item_offset = le_ih_k_offset(p_le_ih);
	offset = cpu_key_k_offset(p_cpu_key);

	/* Needed byte is contained in the item pointed to by the path. */
	if (item_offset <= offset &&
	    item_offset + op_bytes_number(p_le_ih, blk_size) > offset) {
		pos_in_item(search_path) = offset - item_offset;
		if (is_indirect_le_ih(p_le_ih)) {
			/* indirect items are positioned in units of block
			   pointers, not bytes */
			pos_in_item(search_path) /= blk_size;
		}
		return POSITION_FOUND;
	}

	/* Needed byte is not contained in the item pointed to by the
	   path. Set pos_in_item out of the item. */
	if (is_indirect_le_ih(p_le_ih))
		pos_in_item(search_path) =
		    ih_item_len(p_le_ih) / UNFM_P_SIZE;
	else
		pos_in_item(search_path) = ih_item_len(p_le_ih);

	return POSITION_NOT_FOUND;
}

/* Compare given item and item pointed to by the path.
   Returns 0 if they are the same item, nonzero otherwise. */
int comp_items(const struct item_head *stored_ih, const struct treepath *path)
{
	struct buffer_head *bh = PATH_PLAST_BUFFER(path);
	struct item_head *ih;

	/* Last buffer at the path is not in the tree. */
	if (!B_IS_IN_TREE(bh))
		return 1;

	/* Last path position is invalid. */
	if (PATH_LAST_POSITION(path) >= B_NR_ITEMS(bh))
		return 1;

	/* we need only to know, whether it is the same item */
	ih = get_ih(path);
	return memcmp(stored_ih, ih, IH_SIZE);
}

/* unformatted nodes are not logged anymore, ever.  This is safe
** now
*/
#define held_by_others(bh) (atomic_read(&(bh)->b_count) > 1)

// block can not be forgotten as it is in I/O or held by someone
#define block_in_use(bh) (buffer_locked(bh) || (held_by_others(bh)))

// prepare for delete or cut of direct item
static inline int prepare_for_direct_item(struct treepath *path,
					  struct item_head *le_ih,
					  struct inode *inode,
					  loff_t new_file_length, int *cut_size)
{
	loff_t round_len;

	if (new_file_length == max_reiserfs_offset(inode)) {
		/* item has to be deleted */
		*cut_size = -(IH_SIZE + ih_item_len(le_ih));
		return M_DELETE;
	}
	// new file gets truncated
	if (get_inode_item_key_version(inode) == KEY_FORMAT_3_6) {
		//
		round_len = ROUND_UP(new_file_length);
		/* this was new_file_length < le_ih ... */
		if (round_len < le_ih_k_offset(le_ih)) {
			*cut_size = -(IH_SIZE + ih_item_len(le_ih));
			return M_DELETE;	/* Delete this item. */
		}
		/* Calculate first position and size for cutting from item. */
		pos_in_item(path) = round_len - (le_ih_k_offset(le_ih) - 1);
		*cut_size = -(ih_item_len(le_ih) - pos_in_item(path));

		return M_CUT;	/* Cut from this item. */
	}

	// old file: items may have any length

	if (new_file_length < le_ih_k_offset(le_ih)) {
		*cut_size = -(IH_SIZE + ih_item_len(le_ih));
		return M_DELETE;	/* Delete this item. */
	}
	/* Calculate first position and size for cutting from item. */
	*cut_size = -(ih_item_len(le_ih) -
		      (pos_in_item(path) =
		       new_file_length + 1 - le_ih_k_offset(le_ih)));
	return M_CUT;	/* Cut from this item. */
}

/* prepare for delete or cut of a directory item: decide whether the
   whole item goes (M_DELETE) or just one entry is removed (M_CUT) */
static inline int prepare_for_direntry_item(struct treepath *path,
					    struct item_head *le_ih,
					    struct inode *inode,
					    loff_t new_file_length,
					    int *cut_size)
{
	if (le_ih_k_offset(le_ih) == DOT_OFFSET &&
	    new_file_length == max_reiserfs_offset(inode)) {
		RFALSE(ih_entry_count(le_ih) != 2,
		       "PAP-5220: incorrect empty directory item (%h)", le_ih);
		*cut_size = -(IH_SIZE + ih_item_len(le_ih));
		return M_DELETE;	/* Delete the directory item containing "." and ".." entry. */
	}

	if (ih_entry_count(le_ih) == 1) {
		/* Delete the directory item such as there is one record only
		   in this item */
		*cut_size = -(IH_SIZE + ih_item_len(le_ih));
		return M_DELETE;
	}

	/* Cut one record from the directory item. */
	*cut_size =
	    -(DEH_SIZE +
	      entry_length(get_last_bh(path), le_ih, pos_in_item(path)));
	return M_CUT;
}

#define JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD (2 * JOURNAL_PER_BALANCE_CNT + 1)

/*  If the path points to a directory or direct item, calculate mode and the
    size cut, for balance.
    If the path points to an indirect item, remove some number of its
    unformatted nodes.
    In case of file truncate calculate whether this item must be
    deleted/truncated or last unformatted node of this item will be
    converted to a direct item.
    This function returns a determination of what balance mode the calling
    function should employ. */
static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, struct inode *inode, struct treepath *path, const struct cpu_key *item_key, int *removed,	/* Number of unformatted nodes which were removed
				   from end of the file. */
				      int *cut_size, unsigned long long new_file_length	/* MAX_KEY_OFFSET in case of delete. */
    )
{
	struct super_block *sb = inode->i_sb;
	struct item_head *p_le_ih = PATH_PITEM_HEAD(path);
	struct buffer_head *bh = PATH_PLAST_BUFFER(path);

	BUG_ON(!th->t_trans_id);

	/* Stat_data item. */
	if (is_statdata_le_ih(p_le_ih)) {

		RFALSE(new_file_length != max_reiserfs_offset(inode),
		       "PAP-5210: mode must be M_DELETE");

		*cut_size = -(IH_SIZE + ih_item_len(p_le_ih));
		return M_DELETE;
	}

	/* Directory item. */
	if (is_direntry_le_ih(p_le_ih))
		return prepare_for_direntry_item(path, p_le_ih, inode,
						 new_file_length,
						 cut_size);

	/* Direct item. */
	if (is_direct_le_ih(p_le_ih))
		return prepare_for_direct_item(path, p_le_ih, inode,
					       new_file_length, cut_size);

	/* Case of an indirect item. */
	{
		int blk_size = sb->s_blocksize;
		struct item_head s_ih;
		int need_re_search;
		int delete = 0;
		int result = M_CUT;
		int pos = 0;

		if ( new_file_length == max_reiserfs_offset (inode) ) {
			/* prepare_for_delete_or_cut() is called by
			 * reiserfs_delete_item() */
			new_file_length = 0;
			delete = 1;
		}

		do {
			need_re_search = 0;
			*cut_size = 0;
			bh = PATH_PLAST_BUFFER(path);
			copy_item_head(&s_ih, PATH_PITEM_HEAD(path));
			pos = I_UNFM_NUM(&s_ih);

			/* free unformatted blocks from the tail of the item,
			   back to front, until new_file_length is reached */
			while (le_ih_k_offset (&s_ih) + (pos - 1) * blk_size > new_file_length) {
				__le32 *unfm;
				__u32 block;

				/* Each unformatted block deletion may involve
				 * one additional bitmap block into the
				 * transaction, thereby the initial journal
				 * space reservation might not be enough. */
				if (!delete && (*cut_size) != 0 &&
				    reiserfs_transaction_free_space(th) < JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD)
					break;

				unfm = (__le32 *)B_I_PITEM(bh, &s_ih) + pos - 1;
				block = get_block_num(unfm, 0);

				if (block != 0) {
					reiserfs_prepare_for_journal(sb, bh, 1);
					put_block_num(unfm, 0, 0);
					journal_mark_dirty(th, sb, bh);
					reiserfs_free_block(th, inode, block, 1);
				}

				reiserfs_write_unlock(sb);
				cond_resched();
				reiserfs_write_lock(sb);

				if (item_moved (&s_ih, path))  {
					/* balancing moved the item under us:
					   redo the search and continue */
					need_re_search = 1;
					break;
				}

				pos --;
				(*removed)++;
				(*cut_size) -= UNFM_P_SIZE;

				if (pos == 0) {
					(*cut_size) -= IH_SIZE;
					result = M_DELETE;
					break;
				}
			}
			/* a trick.  If the buffer has been logged, this will
			** do nothing.  If we've broken the loop without
			** logging it, it will restore the buffer */
			reiserfs_restore_prepared_buffer(sb, bh);
		} while (need_re_search &&
			 search_for_position_by_key(sb, item_key, path) == POSITION_FOUND);
		pos_in_item(path) = pos * UNFM_P_SIZE;

		if (*cut_size == 0) {
			/* Nothing were cut. maybe convert last unformatted node to the
			 * direct item? */
			result = M_CONVERT;
		}
		return result;
	}
}

/* Calculate number of bytes which will be deleted or cut during balance */
static int calc_deleted_bytes_number(struct tree_balance *tb, char mode)
{
	int del_size;
	struct item_head *p_le_ih = PATH_PITEM_HEAD(tb->tb_path);

	if (is_statdata_le_ih(p_le_ih))
		return 0;

	del_size =
	    (mode ==
	     M_DELETE) ? ih_item_len(p_le_ih) : -tb->insert_size[0];
	if (is_direntry_le_ih(p_le_ih)) {
		/* return EMPTY_DIR_SIZE; We delete emty directoris only.
		 * we can't use EMPTY_DIR_SIZE, as old format dirs have a
		 * different empty size.  ick. FIXME, is this right? */
		return del_size;
	}

	if (is_indirect_le_ih(p_le_ih))
		/* each pointer in the item stands for one block of data */
		del_size = (del_size / UNFM_P_SIZE) *
				(PATH_PLAST_BUFFER(tb->tb_path)->b_size);
	return del_size;
}

/* Initialize a tree_balance descriptor for a pending balance of 'size'
   bytes at 'path'. */
static void init_tb_struct(struct reiserfs_transaction_handle *th,
			   struct tree_balance *tb,
			   struct super_block *sb,
			   struct treepath *path, int size)
{

	BUG_ON(!th->t_trans_id);

	memset(tb, '\0', sizeof(struct tree_balance));
	tb->transaction_handle = th;
	tb->tb_sb = sb;
	tb->tb_path = path;
	PATH_OFFSET_PBUFFER(path, ILLEGAL_PATH_ELEMENT_OFFSET) = NULL;
	PATH_OFFSET_POSITION(path, ILLEGAL_PATH_ELEMENT_OFFSET) = 0;
	tb->insert_size[0] = size;
}

/* Zero-fill the tail of 'item' from offset 'length' up to 'total_length'. */
void padd_item(char *item, int total_length, int length)
{
	int i;

	for (i = total_length; i > length;)
		item[--i] = 0;
}

#ifdef REISERQUOTA_DEBUG
/* single-character item type tag for quota debug output (from a key) */
char key2type(struct reiserfs_key *ih)
{
	if (is_direntry_le_key(2, ih))
		return 'd';
	if (is_direct_le_key(2, ih))
		return 'D';
	if (is_indirect_le_key(2, ih))
		return 'i';
	if (is_statdata_le_key(2, ih))
		return 's';
	return 'u';
}

/* single-character item type tag for quota debug output (from an item head) */
char head2type(struct item_head *ih)
{
	if (is_direntry_le_ih(ih))
		return 'd';
	if (is_direct_le_ih(ih))
		return 'D';
	if (is_indirect_le_ih(ih))
		return 'i';
	if (is_statdata_le_ih(ih))
		return 's';
	return 'u';
}
#endif

/* Delete object item.
 * th - active transaction handle
 * path - path to the deleted item
 * item_key - key to search for the deleted item
 * inode - used for updating i_blocks and quotas
 * un_bh - NULL or unformatted node pointer
 */
int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
			 struct treepath *path, const struct cpu_key *item_key,
			 struct inode *inode, struct buffer_head *un_bh)
{
	struct super_block *sb = inode->i_sb;
	struct tree_balance s_del_balance;
	struct item_head s_ih;
	struct item_head *q_ih;
	int quota_cut_bytes;
	int ret_value, del_size, removed;

#ifdef CONFIG_REISERFS_CHECK
	char mode;
	int iter = 0;
#endif

	BUG_ON(!th->t_trans_id);

	init_tb_struct(th, &s_del_balance, sb, path,
		       0 /*size is unknown */ );

	while (1) {
		removed = 0;

#ifdef CONFIG_REISERFS_CHECK
		iter++;
		mode =
#endif
		    prepare_for_delete_or_cut(th, inode, path,
					      item_key, &removed,
					      &del_size,
					      max_reiserfs_offset(inode));

		RFALSE(mode != M_DELETE, "PAP-5320: mode must be M_DELETE");

		copy_item_head(&s_ih, PATH_PITEM_HEAD(path));
		s_del_balance.insert_size[0] = del_size;

		ret_value = fix_nodes(M_DELETE, &s_del_balance, NULL, NULL);
		if (ret_value != REPEAT_SEARCH)
			break;

		PROC_INFO_INC(sb, delete_item_restarted);

		// file system changed, repeat search
		ret_value =
		    search_for_position_by_key(sb, item_key, path);
		if (ret_value == IO_ERROR)
			break;
		if (ret_value == FILE_NOT_FOUND) {
			reiserfs_warning(sb, "vs-5340",
					 "no items of the file %K found",
					 item_key);
			break;
		}
	}			/* while (1) */

	if (ret_value != CARRY_ON) {
		unfix_nodes(&s_del_balance);
		return 0;
	}
	// reiserfs_delete_item returns item length when success
	ret_value = calc_deleted_bytes_number(&s_del_balance, M_DELETE);
	q_ih = get_ih(path);
	quota_cut_bytes = ih_item_len(q_ih);

	/* hack so the quota code doesn't have to guess if the file
	 ** has a tail. On tail insert, we allocate quota for 1 unformatted
	 ** node. We test the offset because the tail might have been
	 ** split into multiple items, and we only want to decrement for
	 ** the unfm node once
	 */
	if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(q_ih)) {
		if ((le_ih_k_offset(q_ih) & (sb->s_blocksize - 1)) == 1) {
			quota_cut_bytes = sb->s_blocksize + UNFM_P_SIZE;
		} else {
			quota_cut_bytes = 0;
		}
	}

	if (un_bh) {
		int off;
		char *data;

		/* We are in direct2indirect conversion, so move tail contents
		   to the unformatted node */
		/* note, we do the copy before preparing the buffer because we
		 ** don't care about the contents of the unformatted node yet.
		 ** the only thing we really care about is the direct item's
		 ** data is in the unformatted node.
		 **
		 ** Otherwise, we would have to call
		 ** reiserfs_prepare_for_journal on the unformatted node,
		 ** which might schedule, meaning we'd have to loop all the
		 ** way back up to the start of the while loop.
		 **
		 ** The unformatted node must be dirtied later on.  We can't be
		 ** sure here if the entire tail has been deleted yet.
		 **
		 ** un_bh is from the page cache (all unformatted nodes are
		 ** from the page cache) and might be a highmem page.  So, we
		 ** can't use un_bh->b_data.
		 ** -clm
		 */

		data = kmap_atomic(un_bh->b_page);
		off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1));
		memcpy(data + off,
		       B_I_PITEM(PATH_PLAST_BUFFER(path), &s_ih),
		       ret_value);
		kunmap_atomic(data);
	}
	/* Perform balancing after all resources have been collected at once. */
	do_balance(&s_del_balance, NULL, NULL, M_DELETE);

#ifdef REISERQUOTA_DEBUG
	reiserfs_debug(sb, REISERFS_DEBUG_CODE,
		       "reiserquota delete_item(): freeing %u, id=%u type=%c",
		       quota_cut_bytes, inode->i_uid, head2type(&s_ih));
#endif
	dquot_free_space_nodirty(inode, quota_cut_bytes);

	/* Return deleted body length */
	return ret_value;
}

/* Summary Of Mechanisms For Handling Collisions Between Processes:

 deletion of the body of the object is performed by iput(), with the
 result that if multiple processes are operating on a file, the
 deletion of the body of the file is deferred until the last process
 that has an open inode performs its iput().

 writes and truncates are protected from collisions by use of
 semaphores.

 creates, linking, and mknod are protected from collisions with other
 processes by making the reiserfs_add_entry() the last step in the
 creation, and then rolling back all changes if there was a collision.
 - Hans
*/

/* this deletes item which never gets split */
void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
				struct inode *inode, struct reiserfs_key *key)
{
	struct tree_balance tb;
	INITIALIZE_PATH(path);
	int item_len = 0;
	int tb_init = 0;
	struct cpu_key cpu_key;
	int retval;
	int quota_cut_bytes = 0;

	BUG_ON(!th->t_trans_id);

	le_key2cpu_key(&cpu_key, key);

	while (1) {
		retval = search_item(th->t_super, &cpu_key, &path);
		if (retval == IO_ERROR) {
			reiserfs_error(th->t_super, "vs-5350",
				       "i/o failure occurred trying "
				       "to delete %K", &cpu_key);
			break;
		}
		if (retval != ITEM_FOUND) {
			pathrelse(&path);
			// No need for a warning, if there is just no free space to insert '..' item into the newly-created subdir
			if (!
			    ((unsigned long long)
			     GET_HASH_VALUE(le_key_k_offset
					    (le_key_version(key), key)) == 0
			     && (unsigned long long)
			     GET_GENERATION_NUMBER(le_key_k_offset
						   (le_key_version(key),
						    key)) == 1))
				reiserfs_warning(th->t_super, "vs-5355",
						 "%k not found", key);
			break;
		}
		if (!tb_init) {
			tb_init = 1;
			item_len = ih_item_len(PATH_PITEM_HEAD(&path));
			init_tb_struct(th, &tb, th->t_super, &path,
				       -(IH_SIZE + item_len));
		}
		quota_cut_bytes = ih_item_len(PATH_PITEM_HEAD(&path));

		retval = fix_nodes(M_DELETE, &tb, NULL, NULL);
		if (retval == REPEAT_SEARCH) {
			PROC_INFO_INC(th->t_super, delete_solid_item_restarted);
			continue;
		}

		if (retval == CARRY_ON) {
			do_balance(&tb, NULL, NULL, M_DELETE);
			if (inode) {	/* Should we count quota for item? (we don't count quotas for save-links) */
#ifdef REISERQUOTA_DEBUG
				reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
					       "reiserquota delete_solid_item(): freeing %u id=%u type=%c",
					       quota_cut_bytes, inode->i_uid,
					       key2type(key));
#endif
				dquot_free_space_nodirty(inode,
							 quota_cut_bytes);
			}
			break;
		}
		// IO_ERROR, NO_DISK_SPACE, etc
		reiserfs_warning(th->t_super, "vs-5360",
				 "could not delete %K due to fix_nodes failure",
				 &cpu_key);
		unfix_nodes(&tb);
		break;
	}

	reiserfs_check_path(&path);
}

int reiserfs_delete_object(struct reiserfs_transaction_handle *th,
			   struct inode *inode)
{
	int err;
	inode->i_size = 0;
	BUG_ON(!th->t_trans_id);

	/* for directory this deletes item containing "." and ".."
*/ err = reiserfs_do_truncate(th, inode, NULL, 0 /*no timestamp updates */ ); if (err) return err; #if defined( USE_INODE_GENERATION_COUNTER ) if (!old_format_only(th->t_super)) { __le32 *inode_generation; inode_generation = &REISERFS_SB(th->t_super)->s_rs->s_inode_generation; le32_add_cpu(inode_generation, 1); } /* USE_INODE_GENERATION_COUNTER */ #endif reiserfs_delete_solid_item(th, inode, INODE_PKEY(inode)); return err; } static void unmap_buffers(struct page *page, loff_t pos) { struct buffer_head *bh; struct buffer_head *head; struct buffer_head *next; unsigned long tail_index; unsigned long cur_index; if (page) { if (page_has_buffers(page)) { tail_index = pos & (PAGE_CACHE_SIZE - 1); cur_index = 0; head = page_buffers(page); bh = head; do { next = bh->b_this_page; /* we want to unmap the buffers that contain the tail, and ** all the buffers after it (since the tail must be at the ** end of the file). We don't want to unmap file data ** before the tail, since it might be dirty and waiting to ** reach disk */ cur_index += bh->b_size; if (cur_index > tail_index) { reiserfs_unmap_buffer(bh); } bh = next; } while (bh != head); } } } static int maybe_indirect_to_direct(struct reiserfs_transaction_handle *th, struct inode *inode, struct page *page, struct treepath *path, const struct cpu_key *item_key, loff_t new_file_size, char *mode) { struct super_block *sb = inode->i_sb; int block_size = sb->s_blocksize; int cut_bytes; BUG_ON(!th->t_trans_id); BUG_ON(new_file_size != inode->i_size); /* the page being sent in could be NULL if there was an i/o error ** reading in the last block. 
The user will hit problems trying to ** read the file, but for now we just skip the indirect2direct */ if (atomic_read(&inode->i_count) > 1 || !tail_has_to_be_packed(inode) || !page || (REISERFS_I(inode)->i_flags & i_nopack_mask)) { /* leave tail in an unformatted node */ *mode = M_SKIP_BALANCING; cut_bytes = block_size - (new_file_size & (block_size - 1)); pathrelse(path); return cut_bytes; } /* Perform the conversion to a direct_item. */ /* return indirect_to_direct(inode, path, item_key, new_file_size, mode); */ return indirect2direct(th, inode, page, path, item_key, new_file_size, mode); } /* we did indirect_to_direct conversion. And we have inserted direct item successesfully, but there were no disk space to cut unfm pointer being converted. Therefore we have to delete inserted direct item(s) */ static void indirect_to_direct_roll_back(struct reiserfs_transaction_handle *th, struct inode *inode, struct treepath *path) { struct cpu_key tail_key; int tail_len; int removed; BUG_ON(!th->t_trans_id); make_cpu_key(&tail_key, inode, inode->i_size + 1, TYPE_DIRECT, 4); // !!!! 
tail_key.key_length = 4; tail_len = (cpu_key_k_offset(&tail_key) & (inode->i_sb->s_blocksize - 1)) - 1; while (tail_len) { /* look for the last byte of the tail */ if (search_for_position_by_key(inode->i_sb, &tail_key, path) == POSITION_NOT_FOUND) reiserfs_panic(inode->i_sb, "vs-5615", "found invalid item"); RFALSE(path->pos_in_item != ih_item_len(PATH_PITEM_HEAD(path)) - 1, "vs-5616: appended bytes found"); PATH_LAST_POSITION(path)--; removed = reiserfs_delete_item(th, path, &tail_key, inode, NULL /*unbh not needed */ ); RFALSE(removed <= 0 || removed > tail_len, "vs-5617: there was tail %d bytes, removed item length %d bytes", tail_len, removed); tail_len -= removed; set_cpu_key_k_offset(&tail_key, cpu_key_k_offset(&tail_key) - removed); } reiserfs_warning(inode->i_sb, "reiserfs-5091", "indirect_to_direct " "conversion has been rolled back due to " "lack of disk space"); //mark_file_without_tail (inode); mark_inode_dirty(inode); } /* (Truncate or cut entry) or delete object item. Returns < 0 on failure */ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, struct treepath *path, struct cpu_key *item_key, struct inode *inode, struct page *page, loff_t new_file_size) { struct super_block *sb = inode->i_sb; /* Every function which is going to call do_balance must first create a tree_balance structure. Then it must fill up this structure by using the init_tb_struct and fix_nodes functions. After that we can make tree balancing. */ struct tree_balance s_cut_balance; struct item_head *p_le_ih; int cut_size = 0, /* Amount to be cut. */ ret_value = CARRY_ON, removed = 0, /* Number of the removed unformatted nodes. */ is_inode_locked = 0; char mode; /* Mode of the balance. 
*/ int retval2 = -1; int quota_cut_bytes; loff_t tail_pos = 0; BUG_ON(!th->t_trans_id); init_tb_struct(th, &s_cut_balance, inode->i_sb, path, cut_size); /* Repeat this loop until we either cut the item without needing to balance, or we fix_nodes without schedule occurring */ while (1) { /* Determine the balance mode, position of the first byte to be cut, and size to be cut. In case of the indirect item free unformatted nodes which are pointed to by the cut pointers. */ mode = prepare_for_delete_or_cut(th, inode, path, item_key, &removed, &cut_size, new_file_size); if (mode == M_CONVERT) { /* convert last unformatted node to direct item or leave tail in the unformatted node */ RFALSE(ret_value != CARRY_ON, "PAP-5570: can not convert twice"); ret_value = maybe_indirect_to_direct(th, inode, page, path, item_key, new_file_size, &mode); if (mode == M_SKIP_BALANCING) /* tail has been left in the unformatted node */ return ret_value; is_inode_locked = 1; /* removing of last unformatted node will change value we have to return to truncate. Save it */ retval2 = ret_value; /*retval2 = sb->s_blocksize - (new_file_size & (sb->s_blocksize - 1)); */ /* So, we have performed the first part of the conversion: inserting the new direct item. Now we are removing the last unformatted node pointer. Set key to search for it. 
*/ set_cpu_key_k_type(item_key, TYPE_INDIRECT); item_key->key_length = 4; new_file_size -= (new_file_size & (sb->s_blocksize - 1)); tail_pos = new_file_size; set_cpu_key_k_offset(item_key, new_file_size + 1); if (search_for_position_by_key (sb, item_key, path) == POSITION_NOT_FOUND) { print_block(PATH_PLAST_BUFFER(path), 3, PATH_LAST_POSITION(path) - 1, PATH_LAST_POSITION(path) + 1); reiserfs_panic(sb, "PAP-5580", "item to " "convert does not exist (%K)", item_key); } continue; } if (cut_size == 0) { pathrelse(path); return 0; } s_cut_balance.insert_size[0] = cut_size; ret_value = fix_nodes(mode, &s_cut_balance, NULL, NULL); if (ret_value != REPEAT_SEARCH) break; PROC_INFO_INC(sb, cut_from_item_restarted); ret_value = search_for_position_by_key(sb, item_key, path); if (ret_value == POSITION_FOUND) continue; reiserfs_warning(sb, "PAP-5610", "item %K not found", item_key); unfix_nodes(&s_cut_balance); return (ret_value == IO_ERROR) ? -EIO : -ENOENT; } /* while */ // check fix_nodes results (IO_ERROR or NO_DISK_SPACE) if (ret_value != CARRY_ON) { if (is_inode_locked) { // FIXME: this seems to be not needed: we are always able // to cut item indirect_to_direct_roll_back(th, inode, path); } if (ret_value == NO_DISK_SPACE) reiserfs_warning(sb, "reiserfs-5092", "NO_DISK_SPACE"); unfix_nodes(&s_cut_balance); return -EIO; } /* go ahead and perform balancing */ RFALSE(mode == M_PASTE || mode == M_INSERT, "invalid mode"); /* Calculate number of bytes that need to be cut from the item. */ quota_cut_bytes = (mode == M_DELETE) ? ih_item_len(get_ih(path)) : -s_cut_balance. insert_size[0]; if (retval2 == -1) ret_value = calc_deleted_bytes_number(&s_cut_balance, mode); else ret_value = retval2; /* For direct items, we only change the quota when deleting the last ** item. 
*/ p_le_ih = PATH_PITEM_HEAD(s_cut_balance.tb_path); if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(p_le_ih)) { if (mode == M_DELETE && (le_ih_k_offset(p_le_ih) & (sb->s_blocksize - 1)) == 1) { // FIXME: this is to keep 3.5 happy REISERFS_I(inode)->i_first_direct_byte = U32_MAX; quota_cut_bytes = sb->s_blocksize + UNFM_P_SIZE; } else { quota_cut_bytes = 0; } } #ifdef CONFIG_REISERFS_CHECK if (is_inode_locked) { struct item_head *le_ih = PATH_PITEM_HEAD(s_cut_balance.tb_path); /* we are going to complete indirect2direct conversion. Make sure, that we exactly remove last unformatted node pointer of the item */ if (!is_indirect_le_ih(le_ih)) reiserfs_panic(sb, "vs-5652", "item must be indirect %h", le_ih); if (mode == M_DELETE && ih_item_len(le_ih) != UNFM_P_SIZE) reiserfs_panic(sb, "vs-5653", "completing " "indirect2direct conversion indirect " "item %h being deleted must be of " "4 byte long", le_ih); if (mode == M_CUT && s_cut_balance.insert_size[0] != -UNFM_P_SIZE) { reiserfs_panic(sb, "vs-5654", "can not complete " "indirect2direct conversion of %h " "(CUT, insert_size==%d)", le_ih, s_cut_balance.insert_size[0]); } /* it would be useful to make sure, that right neighboring item is direct item of this file */ } #endif do_balance(&s_cut_balance, NULL, NULL, mode); if (is_inode_locked) { /* we've done an indirect->direct conversion. 
when the data block ** was freed, it was removed from the list of blocks that must ** be flushed before the transaction commits, make sure to ** unmap and invalidate it */ unmap_buffers(page, tail_pos); REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask; } #ifdef REISERQUOTA_DEBUG reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE, "reiserquota cut_from_item(): freeing %u id=%u type=%c", quota_cut_bytes, inode->i_uid, '?'); #endif dquot_free_space_nodirty(inode, quota_cut_bytes); return ret_value; } static void truncate_directory(struct reiserfs_transaction_handle *th, struct inode *inode) { BUG_ON(!th->t_trans_id); if (inode->i_nlink) reiserfs_error(inode->i_sb, "vs-5655", "link count != 0"); set_le_key_k_offset(KEY_FORMAT_3_5, INODE_PKEY(inode), DOT_OFFSET); set_le_key_k_type(KEY_FORMAT_3_5, INODE_PKEY(inode), TYPE_DIRENTRY); reiserfs_delete_solid_item(th, inode, INODE_PKEY(inode)); reiserfs_update_sd(th, inode); set_le_key_k_offset(KEY_FORMAT_3_5, INODE_PKEY(inode), SD_OFFSET); set_le_key_k_type(KEY_FORMAT_3_5, INODE_PKEY(inode), TYPE_STAT_DATA); } /* Truncate file to the new size. Note, this must be called with a transaction already started */ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th, struct inode *inode, /* ->i_size contains new size */ struct page *page, /* up to date for last block */ int update_timestamps /* when it is called by file_release to convert the tail - no timestamps should be updated */ ) { INITIALIZE_PATH(s_search_path); /* Path to the current object item. */ struct item_head *p_le_ih; /* Pointer to an item header. */ struct cpu_key s_item_key; /* Key to search for a previous file item. */ loff_t file_size, /* Old file size. */ new_file_size; /* New file size. */ int deleted; /* Number of deleted or truncated bytes. */ int retval; int err = 0; BUG_ON(!th->t_trans_id); if (! 
(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) return 0; if (S_ISDIR(inode->i_mode)) { // deletion of directory - no need to update timestamps truncate_directory(th, inode); return 0; } /* Get new file size. */ new_file_size = inode->i_size; // FIXME: note, that key type is unimportant here make_cpu_key(&s_item_key, inode, max_reiserfs_offset(inode), TYPE_DIRECT, 3); retval = search_for_position_by_key(inode->i_sb, &s_item_key, &s_search_path); if (retval == IO_ERROR) { reiserfs_error(inode->i_sb, "vs-5657", "i/o failure occurred trying to truncate %K", &s_item_key); err = -EIO; goto out; } if (retval == POSITION_FOUND || retval == FILE_NOT_FOUND) { reiserfs_error(inode->i_sb, "PAP-5660", "wrong result %d of search for %K", retval, &s_item_key); err = -EIO; goto out; } s_search_path.pos_in_item--; /* Get real file size (total length of all file items) */ p_le_ih = PATH_PITEM_HEAD(&s_search_path); if (is_statdata_le_ih(p_le_ih)) file_size = 0; else { loff_t offset = le_ih_k_offset(p_le_ih); int bytes = op_bytes_number(p_le_ih, inode->i_sb->s_blocksize); /* this may mismatch with real file size: if last direct item had no padding zeros and last unformatted node had no free space, this file would have this file size */ file_size = offset + bytes - 1; } /* * are we doing a full truncate or delete, if so * kick in the reada code */ if (new_file_size == 0) s_search_path.reada = PATH_READA | PATH_READA_BACK; if (file_size == 0 || file_size < new_file_size) { goto update_and_out; } /* Update key to search for the last file item. */ set_cpu_key_k_offset(&s_item_key, file_size); do { /* Cut or delete file item. 
*/ deleted = reiserfs_cut_from_item(th, &s_search_path, &s_item_key, inode, page, new_file_size); if (deleted < 0) { reiserfs_warning(inode->i_sb, "vs-5665", "reiserfs_cut_from_item failed"); reiserfs_check_path(&s_search_path); return 0; } RFALSE(deleted > file_size, "PAP-5670: reiserfs_cut_from_item: too many bytes deleted: deleted %d, file_size %lu, item_key %K", deleted, file_size, &s_item_key); /* Change key to search the last file item. */ file_size -= deleted; set_cpu_key_k_offset(&s_item_key, file_size); /* While there are bytes to truncate and previous file item is presented in the tree. */ /* ** This loop could take a really long time, and could log ** many more blocks than a transaction can hold. So, we do a polite ** journal end here, and if the transaction needs ending, we make ** sure the file is consistent before ending the current trans ** and starting a new one */ if (journal_transaction_should_end(th, 0) || reiserfs_transaction_free_space(th) <= JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD) { int orig_len_alloc = th->t_blocks_allocated; pathrelse(&s_search_path); if (update_timestamps) { inode->i_mtime = CURRENT_TIME_SEC; inode->i_ctime = CURRENT_TIME_SEC; } reiserfs_update_sd(th, inode); err = journal_end(th, inode->i_sb, orig_len_alloc); if (err) goto out; err = journal_begin(th, inode->i_sb, JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD + JOURNAL_PER_BALANCE_CNT * 4) ; if (err) goto out; reiserfs_update_inode_transaction(inode); } } while (file_size > ROUND_UP(new_file_size) && search_for_position_by_key(inode->i_sb, &s_item_key, &s_search_path) == POSITION_FOUND); RFALSE(file_size > ROUND_UP(new_file_size), "PAP-5680: truncate did not finish: new_file_size %Ld, current %Ld, oid %d", new_file_size, file_size, s_item_key.on_disk_key.k_objectid); update_and_out: if (update_timestamps) { // this is truncate, not file closing inode->i_mtime = CURRENT_TIME_SEC; inode->i_ctime = CURRENT_TIME_SEC; } reiserfs_update_sd(th, inode); out: pathrelse(&s_search_path); return 
err; } #ifdef CONFIG_REISERFS_CHECK // this makes sure, that we __append__, not overwrite or add holes static void check_research_for_paste(struct treepath *path, const struct cpu_key *key) { struct item_head *found_ih = get_ih(path); if (is_direct_le_ih(found_ih)) { if (le_ih_k_offset(found_ih) + op_bytes_number(found_ih, get_last_bh(path)->b_size) != cpu_key_k_offset(key) || op_bytes_number(found_ih, get_last_bh(path)->b_size) != pos_in_item(path)) reiserfs_panic(NULL, "PAP-5720", "found direct item " "%h or position (%d) does not match " "to key %K", found_ih, pos_in_item(path), key); } if (is_indirect_le_ih(found_ih)) { if (le_ih_k_offset(found_ih) + op_bytes_number(found_ih, get_last_bh(path)->b_size) != cpu_key_k_offset(key) || I_UNFM_NUM(found_ih) != pos_in_item(path) || get_ih_free_space(found_ih) != 0) reiserfs_panic(NULL, "PAP-5730", "found indirect " "item (%h) or position (%d) does not " "match to key (%K)", found_ih, pos_in_item(path), key); } } #endif /* config reiserfs check */ /* Paste bytes to the existing item. Returns bytes number pasted into the item. */ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct treepath *search_path, /* Path to the pasted item. */ const struct cpu_key *key, /* Key to search for the needed item. */ struct inode *inode, /* Inode item belongs to */ const char *body, /* Pointer to the bytes to paste. */ int pasted_size) { /* Size of pasted bytes. 
*/ struct tree_balance s_paste_balance; int retval; int fs_gen; BUG_ON(!th->t_trans_id); fs_gen = get_generation(inode->i_sb); #ifdef REISERQUOTA_DEBUG reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE, "reiserquota paste_into_item(): allocating %u id=%u type=%c", pasted_size, inode->i_uid, key2type(&(key->on_disk_key))); #endif reiserfs_write_unlock(inode->i_sb); retval = dquot_alloc_space_nodirty(inode, pasted_size); reiserfs_write_lock(inode->i_sb); if (retval) { pathrelse(search_path); return retval; } init_tb_struct(th, &s_paste_balance, th->t_super, search_path, pasted_size); #ifdef DISPLACE_NEW_PACKING_LOCALITIES s_paste_balance.key = key->on_disk_key; #endif /* DQUOT_* can schedule, must check before the fix_nodes */ if (fs_changed(fs_gen, inode->i_sb)) { goto search_again; } while ((retval = fix_nodes(M_PASTE, &s_paste_balance, NULL, body)) == REPEAT_SEARCH) { search_again: /* file system changed while we were in the fix_nodes */ PROC_INFO_INC(th->t_super, paste_into_item_restarted); retval = search_for_position_by_key(th->t_super, key, search_path); if (retval == IO_ERROR) { retval = -EIO; goto error_out; } if (retval == POSITION_FOUND) { reiserfs_warning(inode->i_sb, "PAP-5710", "entry or pasted byte (%K) exists", key); retval = -EEXIST; goto error_out; } #ifdef CONFIG_REISERFS_CHECK check_research_for_paste(search_path, key); #endif } /* Perform balancing after all resources are collected by fix_nodes, and accessing them will not risk triggering schedule. */ if (retval == CARRY_ON) { do_balance(&s_paste_balance, NULL /*ih */ , body, M_PASTE); return 0; } retval = (retval == NO_DISK_SPACE) ? 
-ENOSPC : -EIO; error_out: /* this also releases the path */ unfix_nodes(&s_paste_balance); #ifdef REISERQUOTA_DEBUG reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE, "reiserquota paste_into_item(): freeing %u id=%u type=%c", pasted_size, inode->i_uid, key2type(&(key->on_disk_key))); #endif dquot_free_space_nodirty(inode, pasted_size); return retval; } /* Insert new item into the buffer at the path. * th - active transaction handle * path - path to the inserted item * ih - pointer to the item header to insert * body - pointer to the bytes to insert */ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath *path, const struct cpu_key *key, struct item_head *ih, struct inode *inode, const char *body) { struct tree_balance s_ins_balance; int retval; int fs_gen = 0; int quota_bytes = 0; BUG_ON(!th->t_trans_id); if (inode) { /* Do we count quotas for item? */ fs_gen = get_generation(inode->i_sb); quota_bytes = ih_item_len(ih); /* hack so the quota code doesn't have to guess if the file has ** a tail, links are always tails, so there's no guessing needed */ if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(ih)) quota_bytes = inode->i_sb->s_blocksize + UNFM_P_SIZE; #ifdef REISERQUOTA_DEBUG reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE, "reiserquota insert_item(): allocating %u id=%u type=%c", quota_bytes, inode->i_uid, head2type(ih)); #endif reiserfs_write_unlock(inode->i_sb); /* We can't dirty inode here. It would be immediately written but * appropriate stat item isn't inserted yet... 
*/ retval = dquot_alloc_space_nodirty(inode, quota_bytes); reiserfs_write_lock(inode->i_sb); if (retval) { pathrelse(path); return retval; } } init_tb_struct(th, &s_ins_balance, th->t_super, path, IH_SIZE + ih_item_len(ih)); #ifdef DISPLACE_NEW_PACKING_LOCALITIES s_ins_balance.key = key->on_disk_key; #endif /* DQUOT_* can schedule, must check to be sure calling fix_nodes is safe */ if (inode && fs_changed(fs_gen, inode->i_sb)) { goto search_again; } while ((retval = fix_nodes(M_INSERT, &s_ins_balance, ih, body)) == REPEAT_SEARCH) { search_again: /* file system changed while we were in the fix_nodes */ PROC_INFO_INC(th->t_super, insert_item_restarted); retval = search_item(th->t_super, key, path); if (retval == IO_ERROR) { retval = -EIO; goto error_out; } if (retval == ITEM_FOUND) { reiserfs_warning(th->t_super, "PAP-5760", "key %K already exists in the tree", key); retval = -EEXIST; goto error_out; } } /* make balancing after all resources will be collected at a time */ if (retval == CARRY_ON) { do_balance(&s_ins_balance, ih, body, M_INSERT); return 0; } retval = (retval == NO_DISK_SPACE) ? -ENOSPC : -EIO; error_out: /* also releases the path */ unfix_nodes(&s_ins_balance); #ifdef REISERQUOTA_DEBUG reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE, "reiserquota insert_item(): freeing %u id=%u type=%c", quota_bytes, inode->i_uid, head2type(ih)); #endif if (inode) dquot_free_space_nodirty(inode, quota_bytes); return retval; }
gpl-2.0
eoghan2t9/Oppo-Find5-4.2-Kernel
drivers/staging/usbip/usbip_common.c
4920
20444
/* * Copyright (C) 2003-2008 Takahiro Hirofuchi * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. */ #include <asm/byteorder.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <net/sock.h> #include "usbip_common.h" #define DRIVER_AUTHOR "Takahiro Hirofuchi <hirofuchi@users.sourceforge.net>" #define DRIVER_DESC "USB/IP Core" #ifdef CONFIG_USBIP_DEBUG unsigned long usbip_debug_flag = 0xffffffff; #else unsigned long usbip_debug_flag; #endif EXPORT_SYMBOL_GPL(usbip_debug_flag); /* FIXME */ struct device_attribute dev_attr_usbip_debug; EXPORT_SYMBOL_GPL(dev_attr_usbip_debug); static ssize_t show_flag(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%lx\n", usbip_debug_flag); } static ssize_t store_flag(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { sscanf(buf, "%lx", &usbip_debug_flag); return count; } DEVICE_ATTR(usbip_debug, (S_IRUGO | S_IWUSR), show_flag, store_flag); static void usbip_dump_buffer(char *buff, int bufflen) { print_hex_dump(KERN_DEBUG, "usbip-core", DUMP_PREFIX_OFFSET, 16, 4, buff, bufflen, false); } static void usbip_dump_pipe(unsigned int p) { unsigned char type = usb_pipetype(p); unsigned char ep = usb_pipeendpoint(p); unsigned char dev = usb_pipedevice(p); unsigned char 
dir = usb_pipein(p); pr_debug("dev(%d) ep(%d) [%s] ", dev, ep, dir ? "IN" : "OUT"); switch (type) { case PIPE_ISOCHRONOUS: pr_debug("ISO\n"); break; case PIPE_INTERRUPT: pr_debug("INT\n"); break; case PIPE_CONTROL: pr_debug("CTRL\n"); break; case PIPE_BULK: pr_debug("BULK\n"); break; default: pr_debug("ERR\n"); break; } } static void usbip_dump_usb_device(struct usb_device *udev) { struct device *dev = &udev->dev; int i; dev_dbg(dev, " devnum(%d) devpath(%s) ", udev->devnum, udev->devpath); switch (udev->speed) { case USB_SPEED_HIGH: pr_debug("SPD_HIGH "); break; case USB_SPEED_FULL: pr_debug("SPD_FULL "); break; case USB_SPEED_LOW: pr_debug("SPD_LOW "); break; case USB_SPEED_UNKNOWN: pr_debug("SPD_UNKNOWN "); break; default: pr_debug("SPD_ERROR "); break; } pr_debug("tt %p, ttport %d\n", udev->tt, udev->ttport); dev_dbg(dev, " "); for (i = 0; i < 16; i++) pr_debug(" %2u", i); pr_debug("\n"); dev_dbg(dev, " toggle0(IN) :"); for (i = 0; i < 16; i++) pr_debug(" %2u", (udev->toggle[0] & (1 << i)) ? 1 : 0); pr_debug("\n"); dev_dbg(dev, " toggle1(OUT):"); for (i = 0; i < 16; i++) pr_debug(" %2u", (udev->toggle[1] & (1 << i)) ? 
1 : 0); pr_debug("\n"); dev_dbg(dev, " epmaxp_in :"); for (i = 0; i < 16; i++) { if (udev->ep_in[i]) pr_debug(" %2u", le16_to_cpu(udev->ep_in[i]->desc.wMaxPacketSize)); } pr_debug("\n"); dev_dbg(dev, " epmaxp_out :"); for (i = 0; i < 16; i++) { if (udev->ep_out[i]) pr_debug(" %2u", le16_to_cpu(udev->ep_out[i]->desc.wMaxPacketSize)); } pr_debug("\n"); dev_dbg(dev, "parent %p, bus %p\n", udev->parent, udev->bus); dev_dbg(dev, "descriptor %p, config %p, actconfig %p, " "rawdescriptors %p\n", &udev->descriptor, udev->config, udev->actconfig, udev->rawdescriptors); dev_dbg(dev, "have_langid %d, string_langid %d\n", udev->have_langid, udev->string_langid); dev_dbg(dev, "maxchild %d, children %p\n", udev->maxchild, udev->children); } static void usbip_dump_request_type(__u8 rt) { switch (rt & USB_RECIP_MASK) { case USB_RECIP_DEVICE: pr_debug("DEVICE"); break; case USB_RECIP_INTERFACE: pr_debug("INTERF"); break; case USB_RECIP_ENDPOINT: pr_debug("ENDPOI"); break; case USB_RECIP_OTHER: pr_debug("OTHER "); break; default: pr_debug("------"); break; } } static void usbip_dump_usb_ctrlrequest(struct usb_ctrlrequest *cmd) { if (!cmd) { pr_debug(" : null pointer\n"); return; } pr_debug(" "); pr_debug("bRequestType(%02X) bRequest(%02X) wValue(%04X) wIndex(%04X) " "wLength(%04X) ", cmd->bRequestType, cmd->bRequest, cmd->wValue, cmd->wIndex, cmd->wLength); pr_debug("\n "); if ((cmd->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { pr_debug("STANDARD "); switch (cmd->bRequest) { case USB_REQ_GET_STATUS: pr_debug("GET_STATUS\n"); break; case USB_REQ_CLEAR_FEATURE: pr_debug("CLEAR_FEAT\n"); break; case USB_REQ_SET_FEATURE: pr_debug("SET_FEAT\n"); break; case USB_REQ_SET_ADDRESS: pr_debug("SET_ADDRRS\n"); break; case USB_REQ_GET_DESCRIPTOR: pr_debug("GET_DESCRI\n"); break; case USB_REQ_SET_DESCRIPTOR: pr_debug("SET_DESCRI\n"); break; case USB_REQ_GET_CONFIGURATION: pr_debug("GET_CONFIG\n"); break; case USB_REQ_SET_CONFIGURATION: pr_debug("SET_CONFIG\n"); break; case 
USB_REQ_GET_INTERFACE: pr_debug("GET_INTERF\n"); break; case USB_REQ_SET_INTERFACE: pr_debug("SET_INTERF\n"); break; case USB_REQ_SYNCH_FRAME: pr_debug("SYNC_FRAME\n"); break; default: pr_debug("REQ(%02X)\n", cmd->bRequest); break; } usbip_dump_request_type(cmd->bRequestType); } else if ((cmd->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) { pr_debug("CLASS\n"); } else if ((cmd->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) { pr_debug("VENDOR\n"); } else if ((cmd->bRequestType & USB_TYPE_MASK) == USB_TYPE_RESERVED) { pr_debug("RESERVED\n"); } } void usbip_dump_urb(struct urb *urb) { struct device *dev; if (!urb) { pr_debug("urb: null pointer!!\n"); return; } if (!urb->dev) { pr_debug("urb->dev: null pointer!!\n"); return; } dev = &urb->dev->dev; dev_dbg(dev, " urb :%p\n", urb); dev_dbg(dev, " dev :%p\n", urb->dev); usbip_dump_usb_device(urb->dev); dev_dbg(dev, " pipe :%08x ", urb->pipe); usbip_dump_pipe(urb->pipe); dev_dbg(dev, " status :%d\n", urb->status); dev_dbg(dev, " transfer_flags :%08X\n", urb->transfer_flags); dev_dbg(dev, " transfer_buffer :%p\n", urb->transfer_buffer); dev_dbg(dev, " transfer_buffer_length:%d\n", urb->transfer_buffer_length); dev_dbg(dev, " actual_length :%d\n", urb->actual_length); dev_dbg(dev, " setup_packet :%p\n", urb->setup_packet); if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL) usbip_dump_usb_ctrlrequest( (struct usb_ctrlrequest *)urb->setup_packet); dev_dbg(dev, " start_frame :%d\n", urb->start_frame); dev_dbg(dev, " number_of_packets :%d\n", urb->number_of_packets); dev_dbg(dev, " interval :%d\n", urb->interval); dev_dbg(dev, " error_count :%d\n", urb->error_count); dev_dbg(dev, " context :%p\n", urb->context); dev_dbg(dev, " complete :%p\n", urb->complete); } EXPORT_SYMBOL_GPL(usbip_dump_urb); void usbip_dump_header(struct usbip_header *pdu) { pr_debug("BASE: cmd %u seq %u devid %u dir %u ep %u\n", pdu->base.command, pdu->base.seqnum, pdu->base.devid, pdu->base.direction, pdu->base.ep); switch 
(pdu->base.command) { case USBIP_CMD_SUBMIT: pr_debug("USBIP_CMD_SUBMIT: " "x_flags %u x_len %u sf %u #p %d iv %d\n", pdu->u.cmd_submit.transfer_flags, pdu->u.cmd_submit.transfer_buffer_length, pdu->u.cmd_submit.start_frame, pdu->u.cmd_submit.number_of_packets, pdu->u.cmd_submit.interval); break; case USBIP_CMD_UNLINK: pr_debug("USBIP_CMD_UNLINK: seq %u\n", pdu->u.cmd_unlink.seqnum); break; case USBIP_RET_SUBMIT: pr_debug("USBIP_RET_SUBMIT: st %d al %u sf %d #p %d ec %d\n", pdu->u.ret_submit.status, pdu->u.ret_submit.actual_length, pdu->u.ret_submit.start_frame, pdu->u.ret_submit.number_of_packets, pdu->u.ret_submit.error_count); break; case USBIP_RET_UNLINK: pr_debug("USBIP_RET_UNLINK: status %d\n", pdu->u.ret_unlink.status); break; default: /* NOT REACHED */ pr_err("unknown command\n"); break; } } EXPORT_SYMBOL_GPL(usbip_dump_header); /* Receive data over TCP/IP. */ int usbip_recv(struct socket *sock, void *buf, int size) { int result; struct msghdr msg; struct kvec iov; int total = 0; /* for blocks of if (usbip_dbg_flag_xmit) */ char *bp = buf; int osize = size; usbip_dbg_xmit("enter\n"); if (!sock || !buf || !size) { pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf, size); return -EINVAL; } do { sock->sk->sk_allocation = GFP_NOIO; iov.iov_base = buf; iov.iov_len = size; msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_namelen = 0; msg.msg_flags = MSG_NOSIGNAL; result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL); if (result <= 0) { pr_debug("receive sock %p buf %p size %u ret %d total %d\n", sock, buf, size, result, total); goto err; } size -= result; buf += result; total += result; } while (size > 0); if (usbip_dbg_flag_xmit) { if (!in_interrupt()) pr_debug("%-10s:", current->comm); else pr_debug("interrupt :"); pr_debug("receiving....\n"); usbip_dump_buffer(bp, osize); pr_debug("received, osize %d ret %d size %d total %d\n", osize, result, size, total); } return total; err: return result; } 
EXPORT_SYMBOL_GPL(usbip_recv);

/*
 * Resolve a userspace socket fd to its struct socket.
 *
 * NOTE(review): fget() takes a reference on the file that is never released
 * here (no fput() on either the !S_ISSOCK path or the success path) —
 * presumably the caller holds the fd open for the session, but confirm;
 * later kernels switched to sockfd_lookup()/sockfd_put() for this reason.
 */
struct socket *sockfd_to_socket(unsigned int sockfd)
{
	struct socket *socket;
	struct file *file;
	struct inode *inode;

	file = fget(sockfd);
	if (!file) {
		pr_err("invalid sockfd\n");
		return NULL;
	}

	inode = file->f_dentry->d_inode;

	if (!inode || !S_ISSOCK(inode->i_mode))
		return NULL;

	socket = SOCKET_I(inode);

	return socket;
}
EXPORT_SYMBOL_GPL(sockfd_to_socket);

/* there may be more cases to tweak the flags. */
static unsigned int tweak_transfer_flags(unsigned int flags)
{
	/* DMA mapping is a local (per-host) concern; strip it before
	 * sending URB flags across the wire */
	flags &= ~URB_NO_TRANSFER_DMA_MAP;
	return flags;
}

/*
 * Copy CMD_SUBMIT fields between a wire PDU and an URB.
 * @pack != 0: URB -> PDU (client/vhci tx side); else PDU -> URB (stub rx).
 */
static void usbip_pack_cmd_submit(struct usbip_header *pdu, struct urb *urb,
				  int pack)
{
	struct usbip_header_cmd_submit *spdu = &pdu->u.cmd_submit;

	/*
	 * Some members are not still implemented in usbip. I hope this issue
	 * will be discussed when usbip is ported to other operating systems.
	 */
	if (pack) {
		/* vhci_tx.c */
		spdu->transfer_flags =
			tweak_transfer_flags(urb->transfer_flags);
		spdu->transfer_buffer_length	= urb->transfer_buffer_length;
		spdu->start_frame		= urb->start_frame;
		spdu->number_of_packets		= urb->number_of_packets;
		spdu->interval			= urb->interval;
	} else {
		/* stub_rx.c */
		urb->transfer_flags         = spdu->transfer_flags;
		urb->transfer_buffer_length = spdu->transfer_buffer_length;
		urb->start_frame            = spdu->start_frame;
		urb->number_of_packets      = spdu->number_of_packets;
		urb->interval               = spdu->interval;
	}
}

/*
 * Copy RET_SUBMIT fields between a wire PDU and an URB.
 * @pack != 0: URB -> PDU (stub tx side); else PDU -> URB (vhci rx).
 */
static void usbip_pack_ret_submit(struct usbip_header *pdu, struct urb *urb,
				  int pack)
{
	struct usbip_header_ret_submit *rpdu = &pdu->u.ret_submit;

	if (pack) {
		/* stub_tx.c */
		rpdu->status		= urb->status;
		rpdu->actual_length	= urb->actual_length;
		rpdu->start_frame	= urb->start_frame;
		rpdu->number_of_packets = urb->number_of_packets;
		rpdu->error_count	= urb->error_count;
	} else {
		/* vhci_rx.c */
		urb->status		= rpdu->status;
		urb->actual_length	= rpdu->actual_length;
		urb->start_frame	= rpdu->start_frame;
		urb->number_of_packets  = rpdu->number_of_packets;
		urb->error_count	= rpdu->error_count;
	}
}

/* Dispatch PDU<->URB field copying on @cmd (CMD_SUBMIT or RET_SUBMIT). */
void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
		    int pack)
{
	switch (cmd) {
	case USBIP_CMD_SUBMIT:
		usbip_pack_cmd_submit(pdu, urb, pack);
		break;
	case USBIP_RET_SUBMIT:
		usbip_pack_ret_submit(pdu, urb, pack);
		break;
	default:
		/* NOT REACHED */
		pr_err("unknown command\n");
		break;
	}
}
EXPORT_SYMBOL_GPL(usbip_pack_pdu);

/* Byte-swap the common header; @send != 0 means CPU -> big-endian wire. */
static void correct_endian_basic(struct usbip_header_basic *base, int send)
{
	if (send) {
		base->command	= cpu_to_be32(base->command);
		base->seqnum	= cpu_to_be32(base->seqnum);
		base->devid	= cpu_to_be32(base->devid);
		base->direction	= cpu_to_be32(base->direction);
		base->ep	= cpu_to_be32(base->ep);
	} else {
		base->command	= be32_to_cpu(base->command);
		base->seqnum	= be32_to_cpu(base->seqnum);
		base->devid	= be32_to_cpu(base->devid);
		base->direction	= be32_to_cpu(base->direction);
		base->ep	= be32_to_cpu(base->ep);
	}
}

static void correct_endian_cmd_submit(struct usbip_header_cmd_submit *pdu,
				      int send)
{
	if (send) {
		pdu->transfer_flags = cpu_to_be32(pdu->transfer_flags);

		cpu_to_be32s(&pdu->transfer_buffer_length);
		cpu_to_be32s(&pdu->start_frame);
		cpu_to_be32s(&pdu->number_of_packets);
		cpu_to_be32s(&pdu->interval);
	} else {
		pdu->transfer_flags = be32_to_cpu(pdu->transfer_flags);

		be32_to_cpus(&pdu->transfer_buffer_length);
		be32_to_cpus(&pdu->start_frame);
		be32_to_cpus(&pdu->number_of_packets);
		be32_to_cpus(&pdu->interval);
	}
}

static void correct_endian_ret_submit(struct usbip_header_ret_submit *pdu,
				      int send)
{
	if (send) {
		cpu_to_be32s(&pdu->status);
		cpu_to_be32s(&pdu->actual_length);
		cpu_to_be32s(&pdu->start_frame);
		cpu_to_be32s(&pdu->number_of_packets);
		cpu_to_be32s(&pdu->error_count);
	} else {
		be32_to_cpus(&pdu->status);
		be32_to_cpus(&pdu->actual_length);
		be32_to_cpus(&pdu->start_frame);
		be32_to_cpus(&pdu->number_of_packets);
		be32_to_cpus(&pdu->error_count);
	}
}

static void correct_endian_cmd_unlink(struct usbip_header_cmd_unlink *pdu,
				      int send)
{
	if (send)
		pdu->seqnum = cpu_to_be32(pdu->seqnum);
	else
		pdu->seqnum = be32_to_cpu(pdu->seqnum);
}

static void correct_endian_ret_unlink(struct usbip_header_ret_unlink *pdu,
				      int send)
{
	if (send)
		cpu_to_be32s(&pdu->status);
	else
		be32_to_cpus(&pdu->status);
}

/*
 * Swap a full PDU between CPU and wire (big-endian) byte order.
 *
 * The command code must be read in CPU order to select the union member:
 * on send it is valid before the basic-header swap, on receive only after.
 */
void usbip_header_correct_endian(struct usbip_header *pdu, int send)
{
	__u32 cmd = 0;

	if (send)
		cmd = pdu->base.command;

	correct_endian_basic(&pdu->base, send);

	if (!send)
		cmd = pdu->base.command;

	switch (cmd) {
	case USBIP_CMD_SUBMIT:
		correct_endian_cmd_submit(&pdu->u.cmd_submit, send);
		break;
	case USBIP_RET_SUBMIT:
		correct_endian_ret_submit(&pdu->u.ret_submit, send);
		break;
	case USBIP_CMD_UNLINK:
		correct_endian_cmd_unlink(&pdu->u.cmd_unlink, send);
		break;
	case USBIP_RET_UNLINK:
		correct_endian_ret_unlink(&pdu->u.ret_unlink, send);
		break;
	default:
		/* NOT REACHED */
		pr_err("unknown command\n");
		break;
	}
}
EXPORT_SYMBOL_GPL(usbip_header_correct_endian);

static void usbip_iso_packet_correct_endian(
		struct usbip_iso_packet_descriptor *iso, int send)
{
	/* does not need all members. but copy all simply. */
	if (send) {
		iso->offset	   = cpu_to_be32(iso->offset);
		iso->length	   = cpu_to_be32(iso->length);
		iso->status	   = cpu_to_be32(iso->status);
		iso->actual_length = cpu_to_be32(iso->actual_length);
	} else {
		iso->offset	   = be32_to_cpu(iso->offset);
		iso->length	   = be32_to_cpu(iso->length);
		iso->status	   = be32_to_cpu(iso->status);
		iso->actual_length = be32_to_cpu(iso->actual_length);
	}
}

/*
 * Copy one ISO descriptor between wire form and the URB's native form.
 * @pack != 0: URB descriptor -> wire descriptor; else the reverse.
 */
static void usbip_pack_iso(struct usbip_iso_packet_descriptor *iso,
			   struct usb_iso_packet_descriptor *uiso, int pack)
{
	if (pack) {
		iso->offset	   = uiso->offset;
		iso->length	   = uiso->length;
		iso->status	   = uiso->status;
		iso->actual_length = uiso->actual_length;
	} else {
		uiso->offset	   = iso->offset;
		uiso->length	   = iso->length;
		uiso->status	   = iso->status;
		uiso->actual_length = iso->actual_length;
	}
}

/* must free buffer */
/*
 * Build the on-wire (big-endian) ISO descriptor array for @urb.
 * Returns a kzalloc'd buffer the caller must kfree; *bufflen is set to the
 * buffer size.  Returns NULL on allocation failure.
 */
void *usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen)
{
	void *buff;
	struct usbip_iso_packet_descriptor *iso;
	int np = urb->number_of_packets;
	ssize_t size = np * sizeof(*iso);
	int i;

	buff = kzalloc(size, GFP_KERNEL);
	if (!buff)
		return NULL;

	for (i = 0; i < np; i++) {
		iso = buff + (i * sizeof(*iso));

		usbip_pack_iso(iso, &urb->iso_frame_desc[i], 1);
		usbip_iso_packet_correct_endian(iso, 1);
	}

	*bufflen = size;

	return buff;
}
EXPORT_SYMBOL_GPL(usbip_alloc_iso_desc_pdu);

/* some members of urb must be substituted before. */
/*
 * Receive and unpack the ISO descriptor array for @urb from the TCP socket,
 * validating that the summed actual lengths match urb->actual_length.
 * Returns the number of bytes received, 0 for non-ISO/empty URBs, or a
 * negative errno; protocol errors also queue a TCP-error event on @ud.
 */
int usbip_recv_iso(struct usbip_device *ud, struct urb *urb)
{
	void *buff;
	struct usbip_iso_packet_descriptor *iso;
	int np = urb->number_of_packets;
	int size = np * sizeof(*iso);
	int i;
	int ret;
	int total_length = 0;

	if (!usb_pipeisoc(urb->pipe))
		return 0;

	/* my Bluetooth dongle gets ISO URBs which are np = 0 */
	if (np == 0) {
		/* pr_info("iso np == 0\n"); */
		/* usbip_dump_urb(urb); */
		return 0;
	}

	buff = kzalloc(size, GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	ret = usbip_recv(ud->tcp_socket, buff, size);
	if (ret != size) {
		dev_err(&urb->dev->dev, "recv iso_frame_descriptor, %d\n",
			ret);
		kfree(buff);

		if (ud->side == USBIP_STUB)
			usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		else
			usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);

		return -EPIPE;
	}

	for (i = 0; i < np; i++) {
		iso = buff + (i * sizeof(*iso));

		usbip_iso_packet_correct_endian(iso, 0);
		usbip_pack_iso(iso, &urb->iso_frame_desc[i], 0);
		total_length += urb->iso_frame_desc[i].actual_length;
	}

	kfree(buff);

	if (total_length != urb->actual_length) {
		dev_err(&urb->dev->dev,
			"total length of iso packets %d not equal to actual "
			"length of buffer %d\n",
			total_length, urb->actual_length);

		if (ud->side == USBIP_STUB)
			usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		else
			usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);

		return -EPIPE;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(usbip_recv_iso);

/*
 * This functions restores the padding which was removed for optimizing
 * the bandwidth during transfer over tcp/ip
 *
 * buffer and iso packets need to be stored and be in propeper endian in urb
 * before calling this function
 */
void usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
{
	int np = urb->number_of_packets;
	int i;
	int actualoffset = urb->actual_length;

	if (!usb_pipeisoc(urb->pipe))
		return;

	/* if no packets or length of data is 0, then nothing to unpack */
	if (np == 0 || urb->actual_length == 0)
		return;

	/*
	 * if actual_length is transfer_buffer_length then no padding is
	 * present.
	 */
	if (urb->actual_length == urb->transfer_buffer_length)
		return;

	/*
	 * loop over all packets from last to first (to prevent overwritting
	 * memory when padding) and move them into the proper place
	 */
	for (i = np-1; i > 0; i--) {
		actualoffset -= urb->iso_frame_desc[i].actual_length;
		memmove(urb->transfer_buffer + urb->iso_frame_desc[i].offset,
			urb->transfer_buffer + actualoffset,
			urb->iso_frame_desc[i].actual_length);
	}
}
EXPORT_SYMBOL_GPL(usbip_pad_iso);

/* some members of urb must be substituted before. */
/*
 * Receive the URB's transfer buffer (xbuff) from the TCP socket.
 * Direction decides the size: OUT on the stub side (transfer_buffer_length),
 * IN on the vhci side (actual_length).  Returns bytes received or 0 when no
 * payload is expected.  NOTE(review): on a short receive, the stub side
 * falls through and returns the short @ret while the vhci side returns
 * -EPIPE — presumably intentional asymmetry, but worth confirming.
 */
int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
{
	int ret;
	int size;

	if (ud->side == USBIP_STUB) {
		/* stub_rx.c */
		/* the direction of urb must be OUT. */
		if (usb_pipein(urb->pipe))
			return 0;

		size = urb->transfer_buffer_length;
	} else {
		/* vhci_rx.c */
		/* the direction of urb must be IN. */
		if (usb_pipeout(urb->pipe))
			return 0;

		size = urb->actual_length;
	}

	/* no need to recv xbuff */
	if (!(size > 0))
		return 0;

	ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
	if (ret != size) {
		dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
		if (ud->side == USBIP_STUB) {
			usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		} else {
			usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
			return -EPIPE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(usbip_recv_xbuff);

static int __init usbip_core_init(void)
{
	pr_info(DRIVER_DESC " v" USBIP_VERSION "\n");
	return 0;
}

static void __exit usbip_core_exit(void)
{
	return;
}

module_init(usbip_core_init);
module_exit(usbip_core_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION(USBIP_VERSION);
gpl-2.0
SimpleAOSP-Kernel/kernel_shamu
drivers/input/touchscreen/hp680_ts_input.c
9784
2949
#include <linux/input.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/adc.h>
#include <mach/hp6xx.h>

#define MODNAME "hp680_ts_input"

/* Calibration bounds reported to the input layer (raw ADC units). */
#define HP680_TS_ABS_X_MIN	40
#define HP680_TS_ABS_X_MAX	950
#define HP680_TS_ABS_Y_MIN	80
#define HP680_TS_ABS_Y_MAX	910

/* SuperH port data registers used to drive the touch panel scan lines. */
#define	PHDR	0xa400012e
#define SCPDR	0xa4000136

static void do_softint(struct work_struct *work);

static struct input_dev *hp680_ts_dev;
static DECLARE_DELAYED_WORK(work, do_softint);

/*
 * Deferred-work handler: if the pen is (still) down, sequence the scan
 * enables on SCPDR to sample Y then X via the ADC, then report the touch
 * state and re-enable the pen-down IRQ that the ISR disabled.
 */
static void do_softint(struct work_struct *work)
{
	int absx = 0, absy = 0;
	u8 scpdr;
	int touched = 0;

	if (__raw_readb(PHDR) & PHDR_TS_PEN_DOWN) {
		/* Y-axis scan: enable scanning, drop the Y drive line. */
		scpdr = __raw_readb(SCPDR);
		scpdr |= SCPDR_TS_SCAN_ENABLE;
		scpdr &= ~SCPDR_TS_SCAN_Y;
		__raw_writeb(scpdr, SCPDR);
		udelay(30);	/* let the panel voltage settle */

		absy = adc_single(ADC_CHANNEL_TS_Y);

		/* X-axis scan. */
		scpdr = __raw_readb(SCPDR);
		scpdr |= SCPDR_TS_SCAN_Y;
		scpdr &= ~SCPDR_TS_SCAN_X;
		__raw_writeb(scpdr, SCPDR);
		udelay(30);

		absx = adc_single(ADC_CHANNEL_TS_X);

		/* Restore idle state and re-check the pen after settling. */
		scpdr = __raw_readb(SCPDR);
		scpdr |= SCPDR_TS_SCAN_X;
		scpdr &= ~SCPDR_TS_SCAN_ENABLE;
		__raw_writeb(scpdr, SCPDR);
		udelay(100);
		touched = __raw_readb(PHDR) & PHDR_TS_PEN_DOWN;
	}

	if (touched) {
		input_report_key(hp680_ts_dev, BTN_TOUCH, 1);
		input_report_abs(hp680_ts_dev, ABS_X, absx);
		input_report_abs(hp680_ts_dev, ABS_Y, absy);
	} else {
		input_report_key(hp680_ts_dev, BTN_TOUCH, 0);
	}

	input_sync(hp680_ts_dev);
	enable_irq(HP680_TS_IRQ);
}

/*
 * Pen-down IRQ: mask the line and defer the (slow, udelay-heavy) ADC
 * sampling to workqueue context ~50ms later.
 */
static irqreturn_t hp680_ts_interrupt(int irq, void *dev)
{
	disable_irq_nosync(irq);
	schedule_delayed_work(&work, HZ / 20);

	return IRQ_HANDLED;
}

/* Allocate and register the input device and claim the pen-down IRQ. */
static int __init hp680_ts_init(void)
{
	int err;

	hp680_ts_dev = input_allocate_device();
	if (!hp680_ts_dev)
		return -ENOMEM;

	hp680_ts_dev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY);
	hp680_ts_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);

	input_set_abs_params(hp680_ts_dev, ABS_X,
			     HP680_TS_ABS_X_MIN, HP680_TS_ABS_X_MAX, 0, 0);
	input_set_abs_params(hp680_ts_dev, ABS_Y,
			     HP680_TS_ABS_Y_MIN, HP680_TS_ABS_Y_MAX, 0, 0);

	hp680_ts_dev->name = "HP Jornada touchscreen";
	hp680_ts_dev->phys = "hp680_ts/input0";

	if (request_irq(HP680_TS_IRQ, hp680_ts_interrupt,
			0, MODNAME, NULL) < 0) {
		printk(KERN_ERR "hp680_touchscreen.c: Can't allocate irq %d\n",
		       HP680_TS_IRQ);
		err = -EBUSY;
		goto fail1;
	}

	err = input_register_device(hp680_ts_dev);
	if (err)
		goto fail2;

	return 0;

 fail2:
	free_irq(HP680_TS_IRQ, NULL);
	cancel_delayed_work_sync(&work);
 fail1:
	input_free_device(hp680_ts_dev);
	return err;
}

static void __exit hp680_ts_exit(void)
{
	free_irq(HP680_TS_IRQ, NULL);
	cancel_delayed_work_sync(&work);
	input_unregister_device(hp680_ts_dev);
}

module_init(hp680_ts_init);
module_exit(hp680_ts_exit);

MODULE_AUTHOR("Andriy Skulysh, askulysh@image.kiev.ua");
MODULE_DESCRIPTION("HP Jornada 680 touchscreen driver");
MODULE_LICENSE("GPL");
gpl-2.0
linux-pmfs/pmfs
sound/isa/gus/gus_volume.c
10040
5656
/*
 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/time.h>
#include <linux/export.h>
#include <sound/core.h>
#include <sound/gus.h>
#define __GUS_TABLES_ALLOC__
#include "gus_tables.h"

EXPORT_SYMBOL(snd_gf1_atten_table); /* for snd-gus-synth module */

/*
 * Convert a 16-bit linear volume to the GF1's 4.8 floating-point raw
 * volume format: 4-bit exponent in the high byte, 8-bit mantissa low.
 */
unsigned short snd_gf1_lvol_to_gvol_raw(unsigned int vol)
{
	unsigned short e, m, tmp;

	if (vol > 65535)
		vol = 65535;
	tmp = vol;
	e = 7;
	if (tmp < 128) {
		/* small values: shrink the exponent to fit */
		while (e > 0 && tmp < (1 << e))
			e--;
	} else {
		/* large values: grow the exponent until tmp fits in 8 bits */
		while (tmp > 255) {
			tmp >>= 1;
			e++;
		}
	}
	m = vol - (1 << e);
	if (m > 0) {
		if (e > 8)
			m >>= e - 8;
		else if (e < 8)
			m <<= 8 - e;
		m &= 255;
	}
	return (e << 8) | m;
}

#if 0

/* Inverse of snd_gf1_lvol_to_gvol_raw(): GF1 raw volume back to linear. */
unsigned int snd_gf1_gvol_to_lvol_raw(unsigned short gf1_vol)
{
	unsigned int rvol;
	unsigned short e, m;

	if (!gf1_vol)
		return 0;
	e = gf1_vol >> 8;
	m = (unsigned char) gf1_vol;
	rvol = 1 << e;
	if (e > 8)
		return rvol | (m << (e - 8));
	return rvol | (m >> (8 - e));
}

/*
 * Compute the GF1 volume-ramp register value (2-bit range, 6-bit
 * increment) needed to ramp from @start to @end in @us microseconds.
 */
unsigned int snd_gf1_calc_ramp_rate(struct snd_gus_card * gus,
				    unsigned short start,
				    unsigned short end,
				    unsigned int us)
{
	static unsigned char vol_rates[19] = {
		23, 24, 26, 28, 29, 31, 32, 34,
		36, 37, 39, 40, 42, 44, 45, 47,
		49, 50, 52
	};
	unsigned short range, increment, value, i;

	start >>= 4;
	end >>= 4;
	if (start < end)
		us /= end - start;
	else
		us /= start - end;
	range = 4;
	value = gus->gf1.enh_mode ?
		vol_rates[0] :
		vol_rates[gus->gf1.active_voices - 14];
	for (i = 0; i < 3; i++) {
		if (us < value) {
			range = i;
			break;
		} else
			value <<= 3;
	}
	if (range == 4) {
		range = 3;
		increment = 1;
	} else
		increment = (value + (value >> 1)) / us;
	return (range << 6) | (increment & 0x3f);
}

#endif  /*  0  */

/*
 * Translate a 16.3 fixed-point frequency into the GF1 frequency-control
 * register value, scaled by the card's current playback frequency.
 */
unsigned short snd_gf1_translate_freq(struct snd_gus_card * gus,
				      unsigned int freq16)
{
	freq16 >>= 3;
	if (freq16 < 50)
		freq16 = 50;
	if (freq16 & 0xf8000000) {
		freq16 = ~0xf8000000;
		snd_printk(KERN_ERR
			   "snd_gf1_translate_freq: overflow - freq = 0x%x\n",
			   freq16);
	}
	/* round to nearest: add half the divisor before dividing */
	return ((freq16 << 9) + (gus->gf1.playback_freq >> 1)) /
		gus->gf1.playback_freq;
}

#if 0

/*
 * Compute the LFO vibrato depth register value for a pitch deviation of
 * @cents around the current frequency-control value @fc_register.
 */
short snd_gf1_compute_vibrato(short cents, unsigned short fc_register)
{
	static short vibrato_table[] = {
		0, 0, 32, 592, 61, 1175, 93, 1808,
		124, 2433, 152, 3007, 182, 3632, 213, 4290,
		241, 4834, 255, 5200
	};

	long depth;
	short *vi1, *vi2, pcents, v1;

	pcents = cents < 0 ? -cents : cents;
	for (vi1 = vibrato_table, vi2 = vi1 + 2; pcents > *vi2;
	     vi1 = vi2, vi2 += 2);
	v1 = *(vi1 + 1);
	/* The FC table above is a list of pairs. The first number in the pair */
	/* is the cents index from 0-255 cents, and the second number in the  */
	/* pair is the FC adjustment needed to change the pitch by the indexed */
	/* number of cents. The table was created for an FC of 32768. */
	/* The following expression does a linear interpolation against the */
	/* approximated log curve in the table above, and then scales the number */
	/* by the FC before the LFO. This calculation also adjusts the output */
	/* value to produce the appropriate depth for the hardware. The depth */
	/* is 2 * desired FC + 1. */
	depth = (((int) (*(vi2 + 1) - *vi1) * (pcents - *vi1) /
		  (*vi2 - *vi1)) + v1) * fc_register >> 14;
	if (depth)
		depth++;
	if (depth > 255)
		depth = 255;
	return cents < 0 ? -(short) depth : (short) depth;
}

/*
 * Convert a 14-bit MIDI pitchbend value (center 8192) and sensitivity
 * into a linear frequency multiplier (1024 == no bend), via a 12-entry
 * per-semitone log table with linear interpolation between entries.
 */
unsigned short snd_gf1_compute_pitchbend(unsigned short pitchbend,
					 unsigned short sens)
{
	static long log_table[] = {1024, 1085, 1149, 1218, 1290, 1367,
				   1448, 1534, 1625, 1722, 1825, 1933};
	int wheel, sensitivity;
	unsigned int mantissa, f1, f2;
	unsigned short semitones, f1_index, f2_index, f1_power, f2_power;
	char bend_down = 0;
	int bend;

	if (!sens)
		return 1024;
	wheel = (int) pitchbend - 8192;
	sensitivity = ((int) sens * wheel) / 128;
	if (sensitivity < 0) {
		bend_down = 1;
		sensitivity = -sensitivity;
	}
	semitones = (unsigned int) (sensitivity >> 13);
	mantissa = sensitivity % 8192;
	f1_index = semitones % 12;
	f2_index = (semitones + 1) % 12;
	f1_power = semitones / 12;
	f2_power = (semitones + 1) / 12;
	f1 = log_table[f1_index] << f1_power;
	f2 = log_table[f2_index] << f2_power;
	bend = (int) ((((f2 - f1) * mantissa) >> 13) + f1);
	if (bend_down)
		bend = 1048576L / bend;
	return bend;
}

/*
 * Compute the 16-bit GF1 frequency-control value for sample frequency
 * @freq at sample @rate, relative to the card mixing rate @mix_rate.
 */
unsigned short snd_gf1_compute_freq(unsigned int freq,
				    unsigned int rate,
				    unsigned short mix_rate)
{
	unsigned int fc;
	int scale = 0;

	while (freq >= 4194304L) {
		scale++;
		freq >>= 1;
	}
	fc = (freq << 10) / rate;
	if (fc > 97391L) {
		fc = 97391;
		snd_printk(KERN_ERR
			   "patch: (1) fc frequency overflow - %u\n", fc);
	}
	fc = (fc * 44100UL) / mix_rate;
	while (scale--)
		fc <<= 1;
	if (fc > 65535L) {
		fc = 65535;
		snd_printk(KERN_ERR
			   "patch: (2) fc frequency overflow - %u\n", fc);
	}
	return (unsigned short) fc;
}

#endif  /*  0  */
gpl-2.0
wjrsonic/openwrt
qca/src/qca-legacy-uboot/board/cmi/cmi.c
57
1819
/*
 * (C) Copyright 2003
 * Martin Winistoerfer, martinwinistoerfer@gmx.ch.
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

/*
 * File:		cmi.c
 *
 * Discription:		For generic board specific functions
 *
 */

#include <common.h>
#include <mpc5xx.h>

#define SRAM_SIZE	1024000L	/* 1M RAM available*/

#if defined(__APPLE__)
/* Leading underscore on symbols */
#  define SYM_CHAR "_"
#else /* No leading character on symbols */
#  define SYM_CHAR
#endif

/*
 * Macros to generate global absolutes.
 */
#define GEN_SYMNAME(str) SYM_CHAR #str
#define GEN_VALUE(str) #str
#define GEN_ABS(name, value)				\
	asm (".globl " GEN_SYMNAME(name));		\
	asm (GEN_SYMNAME(name) " = " GEN_VALUE(value))

/*
 * Check the board
 */
/* Board identification hook: the CMI board carries no hardware ID, so the
 * probe is unconditional.  Always returns 0 (success). */
int checkboard(void)
{
	puts ("Board: ### No HW ID - assuming CMI board\n");
	return (0);
}

/*
 * Get RAM size.
 */
/* DRAM init hook: no controller setup needed; report the fixed SRAM size. */
long int initdram(int board_type)
{
	return (SRAM_SIZE);	/* We currently have a static size adapted for cmi board. */
}

/*
 * Absolute environment address for linker file.
 */
GEN_ABS(env_start, CFG_ENV_OFFSET + CFG_FLASH_BASE);
gpl-2.0
hoonir/iamroot_hypstudy_5th
drivers/staging/bcm/InterfaceDld.c
57
11102
#include "headers.h"

/*
 * Stream the open file @flp to the device, writing it in
 * MAX_TRANSFER_CTRL_BYTE_USB-sized chunks to consecutive chip addresses
 * starting at @on_chip_loc via InterfaceWRM().  Returns 0 on EOF, or a
 * negative errno from the read/WRM that failed.
 */
int InterfaceFileDownload(PVOID arg, struct file *flp,
			  unsigned int on_chip_loc)
{
	/* unsigned int reg = 0; */
	mm_segment_t oldfs = {0};
	int errno = 0, len = 0;
	/* ,is_config_file = 0 */
	loff_t pos = 0;
	struct bcm_interface_adapter *psIntfAdapter =
		(struct bcm_interface_adapter *)arg;
	/* struct bcm_mini_adapter *Adapter = psIntfAdapter->psAdapter; */
	char *buff = kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);

	if (!buff)
		return -ENOMEM;

	while (1) {
		/* temporarily lift the user-space pointer check so
		 * vfs_read() accepts our kernel buffer */
		oldfs = get_fs();
		set_fs(get_ds());
		len = vfs_read(flp, (void __force __user *)buff,
			       MAX_TRANSFER_CTRL_BYTE_USB, &pos);
		set_fs(oldfs);
		if (len <= 0) {
			if (len < 0) {
				BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
						DBG_TYPE_INITEXIT, MP_INIT,
						DBG_LVL_ALL, "len < 0");
				errno = len;
			} else {
				errno = 0;
				BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
						DBG_TYPE_INITEXIT, MP_INIT,
						DBG_LVL_ALL,
						"Got end of file!");
			}
			break;
		}
		/* BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_INITEXIT, MP_INIT,
		 *			  DBG_LVL_ALL, buff,
		 *			  MAX_TRANSFER_CTRL_BYTE_USB);
		 */
		errno = InterfaceWRM(psIntfAdapter, on_chip_loc, buff, len);
		if (errno) {
			BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
					DBG_TYPE_PRINTK, 0, 0,
					"WRM Failed! status: %d", errno);
			break;
		}
		on_chip_loc += MAX_TRANSFER_CTRL_BYTE_USB;
	}

	kfree(buff);
	return errno;
}

/*
 * Verify a previous download: re-read the file @flp and compare each chunk
 * against what InterfaceRDM() reads back from the chip at @on_chip_loc.
 * Returns STATUS_SUCCESS, or a negative status on read/compare failure.
 */
int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp,
				  unsigned int on_chip_loc)
{
	char *buff, *buff_readback;
	unsigned int reg = 0;
	mm_segment_t oldfs = {0};
	int errno = 0, len = 0, is_config_file = 0;
	loff_t pos = 0;
	static int fw_down;
	INT Status = STATUS_SUCCESS;
	struct bcm_interface_adapter *psIntfAdapter =
		(struct bcm_interface_adapter *)arg;
	int bytes;

	buff = kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_DMA);
	buff_readback = kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_DMA);
	if (!buff || !buff_readback) {
		kfree(buff);
		kfree(buff_readback);
		return -ENOMEM;
	}

	is_config_file = (on_chip_loc == CONFIG_BEGIN_ADDR) ? 1 : 0;

	memset(buff_readback, 0, MAX_TRANSFER_CTRL_BYTE_USB);
	memset(buff, 0, MAX_TRANSFER_CTRL_BYTE_USB);
	while (1) {
		oldfs = get_fs();
		set_fs(get_ds());
		len = vfs_read(flp, (void __force __user *)buff,
			       MAX_TRANSFER_CTRL_BYTE_USB, &pos);
		set_fs(oldfs);
		fw_down++;
		if (len <= 0) {
			if (len < 0) {
				BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
						DBG_TYPE_INITEXIT, MP_INIT,
						DBG_LVL_ALL, "len < 0");
				errno = len;
			} else {
				errno = 0;
				BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
						DBG_TYPE_INITEXIT, MP_INIT,
						DBG_LVL_ALL,
						"Got end of file!");
			}
			break;
		}
		bytes = InterfaceRDM(psIntfAdapter, on_chip_loc,
				     buff_readback, len);
		if (bytes < 0) {
			Status = bytes;
			BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
					DBG_TYPE_INITEXIT, MP_INIT,
					DBG_LVL_ALL,
					"RDM of len %d Failed! %d", len, reg);
			goto exit;
		}
		reg++;
		if ((len-sizeof(unsigned int)) < 4) {
			/* short tail: compare byte-wise */
			if (memcmp(buff_readback, buff, len)) {
				BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
						DBG_TYPE_INITEXIT, MP_INIT,
						DBG_LVL_ALL,
						"Firmware Download is not proper %d",
						fw_down);
				BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
						DBG_TYPE_INITEXIT, MP_INIT,
						DBG_LVL_ALL,
						"Length is: %d", len);
				Status = -EIO;
				goto exit;
			}
		} else {
			/* compare 32 bits at a time, from the end backwards */
			len -= 4;
			while (len) {
				if (*(unsigned int *)&buff_readback[len] !=
						*(unsigned int *)&buff[len]) {
					BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
							DBG_TYPE_INITEXIT,
							MP_INIT, DBG_LVL_ALL,
							"Firmware Download is not proper %d",
							fw_down);
					BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
							DBG_TYPE_INITEXIT,
							MP_INIT, DBG_LVL_ALL,
							"Val from Binary %x, Val From Read Back %x ",
							*(unsigned int *)&buff[len],
							*(unsigned int*)&buff_readback[len]);
					BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,
							DBG_TYPE_INITEXIT,
							MP_INIT, DBG_LVL_ALL,
							"len =%x!!!", len);
					Status = -EIO;
					goto exit;
				}
				len -= 4;
			}
		}
		on_chip_loc += MAX_TRANSFER_CTRL_BYTE_USB;
	} /* End of while(1) */

exit:
	kfree(buff);
	kfree(buff_readback);
	return Status;
}

/*
 * Copy the target-params config blob from user space, parse it, bring up
 * LEDs/DDR/NVM, then download and verify it at CONFIG_BEGIN_ADDR.
 * Returns STATUS_SUCCESS or a negative errno.
 */
static int bcm_download_config_file(struct bcm_mini_adapter *Adapter,
				    struct bcm_firmware_info *psFwInfo)
{
	int retval = STATUS_SUCCESS;
	B_UINT32 value = 0;

	if (Adapter->pstargetparams == NULL) {
		Adapter->pstargetparams =
			kmalloc(sizeof(struct bcm_target_params), GFP_KERNEL);
		if (Adapter->pstargetparams == NULL)
			return -ENOMEM;
	}

	if (psFwInfo->u32FirmwareLength != sizeof(struct bcm_target_params))
		return -EIO;

	retval = copy_from_user(Adapter->pstargetparams,
				psFwInfo->pvMappedFirmwareAddress,
				psFwInfo->u32FirmwareLength);
	if (retval) {
		kfree(Adapter->pstargetparams);
		Adapter->pstargetparams = NULL;
		return -EFAULT;
	}

	/* Parse the structure and then Download the Firmware */
	beceem_parse_target_struct(Adapter);

	/* Initializing the NVM. */
	BcmInitNVM(Adapter);
	retval = InitLedSettings(Adapter);
	if (retval) {
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT,
				DBG_LVL_ALL, "INIT LED Failed\n");
		return retval;
	}

	if (Adapter->LEDInfo.led_thread_running &
			BCM_LED_THREAD_RUNNING_ACTIVELY) {
		Adapter->LEDInfo.bLedInitDone = FALSE;
		Adapter->DriverState = DRIVER_INIT;
		wake_up(&Adapter->LEDInfo.notify_led_event);
	}

	if (Adapter->LEDInfo.led_thread_running &
			BCM_LED_THREAD_RUNNING_ACTIVELY) {
		Adapter->DriverState = FW_DOWNLOAD;
		wake_up(&Adapter->LEDInfo.notify_led_event);
	}

	/* Initialize the DDR Controller */
	retval = ddr_init(Adapter);
	if (retval) {
		BCM_DEBUG_PRINT (Adapter, DBG_TYPE_INITEXIT, MP_INIT,
				 DBG_LVL_ALL, "DDR Init Failed\n");
		return retval;
	}

	value = 0;
	wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4,
	       &value, sizeof(value));
	wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8,
	       &value, sizeof(value));

	if (Adapter->eNVMType == NVM_FLASH) {
		retval = PropagateCalParamsFromFlashToMemory(Adapter);
		if (retval) {
			BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT,
					DBG_LVL_ALL,
					"propagaion of cal param failed with status :%d",
					retval);
			return retval;
		}
	}

	retval = buffDnldVerify(Adapter, (PUCHAR)Adapter->pstargetparams,
				sizeof(struct bcm_target_params),
				CONFIG_BEGIN_ADDR);

	if (retval)
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT,
				DBG_LVL_ALL,
				"configuration file not downloaded properly");
	else
		Adapter->bCfgDownloaded = TRUE;

	return retval;
}

/*
 * Compare a read-back buffer against the source buffer.  Short buffers
 * (< ~8 bytes) compare byte-wise; otherwise 32 bits at a time from the
 * end backwards.  Returns STATUS_SUCCESS or -EINVAL on mismatch.
 */
static int bcm_compare_buff_contents(unsigned char *readbackbuff,
				     unsigned char *buff, unsigned int len)
{
	int retval = STATUS_SUCCESS;
	struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);

	if ((len-sizeof(unsigned int)) < 4) {
		if (memcmp(readbackbuff, buff, len))
			retval = -EINVAL;
	} else {
		len -= 4;
		while (len) {
			if (*(unsigned int *)&readbackbuff[len] !=
					*(unsigned int *)&buff[len]) {
				BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT,
						MP_INIT, DBG_LVL_ALL,
						"Firmware Download is not proper");
				BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT,
						MP_INIT, DBG_LVL_ALL,
						"Val from Binary %x, Val From Read Back %x ",
						*(unsigned int *)&buff[len],
						*(unsigned int*)&readbackbuff[len]);
				BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT,
						MP_INIT, DBG_LVL_ALL,
						"len =%x!!!", len);
				retval = -EINVAL;
				break;
			}
			len -= 4;
		}
	}
	return retval;
}

/*
 * ioctl entry point for firmware download.  The config file (at
 * CONFIG_BEGIN_ADDR) must be downloaded before any firmware image;
 * firmware images are copied from user space and download-verified.
 */
int bcm_ioctl_fw_download(struct bcm_mini_adapter *Adapter,
			  struct bcm_firmware_info *psFwInfo)
{
	int retval = STATUS_SUCCESS;
	PUCHAR buff = NULL;

	/* Config File is needed for the Driver to download the Config file
	 * and Firmware. Check for the Config file to be first to be sent
	 * from the Application
	 */
	atomic_set(&Adapter->uiMBupdate, FALSE);
	if (!Adapter->bCfgDownloaded &&
			psFwInfo->u32StartingAddress != CONFIG_BEGIN_ADDR) {
		/* Can't Download Firmware. */
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT,
				DBG_LVL_ALL,
				"Download the config File first\n");
		return -EINVAL;
	}

	/* If Config File, Finish the DDR Settings and then Download CFG File */
	if (psFwInfo->u32StartingAddress == CONFIG_BEGIN_ADDR) {
		retval = bcm_download_config_file(Adapter, psFwInfo);
	} else {
		buff = kzalloc(psFwInfo->u32FirmwareLength, GFP_KERNEL);
		if (buff == NULL) {
			BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT,
					DBG_LVL_ALL,
					"Failed in allocation memory");
			return -ENOMEM;
		}

		retval = copy_from_user(buff,
					psFwInfo->pvMappedFirmwareAddress,
					psFwInfo->u32FirmwareLength);
		if (retval != STATUS_SUCCESS) {
			BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT,
					DBG_LVL_ALL,
					"copying buffer from user space failed");
			retval = -EFAULT;
			goto error;
		}

		retval = buffDnldVerify(Adapter,
					buff,
					psFwInfo->u32FirmwareLength,
					psFwInfo->u32StartingAddress);
		if (retval != STATUS_SUCCESS) {
			BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT,
					DBG_LVL_ALL,
					"f/w download failed status :%d",
					retval);
			goto error;
		}
	}

error:
	kfree(buff);
	return retval;
}

/* Write @u32FirmwareLength bytes from @mappedbuffer to the chip at
 * @u32StartingAddress in MAX_TRANSFER_CTRL_BYTE_USB chunks via wrm(). */
static INT buffDnld(struct bcm_mini_adapter *Adapter,
		    PUCHAR mappedbuffer, UINT u32FirmwareLength,
		    ULONG u32StartingAddress)
{
	unsigned int len = 0;
	int retval = STATUS_SUCCESS;

	len = u32FirmwareLength;

	while (u32FirmwareLength) {
		len = MIN_VAL(u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB);
		retval = wrm(Adapter, u32StartingAddress, mappedbuffer, len);
		if (retval) {
			BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT,
					DBG_LVL_ALL,
					"wrm failed with status :%d", retval);
			break;
		}
		u32StartingAddress	+= len;
		u32FirmwareLength	-= len;
		mappedbuffer		+= len;
	}
	return retval;
}

/* Read back @u32FirmwareLength bytes from the chip chunk by chunk and
 * compare them against @mappedbuffer. */
static INT buffRdbkVerify(struct bcm_mini_adapter *Adapter,
			  PUCHAR mappedbuffer, UINT u32FirmwareLength,
			  ULONG u32StartingAddress)
{
	UINT len = u32FirmwareLength;
	INT retval = STATUS_SUCCESS;
	PUCHAR readbackbuff = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);
	int bytes;

	if (NULL == readbackbuff) {
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT,
				DBG_LVL_ALL, "MEMORY ALLOCATION FAILED");
		return -ENOMEM;
	}

	while (u32FirmwareLength && !retval) {
		len = MIN_VAL(u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB);
		bytes = rdm(Adapter, u32StartingAddress, readbackbuff, len);
		if (bytes < 0) {
			retval = bytes;
			BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT,
					DBG_LVL_ALL,
					"rdm failed with status %d", retval);
			break;
		}

		retval = bcm_compare_buff_contents(readbackbuff,
						   mappedbuffer, len);
		if (STATUS_SUCCESS != retval)
			break;

		u32StartingAddress	+= len;
		u32FirmwareLength	-= len;
		mappedbuffer		+= len;
	} /* end of while (u32FirmwareLength && !retval) */

	kfree(readbackbuff);
	return retval;
}

/* Download a buffer to the chip and verify it by reading it back. */
INT buffDnldVerify(struct bcm_mini_adapter *Adapter,
		   unsigned char *mappedbuffer,
		   unsigned int u32FirmwareLength,
		   unsigned long u32StartingAddress)
{
	INT status = STATUS_SUCCESS;

	status = buffDnld(Adapter, mappedbuffer, u32FirmwareLength,
			  u32StartingAddress);
	if (status != STATUS_SUCCESS) {
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT,
				DBG_LVL_ALL, "Buffer download failed");
		goto error;
	}

	status = buffRdbkVerify(Adapter, mappedbuffer, u32FirmwareLength,
				u32StartingAddress);
	if (status != STATUS_SUCCESS) {
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT,
				DBG_LVL_ALL,
				"Buffer readback verifier failed");
		goto error;
	}

error:
	return status;
}
gpl-2.0
AresHou/android_kernel_lge_geehrc
kernel/trace/blktrace.c
57
42907
/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/time.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

/* Select an alternative, minimalistic output than the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1

static struct tracer_opt blk_tracer_opts[] = {
	/* Default disable the minimalistic output */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static atomic_t blk_probes_ref = ATOMIC_INIT(0);

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*
 * Send out a notify message.
 */
/*
 * Emit a BLK_TN_* notify record carrying @len bytes of @data, either into
 * the ftrace ring buffer (when the blk tracer is enabled) or into the
 * blktrace relay channel.  Note the cross-branch "goto record_it": the
 * ftrace path re-uses the relay path's field-filling code.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	int pc = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;

	if (blk_tracer) {
		buffer = blk_tr->buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action;
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len;
		memcpy((void *) t + sizeof(*t), data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(buffer, event, 0, pc);
	}
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
	tsk->btrace_seq = blktrace_seq;
	trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}

/* Record a wall-clock timestamp pair (sec, nsec) into the trace. */
static void trace_note_time(struct blk_trace *bt)
{
	struct timespec now;
	unsigned long flags;
	u32 words[2];

	getnstimeofday(&now);
	words[0] = now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
	local_irq_restore(flags);
}

/* printf-style message injection into a running blktrace (per-cpu buffer). */
void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	/*
	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
	 * message to the trace.
	 */
	if (!(bt->act_mask & BLK_TC_NOTIFY))
		return;

	local_irq_save(flags);
	buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);

/* Return non-zero when this event should be FILTERED OUT (action mask,
 * LBA range, or pid filter rejects it); 0 means "log it". */
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

#define BLK_TC_RAHEAD		BLK_TC_AHEAD

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
*/ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, int rw, u32 what, int error, int pdu_len, void *pdu_data) { struct task_struct *tsk = current; struct ring_buffer_event *event = NULL; struct ring_buffer *buffer = NULL; struct blk_io_trace *t; unsigned long flags = 0; unsigned long *sequence; pid_t pid; int cpu, pc = 0; bool blk_tracer = blk_tracer_enabled; if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer)) return; what |= ddir_act[rw & WRITE]; what |= MASK_TC_BIT(rw, SYNC); what |= MASK_TC_BIT(rw, RAHEAD); what |= MASK_TC_BIT(rw, META); what |= MASK_TC_BIT(rw, DISCARD); pid = tsk->pid; if (act_log_check(bt, what, sector, pid)) return; cpu = raw_smp_processor_id(); if (blk_tracer) { tracing_record_cmdline(current); buffer = blk_tr->buffer; pc = preempt_count(); event = trace_buffer_lock_reserve(buffer, TRACE_BLK, sizeof(*t) + pdu_len, 0, pc); if (!event) return; t = ring_buffer_event_data(event); goto record_it; } /* * A word about the locking here - we disable interrupts to reserve * some space in the relay per-cpu buffer, to prevent an irq * from coming in and stepping on our toes. */ local_irq_save(flags); if (unlikely(tsk->btrace_seq != blktrace_seq)) trace_note_tsk(bt, tsk); t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len); if (t) { sequence = per_cpu_ptr(bt->sequence, cpu); t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; t->sequence = ++(*sequence); t->time = ktime_to_ns(ktime_get()); record_it: /* * These two are not needed in ftrace as they are in the * generic trace_entry, filled by tracing_generic_entry_update, * but for the trace_event->bin() synthesizer benefit we do it * here too. 
*/ t->cpu = cpu; t->pid = pid; t->sector = sector; t->bytes = bytes; t->action = what; t->device = bt->dev; t->error = error; t->pdu_len = pdu_len; if (pdu_len) memcpy((void *) t + sizeof(*t), pdu_data, pdu_len); if (blk_tracer) { trace_buffer_unlock_commit(buffer, event, 0, pc); return; } } local_irq_restore(flags); } static struct dentry *blk_tree_root; static DEFINE_MUTEX(blk_tree_mutex); static void blk_trace_free(struct blk_trace *bt) { debugfs_remove(bt->msg_file); debugfs_remove(bt->dropped_file); relay_close(bt->rchan); debugfs_remove(bt->dir); free_percpu(bt->sequence); free_percpu(bt->msg_data); kfree(bt); } static void blk_trace_cleanup(struct blk_trace *bt) { blk_trace_free(bt); if (atomic_dec_and_test(&blk_probes_ref)) blk_unregister_tracepoints(); } int blk_trace_remove(struct request_queue *q) { struct blk_trace *bt; bt = xchg(&q->blk_trace, NULL); if (!bt) return -EINVAL; if (bt->trace_state != Blktrace_running) blk_trace_cleanup(bt); return 0; } EXPORT_SYMBOL_GPL(blk_trace_remove); static int blk_dropped_open(struct inode *inode, struct file *filp) { filp->private_data = inode->i_private; return 0; } static ssize_t blk_dropped_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { struct blk_trace *bt = filp->private_data; char buf[16]; snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped)); return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); } static const struct file_operations blk_dropped_fops = { .owner = THIS_MODULE, .open = blk_dropped_open, .read = blk_dropped_read, .llseek = default_llseek, }; static int blk_msg_open(struct inode *inode, struct file *filp) { filp->private_data = inode->i_private; return 0; } static ssize_t blk_msg_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos) { char *msg; struct blk_trace *bt; if (count >= BLK_TN_MAX_MSG) return -EINVAL; msg = kmalloc(count + 1, GFP_KERNEL); if (msg == NULL) return -ENOMEM; if (copy_from_user(msg, buffer, 
count)) { kfree(msg); return -EFAULT; } msg[count] = '\0'; bt = filp->private_data; __trace_note_message(bt, "%s", msg); kfree(msg); return count; } static const struct file_operations blk_msg_fops = { .owner = THIS_MODULE, .open = blk_msg_open, .write = blk_msg_write, .llseek = noop_llseek, }; /* * Keep track of how many times we encountered a full subbuffer, to aid * the user space app in telling how many lost events there were. */ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf, void *prev_subbuf, size_t prev_padding) { struct blk_trace *bt; if (!relay_buf_full(buf)) return 1; bt = buf->chan->private_data; atomic_inc(&bt->dropped); return 0; } static int blk_remove_buf_file_callback(struct dentry *dentry) { debugfs_remove(dentry); return 0; } static struct dentry *blk_create_buf_file_callback(const char *filename, struct dentry *parent, int mode, struct rchan_buf *buf, int *is_global) { return debugfs_create_file(filename, mode, parent, buf, &relay_file_operations); } static struct rchan_callbacks blk_relay_callbacks = { .subbuf_start = blk_subbuf_start_callback, .create_buf_file = blk_create_buf_file_callback, .remove_buf_file = blk_remove_buf_file_callback, }; static void blk_trace_setup_lba(struct blk_trace *bt, struct block_device *bdev) { struct hd_struct *part = NULL; if (bdev) part = bdev->bd_part; if (part) { bt->start_lba = part->start_sect; bt->end_lba = part->start_sect + part->nr_sects; } else { bt->start_lba = 0; bt->end_lba = -1ULL; } } /* * Setup everything required to start tracing */ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, struct block_device *bdev, struct blk_user_trace_setup *buts) { struct blk_trace *old_bt, *bt = NULL; struct dentry *dir = NULL; int ret, i; if (!buts->buf_size || !buts->buf_nr) return -EINVAL; strncpy(buts->name, name, BLKTRACE_BDEV_SIZE); buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0'; /* * some device names have larger paths - convert the slashes * to underscores for this 
to work as expected */ for (i = 0; i < strlen(buts->name); i++) if (buts->name[i] == '/') buts->name[i] = '_'; bt = kzalloc(sizeof(*bt), GFP_KERNEL); if (!bt) return -ENOMEM; ret = -ENOMEM; bt->sequence = alloc_percpu(unsigned long); if (!bt->sequence) goto err; bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char)); if (!bt->msg_data) goto err; ret = -ENOENT; mutex_lock(&blk_tree_mutex); if (!blk_tree_root) { blk_tree_root = debugfs_create_dir("block", NULL); if (!blk_tree_root) { mutex_unlock(&blk_tree_mutex); goto err; } } mutex_unlock(&blk_tree_mutex); dir = debugfs_create_dir(buts->name, blk_tree_root); if (!dir) goto err; bt->dir = dir; bt->dev = dev; atomic_set(&bt->dropped, 0); ret = -EIO; bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops); if (!bt->dropped_file) goto err; bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops); if (!bt->msg_file) goto err; bt->rchan = relay_open("trace", dir, buts->buf_size, buts->buf_nr, &blk_relay_callbacks, bt); if (!bt->rchan) goto err; bt->act_mask = buts->act_mask; if (!bt->act_mask) bt->act_mask = (u16) -1; blk_trace_setup_lba(bt, bdev); /* overwrite with user settings */ if (buts->start_lba) bt->start_lba = buts->start_lba; if (buts->end_lba) bt->end_lba = buts->end_lba; bt->pid = buts->pid; bt->trace_state = Blktrace_setup; ret = -EBUSY; old_bt = xchg(&q->blk_trace, bt); if (old_bt) { (void) xchg(&q->blk_trace, old_bt); goto err; } if (atomic_inc_return(&blk_probes_ref) == 1) blk_register_tracepoints(); return 0; err: blk_trace_free(bt); return ret; } int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, struct block_device *bdev, char __user *arg) { struct blk_user_trace_setup buts; int ret; ret = copy_from_user(&buts, arg, sizeof(buts)); if (ret) return -EFAULT; ret = do_blk_trace_setup(q, name, dev, bdev, &buts); if (ret) return ret; if (copy_to_user(arg, &buts, sizeof(buts))) { blk_trace_remove(q); return -EFAULT; } return 0; } 
EXPORT_SYMBOL_GPL(blk_trace_setup); #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) static int compat_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, struct block_device *bdev, char __user *arg) { struct blk_user_trace_setup buts; struct compat_blk_user_trace_setup cbuts; int ret; if (copy_from_user(&cbuts, arg, sizeof(cbuts))) return -EFAULT; buts = (struct blk_user_trace_setup) { .act_mask = cbuts.act_mask, .buf_size = cbuts.buf_size, .buf_nr = cbuts.buf_nr, .start_lba = cbuts.start_lba, .end_lba = cbuts.end_lba, .pid = cbuts.pid, }; memcpy(&buts.name, &cbuts.name, 32); ret = do_blk_trace_setup(q, name, dev, bdev, &buts); if (ret) return ret; if (copy_to_user(arg, &buts.name, 32)) { blk_trace_remove(q); return -EFAULT; } return 0; } #endif int blk_trace_startstop(struct request_queue *q, int start) { int ret; struct blk_trace *bt = q->blk_trace; if (bt == NULL) return -EINVAL; /* * For starting a trace, we can transition from a setup or stopped * trace. For stopping a trace, the state must be running */ ret = -EINVAL; if (start) { if (bt->trace_state == Blktrace_setup || bt->trace_state == Blktrace_stopped) { blktrace_seq++; smp_mb(); bt->trace_state = Blktrace_running; trace_note_time(bt); ret = 0; } } else { if (bt->trace_state == Blktrace_running) { bt->trace_state = Blktrace_stopped; relay_flush(bt->rchan); ret = 0; } } return ret; } EXPORT_SYMBOL_GPL(blk_trace_startstop); /** * blk_trace_ioctl: - handle the ioctls associated with tracing * @bdev: the block device * @cmd: the ioctl cmd * @arg: the argument data, if any * **/ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) { struct request_queue *q; int ret, start = 0; char b[BDEVNAME_SIZE]; q = bdev_get_queue(bdev); if (!q) return -ENXIO; mutex_lock(&bdev->bd_mutex); switch (cmd) { case BLKTRACESETUP: bdevname(bdev, b); ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); break; #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) case BLKTRACESETUP32: 
bdevname(bdev, b); ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); break; #endif case BLKTRACESTART: start = 1; case BLKTRACESTOP: ret = blk_trace_startstop(q, start); break; case BLKTRACETEARDOWN: ret = blk_trace_remove(q); break; default: ret = -ENOTTY; break; } mutex_unlock(&bdev->bd_mutex); return ret; } /** * blk_trace_shutdown: - stop and cleanup trace structures * @q: the request queue associated with the device * **/ void blk_trace_shutdown(struct request_queue *q) { if (q->blk_trace) { blk_trace_startstop(q, 0); blk_trace_remove(q); } } /* * blktrace probes */ /** * blk_add_trace_rq - Add a trace for a request oriented action * @q: queue the io is for * @rq: the source request * @what: the action * * Description: * Records an action against a request. Will log the bio offset + size. * **/ static void blk_add_trace_rq(struct request_queue *q, struct request *rq, u32 what) { struct blk_trace *bt = q->blk_trace; if (likely(!bt)) return; if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { what |= BLK_TC_ACT(BLK_TC_PC); __blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags, what, rq->errors, rq->cmd_len, rq->cmd); } else { what |= BLK_TC_ACT(BLK_TC_FS); __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rq->cmd_flags, what, rq->errors, 0, NULL); } } static void blk_add_trace_rq_abort(void *ignore, struct request_queue *q, struct request *rq) { blk_add_trace_rq(q, rq, BLK_TA_ABORT); } static void blk_add_trace_rq_insert(void *ignore, struct request_queue *q, struct request *rq) { blk_add_trace_rq(q, rq, BLK_TA_INSERT); } static void blk_add_trace_rq_issue(void *ignore, struct request_queue *q, struct request *rq) { blk_add_trace_rq(q, rq, BLK_TA_ISSUE); } static void blk_add_trace_rq_requeue(void *ignore, struct request_queue *q, struct request *rq) { blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); } static void blk_add_trace_rq_complete(void *ignore, struct request_queue *q, struct request *rq) { blk_add_trace_rq(q, rq, BLK_TA_COMPLETE); } /** * 
blk_add_trace_bio - Add a trace for a bio oriented action * @q: queue the io is for * @bio: the source bio * @what: the action * @error: error, if any * * Description: * Records an action against a bio. Will log the bio offset + size. * **/ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, u32 what, int error) { struct blk_trace *bt = q->blk_trace; if (likely(!bt)) return; if (!error && !bio_flagged(bio, BIO_UPTODATE)) error = EIO; __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, error, 0, NULL); } static void blk_add_trace_bio_bounce(void *ignore, struct request_queue *q, struct bio *bio) { blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0); } static void blk_add_trace_bio_complete(void *ignore, struct request_queue *q, struct bio *bio, int error) { blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error); } static void blk_add_trace_bio_backmerge(void *ignore, struct request_queue *q, struct bio *bio) { blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0); } static void blk_add_trace_bio_frontmerge(void *ignore, struct request_queue *q, struct bio *bio) { blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0); } static void blk_add_trace_bio_queue(void *ignore, struct request_queue *q, struct bio *bio) { blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0); } static void blk_add_trace_getrq(void *ignore, struct request_queue *q, struct bio *bio, int rw) { if (bio) blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0); else { struct blk_trace *bt = q->blk_trace; if (bt) __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL); } } static void blk_add_trace_sleeprq(void *ignore, struct request_queue *q, struct bio *bio, int rw) { if (bio) blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0); else { struct blk_trace *bt = q->blk_trace; if (bt) __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ, 0, 0, NULL); } } static void blk_add_trace_plug(void *ignore, struct request_queue *q) { struct blk_trace *bt = q->blk_trace; if (bt) __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL); } 
static void blk_add_trace_unplug(void *ignore, struct request_queue *q, unsigned int depth, bool explicit) { struct blk_trace *bt = q->blk_trace; if (bt) { __be64 rpdu = cpu_to_be64(depth); u32 what; if (explicit) what = BLK_TA_UNPLUG_IO; else what = BLK_TA_UNPLUG_TIMER; __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu); } } static void blk_add_trace_split(void *ignore, struct request_queue *q, struct bio *bio, unsigned int pdu) { struct blk_trace *bt = q->blk_trace; if (bt) { __be64 rpdu = cpu_to_be64(pdu); __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu); } } /** * blk_add_trace_bio_remap - Add a trace for a bio-remap operation * @ignore: trace callback data parameter (not used) * @q: queue the io is for * @bio: the source bio * @dev: target device * @from: source sector * * Description: * Device mapper or raid target sometimes need to split a bio because * it spans a stripe (or similar). Add a trace for that action. * **/ static void blk_add_trace_bio_remap(void *ignore, struct request_queue *q, struct bio *bio, dev_t dev, sector_t from) { struct blk_trace *bt = q->blk_trace; struct blk_io_trace_remap r; if (likely(!bt)) return; r.device_from = cpu_to_be32(dev); r.device_to = cpu_to_be32(bio->bi_bdev->bd_dev); r.sector_from = cpu_to_be64(from); __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r); } /** * blk_add_trace_rq_remap - Add a trace for a request-remap operation * @ignore: trace callback data parameter (not used) * @q: queue the io is for * @rq: the source request * @dev: target device * @from: source sector * * Description: * Device mapper remaps request to other devices. * Add a trace for that action. 
* **/ static void blk_add_trace_rq_remap(void *ignore, struct request_queue *q, struct request *rq, dev_t dev, sector_t from) { struct blk_trace *bt = q->blk_trace; struct blk_io_trace_remap r; if (likely(!bt)) return; r.device_from = cpu_to_be32(dev); r.device_to = cpu_to_be32(disk_devt(rq->rq_disk)); r.sector_from = cpu_to_be64(from); __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors, sizeof(r), &r); } /** * blk_add_driver_data - Add binary message with driver-specific data * @q: queue the io is for * @rq: io request * @data: driver-specific data * @len: length of driver-specific data * * Description: * Some drivers might want to write driver-specific data per request. * **/ void blk_add_driver_data(struct request_queue *q, struct request *rq, void *data, size_t len) { struct blk_trace *bt = q->blk_trace; if (likely(!bt)) return; if (rq->cmd_type == REQ_TYPE_BLOCK_PC) __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0, BLK_TA_DRV_DATA, rq->errors, len, data); else __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0, BLK_TA_DRV_DATA, rq->errors, len, data); } EXPORT_SYMBOL_GPL(blk_add_driver_data); static void blk_register_tracepoints(void) { int ret; ret = register_trace_block_rq_abort(blk_add_trace_rq_abort, NULL); WARN_ON(ret); ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL); WARN_ON(ret); ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL); WARN_ON(ret); ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL); WARN_ON(ret); ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL); WARN_ON(ret); ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL); WARN_ON(ret); ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL); WARN_ON(ret); ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL); WARN_ON(ret); ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL); WARN_ON(ret); ret = 
register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL); WARN_ON(ret); ret = register_trace_block_getrq(blk_add_trace_getrq, NULL); WARN_ON(ret); ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL); WARN_ON(ret); ret = register_trace_block_plug(blk_add_trace_plug, NULL); WARN_ON(ret); ret = register_trace_block_unplug(blk_add_trace_unplug, NULL); WARN_ON(ret); ret = register_trace_block_split(blk_add_trace_split, NULL); WARN_ON(ret); ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); WARN_ON(ret); ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); WARN_ON(ret); } static void blk_unregister_tracepoints(void) { unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); unregister_trace_block_split(blk_add_trace_split, NULL); unregister_trace_block_unplug(blk_add_trace_unplug, NULL); unregister_trace_block_plug(blk_add_trace_plug, NULL); unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL); unregister_trace_block_getrq(blk_add_trace_getrq, NULL); unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL); unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL); unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL); unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL); unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL); unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL); unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL); unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL); unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL); unregister_trace_block_rq_abort(blk_add_trace_rq_abort, NULL); tracepoint_synchronize_unregister(); } /* * struct blk_io_tracer formatting routines */ static void fill_rwbs(char *rwbs, const struct blk_io_trace *t) { int i = 0; int tc = t->action >> BLK_TC_SHIFT; if (t->action == BLK_TN_MESSAGE) { 
rwbs[i++] = 'N'; goto out; } if (tc & BLK_TC_DISCARD) rwbs[i++] = 'D'; else if (tc & BLK_TC_WRITE) rwbs[i++] = 'W'; else if (t->bytes) rwbs[i++] = 'R'; else rwbs[i++] = 'N'; if (tc & BLK_TC_AHEAD) rwbs[i++] = 'A'; if (tc & BLK_TC_BARRIER) rwbs[i++] = 'B'; if (tc & BLK_TC_SYNC) rwbs[i++] = 'S'; if (tc & BLK_TC_META) rwbs[i++] = 'M'; out: rwbs[i] = '\0'; } static inline const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent) { return (const struct blk_io_trace *)ent; } static inline const void *pdu_start(const struct trace_entry *ent) { return te_blk_io_trace(ent) + 1; } static inline u32 t_action(const struct trace_entry *ent) { return te_blk_io_trace(ent)->action; } static inline u32 t_bytes(const struct trace_entry *ent) { return te_blk_io_trace(ent)->bytes; } static inline u32 t_sec(const struct trace_entry *ent) { return te_blk_io_trace(ent)->bytes >> 9; } static inline unsigned long long t_sector(const struct trace_entry *ent) { return te_blk_io_trace(ent)->sector; } static inline __u16 t_error(const struct trace_entry *ent) { return te_blk_io_trace(ent)->error; } static __u64 get_pdu_int(const struct trace_entry *ent) { const __u64 *val = pdu_start(ent); return be64_to_cpu(*val); } static void get_pdu_remap(const struct trace_entry *ent, struct blk_io_trace_remap *r) { const struct blk_io_trace_remap *__r = pdu_start(ent); __u64 sector_from = __r->sector_from; r->device_from = be32_to_cpu(__r->device_from); r->device_to = be32_to_cpu(__r->device_to); r->sector_from = be64_to_cpu(sector_from); } typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act); static int blk_log_action_classic(struct trace_iterator *iter, const char *act) { char rwbs[6]; unsigned long long ts = iter->ts; unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC); unsigned secs = (unsigned long)ts; const struct blk_io_trace *t = te_blk_io_trace(iter->ent); fill_rwbs(rwbs, t); return trace_seq_printf(&iter->seq, "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ", 
MAJOR(t->device), MINOR(t->device), iter->cpu, secs, nsec_rem, iter->ent->pid, act, rwbs); } static int blk_log_action(struct trace_iterator *iter, const char *act) { char rwbs[6]; const struct blk_io_trace *t = te_blk_io_trace(iter->ent); fill_rwbs(rwbs, t); return trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ", MAJOR(t->device), MINOR(t->device), act, rwbs); } static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent) { const unsigned char *pdu_buf; int pdu_len; int i, end, ret; pdu_buf = pdu_start(ent); pdu_len = te_blk_io_trace(ent)->pdu_len; if (!pdu_len) return 1; /* find the last zero that needs to be printed */ for (end = pdu_len - 1; end >= 0; end--) if (pdu_buf[end]) break; end++; if (!trace_seq_putc(s, '(')) return 0; for (i = 0; i < pdu_len; i++) { ret = trace_seq_printf(s, "%s%02x", i == 0 ? "" : " ", pdu_buf[i]); if (!ret) return ret; /* * stop when the rest is just zeroes and indicate so * with a ".." appended */ if (i == end && end != pdu_len - 1) return trace_seq_puts(s, " ..) 
"); } return trace_seq_puts(s, ") "); } static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent) { char cmd[TASK_COMM_LEN]; trace_find_cmdline(ent->pid, cmd); if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { int ret; ret = trace_seq_printf(s, "%u ", t_bytes(ent)); if (!ret) return 0; ret = blk_log_dump_pdu(s, ent); if (!ret) return 0; return trace_seq_printf(s, "[%s]\n", cmd); } else { if (t_sec(ent)) return trace_seq_printf(s, "%llu + %u [%s]\n", t_sector(ent), t_sec(ent), cmd); return trace_seq_printf(s, "[%s]\n", cmd); } } static int blk_log_with_error(struct trace_seq *s, const struct trace_entry *ent) { if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { int ret; ret = blk_log_dump_pdu(s, ent); if (ret) return trace_seq_printf(s, "[%d]\n", t_error(ent)); return 0; } else { if (t_sec(ent)) return trace_seq_printf(s, "%llu + %u [%d]\n", t_sector(ent), t_sec(ent), t_error(ent)); return trace_seq_printf(s, "%llu [%d]\n", t_sector(ent), t_error(ent)); } } static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent) { struct blk_io_trace_remap r = { .device_from = 0, }; get_pdu_remap(ent, &r); return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n", t_sector(ent), t_sec(ent), MAJOR(r.device_from), MINOR(r.device_from), (unsigned long long)r.sector_from); } static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent) { char cmd[TASK_COMM_LEN]; trace_find_cmdline(ent->pid, cmd); return trace_seq_printf(s, "[%s]\n", cmd); } static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent) { char cmd[TASK_COMM_LEN]; trace_find_cmdline(ent->pid, cmd); return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent)); } static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent) { char cmd[TASK_COMM_LEN]; trace_find_cmdline(ent->pid, cmd); return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent), get_pdu_int(ent), cmd); } static int blk_log_msg(struct trace_seq *s, const struct trace_entry *ent) 
{ int ret; const struct blk_io_trace *t = te_blk_io_trace(ent); ret = trace_seq_putmem(s, t + 1, t->pdu_len); if (ret) return trace_seq_putc(s, '\n'); return ret; } /* * struct tracer operations */ static void blk_tracer_print_header(struct seq_file *m) { if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC)) return; seq_puts(m, "# DEV CPU TIMESTAMP PID ACT FLG\n" "# | | | | | |\n"); } static void blk_tracer_start(struct trace_array *tr) { blk_tracer_enabled = true; } static int blk_tracer_init(struct trace_array *tr) { blk_tr = tr; blk_tracer_start(tr); return 0; } static void blk_tracer_stop(struct trace_array *tr) { blk_tracer_enabled = false; } static void blk_tracer_reset(struct trace_array *tr) { blk_tracer_stop(tr); } static const struct { const char *act[2]; int (*print)(struct trace_seq *s, const struct trace_entry *ent); } what2act[] = { [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic }, [__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic }, [__BLK_TA_FRONTMERGE] = {{ "F", "frontmerge" }, blk_log_generic }, [__BLK_TA_GETRQ] = {{ "G", "getrq" }, blk_log_generic }, [__BLK_TA_SLEEPRQ] = {{ "S", "sleeprq" }, blk_log_generic }, [__BLK_TA_REQUEUE] = {{ "R", "requeue" }, blk_log_with_error }, [__BLK_TA_ISSUE] = {{ "D", "issue" }, blk_log_generic }, [__BLK_TA_COMPLETE] = {{ "C", "complete" }, blk_log_with_error }, [__BLK_TA_PLUG] = {{ "P", "plug" }, blk_log_plug }, [__BLK_TA_UNPLUG_IO] = {{ "U", "unplug_io" }, blk_log_unplug }, [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug }, [__BLK_TA_INSERT] = {{ "I", "insert" }, blk_log_generic }, [__BLK_TA_SPLIT] = {{ "X", "split" }, blk_log_split }, [__BLK_TA_BOUNCE] = {{ "B", "bounce" }, blk_log_generic }, [__BLK_TA_REMAP] = {{ "A", "remap" }, blk_log_remap }, }; static enum print_line_t print_one_line(struct trace_iterator *iter, bool classic) { struct trace_seq *s = &iter->seq; const struct blk_io_trace *t; u16 what; int ret; bool long_act; blk_log_action_t *log_action; t = 
te_blk_io_trace(iter->ent); what = t->action & ((1 << BLK_TC_SHIFT) - 1); long_act = !!(trace_flags & TRACE_ITER_VERBOSE); log_action = classic ? &blk_log_action_classic : &blk_log_action; if (t->action == BLK_TN_MESSAGE) { ret = log_action(iter, long_act ? "message" : "m"); if (ret) ret = blk_log_msg(s, iter->ent); goto out; } if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act))) ret = trace_seq_printf(s, "Unknown action %x\n", what); else { ret = log_action(iter, what2act[what].act[long_act]); if (ret) ret = what2act[what].print(s, iter->ent); } out: return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; } static enum print_line_t blk_trace_event_print(struct trace_iterator *iter, int flags, struct trace_event *event) { return print_one_line(iter, false); } static int blk_trace_synthesize_old_trace(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; struct blk_io_trace *t = (struct blk_io_trace *)iter->ent; const int offset = offsetof(struct blk_io_trace, sector); struct blk_io_trace old = { .magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION, .time = iter->ts, }; if (!trace_seq_putmem(s, &old, offset)) return 0; return trace_seq_putmem(s, &t->sector, sizeof(old) - offset + t->pdu_len); } static enum print_line_t blk_trace_event_print_binary(struct trace_iterator *iter, int flags, struct trace_event *event) { return blk_trace_synthesize_old_trace(iter) ? 
TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; } static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter) { if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC)) return TRACE_TYPE_UNHANDLED; return print_one_line(iter, true); } static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set) { /* don't output context-info for blk_classic output */ if (bit == TRACE_BLK_OPT_CLASSIC) { if (set) trace_flags &= ~TRACE_ITER_CONTEXT_INFO; else trace_flags |= TRACE_ITER_CONTEXT_INFO; } return 0; } static struct tracer blk_tracer __read_mostly = { .name = "blk", .init = blk_tracer_init, .reset = blk_tracer_reset, .start = blk_tracer_start, .stop = blk_tracer_stop, .print_header = blk_tracer_print_header, .print_line = blk_tracer_print_line, .flags = &blk_tracer_flags, .set_flag = blk_tracer_set_flag, }; static struct trace_event_functions trace_blk_event_funcs = { .trace = blk_trace_event_print, .binary = blk_trace_event_print_binary, }; static struct trace_event trace_blk_event = { .type = TRACE_BLK, .funcs = &trace_blk_event_funcs, }; static int __init init_blk_tracer(void) { if (!register_ftrace_event(&trace_blk_event)) { pr_warning("Warning: could not register block events\n"); return 1; } if (register_tracer(&blk_tracer) != 0) { pr_warning("Warning: could not register the block tracer\n"); unregister_ftrace_event(&trace_blk_event); return 1; } return 0; } device_initcall(init_blk_tracer); static int blk_trace_remove_queue(struct request_queue *q) { struct blk_trace *bt; bt = xchg(&q->blk_trace, NULL); if (bt == NULL) return -EINVAL; if (atomic_dec_and_test(&blk_probes_ref)) blk_unregister_tracepoints(); blk_trace_free(bt); return 0; } /* * Setup everything required to start tracing */ static int blk_trace_setup_queue(struct request_queue *q, struct block_device *bdev) { struct blk_trace *old_bt, *bt = NULL; int ret = -ENOMEM; bt = kzalloc(sizeof(*bt), GFP_KERNEL); if (!bt) return -ENOMEM; bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char)); 
if (!bt->msg_data) goto free_bt; bt->dev = bdev->bd_dev; bt->act_mask = (u16)-1; blk_trace_setup_lba(bt, bdev); old_bt = xchg(&q->blk_trace, bt); if (old_bt != NULL) { (void)xchg(&q->blk_trace, old_bt); ret = -EBUSY; goto free_bt; } if (atomic_inc_return(&blk_probes_ref) == 1) blk_register_tracepoints(); return 0; free_bt: blk_trace_free(bt); return ret; } /* * sysfs interface to enable and configure tracing */ static ssize_t sysfs_blk_trace_attr_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t sysfs_blk_trace_attr_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); #define BLK_TRACE_DEVICE_ATTR(_name) \ DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \ sysfs_blk_trace_attr_show, \ sysfs_blk_trace_attr_store) static BLK_TRACE_DEVICE_ATTR(enable); static BLK_TRACE_DEVICE_ATTR(act_mask); static BLK_TRACE_DEVICE_ATTR(pid); static BLK_TRACE_DEVICE_ATTR(start_lba); static BLK_TRACE_DEVICE_ATTR(end_lba); static struct attribute *blk_trace_attrs[] = { &dev_attr_enable.attr, &dev_attr_act_mask.attr, &dev_attr_pid.attr, &dev_attr_start_lba.attr, &dev_attr_end_lba.attr, NULL }; struct attribute_group blk_trace_attr_group = { .name = "trace", .attrs = blk_trace_attrs, }; static const struct { int mask; const char *str; } mask_maps[] = { { BLK_TC_READ, "read" }, { BLK_TC_WRITE, "write" }, { BLK_TC_BARRIER, "barrier" }, { BLK_TC_SYNC, "sync" }, { BLK_TC_QUEUE, "queue" }, { BLK_TC_REQUEUE, "requeue" }, { BLK_TC_ISSUE, "issue" }, { BLK_TC_COMPLETE, "complete" }, { BLK_TC_FS, "fs" }, { BLK_TC_PC, "pc" }, { BLK_TC_AHEAD, "ahead" }, { BLK_TC_META, "meta" }, { BLK_TC_DISCARD, "discard" }, { BLK_TC_DRV_DATA, "drv_data" }, }; static int blk_trace_str2mask(const char *str) { int i; int mask = 0; char *buf, *s, *token; buf = kstrdup(str, GFP_KERNEL); if (buf == NULL) return -ENOMEM; s = strstrip(buf); while (1) { token = strsep(&s, ","); if (token == NULL) break; if (*token == '\0') continue; for (i = 0; i < 
ARRAY_SIZE(mask_maps); i++) { if (strcasecmp(token, mask_maps[i].str) == 0) { mask |= mask_maps[i].mask; break; } } if (i == ARRAY_SIZE(mask_maps)) { mask = -EINVAL; break; } } kfree(buf); return mask; } static ssize_t blk_trace_mask2str(char *buf, int mask) { int i; char *p = buf; for (i = 0; i < ARRAY_SIZE(mask_maps); i++) { if (mask & mask_maps[i].mask) { p += sprintf(p, "%s%s", (p == buf) ? "" : ",", mask_maps[i].str); } } *p++ = '\n'; return p - buf; } static struct request_queue *blk_trace_get_queue(struct block_device *bdev) { if (bdev->bd_disk == NULL) return NULL; return bdev_get_queue(bdev); } static ssize_t sysfs_blk_trace_attr_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hd_struct *p = dev_to_part(dev); struct request_queue *q; struct block_device *bdev; ssize_t ret = -ENXIO; bdev = bdget(part_devt(p)); if (bdev == NULL) goto out; q = blk_trace_get_queue(bdev); if (q == NULL) goto out_bdput; mutex_lock(&bdev->bd_mutex); if (attr == &dev_attr_enable) { ret = sprintf(buf, "%u\n", !!q->blk_trace); goto out_unlock_bdev; } if (q->blk_trace == NULL) ret = sprintf(buf, "disabled\n"); else if (attr == &dev_attr_act_mask) ret = blk_trace_mask2str(buf, q->blk_trace->act_mask); else if (attr == &dev_attr_pid) ret = sprintf(buf, "%u\n", q->blk_trace->pid); else if (attr == &dev_attr_start_lba) ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba); else if (attr == &dev_attr_end_lba) ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba); out_unlock_bdev: mutex_unlock(&bdev->bd_mutex); out_bdput: bdput(bdev); out: return ret; } static ssize_t sysfs_blk_trace_attr_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct block_device *bdev; struct request_queue *q; struct hd_struct *p; u64 value; ssize_t ret = -EINVAL; if (count == 0) goto out; if (attr == &dev_attr_act_mask) { if (sscanf(buf, "%llx", &value) != 1) { /* Assume it is a list of trace category names */ ret = blk_trace_str2mask(buf); if 
(ret < 0) goto out; value = ret; } } else if (sscanf(buf, "%llu", &value) != 1) goto out; ret = -ENXIO; p = dev_to_part(dev); bdev = bdget(part_devt(p)); if (bdev == NULL) goto out; q = blk_trace_get_queue(bdev); if (q == NULL) goto out_bdput; mutex_lock(&bdev->bd_mutex); if (attr == &dev_attr_enable) { if (value) ret = blk_trace_setup_queue(q, bdev); else ret = blk_trace_remove_queue(q); goto out_unlock_bdev; } ret = 0; if (q->blk_trace == NULL) ret = blk_trace_setup_queue(q, bdev); if (ret == 0) { if (attr == &dev_attr_act_mask) q->blk_trace->act_mask = value; else if (attr == &dev_attr_pid) q->blk_trace->pid = value; else if (attr == &dev_attr_start_lba) q->blk_trace->start_lba = value; else if (attr == &dev_attr_end_lba) q->blk_trace->end_lba = value; } out_unlock_bdev: mutex_unlock(&bdev->bd_mutex); out_bdput: bdput(bdev); out: return ret ? ret : count; } int blk_trace_init_sysfs(struct device *dev) { return sysfs_create_group(&dev->kobj, &blk_trace_attr_group); } void blk_trace_remove_sysfs(struct device *dev) { sysfs_remove_group(&dev->kobj, &blk_trace_attr_group); } #endif /* CONFIG_BLK_DEV_IO_TRACE */ #ifdef CONFIG_EVENT_TRACING void blk_dump_cmd(char *buf, struct request *rq) { int i, end; int len = rq->cmd_len; unsigned char *cmd = rq->cmd; if (rq->cmd_type != REQ_TYPE_BLOCK_PC) { buf[0] = '\0'; return; } for (end = len - 1; end >= 0; end--) if (cmd[end]) break; end++; for (i = 0; i < len; i++) { buf += sprintf(buf, "%s%02x", i == 0 ? "" : " ", cmd[i]); if (i == end && end != len - 1) { sprintf(buf, " .."); break; } } } void blk_fill_rwbs(char *rwbs, u32 rw, int bytes) { int i = 0; if (rw & WRITE) rwbs[i++] = 'W'; else if (rw & REQ_DISCARD) rwbs[i++] = 'D'; else if (rw & REQ_SANITIZE) rwbs[i++] = 'Z'; else if (bytes) rwbs[i++] = 'R'; else rwbs[i++] = 'N'; if (rw & REQ_RAHEAD) rwbs[i++] = 'A'; if (rw & REQ_SYNC) rwbs[i++] = 'S'; if (rw & REQ_META) rwbs[i++] = 'M'; if (rw & REQ_SECURE) rwbs[i++] = 'E'; rwbs[i] = '\0'; } #endif /* CONFIG_EVENT_TRACING */
gpl-2.0
vamanea/u-boot-linaro
board/exmeritus/hww1u1a/hww1u1a.c
57
7040
/* * Copyright 2009-2011 eXMeritus, A Boeing Company * Copyright 2007-2009 Freescale Semiconductor, Inc. * * See file CREDITS for list of people who contributed to this * project. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ #include <common.h> #include <command.h> #include <pci.h> #include <asm/processor.h> #include <asm/mmu.h> #include <asm/cache.h> #include <asm/immap_85xx.h> #include <asm/fsl_pci.h> #include <asm/fsl_ddr_sdram.h> #include <asm/io.h> #include <miiphy.h> #include <libfdt.h> #include <linux/ctype.h> #include <fdt_support.h> #include <fsl_mdio.h> #include <tsec.h> #include <asm/fsl_law.h> #include <netdev.h> #include <malloc.h> #include <i2c.h> #include <pca953x.h> #include "gpios.h" DECLARE_GLOBAL_DATA_PTR; int checkboard(void) { unsigned int gpio_high = 0; unsigned int gpio_low = 0; unsigned int gpio_in = 0; unsigned int i; puts("Board: HWW-1U-1A "); /* * First just figure out which CPU we're on, then use that to * configure the lists of other GPIOs to be programmed. 
*/ mpc85xx_gpio_set_in(GPIO_CPU_ID); if (hww1u1a_is_cpu_a()) { puts("CPU A\n"); /* We want to turn on some LEDs */ gpio_high |= GPIO_CPUA_CPU_READY; gpio_low |= GPIO_CPUA_DEBUG_LED1; gpio_low |= GPIO_CPUA_DEBUG_LED2; /* Disable the unused transmitters */ gpio_low |= GPIO_CPUA_TDIS1A; gpio_high |= GPIO_CPUA_TDIS1B; gpio_low |= GPIO_CPUA_TDIS2A; gpio_high |= GPIO_CPUA_TDIS2B; } else { puts("CPU B\n"); /* We want to turn on some LEDs */ gpio_high |= GPIO_CPUB_CPU_READY; gpio_low |= GPIO_CPUB_DEBUG_LED1; gpio_low |= GPIO_CPUB_DEBUG_LED2; /* Enable the appropriate receivers */ gpio_high |= GPIO_CPUB_RMUX_SEL0A; gpio_high |= GPIO_CPUB_RMUX_SEL0B; gpio_low |= GPIO_CPUB_RMUX_SEL1A; gpio_low |= GPIO_CPUB_RMUX_SEL1B; } /* These GPIOs are common */ gpio_in |= IRQ_I2CINT | IRQ_FANINT | IRQ_DIMM_EVENT; gpio_low |= GPIO_RS422_RE; gpio_high |= GPIO_RS422_DE; /* Ok, now go ahead and program all of those in one go */ mpc85xx_gpio_set(gpio_high|gpio_low|gpio_in, gpio_high|gpio_low, gpio_high); /* * If things have been taken out of reset early (for example, by one * of the BDI3000 debuggers), then we need to put them back in reset * and delay a while before we continue. */ if (mpc85xx_gpio_get(GPIO_RESETS)) { ccsr_ddr_t *ddr = (ccsr_ddr_t *)CONFIG_SYS_MPC85xx_DDR_ADDR; puts("Debugger detected... extra device reset enabled!\n"); /* Put stuff into reset and disable the DDR controller */ mpc85xx_gpio_set_low(GPIO_RESETS); out_be32(&ddr->sdram_cfg, 0x00000000); puts(" Waiting 1 sec for reset..."); for (i = 0; i < 10; i++) { udelay(100000); puts("."); } puts("\n"); } /* Now bring everything back out of reset again */ mpc85xx_gpio_set_high(GPIO_RESETS); return 0; } /* * This little shell function just returns whether or not it's CPU A. * It can be used to select the right device-tree when booting, etc. 
*/ int do_hww1u1a_test_cpu_a(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[]) { if (argc > 1) cmd_usage(cmdtp); if (hww1u1a_is_cpu_a()) return 0; else return 1; } U_BOOT_CMD( test_cpu_a, 1, 0, do_hww1u1a_test_cpu_a, "Test if this is CPU A (versus B) on the eXMeritus HWW-1U-1A board", "" ); /* Create a prompt-like string: "uboot@HOSTNAME% " */ #define PROMPT_PREFIX "uboot@exm" #define PROMPT_SUFFIX "% " /* This function returns a PS1 prompt based on the serial number */ static char *hww1u1a_prompt; const char *hww1u1a_get_ps1(void) { unsigned long len, i, j; const char *serialnr; /* If our prompt was already set, just use that */ if (hww1u1a_prompt) return hww1u1a_prompt; /* Use our serial number if present, otherwise a default */ serialnr = getenv("serial#"); if (!serialnr || !serialnr[0]) serialnr = "999999-X"; /* * We will turn the serial number into a hostname by: * (A) Delete all non-alphanumerics. * (B) Lowercase all letters. * (C) Prefix "exm". * (D) Suffix "a" for CPU A and "b" for CPU B. */ for (i = 0, len = 0; serialnr[i]; i++) { if (isalnum(serialnr[i])) len++; } len += sizeof(PROMPT_PREFIX PROMPT_SUFFIX) + 1; /* Includes NUL */ hww1u1a_prompt = malloc(len); if (!hww1u1a_prompt) return PROMPT_PREFIX "UNKNOWN(ENOMEM)" PROMPT_SUFFIX; /* Now actually fill it in */ i = 0; /* Handle the prefix */ for (j = 0; j < sizeof(PROMPT_PREFIX) - 1; j++) hww1u1a_prompt[i++] = PROMPT_PREFIX[j]; /* Now the serial# part of the hostname */ for (j = 0; serialnr[j]; j++) if (isalnum(serialnr[j])) hww1u1a_prompt[i++] = tolower(serialnr[j]); /* Now the CPU id ("a" or "b") */ hww1u1a_prompt[i++] = hww1u1a_is_cpu_a() ? 
'a' : 'b'; /* Finally the suffix */ for (j = 0; j < sizeof(PROMPT_SUFFIX); j++) hww1u1a_prompt[i++] = PROMPT_SUFFIX[j]; /* This should all have added up, but just in case */ hww1u1a_prompt[len - 1] = '\0'; /* Now we're done */ return hww1u1a_prompt; } void pci_init_board(void) { fsl_pcie_init_board(0); } int board_early_init_r(void) { const unsigned int flashbase = CONFIG_SYS_FLASH_BASE; const u8 flash_esel = find_tlb_idx((void *)flashbase, 1); /* * Remap bootflash region to caching-inhibited * so that flash can be erased properly. */ /* Flush d-cache and invalidate i-cache of any FLASH data */ flush_dcache(); invalidate_icache(); /* invalidate existing TLB entry for FLASH */ disable_tlb(flash_esel); set_tlb(1, flashbase, CONFIG_SYS_FLASH_BASE_PHYS, MAS3_SX|MAS3_SW|MAS3_SR, MAS2_I|MAS2_G, 0, flash_esel, BOOKE_PAGESZ_256M, 1); return 0; } int board_eth_init(bd_t *bis) { struct tsec_info_struct tsec_info[4]; struct fsl_pq_mdio_info mdio_info; SET_STD_TSEC_INFO(tsec_info[0], 1); SET_STD_TSEC_INFO(tsec_info[1], 2); SET_STD_TSEC_INFO(tsec_info[2], 3); if (hww1u1a_is_cpu_a()) tsec_info[2].phyaddr = TSEC3_PHY_ADDR_CPUA; else tsec_info[2].phyaddr = TSEC3_PHY_ADDR_CPUB; mdio_info.regs = (struct tsec_mii_mng *)CONFIG_SYS_MDIO_BASE_ADDR; mdio_info.name = DEFAULT_MII_NAME; fsl_pq_mdio_init(bis, &mdio_info); tsec_eth_init(bis, tsec_info, 3); return pci_eth_init(bis); } void ft_board_setup(void *blob, bd_t *bd) { phys_addr_t base; phys_size_t size; ft_cpu_setup(blob, bd); base = getenv_bootm_low(); size = getenv_bootm_size(); fdt_fixup_memory(blob, (u64)base, (u64)size); FT_FSL_PCI_SETUP; }
gpl-2.0
randomstuffpaul/android_kernel_samsung_klimtwifi
drivers/block/nbd.c
313
21594
/* * Network block device - make block devices work over TCP * * Note that you can not swap over this thing, yet. Seems to work but * deadlocks sometimes - you can not swap over TCP in general. * * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz> * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com> * * This file is released under GPLv2 or later. * * (part of code stolen from loop.c) */ #include <linux/major.h> #include <linux/blkdev.h> #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/bio.h> #include <linux/stat.h> #include <linux/errno.h> #include <linux/file.h> #include <linux/ioctl.h> #include <linux/mutex.h> #include <linux/compiler.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/slab.h> #include <net/sock.h> #include <linux/net.h> #include <linux/kthread.h> #include <asm/uaccess.h> #include <asm/types.h> #include <linux/nbd.h> #define NBD_MAGIC 0x68797548 #ifdef NDEBUG #define dprintk(flags, fmt...) #else /* NDEBUG */ #define dprintk(flags, fmt...) do { \ if (debugflags & (flags)) printk(KERN_DEBUG fmt); \ } while (0) #define DBG_IOCTL 0x0004 #define DBG_INIT 0x0010 #define DBG_EXIT 0x0020 #define DBG_BLKDEV 0x0100 #define DBG_RX 0x0200 #define DBG_TX 0x0400 static unsigned int debugflags; #endif /* NDEBUG */ static unsigned int nbds_max = 16; static struct nbd_device *nbd_dev; static int max_part; /* * Use just one lock (or at most 1 per NIC). Two arguments for this: * 1. Each NIC is essentially a synchronization point for all servers * accessed through that NIC so there's no need to have more locks * than NICs anyway. * 2. More locks lead to more "Dirty cache line bouncing" which will slow * down each lock to the point where they're actually slower than just * a single lock. * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this! 
*/ static DEFINE_SPINLOCK(nbd_lock); #ifndef NDEBUG static const char *ioctl_cmd_to_ascii(int cmd) { switch (cmd) { case NBD_SET_SOCK: return "set-sock"; case NBD_SET_BLKSIZE: return "set-blksize"; case NBD_SET_SIZE: return "set-size"; case NBD_DO_IT: return "do-it"; case NBD_CLEAR_SOCK: return "clear-sock"; case NBD_CLEAR_QUE: return "clear-que"; case NBD_PRINT_DEBUG: return "print-debug"; case NBD_SET_SIZE_BLOCKS: return "set-size-blocks"; case NBD_DISCONNECT: return "disconnect"; case BLKROSET: return "set-read-only"; case BLKFLSBUF: return "flush-buffer-cache"; } return "unknown"; } static const char *nbdcmd_to_ascii(int cmd) { switch (cmd) { case NBD_CMD_READ: return "read"; case NBD_CMD_WRITE: return "write"; case NBD_CMD_DISC: return "disconnect"; } return "invalid"; } #endif /* NDEBUG */ static void nbd_end_request(struct request *req) { int error = req->errors ? -EIO : 0; struct request_queue *q = req->q; unsigned long flags; dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name, req, error ? "failed" : "done"); spin_lock_irqsave(q->queue_lock, flags); __blk_end_request_all(req, error); spin_unlock_irqrestore(q->queue_lock, flags); } static void sock_shutdown(struct nbd_device *nbd, int lock) { /* Forcibly shutdown the socket causing all listeners * to error * * FIXME: This code is duplicated from sys_shutdown, but * there should be a more generic interface rather than * calling socket ops directly here */ if (lock) mutex_lock(&nbd->tx_lock); if (nbd->sock) { dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n"); kernel_sock_shutdown(nbd->sock, SHUT_RDWR); nbd->sock = NULL; } if (lock) mutex_unlock(&nbd->tx_lock); } static void nbd_xmit_timeout(unsigned long arg) { struct task_struct *task = (struct task_struct *)arg; printk(KERN_WARNING "nbd: killing hung xmit (%s, pid: %d)\n", task->comm, task->pid); force_sig(SIGKILL, task); } /* * Send or receive packet. 
*/ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size, int msg_flags) { struct socket *sock = nbd->sock; int result; struct msghdr msg; struct kvec iov; sigset_t blocked, oldset; if (unlikely(!sock)) { dev_err(disk_to_dev(nbd->disk), "Attempted %s on closed socket in sock_xmit\n", (send ? "send" : "recv")); return -EINVAL; } /* Allow interception of SIGKILL only * Don't allow other signals to interrupt the transmission */ siginitsetinv(&blocked, sigmask(SIGKILL)); sigprocmask(SIG_SETMASK, &blocked, &oldset); do { sock->sk->sk_allocation = GFP_NOIO; iov.iov_base = buf; iov.iov_len = size; msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = msg_flags | MSG_NOSIGNAL; if (send) { struct timer_list ti; if (nbd->xmit_timeout) { init_timer(&ti); ti.function = nbd_xmit_timeout; ti.data = (unsigned long)current; ti.expires = jiffies + nbd->xmit_timeout; add_timer(&ti); } result = kernel_sendmsg(sock, &msg, &iov, 1, size); if (nbd->xmit_timeout) del_timer_sync(&ti); } else result = kernel_recvmsg(sock, &msg, &iov, 1, size, msg.msg_flags); if (signal_pending(current)) { siginfo_t info; printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n", task_pid_nr(current), current->comm, dequeue_signal_lock(current, &current->blocked, &info)); result = -EINTR; sock_shutdown(nbd, !send); break; } if (result <= 0) { if (result == 0) result = -EPIPE; /* short read */ break; } size -= result; buf += result; } while (size > 0); sigprocmask(SIG_SETMASK, &oldset, NULL); return result; } static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec, int flags) { int result; void *kaddr = kmap(bvec->bv_page); result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset, bvec->bv_len, flags); kunmap(bvec->bv_page); return result; } /* always call with the tx_lock held */ static int nbd_send_req(struct nbd_device *nbd, struct request *req) { int result, flags; struct nbd_request request; unsigned long size = 
blk_rq_bytes(req); request.magic = htonl(NBD_REQUEST_MAGIC); request.type = htonl(nbd_cmd(req)); request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); request.len = htonl(size); memcpy(request.handle, &req, sizeof(req)); dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n", nbd->disk->disk_name, req, nbdcmd_to_ascii(nbd_cmd(req)), (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); result = sock_xmit(nbd, 1, &request, sizeof(request), (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), "Send control failed (result %d)\n", result); goto error_out; } if (nbd_cmd(req) == NBD_CMD_WRITE) { struct req_iterator iter; struct bio_vec *bvec; /* * we are really probing at internals to determine * whether to set MSG_MORE or not... */ rq_for_each_segment(bvec, req, iter) { flags = 0; if (!rq_iter_last(req, iter)) flags = MSG_MORE; dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n", nbd->disk->disk_name, req, bvec->bv_len); result = sock_send_bvec(nbd, bvec, flags); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), "Send data failed (result %d)\n", result); goto error_out; } } } return 0; error_out: return -EIO; } static struct request *nbd_find_request(struct nbd_device *nbd, struct request *xreq) { struct request *req, *tmp; int err; err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq); if (unlikely(err)) goto out; spin_lock(&nbd->queue_lock); list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) { if (req != xreq) continue; list_del_init(&req->queuelist); spin_unlock(&nbd->queue_lock); return req; } spin_unlock(&nbd->queue_lock); err = -ENOENT; out: return ERR_PTR(err); } static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec) { int result; void *kaddr = kmap(bvec->bv_page); result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len, MSG_WAITALL); kunmap(bvec->bv_page); return result; } /* NULL returned = something went wrong, inform 
userspace */ static struct request *nbd_read_stat(struct nbd_device *nbd) { int result; struct nbd_reply reply; struct request *req; reply.magic = 0; result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), "Receive control failed (result %d)\n", result); goto harderror; } if (ntohl(reply.magic) != NBD_REPLY_MAGIC) { dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n", (unsigned long)ntohl(reply.magic)); result = -EPROTO; goto harderror; } req = nbd_find_request(nbd, *(struct request **)reply.handle); if (IS_ERR(req)) { result = PTR_ERR(req); if (result != -ENOENT) goto harderror; dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n", reply.handle); result = -EBADR; goto harderror; } if (ntohl(reply.error)) { dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", ntohl(reply.error)); req->errors++; return req; } dprintk(DBG_RX, "%s: request %p: got reply\n", nbd->disk->disk_name, req); if (nbd_cmd(req) == NBD_CMD_READ) { struct req_iterator iter; struct bio_vec *bvec; rq_for_each_segment(bvec, req, iter) { result = sock_recv_bvec(nbd, bvec); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", result); req->errors++; return req; } dprintk(DBG_RX, "%s: request %p: got %d bytes data\n", nbd->disk->disk_name, req, bvec->bv_len); } } return req; harderror: nbd->harderror = result; return NULL; } static ssize_t pid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); return sprintf(buf, "%ld\n", (long) ((struct nbd_device *)disk->private_data)->pid); } static struct device_attribute pid_attr = { .attr = { .name = "pid", .mode = S_IRUGO}, .show = pid_show, }; static int nbd_do_it(struct nbd_device *nbd) { struct request *req; int ret; BUG_ON(nbd->magic != NBD_MAGIC); nbd->pid = task_pid_nr(current); ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr); if (ret) { dev_err(disk_to_dev(nbd->disk), 
"device_create_file failed!\n"); nbd->pid = 0; return ret; } while ((req = nbd_read_stat(nbd)) != NULL) nbd_end_request(req); device_remove_file(disk_to_dev(nbd->disk), &pid_attr); nbd->pid = 0; return 0; } static void nbd_clear_que(struct nbd_device *nbd) { struct request *req; BUG_ON(nbd->magic != NBD_MAGIC); /* * Because we have set nbd->sock to NULL under the tx_lock, all * modifications to the list must have completed by now. For * the same reason, the active_req must be NULL. * * As a consequence, we don't need to take the spin lock while * purging the list here. */ BUG_ON(nbd->sock); BUG_ON(nbd->active_req); while (!list_empty(&nbd->queue_head)) { req = list_entry(nbd->queue_head.next, struct request, queuelist); list_del_init(&req->queuelist); req->errors++; nbd_end_request(req); } while (!list_empty(&nbd->waiting_queue)) { req = list_entry(nbd->waiting_queue.next, struct request, queuelist); list_del_init(&req->queuelist); req->errors++; nbd_end_request(req); } } static void nbd_handle_req(struct nbd_device *nbd, struct request *req) { if (req->cmd_type != REQ_TYPE_FS) goto error_out; nbd_cmd(req) = NBD_CMD_READ; if (rq_data_dir(req) == WRITE) { nbd_cmd(req) = NBD_CMD_WRITE; if (nbd->flags & NBD_READ_ONLY) { dev_err(disk_to_dev(nbd->disk), "Write on read-only\n"); goto error_out; } } req->errors = 0; mutex_lock(&nbd->tx_lock); if (unlikely(!nbd->sock)) { mutex_unlock(&nbd->tx_lock); dev_err(disk_to_dev(nbd->disk), "Attempted send on closed socket\n"); goto error_out; } nbd->active_req = req; if (nbd_send_req(nbd, req) != 0) { dev_err(disk_to_dev(nbd->disk), "Request send failed\n"); req->errors++; nbd_end_request(req); } else { spin_lock(&nbd->queue_lock); list_add(&req->queuelist, &nbd->queue_head); spin_unlock(&nbd->queue_lock); } nbd->active_req = NULL; mutex_unlock(&nbd->tx_lock); wake_up_all(&nbd->active_wq); return; error_out: req->errors++; nbd_end_request(req); } static int nbd_thread(void *data) { struct nbd_device *nbd = data; struct request 
*req; set_user_nice(current, -20); while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) { /* wait for something to do */ wait_event_interruptible(nbd->waiting_wq, kthread_should_stop() || !list_empty(&nbd->waiting_queue)); /* extract request */ if (list_empty(&nbd->waiting_queue)) continue; spin_lock_irq(&nbd->queue_lock); req = list_entry(nbd->waiting_queue.next, struct request, queuelist); list_del_init(&req->queuelist); spin_unlock_irq(&nbd->queue_lock); /* handle request */ nbd_handle_req(nbd, req); } return 0; } /* * We always wait for result of write, for now. It would be nice to make it optional * in future * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK)) * { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); } */ static void do_nbd_request(struct request_queue *q) { struct request *req; while ((req = blk_fetch_request(q)) != NULL) { struct nbd_device *nbd; spin_unlock_irq(q->queue_lock); dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n", req->rq_disk->disk_name, req, req->cmd_type); nbd = req->rq_disk->private_data; BUG_ON(nbd->magic != NBD_MAGIC); if (unlikely(!nbd->sock)) { dev_err(disk_to_dev(nbd->disk), "Attempted send on closed socket\n"); req->errors++; nbd_end_request(req); spin_lock_irq(q->queue_lock); continue; } spin_lock_irq(&nbd->queue_lock); list_add_tail(&req->queuelist, &nbd->waiting_queue); spin_unlock_irq(&nbd->queue_lock); wake_up(&nbd->waiting_wq); spin_lock_irq(q->queue_lock); } } /* Must be called with tx_lock held */ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, unsigned int cmd, unsigned long arg) { switch (cmd) { case NBD_DISCONNECT: { struct request sreq; dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); if (!nbd->sock) return -EINVAL; mutex_unlock(&nbd->tx_lock); fsync_bdev(bdev); mutex_lock(&nbd->tx_lock); blk_rq_init(NULL, &sreq); sreq.cmd_type = REQ_TYPE_SPECIAL; nbd_cmd(&sreq) = NBD_CMD_DISC; /* Check again after getting mutex back. 
*/ if (!nbd->sock) return -EINVAL; nbd->disconnect = 1; nbd_send_req(nbd, &sreq); return 0; } case NBD_CLEAR_SOCK: { struct file *file; nbd->sock = NULL; file = nbd->file; nbd->file = NULL; nbd_clear_que(nbd); BUG_ON(!list_empty(&nbd->queue_head)); BUG_ON(!list_empty(&nbd->waiting_queue)); kill_bdev(bdev); if (file) fput(file); return 0; } case NBD_SET_SOCK: { struct file *file; if (nbd->file) return -EBUSY; file = fget(arg); if (file) { struct inode *inode = file->f_path.dentry->d_inode; if (S_ISSOCK(inode->i_mode)) { nbd->file = file; nbd->sock = SOCKET_I(inode); if (max_part > 0) bdev->bd_invalidated = 1; nbd->disconnect = 0; /* we're connected now */ return 0; } else { fput(file); } } return -EINVAL; } case NBD_SET_BLKSIZE: nbd->blksize = arg; nbd->bytesize &= ~(nbd->blksize-1); bdev->bd_inode->i_size = nbd->bytesize; set_blocksize(bdev, nbd->blksize); set_capacity(nbd->disk, nbd->bytesize >> 9); return 0; case NBD_SET_SIZE: nbd->bytesize = arg & ~(nbd->blksize-1); bdev->bd_inode->i_size = nbd->bytesize; set_blocksize(bdev, nbd->blksize); set_capacity(nbd->disk, nbd->bytesize >> 9); return 0; case NBD_SET_TIMEOUT: nbd->xmit_timeout = arg * HZ; return 0; case NBD_SET_SIZE_BLOCKS: nbd->bytesize = ((u64) arg) * nbd->blksize; bdev->bd_inode->i_size = nbd->bytesize; set_blocksize(bdev, nbd->blksize); set_capacity(nbd->disk, nbd->bytesize >> 9); return 0; case NBD_DO_IT: { struct task_struct *thread; struct file *file; int error; if (nbd->pid) return -EBUSY; if (!nbd->file) return -EINVAL; mutex_unlock(&nbd->tx_lock); thread = kthread_create(nbd_thread, nbd, "%s", nbd->disk->disk_name); if (IS_ERR(thread)) { mutex_lock(&nbd->tx_lock); return PTR_ERR(thread); } wake_up_process(thread); error = nbd_do_it(nbd); kthread_stop(thread); mutex_lock(&nbd->tx_lock); if (error) return error; sock_shutdown(nbd, 0); file = nbd->file; nbd->file = NULL; nbd_clear_que(nbd); dev_warn(disk_to_dev(nbd->disk), "queue cleared\n"); kill_bdev(bdev); if (file) fput(file); nbd->bytesize = 0; 
bdev->bd_inode->i_size = 0; set_capacity(nbd->disk, 0); if (max_part > 0) ioctl_by_bdev(bdev, BLKRRPART, 0); if (nbd->disconnect) /* user requested, ignore socket errors */ return 0; return nbd->harderror; } case NBD_CLEAR_QUE: /* * This is for compatibility only. The queue is always cleared * by NBD_DO_IT or NBD_CLEAR_SOCK. */ BUG_ON(!nbd->sock && !list_empty(&nbd->queue_head)); return 0; case NBD_PRINT_DEBUG: dev_info(disk_to_dev(nbd->disk), "next = %p, prev = %p, head = %p\n", nbd->queue_head.next, nbd->queue_head.prev, &nbd->queue_head); return 0; } return -ENOTTY; } static int nbd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct nbd_device *nbd = bdev->bd_disk->private_data; int error; if (!capable(CAP_SYS_ADMIN)) return -EPERM; BUG_ON(nbd->magic != NBD_MAGIC); /* Anyone capable of this syscall can do *real bad* things */ dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n", nbd->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg); mutex_lock(&nbd->tx_lock); error = __nbd_ioctl(bdev, nbd, cmd, arg); mutex_unlock(&nbd->tx_lock); return error; } static const struct block_device_operations nbd_fops = { .owner = THIS_MODULE, .ioctl = nbd_ioctl, }; /* * And here should be modules and kernel interface * (Just smiley confuses emacs :-) */ static int __init nbd_init(void) { int err = -ENOMEM; int i; int part_shift; BUILD_BUG_ON(sizeof(struct nbd_request) != 28); if (max_part < 0) { printk(KERN_ERR "nbd: max_part must be >= 0\n"); return -EINVAL; } part_shift = 0; if (max_part > 0) { part_shift = fls(max_part); /* * Adjust max_part according to part_shift as it is exported * to user space so that user can know the max number of * partition kernel should be able to manage. * * Note that -1 is required because partition 0 is reserved * for the whole disk. 
*/ max_part = (1UL << part_shift) - 1; } if ((1UL << part_shift) > DISK_MAX_PARTS) return -EINVAL; if (nbds_max > 1UL << (MINORBITS - part_shift)) return -EINVAL; nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); if (!nbd_dev) return -ENOMEM; for (i = 0; i < nbds_max; i++) { struct gendisk *disk = alloc_disk(1 << part_shift); if (!disk) goto out; nbd_dev[i].disk = disk; /* * The new linux 2.5 block layer implementation requires * every gendisk to have its very own request_queue struct. * These structs are big so we dynamically allocate them. */ disk->queue = blk_init_queue(do_nbd_request, &nbd_lock); if (!disk->queue) { put_disk(disk); goto out; } /* * Tell the block layer that we are not a rotational device */ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue); } if (register_blkdev(NBD_MAJOR, "nbd")) { err = -EIO; goto out; } printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR); dprintk(DBG_INIT, "nbd: debugflags=0x%x\n", debugflags); for (i = 0; i < nbds_max; i++) { struct gendisk *disk = nbd_dev[i].disk; nbd_dev[i].file = NULL; nbd_dev[i].magic = NBD_MAGIC; nbd_dev[i].flags = 0; INIT_LIST_HEAD(&nbd_dev[i].waiting_queue); spin_lock_init(&nbd_dev[i].queue_lock); INIT_LIST_HEAD(&nbd_dev[i].queue_head); mutex_init(&nbd_dev[i].tx_lock); init_waitqueue_head(&nbd_dev[i].active_wq); init_waitqueue_head(&nbd_dev[i].waiting_wq); nbd_dev[i].blksize = 1024; nbd_dev[i].bytesize = 0; disk->major = NBD_MAJOR; disk->first_minor = i << part_shift; disk->fops = &nbd_fops; disk->private_data = &nbd_dev[i]; sprintf(disk->disk_name, "nbd%d", i); set_capacity(disk, 0); add_disk(disk); } return 0; out: while (i--) { blk_cleanup_queue(nbd_dev[i].disk->queue); put_disk(nbd_dev[i].disk); } kfree(nbd_dev); return err; } static void __exit nbd_cleanup(void) { int i; for (i = 0; i < nbds_max; i++) { struct gendisk *disk = nbd_dev[i].disk; nbd_dev[i].magic = 0; if (disk) { del_gendisk(disk); blk_cleanup_queue(disk->queue); put_disk(disk); } } 
unregister_blkdev(NBD_MAJOR, "nbd"); kfree(nbd_dev); printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR); } module_init(nbd_init); module_exit(nbd_cleanup); MODULE_DESCRIPTION("Network Block Device"); MODULE_LICENSE("GPL"); module_param(nbds_max, int, 0444); MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)"); module_param(max_part, int, 0444); MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)"); #ifndef NDEBUG module_param(debugflags, int, 0644); MODULE_PARM_DESC(debugflags, "flags for controlling debug output"); #endif
gpl-2.0
RC-MODULE/linux-3.10.x
drivers/edac/edac_mc_sysfs.c
313
29611
/* * edac_mc kernel module * (C) 2005-2007 Linux Networx (http://lnxi.com) * * This file may be distributed under the terms of the * GNU General Public License. * * Written Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com * * (c) 2012-2013 - Mauro Carvalho Chehab <mchehab@redhat.com> * The entire API were re-written, and ported to use struct device * */ #include <linux/ctype.h> #include <linux/slab.h> #include <linux/edac.h> #include <linux/bug.h> #include <linux/pm_runtime.h> #include <linux/uaccess.h> #include "edac_core.h" #include "edac_module.h" /* MC EDAC Controls, setable by module parameter, and sysfs */ static int edac_mc_log_ue = 1; static int edac_mc_log_ce = 1; static int edac_mc_panic_on_ue; static int edac_mc_poll_msec = 1000; /* Getter functions for above */ int edac_mc_get_log_ue(void) { return edac_mc_log_ue; } int edac_mc_get_log_ce(void) { return edac_mc_log_ce; } int edac_mc_get_panic_on_ue(void) { return edac_mc_panic_on_ue; } /* this is temporary */ int edac_mc_get_poll_msec(void) { return edac_mc_poll_msec; } static int edac_set_poll_msec(const char *val, struct kernel_param *kp) { long l; int ret; if (!val) return -EINVAL; ret = strict_strtol(val, 0, &l); if (ret == -EINVAL || ((int)l != l)) return -EINVAL; *((int *)kp->arg) = l; /* notify edac_mc engine to reset the poll period */ edac_mc_reset_delay_period(l); return 0; } /* Parameter declarations for above */ module_param(edac_mc_panic_on_ue, int, 0644); MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on"); module_param(edac_mc_log_ue, int, 0644); MODULE_PARM_DESC(edac_mc_log_ue, "Log uncorrectable error to console: 0=off 1=on"); module_param(edac_mc_log_ce, int, 0644); MODULE_PARM_DESC(edac_mc_log_ce, "Log correctable error to console: 0=off 1=on"); module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int, &edac_mc_poll_msec, 0644); MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds"); static struct device *mci_pdev; 
/* * various constants for Memory Controllers */ static const char * const mem_types[] = { [MEM_EMPTY] = "Empty", [MEM_RESERVED] = "Reserved", [MEM_UNKNOWN] = "Unknown", [MEM_FPM] = "FPM", [MEM_EDO] = "EDO", [MEM_BEDO] = "BEDO", [MEM_SDR] = "Unbuffered-SDR", [MEM_RDR] = "Registered-SDR", [MEM_DDR] = "Unbuffered-DDR", [MEM_RDDR] = "Registered-DDR", [MEM_RMBS] = "RMBS", [MEM_DDR2] = "Unbuffered-DDR2", [MEM_FB_DDR2] = "FullyBuffered-DDR2", [MEM_RDDR2] = "Registered-DDR2", [MEM_XDR] = "XDR", [MEM_DDR3] = "Unbuffered-DDR3", [MEM_RDDR3] = "Registered-DDR3" }; static const char * const dev_types[] = { [DEV_UNKNOWN] = "Unknown", [DEV_X1] = "x1", [DEV_X2] = "x2", [DEV_X4] = "x4", [DEV_X8] = "x8", [DEV_X16] = "x16", [DEV_X32] = "x32", [DEV_X64] = "x64" }; static const char * const edac_caps[] = { [EDAC_UNKNOWN] = "Unknown", [EDAC_NONE] = "None", [EDAC_RESERVED] = "Reserved", [EDAC_PARITY] = "PARITY", [EDAC_EC] = "EC", [EDAC_SECDED] = "SECDED", [EDAC_S2ECD2ED] = "S2ECD2ED", [EDAC_S4ECD4ED] = "S4ECD4ED", [EDAC_S8ECD8ED] = "S8ECD8ED", [EDAC_S16ECD16ED] = "S16ECD16ED" }; #ifdef CONFIG_EDAC_LEGACY_SYSFS /* * EDAC sysfs CSROW data structures and methods */ #define to_csrow(k) container_of(k, struct csrow_info, dev) /* * We need it to avoid namespace conflicts between the legacy API * and the per-dimm/per-rank one */ #define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \ static struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store) struct dev_ch_attribute { struct device_attribute attr; int channel; }; #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \ struct dev_ch_attribute dev_attr_legacy_##_name = \ { __ATTR(_name, _mode, _show, _store), (_var) } #define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel) /* Set of more default csrow<id> attribute show/store functions */ static ssize_t csrow_ue_count_show(struct device *dev, struct device_attribute *mattr, char *data) { struct csrow_info *csrow = to_csrow(dev); 
return sprintf(data, "%u\n", csrow->ue_count); } static ssize_t csrow_ce_count_show(struct device *dev, struct device_attribute *mattr, char *data) { struct csrow_info *csrow = to_csrow(dev); return sprintf(data, "%u\n", csrow->ce_count); } static ssize_t csrow_size_show(struct device *dev, struct device_attribute *mattr, char *data) { struct csrow_info *csrow = to_csrow(dev); int i; u32 nr_pages = 0; for (i = 0; i < csrow->nr_channels; i++) nr_pages += csrow->channels[i]->dimm->nr_pages; return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages)); } static ssize_t csrow_mem_type_show(struct device *dev, struct device_attribute *mattr, char *data) { struct csrow_info *csrow = to_csrow(dev); return sprintf(data, "%s\n", mem_types[csrow->channels[0]->dimm->mtype]); } static ssize_t csrow_dev_type_show(struct device *dev, struct device_attribute *mattr, char *data) { struct csrow_info *csrow = to_csrow(dev); return sprintf(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]); } static ssize_t csrow_edac_mode_show(struct device *dev, struct device_attribute *mattr, char *data) { struct csrow_info *csrow = to_csrow(dev); return sprintf(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]); } /* show/store functions for DIMM Label attributes */ static ssize_t channel_dimm_label_show(struct device *dev, struct device_attribute *mattr, char *data) { struct csrow_info *csrow = to_csrow(dev); unsigned chan = to_channel(mattr); struct rank_info *rank = csrow->channels[chan]; /* if field has not been initialized, there is nothing to send */ if (!rank->dimm->label[0]) return 0; return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", rank->dimm->label); } static ssize_t channel_dimm_label_store(struct device *dev, struct device_attribute *mattr, const char *data, size_t count) { struct csrow_info *csrow = to_csrow(dev); unsigned chan = to_channel(mattr); struct rank_info *rank = csrow->channels[chan]; ssize_t max_size = 0; max_size = min((ssize_t) count, (ssize_t) 
EDAC_MC_LABEL_LEN - 1); strncpy(rank->dimm->label, data, max_size); rank->dimm->label[max_size] = '\0'; return max_size; } /* show function for dynamic chX_ce_count attribute */ static ssize_t channel_ce_count_show(struct device *dev, struct device_attribute *mattr, char *data) { struct csrow_info *csrow = to_csrow(dev); unsigned chan = to_channel(mattr); struct rank_info *rank = csrow->channels[chan]; return sprintf(data, "%u\n", rank->ce_count); } /* cwrow<id>/attribute files */ DEVICE_ATTR_LEGACY(size_mb, S_IRUGO, csrow_size_show, NULL); DEVICE_ATTR_LEGACY(dev_type, S_IRUGO, csrow_dev_type_show, NULL); DEVICE_ATTR_LEGACY(mem_type, S_IRUGO, csrow_mem_type_show, NULL); DEVICE_ATTR_LEGACY(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL); DEVICE_ATTR_LEGACY(ue_count, S_IRUGO, csrow_ue_count_show, NULL); DEVICE_ATTR_LEGACY(ce_count, S_IRUGO, csrow_ce_count_show, NULL); /* default attributes of the CSROW<id> object */ static struct attribute *csrow_attrs[] = { &dev_attr_legacy_dev_type.attr, &dev_attr_legacy_mem_type.attr, &dev_attr_legacy_edac_mode.attr, &dev_attr_legacy_size_mb.attr, &dev_attr_legacy_ue_count.attr, &dev_attr_legacy_ce_count.attr, NULL, }; static struct attribute_group csrow_attr_grp = { .attrs = csrow_attrs, }; static const struct attribute_group *csrow_attr_groups[] = { &csrow_attr_grp, NULL }; static void csrow_attr_release(struct device *dev) { struct csrow_info *csrow = container_of(dev, struct csrow_info, dev); edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev)); kfree(csrow); } static struct device_type csrow_attr_type = { .groups = csrow_attr_groups, .release = csrow_attr_release, }; /* * possible dynamic channel DIMM Label attribute files * */ #define EDAC_NR_CHANNELS 6 DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR, channel_dimm_label_show, channel_dimm_label_store, 0); DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR, channel_dimm_label_show, channel_dimm_label_store, 1); DEVICE_CHANNEL(ch2_dimm_label, S_IRUGO | S_IWUSR, 
channel_dimm_label_show, channel_dimm_label_store, 2); DEVICE_CHANNEL(ch3_dimm_label, S_IRUGO | S_IWUSR, channel_dimm_label_show, channel_dimm_label_store, 3); DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR, channel_dimm_label_show, channel_dimm_label_store, 4); DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR, channel_dimm_label_show, channel_dimm_label_store, 5); /* Total possible dynamic DIMM Label attribute file table */ static struct device_attribute *dynamic_csrow_dimm_attr[] = { &dev_attr_legacy_ch0_dimm_label.attr, &dev_attr_legacy_ch1_dimm_label.attr, &dev_attr_legacy_ch2_dimm_label.attr, &dev_attr_legacy_ch3_dimm_label.attr, &dev_attr_legacy_ch4_dimm_label.attr, &dev_attr_legacy_ch5_dimm_label.attr }; /* possible dynamic channel ce_count attribute files */ DEVICE_CHANNEL(ch0_ce_count, S_IRUGO, channel_ce_count_show, NULL, 0); DEVICE_CHANNEL(ch1_ce_count, S_IRUGO, channel_ce_count_show, NULL, 1); DEVICE_CHANNEL(ch2_ce_count, S_IRUGO, channel_ce_count_show, NULL, 2); DEVICE_CHANNEL(ch3_ce_count, S_IRUGO, channel_ce_count_show, NULL, 3); DEVICE_CHANNEL(ch4_ce_count, S_IRUGO, channel_ce_count_show, NULL, 4); DEVICE_CHANNEL(ch5_ce_count, S_IRUGO, channel_ce_count_show, NULL, 5); /* Total possible dynamic ce_count attribute file table */ static struct device_attribute *dynamic_csrow_ce_count_attr[] = { &dev_attr_legacy_ch0_ce_count.attr, &dev_attr_legacy_ch1_ce_count.attr, &dev_attr_legacy_ch2_ce_count.attr, &dev_attr_legacy_ch3_ce_count.attr, &dev_attr_legacy_ch4_ce_count.attr, &dev_attr_legacy_ch5_ce_count.attr }; static inline int nr_pages_per_csrow(struct csrow_info *csrow) { int chan, nr_pages = 0; for (chan = 0; chan < csrow->nr_channels; chan++) nr_pages += csrow->channels[chan]->dimm->nr_pages; return nr_pages; } /* Create a CSROW object under specifed edac_mc_device */ static int edac_create_csrow_object(struct mem_ctl_info *mci, struct csrow_info *csrow, int index) { int err, chan; if (csrow->nr_channels >= EDAC_NR_CHANNELS) return -ENODEV; 
csrow->dev.type = &csrow_attr_type; csrow->dev.bus = mci->bus; device_initialize(&csrow->dev); csrow->dev.parent = &mci->dev; csrow->mci = mci; dev_set_name(&csrow->dev, "csrow%d", index); dev_set_drvdata(&csrow->dev, csrow); edac_dbg(0, "creating (virtual) csrow node %s\n", dev_name(&csrow->dev)); err = device_add(&csrow->dev); if (err < 0) return err; for (chan = 0; chan < csrow->nr_channels; chan++) { /* Only expose populated DIMMs */ if (!csrow->channels[chan]->dimm->nr_pages) continue; err = device_create_file(&csrow->dev, dynamic_csrow_dimm_attr[chan]); if (err < 0) goto error; err = device_create_file(&csrow->dev, dynamic_csrow_ce_count_attr[chan]); if (err < 0) { device_remove_file(&csrow->dev, dynamic_csrow_dimm_attr[chan]); goto error; } } return 0; error: for (--chan; chan >= 0; chan--) { device_remove_file(&csrow->dev, dynamic_csrow_dimm_attr[chan]); device_remove_file(&csrow->dev, dynamic_csrow_ce_count_attr[chan]); } put_device(&csrow->dev); return err; } /* Create a CSROW object under specifed edac_mc_device */ static int edac_create_csrow_objects(struct mem_ctl_info *mci) { int err, i, chan; struct csrow_info *csrow; for (i = 0; i < mci->nr_csrows; i++) { csrow = mci->csrows[i]; if (!nr_pages_per_csrow(csrow)) continue; err = edac_create_csrow_object(mci, mci->csrows[i], i); if (err < 0) { edac_dbg(1, "failure: create csrow objects for csrow %d\n", i); goto error; } } return 0; error: for (--i; i >= 0; i--) { csrow = mci->csrows[i]; if (!nr_pages_per_csrow(csrow)) continue; for (chan = csrow->nr_channels - 1; chan >= 0; chan--) { if (!csrow->channels[chan]->dimm->nr_pages) continue; device_remove_file(&csrow->dev, dynamic_csrow_dimm_attr[chan]); device_remove_file(&csrow->dev, dynamic_csrow_ce_count_attr[chan]); } put_device(&mci->csrows[i]->dev); } return err; } static void edac_delete_csrow_objects(struct mem_ctl_info *mci) { int i, chan; struct csrow_info *csrow; for (i = mci->nr_csrows - 1; i >= 0; i--) { csrow = mci->csrows[i]; if 
(!nr_pages_per_csrow(csrow)) continue; for (chan = csrow->nr_channels - 1; chan >= 0; chan--) { if (!csrow->channels[chan]->dimm->nr_pages) continue; edac_dbg(1, "Removing csrow %d channel %d sysfs nodes\n", i, chan); device_remove_file(&csrow->dev, dynamic_csrow_dimm_attr[chan]); device_remove_file(&csrow->dev, dynamic_csrow_ce_count_attr[chan]); } device_unregister(&mci->csrows[i]->dev); } } #endif /* * Per-dimm (or per-rank) devices */ #define to_dimm(k) container_of(k, struct dimm_info, dev) /* show/store functions for DIMM Label attributes */ static ssize_t dimmdev_location_show(struct device *dev, struct device_attribute *mattr, char *data) { struct dimm_info *dimm = to_dimm(dev); return edac_dimm_info_location(dimm, data, PAGE_SIZE); } static ssize_t dimmdev_label_show(struct device *dev, struct device_attribute *mattr, char *data) { struct dimm_info *dimm = to_dimm(dev); /* if field has not been initialized, there is nothing to send */ if (!dimm->label[0]) return 0; return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", dimm->label); } static ssize_t dimmdev_label_store(struct device *dev, struct device_attribute *mattr, const char *data, size_t count) { struct dimm_info *dimm = to_dimm(dev); ssize_t max_size = 0; max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1); strncpy(dimm->label, data, max_size); dimm->label[max_size] = '\0'; return max_size; } static ssize_t dimmdev_size_show(struct device *dev, struct device_attribute *mattr, char *data) { struct dimm_info *dimm = to_dimm(dev); return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages)); } static ssize_t dimmdev_mem_type_show(struct device *dev, struct device_attribute *mattr, char *data) { struct dimm_info *dimm = to_dimm(dev); return sprintf(data, "%s\n", mem_types[dimm->mtype]); } static ssize_t dimmdev_dev_type_show(struct device *dev, struct device_attribute *mattr, char *data) { struct dimm_info *dimm = to_dimm(dev); return sprintf(data, "%s\n", dev_types[dimm->dtype]); } static ssize_t 
dimmdev_edac_mode_show(struct device *dev, struct device_attribute *mattr, char *data) { struct dimm_info *dimm = to_dimm(dev); return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]); } /* dimm/rank attribute files */ static DEVICE_ATTR(dimm_label, S_IRUGO | S_IWUSR, dimmdev_label_show, dimmdev_label_store); static DEVICE_ATTR(dimm_location, S_IRUGO, dimmdev_location_show, NULL); static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL); static DEVICE_ATTR(dimm_mem_type, S_IRUGO, dimmdev_mem_type_show, NULL); static DEVICE_ATTR(dimm_dev_type, S_IRUGO, dimmdev_dev_type_show, NULL); static DEVICE_ATTR(dimm_edac_mode, S_IRUGO, dimmdev_edac_mode_show, NULL); /* attributes of the dimm<id>/rank<id> object */ static struct attribute *dimm_attrs[] = { &dev_attr_dimm_label.attr, &dev_attr_dimm_location.attr, &dev_attr_size.attr, &dev_attr_dimm_mem_type.attr, &dev_attr_dimm_dev_type.attr, &dev_attr_dimm_edac_mode.attr, NULL, }; static struct attribute_group dimm_attr_grp = { .attrs = dimm_attrs, }; static const struct attribute_group *dimm_attr_groups[] = { &dimm_attr_grp, NULL }; static void dimm_attr_release(struct device *dev) { struct dimm_info *dimm = container_of(dev, struct dimm_info, dev); edac_dbg(1, "Releasing dimm device %s\n", dev_name(dev)); kfree(dimm); } static struct device_type dimm_attr_type = { .groups = dimm_attr_groups, .release = dimm_attr_release, }; /* Create a DIMM object under specifed memory controller device */ static int edac_create_dimm_object(struct mem_ctl_info *mci, struct dimm_info *dimm, int index) { int err; dimm->mci = mci; dimm->dev.type = &dimm_attr_type; dimm->dev.bus = mci->bus; device_initialize(&dimm->dev); dimm->dev.parent = &mci->dev; if (mci->csbased) dev_set_name(&dimm->dev, "rank%d", index); else dev_set_name(&dimm->dev, "dimm%d", index); dev_set_drvdata(&dimm->dev, dimm); pm_runtime_forbid(&mci->dev); err = device_add(&dimm->dev); edac_dbg(0, "creating rank/dimm device %s\n", dev_name(&dimm->dev)); return err; } /* * Memory 
controller device */ #define to_mci(k) container_of(k, struct mem_ctl_info, dev) static ssize_t mci_reset_counters_store(struct device *dev, struct device_attribute *mattr, const char *data, size_t count) { struct mem_ctl_info *mci = to_mci(dev); int cnt, row, chan, i; mci->ue_mc = 0; mci->ce_mc = 0; mci->ue_noinfo_count = 0; mci->ce_noinfo_count = 0; for (row = 0; row < mci->nr_csrows; row++) { struct csrow_info *ri = mci->csrows[row]; ri->ue_count = 0; ri->ce_count = 0; for (chan = 0; chan < ri->nr_channels; chan++) ri->channels[chan]->ce_count = 0; } cnt = 1; for (i = 0; i < mci->n_layers; i++) { cnt *= mci->layers[i].size; memset(mci->ce_per_layer[i], 0, cnt * sizeof(u32)); memset(mci->ue_per_layer[i], 0, cnt * sizeof(u32)); } mci->start_time = jiffies; return count; } /* Memory scrubbing interface: * * A MC driver can limit the scrubbing bandwidth based on the CPU type. * Therefore, ->set_sdram_scrub_rate should be made to return the actual * bandwidth that is accepted or 0 when scrubbing is to be disabled. * * Negative value still means that an error has occurred while setting * the scrub rate. */ static ssize_t mci_sdram_scrub_rate_store(struct device *dev, struct device_attribute *mattr, const char *data, size_t count) { struct mem_ctl_info *mci = to_mci(dev); unsigned long bandwidth = 0; int new_bw = 0; if (strict_strtoul(data, 10, &bandwidth) < 0) return -EINVAL; new_bw = mci->set_sdram_scrub_rate(mci, bandwidth); if (new_bw < 0) { edac_printk(KERN_WARNING, EDAC_MC, "Error setting scrub rate to: %lu\n", bandwidth); return -EINVAL; } return count; } /* * ->get_sdram_scrub_rate() return value semantics same as above. 
*/ static ssize_t mci_sdram_scrub_rate_show(struct device *dev, struct device_attribute *mattr, char *data) { struct mem_ctl_info *mci = to_mci(dev); int bandwidth = 0; bandwidth = mci->get_sdram_scrub_rate(mci); if (bandwidth < 0) { edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n"); return bandwidth; } return sprintf(data, "%d\n", bandwidth); } /* default attribute files for the MCI object */ static ssize_t mci_ue_count_show(struct device *dev, struct device_attribute *mattr, char *data) { struct mem_ctl_info *mci = to_mci(dev); return sprintf(data, "%d\n", mci->ue_mc); } static ssize_t mci_ce_count_show(struct device *dev, struct device_attribute *mattr, char *data) { struct mem_ctl_info *mci = to_mci(dev); return sprintf(data, "%d\n", mci->ce_mc); } static ssize_t mci_ce_noinfo_show(struct device *dev, struct device_attribute *mattr, char *data) { struct mem_ctl_info *mci = to_mci(dev); return sprintf(data, "%d\n", mci->ce_noinfo_count); } static ssize_t mci_ue_noinfo_show(struct device *dev, struct device_attribute *mattr, char *data) { struct mem_ctl_info *mci = to_mci(dev); return sprintf(data, "%d\n", mci->ue_noinfo_count); } static ssize_t mci_seconds_show(struct device *dev, struct device_attribute *mattr, char *data) { struct mem_ctl_info *mci = to_mci(dev); return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ); } static ssize_t mci_ctl_name_show(struct device *dev, struct device_attribute *mattr, char *data) { struct mem_ctl_info *mci = to_mci(dev); return sprintf(data, "%s\n", mci->ctl_name); } static ssize_t mci_size_mb_show(struct device *dev, struct device_attribute *mattr, char *data) { struct mem_ctl_info *mci = to_mci(dev); int total_pages = 0, csrow_idx, j; for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) { struct csrow_info *csrow = mci->csrows[csrow_idx]; for (j = 0; j < csrow->nr_channels; j++) { struct dimm_info *dimm = csrow->channels[j]->dimm; total_pages += dimm->nr_pages; } } return sprintf(data, "%u\n", 
PAGES_TO_MiB(total_pages)); } static ssize_t mci_max_location_show(struct device *dev, struct device_attribute *mattr, char *data) { struct mem_ctl_info *mci = to_mci(dev); int i; char *p = data; for (i = 0; i < mci->n_layers; i++) { p += sprintf(p, "%s %d ", edac_layer_name[mci->layers[i].type], mci->layers[i].size - 1); } return p - data; } #ifdef CONFIG_EDAC_DEBUG static ssize_t edac_fake_inject_write(struct file *file, const char __user *data, size_t count, loff_t *ppos) { struct device *dev = file->private_data; struct mem_ctl_info *mci = to_mci(dev); static enum hw_event_mc_err_type type; u16 errcount = mci->fake_inject_count; if (!errcount) errcount = 1; type = mci->fake_inject_ue ? HW_EVENT_ERR_UNCORRECTED : HW_EVENT_ERR_CORRECTED; printk(KERN_DEBUG "Generating %d %s fake error%s to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n", errcount, (type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE", errcount > 1 ? "s" : "", mci->fake_inject_layer[0], mci->fake_inject_layer[1], mci->fake_inject_layer[2] ); edac_mc_handle_error(type, mci, errcount, 0, 0, 0, mci->fake_inject_layer[0], mci->fake_inject_layer[1], mci->fake_inject_layer[2], "FAKE ERROR", "for EDAC testing only"); return count; } static const struct file_operations debug_fake_inject_fops = { .open = simple_open, .write = edac_fake_inject_write, .llseek = generic_file_llseek, }; #endif /* default Control file */ DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store); /* default Attribute files */ DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL); DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL); DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL); DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL); DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL); DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL); DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL); DEVICE_ATTR(max_location, S_IRUGO, 
mci_max_location_show, NULL); /* memory scrubber attribute file */ DEVICE_ATTR(sdram_scrub_rate, 0, NULL, NULL); static struct attribute *mci_attrs[] = { &dev_attr_reset_counters.attr, &dev_attr_mc_name.attr, &dev_attr_size_mb.attr, &dev_attr_seconds_since_reset.attr, &dev_attr_ue_noinfo_count.attr, &dev_attr_ce_noinfo_count.attr, &dev_attr_ue_count.attr, &dev_attr_ce_count.attr, &dev_attr_max_location.attr, NULL }; static struct attribute_group mci_attr_grp = { .attrs = mci_attrs, }; static const struct attribute_group *mci_attr_groups[] = { &mci_attr_grp, NULL }; static void mci_attr_release(struct device *dev) { struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev); edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev)); kfree(mci); } static struct device_type mci_attr_type = { .groups = mci_attr_groups, .release = mci_attr_release, }; #ifdef CONFIG_EDAC_DEBUG static struct dentry *edac_debugfs; int __init edac_debugfs_init(void) { edac_debugfs = debugfs_create_dir("edac", NULL); if (IS_ERR(edac_debugfs)) { edac_debugfs = NULL; return -ENOMEM; } return 0; } void __exit edac_debugfs_exit(void) { debugfs_remove(edac_debugfs); } int edac_create_debug_nodes(struct mem_ctl_info *mci) { struct dentry *d, *parent; char name[80]; int i; if (!edac_debugfs) return -ENODEV; d = debugfs_create_dir(mci->dev.kobj.name, edac_debugfs); if (!d) return -ENOMEM; parent = d; for (i = 0; i < mci->n_layers; i++) { sprintf(name, "fake_inject_%s", edac_layer_name[mci->layers[i].type]); d = debugfs_create_u8(name, S_IRUGO | S_IWUSR, parent, &mci->fake_inject_layer[i]); if (!d) goto nomem; } d = debugfs_create_bool("fake_inject_ue", S_IRUGO | S_IWUSR, parent, &mci->fake_inject_ue); if (!d) goto nomem; d = debugfs_create_u16("fake_inject_count", S_IRUGO | S_IWUSR, parent, &mci->fake_inject_count); if (!d) goto nomem; d = debugfs_create_file("fake_inject", S_IWUSR, parent, &mci->dev, &debug_fake_inject_fops); if (!d) goto nomem; mci->debugfs = parent; return 0; nomem: 
debugfs_remove(mci->debugfs); return -ENOMEM; } #endif /* * Create a new Memory Controller kobject instance, * mc<id> under the 'mc' directory * * Return: * 0 Success * !0 Failure */ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) { int i, err; /* * The memory controller needs its own bus, in order to avoid * namespace conflicts at /sys/bus/edac. */ mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); if (!mci->bus->name) return -ENOMEM; edac_dbg(0, "creating bus %s\n", mci->bus->name); err = bus_register(mci->bus); if (err < 0) return err; /* get the /sys/devices/system/edac subsys reference */ mci->dev.type = &mci_attr_type; device_initialize(&mci->dev); mci->dev.parent = mci_pdev; mci->dev.bus = mci->bus; dev_set_name(&mci->dev, "mc%d", mci->mc_idx); dev_set_drvdata(&mci->dev, mci); pm_runtime_forbid(&mci->dev); edac_dbg(0, "creating device %s\n", dev_name(&mci->dev)); err = device_add(&mci->dev); if (err < 0) { edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev)); bus_unregister(mci->bus); kfree(mci->bus->name); return err; } if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) { if (mci->get_sdram_scrub_rate) { dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO; dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show; } if (mci->set_sdram_scrub_rate) { dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR; dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store; } err = device_create_file(&mci->dev, &dev_attr_sdram_scrub_rate); if (err) { edac_dbg(1, "failure: create sdram_scrub_rate\n"); goto fail2; } } /* * Create the dimm/rank devices */ for (i = 0; i < mci->tot_dimms; i++) { struct dimm_info *dimm = mci->dimms[i]; /* Only expose populated DIMMs */ if (dimm->nr_pages == 0) continue; #ifdef CONFIG_EDAC_DEBUG edac_dbg(1, "creating dimm%d, located at ", i); if (edac_debug_level >= 1) { int lay; for (lay = 0; lay < mci->n_layers; lay++) printk(KERN_CONT "%s %d ", edac_layer_name[mci->layers[lay].type], dimm->location[lay]); 
printk(KERN_CONT "\n"); } #endif err = edac_create_dimm_object(mci, dimm, i); if (err) { edac_dbg(1, "failure: create dimm %d obj\n", i); goto fail; } } #ifdef CONFIG_EDAC_LEGACY_SYSFS err = edac_create_csrow_objects(mci); if (err < 0) goto fail; #endif #ifdef CONFIG_EDAC_DEBUG edac_create_debug_nodes(mci); #endif return 0; fail: for (i--; i >= 0; i--) { struct dimm_info *dimm = mci->dimms[i]; if (dimm->nr_pages == 0) continue; device_unregister(&dimm->dev); } fail2: device_unregister(&mci->dev); bus_unregister(mci->bus); kfree(mci->bus->name); return err; } /* * remove a Memory Controller instance */ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) { int i; edac_dbg(0, "\n"); #ifdef CONFIG_EDAC_DEBUG debugfs_remove(mci->debugfs); #endif #ifdef CONFIG_EDAC_LEGACY_SYSFS edac_delete_csrow_objects(mci); #endif for (i = 0; i < mci->tot_dimms; i++) { struct dimm_info *dimm = mci->dimms[i]; if (dimm->nr_pages == 0) continue; edac_dbg(0, "removing device %s\n", dev_name(&dimm->dev)); device_unregister(&dimm->dev); } } void edac_unregister_sysfs(struct mem_ctl_info *mci) { edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev)); device_unregister(&mci->dev); bus_unregister(mci->bus); kfree(mci->bus->name); } static void mc_attr_release(struct device *dev) { /* * There's no container structure here, as this is just the mci * parent device, used to create the /sys/devices/mc sysfs node. * So, there are no attributes on it. */ edac_dbg(1, "Releasing device %s\n", dev_name(dev)); kfree(dev); } static struct device_type mc_attr_type = { .release = mc_attr_release, }; /* * Init/exit code for the module. 
Basically, creates/removes /sys/class/rc */ int __init edac_mc_sysfs_init(void) { struct bus_type *edac_subsys; int err; /* get the /sys/devices/system/edac subsys reference */ edac_subsys = edac_get_sysfs_subsys(); if (edac_subsys == NULL) { edac_dbg(1, "no edac_subsys\n"); err = -EINVAL; goto out; } mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL); if (!mci_pdev) { err = -ENOMEM; goto out_put_sysfs; } mci_pdev->bus = edac_subsys; mci_pdev->type = &mc_attr_type; device_initialize(mci_pdev); dev_set_name(mci_pdev, "mc"); err = device_add(mci_pdev); if (err < 0) goto out_dev_free; edac_dbg(0, "device %s created\n", dev_name(mci_pdev)); return 0; out_dev_free: kfree(mci_pdev); out_put_sysfs: edac_put_sysfs_subsys(); out: return err; } void __exit edac_mc_sysfs_exit(void) { device_unregister(mci_pdev); edac_put_sysfs_subsys(); }
gpl-2.0
NamJa/surface3-kernel
arch/powerpc/platforms/pseries/nvram.c
313
23445
/* * c 2001 PPC 64 Team, IBM Corp * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * /dev/nvram driver for PPC64 * * This perhaps should live in drivers/char */ #include <linux/types.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/kmsg_dump.h> #include <linux/pstore.h> #include <linux/ctype.h> #include <linux/zlib.h> #include <asm/uaccess.h> #include <asm/nvram.h> #include <asm/rtas.h> #include <asm/prom.h> #include <asm/machdep.h> /* Max bytes to read/write in one go */ #define NVRW_CNT 0x20 /* * Set oops header version to distinguish between old and new format header. * lnx,oops-log partition max size is 4000, header version > 4000 will * help in identifying new header. */ #define OOPS_HDR_VERSION 5000 static unsigned int nvram_size; static int nvram_fetch, nvram_store; static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */ static DEFINE_SPINLOCK(nvram_lock); struct err_log_info { __be32 error_type; __be32 seq_num; }; struct nvram_os_partition { const char *name; int req_size; /* desired size, in bytes */ int min_size; /* minimum acceptable size (0 means req_size) */ long size; /* size of data portion (excluding err_log_info) */ long index; /* offset of data portion of partition */ bool os_partition; /* partition initialized by OS, not FW */ }; static struct nvram_os_partition rtas_log_partition = { .name = "ibm,rtas-log", .req_size = 2079, .min_size = 1055, .index = -1, .os_partition = true }; static struct nvram_os_partition oops_log_partition = { .name = "lnx,oops-log", .req_size = 4000, .min_size = 2000, .index = -1, .os_partition = true }; static const char *pseries_nvram_os_partitions[] = { "ibm,rtas-log", "lnx,oops-log", NULL }; struct oops_log_info { __be16 version; 
__be16 report_length; __be64 timestamp; } __attribute__((packed)); static void oops_to_nvram(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason); static struct kmsg_dumper nvram_kmsg_dumper = { .dump = oops_to_nvram }; /* See clobbering_unread_rtas_event() */ #define NVRAM_RTAS_READ_TIMEOUT 5 /* seconds */ static unsigned long last_unread_rtas_event; /* timestamp */ /* * For capturing and compressing an oops or panic report... * big_oops_buf[] holds the uncompressed text we're capturing. * * oops_buf[] holds the compressed text, preceded by a oops header. * oops header has u16 holding the version of oops header (to differentiate * between old and new format header) followed by u16 holding the length of * the compressed* text (*Or uncompressed, if compression fails.) and u64 * holding the timestamp. oops_buf[] gets written to NVRAM. * * oops_log_info points to the header. oops_data points to the compressed text. * * +- oops_buf * | +- oops_data * v v * +-----------+-----------+-----------+------------------------+ * | version | length | timestamp | text | * | (2 bytes) | (2 bytes) | (8 bytes) | (oops_data_sz bytes) | * +-----------+-----------+-----------+------------------------+ * ^ * +- oops_log_info * * We preallocate these buffers during init to avoid kmalloc during oops/panic. 
 */
static size_t big_oops_buf_sz;
static char *big_oops_buf, *oops_buf;
static char *oops_data;
static size_t oops_data_sz;

/* Compression parameters */
#define COMPR_LEVEL 6
#define WINDOW_BITS 12
#define MEM_LEVEL 4
/* Shared zlib state; safe because oops capture is serialized by its caller. */
static struct z_stream_s stream;

#ifdef CONFIG_PSTORE
/* Firmware-owned partitions exposed read-only through pstore. */
static struct nvram_os_partition of_config_partition = {
	.name = "of-config",
	.index = -1,
	.os_partition = false
};

static struct nvram_os_partition common_partition = {
	.name = "common",
	.index = -1,
	.os_partition = false
};

/* Iteration order for nvram_pstore_read(); -1 terminates the list. */
static enum pstore_type_id nvram_type_ids[] = {
	PSTORE_TYPE_DMESG,
	PSTORE_TYPE_PPC_RTAS,
	PSTORE_TYPE_PPC_OF,
	PSTORE_TYPE_PPC_COMMON,
	-1
};
static int read_type;			/* index into nvram_type_ids[]; reset by nvram_pstore_open() */
static unsigned long last_rtas_event;	/* timestamp of last buffered RTAS event */
#endif

/*
 * Read 'count' bytes of NVRAM starting at *index into buf, in NVRW_CNT-byte
 * chunks via the RTAS nvram-fetch call.  Advances *index past the bytes read.
 * Returns the number of bytes read, 0 at end of NVRAM, or a negative errno.
 */
static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index)
{
	unsigned int i;
	unsigned long len;
	int done;
	unsigned long flags;
	char *p = buf;


	if (nvram_size == 0 || nvram_fetch == RTAS_UNKNOWN_SERVICE)
		return -ENODEV;

	if (*index >= nvram_size)
		return 0;

	i = *index;
	if (i + count > nvram_size)
		count = nvram_size - i;

	/* nvram_buf is shared; hold the lock for the whole transfer */
	spin_lock_irqsave(&nvram_lock, flags);

	for (; count != 0; count -= len) {
		len = count;
		if (len > NVRW_CNT)
			len = NVRW_CNT;

		/* RTAS reports bytes actually fetched in 'done' */
		if ((rtas_call(nvram_fetch, 3, 2, &done, i, __pa(nvram_buf),
			       len) != 0) || len != done) {
			spin_unlock_irqrestore(&nvram_lock, flags);
			return -EIO;
		}

		memcpy(p, nvram_buf, len);

		p += len;
		i += len;
	}

	spin_unlock_irqrestore(&nvram_lock, flags);

	*index = i;
	return p - buf;
}

/*
 * Write 'count' bytes from buf to NVRAM starting at *index, in NVRW_CNT-byte
 * chunks via the RTAS nvram-store call.  Advances *index past the bytes
 * written.  Returns the number of bytes written or a negative errno.
 */
static ssize_t pSeries_nvram_write(char *buf, size_t count, loff_t *index)
{
	unsigned int i;
	unsigned long len;
	int done;
	unsigned long flags;
	const char *p = buf;

	if (nvram_size == 0 || nvram_store == RTAS_UNKNOWN_SERVICE)
		return -ENODEV;

	if (*index >= nvram_size)
		return 0;

	i = *index;
	if (i + count > nvram_size)
		count = nvram_size - i;

	spin_lock_irqsave(&nvram_lock, flags);

	for (; count != 0; count -= len) {
		len = count;
		if (len > NVRW_CNT)
			len = NVRW_CNT;

		/* stage through nvram_buf: RTAS needs a physical address */
		memcpy(nvram_buf, p, len);

		if ((rtas_call(nvram_store, 3, 2, &done, i, __pa(nvram_buf),
			       len) != 0) ||
		    len != done) {
			spin_unlock_irqrestore(&nvram_lock, flags);
			return -EIO;
		}

		p += len;
		i += len;
	}
	spin_unlock_irqrestore(&nvram_lock, flags);

	*index = i;
	return p - buf;
}

/* Total NVRAM size in bytes, or -ENODEV if no NVRAM was found. */
static ssize_t pSeries_nvram_get_size(void)
{
	return nvram_size ? nvram_size : -ENODEV;
}

/* nvram_write_os_partition, nvram_write_error_log
 *
 * We need to buffer the error logs into nvram to ensure that we have
 * the failure information to decode.  If we have a severe error there
 * is no way to guarantee that the OS or the machine is in a state to
 * get back to user land and write the error to disk.  For example if
 * the SCSI device driver causes a Machine Check by writing to a bad
 * IO address, there is no way of guaranteeing that the device driver
 * is in any state that is would also be able to write the error data
 * captured to disk, thus we buffer it in NVRAM for analysis on the
 * next boot.
 *
 * In NVRAM the partition containing the error log buffer will looks like:
 * Header (in bytes):
 * +-----------+----------+--------+------------+------------------+
 * | signature | checksum | length | name       | data             |
 * |0          |1         |2      3|4         15|16        length-1|
 * +-----------+----------+--------+------------+------------------+
 *
 * The 'data' section would look like (in bytes):
 * +--------------+------------+-----------------------------------+
 * | event_logged | sequence # | error log                         |
 * |0            3|4          7|8                  error_log_size-1|
 * +--------------+------------+-----------------------------------+
 *
 * event_logged: 0 if event has not been logged to syslog, 1 if it has
 * sequence #: The unique sequence # for each event.
(until it wraps) * error log: The error log from event_scan */ int nvram_write_os_partition(struct nvram_os_partition *part, char * buff, int length, unsigned int err_type, unsigned int error_log_cnt) { int rc; loff_t tmp_index; struct err_log_info info; if (part->index == -1) { return -ESPIPE; } if (length > part->size) { length = part->size; } info.error_type = cpu_to_be32(err_type); info.seq_num = cpu_to_be32(error_log_cnt); tmp_index = part->index; rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index); if (rc <= 0) { pr_err("%s: Failed nvram_write (%d)\n", __FUNCTION__, rc); return rc; } rc = ppc_md.nvram_write(buff, length, &tmp_index); if (rc <= 0) { pr_err("%s: Failed nvram_write (%d)\n", __FUNCTION__, rc); return rc; } return 0; } int nvram_write_error_log(char * buff, int length, unsigned int err_type, unsigned int error_log_cnt) { int rc = nvram_write_os_partition(&rtas_log_partition, buff, length, err_type, error_log_cnt); if (!rc) { last_unread_rtas_event = get_seconds(); #ifdef CONFIG_PSTORE last_rtas_event = get_seconds(); #endif } return rc; } /* nvram_read_partition * * Reads nvram partition for at most 'length' */ int nvram_read_partition(struct nvram_os_partition *part, char *buff, int length, unsigned int *err_type, unsigned int *error_log_cnt) { int rc; loff_t tmp_index; struct err_log_info info; if (part->index == -1) return -1; if (length > part->size) length = part->size; tmp_index = part->index; if (part->os_partition) { rc = ppc_md.nvram_read((char *)&info, sizeof(struct err_log_info), &tmp_index); if (rc <= 0) { pr_err("%s: Failed nvram_read (%d)\n", __FUNCTION__, rc); return rc; } } rc = ppc_md.nvram_read(buff, length, &tmp_index); if (rc <= 0) { pr_err("%s: Failed nvram_read (%d)\n", __FUNCTION__, rc); return rc; } if (part->os_partition) { *error_log_cnt = be32_to_cpu(info.seq_num); *err_type = be32_to_cpu(info.error_type); } return 0; } /* nvram_read_error_log * * Reads nvram for error log for at most 'length' 
*/ int nvram_read_error_log(char *buff, int length, unsigned int *err_type, unsigned int *error_log_cnt) { return nvram_read_partition(&rtas_log_partition, buff, length, err_type, error_log_cnt); } /* This doesn't actually zero anything, but it sets the event_logged * word to tell that this event is safely in syslog. */ int nvram_clear_error_log(void) { loff_t tmp_index; int clear_word = ERR_FLAG_ALREADY_LOGGED; int rc; if (rtas_log_partition.index == -1) return -1; tmp_index = rtas_log_partition.index; rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index); if (rc <= 0) { printk(KERN_ERR "nvram_clear_error_log: Failed nvram_write (%d)\n", rc); return rc; } last_unread_rtas_event = 0; return 0; } /* pseries_nvram_init_os_partition * * This sets up a partition with an "OS" signature. * * The general strategy is the following: * 1.) If a partition with the indicated name already exists... * - If it's large enough, use it. * - Otherwise, recycle it and keep going. * 2.) Search for a free partition that is large enough. * 3.) If there's not a free partition large enough, recycle any obsolete * OS partitions and try again. * 4.) Will first try getting a chunk that will satisfy the requested size. * 5.) If a chunk of the requested size cannot be allocated, then try finding * a chunk that will satisfy the minum needed. * * Returns 0 on success, else -1. 
*/ static int __init pseries_nvram_init_os_partition(struct nvram_os_partition *part) { loff_t p; int size; /* Look for ours */ p = nvram_find_partition(part->name, NVRAM_SIG_OS, &size); /* Found one but too small, remove it */ if (p && size < part->min_size) { pr_info("nvram: Found too small %s partition," " removing it...\n", part->name); nvram_remove_partition(part->name, NVRAM_SIG_OS, NULL); p = 0; } /* Create one if we didn't find */ if (!p) { p = nvram_create_partition(part->name, NVRAM_SIG_OS, part->req_size, part->min_size); if (p == -ENOSPC) { pr_info("nvram: No room to create %s partition, " "deleting any obsolete OS partitions...\n", part->name); nvram_remove_partition(NULL, NVRAM_SIG_OS, pseries_nvram_os_partitions); p = nvram_create_partition(part->name, NVRAM_SIG_OS, part->req_size, part->min_size); } } if (p <= 0) { pr_err("nvram: Failed to find or create %s" " partition, err %d\n", part->name, (int)p); return -1; } part->index = p; part->size = nvram_get_partition_size(p) - sizeof(struct err_log_info); return 0; } /* * Are we using the ibm,rtas-log for oops/panic reports? And if so, * would logging this oops/panic overwrite an RTAS event that rtas_errd * hasn't had a chance to read and process? Return 1 if so, else 0. * * We assume that if rtas_errd hasn't read the RTAS event in * NVRAM_RTAS_READ_TIMEOUT seconds, it's probably not going to. 
*/ static int clobbering_unread_rtas_event(void) { return (oops_log_partition.index == rtas_log_partition.index && last_unread_rtas_event && get_seconds() - last_unread_rtas_event <= NVRAM_RTAS_READ_TIMEOUT); } /* Derived from logfs_compress() */ static int nvram_compress(const void *in, void *out, size_t inlen, size_t outlen) { int err, ret; ret = -EIO; err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS, MEM_LEVEL, Z_DEFAULT_STRATEGY); if (err != Z_OK) goto error; stream.next_in = in; stream.avail_in = inlen; stream.total_in = 0; stream.next_out = out; stream.avail_out = outlen; stream.total_out = 0; err = zlib_deflate(&stream, Z_FINISH); if (err != Z_STREAM_END) goto error; err = zlib_deflateEnd(&stream); if (err != Z_OK) goto error; if (stream.total_out >= stream.total_in) goto error; ret = stream.total_out; error: return ret; } /* Compress the text from big_oops_buf into oops_buf. */ static int zip_oops(size_t text_len) { struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf; int zipped_len = nvram_compress(big_oops_buf, oops_data, text_len, oops_data_sz); if (zipped_len < 0) { pr_err("nvram: compression failed; returned %d\n", zipped_len); pr_err("nvram: logging uncompressed oops/panic report\n"); return -1; } oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); oops_hdr->report_length = cpu_to_be16(zipped_len); oops_hdr->timestamp = cpu_to_be64(get_seconds()); return 0; } #ifdef CONFIG_PSTORE static int nvram_pstore_open(struct pstore_info *psi) { /* Reset the iterator to start reading partitions again */ read_type = -1; return 0; } /** * nvram_pstore_write - pstore write callback for nvram * @type: Type of message logged * @reason: reason behind dump (oops/panic) * @id: identifier to indicate the write performed * @part: pstore writes data to registered buffer in parts, * part number will indicate the same. 
* @count: Indicates oops count * @compressed: Flag to indicate the log is compressed * @size: number of bytes written to the registered buffer * @psi: registered pstore_info structure * * Called by pstore_dump() when an oops or panic report is logged in the * printk buffer. * Returns 0 on successful write. */ static int nvram_pstore_write(enum pstore_type_id type, enum kmsg_dump_reason reason, u64 *id, unsigned int part, int count, bool compressed, size_t size, struct pstore_info *psi) { int rc; unsigned int err_type = ERR_TYPE_KERNEL_PANIC; struct oops_log_info *oops_hdr = (struct oops_log_info *) oops_buf; /* part 1 has the recent messages from printk buffer */ if (part > 1 || type != PSTORE_TYPE_DMESG || clobbering_unread_rtas_event()) return -1; oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); oops_hdr->report_length = cpu_to_be16(size); oops_hdr->timestamp = cpu_to_be64(get_seconds()); if (compressed) err_type = ERR_TYPE_KERNEL_PANIC_GZ; rc = nvram_write_os_partition(&oops_log_partition, oops_buf, (int) (sizeof(*oops_hdr) + size), err_type, count); if (rc != 0) return rc; *id = part; return 0; } /* * Reads the oops/panic report, rtas, of-config and common partition. * Returns the length of the data we read from each partition. * Returns 0 if we've been called before. 
*/ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type, int *count, struct timespec *time, char **buf, bool *compressed, struct pstore_info *psi) { struct oops_log_info *oops_hdr; unsigned int err_type, id_no, size = 0; struct nvram_os_partition *part = NULL; char *buff = NULL; int sig = 0; loff_t p; read_type++; switch (nvram_type_ids[read_type]) { case PSTORE_TYPE_DMESG: part = &oops_log_partition; *type = PSTORE_TYPE_DMESG; break; case PSTORE_TYPE_PPC_RTAS: part = &rtas_log_partition; *type = PSTORE_TYPE_PPC_RTAS; time->tv_sec = last_rtas_event; time->tv_nsec = 0; break; case PSTORE_TYPE_PPC_OF: sig = NVRAM_SIG_OF; part = &of_config_partition; *type = PSTORE_TYPE_PPC_OF; *id = PSTORE_TYPE_PPC_OF; time->tv_sec = 0; time->tv_nsec = 0; break; case PSTORE_TYPE_PPC_COMMON: sig = NVRAM_SIG_SYS; part = &common_partition; *type = PSTORE_TYPE_PPC_COMMON; *id = PSTORE_TYPE_PPC_COMMON; time->tv_sec = 0; time->tv_nsec = 0; break; default: return 0; } if (!part->os_partition) { p = nvram_find_partition(part->name, sig, &size); if (p <= 0) { pr_err("nvram: Failed to find partition %s, " "err %d\n", part->name, (int)p); return 0; } part->index = p; part->size = size; } buff = kmalloc(part->size, GFP_KERNEL); if (!buff) return -ENOMEM; if (nvram_read_partition(part, buff, part->size, &err_type, &id_no)) { kfree(buff); return 0; } *count = 0; if (part->os_partition) *id = id_no; if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) { size_t length, hdr_size; oops_hdr = (struct oops_log_info *)buff; if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) { /* Old format oops header had 2-byte record size */ hdr_size = sizeof(u16); length = be16_to_cpu(oops_hdr->version); time->tv_sec = 0; time->tv_nsec = 0; } else { hdr_size = sizeof(*oops_hdr); length = be16_to_cpu(oops_hdr->report_length); time->tv_sec = be64_to_cpu(oops_hdr->timestamp); time->tv_nsec = 0; } *buf = kmalloc(length, GFP_KERNEL); if (*buf == NULL) return -ENOMEM; memcpy(*buf, buff + hdr_size, length); 
kfree(buff); if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) *compressed = true; else *compressed = false; return length; } *buf = buff; return part->size; } static struct pstore_info nvram_pstore_info = { .owner = THIS_MODULE, .name = "nvram", .open = nvram_pstore_open, .read = nvram_pstore_read, .write = nvram_pstore_write, }; static int nvram_pstore_init(void) { int rc = 0; nvram_pstore_info.buf = oops_data; nvram_pstore_info.bufsize = oops_data_sz; rc = pstore_register(&nvram_pstore_info); if (rc != 0) pr_err("nvram: pstore_register() failed, defaults to " "kmsg_dump; returned %d\n", rc); return rc; } #else static int nvram_pstore_init(void) { return -1; } #endif static void __init nvram_init_oops_partition(int rtas_partition_exists) { int rc; rc = pseries_nvram_init_os_partition(&oops_log_partition); if (rc != 0) { if (!rtas_partition_exists) return; pr_notice("nvram: Using %s partition to log both" " RTAS errors and oops/panic reports\n", rtas_log_partition.name); memcpy(&oops_log_partition, &rtas_log_partition, sizeof(rtas_log_partition)); } oops_buf = kmalloc(oops_log_partition.size, GFP_KERNEL); if (!oops_buf) { pr_err("nvram: No memory for %s partition\n", oops_log_partition.name); return; } oops_data = oops_buf + sizeof(struct oops_log_info); oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info); rc = nvram_pstore_init(); if (!rc) return; /* * Figure compression (preceded by elimination of each line's <n> * severity prefix) will reduce the oops/panic report to at most * 45% of its original size. 
*/ big_oops_buf_sz = (oops_data_sz * 100) / 45; big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL); if (big_oops_buf) { stream.workspace = kmalloc(zlib_deflate_workspacesize( WINDOW_BITS, MEM_LEVEL), GFP_KERNEL); if (!stream.workspace) { pr_err("nvram: No memory for compression workspace; " "skipping compression of %s partition data\n", oops_log_partition.name); kfree(big_oops_buf); big_oops_buf = NULL; } } else { pr_err("No memory for uncompressed %s data; " "skipping compression\n", oops_log_partition.name); stream.workspace = NULL; } rc = kmsg_dump_register(&nvram_kmsg_dumper); if (rc != 0) { pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc); kfree(oops_buf); kfree(big_oops_buf); kfree(stream.workspace); } } static int __init pseries_nvram_init_log_partitions(void) { int rc; /* Scan nvram for partitions */ nvram_scan_partitions(); rc = pseries_nvram_init_os_partition(&rtas_log_partition); nvram_init_oops_partition(rc == 0); return 0; } machine_arch_initcall(pseries, pseries_nvram_init_log_partitions); int __init pSeries_nvram_init(void) { struct device_node *nvram; const __be32 *nbytes_p; unsigned int proplen; nvram = of_find_node_by_type(NULL, "nvram"); if (nvram == NULL) return -ENODEV; nbytes_p = of_get_property(nvram, "#bytes", &proplen); if (nbytes_p == NULL || proplen != sizeof(unsigned int)) { of_node_put(nvram); return -EIO; } nvram_size = be32_to_cpup(nbytes_p); nvram_fetch = rtas_token("nvram-fetch"); nvram_store = rtas_token("nvram-store"); printk(KERN_INFO "PPC64 nvram contains %d bytes\n", nvram_size); of_node_put(nvram); ppc_md.nvram_read = pSeries_nvram_read; ppc_md.nvram_write = pSeries_nvram_write; ppc_md.nvram_size = pSeries_nvram_get_size; return 0; } /* * This is our kmsg_dump callback, called after an oops or panic report * has been written to the printk buffer. We want to capture as much * of the printk buffer as possible. 
First, capture as much as we can * that we think will compress sufficiently to fit in the lnx,oops-log * partition. If that's too much, go back and capture uncompressed text. */ static void oops_to_nvram(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason) { struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf; static unsigned int oops_count = 0; static bool panicking = false; static DEFINE_SPINLOCK(lock); unsigned long flags; size_t text_len; unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ; int rc = -1; switch (reason) { case KMSG_DUMP_RESTART: case KMSG_DUMP_HALT: case KMSG_DUMP_POWEROFF: /* These are almost always orderly shutdowns. */ return; case KMSG_DUMP_OOPS: break; case KMSG_DUMP_PANIC: panicking = true; break; case KMSG_DUMP_EMERG: if (panicking) /* Panic report already captured. */ return; break; default: pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n", __FUNCTION__, (int) reason); return; } if (clobbering_unread_rtas_event()) return; if (!spin_trylock_irqsave(&lock, flags)) return; if (big_oops_buf) { kmsg_dump_get_buffer(dumper, false, big_oops_buf, big_oops_buf_sz, &text_len); rc = zip_oops(text_len); } if (rc != 0) { kmsg_dump_rewind(dumper); kmsg_dump_get_buffer(dumper, false, oops_data, oops_data_sz, &text_len); err_type = ERR_TYPE_KERNEL_PANIC; oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); oops_hdr->report_length = cpu_to_be16(text_len); oops_hdr->timestamp = cpu_to_be64(get_seconds()); } (void) nvram_write_os_partition(&oops_log_partition, oops_buf, (int) (sizeof(*oops_hdr) + text_len), err_type, ++oops_count); spin_unlock_irqrestore(&lock, flags); }
gpl-2.0
agat63/GS4_test
kernel/time/tick-broadcast.c
569
16436
/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
/* FIXME: Use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);	/* cpus depending on broadcast */
static DECLARE_BITMAP(tmpmask, NR_CPUS);		/* scratch mask, protected by tick_broadcast_lock */
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;			/* broadcast forced on via BROADCAST_FORCE */

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return to_cpumask(tick_broadcast_mask);
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check, if the device can be utilized as broadcast device:
 * Rejects dummy devices, devices that stop in C3, and devices that do not
 * beat the current broadcast device's rating.  Returns 1 if 'dev' was
 * installed as the broadcast device, else 0.
 */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (tick_broadcast_device.evtdev &&
	     tick_broadcast_device.evtdev->rating >= dev->rating) ||
	     (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	/* if someone already depends on broadcast, start ticking right away */
	if (!cpumask_empty(tick_get_broadcast_mask()))
		tick_broadcast_start_periodic(dev);
	return 1;
}

/*
 * Check, if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

/*
 * Check, if the device is disfunctional and a place holder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		cpumask_set_cpu(cpu, tick_get_broadcast_mask());
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		/*
		 * When the new device is not affected by the stop
		 * feature and the cpu is marked in the broadcast mask
		 * then clear the broadcast bit.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			/*
			 * NOTE(review): this declaration shadows the 'cpu'
			 * parameter — verify both always name the current CPU
			 * at this call site.
			 */
			int cpu = smp_processor_id();

			cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
			tick_broadcast_clear_oneshot(cpu);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		/* handle ourselves directly instead of IPI-ing ourselves */
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	raw_spin_lock(&tick_broadcast_lock);

	cpumask_and(to_cpumask(tmpmask),
		    cpu_online_mask, tick_get_broadcast_mask());
	tick_do_broadcast(to_cpumask(tmpmask));

	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode. We read dev->next_event first and add to it
	 * when the event already expired. clockevents_program_event()
	 * sets dev->next_event only when the event is really
	 * programmed to the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
		tick_do_periodic_broadcast();
	}
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate ?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	/* remember whether the broadcast device was idle before this change */
	bc_stopped = cpumask_empty(tick_get_broadcast_mask());

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		if (!cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
			cpumask_set_cpu(cpu, tick_get_broadcast_mask());
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		/* a forced broadcast cannot be switched off again */
		if (!tick_broadcast_force &&
		    cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
			cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpumask_empty(tick_get_broadcast_mask())) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	/* broadcast state can only be changed from the CPU itself */
	if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	else
		tick_do_broadcast_on_off(&reason);
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_get_broadcast_mask());

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		/* last dependent cpu gone: stop the broadcast device */
		if (bc && cpumask_empty(tick_get_broadcast_mask()))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/* Shut the broadcast device down across a system suspend. */
void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Resume the broadcast device after suspend.  Returns non-zero when the
 * calling CPU is (still) handled via broadcast.
 */
int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_get_broadcast_mask()))
				tick_broadcast_start_periodic(bc);
			broadcast = cpumask_test_cpu(smp_processor_id(),
						     tick_get_broadcast_mask());
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_get_broadcast_mask()))
				broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}


#ifdef CONFIG_TICK_ONESHOT

/* FIXME: use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_oneshot_mask, NR_CPUS);

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return to_cpumask(tick_broadcast_oneshot_mask);
}

/* Program the broadcast device to fire at 'expires' (oneshot mode). */
static int tick_broadcast_set_event(ktime_t expires, int force)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

	return clockevents_program_event(bc, expires, force);
}

int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
	if (cpumask_test_cpu(cpu, to_cpumask(tick_broadcast_oneshot_mask))) {
		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

		/*
		 * We might be in the middle of switching over from
		 * periodic to oneshot. If the CPU has not yet
		 * switched over, leave the device alone.
		 */
		if (td->mode == TICKDEV_MODE_ONESHOT) {
			clockevents_set_mode(td->evtdev,
					     CLOCK_EVT_MODE_ONESHOT);
		}
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu;

	raw_spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(to_cpumask(tmpmask));
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64)
			cpumask_set_cpu(cpu, to_cpumask(tmpmask));
		else if (td->evtdev->next_event.tv64 < next_event.tv64)
			/* track the earliest still-pending event */
			next_event.tv64 = td->evtdev->next_event.tv64;
	}

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	tick_do_broadcast(to_cpumask(tmpmask));

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If event expired,
		 * repeat the above
		 */
		if (tick_broadcast_set_event(next_event, 0))
			goto again;
	}
	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return;

	/*
	 * We are called with preemtion disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return;

	bc = tick_broadcast_device.evtdev;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		/* entering deep idle: hand our next event to the broadcast device */
		if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
			cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask());
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			if (dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(dev->next_event, 1);
		}
	} else {
		/* leaving deep idle: take the event back onto the local device */
		if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
			cpumask_clear_cpu(cpu,
					  tick_get_broadcast_oneshot_mask());
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			if (dev->next_event.tv64 != KTIME_MAX)
				tick_program_event(dev->next_event, 1);
		}
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
}

/* Set next_event on every cpu in 'mask' to 'expires'. */
static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

		bc->event_handler = tick_handle_oneshot_broadcast;

		/* Take the do_timer update */
		tick_do_timer_cpu = cpu;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(to_cpumask(tmpmask), tick_get_broadcast_mask());
		cpumask_clear_cpu(cpu, to_cpumask(tmpmask));
		cpumask_or(tick_get_broadcast_oneshot_mask(),
			   tick_get_broadcast_oneshot_mask(),
			   to_cpumask(tmpmask));

		if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
			clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
			tick_broadcast_init_next_event(to_cpumask(tmpmask),
						       tick_next_period);
			tick_broadcast_set_event(tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * to program the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}


/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast mask flag for the dead cpu, but do not
	 * stop the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Check, whether the broadcast device is in one shot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#endif
gpl-2.0
croniccorey/WIP-8x50
arch/arm/kernel/perf_event_v7.c
1081
24771
/* * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code. * * ARMv7 support: Jean Pihet <jpihet@mvista.com> * 2010 (c) MontaVista Software, LLC. * * Copied from ARMv6 code, with the low level code inspired * by the ARMv7 Oprofile code. * * Cortex-A8 has up to 4 configurable performance counters and * a single cycle counter. * Cortex-A9 has up to 31 configurable performance counters and * a single cycle counter. * * All counters can be enabled/disabled and IRQ masked separately. The cycle * counter and all 4 performance counters together can be reset separately. */ #ifdef CONFIG_CPU_V7 /* Common ARMv7 event types */ enum armv7_perf_types { ARMV7_PERFCTR_PMNC_SW_INCR = 0x00, ARMV7_PERFCTR_IFETCH_MISS = 0x01, ARMV7_PERFCTR_ITLB_MISS = 0x02, ARMV7_PERFCTR_DCACHE_REFILL = 0x03, ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, ARMV7_PERFCTR_DTLB_REFILL = 0x05, ARMV7_PERFCTR_DREAD = 0x06, ARMV7_PERFCTR_DWRITE = 0x07, ARMV7_PERFCTR_EXC_TAKEN = 0x09, ARMV7_PERFCTR_EXC_EXECUTED = 0x0A, ARMV7_PERFCTR_CID_WRITE = 0x0B, /* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS. * It counts: * - all branch instructions, * - instructions that explicitly write the PC, * - exception generating instructions. 
*/ ARMV7_PERFCTR_PC_WRITE = 0x0C, ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D, ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F, ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10, ARMV7_PERFCTR_CLOCK_CYCLES = 0x11, ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12, ARMV7_PERFCTR_CPU_CYCLES = 0xFF }; /* ARMv7 Cortex-A8 specific event types */ enum armv7_a8_perf_types { ARMV7_PERFCTR_INSTR_EXECUTED = 0x08, ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E, ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40, ARMV7_PERFCTR_L2_STORE_MERGED = 0x41, ARMV7_PERFCTR_L2_STORE_BUFF = 0x42, ARMV7_PERFCTR_L2_ACCESS = 0x43, ARMV7_PERFCTR_L2_CACH_MISS = 0x44, ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45, ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46, ARMV7_PERFCTR_MEMORY_REPLAY = 0x47, ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48, ARMV7_PERFCTR_L1_DATA_MISS = 0x49, ARMV7_PERFCTR_L1_INST_MISS = 0x4A, ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B, ARMV7_PERFCTR_L1_NEON_DATA = 0x4C, ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D, ARMV7_PERFCTR_L2_NEON = 0x4E, ARMV7_PERFCTR_L2_NEON_HIT = 0x4F, ARMV7_PERFCTR_L1_INST = 0x50, ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51, ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52, ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53, ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54, ARMV7_PERFCTR_OP_EXECUTED = 0x55, ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56, ARMV7_PERFCTR_CYCLES_INST = 0x57, ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58, ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59, ARMV7_PERFCTR_NEON_CYCLES = 0x5A, ARMV7_PERFCTR_PMU0_EVENTS = 0x70, ARMV7_PERFCTR_PMU1_EVENTS = 0x71, ARMV7_PERFCTR_PMU_EVENTS = 0x72, }; /* ARMv7 Cortex-A9 specific event types */ enum armv7_a9_perf_types { ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40, ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41, ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42, ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50, ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51, ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60, ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61, ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62, ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63, 
ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64, ARMV7_PERFCTR_DATA_EVICTION = 0x65, ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66, ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67, ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68, ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E, ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70, ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71, ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72, ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73, ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74, ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80, ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81, ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82, ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83, ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84, ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85, ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86, ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A, ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B, ARMV7_PERFCTR_ISB_INST = 0x90, ARMV7_PERFCTR_DSB_INST = 0x91, ARMV7_PERFCTR_DMB_INST = 0x92, ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93, ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0, ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1, ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2, ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3, ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4, ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5 }; /* * Cortex-A8 HW events mapping * * The hardware events that we support. We do support cache operations but * we have harvard caches and no way to combine instruction and data * accesses/misses in hardware. 
*/ static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = { [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, }; static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { [C(L1D)] = { /* * The performance counters don't differentiate between read * and write accesses/misses so this isn't strictly correct, * but it's the best we can do. Writes and reads get * combined. */ [C(OP_READ)] = { [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(L1I)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST, [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST, [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(LL)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS, [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS, [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(DTLB)] = { /* * Only ITLB misses and DTLB refills are supported. * If users want the DTLB refills misses a raw counter * must be used. 
*/ [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(ITLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(BPU)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, }; /* * Cortex-A9 HW events mapping */ static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = { [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE, [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_DCACHE_ACCESS, [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_DCACHE_REFILL, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, }; static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { [C(L1D)] = { /* * The performance counters don't differentiate between read * and write accesses/misses so this isn't strictly correct, * but it's the best we can do. Writes and reads get * combined. 
*/ [C(OP_READ)] = { [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(L1I)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(LL)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(DTLB)] = { /* * Only ITLB misses and DTLB refills are supported. * If users want the DTLB refills misses a raw counter * must be used. 
*/ [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(ITLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(BPU)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, }; /* * Perf Events counters */ enum armv7_counters { ARMV7_CYCLE_COUNTER = 1, /* Cycle counter */ ARMV7_COUNTER0 = 2, /* First event counter */ }; /* * The cycle counter is ARMV7_CYCLE_COUNTER. * The first event counter is ARMV7_COUNTER0. * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1). 
*/ #define ARMV7_COUNTER_LAST (ARMV7_COUNTER0 + armpmu->num_events - 1) /* * ARMv7 low level PMNC access */ /* * Per-CPU PMNC: config reg */ #define ARMV7_PMNC_E (1 << 0) /* Enable all counters */ #define ARMV7_PMNC_P (1 << 1) /* Reset all counters */ #define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */ #define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */ #define ARMV7_PMNC_X (1 << 4) /* Export to ETM */ #define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ #define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */ #define ARMV7_PMNC_N_MASK 0x1f #define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */ /* * Available counters */ #define ARMV7_CNT0 0 /* First event counter */ #define ARMV7_CCNT 31 /* Cycle counter */ /* Perf Event to low level counters mapping */ #define ARMV7_EVENT_CNT_TO_CNTx (ARMV7_COUNTER0 - ARMV7_CNT0) /* * CNTENS: counters enable reg */ #define ARMV7_CNTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) #define ARMV7_CNTENS_C (1 << ARMV7_CCNT) /* * CNTENC: counters disable reg */ #define ARMV7_CNTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) #define ARMV7_CNTENC_C (1 << ARMV7_CCNT) /* * INTENS: counters overflow interrupt enable reg */ #define ARMV7_INTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) #define ARMV7_INTENS_C (1 << ARMV7_CCNT) /* * INTENC: counters overflow interrupt disable reg */ #define ARMV7_INTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) #define ARMV7_INTENC_C (1 << ARMV7_CCNT) /* * EVTSEL: Event selection reg */ #define ARMV7_EVTSEL_MASK 0xff /* Mask for writable bits */ /* * SELECT: Counter selection reg */ #define ARMV7_SELECT_MASK 0x1f /* Mask for writable bits */ /* * FLAG: counters overflow flag status reg */ #define ARMV7_FLAG_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) #define ARMV7_FLAG_C (1 << ARMV7_CCNT) #define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */ #define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK static inline unsigned long armv7_pmnc_read(void) 
{ u32 val; asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val)); return val; } static inline void armv7_pmnc_write(unsigned long val) { val &= ARMV7_PMNC_MASK; isb(); asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val)); } static inline int armv7_pmnc_has_overflowed(unsigned long pmnc) { return pmnc & ARMV7_OVERFLOWED_MASK; } static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc, enum armv7_counters counter) { int ret = 0; if (counter == ARMV7_CYCLE_COUNTER) ret = pmnc & ARMV7_FLAG_C; else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST)) ret = pmnc & ARMV7_FLAG_P(counter); else pr_err("CPU%u checking wrong counter %d overflow status\n", smp_processor_id(), counter); return ret; } static inline int armv7_pmnc_select_counter(unsigned int idx) { u32 val; if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) { pr_err("CPU%u selecting wrong PMNC counter" " %d\n", smp_processor_id(), idx); return -1; } val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK; asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val)); isb(); return idx; } static inline u32 armv7pmu_read_counter(int idx) { unsigned long value = 0; if (idx == ARMV7_CYCLE_COUNTER) asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value)); else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) { if (armv7_pmnc_select_counter(idx) == idx) asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value)); } else pr_err("CPU%u reading wrong counter %d\n", smp_processor_id(), idx); return value; } static inline void armv7pmu_write_counter(int idx, u32 value) { if (idx == ARMV7_CYCLE_COUNTER) asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value)); else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) { if (armv7_pmnc_select_counter(idx) == idx) asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value)); } else pr_err("CPU%u writing wrong counter %d\n", smp_processor_id(), idx); } static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val) { if 
(armv7_pmnc_select_counter(idx) == idx) { val &= ARMV7_EVTSEL_MASK; asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val)); } } static inline u32 armv7_pmnc_enable_counter(unsigned int idx) { u32 val; if ((idx != ARMV7_CYCLE_COUNTER) && ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { pr_err("CPU%u enabling wrong PMNC counter" " %d\n", smp_processor_id(), idx); return -1; } if (idx == ARMV7_CYCLE_COUNTER) val = ARMV7_CNTENS_C; else val = ARMV7_CNTENS_P(idx); asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val)); return idx; } static inline u32 armv7_pmnc_disable_counter(unsigned int idx) { u32 val; if ((idx != ARMV7_CYCLE_COUNTER) && ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { pr_err("CPU%u disabling wrong PMNC counter" " %d\n", smp_processor_id(), idx); return -1; } if (idx == ARMV7_CYCLE_COUNTER) val = ARMV7_CNTENC_C; else val = ARMV7_CNTENC_P(idx); asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val)); return idx; } static inline u32 armv7_pmnc_enable_intens(unsigned int idx) { u32 val; if ((idx != ARMV7_CYCLE_COUNTER) && ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { pr_err("CPU%u enabling wrong PMNC counter" " interrupt enable %d\n", smp_processor_id(), idx); return -1; } if (idx == ARMV7_CYCLE_COUNTER) val = ARMV7_INTENS_C; else val = ARMV7_INTENS_P(idx); asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val)); return idx; } static inline u32 armv7_pmnc_disable_intens(unsigned int idx) { u32 val; if ((idx != ARMV7_CYCLE_COUNTER) && ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { pr_err("CPU%u disabling wrong PMNC counter" " interrupt enable %d\n", smp_processor_id(), idx); return -1; } if (idx == ARMV7_CYCLE_COUNTER) val = ARMV7_INTENC_C; else val = ARMV7_INTENC_P(idx); asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val)); return idx; } static inline u32 armv7_pmnc_getreset_flags(void) { u32 val; /* Read */ asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); /* Write to clear flags */ val &= 
ARMV7_FLAG_MASK; asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val)); return val; } #ifdef DEBUG static void armv7_pmnc_dump_regs(void) { u32 val; unsigned int cnt; printk(KERN_INFO "PMNC registers dump:\n"); asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val)); printk(KERN_INFO "PMNC =0x%08x\n", val); asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val)); printk(KERN_INFO "CNTENS=0x%08x\n", val); asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val)); printk(KERN_INFO "INTENS=0x%08x\n", val); asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); printk(KERN_INFO "FLAGS =0x%08x\n", val); asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val)); printk(KERN_INFO "SELECT=0x%08x\n", val); asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); printk(KERN_INFO "CCNT =0x%08x\n", val); for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) { armv7_pmnc_select_counter(cnt); asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); printk(KERN_INFO "CNT[%d] count =0x%08x\n", cnt-ARMV7_EVENT_CNT_TO_CNTx, val); asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val)); printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n", cnt-ARMV7_EVENT_CNT_TO_CNTx, val); } } #endif static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) { unsigned long flags; /* * Enable counter and interrupt, and set the counter to count * the event that we're interested in. 
*/ raw_spin_lock_irqsave(&pmu_lock, flags); /* * Disable counter */ armv7_pmnc_disable_counter(idx); /* * Set event (if destined for PMNx counters) * We don't need to set the event if it's a cycle count */ if (idx != ARMV7_CYCLE_COUNTER) armv7_pmnc_write_evtsel(idx, hwc->config_base); /* * Enable interrupt for this counter */ armv7_pmnc_enable_intens(idx); /* * Enable counter */ armv7_pmnc_enable_counter(idx); raw_spin_unlock_irqrestore(&pmu_lock, flags); } static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx) { unsigned long flags; /* * Disable counter and interrupt */ raw_spin_lock_irqsave(&pmu_lock, flags); /* * Disable counter */ armv7_pmnc_disable_counter(idx); /* * Disable interrupt for this counter */ armv7_pmnc_disable_intens(idx); raw_spin_unlock_irqrestore(&pmu_lock, flags); } static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) { unsigned long pmnc; struct perf_sample_data data; struct cpu_hw_events *cpuc; struct pt_regs *regs; int idx; /* * Get and reset the IRQ flags */ pmnc = armv7_pmnc_getreset_flags(); /* * Did an overflow occur? */ if (!armv7_pmnc_has_overflowed(pmnc)) return IRQ_NONE; /* * Handle the counter(s) overflow(s) */ regs = get_irq_regs(); perf_sample_data_init(&data, 0); cpuc = &__get_cpu_var(cpu_hw_events); for (idx = 0; idx <= armpmu->num_events; ++idx) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; if (!test_bit(idx, cpuc->active_mask)) continue; /* * We have a single interrupt for all counters. Check that * each counter has overflowed before we process it. */ if (!armv7_pmnc_counter_has_overflowed(pmnc, idx)) continue; hwc = &event->hw; armpmu_event_update(event, hwc, idx, 1); data.period = event->hw.last_period; if (!armpmu_event_set_period(event, hwc, idx)) continue; if (perf_event_overflow(event, 0, &data, regs)) armpmu->disable(hwc, idx); } /* * Handle the pending perf events. * * Note: this call *must* be run with interrupts disabled. 
For * platforms that can have the PMU interrupts raised as an NMI, this * will not work. */ irq_work_run(); return IRQ_HANDLED; } static void armv7pmu_start(void) { unsigned long flags; raw_spin_lock_irqsave(&pmu_lock, flags); /* Enable all counters */ armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E); raw_spin_unlock_irqrestore(&pmu_lock, flags); } static void armv7pmu_stop(void) { unsigned long flags; raw_spin_lock_irqsave(&pmu_lock, flags); /* Disable all counters */ armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E); raw_spin_unlock_irqrestore(&pmu_lock, flags); } static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *event) { int idx; /* Always place a cycle counter into the cycle counter. */ if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) { if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask)) return -EAGAIN; return ARMV7_CYCLE_COUNTER; } else { /* * For anything other than a cycle counter, try and use * the events counters */ for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) { if (!test_and_set_bit(idx, cpuc->used_mask)) return idx; } /* The counters are all in use. */ return -EAGAIN; } } static void armv7pmu_reset(void *info) { u32 idx, nb_cnt = armpmu->num_events; /* The counter and interrupt enable registers are unknown at reset. 
*/ for (idx = 1; idx < nb_cnt; ++idx) armv7pmu_disable_event(NULL, idx); /* Initialize & Reset PMNC: C and P bits */ armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C); } static struct arm_pmu armv7pmu = { .handle_irq = armv7pmu_handle_irq, .enable = armv7pmu_enable_event, .disable = armv7pmu_disable_event, .read_counter = armv7pmu_read_counter, .write_counter = armv7pmu_write_counter, .get_event_idx = armv7pmu_get_event_idx, .start = armv7pmu_start, .stop = armv7pmu_stop, .reset = armv7pmu_reset, .raw_event_mask = 0xFF, .max_period = (1LLU << 32) - 1, }; static u32 __init armv7_read_num_pmnc_events(void) { u32 nb_cnt; /* Read the nb of CNTx counters supported from PMNC */ nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK; /* Add the CPU cycles counter and return */ return nb_cnt + 1; } static const struct arm_pmu *__init armv7_a8_pmu_init(void) { armv7pmu.id = ARM_PERF_PMU_ID_CA8; armv7pmu.name = "ARMv7 Cortex-A8"; armv7pmu.cache_map = &armv7_a8_perf_cache_map; armv7pmu.event_map = &armv7_a8_perf_map; armv7pmu.num_events = armv7_read_num_pmnc_events(); return &armv7pmu; } static const struct arm_pmu *__init armv7_a9_pmu_init(void) { armv7pmu.id = ARM_PERF_PMU_ID_CA9; armv7pmu.name = "ARMv7 Cortex-A9"; armv7pmu.cache_map = &armv7_a9_perf_cache_map; armv7pmu.event_map = &armv7_a9_perf_map; armv7pmu.num_events = armv7_read_num_pmnc_events(); return &armv7pmu; } #else static const struct arm_pmu *__init armv7_a8_pmu_init(void) { return NULL; } static const struct arm_pmu *__init armv7_a9_pmu_init(void) { return NULL; } #endif /* CONFIG_CPU_V7 */
gpl-2.0
darchstar/kernel-heroc-2.6.32
arch/avr32/kernel/module.c
1337
8775
/* * AVR32-specific kernel module loader * * Copyright (C) 2005-2006 Atmel Corporation * * GOT initialization parts are based on the s390 version * Copyright (C) 2002, 2003 IBM Deutschland Entwicklung GmbH, * IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/bug.h> #include <linux/elf.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleloader.h> #include <linux/vmalloc.h> void *module_alloc(unsigned long size) { if (size == 0) return NULL; return vmalloc(size); } void module_free(struct module *mod, void *module_region) { vfree(mod->arch.syminfo); mod->arch.syminfo = NULL; vfree(module_region); } static inline int check_rela(Elf32_Rela *rela, struct module *module, char *strings, Elf32_Sym *symbols) { struct mod_arch_syminfo *info; info = module->arch.syminfo + ELF32_R_SYM(rela->r_info); switch (ELF32_R_TYPE(rela->r_info)) { case R_AVR32_GOT32: case R_AVR32_GOT16: case R_AVR32_GOT8: case R_AVR32_GOT21S: case R_AVR32_GOT18SW: /* mcall */ case R_AVR32_GOT16S: /* ld.w */ if (rela->r_addend != 0) { printk(KERN_ERR "GOT relocation against %s at offset %u with addend\n", strings + symbols[ELF32_R_SYM(rela->r_info)].st_name, rela->r_offset); return -ENOEXEC; } if (info->got_offset == -1UL) { info->got_offset = module->arch.got_size; module->arch.got_size += sizeof(void *); } pr_debug("GOT[%3lu] %s\n", info->got_offset, strings + symbols[ELF32_R_SYM(rela->r_info)].st_name); break; } return 0; } int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, char *secstrings, struct module *module) { Elf32_Shdr *symtab; Elf32_Sym *symbols; Elf32_Rela *rela; char *strings; int nrela, i, j; int ret; /* Find the symbol table */ symtab = NULL; for (i = 0; i < hdr->e_shnum; i++) switch (sechdrs[i].sh_type) { case SHT_SYMTAB: symtab = &sechdrs[i]; break; } if (!symtab) { printk(KERN_ERR 
"module %s: no symbol table\n", module->name); return -ENOEXEC; } /* Allocate room for one syminfo structure per symbol. */ module->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym); module->arch.syminfo = vmalloc(module->arch.nsyms * sizeof(struct mod_arch_syminfo)); if (!module->arch.syminfo) return -ENOMEM; symbols = (void *)hdr + symtab->sh_offset; strings = (void *)hdr + sechdrs[symtab->sh_link].sh_offset; for (i = 0; i < module->arch.nsyms; i++) { if (symbols[i].st_shndx == SHN_UNDEF && strcmp(strings + symbols[i].st_name, "_GLOBAL_OFFSET_TABLE_") == 0) /* "Define" it as absolute. */ symbols[i].st_shndx = SHN_ABS; module->arch.syminfo[i].got_offset = -1UL; module->arch.syminfo[i].got_initialized = 0; } /* Allocate GOT entries for symbols that need it. */ module->arch.got_size = 0; for (i = 0; i < hdr->e_shnum; i++) { if (sechdrs[i].sh_type != SHT_RELA) continue; nrela = sechdrs[i].sh_size / sizeof(Elf32_Rela); rela = (void *)hdr + sechdrs[i].sh_offset; for (j = 0; j < nrela; j++) { ret = check_rela(rela + j, module, strings, symbols); if (ret) goto out_free_syminfo; } } /* * Increase core size to make room for GOT and set start * offset for GOT. 
*/ module->core_size = ALIGN(module->core_size, 4); module->arch.got_offset = module->core_size; module->core_size += module->arch.got_size; return 0; out_free_syminfo: vfree(module->arch.syminfo); module->arch.syminfo = NULL; return ret; } static inline int reloc_overflow(struct module *module, const char *reloc_name, Elf32_Addr relocation) { printk(KERN_ERR "module %s: Value %lx does not fit relocation %s\n", module->name, (unsigned long)relocation, reloc_name); return -ENOEXEC; } #define get_u16(loc) (*((uint16_t *)loc)) #define put_u16(loc, val) (*((uint16_t *)loc) = (val)) int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relindex, struct module *module) { Elf32_Shdr *symsec = sechdrs + symindex; Elf32_Shdr *relsec = sechdrs + relindex; Elf32_Shdr *dstsec = sechdrs + relsec->sh_info; Elf32_Rela *rel = (void *)relsec->sh_addr; unsigned int i; int ret = 0; for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rela); i++, rel++) { struct mod_arch_syminfo *info; Elf32_Sym *sym; Elf32_Addr relocation; uint32_t *location; uint32_t value; location = (void *)dstsec->sh_addr + rel->r_offset; sym = (Elf32_Sym *)symsec->sh_addr + ELF32_R_SYM(rel->r_info); relocation = sym->st_value + rel->r_addend; info = module->arch.syminfo + ELF32_R_SYM(rel->r_info); /* Initialize GOT entry if necessary */ switch (ELF32_R_TYPE(rel->r_info)) { case R_AVR32_GOT32: case R_AVR32_GOT16: case R_AVR32_GOT8: case R_AVR32_GOT21S: case R_AVR32_GOT18SW: case R_AVR32_GOT16S: if (!info->got_initialized) { Elf32_Addr *gotent; gotent = (module->module_core + module->arch.got_offset + info->got_offset); *gotent = relocation; info->got_initialized = 1; } relocation = info->got_offset; break; } switch (ELF32_R_TYPE(rel->r_info)) { case R_AVR32_32: case R_AVR32_32_CPENT: *location = relocation; break; case R_AVR32_22H_PCREL: relocation -= (Elf32_Addr)location; if ((relocation & 0xffe00001) != 0 && (relocation & 0xffc00001) != 0xffc00000) return 
reloc_overflow(module, "R_AVR32_22H_PCREL", relocation); relocation >>= 1; value = *location; value = ((value & 0xe1ef0000) | (relocation & 0xffff) | ((relocation & 0x10000) << 4) | ((relocation & 0x1e0000) << 8)); *location = value; break; case R_AVR32_11H_PCREL: relocation -= (Elf32_Addr)location; if ((relocation & 0xfffffc01) != 0 && (relocation & 0xfffff801) != 0xfffff800) return reloc_overflow(module, "R_AVR32_11H_PCREL", relocation); value = get_u16(location); value = ((value & 0xf00c) | ((relocation & 0x1fe) << 3) | ((relocation & 0x600) >> 9)); put_u16(location, value); break; case R_AVR32_9H_PCREL: relocation -= (Elf32_Addr)location; if ((relocation & 0xffffff01) != 0 && (relocation & 0xfffffe01) != 0xfffffe00) return reloc_overflow(module, "R_AVR32_9H_PCREL", relocation); value = get_u16(location); value = ((value & 0xf00f) | ((relocation & 0x1fe) << 3)); put_u16(location, value); break; case R_AVR32_9UW_PCREL: relocation -= ((Elf32_Addr)location) & 0xfffffffc; if ((relocation & 0xfffffc03) != 0) return reloc_overflow(module, "R_AVR32_9UW_PCREL", relocation); value = get_u16(location); value = ((value & 0xf80f) | ((relocation & 0x1fc) << 2)); put_u16(location, value); break; case R_AVR32_GOTPC: /* * R6 = PC - (PC - GOT) * * At this point, relocation contains the * value of PC. Just subtract the value of * GOT, and we're done. 
*/ pr_debug("GOTPC: PC=0x%x, got_offset=0x%lx, core=0x%p\n", relocation, module->arch.got_offset, module->module_core); relocation -= ((unsigned long)module->module_core + module->arch.got_offset); *location = relocation; break; case R_AVR32_GOT18SW: if ((relocation & 0xfffe0003) != 0 && (relocation & 0xfffc0003) != 0xffff0000) return reloc_overflow(module, "R_AVR32_GOT18SW", relocation); relocation >>= 2; /* fall through */ case R_AVR32_GOT16S: if ((relocation & 0xffff8000) != 0 && (relocation & 0xffff0000) != 0xffff0000) return reloc_overflow(module, "R_AVR32_GOT16S", relocation); pr_debug("GOT reloc @ 0x%x -> %u\n", rel->r_offset, relocation); value = *location; value = ((value & 0xffff0000) | (relocation & 0xffff)); *location = value; break; default: printk(KERN_ERR "module %s: Unknown relocation: %u\n", module->name, ELF32_R_TYPE(rel->r_info)); return -ENOEXEC; } } return ret; } int apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relindex, struct module *module) { printk(KERN_ERR "module %s: REL relocations are not supported\n", module->name); return -ENOEXEC; } int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *module) { vfree(module->arch.syminfo); module->arch.syminfo = NULL; return module_bug_finalize(hdr, sechdrs, module); } void module_arch_cleanup(struct module *module) { module_bug_cleanup(module); }
gpl-2.0
Alucard24/Alucard-Kernel-LG-G5
Documentation/vDSO/vdso_standalone_test_x86.c
1849
2853
/* * vdso_test.c: Sample code to test parse_vdso.c on x86 * Copyright (c) 2011-2014 Andy Lutomirski * Subject to the GNU General Public License, version 2 * * You can amuse yourself by compiling with: * gcc -std=gnu99 -nostdlib * -Os -fno-asynchronous-unwind-tables -flto -lgcc_s * vdso_standalone_test_x86.c parse_vdso.c * to generate a small binary. On x86_64, you can omit -lgcc_s * if you want the binary to be completely standalone. */ #include <sys/syscall.h> #include <sys/time.h> #include <unistd.h> #include <stdint.h> extern void *vdso_sym(const char *version, const char *name); extern void vdso_init_from_sysinfo_ehdr(uintptr_t base); extern void vdso_init_from_auxv(void *auxv); /* We need a libc functions... */ int strcmp(const char *a, const char *b) { /* This implementation is buggy: it never returns -1. */ while (*a || *b) { if (*a != *b) return 1; if (*a == 0 || *b == 0) return 1; a++; b++; } return 0; } /* ...and two syscalls. This is x86-specific. */ static inline long x86_syscall3(long nr, long a0, long a1, long a2) { long ret; #ifdef __x86_64__ asm volatile ("syscall" : "=a" (ret) : "a" (nr), "D" (a0), "S" (a1), "d" (a2) : "cc", "memory", "rcx", "r8", "r9", "r10", "r11" ); #else asm volatile ("int $0x80" : "=a" (ret) : "a" (nr), "b" (a0), "c" (a1), "d" (a2) : "cc", "memory" ); #endif return ret; } static inline long linux_write(int fd, const void *data, size_t len) { return x86_syscall3(__NR_write, fd, (long)data, (long)len); } static inline void linux_exit(int code) { x86_syscall3(__NR_exit, code, 0, 0); } void to_base10(char *lastdig, time_t n) { while (n) { *lastdig = (n % 10) + '0'; n /= 10; lastdig--; } } __attribute__((externally_visible)) void c_main(void **stack) { /* Parse the stack */ long argc = (long)*stack; stack += argc + 2; /* Now we're pointing at the environment. Skip it. */ while(*stack) stack++; stack++; /* Now we're pointing at auxv. Initialize the vDSO parser. */ vdso_init_from_auxv((void *)stack); /* Find gettimeofday. 
*/ typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz); gtod_t gtod = (gtod_t)vdso_sym("LINUX_2.6", "__vdso_gettimeofday"); if (!gtod) linux_exit(1); struct timeval tv; long ret = gtod(&tv, 0); if (ret == 0) { char buf[] = "The time is .000000\n"; to_base10(buf + 31, tv.tv_sec); to_base10(buf + 38, tv.tv_usec); linux_write(1, buf, sizeof(buf) - 1); } else { linux_exit(ret); } linux_exit(0); } /* * This is the real entry point. It passes the initial stack into * the C entry point. */ asm ( ".text\n" ".global _start\n" ".type _start,@function\n" "_start:\n\t" #ifdef __x86_64__ "mov %rsp,%rdi\n\t" "jmp c_main" #else "push %esp\n\t" "call c_main\n\t" "int $3" #endif );
gpl-2.0