repo_name    stringlengths    5-85
path         stringlengths    3-252
copies       stringlengths    1-5
size         stringlengths    4-6
content      stringlengths    922-999k
license      stringclasses    15 values
repo_name: garyvan/openwrt
path: linux-3.10.20-rt14/tools/perf/util/quote.c
copies: 13587
size: 1265
content:
#include "cache.h" #include "quote.h" /* Help to copy the thing properly quoted for the shell safety. * any single quote is replaced with '\'', any exclamation point * is replaced with '\!', and the whole thing is enclosed in a * * E.g. * original sq_quote result * name ==> name ==> 'name' * a b ==> a b ==> 'a b' * a'b ==> a'\''b ==> 'a'\''b' * a!b ==> a'\!'b ==> 'a'\!'b' */ static inline int need_bs_quote(char c) { return (c == '\'' || c == '!'); } static void sq_quote_buf(struct strbuf *dst, const char *src) { char *to_free = NULL; if (dst->buf == src) to_free = strbuf_detach(dst, NULL); strbuf_addch(dst, '\''); while (*src) { size_t len = strcspn(src, "'!"); strbuf_add(dst, src, len); src += len; while (need_bs_quote(*src)) { strbuf_addstr(dst, "'\\"); strbuf_addch(dst, *src++); strbuf_addch(dst, '\''); } } strbuf_addch(dst, '\''); free(to_free); } void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen) { int i; /* Copy into destination buffer. */ strbuf_grow(dst, 255); for (i = 0; argv[i]; ++i) { strbuf_addch(dst, ' '); sq_quote_buf(dst, argv[i]); if (maxlen && dst->len > maxlen) die("Too many or long arguments"); } }
license: gpl-2.0
repo_name: davidmueller13/android_kernel_google_msm
path: arch/m68k/sun3/prom/misc.c
copies: 13843
size: 1803
content:
/*
 * misc.c: Miscellaneous prom functions that don't belong
 *         anywhere else.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/sun3-head.h>
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/movs.h>

/* Reset and reboot the machine with the command 'bcommand'. */
void prom_reboot(char *bcommand)
{
	unsigned long flags;

	local_irq_save(flags);
	(*(romvec->pv_reboot))(bcommand);
	local_irq_restore(flags);
}

/* Drop into the prom, with the chance to continue with the 'go'
 * prom command.
 */
void prom_cmdline(void)
{
}

/* Drop into the prom, but completely terminate the program.
 * No chance of continuing.
 */
void prom_halt(void)
{
	unsigned long flags;

again:
	local_irq_save(flags);
	(*(romvec->pv_halt))();
	local_irq_restore(flags);
	goto again;	/* PROM is out to get me -DaveM */
}

typedef void (*sfunc_t)(void);

/* Get the idprom and stuff it into buffer 'idbuf'. Returns the
 * format type. 'num_bytes' is the number of bytes that your idbuf
 * has space for. Returns 0xff on error.
 */
unsigned char prom_get_idprom(char *idbuf, int num_bytes)
{
	int i, oldsfc;

	GET_SFC(oldsfc);
	SET_SFC(FC_CONTROL);

	for (i = 0; i < num_bytes; i++) {
		/* There is a problem with the GET_CONTROL_BYTE macro;
		 * defining the extra variable gets around it.
		 */
		int c;

		GET_CONTROL_BYTE(SUN3_IDPROM_BASE + i, c);
		idbuf[i] = c;
	}

	SET_SFC(oldsfc);

	return idbuf[0];
}

/* Get the major prom version number. */
int prom_version(void)
{
	return romvec->pv_romvers;
}

/* Get the prom plugin-revision. */
int prom_getrev(void)
{
	return prom_rev;
}

/* Get the prom firmware print revision. */
int prom_getprev(void)
{
	return prom_prev;
}
license: gpl-2.0
repo_name: GHackAnonymous/linux
path: drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
copies: 20
size: 66916
content:
/* * This file is part of the Chelsio T4 Ethernet driver for Linux. * * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/seq_file.h> #include <linux/debugfs.h> #include <linux/string_helpers.h> #include <linux/sort.h> #include <linux/ctype.h> #include "cxgb4.h" #include "t4_regs.h" #include "t4_values.h" #include "t4fw_api.h" #include "cxgb4_debugfs.h" #include "clip_tbl.h" #include "l2t.h" /* generic seq_file support for showing a table of size rows x width. */ static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos) { pos -= tb->skip_first; return pos >= tb->rows ? NULL : &tb->data[pos * tb->width]; } static void *seq_tab_start(struct seq_file *seq, loff_t *pos) { struct seq_tab *tb = seq->private; if (tb->skip_first && *pos == 0) return SEQ_START_TOKEN; return seq_tab_get_idx(tb, *pos); } static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos) { v = seq_tab_get_idx(seq->private, *pos + 1); if (v) ++*pos; return v; } static void seq_tab_stop(struct seq_file *seq, void *v) { } static int seq_tab_show(struct seq_file *seq, void *v) { const struct seq_tab *tb = seq->private; return tb->show(seq, v, ((char *)v - tb->data) / tb->width); } static const struct seq_operations seq_tab_ops = { .start = seq_tab_start, .next = seq_tab_next, .stop = seq_tab_stop, .show = seq_tab_show }; struct seq_tab *seq_open_tab(struct file *f, unsigned int rows, unsigned int width, unsigned int have_header, int (*show)(struct seq_file *seq, void *v, int i)) { struct seq_tab *p; p = __seq_open_private(f, &seq_tab_ops, sizeof(*p) + rows * width); if (p) { p->show = show; p->rows = rows; p->width = width; p->skip_first = have_header != 0; } return p; } /* Trim the size of a seq_tab to the supplied number of rows. The operation is * irreversible. 
*/ static int seq_tab_trim(struct seq_tab *p, unsigned int new_rows) { if (new_rows > p->rows) return -EINVAL; p->rows = new_rows; return 0; } static int cim_la_show(struct seq_file *seq, void *v, int idx) { if (v == SEQ_START_TOKEN) seq_puts(seq, "Status Data PC LS0Stat LS0Addr " " LS0Data\n"); else { const u32 *p = v; seq_printf(seq, " %02x %x%07x %x%07x %08x %08x %08x%08x%08x%08x\n", (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5], p[6], p[7]); } return 0; } static int cim_la_show_3in1(struct seq_file *seq, void *v, int idx) { if (v == SEQ_START_TOKEN) { seq_puts(seq, "Status Data PC\n"); } else { const u32 *p = v; seq_printf(seq, " %02x %08x %08x\n", p[5] & 0xff, p[6], p[7]); seq_printf(seq, " %02x %02x%06x %02x%06x\n", (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8, p[4] & 0xff, p[5] >> 8); seq_printf(seq, " %02x %x%07x %x%07x\n", (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, p[1] & 0xf, p[2] >> 4); } return 0; } static int cim_la_open(struct inode *inode, struct file *file) { int ret; unsigned int cfg; struct seq_tab *p; struct adapter *adap = inode->i_private; ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg); if (ret) return ret; p = seq_open_tab(file, adap->params.cim_la_size / 8, 8 * sizeof(u32), 1, cfg & UPDBGLACAPTPCONLY_F ? cim_la_show_3in1 : cim_la_show); if (!p) return -ENOMEM; ret = t4_cim_read_la(adap, (u32 *)p->data, NULL); if (ret) seq_release_private(inode, file); return ret; } static const struct file_operations cim_la_fops = { .owner = THIS_MODULE, .open = cim_la_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private }; static int cim_pif_la_show(struct seq_file *seq, void *v, int idx) { const u32 *p = v; if (v == SEQ_START_TOKEN) { seq_puts(seq, "Cntl ID DataBE Addr Data\n"); } else if (idx < CIM_PIFLA_SIZE) { seq_printf(seq, " %02x %02x %04x %08x %08x%08x%08x%08x\n", (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff, p[4], p[3], p[2], p[1], p[0]); } else { if (idx == CIM_PIFLA_SIZE) seq_puts(seq, "\nCntl ID Data\n"); seq_printf(seq, " %02x %02x %08x%08x%08x%08x\n", (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]); } return 0; } static int cim_pif_la_open(struct inode *inode, struct file *file) { struct seq_tab *p; struct adapter *adap = inode->i_private; p = seq_open_tab(file, 2 * CIM_PIFLA_SIZE, 6 * sizeof(u32), 1, cim_pif_la_show); if (!p) return -ENOMEM; t4_cim_read_pif_la(adap, (u32 *)p->data, (u32 *)p->data + 6 * CIM_PIFLA_SIZE, NULL, NULL); return 0; } static const struct file_operations cim_pif_la_fops = { .owner = THIS_MODULE, .open = cim_pif_la_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private }; static int cim_ma_la_show(struct seq_file *seq, void *v, int idx) { const u32 *p = v; if (v == SEQ_START_TOKEN) { seq_puts(seq, "\n"); } else if (idx < CIM_MALA_SIZE) { seq_printf(seq, "%02x%08x%08x%08x%08x\n", p[4], p[3], p[2], p[1], p[0]); } else { if (idx == CIM_MALA_SIZE) seq_puts(seq, "\nCnt ID Tag UE Data RDY VLD\n"); seq_printf(seq, "%3u %2u %x %u %08x%08x %u %u\n", (p[2] >> 10) & 0xff, (p[2] >> 7) & 7, (p[2] >> 3) & 0xf, (p[2] >> 2) & 1, (p[1] >> 2) | ((p[2] & 3) << 30), (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1, p[0] & 1); } return 0; } static int cim_ma_la_open(struct inode *inode, struct file *file) { struct seq_tab *p; struct adapter *adap = inode->i_private; p = seq_open_tab(file, 2 * CIM_MALA_SIZE, 5 * sizeof(u32), 1, cim_ma_la_show); if (!p) return -ENOMEM; t4_cim_read_ma_la(adap, (u32 *)p->data, (u32 *)p->data + 5 * CIM_MALA_SIZE); return 
0; } static const struct file_operations cim_ma_la_fops = { .owner = THIS_MODULE, .open = cim_ma_la_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private }; static int cim_qcfg_show(struct seq_file *seq, void *v) { static const char * const qname[] = { "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", "SGE0-RX", "SGE1-RX" }; int i; struct adapter *adap = seq->private; u16 base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; u16 size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; u32 stat[(4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5))]; u16 thres[CIM_NUM_IBQ]; u32 obq_wr_t4[2 * CIM_NUM_OBQ], *wr; u32 obq_wr_t5[2 * CIM_NUM_OBQ_T5]; u32 *p = stat; int cim_num_obq = is_t4(adap->params.chip) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5; i = t4_cim_read(adap, is_t4(adap->params.chip) ? UP_IBQ_0_RDADDR_A : UP_IBQ_0_SHADOW_RDADDR_A, ARRAY_SIZE(stat), stat); if (!i) { if (is_t4(adap->params.chip)) { i = t4_cim_read(adap, UP_OBQ_0_REALADDR_A, ARRAY_SIZE(obq_wr_t4), obq_wr_t4); wr = obq_wr_t4; } else { i = t4_cim_read(adap, UP_OBQ_0_SHADOW_REALADDR_A, ARRAY_SIZE(obq_wr_t5), obq_wr_t5); wr = obq_wr_t5; } } if (i) return i; t4_read_cimq_cfg(adap, base, size, thres); seq_printf(seq, " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail\n"); for (i = 0; i < CIM_NUM_IBQ; i++, p += 4) seq_printf(seq, "%7s %5x %5u %5u %6x %4x %4u %4u %5u\n", qname[i], base[i], size[i], thres[i], IBQRDADDR_G(p[0]), IBQWRADDR_G(p[1]), QUESOPCNT_G(p[3]), QUEEOPCNT_G(p[3]), QUEREMFLITS_G(p[2]) * 16); for ( ; i < CIM_NUM_IBQ + cim_num_obq; i++, p += 4, wr += 2) seq_printf(seq, "%7s %5x %5u %12x %4x %4u %4u %5u\n", qname[i], base[i], size[i], QUERDADDR_G(p[0]) & 0x3fff, wr[0] - base[i], QUESOPCNT_G(p[3]), QUEEOPCNT_G(p[3]), QUEREMFLITS_G(p[2]) * 16); return 0; } static int cim_qcfg_open(struct inode *inode, struct file *file) { return single_open(file, cim_qcfg_show, inode->i_private); } static const struct file_operations cim_qcfg_fops = { .owner = THIS_MODULE, .open = cim_qcfg_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int cimq_show(struct seq_file *seq, void *v, int idx) { const u32 *p = v; seq_printf(seq, "%#06x: %08x %08x %08x %08x\n", idx * 16, p[0], p[1], p[2], p[3]); return 0; } static int cim_ibq_open(struct inode *inode, struct file *file) { int ret; struct seq_tab *p; unsigned int qid = (uintptr_t)inode->i_private & 7; struct adapter *adap = inode->i_private - qid; p = seq_open_tab(file, CIM_IBQ_SIZE, 4 * sizeof(u32), 0, cimq_show); if (!p) return -ENOMEM; ret = t4_read_cim_ibq(adap, qid, (u32 *)p->data, CIM_IBQ_SIZE * 4); if (ret < 0) seq_release_private(inode, file); else ret = 0; return ret; } static const struct file_operations cim_ibq_fops = { .owner = THIS_MODULE, .open = cim_ibq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private }; static int cim_obq_open(struct inode *inode, struct file *file) { int ret; struct seq_tab *p; unsigned int qid = (uintptr_t)inode->i_private & 7; struct adapter *adap = inode->i_private - qid; p = seq_open_tab(file, 6 * CIM_OBQ_SIZE, 4 * sizeof(u32), 0, cimq_show); if (!p) return -ENOMEM; ret = t4_read_cim_obq(adap, qid, (u32 *)p->data, 6 * CIM_OBQ_SIZE * 4); if (ret < 0) { seq_release_private(inode, file); } else { seq_tab_trim(p, ret / 4); ret = 0; } return ret; } static const struct file_operations cim_obq_fops = { .owner = THIS_MODULE, .open = cim_obq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private }; struct field_desc { const char *name; unsigned int start; unsigned int width; }; 
static void field_desc_show(struct seq_file *seq, u64 v, const struct field_desc *p) { char buf[32]; int line_size = 0; while (p->name) { u64 mask = (1ULL << p->width) - 1; int len = scnprintf(buf, sizeof(buf), "%s: %llu", p->name, ((unsigned long long)v >> p->start) & mask); if (line_size + len >= 79) { line_size = 8; seq_puts(seq, "\n "); } seq_printf(seq, "%s ", buf); line_size += len + 1; p++; } seq_putc(seq, '\n'); } static struct field_desc tp_la0[] = { { "RcfOpCodeOut", 60, 4 }, { "State", 56, 4 }, { "WcfState", 52, 4 }, { "RcfOpcSrcOut", 50, 2 }, { "CRxError", 49, 1 }, { "ERxError", 48, 1 }, { "SanityFailed", 47, 1 }, { "SpuriousMsg", 46, 1 }, { "FlushInputMsg", 45, 1 }, { "FlushInputCpl", 44, 1 }, { "RssUpBit", 43, 1 }, { "RssFilterHit", 42, 1 }, { "Tid", 32, 10 }, { "InitTcb", 31, 1 }, { "LineNumber", 24, 7 }, { "Emsg", 23, 1 }, { "EdataOut", 22, 1 }, { "Cmsg", 21, 1 }, { "CdataOut", 20, 1 }, { "EreadPdu", 19, 1 }, { "CreadPdu", 18, 1 }, { "TunnelPkt", 17, 1 }, { "RcfPeerFin", 16, 1 }, { "RcfReasonOut", 12, 4 }, { "TxCchannel", 10, 2 }, { "RcfTxChannel", 8, 2 }, { "RxEchannel", 6, 2 }, { "RcfRxChannel", 5, 1 }, { "RcfDataOutSrdy", 4, 1 }, { "RxDvld", 3, 1 }, { "RxOoDvld", 2, 1 }, { "RxCongestion", 1, 1 }, { "TxCongestion", 0, 1 }, { NULL } }; static int tp_la_show(struct seq_file *seq, void *v, int idx) { const u64 *p = v; field_desc_show(seq, *p, tp_la0); return 0; } static int tp_la_show2(struct seq_file *seq, void *v, int idx) { const u64 *p = v; if (idx) seq_putc(seq, '\n'); field_desc_show(seq, p[0], tp_la0); if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) field_desc_show(seq, p[1], tp_la0); return 0; } static int tp_la_show3(struct seq_file *seq, void *v, int idx) { static struct field_desc tp_la1[] = { { "CplCmdIn", 56, 8 }, { "CplCmdOut", 48, 8 }, { "ESynOut", 47, 1 }, { "EAckOut", 46, 1 }, { "EFinOut", 45, 1 }, { "ERstOut", 44, 1 }, { "SynIn", 43, 1 }, { "AckIn", 42, 1 }, { "FinIn", 41, 1 }, { "RstIn", 40, 1 }, { "DataIn", 39, 1 }, { "DataInVld", 38, 1 }, { "PadIn", 37, 1 }, { "RxBufEmpty", 36, 1 }, { "RxDdp", 35, 1 }, { "RxFbCongestion", 34, 1 }, { "TxFbCongestion", 33, 1 }, { "TxPktSumSrdy", 32, 1 }, { "RcfUlpType", 28, 4 }, { "Eread", 27, 1 }, { "Ebypass", 26, 1 }, { "Esave", 25, 1 }, { "Static0", 24, 1 }, { "Cread", 23, 1 }, { "Cbypass", 22, 1 }, { "Csave", 21, 1 }, { "CPktOut", 20, 1 }, { "RxPagePoolFull", 18, 2 }, { "RxLpbkPkt", 17, 1 }, { "TxLpbkPkt", 16, 1 }, { "RxVfValid", 15, 1 }, { "SynLearned", 14, 1 }, { "SetDelEntry", 13, 1 }, { "SetInvEntry", 12, 1 }, { "CpcmdDvld", 11, 1 }, { "CpcmdSave", 10, 1 }, { "RxPstructsFull", 8, 2 }, { "EpcmdDvld", 7, 1 }, { "EpcmdFlush", 6, 1 }, { "EpcmdTrimPrefix", 5, 1 }, { "EpcmdTrimPostfix", 4, 1 }, { "ERssIp4Pkt", 3, 1 }, { "ERssIp6Pkt", 2, 1 }, { "ERssTcpUdpPkt", 1, 1 }, { "ERssFceFipPkt", 0, 1 }, { NULL } }; static struct field_desc tp_la2[] = { { "CplCmdIn", 56, 8 }, { "MpsVfVld", 55, 1 }, { "MpsPf", 52, 3 }, { "MpsVf", 44, 8 }, { "SynIn", 43, 1 }, { "AckIn", 42, 1 }, { "FinIn", 41, 1 }, { "RstIn", 40, 1 }, { "DataIn", 39, 1 }, { "DataInVld", 38, 1 }, { "PadIn", 37, 1 }, { "RxBufEmpty", 36, 1 }, { "RxDdp", 35, 1 }, { "RxFbCongestion", 34, 1 }, { "TxFbCongestion", 33, 1 }, { "TxPktSumSrdy", 32, 1 }, { "RcfUlpType", 28, 4 }, { "Eread", 27, 1 }, { "Ebypass", 26, 1 }, { "Esave", 25, 1 }, { "Static0", 24, 1 }, { "Cread", 23, 1 }, { "Cbypass", 22, 1 }, { "Csave", 21, 1 }, { "CPktOut", 20, 1 }, { "RxPagePoolFull", 18, 2 }, { "RxLpbkPkt", 17, 1 }, { "TxLpbkPkt", 16, 1 }, { "RxVfValid", 15, 1 }, { "SynLearned", 14, 1 }, { 
"SetDelEntry", 13, 1 }, { "SetInvEntry", 12, 1 }, { "CpcmdDvld", 11, 1 }, { "CpcmdSave", 10, 1 }, { "RxPstructsFull", 8, 2 }, { "EpcmdDvld", 7, 1 }, { "EpcmdFlush", 6, 1 }, { "EpcmdTrimPrefix", 5, 1 }, { "EpcmdTrimPostfix", 4, 1 }, { "ERssIp4Pkt", 3, 1 }, { "ERssIp6Pkt", 2, 1 }, { "ERssTcpUdpPkt", 1, 1 }, { "ERssFceFipPkt", 0, 1 }, { NULL } }; const u64 *p = v; if (idx) seq_putc(seq, '\n'); field_desc_show(seq, p[0], tp_la0); if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) field_desc_show(seq, p[1], (p[0] & BIT(17)) ? tp_la2 : tp_la1); return 0; } static int tp_la_open(struct inode *inode, struct file *file) { struct seq_tab *p; struct adapter *adap = inode->i_private; switch (DBGLAMODE_G(t4_read_reg(adap, TP_DBG_LA_CONFIG_A))) { case 2: p = seq_open_tab(file, TPLA_SIZE / 2, 2 * sizeof(u64), 0, tp_la_show2); break; case 3: p = seq_open_tab(file, TPLA_SIZE / 2, 2 * sizeof(u64), 0, tp_la_show3); break; default: p = seq_open_tab(file, TPLA_SIZE, sizeof(u64), 0, tp_la_show); } if (!p) return -ENOMEM; t4_tp_read_la(adap, (u64 *)p->data, NULL); return 0; } static ssize_t tp_la_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { int err; char s[32]; unsigned long val; size_t size = min(sizeof(s) - 1, count); struct adapter *adap = file_inode(file)->i_private; if (copy_from_user(s, buf, size)) return -EFAULT; s[size] = '\0'; err = kstrtoul(s, 0, &val); if (err) return err; if (val > 0xffff) return -EINVAL; adap->params.tp.la_mask = val << 16; t4_set_reg_field(adap, TP_DBG_LA_CONFIG_A, 0xffff0000U, adap->params.tp.la_mask); return count; } static const struct file_operations tp_la_fops = { .owner = THIS_MODULE, .open = tp_la_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, .write = tp_la_write }; static int ulprx_la_show(struct seq_file *seq, void *v, int idx) { const u32 *p = v; if (v == SEQ_START_TOKEN) seq_puts(seq, " Pcmd Type Message" " Data\n"); else seq_printf(seq, "%08x%08x %4x %08x %08x%08x%08x%08x\n", p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]); return 0; } static int ulprx_la_open(struct inode *inode, struct file *file) { struct seq_tab *p; struct adapter *adap = inode->i_private; p = seq_open_tab(file, ULPRX_LA_SIZE, 8 * sizeof(u32), 1, ulprx_la_show); if (!p) return -ENOMEM; t4_ulprx_read_la(adap, (u32 *)p->data); return 0; } static const struct file_operations ulprx_la_fops = { .owner = THIS_MODULE, .open = ulprx_la_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private }; /* Show the PM memory stats. 
These stats include: * * TX: * Read: memory read operation * Write Bypass: cut-through * Bypass + mem: cut-through and save copy * * RX: * Read: memory read * Write Bypass: cut-through * Flush: payload trim or drop */ static int pm_stats_show(struct seq_file *seq, void *v) { static const char * const tx_pm_stats[] = { "Read:", "Write bypass:", "Write mem:", "Bypass + mem:" }; static const char * const rx_pm_stats[] = { "Read:", "Write bypass:", "Write mem:", "Flush:" }; int i; u32 tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS]; u64 tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS]; struct adapter *adap = seq->private; t4_pmtx_get_stats(adap, tx_cnt, tx_cyc); t4_pmrx_get_stats(adap, rx_cnt, rx_cyc); seq_printf(seq, "%13s %10s %20s\n", " ", "Tx pcmds", "Tx bytes"); for (i = 0; i < PM_NSTATS - 1; i++) seq_printf(seq, "%-13s %10u %20llu\n", tx_pm_stats[i], tx_cnt[i], tx_cyc[i]); seq_printf(seq, "%13s %10s %20s\n", " ", "Rx pcmds", "Rx bytes"); for (i = 0; i < PM_NSTATS - 1; i++) seq_printf(seq, "%-13s %10u %20llu\n", rx_pm_stats[i], rx_cnt[i], rx_cyc[i]); return 0; } static int pm_stats_open(struct inode *inode, struct file *file) { return single_open(file, pm_stats_show, inode->i_private); } static ssize_t pm_stats_clear(struct file *file, const char __user *buf, size_t count, loff_t *pos) { struct adapter *adap = file_inode(file)->i_private; t4_write_reg(adap, PM_RX_STAT_CONFIG_A, 0); t4_write_reg(adap, PM_TX_STAT_CONFIG_A, 0); return count; } static const struct file_operations pm_stats_debugfs_fops = { .owner = THIS_MODULE, .open = pm_stats_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = pm_stats_clear }; static int tx_rate_show(struct seq_file *seq, void *v) { u64 nrate[NCHAN], orate[NCHAN]; struct adapter *adap = seq->private; t4_get_chan_txrate(adap, nrate, orate); if (adap->params.arch.nchan == NCHAN) { seq_puts(seq, " channel 0 channel 1 " "channel 2 channel 3\n"); seq_printf(seq, "NIC B/s: %10llu %10llu %10llu %10llu\n", (unsigned long long)nrate[0], (unsigned long long)nrate[1], (unsigned long long)nrate[2], (unsigned long long)nrate[3]); seq_printf(seq, "Offload B/s: %10llu %10llu %10llu %10llu\n", (unsigned long long)orate[0], (unsigned long long)orate[1], (unsigned long long)orate[2], (unsigned long long)orate[3]); } else { seq_puts(seq, " channel 0 channel 1\n"); seq_printf(seq, "NIC B/s: %10llu %10llu\n", (unsigned long long)nrate[0], (unsigned long long)nrate[1]); seq_printf(seq, "Offload B/s: %10llu %10llu\n", (unsigned long long)orate[0], (unsigned long long)orate[1]); } return 0; } DEFINE_SIMPLE_DEBUGFS_FILE(tx_rate); static int cctrl_tbl_show(struct seq_file *seq, void *v) { static const char * const dec_fac[] = { "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875", "0.9375" }; int i; u16 (*incr)[NCCTRL_WIN]; struct adapter *adap = seq->private; incr = kmalloc(sizeof(*incr) * NMTUS, GFP_KERNEL); if (!incr) return -ENOMEM; t4_read_cong_tbl(adap, incr); for (i = 0; i < NCCTRL_WIN; ++i) { seq_printf(seq, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i, incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i], incr[5][i], incr[6][i], incr[7][i]); seq_printf(seq, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n", incr[8][i], incr[9][i], incr[10][i], incr[11][i], incr[12][i], incr[13][i], incr[14][i], incr[15][i], adap->params.a_wnd[i], dec_fac[adap->params.b_wnd[i]]); } kfree(incr); return 0; } DEFINE_SIMPLE_DEBUGFS_FILE(cctrl_tbl); /* Format a value in a unit that differs from the value's native unit by the * given factor. 
*/ static char *unit_conv(char *buf, size_t len, unsigned int val, unsigned int factor) { unsigned int rem = val % factor; if (rem == 0) { snprintf(buf, len, "%u", val / factor); } else { while (rem % 10 == 0) rem /= 10; snprintf(buf, len, "%u.%u", val / factor, rem); } return buf; } static int clk_show(struct seq_file *seq, void *v) { char buf[32]; struct adapter *adap = seq->private; unsigned int cclk_ps = 1000000000 / adap->params.vpd.cclk; /* in ps */ u32 res = t4_read_reg(adap, TP_TIMER_RESOLUTION_A); unsigned int tre = TIMERRESOLUTION_G(res); unsigned int dack_re = DELAYEDACKRESOLUTION_G(res); unsigned long long tp_tick_us = (cclk_ps << tre) / 1000000; /* in us */ seq_printf(seq, "Core clock period: %s ns\n", unit_conv(buf, sizeof(buf), cclk_ps, 1000)); seq_printf(seq, "TP timer tick: %s us\n", unit_conv(buf, sizeof(buf), (cclk_ps << tre), 1000000)); seq_printf(seq, "TCP timestamp tick: %s us\n", unit_conv(buf, sizeof(buf), (cclk_ps << TIMESTAMPRESOLUTION_G(res)), 1000000)); seq_printf(seq, "DACK tick: %s us\n", unit_conv(buf, sizeof(buf), (cclk_ps << dack_re), 1000000)); seq_printf(seq, "DACK timer: %u us\n", ((cclk_ps << dack_re) / 1000000) * t4_read_reg(adap, TP_DACK_TIMER_A)); seq_printf(seq, "Retransmit min: %llu us\n", tp_tick_us * t4_read_reg(adap, TP_RXT_MIN_A)); seq_printf(seq, "Retransmit max: %llu us\n", tp_tick_us * t4_read_reg(adap, TP_RXT_MAX_A)); seq_printf(seq, "Persist timer min: %llu us\n", tp_tick_us * t4_read_reg(adap, TP_PERS_MIN_A)); seq_printf(seq, "Persist timer max: %llu us\n", tp_tick_us * t4_read_reg(adap, TP_PERS_MAX_A)); seq_printf(seq, "Keepalive idle timer: %llu us\n", tp_tick_us * t4_read_reg(adap, TP_KEEP_IDLE_A)); seq_printf(seq, "Keepalive interval: %llu us\n", tp_tick_us * t4_read_reg(adap, TP_KEEP_INTVL_A)); seq_printf(seq, "Initial SRTT: %llu us\n", tp_tick_us * INITSRTT_G(t4_read_reg(adap, TP_INIT_SRTT_A))); seq_printf(seq, "FINWAIT2 timer: %llu us\n", tp_tick_us * t4_read_reg(adap, TP_FINWAIT2_TIMER_A)); return 0; } DEFINE_SIMPLE_DEBUGFS_FILE(clk); /* Firmware Device Log dump. */ static const char * const devlog_level_strings[] = { [FW_DEVLOG_LEVEL_EMERG] = "EMERG", [FW_DEVLOG_LEVEL_CRIT] = "CRIT", [FW_DEVLOG_LEVEL_ERR] = "ERR", [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", [FW_DEVLOG_LEVEL_INFO] = "INFO", [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" }; static const char * const devlog_facility_strings[] = { [FW_DEVLOG_FACILITY_CORE] = "CORE", [FW_DEVLOG_FACILITY_SCHED] = "SCHED", [FW_DEVLOG_FACILITY_TIMER] = "TIMER", [FW_DEVLOG_FACILITY_RES] = "RES", [FW_DEVLOG_FACILITY_HW] = "HW", [FW_DEVLOG_FACILITY_FLR] = "FLR", [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", [FW_DEVLOG_FACILITY_PHY] = "PHY", [FW_DEVLOG_FACILITY_MAC] = "MAC", [FW_DEVLOG_FACILITY_PORT] = "PORT", [FW_DEVLOG_FACILITY_VI] = "VI", [FW_DEVLOG_FACILITY_FILTER] = "FILTER", [FW_DEVLOG_FACILITY_ACL] = "ACL", [FW_DEVLOG_FACILITY_TM] = "TM", [FW_DEVLOG_FACILITY_QFC] = "QFC", [FW_DEVLOG_FACILITY_DCB] = "DCB", [FW_DEVLOG_FACILITY_ETH] = "ETH", [FW_DEVLOG_FACILITY_OFLD] = "OFLD", [FW_DEVLOG_FACILITY_RI] = "RI", [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", [FW_DEVLOG_FACILITY_FCOE] = "FCOE", [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE" }; /* Information gathered by Device Log Open routine for the display routine. */ struct devlog_info { unsigned int nentries; /* number of entries in log[] */ unsigned int first; /* first [temporal] entry in log[] */ struct fw_devlog_e log[0]; /* Firmware Device Log */ }; /* Dump a Firmaware Device Log entry. 
*/ static int devlog_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_printf(seq, "%10s %15s %8s %8s %s\n", "Seq#", "Tstamp", "Level", "Facility", "Message"); else { struct devlog_info *dinfo = seq->private; int fidx = (uintptr_t)v - 2; unsigned long index; struct fw_devlog_e *e; /* Get a pointer to the log entry to display. Skip unused log * entries. */ index = dinfo->first + fidx; if (index >= dinfo->nentries) index -= dinfo->nentries; e = &dinfo->log[index]; if (e->timestamp == 0) return 0; /* Print the message. This depends on the firmware using * exactly the same formating strings as the kernel so we may * eventually have to put a format interpreter in here ... */ seq_printf(seq, "%10d %15llu %8s %8s ", e->seqno, e->timestamp, (e->level < ARRAY_SIZE(devlog_level_strings) ? devlog_level_strings[e->level] : "UNKNOWN"), (e->facility < ARRAY_SIZE(devlog_facility_strings) ? devlog_facility_strings[e->facility] : "UNKNOWN")); seq_printf(seq, e->fmt, e->params[0], e->params[1], e->params[2], e->params[3], e->params[4], e->params[5], e->params[6], e->params[7]); } return 0; } /* Sequential File Operations for Device Log. */ static inline void *devlog_get_idx(struct devlog_info *dinfo, loff_t pos) { if (pos > dinfo->nentries) return NULL; return (void *)(uintptr_t)(pos + 1); } static void *devlog_start(struct seq_file *seq, loff_t *pos) { struct devlog_info *dinfo = seq->private; return (*pos ? devlog_get_idx(dinfo, *pos) : SEQ_START_TOKEN); } static void *devlog_next(struct seq_file *seq, void *v, loff_t *pos) { struct devlog_info *dinfo = seq->private; (*pos)++; return devlog_get_idx(dinfo, *pos); } static void devlog_stop(struct seq_file *seq, void *v) { } static const struct seq_operations devlog_seq_ops = { .start = devlog_start, .next = devlog_next, .stop = devlog_stop, .show = devlog_show }; /* Set up for reading the firmware's device log. We read the entire log here * and then display it incrementally in devlog_show(). */ static int devlog_open(struct inode *inode, struct file *file) { struct adapter *adap = inode->i_private; struct devlog_params *dparams = &adap->params.devlog; struct devlog_info *dinfo; unsigned int index; u32 fseqno; int ret; /* If we don't know where the log is we can't do anything. */ if (dparams->start == 0) return -ENXIO; /* Allocate the space to read in the firmware's device log and set up * for the iterated call to our display function. */ dinfo = __seq_open_private(file, &devlog_seq_ops, sizeof(*dinfo) + dparams->size); if (!dinfo) return -ENOMEM; /* Record the basic log buffer information and read in the raw log. */ dinfo->nentries = (dparams->size / sizeof(struct fw_devlog_e)); dinfo->first = 0; spin_lock(&adap->win0_lock); ret = t4_memory_rw(adap, adap->params.drv_memwin, dparams->memtype, dparams->start, dparams->size, (__be32 *)dinfo->log, T4_MEMORY_READ); spin_unlock(&adap->win0_lock); if (ret) { seq_release_private(inode, file); return ret; } /* Translate log multi-byte integral elements into host native format * and determine where the first entry in the log is. 
*/ for (fseqno = ~((u32)0), index = 0; index < dinfo->nentries; index++) { struct fw_devlog_e *e = &dinfo->log[index]; int i; __u32 seqno; if (e->timestamp == 0) continue; e->timestamp = (__force __be64)be64_to_cpu(e->timestamp); seqno = be32_to_cpu(e->seqno); for (i = 0; i < 8; i++) e->params[i] = (__force __be32)be32_to_cpu(e->params[i]); if (seqno < fseqno) { fseqno = seqno; dinfo->first = index; } } return 0; } static const struct file_operations devlog_fops = { .owner = THIS_MODULE, .open = devlog_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private }; static int mbox_show(struct seq_file *seq, void *v) { static const char * const owner[] = { "none", "FW", "driver", "unknown" }; int i; unsigned int mbox = (uintptr_t)seq->private & 7; struct adapter *adap = seq->private - mbox; void __iomem *addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A); unsigned int ctrl_reg = (is_t4(adap->params.chip) ? CIM_PF_MAILBOX_CTRL_A : CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A); void __iomem *ctrl = adap->regs + PF_REG(mbox, ctrl_reg); i = MBOWNER_G(readl(ctrl)); seq_printf(seq, "mailbox owned by %s\n\n", owner[i]); for (i = 0; i < MBOX_LEN; i += 8) seq_printf(seq, "%016llx\n", (unsigned long long)readq(addr + i)); return 0; } static int mbox_open(struct inode *inode, struct file *file) { return single_open(file, mbox_show, inode->i_private); } static ssize_t mbox_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { int i; char c = '\n', s[256]; unsigned long long data[8]; const struct inode *ino; unsigned int mbox; struct adapter *adap; void __iomem *addr; void __iomem *ctrl; if (count > sizeof(s) - 1 || !count) return -EINVAL; if (copy_from_user(s, buf, count)) return -EFAULT; s[count] = '\0'; if (sscanf(s, "%llx %llx %llx %llx %llx %llx %llx %llx%c", &data[0], &data[1], &data[2], &data[3], &data[4], &data[5], &data[6], &data[7], &c) < 8 || c != '\n') return -EINVAL; ino = file_inode(file); mbox = (uintptr_t)ino->i_private & 7; adap = ino->i_private - mbox; addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A); ctrl = addr + MBOX_LEN; if (MBOWNER_G(readl(ctrl)) != X_MBOWNER_PL) return -EBUSY; for (i = 0; i < 8; i++) writeq(data[i], addr + 8 * i); writel(MBMSGVALID_F | MBOWNER_V(X_MBOWNER_FW), ctrl); return count; } static const struct file_operations mbox_debugfs_fops = { .owner = THIS_MODULE, .open = mbox_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = mbox_write }; static ssize_t flash_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { loff_t pos = *ppos; loff_t avail = file_inode(file)->i_size; struct adapter *adap = file->private_data; if (pos < 0) return -EINVAL; if (pos >= avail) return 0; if (count > avail - pos) count = avail - pos; while (count) { size_t len; int ret, ofst; u8 data[256]; ofst = pos & 3; len = min(count + ofst, sizeof(data)); ret = t4_read_flash(adap, pos - ofst, (len + 3) / 4, (u32 *)data, 1); if (ret) return ret; len -= ofst; if (copy_to_user(buf, data + ofst, len)) return -EFAULT; buf += len; pos += len; count -= len; } count = pos - *ppos; *ppos = pos; return count; } static const struct file_operations flash_debugfs_fops = { .owner = THIS_MODULE, .open = mem_open, .read = flash_read, }; static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask) { *mask = x | y; y = (__force u64)cpu_to_be64(y); memcpy(addr, (char *)&y + 2, ETH_ALEN); } static int mps_tcam_show(struct seq_file *seq, void *v) { struct adapter *adap = seq->private; unsigned int chip_ver = 
CHELSIO_CHIP_VERSION(adap->params.chip); if (v == SEQ_START_TOKEN) { if (adap->params.arch.mps_rplc_size > 128) seq_puts(seq, "Idx Ethernet address Mask " "Vld Ports PF VF " "Replication " " P0 P1 P2 P3 ML\n"); else seq_puts(seq, "Idx Ethernet address Mask " "Vld Ports PF VF Replication" " P0 P1 P2 P3 ML\n"); } else { u64 mask; u8 addr[ETH_ALEN]; bool replicate; unsigned int idx = (uintptr_t)v - 2; u64 tcamy, tcamx, val; u32 cls_lo, cls_hi, ctl; u32 rplc[8] = {0}; if (chip_ver > CHELSIO_T5) { /* CtlCmdType - 0: Read, 1: Write * CtlTcamSel - 0: TCAM0, 1: TCAM1 * CtlXYBitSel- 0: Y bit, 1: X bit */ /* Read tcamy */ ctl = CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0); if (idx < 256) ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0); else ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1); t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl); val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A); tcamy = DMACH_G(val) << 32; tcamy |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A); /* Read tcamx. Change the control param */ ctl |= CTLXYBITSEL_V(1); t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl); val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A); tcamx = DMACH_G(val) << 32; tcamx |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A); } else { tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx)); tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx)); } cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx)); cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx)); if (tcamx & tcamy) { seq_printf(seq, "%3u -\n", idx); goto out; } rplc[0] = rplc[1] = rplc[2] = rplc[3] = 0; if (chip_ver > CHELSIO_T5) replicate = (cls_lo & T6_REPLICATE_F); else replicate = (cls_lo & REPLICATE_F); if (replicate) { struct fw_ldst_cmd ldst_cmd; int ret; struct fw_ldst_mps_rplc mps_rplc; u32 ldst_addrspc; memset(&ldst_cmd, 0, sizeof(ldst_cmd)); ldst_addrspc = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS); ldst_cmd.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F | FW_CMD_READ_F | ldst_addrspc); ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd)); ldst_cmd.u.mps.rplc.fid_idx = htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) | FW_LDST_CMD_IDX_V(idx)); ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd), &ldst_cmd); if (ret) dev_warn(adap->pdev_dev, "Can't read MPS " "replication map for idx %d: %d\n", idx, -ret); else { mps_rplc = ldst_cmd.u.mps.rplc; rplc[0] = ntohl(mps_rplc.rplc31_0); rplc[1] = ntohl(mps_rplc.rplc63_32); rplc[2] = ntohl(mps_rplc.rplc95_64); rplc[3] = ntohl(mps_rplc.rplc127_96); if (adap->params.arch.mps_rplc_size > 128) { rplc[4] = ntohl(mps_rplc.rplc159_128); rplc[5] = ntohl(mps_rplc.rplc191_160); rplc[6] = ntohl(mps_rplc.rplc223_192); rplc[7] = ntohl(mps_rplc.rplc255_224); } } } tcamxy2valmask(tcamx, tcamy, addr, &mask); if (chip_ver > CHELSIO_T5) seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x " "%012llx%3c %#x%4u%4d", idx, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], (unsigned long long)mask, (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N', PORTMAP_G(cls_hi), T6_PF_G(cls_lo), (cls_lo & T6_VF_VALID_F) ? T6_VF_G(cls_lo) : -1); else seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x " "%012llx%3c %#x%4u%4d", idx, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], (unsigned long long)mask, (cls_lo & SRAM_VLD_F) ? 'Y' : 'N', PORTMAP_G(cls_hi), PF_G(cls_lo), (cls_lo & VF_VALID_F) ? 
VF_G(cls_lo) : -1); if (replicate) { if (adap->params.arch.mps_rplc_size > 128) seq_printf(seq, " %08x %08x %08x %08x " "%08x %08x %08x %08x", rplc[7], rplc[6], rplc[5], rplc[4], rplc[3], rplc[2], rplc[1], rplc[0]); else seq_printf(seq, " %08x %08x %08x %08x", rplc[3], rplc[2], rplc[1], rplc[0]); } else { if (adap->params.arch.mps_rplc_size > 128) seq_printf(seq, "%72c", ' '); else seq_printf(seq, "%36c", ' '); } if (chip_ver > CHELSIO_T5) seq_printf(seq, "%4u%3u%3u%3u %#x\n", T6_SRAM_PRIO0_G(cls_lo), T6_SRAM_PRIO1_G(cls_lo), T6_SRAM_PRIO2_G(cls_lo), T6_SRAM_PRIO3_G(cls_lo), (cls_lo >> T6_MULTILISTEN0_S) & 0xf); else seq_printf(seq, "%4u%3u%3u%3u %#x\n", SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo), SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo), (cls_lo >> MULTILISTEN0_S) & 0xf); } out: return 0; } static inline void *mps_tcam_get_idx(struct seq_file *seq, loff_t pos) { struct adapter *adap = seq->private; int max_mac_addr = is_t4(adap->params.chip) ? NUM_MPS_CLS_SRAM_L_INSTANCES : NUM_MPS_T5_CLS_SRAM_L_INSTANCES; return ((pos <= max_mac_addr) ? (void *)(uintptr_t)(pos + 1) : NULL); } static void *mps_tcam_start(struct seq_file *seq, loff_t *pos) { return *pos ? mps_tcam_get_idx(seq, *pos) : SEQ_START_TOKEN; } static void *mps_tcam_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return mps_tcam_get_idx(seq, *pos); } static void mps_tcam_stop(struct seq_file *seq, void *v) { } static const struct seq_operations mps_tcam_seq_ops = { .start = mps_tcam_start, .next = mps_tcam_next, .stop = mps_tcam_stop, .show = mps_tcam_show }; static int mps_tcam_open(struct inode *inode, struct file *file) { int res = seq_open(file, &mps_tcam_seq_ops); if (!res) { struct seq_file *seq = file->private_data; seq->private = inode->i_private; } return res; } static const struct file_operations mps_tcam_debugfs_fops = { .owner = THIS_MODULE, .open = mps_tcam_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; /* Display various sensor information. */ static int sensors_show(struct seq_file *seq, void *v) { struct adapter *adap = seq->private; u32 param[7], val[7]; int ret; /* Note that if the sensors haven't been initialized and turned on * we'll get values of 0, so treat those as "<unknown>" ... */ param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) | FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_TMP)); param[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) | FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_VDD)); ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, param, val); if (ret < 0 || val[0] == 0) seq_puts(seq, "Temperature: <unknown>\n"); else seq_printf(seq, "Temperature: %dC\n", val[0]); if (ret < 0 || val[1] == 0) seq_puts(seq, "Core VDD: <unknown>\n"); else seq_printf(seq, "Core VDD: %dmV\n", val[1]); return 0; } DEFINE_SIMPLE_DEBUGFS_FILE(sensors); #if IS_ENABLED(CONFIG_IPV6) static int clip_tbl_open(struct inode *inode, struct file *file) { return single_open(file, clip_tbl_show, inode->i_private); } static const struct file_operations clip_tbl_debugfs_fops = { .owner = THIS_MODULE, .open = clip_tbl_open, .read = seq_read, .llseek = seq_lseek, .release = single_release }; #endif /*RSS Table. 
*/ static int rss_show(struct seq_file *seq, void *v, int idx) { u16 *entry = v; seq_printf(seq, "%4d: %4u %4u %4u %4u %4u %4u %4u %4u\n", idx * 8, entry[0], entry[1], entry[2], entry[3], entry[4], entry[5], entry[6], entry[7]); return 0; } static int rss_open(struct inode *inode, struct file *file) { int ret; struct seq_tab *p; struct adapter *adap = inode->i_private; p = seq_open_tab(file, RSS_NENTRIES / 8, 8 * sizeof(u16), 0, rss_show); if (!p) return -ENOMEM; ret = t4_read_rss(adap, (u16 *)p->data); if (ret) seq_release_private(inode, file); return ret; } static const struct file_operations rss_debugfs_fops = { .owner = THIS_MODULE, .open = rss_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private }; /* RSS Configuration. */ /* Small utility function to return the strings "yes" or "no" if the supplied * argument is non-zero. */ static const char *yesno(int x) { static const char *yes = "yes"; static const char *no = "no"; return x ? yes : no; } static int rss_config_show(struct seq_file *seq, void *v) { struct adapter *adapter = seq->private; static const char * const keymode[] = { "global", "global and per-VF scramble", "per-PF and per-VF scramble", "per-VF and per-VF scramble", }; u32 rssconf; rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_A); seq_printf(seq, "TP_RSS_CONFIG: %#x\n", rssconf); seq_printf(seq, " Tnl4TupEnIpv6: %3s\n", yesno(rssconf & TNL4TUPENIPV6_F)); seq_printf(seq, " Tnl2TupEnIpv6: %3s\n", yesno(rssconf & TNL2TUPENIPV6_F)); seq_printf(seq, " Tnl4TupEnIpv4: %3s\n", yesno(rssconf & TNL4TUPENIPV4_F)); seq_printf(seq, " Tnl2TupEnIpv4: %3s\n", yesno(rssconf & TNL2TUPENIPV4_F)); seq_printf(seq, " TnlTcpSel: %3s\n", yesno(rssconf & TNLTCPSEL_F)); seq_printf(seq, " TnlIp6Sel: %3s\n", yesno(rssconf & TNLIP6SEL_F)); seq_printf(seq, " TnlVrtSel: %3s\n", yesno(rssconf & TNLVRTSEL_F)); seq_printf(seq, " TnlMapEn: %3s\n", yesno(rssconf & TNLMAPEN_F)); seq_printf(seq, " OfdHashSave: %3s\n", yesno(rssconf & OFDHASHSAVE_F)); seq_printf(seq, " OfdVrtSel: %3s\n", yesno(rssconf & OFDVRTSEL_F)); seq_printf(seq, " OfdMapEn: %3s\n", yesno(rssconf & OFDMAPEN_F)); seq_printf(seq, " OfdLkpEn: %3s\n", yesno(rssconf & OFDLKPEN_F)); seq_printf(seq, " Syn4TupEnIpv6: %3s\n", yesno(rssconf & SYN4TUPENIPV6_F)); seq_printf(seq, " Syn2TupEnIpv6: %3s\n", yesno(rssconf & SYN2TUPENIPV6_F)); seq_printf(seq, " Syn4TupEnIpv4: %3s\n", yesno(rssconf & SYN4TUPENIPV4_F)); seq_printf(seq, " Syn2TupEnIpv4: %3s\n", yesno(rssconf & SYN2TUPENIPV4_F)); seq_printf(seq, " Syn4TupEnIpv6: %3s\n", yesno(rssconf & SYN4TUPENIPV6_F)); seq_printf(seq, " SynIp6Sel: %3s\n", yesno(rssconf & SYNIP6SEL_F)); seq_printf(seq, " SynVrt6Sel: %3s\n", yesno(rssconf & SYNVRTSEL_F)); seq_printf(seq, " SynMapEn: %3s\n", yesno(rssconf & SYNMAPEN_F)); seq_printf(seq, " SynLkpEn: %3s\n", yesno(rssconf & SYNLKPEN_F)); seq_printf(seq, " ChnEn: %3s\n", yesno(rssconf & CHANNELENABLE_F)); seq_printf(seq, " PrtEn: %3s\n", yesno(rssconf & PORTENABLE_F)); seq_printf(seq, " TnlAllLkp: %3s\n", yesno(rssconf & TNLALLLOOKUP_F)); seq_printf(seq, " VrtEn: %3s\n", yesno(rssconf & VIRTENABLE_F)); seq_printf(seq, " CngEn: %3s\n", yesno(rssconf & CONGESTIONENABLE_F)); seq_printf(seq, " HashToeplitz: %3s\n", yesno(rssconf & HASHTOEPLITZ_F)); seq_printf(seq, " Udp4En: %3s\n", yesno(rssconf & UDPENABLE_F)); seq_printf(seq, " Disable: %3s\n", yesno(rssconf & DISABLE_F)); seq_puts(seq, "\n"); rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_TNL_A); seq_printf(seq, "TP_RSS_CONFIG_TNL: %#x\n", rssconf); seq_printf(seq, " MaskSize: %3d\n", 
MASKSIZE_G(rssconf)); seq_printf(seq, " MaskFilter: %3d\n", MASKFILTER_G(rssconf)); if (CHELSIO_CHIP_VERSION(adapter->params.chip) > CHELSIO_T5) { seq_printf(seq, " HashAll: %3s\n", yesno(rssconf & HASHALL_F)); seq_printf(seq, " HashEth: %3s\n", yesno(rssconf & HASHETH_F)); } seq_printf(seq, " UseWireCh: %3s\n", yesno(rssconf & USEWIRECH_F)); seq_puts(seq, "\n"); rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_OFD_A); seq_printf(seq, "TP_RSS_CONFIG_OFD: %#x\n", rssconf); seq_printf(seq, " MaskSize: %3d\n", MASKSIZE_G(rssconf)); seq_printf(seq, " RRCplMapEn: %3s\n", yesno(rssconf & RRCPLMAPEN_F)); seq_printf(seq, " RRCplQueWidth: %3d\n", RRCPLQUEWIDTH_G(rssconf)); seq_puts(seq, "\n"); rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_SYN_A); seq_printf(seq, "TP_RSS_CONFIG_SYN: %#x\n", rssconf); seq_printf(seq, " MaskSize: %3d\n", MASKSIZE_G(rssconf)); seq_printf(seq, " UseWireCh: %3s\n", yesno(rssconf & USEWIRECH_F)); seq_puts(seq, "\n"); rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A); seq_printf(seq, "TP_RSS_CONFIG_VRT: %#x\n", rssconf); if (CHELSIO_CHIP_VERSION(adapter->params.chip) > CHELSIO_T5) { seq_printf(seq, " KeyWrAddrX: %3d\n", KEYWRADDRX_G(rssconf)); seq_printf(seq, " KeyExtend: %3s\n", yesno(rssconf & KEYEXTEND_F)); } seq_printf(seq, " VfRdRg: %3s\n", yesno(rssconf & VFRDRG_F)); seq_printf(seq, " VfRdEn: %3s\n", yesno(rssconf & VFRDEN_F)); seq_printf(seq, " VfPerrEn: %3s\n", yesno(rssconf & VFPERREN_F)); seq_printf(seq, " KeyPerrEn: %3s\n", yesno(rssconf & KEYPERREN_F)); seq_printf(seq, " DisVfVlan: %3s\n", yesno(rssconf & DISABLEVLAN_F)); seq_printf(seq, " EnUpSwt: %3s\n", yesno(rssconf & ENABLEUP0_F)); seq_printf(seq, " HashDelay: %3d\n", HASHDELAY_G(rssconf)); if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) seq_printf(seq, " VfWrAddr: %3d\n", VFWRADDR_G(rssconf)); else seq_printf(seq, " VfWrAddr: %3d\n", T6_VFWRADDR_G(rssconf)); seq_printf(seq, " KeyMode: %s\n", keymode[KEYMODE_G(rssconf)]); seq_printf(seq, " VfWrEn: %3s\n", yesno(rssconf & VFWREN_F)); seq_printf(seq, " KeyWrEn: %3s\n", yesno(rssconf & KEYWREN_F)); seq_printf(seq, " KeyWrAddr: %3d\n", KEYWRADDR_G(rssconf)); seq_puts(seq, "\n"); rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_CNG_A); seq_printf(seq, "TP_RSS_CONFIG_CNG: %#x\n", rssconf); seq_printf(seq, " ChnCount3: %3s\n", yesno(rssconf & CHNCOUNT3_F)); seq_printf(seq, " ChnCount2: %3s\n", yesno(rssconf & CHNCOUNT2_F)); seq_printf(seq, " ChnCount1: %3s\n", yesno(rssconf & CHNCOUNT1_F)); seq_printf(seq, " ChnCount0: %3s\n", yesno(rssconf & CHNCOUNT0_F)); seq_printf(seq, " ChnUndFlow3: %3s\n", yesno(rssconf & CHNUNDFLOW3_F)); seq_printf(seq, " ChnUndFlow2: %3s\n", yesno(rssconf & CHNUNDFLOW2_F)); seq_printf(seq, " ChnUndFlow1: %3s\n", yesno(rssconf & CHNUNDFLOW1_F)); seq_printf(seq, " ChnUndFlow0: %3s\n", yesno(rssconf & CHNUNDFLOW0_F)); seq_printf(seq, " RstChn3: %3s\n", yesno(rssconf & RSTCHN3_F)); seq_printf(seq, " RstChn2: %3s\n", yesno(rssconf & RSTCHN2_F)); seq_printf(seq, " RstChn1: %3s\n", yesno(rssconf & RSTCHN1_F)); seq_printf(seq, " RstChn0: %3s\n", yesno(rssconf & RSTCHN0_F)); seq_printf(seq, " UpdVld: %3s\n", yesno(rssconf & UPDVLD_F)); seq_printf(seq, " Xoff: %3s\n", yesno(rssconf & XOFF_F)); seq_printf(seq, " UpdChn3: %3s\n", yesno(rssconf & UPDCHN3_F)); seq_printf(seq, " UpdChn2: %3s\n", yesno(rssconf & UPDCHN2_F)); seq_printf(seq, " UpdChn1: %3s\n", yesno(rssconf & UPDCHN1_F)); seq_printf(seq, " UpdChn0: %3s\n", yesno(rssconf & UPDCHN0_F)); seq_printf(seq, " Queue: %3d\n", QUEUE_G(rssconf)); return 0; } 
DEFINE_SIMPLE_DEBUGFS_FILE(rss_config); /* RSS Secret Key. */ static int rss_key_show(struct seq_file *seq, void *v) { u32 key[10]; t4_read_rss_key(seq->private, key); seq_printf(seq, "%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x\n", key[9], key[8], key[7], key[6], key[5], key[4], key[3], key[2], key[1], key[0]); return 0; } static int rss_key_open(struct inode *inode, struct file *file) { return single_open(file, rss_key_show, inode->i_private); } static ssize_t rss_key_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { int i, j; u32 key[10]; char s[100], *p; struct adapter *adap = file_inode(file)->i_private; if (count > sizeof(s) - 1) return -EINVAL; if (copy_from_user(s, buf, count)) return -EFAULT; for (i = count; i > 0 && isspace(s[i - 1]); i--) ; s[i] = '\0'; for (p = s, i = 9; i >= 0; i--) { key[i] = 0; for (j = 0; j < 8; j++, p++) { if (!isxdigit(*p)) return -EINVAL; key[i] = (key[i] << 4) | hex2val(*p); } } t4_write_rss_key(adap, key, -1); return count; } static const struct file_operations rss_key_debugfs_fops = { .owner = THIS_MODULE, .open = rss_key_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = rss_key_write }; /* PF RSS Configuration. */ struct rss_pf_conf { u32 rss_pf_map; u32 rss_pf_mask; u32 rss_pf_config; }; static int rss_pf_config_show(struct seq_file *seq, void *v, int idx) { struct rss_pf_conf *pfconf; if (v == SEQ_START_TOKEN) { /* use the 0th entry to dump the PF Map Index Size */ pfconf = seq->private + offsetof(struct seq_tab, data); seq_printf(seq, "PF Map Index Size = %d\n\n", LKPIDXSIZE_G(pfconf->rss_pf_map)); seq_puts(seq, " RSS PF VF Hash Tuple Enable Default\n"); seq_puts(seq, " Enable IPF Mask Mask IPv6 IPv4 UDP Queue\n"); seq_puts(seq, " PF Map Chn Prt Map Size Size Four Two Four Two Four Ch1 Ch0\n"); } else { #define G_PFnLKPIDX(map, n) \ (((map) >> PF1LKPIDX_S*(n)) & PF0LKPIDX_M) #define G_PFnMSKSIZE(mask, n) \ (((mask) >> PF1MSKSIZE_S*(n)) & PF1MSKSIZE_M) pfconf = v; seq_printf(seq, "%3d %3s %3s %3s %3d %3d %3d %3s %3s %3s %3s %3s %3d %3d\n", idx, yesno(pfconf->rss_pf_config & MAPENABLE_F), yesno(pfconf->rss_pf_config & CHNENABLE_F), yesno(pfconf->rss_pf_config & PRTENABLE_F), G_PFnLKPIDX(pfconf->rss_pf_map, idx), G_PFnMSKSIZE(pfconf->rss_pf_mask, idx), IVFWIDTH_G(pfconf->rss_pf_config), yesno(pfconf->rss_pf_config & IP6FOURTUPEN_F), yesno(pfconf->rss_pf_config & IP6TWOTUPEN_F), yesno(pfconf->rss_pf_config & IP4FOURTUPEN_F), yesno(pfconf->rss_pf_config & IP4TWOTUPEN_F), yesno(pfconf->rss_pf_config & UDPFOURTUPEN_F), CH1DEFAULTQUEUE_G(pfconf->rss_pf_config), CH0DEFAULTQUEUE_G(pfconf->rss_pf_config)); #undef G_PFnLKPIDX #undef G_PFnMSKSIZE } return 0; } static int rss_pf_config_open(struct inode *inode, struct file *file) { struct adapter *adapter = inode->i_private; struct seq_tab *p; u32 rss_pf_map, rss_pf_mask; struct rss_pf_conf *pfconf; int pf; p = seq_open_tab(file, 8, sizeof(*pfconf), 1, rss_pf_config_show); if (!p) return -ENOMEM; pfconf = (struct rss_pf_conf *)p->data; rss_pf_map = t4_read_rss_pf_map(adapter); rss_pf_mask = t4_read_rss_pf_mask(adapter); for (pf = 0; pf < 8; pf++) { pfconf[pf].rss_pf_map = rss_pf_map; pfconf[pf].rss_pf_mask = rss_pf_mask; t4_read_rss_pf_config(adapter, pf, &pfconf[pf].rss_pf_config); } return 0; } static const struct file_operations rss_pf_config_debugfs_fops = { .owner = THIS_MODULE, .open = rss_pf_config_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private }; /* VF RSS Configuration. 
*/ struct rss_vf_conf { u32 rss_vf_vfl; u32 rss_vf_vfh; }; static int rss_vf_config_show(struct seq_file *seq, void *v, int idx) { if (v == SEQ_START_TOKEN) { seq_puts(seq, " RSS Hash Tuple Enable\n"); seq_puts(seq, " Enable IVF Dis Enb IPv6 IPv4 UDP Def Secret Key\n"); seq_puts(seq, " VF Chn Prt Map VLAN uP Four Two Four Two Four Que Idx Hash\n"); } else { struct rss_vf_conf *vfconf = v; seq_printf(seq, "%3d %3s %3s %3d %3s %3s %3s %3s %3s %3s %3s %4d %3d %#10x\n", idx, yesno(vfconf->rss_vf_vfh & VFCHNEN_F), yesno(vfconf->rss_vf_vfh & VFPRTEN_F), VFLKPIDX_G(vfconf->rss_vf_vfh), yesno(vfconf->rss_vf_vfh & VFVLNEX_F), yesno(vfconf->rss_vf_vfh & VFUPEN_F), yesno(vfconf->rss_vf_vfh & VFIP4FOURTUPEN_F), yesno(vfconf->rss_vf_vfh & VFIP6TWOTUPEN_F), yesno(vfconf->rss_vf_vfh & VFIP4FOURTUPEN_F), yesno(vfconf->rss_vf_vfh & VFIP4TWOTUPEN_F), yesno(vfconf->rss_vf_vfh & ENABLEUDPHASH_F), DEFAULTQUEUE_G(vfconf->rss_vf_vfh), KEYINDEX_G(vfconf->rss_vf_vfh), vfconf->rss_vf_vfl); } return 0; } static int rss_vf_config_open(struct inode *inode, struct file *file) { struct adapter *adapter = inode->i_private; struct seq_tab *p; struct rss_vf_conf *vfconf; int vf, vfcount = adapter->params.arch.vfcount; p = seq_open_tab(file, vfcount, sizeof(*vfconf), 1, rss_vf_config_show); if (!p) return -ENOMEM; vfconf = (struct rss_vf_conf *)p->data; for (vf = 0; vf < vfcount; vf++) { t4_read_rss_vf_config(adapter, vf, &vfconf[vf].rss_vf_vfl, &vfconf[vf].rss_vf_vfh); } return 0; } static const struct file_operations rss_vf_config_debugfs_fops = { .owner = THIS_MODULE, .open = rss_vf_config_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private }; /** * ethqset2pinfo - return port_info of an Ethernet Queue Set * @adap: the adapter * @qset: Ethernet Queue Set */ static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset) { int pidx; for_each_port(adap, pidx) { struct port_info *pi = adap2pinfo(adap, pidx); if (qset >= pi->first_qset && qset < pi->first_qset + pi->nqsets) return pi; } /* should never happen! */ BUG_ON(1); return NULL; } static int sge_qinfo_show(struct seq_file *seq, void *v) { struct adapter *adap = seq->private; int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4); int toe_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4); int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4); int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4); int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4); int i, r = (uintptr_t)v - 1; int toe_idx = r - eth_entries; int rdma_idx = toe_idx - toe_entries; int ciq_idx = rdma_idx - rdma_entries; int ctrl_idx = ciq_idx - ciq_entries; int fq_idx = ctrl_idx - ctrl_entries; if (r) seq_putc(seq, '\n'); #define S3(fmt_spec, s, v) \ do { \ seq_printf(seq, "%-12s", s); \ for (i = 0; i < n; ++i) \ seq_printf(seq, " %16" fmt_spec, v); \ seq_putc(seq, '\n'); \ } while (0) #define S(s, v) S3("s", s, v) #define T(s, v) S3("u", s, tx[i].v) #define R(s, v) S3("u", s, rx[i].v) if (r < eth_entries) { int base_qset = r * 4; const struct sge_eth_rxq *rx = &adap->sge.ethrxq[base_qset]; const struct sge_eth_txq *tx = &adap->sge.ethtxq[base_qset]; int n = min(4, adap->sge.ethqsets - 4 * r); S("QType:", "Ethernet"); S("Interface:", rx[i].rspq.netdev ? 
rx[i].rspq.netdev->name : "N/A"); T("TxQ ID:", q.cntxt_id); T("TxQ size:", q.size); T("TxQ inuse:", q.in_use); T("TxQ CIDX:", q.cidx); T("TxQ PIDX:", q.pidx); #ifdef CONFIG_CHELSIO_T4_DCB T("DCB Prio:", dcb_prio); S3("u", "DCB PGID:", (ethqset2pinfo(adap, base_qset + i)->dcb.pgid >> 4*(7-tx[i].dcb_prio)) & 0xf); S3("u", "DCB PFC:", (ethqset2pinfo(adap, base_qset + i)->dcb.pfcen >> 1*(7-tx[i].dcb_prio)) & 0x1); #endif R("RspQ ID:", rspq.abs_id); R("RspQ size:", rspq.size); R("RspQE size:", rspq.iqe_len); R("RspQ CIDX:", rspq.cidx); R("RspQ Gen:", rspq.gen); S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); S3("u", "Intr pktcnt:", adap->sge.counter_val[rx[i].rspq.pktcnt_idx]); R("FL ID:", fl.cntxt_id); R("FL size:", fl.size - 8); R("FL pend:", fl.pend_cred); R("FL avail:", fl.avail); R("FL PIDX:", fl.pidx); R("FL CIDX:", fl.cidx); } else if (toe_idx < toe_entries) { const struct sge_ofld_rxq *rx = &adap->sge.ofldrxq[toe_idx * 4]; const struct sge_ofld_txq *tx = &adap->sge.ofldtxq[toe_idx * 4]; int n = min(4, adap->sge.ofldqsets - 4 * toe_idx); S("QType:", "TOE"); T("TxQ ID:", q.cntxt_id); T("TxQ size:", q.size); T("TxQ inuse:", q.in_use); T("TxQ CIDX:", q.cidx); T("TxQ PIDX:", q.pidx); R("RspQ ID:", rspq.abs_id); R("RspQ size:", rspq.size); R("RspQE size:", rspq.iqe_len); R("RspQ CIDX:", rspq.cidx); R("RspQ Gen:", rspq.gen); S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); S3("u", "Intr pktcnt:", adap->sge.counter_val[rx[i].rspq.pktcnt_idx]); R("FL ID:", fl.cntxt_id); R("FL size:", fl.size - 8); R("FL pend:", fl.pend_cred); R("FL avail:", fl.avail); R("FL PIDX:", fl.pidx); R("FL CIDX:", fl.cidx); } else if (rdma_idx < rdma_entries) { const struct sge_ofld_rxq *rx = &adap->sge.rdmarxq[rdma_idx * 4]; int n = min(4, adap->sge.rdmaqs - 4 * rdma_idx); S("QType:", "RDMA-CPL"); S("Interface:", rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A"); R("RspQ ID:", rspq.abs_id); R("RspQ size:", rspq.size); R("RspQE size:", rspq.iqe_len); R("RspQ CIDX:", rspq.cidx); R("RspQ Gen:", rspq.gen); S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); S3("u", "Intr pktcnt:", adap->sge.counter_val[rx[i].rspq.pktcnt_idx]); R("FL ID:", fl.cntxt_id); R("FL size:", fl.size - 8); R("FL pend:", fl.pend_cred); R("FL avail:", fl.avail); R("FL PIDX:", fl.pidx); R("FL CIDX:", fl.cidx); } else if (ciq_idx < ciq_entries) { const struct sge_ofld_rxq *rx = &adap->sge.rdmaciq[ciq_idx * 4]; int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx); S("QType:", "RDMA-CIQ"); S("Interface:", rx[i].rspq.netdev ? 
rx[i].rspq.netdev->name : "N/A"); R("RspQ ID:", rspq.abs_id); R("RspQ size:", rspq.size); R("RspQE size:", rspq.iqe_len); R("RspQ CIDX:", rspq.cidx); R("RspQ Gen:", rspq.gen); S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); S3("u", "Intr pktcnt:", adap->sge.counter_val[rx[i].rspq.pktcnt_idx]); } else if (ctrl_idx < ctrl_entries) { const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4]; int n = min(4, adap->params.nports - 4 * ctrl_idx); S("QType:", "Control"); T("TxQ ID:", q.cntxt_id); T("TxQ size:", q.size); T("TxQ inuse:", q.in_use); T("TxQ CIDX:", q.cidx); T("TxQ PIDX:", q.pidx); } else if (fq_idx == 0) { const struct sge_rspq *evtq = &adap->sge.fw_evtq; seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue"); seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id); seq_printf(seq, "%-12s %16u\n", "RspQ size:", evtq->size); seq_printf(seq, "%-12s %16u\n", "RspQE size:", evtq->iqe_len); seq_printf(seq, "%-12s %16u\n", "RspQ CIDX:", evtq->cidx); seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen); seq_printf(seq, "%-12s %16u\n", "Intr delay:", qtimer_val(adap, evtq)); seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:", adap->sge.counter_val[evtq->pktcnt_idx]); } #undef R #undef T #undef S #undef S3 return 0; } static int sge_queue_entries(const struct adapter *adap) { return DIV_ROUND_UP(adap->sge.ethqsets, 4) + DIV_ROUND_UP(adap->sge.ofldqsets, 4) + DIV_ROUND_UP(adap->sge.rdmaqs, 4) + DIV_ROUND_UP(adap->sge.rdmaciqs, 4) + DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1; } static void *sge_queue_start(struct seq_file *seq, loff_t *pos) { int entries = sge_queue_entries(seq->private); return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL; } static void sge_queue_stop(struct seq_file *seq, void *v) { } static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos) { int entries = sge_queue_entries(seq->private); ++*pos; return *pos < entries ? 
(void *)((uintptr_t)*pos + 1) : NULL; } static const struct seq_operations sge_qinfo_seq_ops = { .start = sge_queue_start, .next = sge_queue_next, .stop = sge_queue_stop, .show = sge_qinfo_show }; static int sge_qinfo_open(struct inode *inode, struct file *file) { int res = seq_open(file, &sge_qinfo_seq_ops); if (!res) { struct seq_file *seq = file->private_data; seq->private = inode->i_private; } return res; } static const struct file_operations sge_qinfo_debugfs_fops = { .owner = THIS_MODULE, .open = sge_qinfo_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; int mem_open(struct inode *inode, struct file *file) { unsigned int mem; struct adapter *adap; file->private_data = inode->i_private; mem = (uintptr_t)file->private_data & 0x3; adap = file->private_data - mem; (void)t4_fwcache(adap, FW_PARAM_DEV_FWCACHE_FLUSH); return 0; } static ssize_t mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { loff_t pos = *ppos; loff_t avail = file_inode(file)->i_size; unsigned int mem = (uintptr_t)file->private_data & 3; struct adapter *adap = file->private_data - mem; __be32 *data; int ret; if (pos < 0) return -EINVAL; if (pos >= avail) return 0; if (count > avail - pos) count = avail - pos; data = t4_alloc_mem(count); if (!data) return -ENOMEM; spin_lock(&adap->win0_lock); ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ); spin_unlock(&adap->win0_lock); if (ret) { t4_free_mem(data); return ret; } ret = copy_to_user(buf, data, count); t4_free_mem(data); if (ret) return -EFAULT; *ppos = pos + count; return count; } static const struct file_operations mem_debugfs_fops = { .owner = THIS_MODULE, .open = simple_open, .read = mem_read, .llseek = default_llseek, }; static void add_debugfs_mem(struct adapter *adap, const char *name, unsigned int idx, unsigned int size_mb) { debugfs_create_file_size(name, S_IRUSR, adap->debugfs_root, (void *)adap + idx, &mem_debugfs_fops, size_mb << 20); } static int blocked_fl_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } static ssize_t blocked_fl_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { int len; const struct adapter *adap = filp->private_data; char *buf; ssize_t size = (adap->sge.egr_sz + 3) / 4 + adap->sge.egr_sz / 32 + 2; /* includes ,/\n/\0 */ buf = kzalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; len = snprintf(buf, size - 1, "%*pb\n", adap->sge.egr_sz, adap->sge.blocked_fl); len += sprintf(buf + len, "\n"); size = simple_read_from_buffer(ubuf, count, ppos, buf, len); t4_free_mem(buf); return size; } static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf, size_t count, loff_t *ppos) { int err; unsigned long *t; struct adapter *adap = filp->private_data; t = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), sizeof(long), GFP_KERNEL); if (!t) return -ENOMEM; err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz); if (err) return err; bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz); t4_free_mem(t); return count; } static const struct file_operations blocked_fl_fops = { .owner = THIS_MODULE, .open = blocked_fl_open, .read = blocked_fl_read, .write = blocked_fl_write, .llseek = generic_file_llseek, }; /* Add an array of Debug FS files. 
*/ void add_debugfs_files(struct adapter *adap, struct t4_debugfs_entry *files, unsigned int nfiles) { int i; /* debugfs support is best effort */ for (i = 0; i < nfiles; i++) debugfs_create_file(files[i].name, files[i].mode, adap->debugfs_root, (void *)adap + files[i].data, files[i].ops); } int t4_setup_debugfs(struct adapter *adap) { int i; u32 size = 0; struct dentry *de; static struct t4_debugfs_entry t4_debugfs_files[] = { { "cim_la", &cim_la_fops, S_IRUSR, 0 }, { "cim_pif_la", &cim_pif_la_fops, S_IRUSR, 0 }, { "cim_ma_la", &cim_ma_la_fops, S_IRUSR, 0 }, { "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 }, { "clk", &clk_debugfs_fops, S_IRUSR, 0 }, { "devlog", &devlog_fops, S_IRUSR, 0 }, { "mbox0", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 0 }, { "mbox1", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 1 }, { "mbox2", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 2 }, { "mbox3", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 3 }, { "mbox4", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 4 }, { "mbox5", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 5 }, { "mbox6", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 6 }, { "mbox7", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 7 }, { "l2t", &t4_l2t_fops, S_IRUSR, 0}, { "mps_tcam", &mps_tcam_debugfs_fops, S_IRUSR, 0 }, { "rss", &rss_debugfs_fops, S_IRUSR, 0 }, { "rss_config", &rss_config_debugfs_fops, S_IRUSR, 0 }, { "rss_key", &rss_key_debugfs_fops, S_IRUSR, 0 }, { "rss_pf_config", &rss_pf_config_debugfs_fops, S_IRUSR, 0 }, { "rss_vf_config", &rss_vf_config_debugfs_fops, S_IRUSR, 0 }, { "sge_qinfo", &sge_qinfo_debugfs_fops, S_IRUSR, 0 }, { "ibq_tp0", &cim_ibq_fops, S_IRUSR, 0 }, { "ibq_tp1", &cim_ibq_fops, S_IRUSR, 1 }, { "ibq_ulp", &cim_ibq_fops, S_IRUSR, 2 }, { "ibq_sge0", &cim_ibq_fops, S_IRUSR, 3 }, { "ibq_sge1", &cim_ibq_fops, S_IRUSR, 4 }, { "ibq_ncsi", &cim_ibq_fops, S_IRUSR, 5 }, { "obq_ulp0", &cim_obq_fops, S_IRUSR, 0 }, { "obq_ulp1", &cim_obq_fops, S_IRUSR, 1 }, { "obq_ulp2", &cim_obq_fops, S_IRUSR, 2 }, { "obq_ulp3", &cim_obq_fops, S_IRUSR, 3 }, { "obq_sge", &cim_obq_fops, S_IRUSR, 4 }, { "obq_ncsi", &cim_obq_fops, S_IRUSR, 5 }, { "tp_la", &tp_la_fops, S_IRUSR, 0 }, { "ulprx_la", &ulprx_la_fops, S_IRUSR, 0 }, { "sensors", &sensors_debugfs_fops, S_IRUSR, 0 }, { "pm_stats", &pm_stats_debugfs_fops, S_IRUSR, 0 }, { "tx_rate", &tx_rate_debugfs_fops, S_IRUSR, 0 }, { "cctrl", &cctrl_tbl_debugfs_fops, S_IRUSR, 0 }, #if IS_ENABLED(CONFIG_IPV6) { "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 }, #endif { "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 }, }; /* Debug FS nodes common to all T5 and later adapters. 
*/ static struct t4_debugfs_entry t5_debugfs_files[] = { { "obq_sge_rx_q0", &cim_obq_fops, S_IRUSR, 6 }, { "obq_sge_rx_q1", &cim_obq_fops, S_IRUSR, 7 }, }; add_debugfs_files(adap, t4_debugfs_files, ARRAY_SIZE(t4_debugfs_files)); if (!is_t4(adap->params.chip)) add_debugfs_files(adap, t5_debugfs_files, ARRAY_SIZE(t5_debugfs_files)); i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); if (i & EDRAM0_ENABLE_F) { size = t4_read_reg(adap, MA_EDRAM0_BAR_A); add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM0_SIZE_G(size)); } if (i & EDRAM1_ENABLE_F) { size = t4_read_reg(adap, MA_EDRAM1_BAR_A); add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM1_SIZE_G(size)); } if (is_t5(adap->params.chip)) { if (i & EXT_MEM0_ENABLE_F) { size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); add_debugfs_mem(adap, "mc0", MEM_MC0, EXT_MEM0_SIZE_G(size)); } if (i & EXT_MEM1_ENABLE_F) { size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); add_debugfs_mem(adap, "mc1", MEM_MC1, EXT_MEM1_SIZE_G(size)); } } else { if (i & EXT_MEM_ENABLE_F) size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A); add_debugfs_mem(adap, "mc", MEM_MC, EXT_MEM_SIZE_G(size)); } de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap, &flash_debugfs_fops, adap->params.sf_size); return 0; }
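/* Added usage sketch (commentary only, not part of the upstream driver):
 * once t4_setup_debugfs() has run, each memory region registered through
 * add_debugfs_mem() shows up as a fixed-size debugfs file whose reads are
 * serviced by mem_read() via t4_memory_rw().  A hypothetical inspection of
 * the first 64 bytes of EDC0 could therefore look like
 *
 *	hexdump -C -n 64 /sys/kernel/debug/cxgb4/<device>/edc0
 *
 * where "<device>" stands for whatever directory name was used when
 * adap->debugfs_root was created (an assumption here, since that code lives
 * outside this file).  Likewise, blocked_fl_write() hands user input to
 * bitmap_parse_user(), so the blocked_fl node expects a hex bitmap string
 * (for example "f" to mark free lists 0-3 as blocked), not a list of queue
 * IDs.
 */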
gpl-2.0
lenovo-a3-dev/kernel_lenovo_a3
mediatek/custom/common/kernel/lcm/nt35582/nt35582.c
20
14883
/***************************************************************************** * Copyright Statement: * -------------------- * This software is protected by Copyright and the information contained * herein is confidential. The software may not be copied and the information * contained herein may not be used or disclosed except with the written * permission of MediaTek Inc. (C) 2008 * * BY OPENING THIS FILE, BUYER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO BUYER ON * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND BUYER AGREES TO LOOK ONLY TO SUCH * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. MEDIATEK SHALL ALSO * NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE RELEASES MADE TO BUYER'S * SPECIFICATION OR TO CONFORM TO A PARTICULAR STANDARD OR OPEN FORUM. * * BUYER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND CUMULATIVE * LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY BUYER TO * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. * * THE TRANSACTION CONTEMPLATED HEREUNDER SHALL BE CONSTRUED IN ACCORDANCE * WITH THE LAWS OF THE STATE OF CALIFORNIA, USA, EXCLUDING ITS CONFLICT OF * LAWS PRINCIPLES. ANY DISPUTES, CONTROVERSIES OR CLAIMS ARISING THEREOF AND * RELATED THERETO SHALL BE SETTLED BY ARBITRATION IN SAN FRANCISCO, CA, UNDER * THE RULES OF THE INTERNATIONAL CHAMBER OF COMMERCE (ICC). 
* *****************************************************************************/ #include <linux/string.h> #if defined(BUILD_UBOOT) #include <asm/arch/mt6573_gpio.h> #else #include <mach/mt6573_gpio.h> #endif #include "lcm_drv.h" // --------------------------------------------------------------------------- // Local Constants // --------------------------------------------------------------------------- #define FRAME_WIDTH (480) #define FRAME_HEIGHT (800) // --------------------------------------------------------------------------- // Local Variables // --------------------------------------------------------------------------- static LCM_UTIL_FUNCS lcm_util = {0}; #define SET_RESET_PIN(v) (lcm_util.set_reset_pin((v))) #define UDELAY(n) (lcm_util.udelay(n)) #define MDELAY(n) (lcm_util.mdelay(n)) /* #define LSA0_GPIO_PIN (GPIO_DISP_LSA0_PIN) #define LSCE_GPIO_PIN (GPIO_DISP_LSCE_PIN) #define LSCK_GPIO_PIN (GPIO_DISP_LSCK_PIN) #define LSDA_GPIO_PIN (GPIO_DISP_LSDA_PIN) */ #define LSA0_GPIO_PIN (GPIO103) #define LSCE_GPIO_PIN (GPIO105) #define LSCK_GPIO_PIN (GPIO102) #define LSDA_GPIO_PIN (GPIO104) // --------------------------------------------------------------------------- // Local Functions // --------------------------------------------------------------------------- /*static void config_gpio(void) { lcm_util.set_gpio_mode(LSA0_GPIO_PIN, GPIO_DISP_LSA0_PIN_M_LSA); lcm_util.set_gpio_mode(LSCE_GPIO_PIN, GPIO_DISP_LSCE_PIN_M_LSCE0B); lcm_util.set_gpio_mode(LSCK_GPIO_PIN, GPIO_DISP_LSCK_PIN_M_LSCK); lcm_util.set_gpio_mode(LSDA_GPIO_PIN, GPIO_DISP_LSDA_PIN_M_SDA); lcm_util.set_gpio_dir(LSA0_GPIO_PIN, GPIO_DIR_OUT); lcm_util.set_gpio_dir(LSCE_GPIO_PIN, GPIO_DIR_OUT); lcm_util.set_gpio_dir(LSCK_GPIO_PIN, GPIO_DIR_OUT); lcm_util.set_gpio_dir(LSDA_GPIO_PIN, GPIO_DIR_OUT); lcm_util.set_gpio_pull_enable(LSA0_GPIO_PIN, GPIO_PULL_DISABLE); lcm_util.set_gpio_pull_enable(LSCE_GPIO_PIN, GPIO_PULL_DISABLE); lcm_util.set_gpio_pull_enable(LSCK_GPIO_PIN, GPIO_PULL_DISABLE); lcm_util.set_gpio_pull_enable(LSDA_GPIO_PIN, GPIO_PULL_DISABLE); } */ static void config_gpio(void) { lcm_util.set_gpio_mode(LSA0_GPIO_PIN, GPIO_MODE_01); lcm_util.set_gpio_mode(LSCE_GPIO_PIN, GPIO_MODE_01); lcm_util.set_gpio_mode(LSCK_GPIO_PIN, GPIO_MODE_01); lcm_util.set_gpio_mode(LSDA_GPIO_PIN, GPIO_MODE_01); lcm_util.set_gpio_dir(LSA0_GPIO_PIN, GPIO_DIR_OUT); lcm_util.set_gpio_dir(LSCE_GPIO_PIN, GPIO_DIR_OUT); lcm_util.set_gpio_dir(LSCK_GPIO_PIN, GPIO_DIR_OUT); lcm_util.set_gpio_dir(LSDA_GPIO_PIN, GPIO_DIR_OUT); lcm_util.set_gpio_pull_enable(LSA0_GPIO_PIN, GPIO_PULL_DISABLE); lcm_util.set_gpio_pull_enable(LSCE_GPIO_PIN, GPIO_PULL_DISABLE); lcm_util.set_gpio_pull_enable(LSCK_GPIO_PIN, GPIO_PULL_DISABLE); lcm_util.set_gpio_pull_enable(LSDA_GPIO_PIN, GPIO_PULL_DISABLE); } // --------------------------------------------------------------------------- // Local Functions // --------------------------------------------------------------------------- static __inline void send_ctrl_cmd(unsigned int cmd) { unsigned char temp1 = (unsigned char)((cmd >> 8) & 0xFF); unsigned char temp2 = (unsigned char)(cmd & 0xFF); lcm_util.send_data(0x2000 | temp1); lcm_util.send_data(0x0000 | temp2); } static __inline void send_data_cmd(unsigned int data) { lcm_util.send_data(0x0004 | data); } static __inline void set_lcm_register(unsigned int regIndex, unsigned int regData) { send_ctrl_cmd(regIndex); send_data_cmd(regData); } static void init_lcm_registers(void) { send_ctrl_cmd(0x1100); MDELAY(200); send_ctrl_cmd(0xC000); send_data_cmd(0x86); 
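	/* Added note: each (send_ctrl_cmd, send_data_cmd) pair below is one
	 * register write.  send_ctrl_cmd() splits the 16-bit register index
	 * into two control bytes tagged 0x2000/0x0000 and send_data_cmd()
	 * tags the value byte with 0x0004, so the pair above could equally be
	 * written with the (apparently unused) helper, e.g.
	 *
	 *	set_lcm_register(0xC000, 0x86);
	 *
	 * The explicit two-call form is kept here to match the rest of this
	 * vendor-supplied init sequence. */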
send_ctrl_cmd(0xC001); send_data_cmd(0x00); send_ctrl_cmd(0xC002); send_data_cmd(0x86); send_ctrl_cmd(0xC003); send_data_cmd(0x00); send_ctrl_cmd(0xC100); send_data_cmd(0x45); send_ctrl_cmd(0xC200); send_data_cmd(0x21); send_ctrl_cmd(0xC202); send_data_cmd(0x02); send_ctrl_cmd(0xB600); send_data_cmd(0x30); send_ctrl_cmd(0xB602); send_data_cmd(0x30); send_ctrl_cmd(0xC700); send_data_cmd(0x8F); send_ctrl_cmd(0xE000); send_data_cmd(0x0E); send_ctrl_cmd(0xE001); send_data_cmd(0x14); send_ctrl_cmd(0xE002); send_data_cmd(0x29); send_ctrl_cmd(0xE003); send_data_cmd(0x3A); send_ctrl_cmd(0xE004); send_data_cmd(0x1D); send_ctrl_cmd(0xE005); send_data_cmd(0x30); send_ctrl_cmd(0xE006); send_data_cmd(0x61); send_ctrl_cmd(0xE007); send_data_cmd(0x3D); send_ctrl_cmd(0xE008); send_data_cmd(0x22); send_ctrl_cmd(0xE009); send_data_cmd(0x2A); send_ctrl_cmd(0xE00A); send_data_cmd(0x87); send_ctrl_cmd(0xE00B); send_data_cmd(0x16); send_ctrl_cmd(0xE00C); send_data_cmd(0x3B); send_ctrl_cmd(0xE00D); send_data_cmd(0x4C); send_ctrl_cmd(0xE00E); send_data_cmd(0x78); send_ctrl_cmd(0xE00F); send_data_cmd(0x96); send_ctrl_cmd(0xE010); send_data_cmd(0x4A); send_ctrl_cmd(0xE011); send_data_cmd(0x4D); send_ctrl_cmd(0xE100); send_data_cmd(0x0E); send_ctrl_cmd(0xE101); send_data_cmd(0x14); send_ctrl_cmd(0xE102); send_data_cmd(0x29); send_ctrl_cmd(0xE103); send_data_cmd(0x3A); send_ctrl_cmd(0xE104); send_data_cmd(0x1D); send_ctrl_cmd(0xE105); send_data_cmd(0x30); send_ctrl_cmd(0xE106); send_data_cmd(0x61); send_ctrl_cmd(0xE107); send_data_cmd(0x3F); send_ctrl_cmd(0xE108); send_data_cmd(0x20); send_ctrl_cmd(0xE109); send_data_cmd(0x26); send_ctrl_cmd(0xE10A); send_data_cmd(0x83); send_ctrl_cmd(0xE10B); send_data_cmd(0x16); send_ctrl_cmd(0xE10C); send_data_cmd(0x3B); send_ctrl_cmd(0xE10D); send_data_cmd(0x4C); send_ctrl_cmd(0xE10E); send_data_cmd(0x78); send_ctrl_cmd(0xE10F); send_data_cmd(0x96); send_ctrl_cmd(0xE110); send_data_cmd(0x4A); send_ctrl_cmd(0xE111); send_data_cmd(0x4D); send_ctrl_cmd(0xE200); send_data_cmd(0x0E); send_ctrl_cmd(0xE201); send_data_cmd(0x14); send_ctrl_cmd(0xE202); send_data_cmd(0x29); send_ctrl_cmd(0xE203); send_data_cmd(0x3A); send_ctrl_cmd(0xE204); send_data_cmd(0x1D); send_ctrl_cmd(0xE205); send_data_cmd(0x30); send_ctrl_cmd(0xE206); send_data_cmd(0x61); send_ctrl_cmd(0xE207); send_data_cmd(0x3D); send_ctrl_cmd(0xE208); send_data_cmd(0x22); send_ctrl_cmd(0xE209); send_data_cmd(0x2A); send_ctrl_cmd(0xE20A); send_data_cmd(0x87); send_ctrl_cmd(0xE20B); send_data_cmd(0x16); send_ctrl_cmd(0xE20C); send_data_cmd(0x3B); send_ctrl_cmd(0xE20D); send_data_cmd(0x4C); send_ctrl_cmd(0xE20E); send_data_cmd(0x78); send_ctrl_cmd(0xE20F); send_data_cmd(0x96); send_ctrl_cmd(0xE210); send_data_cmd(0x4A); send_ctrl_cmd(0xE211); send_data_cmd(0x4D); send_ctrl_cmd(0xE300); send_data_cmd(0x0E); send_ctrl_cmd(0xE301); send_data_cmd(0x14); send_ctrl_cmd(0xE302); send_data_cmd(0x29); send_ctrl_cmd(0xE303); send_data_cmd(0x3A); send_ctrl_cmd(0xE304); send_data_cmd(0x1D); send_ctrl_cmd(0xE305); send_data_cmd(0x30); send_ctrl_cmd(0xE306); send_data_cmd(0x61); send_ctrl_cmd(0xE307); send_data_cmd(0x3F); send_ctrl_cmd(0xE308); send_data_cmd(0x20); send_ctrl_cmd(0xE309); send_data_cmd(0x26); send_ctrl_cmd(0xE30A); send_data_cmd(0x83); send_ctrl_cmd(0xE30B); send_data_cmd(0x16); send_ctrl_cmd(0xE30C); send_data_cmd(0x3B); send_ctrl_cmd(0xE30D); send_data_cmd(0x4C); send_ctrl_cmd(0xE30E); send_data_cmd(0x78); send_ctrl_cmd(0xE30F); send_data_cmd(0x96); send_ctrl_cmd(0xE310); send_data_cmd(0x4A); send_ctrl_cmd(0xE311); 
send_data_cmd(0x4D); send_ctrl_cmd(0xE400); send_data_cmd(0x0E); send_ctrl_cmd(0xE401); send_data_cmd(0x14); send_ctrl_cmd(0xE402); send_data_cmd(0x29); send_ctrl_cmd(0xE403); send_data_cmd(0x3A); send_ctrl_cmd(0xE404); send_data_cmd(0x1D); send_ctrl_cmd(0xE405); send_data_cmd(0x30); send_ctrl_cmd(0xE406); send_data_cmd(0x61); send_ctrl_cmd(0xE407); send_data_cmd(0x3D); send_ctrl_cmd(0xE408); send_data_cmd(0x22); send_ctrl_cmd(0xE409); send_data_cmd(0x2A); send_ctrl_cmd(0xE40A); send_data_cmd(0x87); send_ctrl_cmd(0xE40B); send_data_cmd(0x16); send_ctrl_cmd(0xE40C); send_data_cmd(0x3B); send_ctrl_cmd(0xE40D); send_data_cmd(0x4C); send_ctrl_cmd(0xE40E); send_data_cmd(0x78); send_ctrl_cmd(0xE40F); send_data_cmd(0x96); send_ctrl_cmd(0xE410); send_data_cmd(0x4A); send_ctrl_cmd(0xE411); send_data_cmd(0x4D); send_ctrl_cmd(0xE500); send_data_cmd(0x0E); send_ctrl_cmd(0xE501); send_data_cmd(0x14); send_ctrl_cmd(0xE502); send_data_cmd(0x29); send_ctrl_cmd(0xE503); send_data_cmd(0x3A); send_ctrl_cmd(0xE504); send_data_cmd(0x1D); send_ctrl_cmd(0xE505); send_data_cmd(0x30); send_ctrl_cmd(0xE506); send_data_cmd(0x61); send_ctrl_cmd(0xE507); send_data_cmd(0x3F); send_ctrl_cmd(0xE508); send_data_cmd(0x20); send_ctrl_cmd(0xE509); send_data_cmd(0x26); send_ctrl_cmd(0xE50A); send_data_cmd(0x83); send_ctrl_cmd(0xE50B); send_data_cmd(0x16); send_ctrl_cmd(0xE50C); send_data_cmd(0x3B); send_ctrl_cmd(0xE50D); send_data_cmd(0x4C); send_ctrl_cmd(0xE50E); send_data_cmd(0x78); send_ctrl_cmd(0xE50F); send_data_cmd(0x96); send_ctrl_cmd(0xE510); send_data_cmd(0x4A); send_ctrl_cmd(0xE511); send_data_cmd(0x4D); send_ctrl_cmd(0x2900); MDELAY(200); send_ctrl_cmd(0x2C00); } // --------------------------------------------------------------------------- // LCM Driver Implementations // --------------------------------------------------------------------------- static void lcm_set_util_funcs(const LCM_UTIL_FUNCS *util) { memcpy(&lcm_util, util, sizeof(LCM_UTIL_FUNCS)); } static void lcm_get_params(LCM_PARAMS *params) { memset(params, 0, sizeof(LCM_PARAMS)); params->type = LCM_TYPE_DPI; params->ctrl = LCM_CTRL_SERIAL_DBI; params->width = FRAME_WIDTH; params->height = FRAME_HEIGHT; params->io_select_mode = 0; /* serial host interface configurations */ params->dbi.port = 0; params->dbi.clock_freq = LCM_DBI_CLOCK_FREQ_7M; params->dbi.data_width = LCM_DBI_DATA_WIDTH_8BITS; params->dbi.data_format.color_order = LCM_COLOR_ORDER_RGB; params->dbi.data_format.trans_seq = LCM_DBI_TRANS_SEQ_MSB_FIRST; params->dbi.data_format.padding = LCM_DBI_PADDING_ON_LSB; params->dbi.data_format.format = LCM_DBI_FORMAT_RGB565; params->dbi.data_format.width = LCM_DBI_DATA_WIDTH_16BITS; params->dbi.cpu_write_bits = LCM_DBI_CPU_WRITE_32_BITS; params->dbi.io_driving_current = LCM_DRIVING_CURRENT_2MA; params->dbi.serial.cs_polarity = LCM_POLARITY_RISING; params->dbi.serial.clk_polarity = LCM_POLARITY_RISING; params->dbi.serial.clk_phase = LCM_CLOCK_PHASE_0; params->dbi.serial.is_non_dbi_mode = 1; params->dbi.serial.clock_base = LCM_DBI_CLOCK_FREQ_52M; params->dbi.serial.clock_div = LCM_SERIAL_CLOCK_DIV_16; /* RGB interface configurations */ params->dpi.mipi_pll_clk_ref = 0; params->dpi.mipi_pll_clk_div1 = 50; params->dpi.mipi_pll_clk_div2 = 13; params->dpi.dpi_clk_div = 2; params->dpi.dpi_clk_duty = 1; params->dpi.clk_pol = LCM_POLARITY_FALLING; params->dpi.de_pol = LCM_POLARITY_RISING; params->dpi.vsync_pol = LCM_POLARITY_FALLING; params->dpi.hsync_pol = LCM_POLARITY_FALLING; params->dpi.hsync_pulse_width = 10; params->dpi.hsync_back_porch = 30; 
params->dpi.hsync_front_porch = 30; params->dpi.vsync_pulse_width = 10; params->dpi.vsync_back_porch = 30; params->dpi.vsync_front_porch = 30; params->dpi.format = LCM_DPI_FORMAT_RGB888; params->dpi.rgb_order = LCM_COLOR_ORDER_RGB; params->dpi.is_serial_output = 0; params->dpi.intermediat_buffer_num = 2; params->dpi.io_driving_current = LCM_DRIVING_CURRENT_2MA; } static void lcm_init(void) { SET_RESET_PIN(0); MDELAY(25); SET_RESET_PIN(1); MDELAY(50); config_gpio(); init_lcm_registers(); } static void lcm_suspend(void) { config_gpio(); send_ctrl_cmd(0x2800); send_ctrl_cmd(0x1000); MDELAY(20); } static void lcm_resume(void) { config_gpio(); send_ctrl_cmd(0x1100); MDELAY(200); send_ctrl_cmd(0x2900); } // --------------------------------------------------------------------------- // Get LCM Driver Hooks // --------------------------------------------------------------------------- LCM_DRIVER nt35582_lcm_drv = { .name = "nt35582", .set_util_funcs = lcm_set_util_funcs, .get_params = lcm_get_params, .init = lcm_init, .suspend = lcm_suspend, .resume = lcm_resume, };
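/* Added commentary (not part of the original MediaTek driver), spelling out
 * the command framing and panel timing used above.
 *
 * send_ctrl_cmd(0x1100) expands to lcm_util.send_data(0x2011) followed by
 * lcm_util.send_data(0x0000), i.e. the high and low bytes of the 16-bit
 * register index tagged as control bytes; 0x1100/0x2900/0x2800/0x1000 are
 * assumed to be the usual DCS Sleep Out / Display On / Display Off /
 * Sleep In commands in the controller's 16-bit address form.
 *
 * The DPI timings give a total line of 480 + 10 + 30 + 30 = 550 clocks and
 * a total frame of 800 + 10 + 30 + 30 = 870 lines, i.e. 478500 clocks per
 * frame; assuming the usual ~60 Hz refresh target this implies a pixel
 * clock of roughly 28.7 MHz, which has to be consistent with the
 * mipi_pll_clk_* and dpi_clk_div settings chosen in lcm_get_params(). */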
gpl-2.0
Whatsamelee/NotaTankEmu
src/server/scripts/Northrend/UtgardeKeep/UtgardePinnacle/boss_svala.cpp
20
22234
/* * Copyright (C) 2008-2013 TrinityCore <http://www.trinitycore.org/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "ScriptMgr.h" #include "ScriptedCreature.h" #include "SpellScript.h" #include "SpellAuraEffects.h" #include "utgarde_pinnacle.h" enum Spells { SPELL_SVALA_TRANSFORMING1 = 54140, SPELL_SVALA_TRANSFORMING2 = 54205, SPELL_TRANSFORMING_CHANNEL = 54142, SPELL_CALL_FLAMES = 48258, // caster effect only, triggers event 17841 SPELL_SINSTER_STRIKE = 15667, H_SPELL_SINSTER_STRIKE = 59409, SPELL_RITUAL_PREPARATION = 48267, SPELL_RITUAL_OF_THE_SWORD = 48276, SPELL_RITUAL_STRIKE_TRIGGER = 48331, // triggers 48277 & 59930, needs NPC_RITUAL_TARGET as spell_script_target SPELL_RITUAL_DISARM = 54159, SPELL_RITUAL_STRIKE_EFF_1 = 48277, SPELL_RITUAL_STRIKE_EFF_2 = 59930, SPELL_SUMMONED_VIS = 64446, SPELL_RITUAL_CHANNELER_1 = 48271, SPELL_RITUAL_CHANNELER_2 = 48274, SPELL_RITUAL_CHANNELER_3 = 48275, // Ritual Channeler spells SPELL_PARALYZE = 48278, SPELL_SHADOWS_IN_THE_DARK = 59407, // Scourge Hulk spells SPELL_MIGHTY_BLOW = 48697, SPELL_VOLATILE_INFECTION = 56785, H_SPELL_VOLATILE_INFECTION = 59228 }; enum Yells { // Svala SAY_SVALA_INTRO_0 = 0, // Svala Sorrowgrave SAY_SVALA_INTRO_1 = 0, SAY_SVALA_INTRO_2 = 1, SAY_AGGRO = 2, SAY_SLAY = 3, SAY_DEATH = 4, SAY_SACRIFICE_PLAYER = 5, // Image of Arthas SAY_DIALOG_OF_ARTHAS_1 = 0, SAY_DIALOG_OF_ARTHAS_2 = 1 }; enum Creatures { CREATURE_ARTHAS = 29280, // Image of Arthas CREATURE_SVALA_SORROWGRAVE = 26668, // Svala after transformation CREATURE_SVALA = 29281, // Svala before transformation CREATURE_RITUAL_CHANNELER = 27281, CREATURE_SPECTATOR = 26667, CREATURE_RITUAL_TARGET = 27327, CREATURE_FLAME_BRAZIER = 27273, CREATURE_SCOURGE_HULK = 26555 }; enum Objects { OBJECT_UTGARDE_MIRROR = 191745 }; enum SvalaPhase { IDLE, INTRO, NORMAL, SACRIFICING, SVALADEAD }; #define DATA_INCREDIBLE_HULK 2043 static const float spectatorWP[2][3] = { {296.95f, -312.76f, 86.36f}, {297.69f, -275.81f, 86.36f} }; static Position ArthasPos = { 295.81f, -366.16f, 92.57f, 1.58f }; class boss_svala : public CreatureScript { public: boss_svala() : CreatureScript("boss_svala") { } CreatureAI* GetAI(Creature* creature) const { return new boss_svalaAI (creature); } struct boss_svalaAI : public ScriptedAI { boss_svalaAI(Creature* creature) : ScriptedAI(creature), summons(creature) { instance = creature->GetInstanceScript(); Phase = IDLE; me->ApplySpellImmune(0, IMMUNITY_ID, SPELL_RITUAL_STRIKE_EFF_1, true); me->ApplySpellImmune(0, IMMUNITY_ID, SPELL_RITUAL_STRIKE_EFF_2, true); } InstanceScript* instance; SummonList summons; SvalaPhase Phase; Position pos; float x, y, z; uint32 introTimer; uint8 introPhase; uint8 sacrePhase; TempSummon* arthas; uint64 arthasGUID; uint32 sinsterStrikeTimer; uint32 callFlamesTimer; uint32 sacrificeTimer; bool sacrificed; void Reset() { sacrificed = false; SetCombatMovement(true); summons.DespawnAll(); me->RemoveAllAuras(); if (Phase > NORMAL) Phase = NORMAL; 
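            // Added note: only phases beyond NORMAL (SACRIFICING/SVALADEAD) are
            // pulled back here, so a reset during the ritual resumes ordinary
            // combat; IDLE and INTRO are left untouched, and since the phase is
            // never pushed back below NORMAL the Arthas transformation intro
            // runs at most once per spawn.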
me->SetDisableGravity(Phase == NORMAL); introTimer = 1 * IN_MILLISECONDS; introPhase = 0; arthasGUID = 0; if (instance) { instance->SetData(DATA_SVALA_SORROWGRAVE_EVENT, NOT_STARTED); instance->SetData64(DATA_SACRIFICED_PLAYER, 0); } } void EnterCombat(Unit* /*who*/) { Talk(SAY_AGGRO); sinsterStrikeTimer = 7 * IN_MILLISECONDS; callFlamesTimer = urand(10 * IN_MILLISECONDS, 20 * IN_MILLISECONDS); if (instance) instance->SetData(DATA_SVALA_SORROWGRAVE_EVENT, IN_PROGRESS); } void JustSummoned(Creature* summon) { if (summon->GetEntry() == CREATURE_RITUAL_CHANNELER) summon->CastSpell(summon, SPELL_SUMMONED_VIS, true); summons.Summon(summon); } void SummonedCreatureDespawn(Creature* summon) { summons.Despawn(summon); } void MoveInLineOfSight(Unit* who) { if (!who) return; if (Phase == IDLE && me->IsValidAttackTarget(who) && me->IsWithinDistInMap(who, 40)) { Phase = INTRO; me->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); if (GameObject* mirror = GetClosestGameObjectWithEntry(me, OBJECT_UTGARDE_MIRROR, 100.0f)) mirror->SetGoState(GO_STATE_READY); if (Creature* arthas = me->SummonCreature(CREATURE_ARTHAS, ArthasPos, TEMPSUMMON_MANUAL_DESPAWN)) { arthas->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE | UNIT_FLAG_NOT_SELECTABLE); arthasGUID = arthas->GetGUID(); } } } void KilledUnit(Unit* victim) { if (victim != me) Talk(SAY_SLAY); } void JustDied(Unit* /*killer*/) { if (Phase == SACRIFICING) SetEquipmentSlots(false, EQUIP_UNEQUIP, EQUIP_NO_CHANGE, EQUIP_NO_CHANGE); me->HandleEmoteCommand(EMOTE_ONESHOT_FLYDEATH); summons.DespawnAll(); if (instance) instance->SetData(DATA_SVALA_SORROWGRAVE_EVENT, DONE); Talk(SAY_DEATH); } void SpellHitTarget(Unit* /*target*/, const SpellInfo* spell) { if (spell->Id == SPELL_RITUAL_STRIKE_EFF_1 && Phase != NORMAL && Phase != SVALADEAD) { Phase = NORMAL; SetCombatMovement(true); if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 300.0f, true)) me->GetMotionMaster()->MoveChase(target); } } void UpdateAI(uint32 diff) { if (Phase == IDLE) return; if (Phase == INTRO) { if (introTimer <= diff) { Creature* arthas = Unit::GetCreature(*me, arthasGUID); if (!arthas) return; switch (introPhase) { case 0: Talk(SAY_SVALA_INTRO_0); ++introPhase; introTimer = 8100; break; case 1: arthas->AI()->Talk(SAY_DIALOG_OF_ARTHAS_1); ++introPhase; introTimer = 10000; break; case 2: arthas->CastSpell(me, SPELL_TRANSFORMING_CHANNEL, false); pos.Relocate(me); pos.m_positionZ += 8.0f; me->GetMotionMaster()->MoveTakeoff(0, pos); // spectators flee event if (instance) { std::list<Creature*> lspectatorList; GetCreatureListWithEntryInGrid(lspectatorList, me, CREATURE_SPECTATOR, 100.0f); for (std::list<Creature*>::iterator itr = lspectatorList.begin(); itr != lspectatorList.end(); ++itr) { if ((*itr)->isAlive()) { (*itr)->SetStandState(UNIT_STAND_STATE_STAND); (*itr)->SetWalk(false); (*itr)->GetMotionMaster()->MovePoint(1, spectatorWP[0][0], spectatorWP[0][1], spectatorWP[0][2]); } } } ++introPhase; introTimer = 4200; break; case 3: me->CastSpell(me, SPELL_SVALA_TRANSFORMING1, false); ++introPhase; introTimer = 6200; break; case 4: me->CastSpell(me, SPELL_SVALA_TRANSFORMING2, false); arthas->InterruptNonMeleeSpells(true); me->RemoveAllAuras(); me->UpdateEntry(CREATURE_SVALA_SORROWGRAVE); me->SetFacingToObject(arthas); me->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); ++introPhase; introTimer = 3200; break; case 5: Talk(SAY_SVALA_INTRO_1); ++introPhase; introTimer = 10000; break; case 6: arthas->AI()->Talk(SAY_DIALOG_OF_ARTHAS_2); ++introPhase; introTimer = 7200; break; case 7: 
Talk(SAY_SVALA_INTRO_2); me->SetOrientation(1.58f); me->SendMovementFlagUpdate(); arthas->SetVisible(false); ++introPhase; introTimer = 13800; break; case 8: pos.Relocate(me); pos.m_positionX = me->GetHomePosition().GetPositionX(); pos.m_positionY = me->GetHomePosition().GetPositionY(); pos.m_positionZ = 90.6065f; me->GetMotionMaster()->MoveLand(0, pos); me->SetDisableGravity(false, true); me->SetHover(true); ++introPhase; introTimer = 3000; break; case 9: if (GameObject* mirror = GetClosestGameObjectWithEntry(me, OBJECT_UTGARDE_MIRROR, 100.0f)) mirror->SetGoState(GO_STATE_ACTIVE); me->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); arthas->DespawnOrUnsummon(); arthasGUID = 0; Phase = NORMAL; break; } } else introTimer -= diff; return; } if (Phase == NORMAL) { //Return since we have no target if (!UpdateVictim()) return; if (sinsterStrikeTimer <= diff) { DoCast(me->getVictim(), SPELL_SINSTER_STRIKE); sinsterStrikeTimer = urand(5 * IN_MILLISECONDS, 9 * IN_MILLISECONDS); } else sinsterStrikeTimer -= diff; if (callFlamesTimer <= diff) { if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100.0f, true)) { DoCast(target, SPELL_CALL_FLAMES); callFlamesTimer = urand(10 * IN_MILLISECONDS, 20 * IN_MILLISECONDS); } } else callFlamesTimer -= diff; if (!sacrificed) { if (HealthBelowPct(50)) { if (Unit* sacrificeTarget = SelectTarget(SELECT_TARGET_RANDOM, 0, 80.0f, true)) { if (instance) instance->SetData64(DATA_SACRIFICED_PLAYER, sacrificeTarget->GetGUID()); Talk(SAY_SACRIFICE_PLAYER); DoCast(sacrificeTarget, SPELL_RITUAL_PREPARATION); SetCombatMovement(false); Phase = SACRIFICING; sacrePhase = 0; sacrificeTimer = 1 * IN_MILLISECONDS; DoCast(me, SPELL_RITUAL_OF_THE_SWORD); sacrificed = true; } } } DoMeleeAttackIfReady(); } else //SACRIFICING { if (sacrificeTimer <= diff) { switch (sacrePhase) { case 0: // spawn ritual channelers if (instance) { DoCast(me, SPELL_RITUAL_CHANNELER_1, true); DoCast(me, SPELL_RITUAL_CHANNELER_2, true); DoCast(me, SPELL_RITUAL_CHANNELER_3, true); } ++sacrePhase; sacrificeTimer = 2 * IN_MILLISECONDS; break; case 1: me->StopMoving(); me->GetMotionMaster()->MoveIdle(); me->InterruptNonMeleeSpells(true); DoCast(me, SPELL_RITUAL_STRIKE_TRIGGER, true); ++sacrePhase; sacrificeTimer = 200; break; case 2: DoCast(me, SPELL_RITUAL_DISARM); ++sacrePhase; break; case 3: break; } } else sacrificeTimer -= diff; } } }; }; class npc_ritual_channeler : public CreatureScript { public: npc_ritual_channeler() : CreatureScript("npc_ritual_channeler") { } CreatureAI* GetAI(Creature* creature) const { return new npc_ritual_channelerAI(creature); } struct npc_ritual_channelerAI : public ScriptedAI { npc_ritual_channelerAI(Creature* creature) :ScriptedAI(creature) { instance = creature->GetInstanceScript(); SetCombatMovement(false); } InstanceScript* instance; uint32 paralyzeTimer; void Reset() { paralyzeTimer = 1600; if (instance) if (IsHeroic()) DoCast(me, SPELL_SHADOWS_IN_THE_DARK); } void UpdateAI(uint32 diff) { if (me->HasUnitState(UNIT_STATE_CASTING)) return; if (paralyzeTimer <= diff) { if (instance) if (Unit* victim = me->GetUnit(*me, instance->GetData64(DATA_SACRIFICED_PLAYER))) DoCast(victim, SPELL_PARALYZE, false); paralyzeTimer = 200; } else paralyzeTimer -= diff; } }; }; class npc_spectator : public CreatureScript { public: npc_spectator() : CreatureScript("npc_spectator") { } CreatureAI* GetAI(Creature* creature) const { return new npc_spectatorAI(creature); } struct npc_spectatorAI : public ScriptedAI { npc_spectatorAI(Creature* creature) : ScriptedAI(creature) { } void 
Reset() { } void MovementInform(uint32 motionType, uint32 pointId) { if (motionType == POINT_MOTION_TYPE) { if (pointId == 1) me->GetMotionMaster()->MovePoint(2, spectatorWP[1][0], spectatorWP[1][1], spectatorWP[1][2]); else if (pointId == 2) me->DespawnOrUnsummon(1000); } } }; }; class RitualTargetCheck { public: explicit RitualTargetCheck(Unit* _caster) : caster(_caster) { } bool operator() (WorldObject* unit) const { if (InstanceScript* instance = caster->GetInstanceScript()) if (instance->GetData64(DATA_SACRIFICED_PLAYER) == unit->GetGUID()) return false; return true; } private: Unit* caster; }; class spell_paralyze_pinnacle : public SpellScriptLoader { public: spell_paralyze_pinnacle() : SpellScriptLoader("spell_paralyze_pinnacle") { } class spell_paralyze_pinnacle_SpellScript : public SpellScript { PrepareSpellScript(spell_paralyze_pinnacle_SpellScript); void FilterTargets(std::list<WorldObject*>& unitList) { unitList.remove_if(RitualTargetCheck(GetCaster())); } void Register() { OnObjectAreaTargetSelect += SpellObjectAreaTargetSelectFn(spell_paralyze_pinnacle_SpellScript::FilterTargets, EFFECT_0, TARGET_UNIT_SRC_AREA_ENEMY); } }; SpellScript* GetSpellScript() const { return new spell_paralyze_pinnacle_SpellScript(); } }; class npc_scourge_hulk : public CreatureScript { public: npc_scourge_hulk() : CreatureScript("npc_scourge_hulk") { } struct npc_scourge_hulkAI : public ScriptedAI { npc_scourge_hulkAI(Creature* creature) : ScriptedAI(creature) { } uint32 mightyBlow; uint32 volatileInfection; void Reset() { mightyBlow = urand(4000, 9000); volatileInfection = urand(10000, 14000); killedByRitualStrike = false; } uint32 GetData(uint32 type) const { return type == DATA_INCREDIBLE_HULK ? killedByRitualStrike : 0; } void DamageTaken(Unit* attacker, uint32 &damage) { if (damage >= me->GetHealth() && attacker->GetEntry() == CREATURE_SVALA_SORROWGRAVE) killedByRitualStrike = true; } void UpdateAI(uint32 diff) { if (!UpdateVictim()) return; if (mightyBlow <= diff) { if (Unit* victim = me->getVictim()) if (!victim->HasUnitState(UNIT_STATE_STUNNED)) // Prevent knocking back a ritual player DoCast(victim, SPELL_MIGHTY_BLOW); mightyBlow = urand(12000, 17000); } else mightyBlow -= diff; if (volatileInfection <= diff) { DoCastVictim(SPELL_VOLATILE_INFECTION); volatileInfection = urand(13000, 17000); } else volatileInfection -= diff; DoMeleeAttackIfReady(); } private: bool killedByRitualStrike; }; CreatureAI* GetAI(Creature* creature) const { return new npc_scourge_hulkAI(creature); } }; class achievement_incredible_hulk : public AchievementCriteriaScript { public: achievement_incredible_hulk() : AchievementCriteriaScript("achievement_incredible_hulk") { } bool OnCheck(Player* /*player*/, Unit* target) { return target && target->IsAIEnabled && target->GetAI()->GetData(DATA_INCREDIBLE_HULK); } }; void AddSC_boss_svala() { new boss_svala(); new npc_ritual_channeler(); new npc_spectator(); new spell_paralyze_pinnacle(); new npc_scourge_hulk(); new achievement_incredible_hulk(); }
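/* Reader's note (added commentary, not TrinityCore code): the encounter is a
 * small state machine driven by SvalaPhase.  It stays IDLE until a valid
 * target comes within 40 units, runs INTRO while the scripted Arthas
 * transformation plays out in UpdateAI(), switches to NORMAL for regular
 * combat (Sinister Strike plus Call Flames), and enters SACRIFICING once the
 * boss drops below 50% health and begins the Ritual of the Sword;
 * SpellHitTarget() returns her to NORMAL when the ritual strike lands.  The
 * achievement_incredible_hulk check relies on npc_scourge_hulkAI recording
 * whether the hulk was killed by Svala's own ritual strike. */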
gpl-2.0
sogno/stbgui
lib/dvb/teletext.cpp
20
27154
#include <lib/base/eerror.h> #include <lib/dvb/teletext.h> #include <lib/dvb/idemux.h> #include <lib/gdi/gpixmap.h> // G0 and G2 national option table // see table 33 in ETSI EN 300 706 // use it with (triplet 1 bits 14-11)*(ctrl bits C12-14) unsigned char NationalOptionSubsetsLookup[16*8] = { 1, 4, 11, 5, 3, 8, 0, 9, 7, 4, 11, 5, 3, 1, 0, 1, 1, 4, 11, 5, 3, 8, 12, 1, 1, 1, 1, 1, 1, 10, 1, 9, 16, 4, 2, 6, 17, 18, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, // reserved 1, 1, 1, 1, 1, 1, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1, // reserved 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // reserved 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // reserved 1, 1, 1, 1, 1, 1, 1, 1, // reserved 1, 1, 1, 1, 1, 1, 1, 1, // reserved 1, 1, 1, 1, 1, 1, 1, 1, // reserved 1, 1, 1, 1, 1, 1, 1, 1 // reserved }; unsigned char NationalReplaceMap[128] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 11, 12, 13, 0 }; // national option subsets (UTF8) // see table 36 in ETSI EN 300 706 unsigned int NationalOptionSubsets[13*14] = { 0, 0x0023, 0xc5af, 0xc48d, 0xc5a5, 0xc5be, 0xc3bd, 0xc3ad, 0xc599, 0xc3a9, 0xc3a1, 0xc49b, 0xc3ba, 0xc5a1, // Slovak/Czech 0, 0xc2a3, 0x0024, 0x0040, 0xe28690, 0xc2bd, 0xe28692, 0xe28691, 0x0023, 0x002d, 0xc2bc, 0xc781, 0xc2be, 0xc3b7, // English 0, 0x0023, 0xc3b5, 0xc5A0, 0xc384, 0xc396, 0xc5bd, 0xc39c, 0xc395, 0xc5a1, 0xc3a4, 0xc3b6, 0xc5be, 0xc3bc, // Estonian 0, 0xc3a9, 0xc3af, 0xc3a0, 0xc3ab, 0xc3aa, 0xc3b9, 0xc3ae, 0x0023, 0xc3a8, 0xc3a2, 0xc3b4, 0xc3bb, 0xc3a7, // French 0, 0x0023, 0x0024, 0xc2a7, 0xc384, 0xc396, 0xc39c, 0x005e, 0x005f, 0xcb9a, 0xc3a4, 0xc3b6, 0xc3bc, 0xc39f, // German 0, 0xc2a3, 0x0024, 0xc3a9, 0xcb9a, 0xc3a7, 0xe28692, 0xe28691, 0x0023, 0xc3b9, 0xc3a0, 0xc3b2, 0xc3a8, 0xc3ac, // Italian 0, 0x0023, 0x0024, 0xc5a0, 0xc497, 0xc8a9, 0xc5bd, 0xc48d, 0xc5ab, 0xc5a1, 0xc485, 0xc5b3, 0xc5be, 0xc4af/*FIXMEE*/, // Lithuanian/Lettish 0, 0x0023, 0xc584, 0xc485, 0xc5bb, 0xc59a, 0xc581, 0xc487, 0xc3b3, 0xc499, 0xc5bc, 0xc59b, 0xc582, 0xc5ba, // Polish 0, 0xc3a7, 0x0024, 0xc2a1, 0xc3a1, 0xc3a9, 0xc3ad, 0xc3b3, 0xc3ba, 0xc2bf, 0xc3bc, 0xc3b1, 0xc3a8, 0xc3a0, // Spanish/Portuguese 0, 0x0023, 0xc2a4, 0xc5a2, 0xc382, 0xc59e, 0xc78d, 0xc38e, 0xc4b1, 0xc5a3, 0xc3a2, 0xc59f, 0xc78e, 0xc3ae, // Rumanian 0, 0x0023, 0xc38b, 0xc48c, 0xc486, 0xc5bd, 0xc490, 0xc5a0, 0xc3ab, 0xc48d, 0xc487, 0xc5be, 0xc491, 0xc5a1, // Slovenian/Serbian/Croation 0, 0x0023, 0xc2a4, 0xc389, 0xc384, 0xc396, 0xc385, 0xc39c, 0x005f, 0xc3a9, 0xc3a4, 0xc3b6, 0xc3a5, 0xc3bc, // Finnish/Hungarian/Swedish 0, 0xee8080/*FIXME*/, 0xc7a7, 0xc4b0, 0xc59e, 0xc396, 0xc387, 0xc39c, 0xc7a6, 0xc4b1, 0xc59f, 0xc3b6, 0xc3a7, 0xc3bc // Turkish }; const char * const eDVBTeletextParser::my_country_codes[] = { "und", "eng", "ger", "swe", "fin", "hun", "ita", "fra", "por", "spa", "cze", "slk", "pol", "tur", "srp", "hrv", "slv", "rom", "est", "lav", "lit", "dan", "nor", "rus", "ukr", "und", "", "", "", "", "", "" }; unsigned char country_lookup[] = { 255, 1, 4, 11, 11, 11, 5, 4, 8, 8, 0, 0, 7, 12, 10, 10, 10, 9, 2, 6, 6, 11, 11, 17, 18, 255, 255, 255, 255, 255, 255, 255 }; unsigned short diacr_upper_cmap[26*15] = { 0xc380, 0xc381, 0xc382, 0xc383, 0xc480, 0xc482, 0x0000, 0xc384, 0x0000, 0xc385, 0x0000, 0x0000, 0x0000, 0xc484, 
0xc482, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc486, 0xc488, 0x0000, 0x0000, 0xc48c, 0xc48a, 0x0000, 0x0000, 0x0000, 0xc387, 0x0000, 0x0000, 0x0000, 0xc48c, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc48e, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc48e, 0xc388, 0xc389, 0xc38a, 0x0000, 0xc492, 0xc494, 0xc496, 0xc38b, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc498, 0xc49a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc49c, 0x0000, 0x0000, 0xc49e, 0xc4a0, 0x0000, 0x0000, 0x0000, 0xc4a2, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc4a4, 0x0000, 0xc4a6, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc38c, 0xc38d, 0xc38e, 0xc4a8, 0xc4aa, 0xc4ac, 0xc4b0, 0xc38f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc4ae, 0xc4ac, 0x0000, 0x0000, 0xc4b4, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc4b6, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc4b9, 0x0000, 0x0000, 0x0000, 0x0000, 0xc4bf, 0x0000, 0x0000, 0x0000, 0xc4bb, 0x0000, 0x0000, 0x0000, 0xc4bd, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc583, 0x0000, 0xc391, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc585, 0x0000, 0x0000, 0x0000, 0xc587, 0xc392, 0xc393, 0xc394, 0xc395, 0xc58c, 0xc58e, 0x0000, 0xc396, 0x0000, 0x0000, 0x0000, 0x0000, 0xc590, 0x0000, 0xc58e, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc594, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc596, 0x0000, 0x0000, 0x0000, 0xc598, 0x0000, 0xc59a, 0xc59c, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc59e, 0x0000, 0x0000, 0x0000, 0xc5a0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc5a2, 0x0000, 0x0000, 0x0000, 0xc5a4, 0xc399, 0xc39a, 0xc39b, 0xc5a8, 0xc5aa, 0xc5ac, 0x0000, 0xc39c, 0x0000, 0xc5ae, 0x0000, 0x0000, 0xc5b0, 0xc5b2, 0xc5ac, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc5b4, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc39d, 0xc5b6, 0x0000, 0x0000, 0x0000, 0x0000, 0xc5b8, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc5b9, 0x0000, 0x0000, 0x0000, 0x0000, 0xc5bb, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc5bd }; unsigned short diacr_lower_cmap[26*15] = { 0xc3a0, 0xc3a1, 0xc3a2, 0xc3a3, 0xc481, 0xc483, 0x0000, 0xc3a4, 0x0000, 0xc3a5, 0x0000, 0x0000, 0x0000, 0xc485, 0xc483, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc487, 0xc489, 0x0000, 0x0000, 0xc48d, 0xc48b, 0x0000, 0x0000, 0x0000, 0xc3a7, 0x0000, 0x0000, 0x0000, 0xc48d, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc48f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc48f, 0xc3a8, 0xc3a9, 
0xc3aa, 0x0000, 0xc493, 0xc495, 0xc497, 0xc3ab, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc499, 0xc49b, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc49d, 0x0000, 0x0000, 0xc49f, 0xc4a1, 0x0000, 0x0000, 0x0000, 0xc4a3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc4a5, 0x0000, 0xc4a7, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc3ac, 0xc3ad, 0xc3ae, 0xc4a9, 0xc4ab, 0xc4ad, 0xc4b1, 0xc3af, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc4af, 0xc4ad, 0x0000, 0x0000, 0xc4b5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc4b7, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc4ba, 0x0000, 0x0000, 0x0000, 0x0000, 0xc580, 0x0000, 0x0000, 0x0000, 0xc4bc, 0x0000, 0x0000, 0x0000, 0xc4be, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc584, 0x0000, 0xc3b1, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc586, 0x0000, 0x0000, 0x0000, 0xc588, 0xc3b2, 0xc3b3, 0xc3b4, 0xc3b5, 0xc58d, 0xc58f, 0x0000, 0xc3b6, 0x0000, 0x0000, 0x0000, 0x0000, 0xc591, 0x0000, 0xc58f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc595, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc597, 0x0000, 0x0000, 0x0000, 0xc599, 0x0000, 0xc59b, 0xc59d, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc59f, 0x0000, 0x0000, 0x0000, 0xc5a1, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc5a3, 0x0000, 0x0000, 0x0000, 0xc5a5, 0xc3b9, 0xc3ba, 0xc3bb, 0xc5a9, 0xc5ab, 0xc5ad, 0x0000, 0xc3bc, 0x0000, 0xc5af, 0x0000, 0x0000, 0xc5b1, 0xc5b3, 0xc5ad, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc5b5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc3bd, 0xc5b7, 0x0000, 0x0000, 0x0000, 0x0000, 0xc3bf, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc5ba, 0x0000, 0x0000, 0x0000, 0x0000, 0xc5bc, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xc5be }; unsigned int Latin_G2_set[6*16] = { 0x0020, 0xc2a1, 0xc2a2, 0xc2a3, 0x0024, 0xc2a5, 0x0023, 0xc2a7, 0xc2a4, 0xc2b4, 0x0022, 0xc2ab, 0x003c, 0x005e, 0x003d, 0x0076, 0xc2b0, 0xc2b1, 0xc2b2, 0xc2b3, 0xc397, 0xc2b5, 0xc2b6, 0xc2b7, 0xc3b7, 0xc2b4, 0x0022, 0xc2bb, 0xc2bc, 0xc2bd, 0xc2be, 0xc2bf, 0x0020, 0x0060, 0xc2b4, 0xcb86, 0x007e, 0xcb89, 0xcb98, 0xcb99, 0xcc88, 0x002e, 0xcb9a, 0x0020, 0x005f, 0x0022, 0x0020, 0xcb98, 0x002d, 0xc2b9, 0xc2ae, 0xc2a9, 0xc4a2, 0x002a, 0xc2ac, 0xc0b0, 0xceb1, 0x0020, 0x0020, 0x0020, 0x002a, 0x002a, 0x002a, 0x002a, 0xcea9, 0xc386, 0xc490, 0x0061, 0xc4a6, 0x0020, 0xc4b2, 0xc4bf, 0xc581, 0xc398, 0xc592, 0x006f, 0xc39e, 0xc5a6, 0xc58a, 0xc589, 0xc4b8, 0xc3a6, 0xc491, 0xc48f, 0xc4a7, 0xc4b1, 0xc4b3, 0xc580, 0xc582, 0xc3b8, 0xc593, 0xc39f, 0xc3be, 0xc5a7, 0xc58b, 0x0020, }; unsigned int Cyrillic_1_set[6*16] = { 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 
0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0037, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, 0xd0a7, 0xd090, 0xd091, 0xd0a6, 0xd094, 0xd095, 0xd0a4, 0xd093, 0xd0a5, 0xd098, 0xd088, 0xd09a, 0xd09b, 0xd09c, 0xd09d, 0xd09e, 0xd09f, 0xd08c, 0xd0a0, 0xd0a1, 0xd0a2, 0xd0a3, 0xd092, 0xd083, 0xd089, 0xd08a, 0xd097, 0xd08b, 0xd096, 0xd082, 0xd0a8, 0xd08f, 0xd187, 0xd0b0, 0xd0b1, 0xd186, 0xd0b4, 0xd0b5, 0xd0b4, 0xd0b3, 0xd185, 0xd0b8, 0xd198, 0xd0ba, 0xd0bb, 0xd0bc, 0xd0bd, 0xd0be, 0xd0bf, 0xd19c, 0xd180, 0xd181, 0xd182, 0xd183, 0xd0b2, 0xd193, 0xd199, 0xd19a, 0xd0b7, 0xd19b, 0xd0b6, 0xd192, 0xd188, 0x007f, }; unsigned int Cyrillic_2_set[6*16] = { 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0xd18b, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0037, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, 0xd0ae, 0xd090, 0xd091, 0xd0a6, 0xd094, 0xd095, 0xd0a4, 0xd093, 0xd0a5, 0xd098, 0xd099, 0xd09a, 0xd09b, 0xd09c, 0xd09d, 0xd09e, 0xd09f, 0xd0af, 0xd0a0, 0xd0a1, 0xd0a2, 0xd0a3, 0xd096, 0xd092, 0xd0ac, 0xd0aa, 0xd097, 0xd0a8, 0xd0ad, 0xd0a9, 0xd0a7, 0xd0ab, 0xd18e, 0xd0b0, 0xd0b1, 0xd186, 0xd0b4, 0xd0b5, 0xd0b4, 0xd0b3, 0xd185, 0xd0b8, 0xd0b9, 0xd0ba, 0xd0bb, 0xd0bc, 0xd0bd, 0xd0be, 0xd0bf, 0xd18f, 0xd180, 0xd181, 0xd182, 0xd183, 0xd0b6, 0xd0b2, 0xd18c, 0xd18a, 0xd0b7, 0xd188, 0xd18d, 0xd189, 0xd187, 0x007f, }; unsigned int Cyrillic_3_set[6*16] = { 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0xd197, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0037, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, 0xd0ae, 0xd090, 0xd091, 0xd0a6, 0xd094, 0xd095, 0xd0a4, 0xd093, 0xd0a5, 0xd098, 0xd099, 0xd09a, 0xd09b, 0xd09c, 0xd09d, 0xd09e, 0xd09f, 0xd0af, 0xd0a0, 0xd0a1, 0xd0a2, 0xd0a3, 0xd096, 0xd092, 0xd0ac, 0xd0aa, 0xd097, 0xd0a8, 0xd0ad, 0xd0a9, 0xd0a7, 0xd087, 0xd18e, 0xd0b0, 0xd0b1, 0xd186, 0xd0b4, 0xd0b5, 0xd0b4, 0xd0b3, 0xd185, 0xd0b8, 0xd0b9, 0xd0ba, 0xd0bb, 0xd0bc, 0xd0bd, 0xd0be, 0xd0bf, 0xd18f, 0xd180, 0xd181, 0xd182, 0xd183, 0xd0b6, 0xd0b2, 0xd18c, 0xd18a, 0xd0b7, 0xd188, 0xd18d, 0xd189, 0xd187, 0x007f, }; // This is a very simple en300 706 telext decoder. // It can only decode a single page at a time, thus it's only used // for subtitles. And it ONLY support LATIN Charsets yet! DEFINE_REF(eDVBTeletextParser); /* we asumme error free transmission! */ static inline unsigned char decode_odd_parity(unsigned char *b) { int i; unsigned char res = 0; for (i=0; i<7; ++i) if (*b & (0x80 >> i)) res |= 1<<i; return res; } static inline unsigned char decode_hamming_84(unsigned char *b) { return ((*b << 3) & 8) | ((*b ) & 4) | ((*b >> 3) & 2) | ((*b >> 6) & 1); } static inline unsigned long decode_hamming_2418(unsigned char *b) { static const unsigned char rev[16] = { 0x00,0x08,0x04,0x0c, 0x02,0x0a,0x06,0x0e, 0x01,0x09,0x05,0x0d, 0x03,0x0b,0x07,0x0f }; b[0] = rev[b[0] >> 4] | (rev[b[0] & 0xf] << 4); b[1] = rev[b[1] >> 4] | (rev[b[1] & 0xf] << 4); b[2] = rev[b[2] >> 4] | (rev[b[2] & 0xf] << 4); return ((b[0] & 0x04) >> 2) | ((b[0] & 0x70) >> 3) | ((b[1] & 0x7f) << 4) | ((b[2] & 0x7f) << 11); } static int extractPTS(pts_t &pts, unsigned char *pkt) { if (pkt[7] & 0x80) /* PTS present? 
*/ { pts = ((unsigned long long)(pkt[9] & 0xe)) << 29; pts |= ((unsigned long long)(pkt[10] & 0xff)) << 22; pts |= ((unsigned long long)(pkt[11] & 0xfe)) << 14; pts |= ((unsigned long long)(pkt[12] & 0xff)) << 7; pts |= ((unsigned long long)(pkt[13] & 0xfe)) >> 1; return 0; } else return -1; } eDVBTeletextParser::eDVBTeletextParser(iDVBDemux *demux) : m_pid(-1) { eDebug("[eDVBTeletextParser] Starting!"); setStreamID(0xBD); /* as per en 300 472 */ setPageAndMagazine(-1, -1, "und"); if (demux->createPESReader(eApp, m_pes_reader)) eDebug("[eDVBTeletextParser] failed to create teletext subtitle PES reader!"); else { eDebug("[eDVBTeletextParser] created teletext subtitle PES reader!"); m_pes_reader->connectRead(slot(*this, &eDVBTeletextParser::processData), m_read_connection); } } eDVBTeletextParser::~eDVBTeletextParser() { } char *get_bits(int val, int count) { static char buf[33]; memset(buf, 0, 32); if (count < 33) for (int i=0; i < count; ++i) { buf[(count-i)-1]=val&1?'1':'0'; val>>=1; } return buf; } void eDVBTeletextParser::processPESPacket(uint8_t *pkt, int len) { unsigned char *p = pkt; pts_t pts; int have_pts = extractPTS(pts, pkt); //eDebug("[eDVBTeletextParser] PES packet len=%d", len); p += 4; len -= 4; /* start code, already be verified by pes parser */ p += 2; len -= 2; /* length, better use the argument */ p += 3; len -= 3; /* pes header */ p += 0x24; len -= 0x24; /* skip header */ p++; len--; while (len > 2) { p++; /* data_unit_id */ unsigned char data_unit_length = *p++; len -= 2; if (len < data_unit_length) { eDebug("[eDVBTeletextParser] PES data_unit_length(%d) > len(%d)", data_unit_length, len); break; } if (data_unit_length != 44) break; p++; len--; /* line_offset */ unsigned char framing_code = *p++; len--; int magazine_and_packet_address = decode_hamming_84(p++); len--; magazine_and_packet_address |= decode_hamming_84(p++)<<4; len--; unsigned char *data = p; p += 40; len -= 40; if (framing_code != 0xe4) /* no teletxt data */ continue; int M = magazine_and_packet_address & 7, Y = magazine_and_packet_address >> 3; if (Y == 0) /* page header */ { int X = decode_hamming_84(data + 1) * 0x10 + decode_hamming_84(data), S2C4 = decode_hamming_84(data + 3), S4C5C6 = decode_hamming_84(data + 5), C = ((S2C4 & 8) ? (1<<4) : 0) | ((S4C5C6 & 0xC) << 3) | (decode_hamming_84(data + 6) << 7) | (decode_hamming_84(data + 7) << 11), serial_mode = C & (1<<11); /* page on the same magazine? end current page. */ if ((serial_mode || M == m_page_M) && m_page_open) { handlePageEnd(!have_pts, pts); m_page_open = 0; } if ((C & (1<<6)) && (X != 0xFF) && !(C & (1<<5))) /* scan for pages with subtitle bit set */ { eDVBServicePMTHandler::subtitleStream s; s.pid = m_pid; s.subtitling_type = 0x01; // ebu teletext subtitle s.teletext_page_number = X & 0xFF; s.teletext_magazine_number = M & 7; if (m_found_subtitle_pages.find(s) == m_found_subtitle_pages.end()) { m_found_subtitle_pages.insert(s); m_new_subtitle_stream(); } } /* correct page on correct magazine? open page. */ if (M == m_page_M && X == m_page_X) { m_C = C; m_Y = Y; handlePageStart(); m_page_open = 1; m_box_open = 0; handleLine(data + 8, 32); } } else if (Y < 26) // directly displayable packet { /* data for the selected page ? 
*/ if (M == m_page_M && m_page_open) { m_Y = Y; m_box_open = 0; handleLine(data, 40); } } else if (Y == 26 && m_page_open && M == m_page_M) { int display_row=-1, display_column=-1; for (int a = 1; a < 40; a+=3) { int val; if ((val=decode_hamming_2418(data+a)) >= 0) { unsigned char addr = val & 0x3F; unsigned char mode = (val >> 6) & 0x1F; unsigned char data = (val >> 11) & 0x7F; if (addr == 0x3f && mode == 0x1f) // termination marker break; if (addr >= 40) { if (mode == 4) { display_row = addr - 40; continue; } else eDebugNoNewLineStart("[eDVBTeletextParser] ignore unimplemented mode: "); } else //0..39 means column 0..39 { if (display_row != -1) { display_column = addr; if (mode > 15) //char from G0 set w/ diacr. { unsigned int ch=data; if (!mode&0xF) { if (data == 0x2A) ch = '@'; } else { if (ch > 96 && ch < 123) ch = diacr_lower_cmap[(ch-97)*15+(mode&0xF)-1]; else if (ch > 64 && ch < 91) ch = diacr_upper_cmap[(ch-65)*15+(mode&0xF)-1]; } if (ch) m_modifications[(display_row<<16)|display_column] = ch ? ch : data; else /* when data is 0 we set the diacr. mark later on the existing character .. this isn't described in the EN300706.. but i have seen this on "Das Erste" */ m_modifications[(display_row<<16)|display_column] = (mode&0xF); continue; } else if (mode == 15) // char from G2 set { if (data > 0x19) { unsigned int ch=Latin_G2_set[data-0x20]; m_modifications[(display_row<<16)|display_column] = ch; continue; } else eDebugNoNewLineStart("[eDVBTeletextParser] ignore G2 char < 0x20: "); } else eDebugNoNewLineStart("[eDVBTeletextParser] ignore unimplemented: "); } else eDebugNoNewLineStart("[eDVBTeletextParser] row is not selected.. ignore: "); } eDebugNoNewLine("triplet = %08x(%s) address = %02x(%s) mode = %02x(%s) data = %02x(%s)\n", val, get_bits(val, 18), addr, get_bits(addr, 6), mode, get_bits(mode, 5), data, get_bits(data, 7)); } } } else if (Y == 29 && M == m_page_M) { int designation_code = decode_hamming_84(data++); if (designation_code == 0) // 29/0 { m_M29_t1 = decode_hamming_2418(data); m_M29_t2 = decode_hamming_2418(data+3); if ((m_M29_t1 & 0xF) == 0) // format1 m_M29_0_valid = 1; else eDebug("[eDVBTeletextParser] non handled packet M/%d/0 format %d", Y, m_M29_t1 & 0xF); } else eDebug("[eDVBTeletextParser] non handled packet M/%d/%d", Y, designation_code); } else if (m_page_open && M == m_page_M) { int designation_code = decode_hamming_84(data++); if (Y == 28 && designation_code == 0) // 28/0 { m_X28_t1 = decode_hamming_2418(data); m_X28_t2 = decode_hamming_2418(data+3); if ((m_X28_t1 & 0xF) == 0) // format1 m_X28_0_valid = 1; else eDebug("[eDVBTeletextParser] non handled packet X/%d/0 format %d", Y, m_X28_t1 & 0xF); } else eDebug("[eDVBTeletextParser] non handled packet X/%d/%d", Y, designation_code); } } } int eDVBTeletextParser::start(int pid) { m_page_open = 0; eDebug("[eDVBTeletextParser] starting PES reader on pid=%04x", pid); if (m_pes_reader && pid >= 0 && pid < 0x1fff) { m_pid = pid; return m_pes_reader->start(pid); } else return -1; } void eDVBTeletextParser::handlePageStart() { if (m_C & (1<<4)) /* erase flag set */ { m_subtitle_page.clear(); m_modifications.clear(); } } void eDVBTeletextParser::handleLine(unsigned char *data, int len) { m_subtitle_page.clearLine(m_Y); if (!m_Y) /* first line is page header, we don't need that. */ { m_double_height = -1; return; } if (m_double_height == m_Y) { m_double_height = -1; return; } int last_was_white = 1, color = 7; /* start with whitespace. start with color=white. (that's unrelated.) 
*/ static unsigned char out[128]; int outidx = 0, Gtriplet = 0, nat_opts = (m_C & (1<<14) ? 1 : 0) | (m_C & (1<<13) ? 2 : 0) | (m_C & (1<<12) ? 4 : 0), nat_subset,nat_subset_2,second_G0_set=0; if (m_L > 0) nat_subset_2 = country_lookup[m_L]; else nat_subset_2 = NationalOptionSubsetsLookup[Gtriplet*8+nat_opts]; nat_subset = nat_subset_2; if (m_X28_0_valid) { nat_subset = NationalOptionSubsetsLookup[(m_X28_t1 >> 7) & 0x7F]; nat_subset_2 = NationalOptionSubsetsLookup[((m_X28_t1 >> 14) & 0xF) | ((m_X28_t2 & 7) << 4)]; } else if (m_M29_0_valid) { nat_subset = NationalOptionSubsetsLookup[(m_M29_t1 >> 7) & 0x7F]; nat_subset_2 = NationalOptionSubsetsLookup[((m_M29_t1 >> 14) & 0xF) | ((m_M29_t2 & 7) << 4)]; } for (int i=0; i<len; ++i) { unsigned char b = decode_odd_parity(data + i); std::map<int,unsigned int>::iterator it = m_modifications.find((m_Y<<16)|i); if (it != m_modifications.end()) { unsigned int utf8_code = it->second; if (utf8_code < 0x10) { int mode = utf8_code; if (b > 96 && b < 123) utf8_code = diacr_lower_cmap[(b-97)*15+mode-1]; else if (b > 64 && b < 91) utf8_code = diacr_upper_cmap[(b-65)*15+mode-1]; } if (utf8_code > 0xFFFFFF) out[outidx++]=(utf8_code&0xFF000000)>>24; if (utf8_code > 0xFFFF) out[outidx++]=(utf8_code&0xFF0000)>>16; if (utf8_code > 0xFF) out[outidx++]=(utf8_code&0xFF00)>>8; if (utf8_code) out[outidx++]=utf8_code&0xFF; m_modifications.erase(it); continue; } if (b < 0x10) /* spacing attribute */ { if (b < 8) /* colors */ { addSubtitleString(color, std::string((const char*)out, outidx), m_Y); /* new color is split into a new string */ outidx = 0; color = b; } else if (b == 0xd) m_double_height = m_Y + 1; else if (b == 0xa) // close box m_box_open=0; else if (b == 0xb) // open box ++m_box_open; else eDebug("[eDVBTeletextParser] handleLine: ignore %x", b); /* ignore other attributes */ } else if (m_box_open>1) { /* no more than one whitespace, only printable chars */ if (((!last_was_white) || (b != ' ')) && (b >= 0x20)) { int cur_nat_subset = second_G0_set ? nat_subset_2 : nat_subset; unsigned int utf8_code; if (cur_nat_subset < 16) { unsigned char offs = NationalReplaceMap[b]; if (offs) { utf8_code = NationalOptionSubsets[cur_nat_subset*14+offs]; if (utf8_code > 0xFFFFFF) out[outidx++]=(utf8_code&0xFF000000)>>24; if (utf8_code > 0xFFFF) out[outidx++]=(utf8_code&0xFF0000)>>16; if (utf8_code > 0xFF) out[outidx++]=(utf8_code&0xFF00)>>8; out[outidx++]=utf8_code&0xFF; } else out[outidx++] = b; } else { if (cur_nat_subset == 16) utf8_code = Cyrillic_1_set[b-0x20]; else if (cur_nat_subset == 17) utf8_code = Cyrillic_2_set[b-0x20]; else utf8_code = Cyrillic_3_set[b-0x20]; if (utf8_code > 0xFFFFFF) out[outidx++]=(utf8_code&0xFF000000)>>24; if (utf8_code > 0xFFFF) out[outidx++]=(utf8_code&0xFF0000)>>16; if (utf8_code > 0xFF) out[outidx++]=(utf8_code&0xFF00)>>8; out[outidx++]=utf8_code&0xFF; } last_was_white = b == ' '; } else if (b == 0x1b){ // ESC ... 
switch between default G0 and second G0 charset second_G0_set ^= 1; } } } addSubtitleString(color, std::string((const char*)out, outidx), m_Y); } void eDVBTeletextParser::handlePageEnd(int have_pts, const pts_t &pts) { m_subtitle_page.m_have_pts = have_pts; m_subtitle_page.m_pts = pts; m_subtitle_page.m_timeout = 90000 * 20; /* 20s */ if (m_page_X != 0) m_new_subtitle_page(m_subtitle_page); /* send assembled subtitle page to display */ } void eDVBTeletextParser::setPageAndMagazine(int page, int magazine, const char * lang) { m_L = 0; for (m_M29_0_valid=0; m_M29_0_valid < max_id; m_M29_0_valid++) { if (!memcmp(my_country_codes[m_M29_0_valid], lang, 3)) { m_L = m_M29_0_valid; break; } } if (page > 0) eDebug("[eDVBTeletextParser] enable teletext subtitle page %x%02x (%s)%d", magazine, page, lang, m_L); else eDebug("[eDVBTeletextParser] disable teletext subtitles page %x%02x (%s)", magazine, page, lang); m_M29_0_valid = 0; m_X28_0_valid = 0; m_page_M = magazine; /* magazine to look for */ if (magazine != -1) m_page_M &= 7; m_page_X = page; /* page number */ if (page != -1) m_page_X &= 0xFF; } void eDVBTeletextParser::connectNewStream(const Slot0<void> &slot, ePtr<eConnection> &connection) { connection = new eConnection(this, m_new_subtitle_stream.connect(slot)); } void eDVBTeletextParser::connectNewPage(const Slot1<void, const eDVBTeletextSubtitlePage&> &slot, ePtr<eConnection> &connection) { connection = new eConnection(this, m_new_subtitle_page.connect(slot)); } void eDVBTeletextParser::addSubtitleString(int color, std::string string, int source_line) { const gRGB pal[8] = { gRGB(102, 102, 102), gRGB(255, 0, 0), gRGB(0, 255, 0), gRGB(255, 255, 0), gRGB(102, 102, 255), gRGB(255, 0, 255), gRGB(0, 255, 255), gRGB(255, 255, 255) }; if (string.empty()) return; m_subtitle_page.m_elements.push_back(eDVBTeletextSubtitlePageElement(pal[color], string, source_line)); }
gpl-2.0
neykov/chipidea-device-driver
arch/cris/arch-v10/kernel/signal.c
276
13709
/* * linux/arch/cris/kernel/signal.c * * Based on arch/i386/kernel/signal.c by * Copyright (C) 1991, 1992 Linus Torvalds * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson * * * Ideas also taken from arch/arm. * * Copyright (C) 2000-2007 Axis Communications AB * * Authors: Bjorn Wesen (bjornw@axis.com) * */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/ptrace.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <asm/processor.h> #include <asm/ucontext.h> #include <asm/uaccess.h> #include <arch/system.h> #define DEBUG_SIG 0 /* a syscall in Linux/CRIS is a break 13 instruction which is 2 bytes */ /* manipulate regs so that upon return, it will be re-executed */ /* We rely on that pc points to the instruction after "break 13", so the * library must never do strange things like putting it in a delay slot. */ #define RESTART_CRIS_SYS(regs) regs->r10 = regs->orig_r10; regs->irp -= 2; void do_signal(int canrestart, struct pt_regs *regs); /* * Atomically swap in the new signal mask, and wait for a signal. Define * dummy arguments to be able to reach the regs argument. (Note that this * arrangement relies on old_sigset_t occupying one register.) */ int sys_sigsuspend(old_sigset_t mask) { sigset_t blocked; siginitset(&blocked, mask); return sigsuspend(&blocked); } int sys_sigaction(int sig, const struct old_sigaction __user *act, struct old_sigaction *oact) { struct k_sigaction new_ka, old_ka; int ret; if (act) { old_sigset_t mask; if (!access_ok(VERIFY_READ, act, sizeof(*act)) || __get_user(new_ka.sa.sa_handler, &act->sa_handler) || __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || __get_user(new_ka.sa.sa_flags, &act->sa_flags) || __get_user(mask, &act->sa_mask)) return -EFAULT; siginitset(&new_ka.sa.sa_mask, mask); } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); if (!ret && oact) { if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) return -EFAULT; } return ret; } int sys_sigaltstack(const stack_t *uss, stack_t __user *uoss) { return do_sigaltstack(uss, uoss, rdusp()); } /* * Do a signal return; undo the signal stack. */ struct sigframe { struct sigcontext sc; unsigned long extramask[_NSIG_WORDS-1]; unsigned char retcode[8]; /* trampoline code */ }; struct rt_sigframe { struct siginfo *pinfo; void *puc; struct siginfo info; struct ucontext uc; unsigned char retcode[8]; /* trampoline code */ }; static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { unsigned int err = 0; unsigned long old_usp; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; /* restore the regs from &sc->regs (same as sc, since regs is first) * (sc is already checked for VERIFY_READ since the sigframe was * checked in sys_sigreturn previously) */ if (__copy_from_user(regs, sc, sizeof(struct pt_regs))) goto badframe; /* make sure the U-flag is set so user-mode cannot fool us */ regs->dccr |= 1 << 8; /* restore the old USP as it was before we stacked the sc etc. 
* (we cannot just pop the sigcontext since we aligned the sp and * stuff after pushing it) */ err |= __get_user(old_usp, &sc->usp); wrusp(old_usp); /* TODO: the other ports use regs->orig_XX to disable syscall checks * after this completes, but we don't use that mechanism. maybe we can * use it now ? */ return err; badframe: return 1; } /* Define dummy arguments to be able to reach the regs argument. */ asmlinkage int sys_sigreturn(long r10, long r11, long r12, long r13, long mof, long srp, struct pt_regs *regs) { struct sigframe __user *frame = (struct sigframe *)rdusp(); sigset_t set; /* * Since we stacked the signal on a dword boundary, * then frame should be dword aligned here. If it's * not, then the user is trying to mess with us. */ if (((long)frame) & 3) goto badframe; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__get_user(set.sig[0], &frame->sc.oldmask) || (_NSIG_WORDS > 1 && __copy_from_user(&set.sig[1], frame->extramask, sizeof(frame->extramask)))) goto badframe; set_current_blocked(&set); if (restore_sigcontext(regs, &frame->sc)) goto badframe; /* TODO: SIGTRAP when single-stepping as in arm ? */ return regs->r10; badframe: force_sig(SIGSEGV, current); return 0; } /* Define dummy arguments to be able to reach the regs argument. */ asmlinkage int sys_rt_sigreturn(long r10, long r11, long r12, long r13, long mof, long srp, struct pt_regs *regs) { struct rt_sigframe __user *frame = (struct rt_sigframe *)rdusp(); sigset_t set; /* * Since we stacked the signal on a dword boundary, * then frame should be dword aligned here. If it's * not, then the user is trying to mess with us. */ if (((long)frame) & 3) goto badframe; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) goto badframe; if (do_sigaltstack(&frame->uc.uc_stack, NULL, rdusp()) == -EFAULT) goto badframe; return regs->r10; badframe: force_sig(SIGSEGV, current); return 0; } /* * Set up a signal frame. */ static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask) { int err = 0; unsigned long usp = rdusp(); /* copy the regs. they are first in sc so we can use sc directly */ err |= __copy_to_user(sc, regs, sizeof(struct pt_regs)); /* Set the frametype to CRIS_FRAME_NORMAL for the execution of the signal handler. The frametype will be restored to its previous value in restore_sigcontext. */ regs->frametype = CRIS_FRAME_NORMAL; /* then some other stuff */ err |= __put_user(mask, &sc->oldmask); err |= __put_user(usp, &sc->usp); return err; } /* Figure out where we want to put the new signal frame * - usually on the stack. */ static inline void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) { unsigned long sp = rdusp(); /* This is the X/Open sanctioned signal stack switching. */ if (ka->sa.sa_flags & SA_ONSTACK) { if (! on_sig_stack(sp)) sp = current->sas_ss_sp + current->sas_ss_size; } /* make sure the frame is dword-aligned */ sp &= ~3; return (void __user*)(sp - frame_size); } /* grab and setup a signal frame. * * basically we stack a lot of state info, and arrange for the * user-mode program to return to the kernel using either a * trampoline which performs the syscall sigreturn, or a provided * user-mode trampoline. 
*/ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs) { struct sigframe __user *frame; unsigned long return_ip; int err = 0; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; err |= setup_sigcontext(&frame->sc, regs, set->sig[0]); if (err) goto give_sigsegv; if (_NSIG_WORDS > 1) { err |= __copy_to_user(frame->extramask, &set->sig[1], sizeof(frame->extramask)); } if (err) goto give_sigsegv; /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa.sa_flags & SA_RESTORER) { return_ip = (unsigned long)ka->sa.sa_restorer; } else { /* trampoline - the desired return ip is the retcode itself */ return_ip = (unsigned long)&frame->retcode; /* This is movu.w __NR_sigreturn, r9; break 13; */ err |= __put_user(0x9c5f, (short __user*)(frame->retcode+0)); err |= __put_user(__NR_sigreturn, (short __user*)(frame->retcode+2)); err |= __put_user(0xe93d, (short __user*)(frame->retcode+4)); } if (err) goto give_sigsegv; /* Set up registers for signal handler */ regs->irp = (unsigned long) ka->sa.sa_handler; /* what we enter NOW */ regs->srp = return_ip; /* what we enter LATER */ regs->r10 = sig; /* first argument is signo */ /* actually move the usp to reflect the stacked frame */ wrusp((unsigned long)frame); return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; } static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame; unsigned long return_ip; int err = 0; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; err |= __put_user(&frame->info, &frame->pinfo); err |= __put_user(&frame->uc, &frame->puc); err |= copy_siginfo_to_user(&frame->info, info); if (err) goto give_sigsegv; /* Clear all the bits of the ucontext we don't use. */ err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext)); err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) goto give_sigsegv; /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa.sa_flags & SA_RESTORER) { return_ip = (unsigned long)ka->sa.sa_restorer; } else { /* trampoline - the desired return ip is the retcode itself */ return_ip = (unsigned long)&frame->retcode; /* This is movu.w __NR_rt_sigreturn, r9; break 13; */ err |= __put_user(0x9c5f, (short __user *)(frame->retcode+0)); err |= __put_user(__NR_rt_sigreturn, (short __user *)(frame->retcode+2)); err |= __put_user(0xe93d, (short __user *)(frame->retcode+4)); } if (err) goto give_sigsegv; /* TODO what is the current->exec_domain stuff and invmap ? 
*/ /* Set up registers for signal handler */ /* What we enter NOW */ regs->irp = (unsigned long) ka->sa.sa_handler; /* What we enter LATER */ regs->srp = return_ip; /* First argument is signo */ regs->r10 = sig; /* Second argument is (siginfo_t *) */ regs->r11 = (unsigned long)&frame->info; /* Third argument is unused */ regs->r12 = 0; /* Actually move the usp to reflect the stacked frame */ wrusp((unsigned long)frame); return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; } /* * OK, we're invoking a handler */ static inline void handle_signal(int canrestart, unsigned long sig, siginfo_t *info, struct k_sigaction *ka, struct pt_regs *regs) { sigset_t *oldset = sigmask_to_save(); int ret; /* Are we from a system call? */ if (canrestart) { /* If so, check system call restarting.. */ switch (regs->r10) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: /* ERESTARTNOHAND means that the syscall should * only be restarted if there was no handler for * the signal, and since we only get here if there * is a handler, we don't restart */ regs->r10 = -EINTR; break; case -ERESTARTSYS: /* ERESTARTSYS means to restart the syscall if * there is no handler or the handler was * registered with SA_RESTART */ if (!(ka->sa.sa_flags & SA_RESTART)) { regs->r10 = -EINTR; break; } /* fallthrough */ case -ERESTARTNOINTR: /* ERESTARTNOINTR means that the syscall should * be called again after the signal handler returns. */ RESTART_CRIS_SYS(regs); } } /* Set up the stack frame */ if (ka->sa.sa_flags & SA_SIGINFO) ret = setup_rt_frame(sig, ka, info, oldset, regs); else ret = setup_frame(sig, ka, oldset, regs); if (ret == 0) signal_delivered(sig, info, ka, regs, 0); } /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. * * Also note that the regs structure given here as an argument, is the latest * pushed pt_regs. It may or may not be the same as the first pushed registers * when the initial usermode->kernelmode transition took place. Therefore * we can use user_mode(regs) to see if we came directly from kernel or user * mode below. */ void do_signal(int canrestart, struct pt_regs *regs) { siginfo_t info; int signr; struct k_sigaction ka; /* * We want the common case to go fast, which * is why we may in certain cases get here from * kernel mode. Just return without doing anything * if so. */ if (!user_mode(regs)) return; signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { /* Whee! Actually deliver the signal. */ handle_signal(canrestart, signr, &info, &ka, regs); return; } /* Did we come from a system call? */ if (canrestart) { /* Restart the system call - no handlers present */ if (regs->r10 == -ERESTARTNOHAND || regs->r10 == -ERESTARTSYS || regs->r10 == -ERESTARTNOINTR) { RESTART_CRIS_SYS(regs); } if (regs->r10 == -ERESTART_RESTARTBLOCK) { regs->r9 = __NR_restart_syscall; regs->irp -= 2; } } /* if there's no signal to deliver, we just put the saved sigmask * back */ restore_saved_sigmask(); }
gpl-2.0
alanorth/kernel_huawei_codeaurora_u8160
fs/ocfs2/stackglue.c
532
16395
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * stackglue.c * * Code which implements an OCFS2 specific interface to underlying * cluster stacks. * * Copyright (C) 2007, 2009 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/list.h> #include <linux/spinlock.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/kmod.h> #include <linux/fs.h> #include <linux/kobject.h> #include <linux/sysfs.h> #include <linux/sysctl.h> #include "ocfs2_fs.h" #include "stackglue.h" #define OCFS2_STACK_PLUGIN_O2CB "o2cb" #define OCFS2_STACK_PLUGIN_USER "user" #define OCFS2_MAX_HB_CTL_PATH 256 static struct ocfs2_locking_protocol *lproto; static DEFINE_SPINLOCK(ocfs2_stack_lock); static LIST_HEAD(ocfs2_stack_list); static char cluster_stack_name[OCFS2_STACK_LABEL_LEN + 1]; static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl"; /* * The stack currently in use. If not null, active_stack->sp_count > 0, * the module is pinned, and the locking protocol cannot be changed. */ static struct ocfs2_stack_plugin *active_stack; static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name) { struct ocfs2_stack_plugin *p; assert_spin_locked(&ocfs2_stack_lock); list_for_each_entry(p, &ocfs2_stack_list, sp_list) { if (!strcmp(p->sp_name, name)) return p; } return NULL; } static int ocfs2_stack_driver_request(const char *stack_name, const char *plugin_name) { int rc; struct ocfs2_stack_plugin *p; spin_lock(&ocfs2_stack_lock); /* * If the stack passed by the filesystem isn't the selected one, * we can't continue. */ if (strcmp(stack_name, cluster_stack_name)) { rc = -EBUSY; goto out; } if (active_stack) { /* * If the active stack isn't the one we want, it cannot * be selected right now. */ if (!strcmp(active_stack->sp_name, plugin_name)) rc = 0; else rc = -EBUSY; goto out; } p = ocfs2_stack_lookup(plugin_name); if (!p || !try_module_get(p->sp_owner)) { rc = -ENOENT; goto out; } active_stack = p; rc = 0; out: /* If we found it, pin it */ if (!rc) active_stack->sp_count++; spin_unlock(&ocfs2_stack_lock); return rc; } /* * This function looks up the appropriate stack and makes it active. If * there is no stack, it tries to load it. It will fail if the stack still * cannot be found. It will also fail if a different stack is in use. */ static int ocfs2_stack_driver_get(const char *stack_name) { int rc; char *plugin_name = OCFS2_STACK_PLUGIN_O2CB; /* * Classic stack does not pass in a stack name. This is * compatible with older tools as well. 
*/ if (!stack_name || !*stack_name) stack_name = OCFS2_STACK_PLUGIN_O2CB; if (strlen(stack_name) != OCFS2_STACK_LABEL_LEN) { printk(KERN_ERR "ocfs2 passed an invalid cluster stack label: \"%s\"\n", stack_name); return -EINVAL; } /* Anything that isn't the classic stack is a user stack */ if (strcmp(stack_name, OCFS2_STACK_PLUGIN_O2CB)) plugin_name = OCFS2_STACK_PLUGIN_USER; rc = ocfs2_stack_driver_request(stack_name, plugin_name); if (rc == -ENOENT) { request_module("ocfs2_stack_%s", plugin_name); rc = ocfs2_stack_driver_request(stack_name, plugin_name); } if (rc == -ENOENT) { printk(KERN_ERR "ocfs2: Cluster stack driver \"%s\" cannot be found\n", plugin_name); } else if (rc == -EBUSY) { printk(KERN_ERR "ocfs2: A different cluster stack is in use\n"); } return rc; } static void ocfs2_stack_driver_put(void) { spin_lock(&ocfs2_stack_lock); BUG_ON(active_stack == NULL); BUG_ON(active_stack->sp_count == 0); active_stack->sp_count--; if (!active_stack->sp_count) { module_put(active_stack->sp_owner); active_stack = NULL; } spin_unlock(&ocfs2_stack_lock); } int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin) { int rc; spin_lock(&ocfs2_stack_lock); if (!ocfs2_stack_lookup(plugin->sp_name)) { plugin->sp_count = 0; plugin->sp_proto = lproto; list_add(&plugin->sp_list, &ocfs2_stack_list); printk(KERN_INFO "ocfs2: Registered cluster interface %s\n", plugin->sp_name); rc = 0; } else { printk(KERN_ERR "ocfs2: Stack \"%s\" already registered\n", plugin->sp_name); rc = -EEXIST; } spin_unlock(&ocfs2_stack_lock); return rc; } EXPORT_SYMBOL_GPL(ocfs2_stack_glue_register); void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin) { struct ocfs2_stack_plugin *p; spin_lock(&ocfs2_stack_lock); p = ocfs2_stack_lookup(plugin->sp_name); if (p) { BUG_ON(p != plugin); BUG_ON(plugin == active_stack); BUG_ON(plugin->sp_count != 0); list_del_init(&plugin->sp_list); printk(KERN_INFO "ocfs2: Unregistered cluster interface %s\n", plugin->sp_name); } else { printk(KERN_ERR "Stack \"%s\" is not registered\n", plugin->sp_name); } spin_unlock(&ocfs2_stack_lock); } EXPORT_SYMBOL_GPL(ocfs2_stack_glue_unregister); void ocfs2_stack_glue_set_locking_protocol(struct ocfs2_locking_protocol *proto) { struct ocfs2_stack_plugin *p; BUG_ON(proto == NULL); spin_lock(&ocfs2_stack_lock); BUG_ON(active_stack != NULL); lproto = proto; list_for_each_entry(p, &ocfs2_stack_list, sp_list) { p->sp_proto = lproto; } spin_unlock(&ocfs2_stack_lock); } EXPORT_SYMBOL_GPL(ocfs2_stack_glue_set_locking_protocol); /* * The ocfs2_dlm_lock() and ocfs2_dlm_unlock() functions take * "struct ocfs2_lock_res *astarg" instead of "void *astarg" because the * underlying stack plugins need to pilfer the lksb off of the lock_res. * If some other structure needs to be passed as an astarg, the plugins * will need to be given a different avenue to the lksb. 
*/ int ocfs2_dlm_lock(struct ocfs2_cluster_connection *conn, int mode, union ocfs2_dlm_lksb *lksb, u32 flags, void *name, unsigned int namelen, struct ocfs2_lock_res *astarg) { BUG_ON(lproto == NULL); return active_stack->sp_ops->dlm_lock(conn, mode, lksb, flags, name, namelen, astarg); } EXPORT_SYMBOL_GPL(ocfs2_dlm_lock); int ocfs2_dlm_unlock(struct ocfs2_cluster_connection *conn, union ocfs2_dlm_lksb *lksb, u32 flags, struct ocfs2_lock_res *astarg) { BUG_ON(lproto == NULL); return active_stack->sp_ops->dlm_unlock(conn, lksb, flags, astarg); } EXPORT_SYMBOL_GPL(ocfs2_dlm_unlock); int ocfs2_dlm_lock_status(union ocfs2_dlm_lksb *lksb) { return active_stack->sp_ops->lock_status(lksb); } EXPORT_SYMBOL_GPL(ocfs2_dlm_lock_status); int ocfs2_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb) { return active_stack->sp_ops->lvb_valid(lksb); } EXPORT_SYMBOL_GPL(ocfs2_dlm_lvb_valid); void *ocfs2_dlm_lvb(union ocfs2_dlm_lksb *lksb) { return active_stack->sp_ops->lock_lvb(lksb); } EXPORT_SYMBOL_GPL(ocfs2_dlm_lvb); void ocfs2_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb) { active_stack->sp_ops->dump_lksb(lksb); } EXPORT_SYMBOL_GPL(ocfs2_dlm_dump_lksb); int ocfs2_stack_supports_plocks(void) { return active_stack && active_stack->sp_ops->plock; } EXPORT_SYMBOL_GPL(ocfs2_stack_supports_plocks); /* * ocfs2_plock() can only be safely called if * ocfs2_stack_supports_plocks() returned true */ int ocfs2_plock(struct ocfs2_cluster_connection *conn, u64 ino, struct file *file, int cmd, struct file_lock *fl) { WARN_ON_ONCE(active_stack->sp_ops->plock == NULL); if (active_stack->sp_ops->plock) return active_stack->sp_ops->plock(conn, ino, file, cmd, fl); return -EOPNOTSUPP; } EXPORT_SYMBOL_GPL(ocfs2_plock); int ocfs2_cluster_connect(const char *stack_name, const char *group, int grouplen, void (*recovery_handler)(int node_num, void *recovery_data), void *recovery_data, struct ocfs2_cluster_connection **conn) { int rc = 0; struct ocfs2_cluster_connection *new_conn; BUG_ON(group == NULL); BUG_ON(conn == NULL); BUG_ON(recovery_handler == NULL); if (grouplen > GROUP_NAME_MAX) { rc = -EINVAL; goto out; } new_conn = kzalloc(sizeof(struct ocfs2_cluster_connection), GFP_KERNEL); if (!new_conn) { rc = -ENOMEM; goto out; } memcpy(new_conn->cc_name, group, grouplen); new_conn->cc_namelen = grouplen; new_conn->cc_recovery_handler = recovery_handler; new_conn->cc_recovery_data = recovery_data; /* Start the new connection at our maximum compatibility level */ new_conn->cc_version = lproto->lp_max_version; /* This will pin the stack driver if successful */ rc = ocfs2_stack_driver_get(stack_name); if (rc) goto out_free; rc = active_stack->sp_ops->connect(new_conn); if (rc) { ocfs2_stack_driver_put(); goto out_free; } *conn = new_conn; out_free: if (rc) kfree(new_conn); out: return rc; } EXPORT_SYMBOL_GPL(ocfs2_cluster_connect); /* If hangup_pending is 0, the stack driver will be dropped */ int ocfs2_cluster_disconnect(struct ocfs2_cluster_connection *conn, int hangup_pending) { int ret; BUG_ON(conn == NULL); ret = active_stack->sp_ops->disconnect(conn); /* XXX Should we free it anyway? */ if (!ret) { kfree(conn); if (!hangup_pending) ocfs2_stack_driver_put(); } return ret; } EXPORT_SYMBOL_GPL(ocfs2_cluster_disconnect); /* * Leave the group for this filesystem. This is executed by a userspace * program (stored in ocfs2_hb_ctl_path). 
*/ static void ocfs2_leave_group(const char *group) { int ret; char *argv[5], *envp[3]; argv[0] = ocfs2_hb_ctl_path; argv[1] = "-K"; argv[2] = "-u"; argv[3] = (char *)group; argv[4] = NULL; /* minimal command environment taken from cpu_run_sbin_hotplug */ envp[0] = "HOME=/"; envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; envp[2] = NULL; ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC); if (ret < 0) { printk(KERN_ERR "ocfs2: Error %d running user helper " "\"%s %s %s %s\"\n", ret, argv[0], argv[1], argv[2], argv[3]); } } /* * Hangup is a required post-umount. ocfs2-tools software expects the * filesystem to call "ocfs2_hb_ctl" during unmount. This happens * regardless of whether the DLM got started, so we can't do it * in ocfs2_cluster_disconnect(). The ocfs2_leave_group() function does * the actual work. */ void ocfs2_cluster_hangup(const char *group, int grouplen) { BUG_ON(group == NULL); BUG_ON(group[grouplen] != '\0'); ocfs2_leave_group(group); /* cluster_disconnect() was called with hangup_pending==1 */ ocfs2_stack_driver_put(); } EXPORT_SYMBOL_GPL(ocfs2_cluster_hangup); int ocfs2_cluster_this_node(unsigned int *node) { return active_stack->sp_ops->this_node(node); } EXPORT_SYMBOL_GPL(ocfs2_cluster_this_node); /* * Sysfs bits */ static ssize_t ocfs2_max_locking_protocol_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { ssize_t ret = 0; spin_lock(&ocfs2_stack_lock); if (lproto) ret = snprintf(buf, PAGE_SIZE, "%u.%u\n", lproto->lp_max_version.pv_major, lproto->lp_max_version.pv_minor); spin_unlock(&ocfs2_stack_lock); return ret; } static struct kobj_attribute ocfs2_attr_max_locking_protocol = __ATTR(max_locking_protocol, S_IFREG | S_IRUGO, ocfs2_max_locking_protocol_show, NULL); static ssize_t ocfs2_loaded_cluster_plugins_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { ssize_t ret = 0, total = 0, remain = PAGE_SIZE; struct ocfs2_stack_plugin *p; spin_lock(&ocfs2_stack_lock); list_for_each_entry(p, &ocfs2_stack_list, sp_list) { ret = snprintf(buf, remain, "%s\n", p->sp_name); if (ret < 0) { total = ret; break; } if (ret == remain) { /* snprintf() didn't fit */ total = -E2BIG; break; } total += ret; remain -= ret; } spin_unlock(&ocfs2_stack_lock); return total; } static struct kobj_attribute ocfs2_attr_loaded_cluster_plugins = __ATTR(loaded_cluster_plugins, S_IFREG | S_IRUGO, ocfs2_loaded_cluster_plugins_show, NULL); static ssize_t ocfs2_active_cluster_plugin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { ssize_t ret = 0; spin_lock(&ocfs2_stack_lock); if (active_stack) { ret = snprintf(buf, PAGE_SIZE, "%s\n", active_stack->sp_name); if (ret == PAGE_SIZE) ret = -E2BIG; } spin_unlock(&ocfs2_stack_lock); return ret; } static struct kobj_attribute ocfs2_attr_active_cluster_plugin = __ATTR(active_cluster_plugin, S_IFREG | S_IRUGO, ocfs2_active_cluster_plugin_show, NULL); static ssize_t ocfs2_cluster_stack_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { ssize_t ret; spin_lock(&ocfs2_stack_lock); ret = snprintf(buf, PAGE_SIZE, "%s\n", cluster_stack_name); spin_unlock(&ocfs2_stack_lock); return ret; } static ssize_t ocfs2_cluster_stack_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { size_t len = count; ssize_t ret; if (len == 0) return len; if (buf[len - 1] == '\n') len--; if ((len != OCFS2_STACK_LABEL_LEN) || (strnlen(buf, len) != len)) return -EINVAL; spin_lock(&ocfs2_stack_lock); if (active_stack) { if (!strncmp(buf, cluster_stack_name, len)) ret = count; 
else ret = -EBUSY; } else { memcpy(cluster_stack_name, buf, len); ret = count; } spin_unlock(&ocfs2_stack_lock); return ret; } static struct kobj_attribute ocfs2_attr_cluster_stack = __ATTR(cluster_stack, S_IFREG | S_IRUGO | S_IWUSR, ocfs2_cluster_stack_show, ocfs2_cluster_stack_store); static struct attribute *ocfs2_attrs[] = { &ocfs2_attr_max_locking_protocol.attr, &ocfs2_attr_loaded_cluster_plugins.attr, &ocfs2_attr_active_cluster_plugin.attr, &ocfs2_attr_cluster_stack.attr, NULL, }; static struct attribute_group ocfs2_attr_group = { .attrs = ocfs2_attrs, }; static struct kset *ocfs2_kset; static void ocfs2_sysfs_exit(void) { kset_unregister(ocfs2_kset); } static int ocfs2_sysfs_init(void) { int ret; ocfs2_kset = kset_create_and_add("ocfs2", NULL, fs_kobj); if (!ocfs2_kset) return -ENOMEM; ret = sysfs_create_group(&ocfs2_kset->kobj, &ocfs2_attr_group); if (ret) goto error; return 0; error: kset_unregister(ocfs2_kset); return ret; } /* * Sysctl bits * * The sysctl lives at /proc/sys/fs/ocfs2/nm/hb_ctl_path. The 'nm' doesn't * make as much sense in a multiple cluster stack world, but it's safer * and easier to preserve the name. */ #define FS_OCFS2_NM 1 static ctl_table ocfs2_nm_table[] = { { .ctl_name = 1, .procname = "hb_ctl_path", .data = ocfs2_hb_ctl_path, .maxlen = OCFS2_MAX_HB_CTL_PATH, .mode = 0644, .proc_handler = &proc_dostring, .strategy = &sysctl_string, }, { .ctl_name = 0 } }; static ctl_table ocfs2_mod_table[] = { { .ctl_name = FS_OCFS2_NM, .procname = "nm", .data = NULL, .maxlen = 0, .mode = 0555, .child = ocfs2_nm_table }, { .ctl_name = 0} }; static ctl_table ocfs2_kern_table[] = { { .ctl_name = FS_OCFS2, .procname = "ocfs2", .data = NULL, .maxlen = 0, .mode = 0555, .child = ocfs2_mod_table }, { .ctl_name = 0} }; static ctl_table ocfs2_root_table[] = { { .ctl_name = CTL_FS, .procname = "fs", .data = NULL, .maxlen = 0, .mode = 0555, .child = ocfs2_kern_table }, { .ctl_name = 0 } }; static struct ctl_table_header *ocfs2_table_header = NULL; /* * Initialization */ static int __init ocfs2_stack_glue_init(void) { strcpy(cluster_stack_name, OCFS2_STACK_PLUGIN_O2CB); ocfs2_table_header = register_sysctl_table(ocfs2_root_table); if (!ocfs2_table_header) { printk(KERN_ERR "ocfs2 stack glue: unable to register sysctl\n"); return -ENOMEM; /* or something. */ } return ocfs2_sysfs_init(); } static void __exit ocfs2_stack_glue_exit(void) { lproto = NULL; ocfs2_sysfs_exit(); if (ocfs2_table_header) unregister_sysctl_table(ocfs2_table_header); } MODULE_AUTHOR("Oracle"); MODULE_DESCRIPTION("ocfs2 cluster stack glue layer"); MODULE_LICENSE("GPL"); module_init(ocfs2_stack_glue_init); module_exit(ocfs2_stack_glue_exit);
gpl-2.0
GiulianoFranchetto/linux-at91
drivers/mmc/host/sdhci-dove.c
532
3856
/* * sdhci-dove.c Support for SDHCI on Marvell's Dove SoC * * Author: Saeed Bishara <saeed@marvell.com> * Mike Rapoport <mike@compulab.co.il> * Based on sdhci-cns3xxx.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <linux/mmc/host.h> #include <linux/module.h> #include <linux/of.h> #include "sdhci-pltfm.h" struct sdhci_dove_priv { struct clk *clk; }; static u16 sdhci_dove_readw(struct sdhci_host *host, int reg) { u16 ret; switch (reg) { case SDHCI_HOST_VERSION: case SDHCI_SLOT_INT_STATUS: /* those registers don't exist */ return 0; default: ret = readw(host->ioaddr + reg); } return ret; } static u32 sdhci_dove_readl(struct sdhci_host *host, int reg) { u32 ret; ret = readl(host->ioaddr + reg); switch (reg) { case SDHCI_CAPABILITIES: /* Mask the support for 3.0V */ ret &= ~SDHCI_CAN_VDD_300; break; } return ret; } static const struct sdhci_ops sdhci_dove_ops = { .read_w = sdhci_dove_readw, .read_l = sdhci_dove_readl, .set_clock = sdhci_set_clock, .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, }; static const struct sdhci_pltfm_data sdhci_dove_pdata = { .ops = &sdhci_dove_ops, .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | SDHCI_QUIRK_NO_BUSY_IRQ | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_FORCE_DMA | SDHCI_QUIRK_NO_HISPD_BIT, }; static int sdhci_dove_probe(struct platform_device *pdev) { struct sdhci_host *host; struct sdhci_pltfm_host *pltfm_host; struct sdhci_dove_priv *priv; int ret; priv = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_dove_priv), GFP_KERNEL); if (!priv) { dev_err(&pdev->dev, "unable to allocate private data"); return -ENOMEM; } priv->clk = devm_clk_get(&pdev->dev, NULL); host = sdhci_pltfm_init(pdev, &sdhci_dove_pdata, 0); if (IS_ERR(host)) return PTR_ERR(host); pltfm_host = sdhci_priv(host); pltfm_host->priv = priv; if (!IS_ERR(priv->clk)) clk_prepare_enable(priv->clk); ret = mmc_of_parse(host->mmc); if (ret) goto err_sdhci_add; ret = sdhci_add_host(host); if (ret) goto err_sdhci_add; return 0; err_sdhci_add: if (!IS_ERR(priv->clk)) clk_disable_unprepare(priv->clk); sdhci_pltfm_free(pdev); return ret; } static int sdhci_dove_remove(struct platform_device *pdev) { struct sdhci_host *host = platform_get_drvdata(pdev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_dove_priv *priv = pltfm_host->priv; sdhci_pltfm_unregister(pdev); if (!IS_ERR(priv->clk)) clk_disable_unprepare(priv->clk); return 0; } static const struct of_device_id sdhci_dove_of_match_table[] = { { .compatible = "marvell,dove-sdhci", }, {} }; MODULE_DEVICE_TABLE(of, sdhci_dove_of_match_table); static struct platform_driver sdhci_dove_driver = { .driver = { .name = "sdhci-dove", .pm = SDHCI_PLTFM_PMOPS, .of_match_table = sdhci_dove_of_match_table, }, .probe = sdhci_dove_probe, .remove = sdhci_dove_remove, }; module_platform_driver(sdhci_dove_driver); MODULE_DESCRIPTION("SDHCI driver 
for Dove"); MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>, " "Mike Rapoport <mike@compulab.co.il>"); MODULE_LICENSE("GPL v2");
gpl-2.0
YogeshNain/linux
arch/arm/mach-pxa/devices.c
532
24548
#include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/spi/pxa2xx_spi.h> #include <linux/i2c/pxa-i2c.h> #include <mach/udc.h> #include <linux/platform_data/usb-pxa3xx-ulpi.h> #include <linux/platform_data/video-pxafb.h> #include <linux/platform_data/mmc-pxamci.h> #include <linux/platform_data/irda-pxaficp.h> #include <mach/irqs.h> #include <linux/platform_data/usb-ohci-pxa27x.h> #include <linux/platform_data/keypad-pxa27x.h> #include <linux/platform_data/camera-pxa.h> #include <mach/audio.h> #include <mach/hardware.h> #include <linux/platform_data/mtd-nand-pxa3xx.h> #include "devices.h" #include "generic.h" void __init pxa_register_device(struct platform_device *dev, void *data) { int ret; dev->dev.platform_data = data; ret = platform_device_register(dev); if (ret) dev_err(&dev->dev, "unable to register device: %d\n", ret); } static struct resource pxa_resource_pmu = { .start = IRQ_PMU, .end = IRQ_PMU, .flags = IORESOURCE_IRQ, }; struct platform_device pxa_device_pmu = { .name = "xscale-pmu", .id = -1, .resource = &pxa_resource_pmu, .num_resources = 1, }; static struct resource pxamci_resources[] = { [0] = { .start = 0x41100000, .end = 0x41100fff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_MMC, .end = IRQ_MMC, .flags = IORESOURCE_IRQ, }, [2] = { .start = 21, .end = 21, .flags = IORESOURCE_DMA, }, [3] = { .start = 22, .end = 22, .flags = IORESOURCE_DMA, }, }; static u64 pxamci_dmamask = 0xffffffffUL; struct platform_device pxa_device_mci = { .name = "pxa2xx-mci", .id = 0, .dev = { .dma_mask = &pxamci_dmamask, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(pxamci_resources), .resource = pxamci_resources, }; void __init pxa_set_mci_info(struct pxamci_platform_data *info) { pxa_register_device(&pxa_device_mci, info); } static struct pxa2xx_udc_mach_info pxa_udc_info = { .gpio_pullup = -1, }; void __init pxa_set_udc_info(struct pxa2xx_udc_mach_info *info) { memcpy(&pxa_udc_info, info, sizeof *info); } static struct resource pxa2xx_udc_resources[] = { [0] = { .start = 0x40600000, .end = 0x4060ffff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_USB, .end = IRQ_USB, .flags = IORESOURCE_IRQ, }, }; static u64 udc_dma_mask = ~(u32)0; struct platform_device pxa25x_device_udc = { .name = "pxa25x-udc", .id = -1, .resource = pxa2xx_udc_resources, .num_resources = ARRAY_SIZE(pxa2xx_udc_resources), .dev = { .platform_data = &pxa_udc_info, .dma_mask = &udc_dma_mask, } }; struct platform_device pxa27x_device_udc = { .name = "pxa27x-udc", .id = -1, .resource = pxa2xx_udc_resources, .num_resources = ARRAY_SIZE(pxa2xx_udc_resources), .dev = { .platform_data = &pxa_udc_info, .dma_mask = &udc_dma_mask, } }; #ifdef CONFIG_PXA3xx static struct resource pxa3xx_u2d_resources[] = { [0] = { .start = 0x54100000, .end = 0x54100fff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_USB2, .end = IRQ_USB2, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa3xx_device_u2d = { .name = "pxa3xx-u2d", .id = -1, .resource = pxa3xx_u2d_resources, .num_resources = ARRAY_SIZE(pxa3xx_u2d_resources), }; void __init pxa3xx_set_u2d_info(struct pxa3xx_u2d_platform_data *info) { pxa_register_device(&pxa3xx_device_u2d, info); } #endif /* CONFIG_PXA3xx */ static struct resource pxafb_resources[] = { [0] = { .start = 0x44000000, .end = 0x4400ffff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_LCD, .end = IRQ_LCD, .flags = IORESOURCE_IRQ, }, }; static u64 fb_dma_mask = ~(u64)0; struct platform_device 
pxa_device_fb = { .name = "pxa2xx-fb", .id = -1, .dev = { .dma_mask = &fb_dma_mask, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(pxafb_resources), .resource = pxafb_resources, }; void __init pxa_set_fb_info(struct device *parent, struct pxafb_mach_info *info) { pxa_device_fb.dev.parent = parent; pxa_register_device(&pxa_device_fb, info); } static struct resource pxa_resource_ffuart[] = { { .start = 0x40100000, .end = 0x40100023, .flags = IORESOURCE_MEM, }, { .start = IRQ_FFUART, .end = IRQ_FFUART, .flags = IORESOURCE_IRQ, } }; struct platform_device pxa_device_ffuart = { .name = "pxa2xx-uart", .id = 0, .resource = pxa_resource_ffuart, .num_resources = ARRAY_SIZE(pxa_resource_ffuart), }; void __init pxa_set_ffuart_info(void *info) { pxa_register_device(&pxa_device_ffuart, info); } static struct resource pxa_resource_btuart[] = { { .start = 0x40200000, .end = 0x40200023, .flags = IORESOURCE_MEM, }, { .start = IRQ_BTUART, .end = IRQ_BTUART, .flags = IORESOURCE_IRQ, } }; struct platform_device pxa_device_btuart = { .name = "pxa2xx-uart", .id = 1, .resource = pxa_resource_btuart, .num_resources = ARRAY_SIZE(pxa_resource_btuart), }; void __init pxa_set_btuart_info(void *info) { pxa_register_device(&pxa_device_btuart, info); } static struct resource pxa_resource_stuart[] = { { .start = 0x40700000, .end = 0x40700023, .flags = IORESOURCE_MEM, }, { .start = IRQ_STUART, .end = IRQ_STUART, .flags = IORESOURCE_IRQ, } }; struct platform_device pxa_device_stuart = { .name = "pxa2xx-uart", .id = 2, .resource = pxa_resource_stuart, .num_resources = ARRAY_SIZE(pxa_resource_stuart), }; void __init pxa_set_stuart_info(void *info) { pxa_register_device(&pxa_device_stuart, info); } static struct resource pxa_resource_hwuart[] = { { .start = 0x41600000, .end = 0x4160002F, .flags = IORESOURCE_MEM, }, { .start = IRQ_HWUART, .end = IRQ_HWUART, .flags = IORESOURCE_IRQ, } }; struct platform_device pxa_device_hwuart = { .name = "pxa2xx-uart", .id = 3, .resource = pxa_resource_hwuart, .num_resources = ARRAY_SIZE(pxa_resource_hwuart), }; void __init pxa_set_hwuart_info(void *info) { if (cpu_is_pxa255()) pxa_register_device(&pxa_device_hwuart, info); else pr_info("UART: Ignoring attempt to register HWUART on non-PXA255 hardware"); } static struct resource pxai2c_resources[] = { { .start = 0x40301680, .end = 0x403016a3, .flags = IORESOURCE_MEM, }, { .start = IRQ_I2C, .end = IRQ_I2C, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa_device_i2c = { .name = "pxa2xx-i2c", .id = 0, .resource = pxai2c_resources, .num_resources = ARRAY_SIZE(pxai2c_resources), }; void __init pxa_set_i2c_info(struct i2c_pxa_platform_data *info) { pxa_register_device(&pxa_device_i2c, info); } #ifdef CONFIG_PXA27x static struct resource pxa27x_resources_i2c_power[] = { { .start = 0x40f00180, .end = 0x40f001a3, .flags = IORESOURCE_MEM, }, { .start = IRQ_PWRI2C, .end = IRQ_PWRI2C, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa27x_device_i2c_power = { .name = "pxa2xx-i2c", .id = 1, .resource = pxa27x_resources_i2c_power, .num_resources = ARRAY_SIZE(pxa27x_resources_i2c_power), }; #endif static struct resource pxai2s_resources[] = { { .start = 0x40400000, .end = 0x40400083, .flags = IORESOURCE_MEM, }, { .start = IRQ_I2S, .end = IRQ_I2S, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa_device_i2s = { .name = "pxa2xx-i2s", .id = -1, .resource = pxai2s_resources, .num_resources = ARRAY_SIZE(pxai2s_resources), }; struct platform_device pxa_device_asoc_ssp1 = { .name = "pxa-ssp-dai", .id = 0, }; struct 
platform_device pxa_device_asoc_ssp2= { .name = "pxa-ssp-dai", .id = 1, }; struct platform_device pxa_device_asoc_ssp3 = { .name = "pxa-ssp-dai", .id = 2, }; struct platform_device pxa_device_asoc_ssp4 = { .name = "pxa-ssp-dai", .id = 3, }; struct platform_device pxa_device_asoc_platform = { .name = "pxa-pcm-audio", .id = -1, }; static u64 pxaficp_dmamask = ~(u32)0; static struct resource pxa_ir_resources[] = { [0] = { .start = IRQ_STUART, .end = IRQ_STUART, .flags = IORESOURCE_IRQ, }, [1] = { .start = IRQ_ICP, .end = IRQ_ICP, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa_device_ficp = { .name = "pxa2xx-ir", .id = -1, .num_resources = ARRAY_SIZE(pxa_ir_resources), .resource = pxa_ir_resources, .dev = { .dma_mask = &pxaficp_dmamask, .coherent_dma_mask = 0xffffffff, }, }; void __init pxa_set_ficp_info(struct pxaficp_platform_data *info) { pxa_register_device(&pxa_device_ficp, info); } static struct resource pxa_rtc_resources[] = { [0] = { .start = 0x40900000, .end = 0x40900000 + 0x3b, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_RTC1Hz, .end = IRQ_RTC1Hz, .name = "rtc 1Hz", .flags = IORESOURCE_IRQ, }, [2] = { .start = IRQ_RTCAlrm, .end = IRQ_RTCAlrm, .name = "rtc alarm", .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa_device_rtc = { .name = "pxa-rtc", .id = -1, .num_resources = ARRAY_SIZE(pxa_rtc_resources), .resource = pxa_rtc_resources, }; static struct resource sa1100_rtc_resources[] = { { .start = IRQ_RTC1Hz, .end = IRQ_RTC1Hz, .name = "rtc 1Hz", .flags = IORESOURCE_IRQ, }, { .start = IRQ_RTCAlrm, .end = IRQ_RTCAlrm, .name = "rtc alarm", .flags = IORESOURCE_IRQ, }, }; struct platform_device sa1100_device_rtc = { .name = "sa1100-rtc", .id = -1, .num_resources = ARRAY_SIZE(sa1100_rtc_resources), .resource = sa1100_rtc_resources, }; static struct resource pxa_ac97_resources[] = { [0] = { .start = 0x40500000, .end = 0x40500000 + 0xfff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_AC97, .end = IRQ_AC97, .flags = IORESOURCE_IRQ, }, }; static u64 pxa_ac97_dmamask = 0xffffffffUL; struct platform_device pxa_device_ac97 = { .name = "pxa2xx-ac97", .id = -1, .dev = { .dma_mask = &pxa_ac97_dmamask, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(pxa_ac97_resources), .resource = pxa_ac97_resources, }; void __init pxa_set_ac97_info(pxa2xx_audio_ops_t *ops) { pxa_register_device(&pxa_device_ac97, ops); } #ifdef CONFIG_PXA25x static struct resource pxa25x_resource_pwm0[] = { [0] = { .start = 0x40b00000, .end = 0x40b0000f, .flags = IORESOURCE_MEM, }, }; struct platform_device pxa25x_device_pwm0 = { .name = "pxa25x-pwm", .id = 0, .resource = pxa25x_resource_pwm0, .num_resources = ARRAY_SIZE(pxa25x_resource_pwm0), }; static struct resource pxa25x_resource_pwm1[] = { [0] = { .start = 0x40c00000, .end = 0x40c0000f, .flags = IORESOURCE_MEM, }, }; struct platform_device pxa25x_device_pwm1 = { .name = "pxa25x-pwm", .id = 1, .resource = pxa25x_resource_pwm1, .num_resources = ARRAY_SIZE(pxa25x_resource_pwm1), }; static u64 pxa25x_ssp_dma_mask = DMA_BIT_MASK(32); static struct resource pxa25x_resource_ssp[] = { [0] = { .start = 0x41000000, .end = 0x4100001f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_SSP, .end = IRQ_SSP, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for RX */ .start = 13, .end = 13, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for TX */ .start = 14, .end = 14, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa25x_device_ssp = { .name = "pxa25x-ssp", .id = 0, .dev = { .dma_mask = &pxa25x_ssp_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, 
.resource = pxa25x_resource_ssp, .num_resources = ARRAY_SIZE(pxa25x_resource_ssp), }; static u64 pxa25x_nssp_dma_mask = DMA_BIT_MASK(32); static struct resource pxa25x_resource_nssp[] = { [0] = { .start = 0x41400000, .end = 0x4140002f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_NSSP, .end = IRQ_NSSP, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for RX */ .start = 15, .end = 15, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for TX */ .start = 16, .end = 16, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa25x_device_nssp = { .name = "pxa25x-nssp", .id = 1, .dev = { .dma_mask = &pxa25x_nssp_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa25x_resource_nssp, .num_resources = ARRAY_SIZE(pxa25x_resource_nssp), }; static u64 pxa25x_assp_dma_mask = DMA_BIT_MASK(32); static struct resource pxa25x_resource_assp[] = { [0] = { .start = 0x41500000, .end = 0x4150002f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_ASSP, .end = IRQ_ASSP, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for RX */ .start = 23, .end = 23, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for TX */ .start = 24, .end = 24, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa25x_device_assp = { /* ASSP is basically equivalent to NSSP */ .name = "pxa25x-nssp", .id = 2, .dev = { .dma_mask = &pxa25x_assp_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa25x_resource_assp, .num_resources = ARRAY_SIZE(pxa25x_resource_assp), }; #endif /* CONFIG_PXA25x */ #if defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx) static struct resource pxa27x_resource_camera[] = { [0] = { .start = 0x50000000, .end = 0x50000fff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_CAMERA, .end = IRQ_CAMERA, .flags = IORESOURCE_IRQ, }, }; static u64 pxa27x_dma_mask_camera = DMA_BIT_MASK(32); static struct platform_device pxa27x_device_camera = { .name = "pxa27x-camera", .id = 0, /* This is used to put cameras on this interface */ .dev = { .dma_mask = &pxa27x_dma_mask_camera, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(pxa27x_resource_camera), .resource = pxa27x_resource_camera, }; void __init pxa_set_camera_info(struct pxacamera_platform_data *info) { pxa_register_device(&pxa27x_device_camera, info); } static u64 pxa27x_ohci_dma_mask = DMA_BIT_MASK(32); static struct resource pxa27x_resource_ohci[] = { [0] = { .start = 0x4C000000, .end = 0x4C00ff6f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_USBH1, .end = IRQ_USBH1, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa27x_device_ohci = { .name = "pxa27x-ohci", .id = -1, .dev = { .dma_mask = &pxa27x_ohci_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .num_resources = ARRAY_SIZE(pxa27x_resource_ohci), .resource = pxa27x_resource_ohci, }; void __init pxa_set_ohci_info(struct pxaohci_platform_data *info) { pxa_register_device(&pxa27x_device_ohci, info); } #endif /* CONFIG_PXA27x || CONFIG_PXA3xx */ #if defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx) static struct resource pxa27x_resource_keypad[] = { [0] = { .start = 0x41500000, .end = 0x4150004c, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_KEYPAD, .end = IRQ_KEYPAD, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa27x_device_keypad = { .name = "pxa27x-keypad", .id = -1, .resource = pxa27x_resource_keypad, .num_resources = ARRAY_SIZE(pxa27x_resource_keypad), }; void __init pxa_set_keypad_info(struct pxa27x_keypad_platform_data *info) { pxa_register_device(&pxa27x_device_keypad, info); } static u64 pxa27x_ssp1_dma_mask = DMA_BIT_MASK(32); static struct resource 
pxa27x_resource_ssp1[] = { [0] = { .start = 0x41000000, .end = 0x4100003f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_SSP, .end = IRQ_SSP, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for RX */ .start = 13, .end = 13, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for TX */ .start = 14, .end = 14, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa27x_device_ssp1 = { .name = "pxa27x-ssp", .id = 0, .dev = { .dma_mask = &pxa27x_ssp1_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa27x_resource_ssp1, .num_resources = ARRAY_SIZE(pxa27x_resource_ssp1), }; static u64 pxa27x_ssp2_dma_mask = DMA_BIT_MASK(32); static struct resource pxa27x_resource_ssp2[] = { [0] = { .start = 0x41700000, .end = 0x4170003f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_SSP2, .end = IRQ_SSP2, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for RX */ .start = 15, .end = 15, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for TX */ .start = 16, .end = 16, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa27x_device_ssp2 = { .name = "pxa27x-ssp", .id = 1, .dev = { .dma_mask = &pxa27x_ssp2_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa27x_resource_ssp2, .num_resources = ARRAY_SIZE(pxa27x_resource_ssp2), }; static u64 pxa27x_ssp3_dma_mask = DMA_BIT_MASK(32); static struct resource pxa27x_resource_ssp3[] = { [0] = { .start = 0x41900000, .end = 0x4190003f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_SSP3, .end = IRQ_SSP3, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for RX */ .start = 66, .end = 66, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for TX */ .start = 67, .end = 67, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa27x_device_ssp3 = { .name = "pxa27x-ssp", .id = 2, .dev = { .dma_mask = &pxa27x_ssp3_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa27x_resource_ssp3, .num_resources = ARRAY_SIZE(pxa27x_resource_ssp3), }; static struct resource pxa27x_resource_pwm0[] = { [0] = { .start = 0x40b00000, .end = 0x40b0001f, .flags = IORESOURCE_MEM, }, }; struct platform_device pxa27x_device_pwm0 = { .name = "pxa27x-pwm", .id = 0, .resource = pxa27x_resource_pwm0, .num_resources = ARRAY_SIZE(pxa27x_resource_pwm0), }; static struct resource pxa27x_resource_pwm1[] = { [0] = { .start = 0x40c00000, .end = 0x40c0001f, .flags = IORESOURCE_MEM, }, }; struct platform_device pxa27x_device_pwm1 = { .name = "pxa27x-pwm", .id = 1, .resource = pxa27x_resource_pwm1, .num_resources = ARRAY_SIZE(pxa27x_resource_pwm1), }; #endif /* CONFIG_PXA27x || CONFIG_PXA3xx */ #ifdef CONFIG_PXA3xx static struct resource pxa3xx_resources_mci2[] = { [0] = { .start = 0x42000000, .end = 0x42000fff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_MMC2, .end = IRQ_MMC2, .flags = IORESOURCE_IRQ, }, [2] = { .start = 93, .end = 93, .flags = IORESOURCE_DMA, }, [3] = { .start = 94, .end = 94, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa3xx_device_mci2 = { .name = "pxa2xx-mci", .id = 1, .dev = { .dma_mask = &pxamci_dmamask, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(pxa3xx_resources_mci2), .resource = pxa3xx_resources_mci2, }; void __init pxa3xx_set_mci2_info(struct pxamci_platform_data *info) { pxa_register_device(&pxa3xx_device_mci2, info); } static struct resource pxa3xx_resources_mci3[] = { [0] = { .start = 0x42500000, .end = 0x42500fff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_MMC3, .end = IRQ_MMC3, .flags = IORESOURCE_IRQ, }, [2] = { .start = 100, .end = 100, .flags = IORESOURCE_DMA, }, [3] = { .start = 101, .end = 101, .flags = 
IORESOURCE_DMA, }, }; struct platform_device pxa3xx_device_mci3 = { .name = "pxa2xx-mci", .id = 2, .dev = { .dma_mask = &pxamci_dmamask, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(pxa3xx_resources_mci3), .resource = pxa3xx_resources_mci3, }; void __init pxa3xx_set_mci3_info(struct pxamci_platform_data *info) { pxa_register_device(&pxa3xx_device_mci3, info); } static struct resource pxa3xx_resources_gcu[] = { { .start = 0x54000000, .end = 0x54000fff, .flags = IORESOURCE_MEM, }, { .start = IRQ_GCU, .end = IRQ_GCU, .flags = IORESOURCE_IRQ, }, }; static u64 pxa3xx_gcu_dmamask = DMA_BIT_MASK(32); struct platform_device pxa3xx_device_gcu = { .name = "pxa3xx-gcu", .id = -1, .num_resources = ARRAY_SIZE(pxa3xx_resources_gcu), .resource = pxa3xx_resources_gcu, .dev = { .dma_mask = &pxa3xx_gcu_dmamask, .coherent_dma_mask = 0xffffffff, }, }; #endif /* CONFIG_PXA3xx */ #if defined(CONFIG_PXA3xx) static struct resource pxa3xx_resources_i2c_power[] = { { .start = 0x40f500c0, .end = 0x40f500d3, .flags = IORESOURCE_MEM, }, { .start = IRQ_PWRI2C, .end = IRQ_PWRI2C, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa3xx_device_i2c_power = { .name = "pxa3xx-pwri2c", .id = 1, .resource = pxa3xx_resources_i2c_power, .num_resources = ARRAY_SIZE(pxa3xx_resources_i2c_power), }; static struct resource pxa3xx_resources_nand[] = { [0] = { .start = 0x43100000, .end = 0x43100053, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_NAND, .end = IRQ_NAND, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for Data DMA */ .start = 97, .end = 97, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for Command DMA */ .start = 99, .end = 99, .flags = IORESOURCE_DMA, }, }; static u64 pxa3xx_nand_dma_mask = DMA_BIT_MASK(32); struct platform_device pxa3xx_device_nand = { .name = "pxa3xx-nand", .id = -1, .dev = { .dma_mask = &pxa3xx_nand_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .num_resources = ARRAY_SIZE(pxa3xx_resources_nand), .resource = pxa3xx_resources_nand, }; void __init pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info) { pxa_register_device(&pxa3xx_device_nand, info); } static u64 pxa3xx_ssp4_dma_mask = DMA_BIT_MASK(32); static struct resource pxa3xx_resource_ssp4[] = { [0] = { .start = 0x41a00000, .end = 0x41a0003f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_SSP4, .end = IRQ_SSP4, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for RX */ .start = 2, .end = 2, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for TX */ .start = 3, .end = 3, .flags = IORESOURCE_DMA, }, }; /* * PXA3xx SSP is basically equivalent to PXA27x. * However, we need to register the device by the correct name in order to * make the driver set the correct internal type, hence we provide specific * platform_devices for each of them. 
*/ struct platform_device pxa3xx_device_ssp1 = { .name = "pxa3xx-ssp", .id = 0, .dev = { .dma_mask = &pxa27x_ssp1_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa27x_resource_ssp1, .num_resources = ARRAY_SIZE(pxa27x_resource_ssp1), }; struct platform_device pxa3xx_device_ssp2 = { .name = "pxa3xx-ssp", .id = 1, .dev = { .dma_mask = &pxa27x_ssp2_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa27x_resource_ssp2, .num_resources = ARRAY_SIZE(pxa27x_resource_ssp2), }; struct platform_device pxa3xx_device_ssp3 = { .name = "pxa3xx-ssp", .id = 2, .dev = { .dma_mask = &pxa27x_ssp3_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa27x_resource_ssp3, .num_resources = ARRAY_SIZE(pxa27x_resource_ssp3), }; struct platform_device pxa3xx_device_ssp4 = { .name = "pxa3xx-ssp", .id = 3, .dev = { .dma_mask = &pxa3xx_ssp4_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa3xx_resource_ssp4, .num_resources = ARRAY_SIZE(pxa3xx_resource_ssp4), }; #endif /* CONFIG_PXA3xx */ struct resource pxa_resource_gpio[] = { { .start = 0x40e00000, .end = 0x40e0ffff, .flags = IORESOURCE_MEM, }, { .start = IRQ_GPIO0, .end = IRQ_GPIO0, .name = "gpio0", .flags = IORESOURCE_IRQ, }, { .start = IRQ_GPIO1, .end = IRQ_GPIO1, .name = "gpio1", .flags = IORESOURCE_IRQ, }, { .start = IRQ_GPIO_2_x, .end = IRQ_GPIO_2_x, .name = "gpio_mux", .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa25x_device_gpio = { #ifdef CONFIG_CPU_PXA26x .name = "pxa26x-gpio", #else .name = "pxa25x-gpio", #endif .id = -1, .num_resources = ARRAY_SIZE(pxa_resource_gpio), .resource = pxa_resource_gpio, }; struct platform_device pxa27x_device_gpio = { .name = "pxa27x-gpio", .id = -1, .num_resources = ARRAY_SIZE(pxa_resource_gpio), .resource = pxa_resource_gpio, }; struct platform_device pxa3xx_device_gpio = { .name = "pxa3xx-gpio", .id = -1, .num_resources = ARRAY_SIZE(pxa_resource_gpio), .resource = pxa_resource_gpio, }; struct platform_device pxa93x_device_gpio = { .name = "pxa93x-gpio", .id = -1, .num_resources = ARRAY_SIZE(pxa_resource_gpio), .resource = pxa_resource_gpio, }; /* pxa2xx-spi platform-device ID equals respective SSP platform-device ID + 1. * See comment in arch/arm/mach-pxa/ssp.c::ssp_probe() */ void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info) { struct platform_device *pd; pd = platform_device_alloc("pxa2xx-spi", id); if (pd == NULL) { printk(KERN_ERR "pxa2xx-spi: failed to allocate device id %d\n", id); return; } pd->dev.platform_data = info; platform_device_add(pd); }
gpl-2.0
jmw7912/wat-0016-kernel-2.6.37
net/8021q/vlan_netlink.c
1044
6458
/* * VLAN netlink control interface * * Copyright (c) 2007 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/if_vlan.h> #include <net/net_namespace.h> #include <net/netlink.h> #include <net/rtnetlink.h> #include "vlan.h" static const struct nla_policy vlan_policy[IFLA_VLAN_MAX + 1] = { [IFLA_VLAN_ID] = { .type = NLA_U16 }, [IFLA_VLAN_FLAGS] = { .len = sizeof(struct ifla_vlan_flags) }, [IFLA_VLAN_EGRESS_QOS] = { .type = NLA_NESTED }, [IFLA_VLAN_INGRESS_QOS] = { .type = NLA_NESTED }, }; static const struct nla_policy vlan_map_policy[IFLA_VLAN_QOS_MAX + 1] = { [IFLA_VLAN_QOS_MAPPING] = { .len = sizeof(struct ifla_vlan_qos_mapping) }, }; static inline int vlan_validate_qos_map(struct nlattr *attr) { if (!attr) return 0; return nla_validate_nested(attr, IFLA_VLAN_QOS_MAX, vlan_map_policy); } static int vlan_validate(struct nlattr *tb[], struct nlattr *data[]) { struct ifla_vlan_flags *flags; u16 id; int err; if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) return -EINVAL; if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) return -EADDRNOTAVAIL; } if (!data) return -EINVAL; if (data[IFLA_VLAN_ID]) { id = nla_get_u16(data[IFLA_VLAN_ID]); if (id >= VLAN_VID_MASK) return -ERANGE; } if (data[IFLA_VLAN_FLAGS]) { flags = nla_data(data[IFLA_VLAN_FLAGS]); if ((flags->flags & flags->mask) & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP | VLAN_FLAG_LOOSE_BINDING)) return -EINVAL; } err = vlan_validate_qos_map(data[IFLA_VLAN_INGRESS_QOS]); if (err < 0) return err; err = vlan_validate_qos_map(data[IFLA_VLAN_EGRESS_QOS]); if (err < 0) return err; return 0; } static int vlan_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct ifla_vlan_flags *flags; struct ifla_vlan_qos_mapping *m; struct nlattr *attr; int rem; if (data[IFLA_VLAN_FLAGS]) { flags = nla_data(data[IFLA_VLAN_FLAGS]); vlan_dev_change_flags(dev, flags->flags, flags->mask); } if (data[IFLA_VLAN_INGRESS_QOS]) { nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) { m = nla_data(attr); vlan_dev_set_ingress_priority(dev, m->to, m->from); } } if (data[IFLA_VLAN_EGRESS_QOS]) { nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) { m = nla_data(attr); vlan_dev_set_egress_priority(dev, m->from, m->to); } } return 0; } static int vlan_get_tx_queues(struct net *net, struct nlattr *tb[], unsigned int *num_tx_queues, unsigned int *real_num_tx_queues) { struct net_device *real_dev; if (!tb[IFLA_LINK]) return -EINVAL; real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK])); if (!real_dev) return -ENODEV; *num_tx_queues = real_dev->num_tx_queues; *real_num_tx_queues = real_dev->real_num_tx_queues; return 0; } static int vlan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct vlan_dev_info *vlan = vlan_dev_info(dev); struct net_device *real_dev; int err; if (!data[IFLA_VLAN_ID]) return -EINVAL; if (!tb[IFLA_LINK]) return -EINVAL; real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); if (!real_dev) return -ENODEV; vlan->vlan_id = nla_get_u16(data[IFLA_VLAN_ID]); vlan->real_dev = real_dev; vlan->flags = VLAN_FLAG_REORDER_HDR; err = vlan_check_real_dev(real_dev, vlan->vlan_id); if (err < 0) return err; if (!tb[IFLA_MTU]) dev->mtu = real_dev->mtu; else if (dev->mtu > real_dev->mtu) return 
-EINVAL; err = vlan_changelink(dev, tb, data); if (err < 0) return err; return register_vlan_dev(dev); } static inline size_t vlan_qos_map_size(unsigned int n) { if (n == 0) return 0; /* IFLA_VLAN_{EGRESS,INGRESS}_QOS + n * IFLA_VLAN_QOS_MAPPING */ return nla_total_size(sizeof(struct nlattr)) + nla_total_size(sizeof(struct ifla_vlan_qos_mapping)) * n; } static size_t vlan_get_size(const struct net_device *dev) { struct vlan_dev_info *vlan = vlan_dev_info(dev); return nla_total_size(2) + /* IFLA_VLAN_ID */ sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */ vlan_qos_map_size(vlan->nr_ingress_mappings) + vlan_qos_map_size(vlan->nr_egress_mappings); } static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct vlan_dev_info *vlan = vlan_dev_info(dev); struct vlan_priority_tci_mapping *pm; struct ifla_vlan_flags f; struct ifla_vlan_qos_mapping m; struct nlattr *nest; unsigned int i; NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_info(dev)->vlan_id); if (vlan->flags) { f.flags = vlan->flags; f.mask = ~0; NLA_PUT(skb, IFLA_VLAN_FLAGS, sizeof(f), &f); } if (vlan->nr_ingress_mappings) { nest = nla_nest_start(skb, IFLA_VLAN_INGRESS_QOS); if (nest == NULL) goto nla_put_failure; for (i = 0; i < ARRAY_SIZE(vlan->ingress_priority_map); i++) { if (!vlan->ingress_priority_map[i]) continue; m.from = i; m.to = vlan->ingress_priority_map[i]; NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING, sizeof(m), &m); } nla_nest_end(skb, nest); } if (vlan->nr_egress_mappings) { nest = nla_nest_start(skb, IFLA_VLAN_EGRESS_QOS); if (nest == NULL) goto nla_put_failure; for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) { for (pm = vlan->egress_priority_map[i]; pm; pm = pm->next) { if (!pm->vlan_qos) continue; m.from = pm->priority; m.to = (pm->vlan_qos >> 13) & 0x7; NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING, sizeof(m), &m); } } nla_nest_end(skb, nest); } return 0; nla_put_failure: return -EMSGSIZE; } struct rtnl_link_ops vlan_link_ops __read_mostly = { .kind = "vlan", .maxtype = IFLA_VLAN_MAX, .policy = vlan_policy, .priv_size = sizeof(struct vlan_dev_info), .get_tx_queues = vlan_get_tx_queues, .setup = vlan_setup, .validate = vlan_validate, .newlink = vlan_newlink, .changelink = vlan_changelink, .dellink = unregister_vlan_dev, .get_size = vlan_get_size, .fill_info = vlan_fill_info, }; int __init vlan_netlink_init(void) { return rtnl_link_register(&vlan_link_ops); } void __exit vlan_netlink_fini(void) { rtnl_link_unregister(&vlan_link_ops); } MODULE_ALIAS_RTNL_LINK("vlan");
gpl-2.0
lawnn/android_kernel_samsung_d2dcm
drivers/gpu/drm/radeon/atom.c
1300
35221
/* * Copyright 2008 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Author: Stanislaw Skowronek */ #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include <asm/unaligned.h> #define ATOM_DEBUG #include "atom.h" #include "atom-names.h" #include "atom-bits.h" #include "radeon.h" #define ATOM_COND_ABOVE 0 #define ATOM_COND_ABOVEOREQUAL 1 #define ATOM_COND_ALWAYS 2 #define ATOM_COND_BELOW 3 #define ATOM_COND_BELOWOREQUAL 4 #define ATOM_COND_EQUAL 5 #define ATOM_COND_NOTEQUAL 6 #define ATOM_PORT_ATI 0 #define ATOM_PORT_PCI 1 #define ATOM_PORT_SYSIO 2 #define ATOM_UNIT_MICROSEC 0 #define ATOM_UNIT_MILLISEC 1 #define PLL_INDEX 2 #define PLL_DATA 3 typedef struct { struct atom_context *ctx; uint32_t *ps, *ws; int ps_shift; uint16_t start; unsigned last_jump; unsigned long last_jump_jiffies; bool abort; } atom_exec_context; int atom_debug = 0; static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params); int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); static uint32_t atom_arg_mask[8] = { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, 0xFF000000 }; static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 }; static int atom_dst_to_src[8][4] = { /* translate destination alignment field to the source alignment encoding */ {0, 0, 0, 0}, {1, 2, 3, 0}, {1, 2, 3, 0}, {1, 2, 3, 0}, {4, 5, 6, 7}, {4, 5, 6, 7}, {4, 5, 6, 7}, {4, 5, 6, 7}, }; static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 }; static int debug_depth = 0; #ifdef ATOM_DEBUG static void debug_print_spaces(int n) { while (n--) printk(" "); } #define DEBUG(...) do if (atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0) #define SDEBUG(...) do if (atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0) #else #define DEBUG(...) do { } while (0) #define SDEBUG(...) 
do { } while (0) #endif static uint32_t atom_iio_execute(struct atom_context *ctx, int base, uint32_t index, uint32_t data) { struct radeon_device *rdev = ctx->card->dev->dev_private; uint32_t temp = 0xCDCDCDCD; while (1) switch (CU8(base)) { case ATOM_IIO_NOP: base++; break; case ATOM_IIO_READ: temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1)); base += 3; break; case ATOM_IIO_WRITE: if (rdev->family == CHIP_RV515) (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1)); ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp); base += 3; break; case ATOM_IIO_CLEAR: temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2)); base += 3; break; case ATOM_IIO_SET: temp |= (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2); base += 3; break; case ATOM_IIO_MOVE_INDEX: temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3)); temp |= ((index >> CU8(base + 2)) & (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3); base += 4; break; case ATOM_IIO_MOVE_DATA: temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3)); temp |= ((data >> CU8(base + 2)) & (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3); base += 4; break; case ATOM_IIO_MOVE_ATTR: temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3)); temp |= ((ctx-> io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 - CU8 (base + 1)))) << CU8(base + 3); base += 4; break; case ATOM_IIO_END: return temp; default: printk(KERN_INFO "Unknown IIO opcode.\n"); return 0; } } static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr, uint32_t *saved, int print) { uint32_t idx, val = 0xCDCDCDCD, align, arg; struct atom_context *gctx = ctx->ctx; arg = attr & 7; align = (attr >> 3) & 7; switch (arg) { case ATOM_ARG_REG: idx = U16(*ptr); (*ptr) += 2; if (print) DEBUG("REG[0x%04X]", idx); idx += gctx->reg_block; switch (gctx->io_mode) { case ATOM_IO_MM: val = gctx->card->reg_read(gctx->card, idx); break; case ATOM_IO_PCI: printk(KERN_INFO "PCI registers are not implemented.\n"); return 0; case ATOM_IO_SYSIO: printk(KERN_INFO "SYSIO registers are not implemented.\n"); return 0; default: if (!(gctx->io_mode & 0x80)) { printk(KERN_INFO "Bad IO mode.\n"); return 0; } if (!gctx->iio[gctx->io_mode & 0x7F]) { printk(KERN_INFO "Undefined indirect IO read method %d.\n", gctx->io_mode & 0x7F); return 0; } val = atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0x7F], idx, 0); } break; case ATOM_ARG_PS: idx = U8(*ptr); (*ptr)++; /* get_unaligned_le32 avoids unaligned accesses from atombios * tables, noticed on a DEC Alpha. 
*/ val = get_unaligned_le32((u32 *)&ctx->ps[idx]); if (print) DEBUG("PS[0x%02X,0x%04X]", idx, val); break; case ATOM_ARG_WS: idx = U8(*ptr); (*ptr)++; if (print) DEBUG("WS[0x%02X]", idx); switch (idx) { case ATOM_WS_QUOTIENT: val = gctx->divmul[0]; break; case ATOM_WS_REMAINDER: val = gctx->divmul[1]; break; case ATOM_WS_DATAPTR: val = gctx->data_block; break; case ATOM_WS_SHIFT: val = gctx->shift; break; case ATOM_WS_OR_MASK: val = 1 << gctx->shift; break; case ATOM_WS_AND_MASK: val = ~(1 << gctx->shift); break; case ATOM_WS_FB_WINDOW: val = gctx->fb_base; break; case ATOM_WS_ATTRIBUTES: val = gctx->io_attr; break; case ATOM_WS_REGPTR: val = gctx->reg_block; break; default: val = ctx->ws[idx]; } break; case ATOM_ARG_ID: idx = U16(*ptr); (*ptr) += 2; if (print) { if (gctx->data_block) DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block); else DEBUG("ID[0x%04X]", idx); } val = U32(idx + gctx->data_block); break; case ATOM_ARG_FB: idx = U8(*ptr); (*ptr)++; if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n", gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); val = 0; } else val = gctx->scratch[(gctx->fb_base / 4) + idx]; if (print) DEBUG("FB[0x%02X]", idx); break; case ATOM_ARG_IMM: switch (align) { case ATOM_SRC_DWORD: val = U32(*ptr); (*ptr) += 4; if (print) DEBUG("IMM 0x%08X\n", val); return val; case ATOM_SRC_WORD0: case ATOM_SRC_WORD8: case ATOM_SRC_WORD16: val = U16(*ptr); (*ptr) += 2; if (print) DEBUG("IMM 0x%04X\n", val); return val; case ATOM_SRC_BYTE0: case ATOM_SRC_BYTE8: case ATOM_SRC_BYTE16: case ATOM_SRC_BYTE24: val = U8(*ptr); (*ptr)++; if (print) DEBUG("IMM 0x%02X\n", val); return val; } return 0; case ATOM_ARG_PLL: idx = U8(*ptr); (*ptr)++; if (print) DEBUG("PLL[0x%02X]", idx); val = gctx->card->pll_read(gctx->card, idx); break; case ATOM_ARG_MC: idx = U8(*ptr); (*ptr)++; if (print) DEBUG("MC[0x%02X]", idx); val = gctx->card->mc_read(gctx->card, idx); break; } if (saved) *saved = val; val &= atom_arg_mask[align]; val >>= atom_arg_shift[align]; if (print) switch (align) { case ATOM_SRC_DWORD: DEBUG(".[31:0] -> 0x%08X\n", val); break; case ATOM_SRC_WORD0: DEBUG(".[15:0] -> 0x%04X\n", val); break; case ATOM_SRC_WORD8: DEBUG(".[23:8] -> 0x%04X\n", val); break; case ATOM_SRC_WORD16: DEBUG(".[31:16] -> 0x%04X\n", val); break; case ATOM_SRC_BYTE0: DEBUG(".[7:0] -> 0x%02X\n", val); break; case ATOM_SRC_BYTE8: DEBUG(".[15:8] -> 0x%02X\n", val); break; case ATOM_SRC_BYTE16: DEBUG(".[23:16] -> 0x%02X\n", val); break; case ATOM_SRC_BYTE24: DEBUG(".[31:24] -> 0x%02X\n", val); break; } return val; } static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr) { uint32_t align = (attr >> 3) & 7, arg = attr & 7; switch (arg) { case ATOM_ARG_REG: case ATOM_ARG_ID: (*ptr) += 2; break; case ATOM_ARG_PLL: case ATOM_ARG_MC: case ATOM_ARG_PS: case ATOM_ARG_WS: case ATOM_ARG_FB: (*ptr)++; break; case ATOM_ARG_IMM: switch (align) { case ATOM_SRC_DWORD: (*ptr) += 4; return; case ATOM_SRC_WORD0: case ATOM_SRC_WORD8: case ATOM_SRC_WORD16: (*ptr) += 2; return; case ATOM_SRC_BYTE0: case ATOM_SRC_BYTE8: case ATOM_SRC_BYTE16: case ATOM_SRC_BYTE24: (*ptr)++; return; } return; } } static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr) { return atom_get_src_int(ctx, attr, ptr, NULL, 1); } static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr) { uint32_t val = 0xCDCDCDCD; switch (align) { case ATOM_SRC_DWORD: val = U32(*ptr); (*ptr) += 4; break; case ATOM_SRC_WORD0: case 
ATOM_SRC_WORD8: case ATOM_SRC_WORD16: val = U16(*ptr); (*ptr) += 2; break; case ATOM_SRC_BYTE0: case ATOM_SRC_BYTE8: case ATOM_SRC_BYTE16: case ATOM_SRC_BYTE24: val = U8(*ptr); (*ptr)++; break; } return val; } static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr, uint32_t *saved, int print) { return atom_get_src_int(ctx, arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3] << 3, ptr, saved, print); } static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr) { atom_skip_src_int(ctx, arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3] << 3, ptr); } static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr, uint32_t val, uint32_t saved) { uint32_t align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val = val, idx; struct atom_context *gctx = ctx->ctx; old_val &= atom_arg_mask[align] >> atom_arg_shift[align]; val <<= atom_arg_shift[align]; val &= atom_arg_mask[align]; saved &= ~atom_arg_mask[align]; val |= saved; switch (arg) { case ATOM_ARG_REG: idx = U16(*ptr); (*ptr) += 2; DEBUG("REG[0x%04X]", idx); idx += gctx->reg_block; switch (gctx->io_mode) { case ATOM_IO_MM: if (idx == 0) gctx->card->reg_write(gctx->card, idx, val << 2); else gctx->card->reg_write(gctx->card, idx, val); break; case ATOM_IO_PCI: printk(KERN_INFO "PCI registers are not implemented.\n"); return; case ATOM_IO_SYSIO: printk(KERN_INFO "SYSIO registers are not implemented.\n"); return; default: if (!(gctx->io_mode & 0x80)) { printk(KERN_INFO "Bad IO mode.\n"); return; } if (!gctx->iio[gctx->io_mode & 0xFF]) { printk(KERN_INFO "Undefined indirect IO write method %d.\n", gctx->io_mode & 0x7F); return; } atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF], idx, val); } break; case ATOM_ARG_PS: idx = U8(*ptr); (*ptr)++; DEBUG("PS[0x%02X]", idx); ctx->ps[idx] = cpu_to_le32(val); break; case ATOM_ARG_WS: idx = U8(*ptr); (*ptr)++; DEBUG("WS[0x%02X]", idx); switch (idx) { case ATOM_WS_QUOTIENT: gctx->divmul[0] = val; break; case ATOM_WS_REMAINDER: gctx->divmul[1] = val; break; case ATOM_WS_DATAPTR: gctx->data_block = val; break; case ATOM_WS_SHIFT: gctx->shift = val; break; case ATOM_WS_OR_MASK: case ATOM_WS_AND_MASK: break; case ATOM_WS_FB_WINDOW: gctx->fb_base = val; break; case ATOM_WS_ATTRIBUTES: gctx->io_attr = val; break; case ATOM_WS_REGPTR: gctx->reg_block = val; break; default: ctx->ws[idx] = val; } break; case ATOM_ARG_FB: idx = U8(*ptr); (*ptr)++; if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. 
%d\n", gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); } else gctx->scratch[(gctx->fb_base / 4) + idx] = val; DEBUG("FB[0x%02X]", idx); break; case ATOM_ARG_PLL: idx = U8(*ptr); (*ptr)++; DEBUG("PLL[0x%02X]", idx); gctx->card->pll_write(gctx->card, idx, val); break; case ATOM_ARG_MC: idx = U8(*ptr); (*ptr)++; DEBUG("MC[0x%02X]", idx); gctx->card->mc_write(gctx->card, idx, val); return; } switch (align) { case ATOM_SRC_DWORD: DEBUG(".[31:0] <- 0x%08X\n", old_val); break; case ATOM_SRC_WORD0: DEBUG(".[15:0] <- 0x%04X\n", old_val); break; case ATOM_SRC_WORD8: DEBUG(".[23:8] <- 0x%04X\n", old_val); break; case ATOM_SRC_WORD16: DEBUG(".[31:16] <- 0x%04X\n", old_val); break; case ATOM_SRC_BYTE0: DEBUG(".[7:0] <- 0x%02X\n", old_val); break; case ATOM_SRC_BYTE8: DEBUG(".[15:8] <- 0x%02X\n", old_val); break; case ATOM_SRC_BYTE16: DEBUG(".[23:16] <- 0x%02X\n", old_val); break; case ATOM_SRC_BYTE24: DEBUG(".[31:24] <- 0x%02X\n", old_val); break; } } static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t dst, src, saved; int dptr = *ptr; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); SDEBUG(" src: "); src = atom_get_src(ctx, attr, ptr); dst += src; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t dst, src, saved; int dptr = *ptr; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); SDEBUG(" src: "); src = atom_get_src(ctx, attr, ptr); dst &= src; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg) { printk("ATOM BIOS beeped!\n"); } static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) { int idx = U8((*ptr)++); int r = 0; if (idx < ATOM_TABLE_NAMES_CNT) SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]); else SDEBUG(" table: %d\n", idx); if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); if (r) { ctx->abort = true; } } static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t saved; int dptr = *ptr; attr &= 0x38; attr |= atom_def_dst[attr >> 3] << 6; atom_get_dst(ctx, arg, attr, ptr, &saved, 0); SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, 0, saved); } static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t dst, src; SDEBUG(" src1: "); dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); SDEBUG(" src2: "); src = atom_get_src(ctx, attr, ptr); ctx->ctx->cs_equal = (dst == src); ctx->ctx->cs_above = (dst > src); SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE", ctx->ctx->cs_above ? 
"GT" : "LE"); } static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg) { unsigned count = U8((*ptr)++); SDEBUG(" count: %d\n", count); if (arg == ATOM_UNIT_MICROSEC) udelay(count); else if (!drm_can_sleep()) mdelay(count); else msleep(count); } static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t dst, src; SDEBUG(" src1: "); dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); SDEBUG(" src2: "); src = atom_get_src(ctx, attr, ptr); if (src != 0) { ctx->ctx->divmul[0] = dst / src; ctx->ctx->divmul[1] = dst % src; } else { ctx->ctx->divmul[0] = 0; ctx->ctx->divmul[1] = 0; } } static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg) { /* functionally, a nop */ } static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) { int execute = 0, target = U16(*ptr); unsigned long cjiffies; (*ptr) += 2; switch (arg) { case ATOM_COND_ABOVE: execute = ctx->ctx->cs_above; break; case ATOM_COND_ABOVEOREQUAL: execute = ctx->ctx->cs_above || ctx->ctx->cs_equal; break; case ATOM_COND_ALWAYS: execute = 1; break; case ATOM_COND_BELOW: execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal); break; case ATOM_COND_BELOWOREQUAL: execute = !ctx->ctx->cs_above; break; case ATOM_COND_EQUAL: execute = ctx->ctx->cs_equal; break; case ATOM_COND_NOTEQUAL: execute = !ctx->ctx->cs_equal; break; } if (arg != ATOM_COND_ALWAYS) SDEBUG(" taken: %s\n", execute ? "yes" : "no"); SDEBUG(" target: 0x%04X\n", target); if (execute) { if (ctx->last_jump == (ctx->start + target)) { cjiffies = jiffies; if (time_after(cjiffies, ctx->last_jump_jiffies)) { cjiffies -= ctx->last_jump_jiffies; if ((jiffies_to_msecs(cjiffies) > 5000)) { DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n"); ctx->abort = true; } } else { /* jiffies wrap around we will just wait a little longer */ ctx->last_jump_jiffies = jiffies; } } else { ctx->last_jump = ctx->start + target; ctx->last_jump_jiffies = jiffies; } *ptr = ctx->start + target; } } static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t dst, mask, src, saved; int dptr = *ptr; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr); SDEBUG(" mask: 0x%08x", mask); SDEBUG(" src: "); src = atom_get_src(ctx, attr, ptr); dst &= mask; dst |= src; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t src, saved; int dptr = *ptr; if (((attr >> 3) & 7) != ATOM_SRC_DWORD) atom_get_dst(ctx, arg, attr, ptr, &saved, 0); else { atom_skip_dst(ctx, arg, attr, ptr); saved = 0xCDCDCDCD; } SDEBUG(" src: "); src = atom_get_src(ctx, attr, ptr); SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, src, saved); } static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t dst, src; SDEBUG(" src1: "); dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); SDEBUG(" src2: "); src = atom_get_src(ctx, attr, ptr); ctx->ctx->divmul[0] = dst * src; } static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg) { /* nothing */ } static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t dst, src, saved; int dptr = *ptr; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); SDEBUG(" src: "); src = atom_get_src(ctx, attr, ptr); dst |= src; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, 
&dptr, dst, saved); } static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg) { uint8_t val = U8((*ptr)++); SDEBUG("POST card output: 0x%02X\n", val); } static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg) { printk(KERN_INFO "unimplemented!\n"); } static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg) { printk(KERN_INFO "unimplemented!\n"); } static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg) { printk(KERN_INFO "unimplemented!\n"); } static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg) { int idx = U8(*ptr); (*ptr)++; SDEBUG(" block: %d\n", idx); if (!idx) ctx->ctx->data_block = 0; else if (idx == 255) ctx->ctx->data_block = ctx->start; else ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx); SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block); } static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); SDEBUG(" fb_base: "); ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr); } static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg) { int port; switch (arg) { case ATOM_PORT_ATI: port = U16(*ptr); if (port < ATOM_IO_NAMES_CNT) SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]); else SDEBUG(" port: %d\n", port); if (!port) ctx->ctx->io_mode = ATOM_IO_MM; else ctx->ctx->io_mode = ATOM_IO_IIO | port; (*ptr) += 2; break; case ATOM_PORT_PCI: ctx->ctx->io_mode = ATOM_IO_PCI; (*ptr)++; break; case ATOM_PORT_SYSIO: ctx->ctx->io_mode = ATOM_IO_SYSIO; (*ptr)++; break; } } static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg) { ctx->ctx->reg_block = U16(*ptr); (*ptr) += 2; SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block); } static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++), shift; uint32_t saved, dst; int dptr = *ptr; attr &= 0x38; attr |= atom_def_dst[attr >> 3] << 6; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr); SDEBUG(" shift: %d\n", shift); dst <<= shift; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++), shift; uint32_t saved, dst; int dptr = *ptr; attr &= 0x38; attr |= atom_def_dst[attr >> 3] << 6; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr); SDEBUG(" shift: %d\n", shift); dst >>= shift; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++), shift; uint32_t saved, dst; int dptr = *ptr; uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); /* op needs to full dst value */ dst = saved; shift = atom_get_src(ctx, attr, ptr); SDEBUG(" shift: %d\n", shift); dst <<= shift; dst &= atom_arg_mask[dst_align]; dst >>= atom_arg_shift[dst_align]; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++), shift; uint32_t saved, dst; int dptr = *ptr; uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); /* op needs to full dst value */ dst = saved; shift = atom_get_src(ctx, attr, ptr); SDEBUG(" shift: %d\n", 
shift); dst >>= shift; dst &= atom_arg_mask[dst_align]; dst >>= atom_arg_shift[dst_align]; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t dst, src, saved; int dptr = *ptr; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); SDEBUG(" src: "); src = atom_get_src(ctx, attr, ptr); dst -= src; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t src, val, target; SDEBUG(" switch: "); src = atom_get_src(ctx, attr, ptr); while (U16(*ptr) != ATOM_CASE_END) if (U8(*ptr) == ATOM_CASE_MAGIC) { (*ptr)++; SDEBUG(" case: "); val = atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM, ptr); target = U16(*ptr); if (val == src) { SDEBUG(" target: %04X\n", target); *ptr = ctx->start + target; return; } (*ptr) += 2; } else { printk(KERN_INFO "Bad case.\n"); return; } (*ptr) += 2; } static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t dst, src; SDEBUG(" src1: "); dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); SDEBUG(" src2: "); src = atom_get_src(ctx, attr, ptr); ctx->ctx->cs_equal = ((dst & src) == 0); SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE"); } static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); uint32_t dst, src, saved; int dptr = *ptr; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); SDEBUG(" src: "); src = atom_get_src(ctx, attr, ptr); dst ^= src; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg) { printk(KERN_INFO "unimplemented!\n"); } static struct { void (*func) (atom_exec_context *, int *, int); int arg; } opcode_table[ATOM_OP_CNT] = { { NULL, 0}, { atom_op_move, ATOM_ARG_REG}, { atom_op_move, ATOM_ARG_PS}, { atom_op_move, ATOM_ARG_WS}, { atom_op_move, ATOM_ARG_FB}, { atom_op_move, ATOM_ARG_PLL}, { atom_op_move, ATOM_ARG_MC}, { atom_op_and, ATOM_ARG_REG}, { atom_op_and, ATOM_ARG_PS}, { atom_op_and, ATOM_ARG_WS}, { atom_op_and, ATOM_ARG_FB}, { atom_op_and, ATOM_ARG_PLL}, { atom_op_and, ATOM_ARG_MC}, { atom_op_or, ATOM_ARG_REG}, { atom_op_or, ATOM_ARG_PS}, { atom_op_or, ATOM_ARG_WS}, { atom_op_or, ATOM_ARG_FB}, { atom_op_or, ATOM_ARG_PLL}, { atom_op_or, ATOM_ARG_MC}, { atom_op_shift_left, ATOM_ARG_REG}, { atom_op_shift_left, ATOM_ARG_PS}, { atom_op_shift_left, ATOM_ARG_WS}, { atom_op_shift_left, ATOM_ARG_FB}, { atom_op_shift_left, ATOM_ARG_PLL}, { atom_op_shift_left, ATOM_ARG_MC}, { atom_op_shift_right, ATOM_ARG_REG}, { atom_op_shift_right, ATOM_ARG_PS}, { atom_op_shift_right, ATOM_ARG_WS}, { atom_op_shift_right, ATOM_ARG_FB}, { atom_op_shift_right, ATOM_ARG_PLL}, { atom_op_shift_right, ATOM_ARG_MC}, { atom_op_mul, ATOM_ARG_REG}, { atom_op_mul, ATOM_ARG_PS}, { atom_op_mul, ATOM_ARG_WS}, { atom_op_mul, ATOM_ARG_FB}, { atom_op_mul, ATOM_ARG_PLL}, { atom_op_mul, ATOM_ARG_MC}, { atom_op_div, ATOM_ARG_REG}, { atom_op_div, ATOM_ARG_PS}, { atom_op_div, ATOM_ARG_WS}, { atom_op_div, ATOM_ARG_FB}, { atom_op_div, ATOM_ARG_PLL}, { atom_op_div, ATOM_ARG_MC}, { atom_op_add, ATOM_ARG_REG}, { atom_op_add, ATOM_ARG_PS}, { atom_op_add, ATOM_ARG_WS}, { atom_op_add, ATOM_ARG_FB}, { atom_op_add, ATOM_ARG_PLL}, { atom_op_add, ATOM_ARG_MC}, { atom_op_sub, ATOM_ARG_REG}, { atom_op_sub, ATOM_ARG_PS}, { atom_op_sub, ATOM_ARG_WS}, { 
atom_op_sub, ATOM_ARG_FB}, { atom_op_sub, ATOM_ARG_PLL}, { atom_op_sub, ATOM_ARG_MC}, { atom_op_setport, ATOM_PORT_ATI}, { atom_op_setport, ATOM_PORT_PCI}, { atom_op_setport, ATOM_PORT_SYSIO}, { atom_op_setregblock, 0}, { atom_op_setfbbase, 0}, { atom_op_compare, ATOM_ARG_REG}, { atom_op_compare, ATOM_ARG_PS}, { atom_op_compare, ATOM_ARG_WS}, { atom_op_compare, ATOM_ARG_FB}, { atom_op_compare, ATOM_ARG_PLL}, { atom_op_compare, ATOM_ARG_MC}, { atom_op_switch, 0}, { atom_op_jump, ATOM_COND_ALWAYS}, { atom_op_jump, ATOM_COND_EQUAL}, { atom_op_jump, ATOM_COND_BELOW}, { atom_op_jump, ATOM_COND_ABOVE}, { atom_op_jump, ATOM_COND_BELOWOREQUAL}, { atom_op_jump, ATOM_COND_ABOVEOREQUAL}, { atom_op_jump, ATOM_COND_NOTEQUAL}, { atom_op_test, ATOM_ARG_REG}, { atom_op_test, ATOM_ARG_PS}, { atom_op_test, ATOM_ARG_WS}, { atom_op_test, ATOM_ARG_FB}, { atom_op_test, ATOM_ARG_PLL}, { atom_op_test, ATOM_ARG_MC}, { atom_op_delay, ATOM_UNIT_MILLISEC}, { atom_op_delay, ATOM_UNIT_MICROSEC}, { atom_op_calltable, 0}, { atom_op_repeat, 0}, { atom_op_clear, ATOM_ARG_REG}, { atom_op_clear, ATOM_ARG_PS}, { atom_op_clear, ATOM_ARG_WS}, { atom_op_clear, ATOM_ARG_FB}, { atom_op_clear, ATOM_ARG_PLL}, { atom_op_clear, ATOM_ARG_MC}, { atom_op_nop, 0}, { atom_op_eot, 0}, { atom_op_mask, ATOM_ARG_REG}, { atom_op_mask, ATOM_ARG_PS}, { atom_op_mask, ATOM_ARG_WS}, { atom_op_mask, ATOM_ARG_FB}, { atom_op_mask, ATOM_ARG_PLL}, { atom_op_mask, ATOM_ARG_MC}, { atom_op_postcard, 0}, { atom_op_beep, 0}, { atom_op_savereg, 0}, { atom_op_restorereg, 0}, { atom_op_setdatablock, 0}, { atom_op_xor, ATOM_ARG_REG}, { atom_op_xor, ATOM_ARG_PS}, { atom_op_xor, ATOM_ARG_WS}, { atom_op_xor, ATOM_ARG_FB}, { atom_op_xor, ATOM_ARG_PLL}, { atom_op_xor, ATOM_ARG_MC}, { atom_op_shl, ATOM_ARG_REG}, { atom_op_shl, ATOM_ARG_PS}, { atom_op_shl, ATOM_ARG_WS}, { atom_op_shl, ATOM_ARG_FB}, { atom_op_shl, ATOM_ARG_PLL}, { atom_op_shl, ATOM_ARG_MC}, { atom_op_shr, ATOM_ARG_REG}, { atom_op_shr, ATOM_ARG_PS}, { atom_op_shr, ATOM_ARG_WS}, { atom_op_shr, ATOM_ARG_FB}, { atom_op_shr, ATOM_ARG_PLL}, { atom_op_shr, ATOM_ARG_MC}, { atom_op_debug, 0},}; static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params) { int base = CU16(ctx->cmd_table + 4 + 2 * index); int len, ws, ps, ptr; unsigned char op; atom_exec_context ectx; int ret = 0; if (!base) return -EINVAL; len = CU16(base + ATOM_CT_SIZE_PTR); ws = CU8(base + ATOM_CT_WS_PTR); ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK; ptr = base + ATOM_CT_CODE_PTR; SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps); ectx.ctx = ctx; ectx.ps_shift = ps / 4; ectx.start = base; ectx.ps = params; ectx.abort = false; ectx.last_jump = 0; if (ws) ectx.ws = kzalloc(4 * ws, GFP_KERNEL); else ectx.ws = NULL; debug_depth++; while (1) { op = CU8(ptr++); if (op < ATOM_OP_NAMES_CNT) SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1); else SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1); if (ectx.abort) { DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n", base, len, ws, ps, ptr - 1); ret = -EINVAL; goto free; } if (op < ATOM_OP_CNT && op > 0) opcode_table[op].func(&ectx, &ptr, opcode_table[op].arg); else break; if (op == ATOM_OP_EOT) break; } debug_depth--; SDEBUG("<<\n"); free: if (ws) kfree(ectx.ws); return ret; } int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) { int r; mutex_lock(&ctx->mutex); /* reset data block */ ctx->data_block = 0; /* reset reg block */ ctx->reg_block = 0; /* reset fb window */ ctx->fb_base = 0; /* reset io mode */ 
ctx->io_mode = ATOM_IO_MM; /* reset divmul */ ctx->divmul[0] = 0; ctx->divmul[1] = 0; r = atom_execute_table_locked(ctx, index, params); mutex_unlock(&ctx->mutex); return r; } static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; static void atom_index_iio(struct atom_context *ctx, int base) { ctx->iio = kzalloc(2 * 256, GFP_KERNEL); while (CU8(base) == ATOM_IIO_START) { ctx->iio[CU8(base + 1)] = base + 2; base += 2; while (CU8(base) != ATOM_IIO_END) base += atom_iio_len[CU8(base)]; base += 3; } } struct atom_context *atom_parse(struct card_info *card, void *bios) { int base; struct atom_context *ctx = kzalloc(sizeof(struct atom_context), GFP_KERNEL); char *str; char name[512]; int i; if (!ctx) return NULL; ctx->card = card; ctx->bios = bios; if (CU16(0) != ATOM_BIOS_MAGIC) { printk(KERN_INFO "Invalid BIOS magic.\n"); kfree(ctx); return NULL; } if (strncmp (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC, strlen(ATOM_ATI_MAGIC))) { printk(KERN_INFO "Invalid ATI magic.\n"); kfree(ctx); return NULL; } base = CU16(ATOM_ROM_TABLE_PTR); if (strncmp (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC, strlen(ATOM_ROM_MAGIC))) { printk(KERN_INFO "Invalid ATOM magic.\n"); kfree(ctx); return NULL; } ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR); ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR); atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4); str = CSTR(CU16(base + ATOM_ROM_MSG_PTR)); while (*str && ((*str == '\n') || (*str == '\r'))) str++; /* name string isn't always 0 terminated */ for (i = 0; i < 511; i++) { name[i] = str[i]; if (name[i] < '.' || name[i] > 'z') { name[i] = 0; break; } } printk(KERN_INFO "ATOM BIOS: %s\n", name); return ctx; } int atom_asic_init(struct atom_context *ctx) { struct radeon_device *rdev = ctx->card->dev->dev_private; int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR); uint32_t ps[16]; int ret; memset(ps, 0, 64); ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR)); ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR)); if (!ps[0] || !ps[1]) return 1; if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) return 1; ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps); if (ret) return ret; memset(ps, 0, 64); if (rdev->family < CHIP_R600) { if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL)) atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps); } return ret; } void atom_destroy(struct atom_context *ctx) { if (ctx->iio) kfree(ctx->iio); kfree(ctx); } bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t * size, uint8_t * frev, uint8_t * crev, uint16_t * data_start) { int offset = index * 2 + 4; int idx = CU16(ctx->data_table + offset); u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4); if (!mdt[index]) return false; if (size) *size = CU16(idx); if (frev) *frev = CU8(idx + 2); if (crev) *crev = CU8(idx + 3); *data_start = idx; return true; } bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev, uint8_t * crev) { int offset = index * 2 + 4; int idx = CU16(ctx->cmd_table + offset); u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4); if (!mct[index]) return false; if (frev) *frev = CU8(idx + 2); if (crev) *crev = CU8(idx + 3); return true; } int atom_allocate_fb_scratch(struct atom_context *ctx) { int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware); uint16_t data_offset; int usage_bytes = 0; struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage; if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); 
DRM_DEBUG("atom firmware requested %08x %dkb\n", le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware), le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb)); usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024; } ctx->scratch_size_bytes = 0; if (usage_bytes == 0) usage_bytes = 20 * 1024; /* allocate some scratch memory */ ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL); if (!ctx->scratch) return -ENOMEM; ctx->scratch_size_bytes = usage_bytes; return 0; }
gpl-2.0
telf/TDR_watchdog_RFC_1
drivers/hid/hid-tivo.c
1556
2327
/*
 * HID driver for TiVo Slide Bluetooth remote
 *
 * Copyright (c) 2011 Jarod Wilson <jarod@redhat.com>
 * based on the hid-topseed driver, which is in turn, based on hid-cherry...
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>

#include "hid-ids.h"

#define HID_UP_TIVOVENDOR	0xffff0000
#define tivo_map_key_clear(c)	hid_map_usage_clear(hi, usage, bit, max, \
					EV_KEY, (c))

static int tivo_input_mapping(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	switch (usage->hid & HID_USAGE_PAGE) {
	case HID_UP_TIVOVENDOR:
		switch (usage->hid & HID_USAGE) {
		/* TiVo button */
		case 0x3d:
			tivo_map_key_clear(KEY_MEDIA);
			break;
		/* Live TV */
		case 0x3e:
			tivo_map_key_clear(KEY_TV);
			break;
		/* Red thumbs down */
		case 0x41:
			tivo_map_key_clear(KEY_KPMINUS);
			break;
		/* Green thumbs up */
		case 0x42:
			tivo_map_key_clear(KEY_KPPLUS);
			break;
		default:
			return 0;
		}
		break;
	case HID_UP_CONSUMER:
		switch (usage->hid & HID_USAGE) {
		/* Enter/Last (default mapping: KEY_LAST) */
		case 0x083:
			tivo_map_key_clear(KEY_ENTER);
			break;
		/* Info (default mapping: KEY_PROPS) */
		case 0x209:
			tivo_map_key_clear(KEY_INFO);
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* This means we found a matching mapping here, else, look in the
	 * standard hid mappings in hid-input.c */
	return 1;
}

static const struct hid_device_id tivo_devices[] = {
	/* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */
	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
	{ }
};
MODULE_DEVICE_TABLE(hid, tivo_devices);

static struct hid_driver tivo_driver = {
	.name = "tivo_slide",
	.id_table = tivo_devices,
	.input_mapping = tivo_input_mapping,
};
module_hid_driver(tivo_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
gpl-2.0
adrianovalente/linux-extras
drivers/mtd/maps/dc21285.c
1812
5944
/*
 * MTD map driver for flash on the DC21285 (the StrongARM-110 companion chip)
 *
 * (C) 2000 Nicolas Pitre <nico@fluxnic.net>
 *
 * This code is GPL
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>

#include <asm/io.h>
#include <asm/hardware/dec21285.h>
#include <asm/mach-types.h>

static struct mtd_info *dc21285_mtd;

#ifdef CONFIG_ARCH_NETWINDER
/*
 * This is really ugly, but it seems to be the only
 * reliable way to do it, as the cpld state machine
 * is unpredictable. So we have a 25us penalty per
 * write access.
 */
static void nw_en_write(void)
{
	unsigned long flags;

	/*
	 * we want to write a bit pattern XXX1 to Xilinx to enable
	 * the write gate, which will be open for about the next 2ms.
	 */
	spin_lock_irqsave(&nw_gpio_lock, flags);
	nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
	spin_unlock_irqrestore(&nw_gpio_lock, flags);

	/*
	 * let the ISA bus catch on...
	 */
	udelay(25);
}
#else
#define nw_en_write() do { } while (0)
#endif

static map_word dc21285_read8(struct map_info *map, unsigned long ofs)
{
	map_word val;
	val.x[0] = *(uint8_t*)(map->virt + ofs);
	return val;
}

static map_word dc21285_read16(struct map_info *map, unsigned long ofs)
{
	map_word val;
	val.x[0] = *(uint16_t*)(map->virt + ofs);
	return val;
}

static map_word dc21285_read32(struct map_info *map, unsigned long ofs)
{
	map_word val;
	val.x[0] = *(uint32_t*)(map->virt + ofs);
	return val;
}

static void dc21285_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
	memcpy(to, (void*)(map->virt + from), len);
}

static void dc21285_write8(struct map_info *map, const map_word d, unsigned long adr)
{
	if (machine_is_netwinder())
		nw_en_write();
	*CSR_ROMWRITEREG = adr & 3;
	adr &= ~3;
	*(uint8_t*)(map->virt + adr) = d.x[0];
}

static void dc21285_write16(struct map_info *map, const map_word d, unsigned long adr)
{
	if (machine_is_netwinder())
		nw_en_write();
	*CSR_ROMWRITEREG = adr & 3;
	adr &= ~3;
	*(uint16_t*)(map->virt + adr) = d.x[0];
}

static void dc21285_write32(struct map_info *map, const map_word d, unsigned long adr)
{
	if (machine_is_netwinder())
		nw_en_write();
	*(uint32_t*)(map->virt + adr) = d.x[0];
}

static void dc21285_copy_to_32(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
	while (len > 0) {
		map_word d;
		d.x[0] = *((uint32_t*)from);
		dc21285_write32(map, d, to);
		from += 4;
		to += 4;
		len -= 4;
	}
}

static void dc21285_copy_to_16(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
	while (len > 0) {
		map_word d;
		d.x[0] = *((uint16_t*)from);
		dc21285_write16(map, d, to);
		from += 2;
		to += 2;
		len -= 2;
	}
}

static void dc21285_copy_to_8(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
	map_word d;
	d.x[0] = *((uint8_t*)from);
	dc21285_write8(map, d, to);
	from++;
	to++;
	len--;
}

static struct map_info dc21285_map = {
	.name = "DC21285 flash",
	.phys = NO_XIP,
	.size = 16*1024*1024,
	.copy_from = dc21285_copy_from,
};

/* Partition stuff */
#ifdef CONFIG_MTD_PARTITIONS
static struct mtd_partition *dc21285_parts;
static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
#endif

static int __init init_dc21285(void)
{
#ifdef CONFIG_MTD_PARTITIONS
	int nrparts;
#endif

	/* Determine bankwidth */
	switch (*CSR_SA110_CNTL & (3<<14)) {
	case SA110_CNTL_ROMWIDTH_8:
		dc21285_map.bankwidth = 1;
		dc21285_map.read = dc21285_read8;
		dc21285_map.write = dc21285_write8;
		dc21285_map.copy_to = dc21285_copy_to_8;
		break;
	case SA110_CNTL_ROMWIDTH_16:
		dc21285_map.bankwidth = 2;
		dc21285_map.read = dc21285_read16;
		dc21285_map.write = dc21285_write16;
		dc21285_map.copy_to = dc21285_copy_to_16;
		break;
	case SA110_CNTL_ROMWIDTH_32:
		dc21285_map.bankwidth = 4;
		dc21285_map.read = dc21285_read32;
		dc21285_map.write = dc21285_write32;
		dc21285_map.copy_to = dc21285_copy_to_32;
		break;
	default:
		printk (KERN_ERR "DC21285 flash: undefined bankwidth\n");
		return -ENXIO;
	}
	printk (KERN_NOTICE "DC21285 flash support (%d-bit bankwidth)\n",
		dc21285_map.bankwidth*8);

	/* Let's map the flash area */
	dc21285_map.virt = ioremap(DC21285_FLASH, 16*1024*1024);
	if (!dc21285_map.virt) {
		printk("Failed to ioremap\n");
		return -EIO;
	}

	if (machine_is_ebsa285()) {
		dc21285_mtd = do_map_probe("cfi_probe", &dc21285_map);
	} else {
		dc21285_mtd = do_map_probe("jedec_probe", &dc21285_map);
	}

	if (!dc21285_mtd) {
		iounmap(dc21285_map.virt);
		return -ENXIO;
	}

	dc21285_mtd->owner = THIS_MODULE;

#ifdef CONFIG_MTD_PARTITIONS
	nrparts = parse_mtd_partitions(dc21285_mtd, probes, &dc21285_parts, 0);
	if (nrparts > 0)
		add_mtd_partitions(dc21285_mtd, dc21285_parts, nrparts);
	else
#endif
		add_mtd_device(dc21285_mtd);

	if(machine_is_ebsa285()) {
		/*
		 * Flash timing is determined with bits 19-16 of the
		 * CSR_SA110_CNTL. The value is the number of wait cycles, or
		 * 0 for 16 cycles (the default). Cycles are 20 ns.
		 * Here we use 7 for 140 ns flash chips.
		 */
		/* access time */
		*CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x000f0000) | (7 << 16));
		/* burst time */
		*CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x00f00000) | (7 << 20));
		/* tristate time */
		*CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x0f000000) | (7 << 24));
	}

	return 0;
}

static void __exit cleanup_dc21285(void)
{
#ifdef CONFIG_MTD_PARTITIONS
	if (dc21285_parts) {
		del_mtd_partitions(dc21285_mtd);
		kfree(dc21285_parts);
	} else
#endif
		del_mtd_device(dc21285_mtd);

	map_destroy(dc21285_mtd);
	iounmap(dc21285_map.virt);
}

module_init(init_dc21285);
module_exit(cleanup_dc21285);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net>");
MODULE_DESCRIPTION("MTD map driver for DC21285 boards");
gpl-2.0
aospl/kernel_samsung_smdk4412
drivers/video/omap2/displays/panel-taal.c
2068
40185
/* * Taal DSI command mode panel * * Copyright (C) 2009 Nokia Corporation * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ /*#define DEBUG*/ #include <linux/module.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/jiffies.h> #include <linux/sched.h> #include <linux/backlight.h> #include <linux/fb.h> #include <linux/interrupt.h> #include <linux/gpio.h> #include <linux/workqueue.h> #include <linux/slab.h> #include <linux/regulator/consumer.h> #include <linux/mutex.h> #include <video/omapdss.h> #include <video/omap-panel-nokia-dsi.h> /* DSI Virtual channel. Hardcoded for now. */ #define TCH 0 #define DCS_READ_NUM_ERRORS 0x05 #define DCS_READ_POWER_MODE 0x0a #define DCS_READ_MADCTL 0x0b #define DCS_READ_PIXEL_FORMAT 0x0c #define DCS_RDDSDR 0x0f #define DCS_SLEEP_IN 0x10 #define DCS_SLEEP_OUT 0x11 #define DCS_DISPLAY_OFF 0x28 #define DCS_DISPLAY_ON 0x29 #define DCS_COLUMN_ADDR 0x2a #define DCS_PAGE_ADDR 0x2b #define DCS_MEMORY_WRITE 0x2c #define DCS_TEAR_OFF 0x34 #define DCS_TEAR_ON 0x35 #define DCS_MEM_ACC_CTRL 0x36 #define DCS_PIXEL_FORMAT 0x3a #define DCS_BRIGHTNESS 0x51 #define DCS_CTRL_DISPLAY 0x53 #define DCS_WRITE_CABC 0x55 #define DCS_READ_CABC 0x56 #define DCS_GET_ID1 0xda #define DCS_GET_ID2 0xdb #define DCS_GET_ID3 0xdc static irqreturn_t taal_te_isr(int irq, void *data); static void taal_te_timeout_work_callback(struct work_struct *work); static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable); static int taal_panel_reset(struct omap_dss_device *dssdev); struct panel_regulator { struct regulator *regulator; const char *name; int min_uV; int max_uV; }; static void free_regulators(struct panel_regulator *regulators, int n) { int i; for (i = 0; i < n; i++) { /* disable/put in reverse order */ regulator_disable(regulators[n - i - 1].regulator); regulator_put(regulators[n - i - 1].regulator); } } static int init_regulators(struct omap_dss_device *dssdev, struct panel_regulator *regulators, int n) { int r, i, v; for (i = 0; i < n; i++) { struct regulator *reg; reg = regulator_get(&dssdev->dev, regulators[i].name); if (IS_ERR(reg)) { dev_err(&dssdev->dev, "failed to get regulator %s\n", regulators[i].name); r = PTR_ERR(reg); goto err; } /* FIXME: better handling of fixed vs. 
variable regulators */ v = regulator_get_voltage(reg); if (v < regulators[i].min_uV || v > regulators[i].max_uV) { r = regulator_set_voltage(reg, regulators[i].min_uV, regulators[i].max_uV); if (r) { dev_err(&dssdev->dev, "failed to set regulator %s voltage\n", regulators[i].name); regulator_put(reg); goto err; } } r = regulator_enable(reg); if (r) { dev_err(&dssdev->dev, "failed to enable regulator %s\n", regulators[i].name); regulator_put(reg); goto err; } regulators[i].regulator = reg; } return 0; err: free_regulators(regulators, i); return r; } /** * struct panel_config - panel configuration * @name: panel name * @type: panel type * @timings: panel resolution * @sleep: various panel specific delays, passed to msleep() if non-zero * @reset_sequence: reset sequence timings, passed to udelay() if non-zero * @regulators: array of panel regulators * @num_regulators: number of regulators in the array */ struct panel_config { const char *name; int type; struct omap_video_timings timings; struct { unsigned int sleep_in; unsigned int sleep_out; unsigned int hw_reset; unsigned int enable_te; } sleep; struct { unsigned int high; unsigned int low; } reset_sequence; struct panel_regulator *regulators; int num_regulators; }; enum { PANEL_TAAL, }; static struct panel_config panel_configs[] = { { .name = "taal", .type = PANEL_TAAL, .timings = { .x_res = 864, .y_res = 480, }, .sleep = { .sleep_in = 5, .sleep_out = 5, .hw_reset = 5, .enable_te = 100, /* possible panel bug */ }, .reset_sequence = { .high = 10, .low = 10, }, }, }; struct taal_data { struct mutex lock; struct backlight_device *bldev; unsigned long hw_guard_end; /* next value of jiffies when we can * issue the next sleep in/out command */ unsigned long hw_guard_wait; /* max guard time in jiffies */ struct omap_dss_device *dssdev; bool enabled; u8 rotate; bool mirror; bool te_enabled; atomic_t do_update; struct { u16 x; u16 y; u16 w; u16 h; } update_region; int channel; struct delayed_work te_timeout_work; bool use_dsi_bl; bool cabc_broken; unsigned cabc_mode; bool intro_printed; struct workqueue_struct *workqueue; struct delayed_work esd_work; unsigned esd_interval; bool ulps_enabled; unsigned ulps_timeout; struct delayed_work ulps_work; struct panel_config *panel_config; }; static inline struct nokia_dsi_panel_data *get_panel_data(const struct omap_dss_device *dssdev) { return (struct nokia_dsi_panel_data *) dssdev->data; } static void taal_esd_work(struct work_struct *work); static void taal_ulps_work(struct work_struct *work); static void hw_guard_start(struct taal_data *td, int guard_msec) { td->hw_guard_wait = msecs_to_jiffies(guard_msec); td->hw_guard_end = jiffies + td->hw_guard_wait; } static void hw_guard_wait(struct taal_data *td) { unsigned long wait = td->hw_guard_end - jiffies; if ((long)wait > 0 && wait <= td->hw_guard_wait) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(wait); } } static int taal_dcs_read_1(struct taal_data *td, u8 dcs_cmd, u8 *data) { int r; u8 buf[1]; r = dsi_vc_dcs_read(td->dssdev, td->channel, dcs_cmd, buf, 1); if (r < 0) return r; *data = buf[0]; return 0; } static int taal_dcs_write_0(struct taal_data *td, u8 dcs_cmd) { return dsi_vc_dcs_write(td->dssdev, td->channel, &dcs_cmd, 1); } static int taal_dcs_write_1(struct taal_data *td, u8 dcs_cmd, u8 param) { u8 buf[2]; buf[0] = dcs_cmd; buf[1] = param; return dsi_vc_dcs_write(td->dssdev, td->channel, buf, 2); } static int taal_sleep_in(struct taal_data *td) { u8 cmd; int r; hw_guard_wait(td); cmd = DCS_SLEEP_IN; r = 
dsi_vc_dcs_write_nosync(td->dssdev, td->channel, &cmd, 1); if (r) return r; hw_guard_start(td, 120); if (td->panel_config->sleep.sleep_in) msleep(td->panel_config->sleep.sleep_in); return 0; } static int taal_sleep_out(struct taal_data *td) { int r; hw_guard_wait(td); r = taal_dcs_write_0(td, DCS_SLEEP_OUT); if (r) return r; hw_guard_start(td, 120); if (td->panel_config->sleep.sleep_out) msleep(td->panel_config->sleep.sleep_out); return 0; } static int taal_get_id(struct taal_data *td, u8 *id1, u8 *id2, u8 *id3) { int r; r = taal_dcs_read_1(td, DCS_GET_ID1, id1); if (r) return r; r = taal_dcs_read_1(td, DCS_GET_ID2, id2); if (r) return r; r = taal_dcs_read_1(td, DCS_GET_ID3, id3); if (r) return r; return 0; } static int taal_set_addr_mode(struct taal_data *td, u8 rotate, bool mirror) { int r; u8 mode; int b5, b6, b7; r = taal_dcs_read_1(td, DCS_READ_MADCTL, &mode); if (r) return r; switch (rotate) { default: case 0: b7 = 0; b6 = 0; b5 = 0; break; case 1: b7 = 0; b6 = 1; b5 = 1; break; case 2: b7 = 1; b6 = 1; b5 = 0; break; case 3: b7 = 1; b6 = 0; b5 = 1; break; } if (mirror) b6 = !b6; mode &= ~((1<<7) | (1<<6) | (1<<5)); mode |= (b7 << 7) | (b6 << 6) | (b5 << 5); return taal_dcs_write_1(td, DCS_MEM_ACC_CTRL, mode); } static int taal_set_update_window(struct taal_data *td, u16 x, u16 y, u16 w, u16 h) { int r; u16 x1 = x; u16 x2 = x + w - 1; u16 y1 = y; u16 y2 = y + h - 1; u8 buf[5]; buf[0] = DCS_COLUMN_ADDR; buf[1] = (x1 >> 8) & 0xff; buf[2] = (x1 >> 0) & 0xff; buf[3] = (x2 >> 8) & 0xff; buf[4] = (x2 >> 0) & 0xff; r = dsi_vc_dcs_write_nosync(td->dssdev, td->channel, buf, sizeof(buf)); if (r) return r; buf[0] = DCS_PAGE_ADDR; buf[1] = (y1 >> 8) & 0xff; buf[2] = (y1 >> 0) & 0xff; buf[3] = (y2 >> 8) & 0xff; buf[4] = (y2 >> 0) & 0xff; r = dsi_vc_dcs_write_nosync(td->dssdev, td->channel, buf, sizeof(buf)); if (r) return r; dsi_vc_send_bta_sync(td->dssdev, td->channel); return r; } static void taal_queue_esd_work(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); if (td->esd_interval > 0) queue_delayed_work(td->workqueue, &td->esd_work, msecs_to_jiffies(td->esd_interval)); } static void taal_cancel_esd_work(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); cancel_delayed_work(&td->esd_work); } static void taal_queue_ulps_work(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); if (td->ulps_timeout > 0) queue_delayed_work(td->workqueue, &td->ulps_work, msecs_to_jiffies(td->ulps_timeout)); } static void taal_cancel_ulps_work(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); cancel_delayed_work(&td->ulps_work); } static int taal_enter_ulps(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); int r; if (td->ulps_enabled) return 0; taal_cancel_ulps_work(dssdev); r = _taal_enable_te(dssdev, false); if (r) goto err; disable_irq(gpio_to_irq(panel_data->ext_te_gpio)); omapdss_dsi_display_disable(dssdev, false, true); td->ulps_enabled = true; return 0; err: dev_err(&dssdev->dev, "enter ULPS failed"); taal_panel_reset(dssdev); td->ulps_enabled = false; taal_queue_ulps_work(dssdev); return r; } static int taal_exit_ulps(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); int r; if (!td->ulps_enabled) return 0; r = omapdss_dsi_display_enable(dssdev); if (r) goto 
err; omapdss_dsi_vc_enable_hs(dssdev, td->channel, true); r = _taal_enable_te(dssdev, true); if (r) goto err; enable_irq(gpio_to_irq(panel_data->ext_te_gpio)); taal_queue_ulps_work(dssdev); td->ulps_enabled = false; return 0; err: dev_err(&dssdev->dev, "exit ULPS failed"); r = taal_panel_reset(dssdev); enable_irq(gpio_to_irq(panel_data->ext_te_gpio)); td->ulps_enabled = false; taal_queue_ulps_work(dssdev); return r; } static int taal_wake_up(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); if (td->ulps_enabled) return taal_exit_ulps(dssdev); taal_cancel_ulps_work(dssdev); taal_queue_ulps_work(dssdev); return 0; } static int taal_bl_update_status(struct backlight_device *dev) { struct omap_dss_device *dssdev = dev_get_drvdata(&dev->dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); int r; int level; if (dev->props.fb_blank == FB_BLANK_UNBLANK && dev->props.power == FB_BLANK_UNBLANK) level = dev->props.brightness; else level = 0; dev_dbg(&dssdev->dev, "update brightness to %d\n", level); mutex_lock(&td->lock); if (td->use_dsi_bl) { if (td->enabled) { dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); if (!r) r = taal_dcs_write_1(td, DCS_BRIGHTNESS, level); dsi_bus_unlock(dssdev); } else { r = 0; } } else { if (!panel_data->set_backlight) r = -EINVAL; else r = panel_data->set_backlight(dssdev, level); } mutex_unlock(&td->lock); return r; } static int taal_bl_get_intensity(struct backlight_device *dev) { if (dev->props.fb_blank == FB_BLANK_UNBLANK && dev->props.power == FB_BLANK_UNBLANK) return dev->props.brightness; return 0; } static const struct backlight_ops taal_bl_ops = { .get_brightness = taal_bl_get_intensity, .update_status = taal_bl_update_status, }; static void taal_get_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { *timings = dssdev->panel.timings; } static void taal_get_resolution(struct omap_dss_device *dssdev, u16 *xres, u16 *yres) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); if (td->rotate == 0 || td->rotate == 2) { *xres = dssdev->panel.timings.x_res; *yres = dssdev->panel.timings.y_res; } else { *yres = dssdev->panel.timings.x_res; *xres = dssdev->panel.timings.y_res; } } static ssize_t taal_num_errors_show(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); u8 errors; int r; mutex_lock(&td->lock); if (td->enabled) { dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); if (!r) r = taal_dcs_read_1(td, DCS_READ_NUM_ERRORS, &errors); dsi_bus_unlock(dssdev); } else { r = -ENODEV; } mutex_unlock(&td->lock); if (r) return r; return snprintf(buf, PAGE_SIZE, "%d\n", errors); } static ssize_t taal_hw_revision_show(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); u8 id1, id2, id3; int r; mutex_lock(&td->lock); if (td->enabled) { dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); if (!r) r = taal_get_id(td, &id1, &id2, &id3); dsi_bus_unlock(dssdev); } else { r = -ENODEV; } mutex_unlock(&td->lock); if (r) return r; return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3); } static const char *cabc_modes[] = { "off", /* used also always when CABC is not supported */ "ui", "still-image", "moving-image", }; static ssize_t show_cabc_mode(struct device *dev, struct device_attribute *attr, char *buf) { 
struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); const char *mode_str; int mode; int len; mode = td->cabc_mode; mode_str = "unknown"; if (mode >= 0 && mode < ARRAY_SIZE(cabc_modes)) mode_str = cabc_modes[mode]; len = snprintf(buf, PAGE_SIZE, "%s\n", mode_str); return len < PAGE_SIZE - 1 ? len : PAGE_SIZE - 1; } static ssize_t store_cabc_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); int i; int r; for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) { if (sysfs_streq(cabc_modes[i], buf)) break; } if (i == ARRAY_SIZE(cabc_modes)) return -EINVAL; mutex_lock(&td->lock); if (td->enabled) { dsi_bus_lock(dssdev); if (!td->cabc_broken) { r = taal_wake_up(dssdev); if (r) goto err; r = taal_dcs_write_1(td, DCS_WRITE_CABC, i); if (r) goto err; } dsi_bus_unlock(dssdev); } td->cabc_mode = i; mutex_unlock(&td->lock); return count; err: dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); return r; } static ssize_t show_cabc_available_modes(struct device *dev, struct device_attribute *attr, char *buf) { int len; int i; for (i = 0, len = 0; len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++) len += snprintf(&buf[len], PAGE_SIZE - len, "%s%s%s", i ? " " : "", cabc_modes[i], i == ARRAY_SIZE(cabc_modes) - 1 ? "\n" : ""); return len < PAGE_SIZE ? len : PAGE_SIZE - 1; } static ssize_t taal_store_esd_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); unsigned long t; int r; r = strict_strtoul(buf, 10, &t); if (r) return r; mutex_lock(&td->lock); taal_cancel_esd_work(dssdev); td->esd_interval = t; if (td->enabled) taal_queue_esd_work(dssdev); mutex_unlock(&td->lock); return count; } static ssize_t taal_show_esd_interval(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); unsigned t; mutex_lock(&td->lock); t = td->esd_interval; mutex_unlock(&td->lock); return snprintf(buf, PAGE_SIZE, "%u\n", t); } static ssize_t taal_store_ulps(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); unsigned long t; int r; r = strict_strtoul(buf, 10, &t); if (r) return r; mutex_lock(&td->lock); if (td->enabled) { dsi_bus_lock(dssdev); if (t) r = taal_enter_ulps(dssdev); else r = taal_wake_up(dssdev); dsi_bus_unlock(dssdev); } mutex_unlock(&td->lock); if (r) return r; return count; } static ssize_t taal_show_ulps(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); unsigned t; mutex_lock(&td->lock); t = td->ulps_enabled; mutex_unlock(&td->lock); return snprintf(buf, PAGE_SIZE, "%u\n", t); } static ssize_t taal_store_ulps_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); unsigned long t; int r; r = strict_strtoul(buf, 10, &t); if (r) return r; mutex_lock(&td->lock); td->ulps_timeout = t; if (td->enabled) { /* taal_wake_up will restart the timer */ dsi_bus_lock(dssdev); r = 
taal_wake_up(dssdev); dsi_bus_unlock(dssdev); } mutex_unlock(&td->lock); if (r) return r; return count; } static ssize_t taal_show_ulps_timeout(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); unsigned t; mutex_lock(&td->lock); t = td->ulps_timeout; mutex_unlock(&td->lock); return snprintf(buf, PAGE_SIZE, "%u\n", t); } static DEVICE_ATTR(num_dsi_errors, S_IRUGO, taal_num_errors_show, NULL); static DEVICE_ATTR(hw_revision, S_IRUGO, taal_hw_revision_show, NULL); static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR, show_cabc_mode, store_cabc_mode); static DEVICE_ATTR(cabc_available_modes, S_IRUGO, show_cabc_available_modes, NULL); static DEVICE_ATTR(esd_interval, S_IRUGO | S_IWUSR, taal_show_esd_interval, taal_store_esd_interval); static DEVICE_ATTR(ulps, S_IRUGO | S_IWUSR, taal_show_ulps, taal_store_ulps); static DEVICE_ATTR(ulps_timeout, S_IRUGO | S_IWUSR, taal_show_ulps_timeout, taal_store_ulps_timeout); static struct attribute *taal_attrs[] = { &dev_attr_num_dsi_errors.attr, &dev_attr_hw_revision.attr, &dev_attr_cabc_mode.attr, &dev_attr_cabc_available_modes.attr, &dev_attr_esd_interval.attr, &dev_attr_ulps.attr, &dev_attr_ulps_timeout.attr, NULL, }; static struct attribute_group taal_attr_group = { .attrs = taal_attrs, }; static void taal_hw_reset(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); if (panel_data->reset_gpio == -1) return; gpio_set_value(panel_data->reset_gpio, 1); if (td->panel_config->reset_sequence.high) udelay(td->panel_config->reset_sequence.high); /* reset the panel */ gpio_set_value(panel_data->reset_gpio, 0); /* assert reset */ if (td->panel_config->reset_sequence.low) udelay(td->panel_config->reset_sequence.low); gpio_set_value(panel_data->reset_gpio, 1); /* wait after releasing reset */ if (td->panel_config->sleep.hw_reset) msleep(td->panel_config->sleep.hw_reset); } static int taal_probe(struct omap_dss_device *dssdev) { struct backlight_properties props; struct taal_data *td; struct backlight_device *bldev; struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); struct panel_config *panel_config = NULL; int r, i; dev_dbg(&dssdev->dev, "probe\n"); if (!panel_data || !panel_data->name) { r = -EINVAL; goto err; } for (i = 0; i < ARRAY_SIZE(panel_configs); i++) { if (strcmp(panel_data->name, panel_configs[i].name) == 0) { panel_config = &panel_configs[i]; break; } } if (!panel_config) { r = -EINVAL; goto err; } dssdev->panel.config = OMAP_DSS_LCD_TFT; dssdev->panel.timings = panel_config->timings; dssdev->ctrl.pixel_size = 24; td = kzalloc(sizeof(*td), GFP_KERNEL); if (!td) { r = -ENOMEM; goto err; } td->dssdev = dssdev; td->panel_config = panel_config; td->esd_interval = panel_data->esd_interval; td->ulps_enabled = false; td->ulps_timeout = panel_data->ulps_timeout; mutex_init(&td->lock); atomic_set(&td->do_update, 0); r = init_regulators(dssdev, panel_config->regulators, panel_config->num_regulators); if (r) goto err_reg; td->workqueue = create_singlethread_workqueue("taal_esd"); if (td->workqueue == NULL) { dev_err(&dssdev->dev, "can't create ESD workqueue\n"); r = -ENOMEM; goto err_wq; } INIT_DELAYED_WORK_DEFERRABLE(&td->esd_work, taal_esd_work); INIT_DELAYED_WORK(&td->ulps_work, taal_ulps_work); dev_set_drvdata(&dssdev->dev, td); taal_hw_reset(dssdev); /* if no platform set_backlight() defined, presume DSI backlight * control */ 
memset(&props, 0, sizeof(struct backlight_properties)); if (!panel_data->set_backlight) td->use_dsi_bl = true; if (td->use_dsi_bl) props.max_brightness = 255; else props.max_brightness = 127; props.type = BACKLIGHT_RAW; bldev = backlight_device_register(dev_name(&dssdev->dev), &dssdev->dev, dssdev, &taal_bl_ops, &props); if (IS_ERR(bldev)) { r = PTR_ERR(bldev); goto err_bl; } td->bldev = bldev; bldev->props.fb_blank = FB_BLANK_UNBLANK; bldev->props.power = FB_BLANK_UNBLANK; if (td->use_dsi_bl) bldev->props.brightness = 255; else bldev->props.brightness = 127; taal_bl_update_status(bldev); if (panel_data->use_ext_te) { int gpio = panel_data->ext_te_gpio; r = gpio_request(gpio, "taal irq"); if (r) { dev_err(&dssdev->dev, "GPIO request failed\n"); goto err_gpio; } gpio_direction_input(gpio); r = request_irq(gpio_to_irq(gpio), taal_te_isr, IRQF_DISABLED | IRQF_TRIGGER_RISING, "taal vsync", dssdev); if (r) { dev_err(&dssdev->dev, "IRQ request failed\n"); gpio_free(gpio); goto err_irq; } INIT_DELAYED_WORK_DEFERRABLE(&td->te_timeout_work, taal_te_timeout_work_callback); dev_dbg(&dssdev->dev, "Using GPIO TE\n"); } r = omap_dsi_request_vc(dssdev, &td->channel); if (r) { dev_err(&dssdev->dev, "failed to get virtual channel\n"); goto err_req_vc; } r = omap_dsi_set_vc_id(dssdev, td->channel, TCH); if (r) { dev_err(&dssdev->dev, "failed to set VC_ID\n"); goto err_vc_id; } r = sysfs_create_group(&dssdev->dev.kobj, &taal_attr_group); if (r) { dev_err(&dssdev->dev, "failed to create sysfs files\n"); goto err_vc_id; } return 0; err_vc_id: omap_dsi_release_vc(dssdev, td->channel); err_req_vc: if (panel_data->use_ext_te) free_irq(gpio_to_irq(panel_data->ext_te_gpio), dssdev); err_irq: if (panel_data->use_ext_te) gpio_free(panel_data->ext_te_gpio); err_gpio: backlight_device_unregister(bldev); err_bl: destroy_workqueue(td->workqueue); err_wq: free_regulators(panel_config->regulators, panel_config->num_regulators); err_reg: kfree(td); err: return r; } static void __exit taal_remove(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); struct backlight_device *bldev; dev_dbg(&dssdev->dev, "remove\n"); sysfs_remove_group(&dssdev->dev.kobj, &taal_attr_group); omap_dsi_release_vc(dssdev, td->channel); if (panel_data->use_ext_te) { int gpio = panel_data->ext_te_gpio; free_irq(gpio_to_irq(gpio), dssdev); gpio_free(gpio); } bldev = td->bldev; bldev->props.power = FB_BLANK_POWERDOWN; taal_bl_update_status(bldev); backlight_device_unregister(bldev); taal_cancel_ulps_work(dssdev); taal_cancel_esd_work(dssdev); destroy_workqueue(td->workqueue); /* reset, to be sure that the panel is in a valid state */ taal_hw_reset(dssdev); free_regulators(td->panel_config->regulators, td->panel_config->num_regulators); kfree(td); } static int taal_power_on(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); u8 id1, id2, id3; int r; r = omapdss_dsi_display_enable(dssdev); if (r) { dev_err(&dssdev->dev, "failed to enable DSI\n"); goto err0; } taal_hw_reset(dssdev); omapdss_dsi_vc_enable_hs(dssdev, td->channel, false); r = taal_sleep_out(td); if (r) goto err; r = taal_get_id(td, &id1, &id2, &id3); if (r) goto err; /* on early Taal revisions CABC is broken */ if (td->panel_config->type == PANEL_TAAL && (id2 == 0x00 || id2 == 0xff || id2 == 0x81)) td->cabc_broken = true; r = taal_dcs_write_1(td, DCS_BRIGHTNESS, 0xff); if (r) goto err; r = taal_dcs_write_1(td, DCS_CTRL_DISPLAY, (1<<2) | (1<<5)); /* BL | 
BCTRL */ if (r) goto err; r = taal_dcs_write_1(td, DCS_PIXEL_FORMAT, 0x7); /* 24bit/pixel */ if (r) goto err; r = taal_set_addr_mode(td, td->rotate, td->mirror); if (r) goto err; if (!td->cabc_broken) { r = taal_dcs_write_1(td, DCS_WRITE_CABC, td->cabc_mode); if (r) goto err; } r = taal_dcs_write_0(td, DCS_DISPLAY_ON); if (r) goto err; r = _taal_enable_te(dssdev, td->te_enabled); if (r) goto err; td->enabled = 1; if (!td->intro_printed) { dev_info(&dssdev->dev, "%s panel revision %02x.%02x.%02x\n", td->panel_config->name, id1, id2, id3); if (td->cabc_broken) dev_info(&dssdev->dev, "old Taal version, CABC disabled\n"); td->intro_printed = true; } omapdss_dsi_vc_enable_hs(dssdev, td->channel, true); return 0; err: dev_err(&dssdev->dev, "error while enabling panel, issuing HW reset\n"); taal_hw_reset(dssdev); omapdss_dsi_display_disable(dssdev, true, false); err0: return r; } static void taal_power_off(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; r = taal_dcs_write_0(td, DCS_DISPLAY_OFF); if (!r) { r = taal_sleep_in(td); /* HACK: wait a bit so that the message goes through */ msleep(10); } if (r) { dev_err(&dssdev->dev, "error disabling panel, issuing HW reset\n"); taal_hw_reset(dssdev); } omapdss_dsi_display_disable(dssdev, true, false); td->enabled = 0; } static int taal_panel_reset(struct omap_dss_device *dssdev) { dev_err(&dssdev->dev, "performing LCD reset\n"); taal_power_off(dssdev); taal_hw_reset(dssdev); return taal_power_on(dssdev); } static int taal_enable(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; dev_dbg(&dssdev->dev, "enable\n"); mutex_lock(&td->lock); if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) { r = -EINVAL; goto err; } dsi_bus_lock(dssdev); r = taal_power_on(dssdev); dsi_bus_unlock(dssdev); if (r) goto err; taal_queue_esd_work(dssdev); dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; mutex_unlock(&td->lock); return 0; err: dev_dbg(&dssdev->dev, "enable failed\n"); mutex_unlock(&td->lock); return r; } static void taal_disable(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); dev_dbg(&dssdev->dev, "disable\n"); mutex_lock(&td->lock); taal_cancel_ulps_work(dssdev); taal_cancel_esd_work(dssdev); dsi_bus_lock(dssdev); if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { taal_wake_up(dssdev); taal_power_off(dssdev); } dsi_bus_unlock(dssdev); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; mutex_unlock(&td->lock); } static int taal_suspend(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; dev_dbg(&dssdev->dev, "suspend\n"); mutex_lock(&td->lock); if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) { r = -EINVAL; goto err; } taal_cancel_ulps_work(dssdev); taal_cancel_esd_work(dssdev); dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); if (!r) taal_power_off(dssdev); dsi_bus_unlock(dssdev); dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED; mutex_unlock(&td->lock); return 0; err: mutex_unlock(&td->lock); return r; } static int taal_resume(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; dev_dbg(&dssdev->dev, "resume\n"); mutex_lock(&td->lock); if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) { r = -EINVAL; goto err; } dsi_bus_lock(dssdev); r = taal_power_on(dssdev); dsi_bus_unlock(dssdev); if (r) { dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } else { dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; taal_queue_esd_work(dssdev); } mutex_unlock(&td->lock); return r; err: mutex_unlock(&td->lock); 
return r; } static void taal_framedone_cb(int err, void *data) { struct omap_dss_device *dssdev = data; dev_dbg(&dssdev->dev, "framedone, err %d\n", err); dsi_bus_unlock(dssdev); } static irqreturn_t taal_te_isr(int irq, void *data) { struct omap_dss_device *dssdev = data; struct taal_data *td = dev_get_drvdata(&dssdev->dev); int old; int r; old = atomic_cmpxchg(&td->do_update, 1, 0); if (old) { cancel_delayed_work(&td->te_timeout_work); r = omap_dsi_update(dssdev, td->channel, td->update_region.x, td->update_region.y, td->update_region.w, td->update_region.h, taal_framedone_cb, dssdev); if (r) goto err; } return IRQ_HANDLED; err: dev_err(&dssdev->dev, "start update failed\n"); dsi_bus_unlock(dssdev); return IRQ_HANDLED; } static void taal_te_timeout_work_callback(struct work_struct *work) { struct taal_data *td = container_of(work, struct taal_data, te_timeout_work.work); struct omap_dss_device *dssdev = td->dssdev; dev_err(&dssdev->dev, "TE not received for 250ms!\n"); atomic_set(&td->do_update, 0); dsi_bus_unlock(dssdev); } static int taal_update(struct omap_dss_device *dssdev, u16 x, u16 y, u16 w, u16 h) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); int r; dev_dbg(&dssdev->dev, "update %d, %d, %d x %d\n", x, y, w, h); mutex_lock(&td->lock); dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); if (r) goto err; if (!td->enabled) { r = 0; goto err; } r = omap_dsi_prepare_update(dssdev, &x, &y, &w, &h, true); if (r) goto err; r = taal_set_update_window(td, x, y, w, h); if (r) goto err; if (td->te_enabled && panel_data->use_ext_te) { td->update_region.x = x; td->update_region.y = y; td->update_region.w = w; td->update_region.h = h; barrier(); schedule_delayed_work(&td->te_timeout_work, msecs_to_jiffies(250)); atomic_set(&td->do_update, 1); } else { r = omap_dsi_update(dssdev, td->channel, x, y, w, h, taal_framedone_cb, dssdev); if (r) goto err; } /* note: no bus_unlock here. 
unlock is in framedone_cb */ mutex_unlock(&td->lock); return 0; err: dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); return r; } static int taal_sync(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); dev_dbg(&dssdev->dev, "sync\n"); mutex_lock(&td->lock); dsi_bus_lock(dssdev); dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); dev_dbg(&dssdev->dev, "sync done\n"); return 0; } static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); int r; if (enable) r = taal_dcs_write_1(td, DCS_TEAR_ON, 0); else r = taal_dcs_write_0(td, DCS_TEAR_OFF); if (!panel_data->use_ext_te) omapdss_dsi_enable_te(dssdev, enable); if (td->panel_config->sleep.enable_te) msleep(td->panel_config->sleep.enable_te); return r; } static int taal_enable_te(struct omap_dss_device *dssdev, bool enable) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; mutex_lock(&td->lock); if (td->te_enabled == enable) goto end; dsi_bus_lock(dssdev); if (td->enabled) { r = taal_wake_up(dssdev); if (r) goto err; r = _taal_enable_te(dssdev, enable); if (r) goto err; } td->te_enabled = enable; dsi_bus_unlock(dssdev); end: mutex_unlock(&td->lock); return 0; err: dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); return r; } static int taal_get_te(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; mutex_lock(&td->lock); r = td->te_enabled; mutex_unlock(&td->lock); return r; } static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; dev_dbg(&dssdev->dev, "rotate %d\n", rotate); mutex_lock(&td->lock); if (td->rotate == rotate) goto end; dsi_bus_lock(dssdev); if (td->enabled) { r = taal_wake_up(dssdev); if (r) goto err; r = taal_set_addr_mode(td, rotate, td->mirror); if (r) goto err; } td->rotate = rotate; dsi_bus_unlock(dssdev); end: mutex_unlock(&td->lock); return 0; err: dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); return r; } static u8 taal_get_rotate(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; mutex_lock(&td->lock); r = td->rotate; mutex_unlock(&td->lock); return r; } static int taal_mirror(struct omap_dss_device *dssdev, bool enable) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; dev_dbg(&dssdev->dev, "mirror %d\n", enable); mutex_lock(&td->lock); if (td->mirror == enable) goto end; dsi_bus_lock(dssdev); if (td->enabled) { r = taal_wake_up(dssdev); if (r) goto err; r = taal_set_addr_mode(td, td->rotate, enable); if (r) goto err; } td->mirror = enable; dsi_bus_unlock(dssdev); end: mutex_unlock(&td->lock); return 0; err: dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); return r; } static bool taal_get_mirror(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; mutex_lock(&td->lock); r = td->mirror; mutex_unlock(&td->lock); return r; } static int taal_run_test(struct omap_dss_device *dssdev, int test_num) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); u8 id1, id2, id3; int r; mutex_lock(&td->lock); if (!td->enabled) { r = -ENODEV; goto err1; } dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); if (r) goto err2; r = taal_dcs_read_1(td, DCS_GET_ID1, &id1); if (r) goto err2; r = taal_dcs_read_1(td, DCS_GET_ID2, &id2); if (r) goto err2; r = taal_dcs_read_1(td, DCS_GET_ID3, &id3); if (r) goto err2; dsi_bus_unlock(dssdev); 
mutex_unlock(&td->lock); return 0; err2: dsi_bus_unlock(dssdev); err1: mutex_unlock(&td->lock); return r; } static int taal_memory_read(struct omap_dss_device *dssdev, void *buf, size_t size, u16 x, u16 y, u16 w, u16 h) { int r; int first = 1; int plen; unsigned buf_used = 0; struct taal_data *td = dev_get_drvdata(&dssdev->dev); if (size < w * h * 3) return -ENOMEM; mutex_lock(&td->lock); if (!td->enabled) { r = -ENODEV; goto err1; } size = min(w * h * 3, dssdev->panel.timings.x_res * dssdev->panel.timings.y_res * 3); dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); if (r) goto err2; /* plen 1 or 2 goes into short packet. until checksum error is fixed, * use short packets. plen 32 works, but bigger packets seem to cause * an error. */ if (size % 2) plen = 1; else plen = 2; taal_set_update_window(td, x, y, w, h); r = dsi_vc_set_max_rx_packet_size(dssdev, td->channel, plen); if (r) goto err2; while (buf_used < size) { u8 dcs_cmd = first ? 0x2e : 0x3e; first = 0; r = dsi_vc_dcs_read(dssdev, td->channel, dcs_cmd, buf + buf_used, size - buf_used); if (r < 0) { dev_err(&dssdev->dev, "read error\n"); goto err3; } buf_used += r; if (r < plen) { dev_err(&dssdev->dev, "short read\n"); break; } if (signal_pending(current)) { dev_err(&dssdev->dev, "signal pending, " "aborting memory read\n"); r = -ERESTARTSYS; goto err3; } } r = buf_used; err3: dsi_vc_set_max_rx_packet_size(dssdev, td->channel, 1); err2: dsi_bus_unlock(dssdev); err1: mutex_unlock(&td->lock); return r; } static void taal_ulps_work(struct work_struct *work) { struct taal_data *td = container_of(work, struct taal_data, ulps_work.work); struct omap_dss_device *dssdev = td->dssdev; mutex_lock(&td->lock); if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE || !td->enabled) { mutex_unlock(&td->lock); return; } dsi_bus_lock(dssdev); taal_enter_ulps(dssdev); dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); } static void taal_esd_work(struct work_struct *work) { struct taal_data *td = container_of(work, struct taal_data, esd_work.work); struct omap_dss_device *dssdev = td->dssdev; struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); u8 state1, state2; int r; mutex_lock(&td->lock); if (!td->enabled) { mutex_unlock(&td->lock); return; } dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); if (r) { dev_err(&dssdev->dev, "failed to exit ULPS\n"); goto err; } r = taal_dcs_read_1(td, DCS_RDDSDR, &state1); if (r) { dev_err(&dssdev->dev, "failed to read Taal status\n"); goto err; } /* Run self diagnostics */ r = taal_sleep_out(td); if (r) { dev_err(&dssdev->dev, "failed to run Taal self-diagnostics\n"); goto err; } r = taal_dcs_read_1(td, DCS_RDDSDR, &state2); if (r) { dev_err(&dssdev->dev, "failed to read Taal status\n"); goto err; } /* Each sleep out command will trigger a self diagnostic and flip * Bit6 if the test passes. */ if (!((state1 ^ state2) & (1 << 6))) { dev_err(&dssdev->dev, "LCD self diagnostics failed\n"); goto err; } /* Self-diagnostics result is also shown on TE GPIO line. 
We need * to re-enable TE after self diagnostics */ if (td->te_enabled && panel_data->use_ext_te) { r = taal_dcs_write_1(td, DCS_TEAR_ON, 0); if (r) goto err; } dsi_bus_unlock(dssdev); taal_queue_esd_work(dssdev); mutex_unlock(&td->lock); return; err: dev_err(&dssdev->dev, "performing LCD reset\n"); taal_panel_reset(dssdev); dsi_bus_unlock(dssdev); taal_queue_esd_work(dssdev); mutex_unlock(&td->lock); } static int taal_set_update_mode(struct omap_dss_device *dssdev, enum omap_dss_update_mode mode) { if (mode != OMAP_DSS_UPDATE_MANUAL) return -EINVAL; return 0; } static enum omap_dss_update_mode taal_get_update_mode( struct omap_dss_device *dssdev) { return OMAP_DSS_UPDATE_MANUAL; } static struct omap_dss_driver taal_driver = { .probe = taal_probe, .remove = __exit_p(taal_remove), .enable = taal_enable, .disable = taal_disable, .suspend = taal_suspend, .resume = taal_resume, .set_update_mode = taal_set_update_mode, .get_update_mode = taal_get_update_mode, .update = taal_update, .sync = taal_sync, .get_resolution = taal_get_resolution, .get_recommended_bpp = omapdss_default_get_recommended_bpp, .enable_te = taal_enable_te, .get_te = taal_get_te, .set_rotate = taal_rotate, .get_rotate = taal_get_rotate, .set_mirror = taal_mirror, .get_mirror = taal_get_mirror, .run_test = taal_run_test, .memory_read = taal_memory_read, .get_timings = taal_get_timings, .driver = { .name = "taal", .owner = THIS_MODULE, }, }; static int __init taal_init(void) { omap_dss_register_driver(&taal_driver); return 0; } static void __exit taal_exit(void) { omap_dss_unregister_driver(&taal_driver); } module_init(taal_init); module_exit(taal_exit); MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>"); MODULE_DESCRIPTION("Taal Driver"); MODULE_LICENSE("GPL");
gpl-2.0
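The taal driver above programs the panel's update window by splitting the inclusive start and end coordinates into big-endian byte pairs for the DCS column-address and page-address commands (see taal_set_update_window). Below is a minimal user-space sketch of just that packing step; it is not driver code, and pack_window, the file name, and the sample geometry are invented for illustration (864x480 is the Taal entry in panel_configs).

/* Sketch only: how taal_set_update_window() packs a 16-bit window into the
 * four payload bytes of the DCS column/page address commands.
 * Build: cc -o winpack winpack.c && ./winpack   (file name is hypothetical)
 */
#include <stdio.h>
#include <stdint.h>

static void pack_window(uint16_t start, uint16_t end, uint8_t buf[4])
{
	buf[0] = (start >> 8) & 0xff;	/* start coordinate, high byte first */
	buf[1] = start & 0xff;
	buf[2] = (end >> 8) & 0xff;	/* inclusive end coordinate */
	buf[3] = end & 0xff;
}

int main(void)
{
	uint8_t col[4], page[4];
	uint16_t x = 0, y = 0, w = 864, h = 480;	/* full Taal panel */

	pack_window(x, x + w - 1, col);		/* payload after DCS_COLUMN_ADDR */
	pack_window(y, y + h - 1, page);	/* payload after DCS_PAGE_ADDR */

	printf("column addr bytes: %02x %02x %02x %02x\n",
	       col[0], col[1], col[2], col[3]);
	printf("page addr bytes:   %02x %02x %02x %02x\n",
	       page[0], page[1], page[2], page[3]);
	return 0;
}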
sandymanu/sandy_lettuce_8916
drivers/hwmon/sht21.c
2836
7475
/* Sensirion SHT21 humidity and temperature sensor driver * * Copyright (C) 2010 Urs Fleisch <urs.fleisch@sensirion.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA * * Data sheet available (5/2010) at * http://www.sensirion.com/en/pdf/product_information/Datasheet-humidity-sensor-SHT21.pdf */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/jiffies.h> /* I2C command bytes */ #define SHT21_TRIG_T_MEASUREMENT_HM 0xe3 #define SHT21_TRIG_RH_MEASUREMENT_HM 0xe5 /** * struct sht21 - SHT21 device specific data * @hwmon_dev: device registered with hwmon * @lock: mutex to protect measurement values * @valid: only 0 before first measurement is taken * @last_update: time of last update (jiffies) * @temperature: cached temperature measurement value * @humidity: cached humidity measurement value */ struct sht21 { struct device *hwmon_dev; struct mutex lock; char valid; unsigned long last_update; int temperature; int humidity; }; /** * sht21_temp_ticks_to_millicelsius() - convert raw temperature ticks to * milli celsius * @ticks: temperature ticks value received from sensor */ static inline int sht21_temp_ticks_to_millicelsius(int ticks) { ticks &= ~0x0003; /* clear status bits */ /* * Formula T = -46.85 + 175.72 * ST / 2^16 from data sheet 6.2, * optimized for integer fixed point (3 digits) arithmetic */ return ((21965 * ticks) >> 13) - 46850; } /** * sht21_rh_ticks_to_per_cent_mille() - convert raw humidity ticks to * one-thousandths of a percent relative humidity * @ticks: humidity ticks value received from sensor */ static inline int sht21_rh_ticks_to_per_cent_mille(int ticks) { ticks &= ~0x0003; /* clear status bits */ /* * Formula RH = -6 + 125 * SRH / 2^16 from data sheet 6.1, * optimized for integer fixed point (3 digits) arithmetic */ return ((15625 * ticks) >> 13) - 6000; } /** * sht21_update_measurements() - get updated measurements from device * @client: I2C client device * * Returns 0 on success, else negative errno. */ static int sht21_update_measurements(struct i2c_client *client) { int ret = 0; struct sht21 *sht21 = i2c_get_clientdata(client); mutex_lock(&sht21->lock); /* * Data sheet 2.4: * SHT2x should not be active for more than 10% of the time - e.g. * maximum two measurements per second at 12bit accuracy shall be made. 
*/ if (time_after(jiffies, sht21->last_update + HZ / 2) || !sht21->valid) { ret = i2c_smbus_read_word_swapped(client, SHT21_TRIG_T_MEASUREMENT_HM); if (ret < 0) goto out; sht21->temperature = sht21_temp_ticks_to_millicelsius(ret); ret = i2c_smbus_read_word_swapped(client, SHT21_TRIG_RH_MEASUREMENT_HM); if (ret < 0) goto out; sht21->humidity = sht21_rh_ticks_to_per_cent_mille(ret); sht21->last_update = jiffies; sht21->valid = 1; } out: mutex_unlock(&sht21->lock); return ret >= 0 ? 0 : ret; } /** * sht21_show_temperature() - show temperature measurement value in sysfs * @dev: device * @attr: device attribute * @buf: sysfs buffer (PAGE_SIZE) where measurement values are written to * * Will be called on read access to temp1_input sysfs attribute. * Returns number of bytes written into buffer, negative errno on error. */ static ssize_t sht21_show_temperature(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct sht21 *sht21 = i2c_get_clientdata(client); int ret = sht21_update_measurements(client); if (ret < 0) return ret; return sprintf(buf, "%d\n", sht21->temperature); } /** * sht21_show_humidity() - show humidity measurement value in sysfs * @dev: device * @attr: device attribute * @buf: sysfs buffer (PAGE_SIZE) where measurement values are written to * * Will be called on read access to humidity1_input sysfs attribute. * Returns number of bytes written into buffer, negative errno on error. */ static ssize_t sht21_show_humidity(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct sht21 *sht21 = i2c_get_clientdata(client); int ret = sht21_update_measurements(client); if (ret < 0) return ret; return sprintf(buf, "%d\n", sht21->humidity); } /* sysfs attributes */ static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, sht21_show_temperature, NULL, 0); static SENSOR_DEVICE_ATTR(humidity1_input, S_IRUGO, sht21_show_humidity, NULL, 0); static struct attribute *sht21_attributes[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_humidity1_input.dev_attr.attr, NULL }; static const struct attribute_group sht21_attr_group = { .attrs = sht21_attributes, }; /** * sht21_probe() - probe device * @client: I2C client device * @id: device ID * * Called by the I2C core when an entry in the ID table matches a * device's name. * Returns 0 on success. 
*/ static int sht21_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct sht21 *sht21; int err; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) { dev_err(&client->dev, "adapter does not support SMBus word transactions\n"); return -ENODEV; } sht21 = devm_kzalloc(&client->dev, sizeof(*sht21), GFP_KERNEL); if (!sht21) return -ENOMEM; i2c_set_clientdata(client, sht21); mutex_init(&sht21->lock); err = sysfs_create_group(&client->dev.kobj, &sht21_attr_group); if (err) { dev_dbg(&client->dev, "could not create sysfs files\n"); return err; } sht21->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(sht21->hwmon_dev)) { dev_dbg(&client->dev, "unable to register hwmon device\n"); err = PTR_ERR(sht21->hwmon_dev); goto fail_remove_sysfs; } dev_info(&client->dev, "initialized\n"); return 0; fail_remove_sysfs: sysfs_remove_group(&client->dev.kobj, &sht21_attr_group); return err; } /** * sht21_remove() - remove device * @client: I2C client device */ static int sht21_remove(struct i2c_client *client) { struct sht21 *sht21 = i2c_get_clientdata(client); hwmon_device_unregister(sht21->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &sht21_attr_group); return 0; } /* Device ID table */ static const struct i2c_device_id sht21_id[] = { { "sht21", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, sht21_id); static struct i2c_driver sht21_driver = { .driver.name = "sht21", .probe = sht21_probe, .remove = sht21_remove, .id_table = sht21_id, }; module_i2c_driver(sht21_driver); MODULE_AUTHOR("Urs Fleisch <urs.fleisch@sensirion.com>"); MODULE_DESCRIPTION("Sensirion SHT21 humidity and temperature sensor driver"); MODULE_LICENSE("GPL");
gpl-2.0
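sht21.c converts the raw sensor words with integer approximations of the data-sheet formulas T = -46.85 + 175.72 * St / 2^16 and RH = -6 + 125 * Srh / 2^16, scaled to millidegrees Celsius and thousandths of a percent. The harness below is a hypothetical user-space check, not part of the driver: it replays the driver's ((21965 * ticks) >> 13) - 46850 and ((15625 * ticks) >> 13) - 6000 expressions against the floating-point formulas over the whole 16-bit range (build with something like cc -O2 sht21_check.c -lm; the file name is made up).

/* Hypothetical check of the sht21 fixed-point conversions against the
 * data-sheet formulas, scaled to millidegrees and milli-%RH. Not driver code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

static int temp_fixed(int ticks) { return ((21965 * ticks) >> 13) - 46850; }
static int rh_fixed(int ticks)   { return ((15625 * ticks) >> 13) - 6000; }

int main(void)
{
	int ticks, max_terr = 0, max_rherr = 0;

	/* The driver clears the two status bits, so step in units of 4. */
	for (ticks = 0; ticks < 65536; ticks += 4) {
		double t  = (-46.85 + 175.72 * ticks / 65536.0) * 1000.0;
		double rh = (-6.0   + 125.0  * ticks / 65536.0) * 1000.0;
		int terr  = abs(temp_fixed(ticks) - (int)lround(t));
		int rherr = abs(rh_fixed(ticks)  - (int)lround(rh));

		if (terr > max_terr)
			max_terr = terr;
		if (rherr > max_rherr)
			max_rherr = rherr;
	}

	/* 21965/8192 and 15625/8192 equal the scaled data-sheet factors
	 * exactly, so only truncation from the right shift shows up here. */
	printf("max temperature error: %d millidegrees\n", max_terr);
	printf("max humidity error:    %d milli-%%RH\n", max_rherr);
	return 0;
}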
davem330/net-next
fs/filesystems.c
3092
6519
/* * linux/fs/filesystems.c * * Copyright (C) 1991, 1992 Linus Torvalds * * table of configured filesystems */ #include <linux/syscalls.h> #include <linux/fs.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/kmod.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <asm/uaccess.h> /* * Handling of filesystem drivers list. * Rules: * Inclusion to/removals from/scanning of list are protected by spinlock. * During the unload module must call unregister_filesystem(). * We can access the fields of list element if: * 1) spinlock is held or * 2) we hold the reference to the module. * The latter can be guaranteed by call of try_module_get(); if it * returned 0 we must skip the element, otherwise we got the reference. * Once the reference is obtained we can drop the spinlock. */ static struct file_system_type *file_systems; static DEFINE_RWLOCK(file_systems_lock); /* WARNING: This can be used only if we _already_ own a reference */ void get_filesystem(struct file_system_type *fs) { __module_get(fs->owner); } void put_filesystem(struct file_system_type *fs) { module_put(fs->owner); } static struct file_system_type **find_filesystem(const char *name, unsigned len) { struct file_system_type **p; for (p=&file_systems; *p; p=&(*p)->next) if (strlen((*p)->name) == len && strncmp((*p)->name, name, len) == 0) break; return p; } /** * register_filesystem - register a new filesystem * @fs: the file system structure * * Adds the file system passed to the list of file systems the kernel * is aware of for mount and other syscalls. Returns 0 on success, * or a negative errno code on an error. * * The &struct file_system_type that is passed is linked into the kernel * structures and must not be freed until the file system has been * unregistered. */ int register_filesystem(struct file_system_type * fs) { int res = 0; struct file_system_type ** p; BUG_ON(strchr(fs->name, '.')); if (fs->next) return -EBUSY; INIT_LIST_HEAD(&fs->fs_supers); write_lock(&file_systems_lock); p = find_filesystem(fs->name, strlen(fs->name)); if (*p) res = -EBUSY; else *p = fs; write_unlock(&file_systems_lock); return res; } EXPORT_SYMBOL(register_filesystem); /** * unregister_filesystem - unregister a file system * @fs: filesystem to unregister * * Remove a file system that was previously successfully registered * with the kernel. An error is returned if the file system is not found. * Zero is returned on a success. * * Once this function has returned the &struct file_system_type structure * may be freed or reused. 
*/ int unregister_filesystem(struct file_system_type * fs) { struct file_system_type ** tmp; write_lock(&file_systems_lock); tmp = &file_systems; while (*tmp) { if (fs == *tmp) { *tmp = fs->next; fs->next = NULL; write_unlock(&file_systems_lock); synchronize_rcu(); return 0; } tmp = &(*tmp)->next; } write_unlock(&file_systems_lock); return -EINVAL; } EXPORT_SYMBOL(unregister_filesystem); static int fs_index(const char __user * __name) { struct file_system_type * tmp; char * name; int err, index; name = getname(__name); err = PTR_ERR(name); if (IS_ERR(name)) return err; err = -EINVAL; read_lock(&file_systems_lock); for (tmp=file_systems, index=0 ; tmp ; tmp=tmp->next, index++) { if (strcmp(tmp->name,name) == 0) { err = index; break; } } read_unlock(&file_systems_lock); putname(name); return err; } static int fs_name(unsigned int index, char __user * buf) { struct file_system_type * tmp; int len, res; read_lock(&file_systems_lock); for (tmp = file_systems; tmp; tmp = tmp->next, index--) if (index <= 0 && try_module_get(tmp->owner)) break; read_unlock(&file_systems_lock); if (!tmp) return -EINVAL; /* OK, we got the reference, so we can safely block */ len = strlen(tmp->name) + 1; res = copy_to_user(buf, tmp->name, len) ? -EFAULT : 0; put_filesystem(tmp); return res; } static int fs_maxindex(void) { struct file_system_type * tmp; int index; read_lock(&file_systems_lock); for (tmp = file_systems, index = 0 ; tmp ; tmp = tmp->next, index++) ; read_unlock(&file_systems_lock); return index; } /* * Whee.. Weird sysv syscall. */ SYSCALL_DEFINE3(sysfs, int, option, unsigned long, arg1, unsigned long, arg2) { int retval = -EINVAL; switch (option) { case 1: retval = fs_index((const char __user *) arg1); break; case 2: retval = fs_name(arg1, (char __user *) arg2); break; case 3: retval = fs_maxindex(); break; } return retval; } int __init get_filesystem_list(char *buf) { int len = 0; struct file_system_type * tmp; read_lock(&file_systems_lock); tmp = file_systems; while (tmp && len < PAGE_SIZE - 80) { len += sprintf(buf+len, "%s\t%s\n", (tmp->fs_flags & FS_REQUIRES_DEV) ? "" : "nodev", tmp->name); tmp = tmp->next; } read_unlock(&file_systems_lock); return len; } #ifdef CONFIG_PROC_FS static int filesystems_proc_show(struct seq_file *m, void *v) { struct file_system_type * tmp; read_lock(&file_systems_lock); tmp = file_systems; while (tmp) { seq_printf(m, "%s\t%s\n", (tmp->fs_flags & FS_REQUIRES_DEV) ? "" : "nodev", tmp->name); tmp = tmp->next; } read_unlock(&file_systems_lock); return 0; } static int filesystems_proc_open(struct inode *inode, struct file *file) { return single_open(file, filesystems_proc_show, NULL); } static const struct file_operations filesystems_proc_fops = { .open = filesystems_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init proc_filesystems_init(void) { proc_create("filesystems", 0, NULL, &filesystems_proc_fops); return 0; } module_init(proc_filesystems_init); #endif static struct file_system_type *__get_fs_type(const char *name, int len) { struct file_system_type *fs; read_lock(&file_systems_lock); fs = *(find_filesystem(name, len)); if (fs && !try_module_get(fs->owner)) fs = NULL; read_unlock(&file_systems_lock); return fs; } struct file_system_type *get_fs_type(const char *name) { struct file_system_type *fs; const char *dot = strchr(name, '.'); int len = dot ? 
dot - name : strlen(name); fs = __get_fs_type(name, len); if (!fs && (request_module("%.*s", len, name) == 0)) fs = __get_fs_type(name, len); if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) { put_filesystem(fs); fs = NULL; } return fs; } EXPORT_SYMBOL(get_fs_type);
gpl-2.0
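register_filesystem() and unregister_filesystem() above manipulate the file_systems singly linked list through the pointer-to-pointer slot returned by find_filesystem(), which is why neither function needs a special case for an empty list or for the head element. Below is a self-contained illustration of that idiom with invented names (fs_node, find_slot, register_node, unregister_node); the real kernel code adds the file_systems_lock rwlock and module reference counting on top of this skeleton.

/* Illustration only: the pointer-to-pointer list idiom used by
 * find_filesystem()/register_filesystem(). All names here are made up.
 */
#include <stdio.h>
#include <string.h>

struct fs_node {
	const char *name;
	struct fs_node *next;
};

static struct fs_node *head;

/* Return the address of the link that points (or would point) to "name". */
static struct fs_node **find_slot(const char *name)
{
	struct fs_node **p;

	for (p = &head; *p; p = &(*p)->next)
		if (strcmp((*p)->name, name) == 0)
			break;
	return p;
}

static int register_node(struct fs_node *fs)
{
	struct fs_node **p = find_slot(fs->name);

	if (*p)
		return -1;	/* already present, like -EBUSY */
	fs->next = NULL;
	*p = fs;		/* appends at the tail, even when the list is empty */
	return 0;
}

static int unregister_node(struct fs_node *fs)
{
	struct fs_node **p = find_slot(fs->name);

	if (*p != fs)
		return -1;	/* not found, like -EINVAL */
	*p = fs->next;		/* unlink without tracking a "prev" node */
	return 0;
}

int main(void)
{
	struct fs_node ext4 = { "ext4", NULL }, xfs = { "xfs", NULL };
	struct fs_node *n;

	register_node(&ext4);
	register_node(&xfs);
	unregister_node(&ext4);

	for (n = head; n; n = n->next)
		printf("%s\n", n->name);	/* prints only "xfs" */
	return 0;
}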
tommytarts/QuantumKernelS3
net/netfilter/xt_TPROXY.c
4884
12321
/* * Transparent proxy support for Linux/iptables * * Copyright (c) 2006-2010 BalaBit IT Ltd. * Author: Balazs Scheidler, Krisztian Kovacs * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/skbuff.h> #include <linux/ip.h> #include <net/checksum.h> #include <net/udp.h> #include <net/inet_sock.h> #include <linux/inetdevice.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <net/netfilter/ipv4/nf_defrag_ipv4.h> #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) #define XT_TPROXY_HAVE_IPV6 1 #include <net/if_inet6.h> #include <net/addrconf.h> #include <linux/netfilter_ipv6/ip6_tables.h> #include <net/netfilter/ipv6/nf_defrag_ipv6.h> #endif #include <net/netfilter/nf_tproxy_core.h> #include <linux/netfilter/xt_TPROXY.h> static bool tproxy_sk_is_transparent(struct sock *sk) { if (sk->sk_state != TCP_TIME_WAIT) { if (inet_sk(sk)->transparent) return true; sock_put(sk); } else { if (inet_twsk(sk)->tw_transparent) return true; inet_twsk_put(inet_twsk(sk)); } return false; } static inline __be32 tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr) { struct in_device *indev; __be32 laddr; if (user_laddr) return user_laddr; laddr = 0; rcu_read_lock(); indev = __in_dev_get_rcu(skb->dev); for_primary_ifa(indev) { laddr = ifa->ifa_local; break; } endfor_ifa(indev); rcu_read_unlock(); return laddr ? laddr : daddr; } /** * tproxy_handle_time_wait4() - handle IPv4 TCP TIME_WAIT reopen redirections * @skb: The skb being processed. * @laddr: IPv4 address to redirect to or zero. * @lport: TCP port to redirect to or zero. * @sk: The TIME_WAIT TCP socket found by the lookup. * * We have to handle SYN packets arriving to TIME_WAIT sockets * differently: instead of reopening the connection we should rather * redirect the new connection to the proxy if there's a listener * socket present. * * tproxy_handle_time_wait4() consumes the socket reference passed in. * * Returns the listener socket if there's one, the TIME_WAIT socket if * no such listener is found, or NULL if the TCP header is incomplete. */ static struct sock * tproxy_handle_time_wait4(struct sk_buff *skb, __be32 laddr, __be16 lport, struct sock *sk) { const struct iphdr *iph = ip_hdr(skb); struct tcphdr _hdr, *hp; hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr); if (hp == NULL) { inet_twsk_put(inet_twsk(sk)); return NULL; } if (hp->syn && !hp->rst && !hp->ack && !hp->fin) { /* SYN to a TIME_WAIT socket, we'd rather redirect it * to a listener socket if there's one */ struct sock *sk2; sk2 = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol, iph->saddr, laddr ? laddr : iph->daddr, hp->source, lport ? 
lport : hp->dest, skb->dev, NFT_LOOKUP_LISTENER); if (sk2) { inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row); inet_twsk_put(inet_twsk(sk)); sk = sk2; } } return sk; } static unsigned int tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport, u_int32_t mark_mask, u_int32_t mark_value) { const struct iphdr *iph = ip_hdr(skb); struct udphdr _hdr, *hp; struct sock *sk; hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr); if (hp == NULL) return NF_DROP; /* check if there's an ongoing connection on the packet * addresses, this happens if the redirect already happened * and the current packet belongs to an already established * connection */ sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol, iph->saddr, iph->daddr, hp->source, hp->dest, skb->dev, NFT_LOOKUP_ESTABLISHED); laddr = tproxy_laddr4(skb, laddr, iph->daddr); if (!lport) lport = hp->dest; /* UDP has no TCP_TIME_WAIT state, so we never enter here */ if (sk && sk->sk_state == TCP_TIME_WAIT) /* reopening a TIME_WAIT connection needs special handling */ sk = tproxy_handle_time_wait4(skb, laddr, lport, sk); else if (!sk) /* no, there's no established connection, check if * there's a listener on the redirected addr/port */ sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol, iph->saddr, laddr, hp->source, lport, skb->dev, NFT_LOOKUP_LISTENER); /* NOTE: assign_sock consumes our sk reference */ if (sk && tproxy_sk_is_transparent(sk)) { /* This should be in a separate target, but we don't do multiple targets on the same rule yet */ skb->mark = (skb->mark & ~mark_mask) ^ mark_value; pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n", iph->protocol, &iph->daddr, ntohs(hp->dest), &laddr, ntohs(lport), skb->mark); nf_tproxy_assign_sock(skb, sk); return NF_ACCEPT; } pr_debug("no socket, dropping: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n", iph->protocol, &iph->saddr, ntohs(hp->source), &iph->daddr, ntohs(hp->dest), skb->mark); return NF_DROP; } static unsigned int tproxy_tg4_v0(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_tproxy_target_info *tgi = par->targinfo; return tproxy_tg4(skb, tgi->laddr, tgi->lport, tgi->mark_mask, tgi->mark_value); } static unsigned int tproxy_tg4_v1(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_tproxy_target_info_v1 *tgi = par->targinfo; return tproxy_tg4(skb, tgi->laddr.ip, tgi->lport, tgi->mark_mask, tgi->mark_value); } #ifdef XT_TPROXY_HAVE_IPV6 static inline const struct in6_addr * tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr, const struct in6_addr *daddr) { struct inet6_dev *indev; struct inet6_ifaddr *ifa; struct in6_addr *laddr; if (!ipv6_addr_any(user_laddr)) return user_laddr; laddr = NULL; rcu_read_lock(); indev = __in6_dev_get(skb->dev); if (indev) list_for_each_entry(ifa, &indev->addr_list, if_list) { if (ifa->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED)) continue; laddr = &ifa->addr; break; } rcu_read_unlock(); return laddr ? laddr : daddr; } /** * tproxy_handle_time_wait6() - handle IPv6 TCP TIME_WAIT reopen redirections * @skb: The skb being processed. * @tproto: Transport protocol. * @thoff: Transport protocol header offset. * @par: Iptables target parameters. * @sk: The TIME_WAIT TCP socket found by the lookup. * * We have to handle SYN packets arriving to TIME_WAIT sockets * differently: instead of reopening the connection we should rather * redirect the new connection to the proxy if there's a listener * socket present. 
* * tproxy_handle_time_wait6() consumes the socket reference passed in. * * Returns the listener socket if there's one, the TIME_WAIT socket if * no such listener is found, or NULL if the TCP header is incomplete. */ static struct sock * tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff, const struct xt_action_param *par, struct sock *sk) { const struct ipv6hdr *iph = ipv6_hdr(skb); struct tcphdr _hdr, *hp; const struct xt_tproxy_target_info_v1 *tgi = par->targinfo; hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr); if (hp == NULL) { inet_twsk_put(inet_twsk(sk)); return NULL; } if (hp->syn && !hp->rst && !hp->ack && !hp->fin) { /* SYN to a TIME_WAIT socket, we'd rather redirect it * to a listener socket if there's one */ struct sock *sk2; sk2 = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto, &iph->saddr, tproxy_laddr6(skb, &tgi->laddr.in6, &iph->daddr), hp->source, tgi->lport ? tgi->lport : hp->dest, skb->dev, NFT_LOOKUP_LISTENER); if (sk2) { inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row); inet_twsk_put(inet_twsk(sk)); sk = sk2; } } return sk; } static unsigned int tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par) { const struct ipv6hdr *iph = ipv6_hdr(skb); const struct xt_tproxy_target_info_v1 *tgi = par->targinfo; struct udphdr _hdr, *hp; struct sock *sk; const struct in6_addr *laddr; __be16 lport; int thoff; int tproto; tproto = ipv6_find_hdr(skb, &thoff, -1, NULL); if (tproto < 0) { pr_debug("unable to find transport header in IPv6 packet, dropping\n"); return NF_DROP; } hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr); if (hp == NULL) { pr_debug("unable to grab transport header contents in IPv6 packet, dropping\n"); return NF_DROP; } /* check if there's an ongoing connection on the packet * addresses, this happens if the redirect already happened * and the current packet belongs to an already established * connection */ sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto, &iph->saddr, &iph->daddr, hp->source, hp->dest, par->in, NFT_LOOKUP_ESTABLISHED); laddr = tproxy_laddr6(skb, &tgi->laddr.in6, &iph->daddr); lport = tgi->lport ? 
tgi->lport : hp->dest; /* UDP has no TCP_TIME_WAIT state, so we never enter here */ if (sk && sk->sk_state == TCP_TIME_WAIT) /* reopening a TIME_WAIT connection needs special handling */ sk = tproxy_handle_time_wait6(skb, tproto, thoff, par, sk); else if (!sk) /* no there's no established connection, check if * there's a listener on the redirected addr/port */ sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto, &iph->saddr, laddr, hp->source, lport, par->in, NFT_LOOKUP_LISTENER); /* NOTE: assign_sock consumes our sk reference */ if (sk && tproxy_sk_is_transparent(sk)) { /* This should be in a separate target, but we don't do multiple targets on the same rule yet */ skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value; pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n", tproto, &iph->saddr, ntohs(hp->source), laddr, ntohs(lport), skb->mark); nf_tproxy_assign_sock(skb, sk); return NF_ACCEPT; } pr_debug("no socket, dropping: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n", tproto, &iph->saddr, ntohs(hp->source), &iph->daddr, ntohs(hp->dest), skb->mark); return NF_DROP; } static int tproxy_tg6_check(const struct xt_tgchk_param *par) { const struct ip6t_ip6 *i = par->entryinfo; if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) && !(i->flags & IP6T_INV_PROTO)) return 0; pr_info("Can be used only in combination with " "either -p tcp or -p udp\n"); return -EINVAL; } #endif static int tproxy_tg4_check(const struct xt_tgchk_param *par) { const struct ipt_ip *i = par->entryinfo; if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) && !(i->invflags & IPT_INV_PROTO)) return 0; pr_info("Can be used only in combination with " "either -p tcp or -p udp\n"); return -EINVAL; } static struct xt_target tproxy_tg_reg[] __read_mostly = { { .name = "TPROXY", .family = NFPROTO_IPV4, .table = "mangle", .target = tproxy_tg4_v0, .revision = 0, .targetsize = sizeof(struct xt_tproxy_target_info), .checkentry = tproxy_tg4_check, .hooks = 1 << NF_INET_PRE_ROUTING, .me = THIS_MODULE, }, { .name = "TPROXY", .family = NFPROTO_IPV4, .table = "mangle", .target = tproxy_tg4_v1, .revision = 1, .targetsize = sizeof(struct xt_tproxy_target_info_v1), .checkentry = tproxy_tg4_check, .hooks = 1 << NF_INET_PRE_ROUTING, .me = THIS_MODULE, }, #ifdef XT_TPROXY_HAVE_IPV6 { .name = "TPROXY", .family = NFPROTO_IPV6, .table = "mangle", .target = tproxy_tg6_v1, .revision = 1, .targetsize = sizeof(struct xt_tproxy_target_info_v1), .checkentry = tproxy_tg6_check, .hooks = 1 << NF_INET_PRE_ROUTING, .me = THIS_MODULE, }, #endif }; static int __init tproxy_tg_init(void) { nf_defrag_ipv4_enable(); #ifdef XT_TPROXY_HAVE_IPV6 nf_defrag_ipv6_enable(); #endif return xt_register_targets(tproxy_tg_reg, ARRAY_SIZE(tproxy_tg_reg)); } static void __exit tproxy_tg_exit(void) { xt_unregister_targets(tproxy_tg_reg, ARRAY_SIZE(tproxy_tg_reg)); } module_init(tproxy_tg_init); module_exit(tproxy_tg_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Balazs Scheidler, Krisztian Kovacs"); MODULE_DESCRIPTION("Netfilter transparent proxy (TPROXY) target module."); MODULE_ALIAS("ipt_TPROXY"); MODULE_ALIAS("ip6t_TPROXY");
gpl-2.0
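Both tproxy_tg4() and tproxy_tg6_v1() above re-mark an accepted skb with skb->mark = (skb->mark & ~mark_mask) ^ mark_value, so only the bits selected by the mask are replaced and any mark set earlier in the ruleset is preserved. A toy demonstration of that expression with arbitrary example values (nothing below comes from the module except the formula):

/* Toy demonstration of the TPROXY mark update: mark = (mark & ~mask) ^ value.
 * The concrete numbers are invented examples.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t tproxy_mark(uint32_t mark, uint32_t mask, uint32_t value)
{
	return (mark & ~mask) ^ value;
}

int main(void)
{
	uint32_t mark  = 0xabcd0010;	/* pre-existing mark on the packet */
	uint32_t mask  = 0x000000ff;	/* only the low byte belongs to TPROXY */
	uint32_t value = 0x00000001;	/* mark value configured in the rule */

	printf("before: 0x%08x\n", mark);
	printf("after:  0x%08x\n", tproxy_mark(mark, mask, value));
	/* prints 0xabcd0001: upper bits untouched, masked byte replaced */
	return 0;
}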
zarboz/android_kernel_htc_dlx
fs/nls/nls_cp874.c
12564
10995
/* * linux/fs/nls/nls_cp874.c * * Charset cp874 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2026, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* 0x90*/ 0x0000, 0x2018, 0x2019, 0x201c, 0x201d, 0x2022, 0x2013, 0x2014, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* 0xa0*/ 0x00a0, 0x0e01, 0x0e02, 0x0e03, 0x0e04, 0x0e05, 0x0e06, 0x0e07, 0x0e08, 0x0e09, 0x0e0a, 0x0e0b, 0x0e0c, 0x0e0d, 0x0e0e, 0x0e0f, /* 0xb0*/ 0x0e10, 0x0e11, 0x0e12, 0x0e13, 0x0e14, 0x0e15, 0x0e16, 0x0e17, 0x0e18, 0x0e19, 0x0e1a, 0x0e1b, 0x0e1c, 0x0e1d, 0x0e1e, 0x0e1f, /* 0xc0*/ 0x0e20, 0x0e21, 0x0e22, 0x0e23, 0x0e24, 0x0e25, 0x0e26, 0x0e27, 0x0e28, 0x0e29, 0x0e2a, 0x0e2b, 0x0e2c, 0x0e2d, 0x0e2e, 0x0e2f, /* 0xd0*/ 0x0e30, 0x0e31, 0x0e32, 0x0e33, 0x0e34, 0x0e35, 0x0e36, 0x0e37, 0x0e38, 0x0e39, 0x0e3a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0e3f, /* 0xe0*/ 0x0e40, 0x0e41, 0x0e42, 0x0e43, 0x0e44, 0x0e45, 0x0e46, 0x0e47, 0x0e48, 0x0e49, 0x0e4a, 0x0e4b, 0x0e4c, 0x0e4d, 0x0e4e, 0x0e4f, /* 0xf0*/ 0x0e50, 0x0e51, 0x0e52, 0x0e53, 0x0e54, 0x0e55, 0x0e56, 0x0e57, 0x0e58, 0x0e59, 0x0e5a, 0x0e5b, 0x0000, 0x0000, 0x0000, 0x0000, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f 
*/ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ }; static const unsigned char page0e[256] = { 0x00, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0x00-0x07 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0x08-0x0f */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0x10-0x17 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0x18-0x1f */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0x20-0x27 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0x28-0x2f */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0x30-0x37 */ 0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0xdf, /* 0x38-0x3f */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x40-0x47 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x48-0x4f */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0x50-0x57 */ 0xf8, 0xf9, 0xfa, 0xfb, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x96, 0x97, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x91, 0x92, 0x00, 0x00, 0x93, 0x94, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x95, 0x00, 0x00, 0x00, 0x85, 0x00, /* 0x20-0x27 */ }; static const unsigned char *const page_uni2charset[256] = { page00, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page0e, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 
0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "cp874", .alias = "tis-620", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_cp874(void) { return register_nls(&table); } static void __exit exit_nls_cp874(void) { unregister_nls(&table); } module_init(init_nls_cp874) module_exit(exit_nls_cp874) MODULE_LICENSE("Dual BSD/GPL"); 
MODULE_ALIAS_NLS(tis-620);
gpl-2.0
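The nls_cp874 record above converts through two-level lookup tables: char2uni is a direct 256-entry lookup on the raw byte, while uni2char indexes page_uni2charset by the high byte of the code point and the selected page by the low byte, returning -EINVAL when no mapping exists. The stand-alone C sketch below reproduces that lookup scheme in user space with a deliberately tiny table covering only U+0E01..U+0E04; the table contents and the main() driver are illustrative assumptions, not part of the kernel source.

/* Minimal user-space sketch of the two-level uni2char lookup used by the
 * NLS tables above. Only U+0E01..U+0E04 are mapped here as an example;
 * everything else reports "no mapping" (the kernel returns -EINVAL).
 */
#include <stdio.h>

/* Page 0x0e: low byte -> cp874 byte; 0x00 means "unmapped", as in the kernel tables. */
static const unsigned char page0e[256] = {
	[0x01] = 0xa1, [0x02] = 0xa2, [0x03] = 0xa3, [0x04] = 0xa4,
};

/* High byte of the code point selects a page; NULL means no mappings in that page. */
static const unsigned char *const page_uni2charset[256] = {
	[0x0e] = page0e,
};

/* Returns the single-byte encoding, or -1 if the code point has no cp874 equivalent. */
static int uni2char(unsigned int uni)
{
	const unsigned char *page = page_uni2charset[(uni >> 8) & 0xff];
	unsigned char lo = uni & 0xff;

	if (page && page[lo])
		return page[lo];
	return -1;
}

int main(void)
{
	unsigned int probes[] = { 0x0e01, 0x0e04, 0x0041 };

	for (unsigned int i = 0; i < sizeof(probes) / sizeof(probes[0]); i++) {
		int c = uni2char(probes[i]);

		if (c < 0)
			printf("U+%04X: no mapping\n", probes[i]);
		else
			printf("U+%04X -> 0x%02x\n", probes[i], c);
	}
	return 0;
}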
AMohseni76/Prime_Kernel
arch/powerpc/boot/prpmc2800.c
13332
13994
/* * Motorola ECC prpmc280/f101 & prpmc2800/f101e platform code. * * Author: Mark A. Greer <mgreer@mvista.com> * * 2007 (c) MontaVista, Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ #include <stdarg.h> #include <stddef.h> #include "types.h" #include "elf.h" #include "page.h" #include "string.h" #include "stdio.h" #include "io.h" #include "ops.h" #include "gunzip_util.h" #include "mv64x60.h" #define KB 1024U #define MB (KB*KB) #define GB (KB*MB) #define MHz (1000U*1000U) #define GHz (1000U*MHz) #define BOARD_MODEL "PrPMC2800" #define BOARD_MODEL_MAX 32 /* max strlen(BOARD_MODEL) + 1 */ #define EEPROM2_ADDR 0xa4 #define EEPROM3_ADDR 0xa8 BSS_STACK(16*KB); static u8 *bridge_base; typedef enum { BOARD_MODEL_PRPMC280, BOARD_MODEL_PRPMC2800, } prpmc2800_board_model; typedef enum { BRIDGE_TYPE_MV64360, BRIDGE_TYPE_MV64362, } prpmc2800_bridge_type; struct prpmc2800_board_info { prpmc2800_board_model model; char variant; prpmc2800_bridge_type bridge_type; u8 subsys0; u8 subsys1; u8 vpd4; u8 vpd4_mask; u32 core_speed; u32 mem_size; u32 boot_flash; u32 user_flash; }; static struct prpmc2800_board_info prpmc2800_board_info[] = { { .model = BOARD_MODEL_PRPMC280, .variant = 'a', .bridge_type = BRIDGE_TYPE_MV64360, .subsys0 = 0xff, .subsys1 = 0xff, .vpd4 = 0x00, .vpd4_mask = 0x0f, .core_speed = 1*GHz, .mem_size = 512*MB, .boot_flash = 1*MB, .user_flash = 64*MB, }, { .model = BOARD_MODEL_PRPMC280, .variant = 'b', .bridge_type = BRIDGE_TYPE_MV64362, .subsys0 = 0xff, .subsys1 = 0xff, .vpd4 = 0x01, .vpd4_mask = 0x0f, .core_speed = 1*GHz, .mem_size = 512*MB, .boot_flash = 0, .user_flash = 0, }, { .model = BOARD_MODEL_PRPMC280, .variant = 'c', .bridge_type = BRIDGE_TYPE_MV64360, .subsys0 = 0xff, .subsys1 = 0xff, .vpd4 = 0x02, .vpd4_mask = 0x0f, .core_speed = 733*MHz, .mem_size = 512*MB, .boot_flash = 1*MB, .user_flash = 64*MB, }, { .model = BOARD_MODEL_PRPMC280, .variant = 'd', .bridge_type = BRIDGE_TYPE_MV64360, .subsys0 = 0xff, .subsys1 = 0xff, .vpd4 = 0x03, .vpd4_mask = 0x0f, .core_speed = 1*GHz, .mem_size = 1*GB, .boot_flash = 1*MB, .user_flash = 64*MB, }, { .model = BOARD_MODEL_PRPMC280, .variant = 'e', .bridge_type = BRIDGE_TYPE_MV64360, .subsys0 = 0xff, .subsys1 = 0xff, .vpd4 = 0x04, .vpd4_mask = 0x0f, .core_speed = 1*GHz, .mem_size = 512*MB, .boot_flash = 1*MB, .user_flash = 64*MB, }, { .model = BOARD_MODEL_PRPMC280, .variant = 'f', .bridge_type = BRIDGE_TYPE_MV64362, .subsys0 = 0xff, .subsys1 = 0xff, .vpd4 = 0x05, .vpd4_mask = 0x0f, .core_speed = 733*MHz, .mem_size = 128*MB, .boot_flash = 1*MB, .user_flash = 0, }, { .model = BOARD_MODEL_PRPMC280, .variant = 'g', .bridge_type = BRIDGE_TYPE_MV64360, .subsys0 = 0xff, .subsys1 = 0xff, .vpd4 = 0x06, .vpd4_mask = 0x0f, .core_speed = 1*GHz, .mem_size = 256*MB, .boot_flash = 1*MB, .user_flash = 0, }, { .model = BOARD_MODEL_PRPMC280, .variant = 'h', .bridge_type = BRIDGE_TYPE_MV64360, .subsys0 = 0xff, .subsys1 = 0xff, .vpd4 = 0x07, .vpd4_mask = 0x0f, .core_speed = 1*GHz, .mem_size = 1*GB, .boot_flash = 1*MB, .user_flash = 64*MB, }, { .model = BOARD_MODEL_PRPMC2800, .variant = 'a', .bridge_type = BRIDGE_TYPE_MV64360, .subsys0 = 0xb2, .subsys1 = 0x8c, .vpd4 = 0x00, .vpd4_mask = 0x00, .core_speed = 1*GHz, .mem_size = 512*MB, .boot_flash = 2*MB, .user_flash = 64*MB, }, { .model = BOARD_MODEL_PRPMC2800, .variant = 'b', .bridge_type = BRIDGE_TYPE_MV64362, .subsys0 = 0xb2, .subsys1 = 0x8d, .vpd4 = 0x00, .vpd4_mask 
= 0x00, .core_speed = 1*GHz, .mem_size = 512*MB, .boot_flash = 0, .user_flash = 0, }, { .model = BOARD_MODEL_PRPMC2800, .variant = 'c', .bridge_type = BRIDGE_TYPE_MV64360, .subsys0 = 0xb2, .subsys1 = 0x8e, .vpd4 = 0x00, .vpd4_mask = 0x00, .core_speed = 733*MHz, .mem_size = 512*MB, .boot_flash = 2*MB, .user_flash = 64*MB, }, { .model = BOARD_MODEL_PRPMC2800, .variant = 'd', .bridge_type = BRIDGE_TYPE_MV64360, .subsys0 = 0xb2, .subsys1 = 0x8f, .vpd4 = 0x00, .vpd4_mask = 0x00, .core_speed = 1*GHz, .mem_size = 1*GB, .boot_flash = 2*MB, .user_flash = 64*MB, }, { .model = BOARD_MODEL_PRPMC2800, .variant = 'e', .bridge_type = BRIDGE_TYPE_MV64360, .subsys0 = 0xa2, .subsys1 = 0x8a, .vpd4 = 0x00, .vpd4_mask = 0x00, .core_speed = 1*GHz, .mem_size = 512*MB, .boot_flash = 2*MB, .user_flash = 64*MB, }, { .model = BOARD_MODEL_PRPMC2800, .variant = 'f', .bridge_type = BRIDGE_TYPE_MV64362, .subsys0 = 0xa2, .subsys1 = 0x8b, .vpd4 = 0x00, .vpd4_mask = 0x00, .core_speed = 733*MHz, .mem_size = 128*MB, .boot_flash = 2*MB, .user_flash = 0, }, { .model = BOARD_MODEL_PRPMC2800, .variant = 'g', .bridge_type = BRIDGE_TYPE_MV64360, .subsys0 = 0xa2, .subsys1 = 0x8c, .vpd4 = 0x00, .vpd4_mask = 0x00, .core_speed = 1*GHz, .mem_size = 2*GB, .boot_flash = 2*MB, .user_flash = 64*MB, }, { .model = BOARD_MODEL_PRPMC2800, .variant = 'h', .bridge_type = BRIDGE_TYPE_MV64360, .subsys0 = 0xa2, .subsys1 = 0x8d, .vpd4 = 0x00, .vpd4_mask = 0x00, .core_speed = 733*MHz, .mem_size = 1*GB, .boot_flash = 2*MB, .user_flash = 64*MB, }, }; static struct prpmc2800_board_info *prpmc2800_get_board_info(u8 *vpd) { struct prpmc2800_board_info *bip; int i; for (i=0,bip=prpmc2800_board_info; i<ARRAY_SIZE(prpmc2800_board_info); i++,bip++) if ((vpd[0] == bip->subsys0) && (vpd[1] == bip->subsys1) && ((vpd[4] & bip->vpd4_mask) == bip->vpd4)) return bip; return NULL; } /* Get VPD from i2c eeprom 2, then match it to a board info entry */ static struct prpmc2800_board_info *prpmc2800_get_bip(void) { struct prpmc2800_board_info *bip; u8 vpd[5]; int rc; if (mv64x60_i2c_open()) fatal("Error: Can't open i2c device\n\r"); /* Get VPD from i2c eeprom-2 */ memset(vpd, 0, sizeof(vpd)); rc = mv64x60_i2c_read(EEPROM2_ADDR, vpd, 0x1fde, 2, sizeof(vpd)); if (rc < 0) fatal("Error: Couldn't read eeprom2\n\r"); mv64x60_i2c_close(); /* Get board type & related info */ bip = prpmc2800_get_board_info(vpd); if (bip == NULL) { printf("Error: Unsupported board or corrupted VPD:\n\r"); printf(" 0x%x 0x%x 0x%x 0x%x 0x%x\n\r", vpd[0], vpd[1], vpd[2], vpd[3], vpd[4]); printf("Using device tree defaults...\n\r"); } return bip; } static void prpmc2800_bridge_setup(u32 mem_size) { u32 i, v[12], enables, acc_bits; u32 pci_base_hi, pci_base_lo, size, buf[2]; unsigned long cpu_base; int rc; void *devp; u8 *bridge_pbase, is_coherent; struct mv64x60_cpu2pci_win *tbl; bridge_pbase = mv64x60_get_bridge_pbase(); is_coherent = mv64x60_is_coherent(); if (is_coherent) acc_bits = MV64x60_PCI_ACC_CNTL_SNOOP_WB | MV64x60_PCI_ACC_CNTL_SWAP_NONE | MV64x60_PCI_ACC_CNTL_MBURST_32_BYTES | MV64x60_PCI_ACC_CNTL_RDSIZE_32_BYTES; else acc_bits = MV64x60_PCI_ACC_CNTL_SNOOP_NONE | MV64x60_PCI_ACC_CNTL_SWAP_NONE | MV64x60_PCI_ACC_CNTL_MBURST_128_BYTES | MV64x60_PCI_ACC_CNTL_RDSIZE_256_BYTES; mv64x60_config_ctlr_windows(bridge_base, bridge_pbase, is_coherent); mv64x60_config_pci_windows(bridge_base, bridge_pbase, 0, 0, mem_size, acc_bits); /* Get the cpu -> pci i/o & mem mappings from the device tree */ devp = find_node_by_compatible(NULL, "marvell,mv64360-pci"); if (devp == NULL) fatal("Error: Missing 
marvell,mv64360-pci" " device tree node\n\r"); rc = getprop(devp, "ranges", v, sizeof(v)); if (rc != sizeof(v)) fatal("Error: Can't find marvell,mv64360-pci ranges" " property\n\r"); /* Get the cpu -> pci i/o & mem mappings from the device tree */ devp = find_node_by_compatible(NULL, "marvell,mv64360"); if (devp == NULL) fatal("Error: Missing marvell,mv64360 device tree node\n\r"); enables = in_le32((u32 *)(bridge_base + MV64x60_CPU_BAR_ENABLE)); enables |= 0x0007fe00; /* Disable all cpu->pci windows */ out_le32((u32 *)(bridge_base + MV64x60_CPU_BAR_ENABLE), enables); for (i=0; i<12; i+=6) { switch (v[i] & 0xff000000) { case 0x01000000: /* PCI I/O Space */ tbl = mv64x60_cpu2pci_io; break; case 0x02000000: /* PCI MEM Space */ tbl = mv64x60_cpu2pci_mem; break; default: continue; } pci_base_hi = v[i+1]; pci_base_lo = v[i+2]; cpu_base = v[i+3]; size = v[i+5]; buf[0] = cpu_base; buf[1] = size; if (!dt_xlate_addr(devp, buf, sizeof(buf), &cpu_base)) fatal("Error: Can't translate PCI address 0x%x\n\r", (u32)cpu_base); mv64x60_config_cpu2pci_window(bridge_base, 0, pci_base_hi, pci_base_lo, cpu_base, size, tbl); } enables &= ~0x00000600; /* Enable cpu->pci0 i/o, cpu->pci0 mem0 */ out_le32((u32 *)(bridge_base + MV64x60_CPU_BAR_ENABLE), enables); } static void prpmc2800_fixups(void) { u32 v[2], l, mem_size; int rc; void *devp; char model[BOARD_MODEL_MAX]; struct prpmc2800_board_info *bip; bip = prpmc2800_get_bip(); /* Get board info based on VPD */ mem_size = (bip) ? bip->mem_size : mv64x60_get_mem_size(bridge_base); prpmc2800_bridge_setup(mem_size); /* Do necessary bridge setup */ /* If the VPD doesn't match what we know about, just use the * defaults already in the device tree. */ if (!bip) return; /* Know the board type so override device tree defaults */ /* Set /model appropriately */ devp = finddevice("/"); if (devp == NULL) fatal("Error: Missing '/' device tree node\n\r"); memset(model, 0, BOARD_MODEL_MAX); strncpy(model, BOARD_MODEL, BOARD_MODEL_MAX - 2); l = strlen(model); if (bip->model == BOARD_MODEL_PRPMC280) l--; model[l++] = bip->variant; model[l++] = '\0'; setprop(devp, "model", model, l); /* Set /cpus/PowerPC,7447/clock-frequency */ devp = find_node_by_prop_value_str(NULL, "device_type", "cpu"); if (devp == NULL) fatal("Error: Missing proper cpu device tree node\n\r"); v[0] = bip->core_speed; setprop(devp, "clock-frequency", &v[0], sizeof(v[0])); /* Set /memory/reg size */ devp = finddevice("/memory"); if (devp == NULL) fatal("Error: Missing /memory device tree node\n\r"); v[0] = 0; v[1] = bip->mem_size; setprop(devp, "reg", v, sizeof(v)); /* Update model, if this is a mv64362 */ if (bip->bridge_type == BRIDGE_TYPE_MV64362) { devp = find_node_by_compatible(NULL, "marvell,mv64360"); if (devp == NULL) fatal("Error: Missing marvell,mv64360" " device tree node\n\r"); setprop(devp, "model", "mv64362", strlen("mv64362") + 1); } /* Set User FLASH size */ devp = find_node_by_compatible(NULL, "direct-mapped"); if (devp == NULL) fatal("Error: Missing User FLASH device tree node\n\r"); rc = getprop(devp, "reg", v, sizeof(v)); if (rc != sizeof(v)) fatal("Error: Can't find User FLASH reg property\n\r"); v[1] = bip->user_flash; setprop(devp, "reg", v, sizeof(v)); } #define MV64x60_MPP_CNTL_0 0xf000 #define MV64x60_MPP_CNTL_2 0xf008 #define MV64x60_GPP_IO_CNTL 0xf100 #define MV64x60_GPP_LEVEL_CNTL 0xf110 #define MV64x60_GPP_VALUE_SET 0xf118 static void prpmc2800_reset(void) { u32 temp; udelay(5000000); if (bridge_base != 0) { temp = in_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_0)); temp &= 
0xFFFF0FFF; out_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_0), temp); temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL)); temp |= 0x00000004; out_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL), temp); temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL)); temp |= 0x00000004; out_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL), temp); temp = in_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_2)); temp &= 0xFFFF0FFF; out_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_2), temp); temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL)); temp |= 0x00080000; out_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL), temp); temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL)); temp |= 0x00080000; out_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL), temp); out_le32((u32 *)(bridge_base + MV64x60_GPP_VALUE_SET), 0x00080004); } for (;;); } #define HEAP_SIZE (16*MB) static struct gunzip_state gzstate; void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { struct elf_info ei; char *heap_start, *dtb; int dt_size = _dtb_end - _dtb_start; void *vmlinuz_addr = _vmlinux_start; unsigned long vmlinuz_size = _vmlinux_end - _vmlinux_start; char elfheader[256]; if (dt_size <= 0) /* No fdt */ exit(); /* * Start heap after end of the kernel (after decompressed to * address 0) or the end of the zImage, whichever is higher. * That's so things allocated by simple_alloc won't overwrite * any part of the zImage and the kernel won't overwrite the dtb * when decompressed & relocated. */ gunzip_start(&gzstate, vmlinuz_addr, vmlinuz_size); gunzip_exactly(&gzstate, elfheader, sizeof(elfheader)); if (!parse_elf32(elfheader, &ei)) exit(); heap_start = (char *)(ei.memsize + ei.elfoffset); /* end of kernel*/ heap_start = max(heap_start, (char *)_end); /* end of zImage */ if ((unsigned)simple_alloc_init(heap_start, HEAP_SIZE, 2*KB, 16) > (128*MB)) exit(); /* Relocate dtb to safe area past end of zImage & kernel */ dtb = malloc(dt_size); if (!dtb) exit(); memmove(dtb, _dtb_start, dt_size); fdt_init(dtb); bridge_base = mv64x60_get_bridge_base(); platform_ops.fixups = prpmc2800_fixups; platform_ops.exit = prpmc2800_reset; if (serial_console_init() < 0) exit(); } /* _zimage_start called very early--need to turn off external interrupts */ asm (" .globl _zimage_start\n\ _zimage_start:\n\ mfmsr 10\n\ rlwinm 10,10,0,~(1<<15) /* Clear MSR_EE */\n\ sync\n\ mtmsr 10\n\ isync\n\ b _zimage_start_lib\n\ ");
gpl-2.0
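prpmc2800_get_board_info() in the record above identifies the board by walking a static table and matching the two subsystem ID bytes plus a masked compare on vpd[4]. The following stand-alone sketch shows that masked table match in isolation; the board table, the names, and the driver in main() are invented for illustration and are not real PrPMC data.

/* Stand-alone sketch of the masked VPD match performed by
 * prpmc2800_get_board_info() above: an entry matches when the two
 * subsystem ID bytes are equal and the masked bits of vpd[4] agree.
 * The table below is invented for illustration, not real board data.
 */
#include <stdio.h>
#include <stddef.h>

struct board_info {
	unsigned char subsys0;
	unsigned char subsys1;
	unsigned char vpd4;
	unsigned char vpd4_mask;
	const char *name;
};

static const struct board_info boards[] = {
	{ 0xff, 0xff, 0x00, 0x0f, "variant-a" },
	{ 0xff, 0xff, 0x01, 0x0f, "variant-b" },
	{ 0xb2, 0x8c, 0x00, 0x00, "2800-a" },	/* mask 0 matches any vpd[4] */
};

static const struct board_info *match_board(const unsigned char vpd[5])
{
	for (size_t i = 0; i < sizeof(boards) / sizeof(boards[0]); i++) {
		const struct board_info *b = &boards[i];

		if (vpd[0] == b->subsys0 && vpd[1] == b->subsys1 &&
		    (vpd[4] & b->vpd4_mask) == b->vpd4)
			return b;
	}
	return NULL;	/* unknown board: caller falls back to device tree defaults */
}

int main(void)
{
	unsigned char vpd[5] = { 0xff, 0xff, 0x00, 0x00, 0x31 };	/* low nibble is 0x1 */
	const struct board_info *b = match_board(vpd);

	printf("matched: %s\n", b ? b->name : "none");
	return 0;
}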
shahan-mik3/android_kernel_xiaomi_cancro
arch/microblaze/mm/mmu_context.c
13844
2042
/*
 * This file contains the routines for handling the MMU.
 *
 * Copyright (C) 2007 Xilinx, Inc.  All rights reserved.
 *
 * Derived from arch/ppc/mm/4xx_mmu.c:
 *  -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/mm.h>
#include <linux/init.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT+1];

/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/*
	 * The use of context zero is reserved for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}

/*
 * Steal a context from a task that has one at the moment.
 *
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone were motivated to do it.
 */
void steal_context(void)
{
	struct mm_struct *mm;

	/* free up context `next_mmu_context' */
	/* if we shouldn't free context 0, don't... */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	flush_tlb_mm(mm);
	destroy_context(mm);
}
gpl-2.0
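mmu_context.c above keeps the pool of hardware MMU contexts in a bit map (context_map), reserves the IDs below FIRST_CONTEXT for the kernel, and recycles a victim round-robin through steal_context() when the pool runs dry (the allocation fast path itself lives in the corresponding mmu_context.h, which is not part of this record). The sketch below is a minimal user-space model of that scheme, assuming a 16-entry context space; the constants and the alloc_context() helper are illustrative, not the kernel's.

/* Small sketch of the bitmap-based context allocation the file above
 * supports: context IDs live in a bit map, IDs below FIRST_CONTEXT are
 * reserved for the kernel, and when the map is full the next victim is
 * recycled round-robin (the "steal" path). Constants and helpers here
 * are illustrative, not the kernel's.
 */
#include <stdio.h>

#define FIRST_CONTEXT	1
#define LAST_CONTEXT	15	/* small map so the example is easy to follow */

static unsigned long context_map;	/* bit i set => context i in use */
static unsigned int next_victim = FIRST_CONTEXT;

static int alloc_context(void)
{
	/* First try to find a free ID. */
	for (unsigned int id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
		if (!(context_map & (1UL << id))) {
			context_map |= 1UL << id;
			return id;
		}
	}

	/* None free: steal the next victim round-robin, as steal_context() does. */
	unsigned int victim = next_victim;

	next_victim = (next_victim >= LAST_CONTEXT) ? FIRST_CONTEXT : next_victim + 1;
	return victim;	/* in the kernel, the old owner's TLB entries are flushed first */
}

int main(void)
{
	context_map = (1UL << FIRST_CONTEXT) - 1;	/* reserve IDs below FIRST_CONTEXT */

	for (int i = 0; i < 18; i++)
		printf("allocated context %d\n", alloc_context());
	return 0;
}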
ArchiDroid/android_kernel_samsung_smdk4412-old
arch/powerpc/boot/cuboot-yosemite.c
14100
1095
/*
 * Old U-boot compatibility for Yosemite
 *
 * Author: Josh Boyer <jwboyer@linux.vnet.ibm.com>
 *
 * Copyright 2008 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "ops.h"
#include "stdio.h"
#include "4xx.h"
#include "44x.h"
#include "cuboot.h"

#define TARGET_4xx
#define TARGET_44x
#include "ppcboot.h"

static bd_t bd;

static void yosemite_fixups(void)
{
	unsigned long sysclk = 66666666;

	ibm440ep_fixup_clocks(sysclk, 11059200, 50000000);
	ibm4xx_sdram_fixup_memsize();
	ibm4xx_quiesce_eth((u32 *)0xef600e00, (u32 *)0xef600f00);
	dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr);
	dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr);
}

void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();
	platform_ops.fixups = yosemite_fixups;
	platform_ops.exit = ibm44x_dbcr_reset;
	fdt_init(_dtb_start);
	serial_console_init();
}
gpl-2.0
Split-Screen/android_kernel_lge_v500
drivers/gpu/msm/kgsl.c
21
114373
/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/fb.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/list.h> #include <linux/debugfs.h> #include <linux/uaccess.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/dma-buf.h> #include <linux/vmalloc.h> #include <linux/pm_runtime.h> #include <linux/genlock.h> #include <linux/rbtree.h> #include <linux/ashmem.h> #include <linux/major.h> #include <linux/io.h> #include <mach/socinfo.h> #include <linux/mman.h> #include <linux/sort.h> #include <asm/cacheflush.h> #include "kgsl.h" #include "kgsl_debugfs.h" #include "kgsl_cffdump.h" #include "kgsl_log.h" #include "kgsl_sharedmem.h" #include "kgsl_device.h" #include "kgsl_trace.h" #include "kgsl_sync.h" #include "adreno.h" #undef MODULE_PARAM_PREFIX #define MODULE_PARAM_PREFIX "kgsl." static int kgsl_pagetable_count = KGSL_PAGETABLE_COUNT; static char *ksgl_mmu_type; module_param_named(ptcount, kgsl_pagetable_count, int, 0); MODULE_PARM_DESC(kgsl_pagetable_count, "Minimum number of pagetables for KGSL to allocate at initialization time"); module_param_named(mmutype, ksgl_mmu_type, charp, 0); MODULE_PARM_DESC(ksgl_mmu_type, "Type of MMU to be used for graphics. Valid values are 'iommu' or 'gpummu' or 'nommu'"); struct kgsl_dma_buf_meta { struct dma_buf_attachment *attach; struct dma_buf *dmabuf; struct sg_table *table; }; static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry); /** * kgsl_trace_issueibcmds() - Call trace_issueibcmds by proxy * device: KGSL device * id: ID of the context submitting the command * cmdbatch: Pointer to kgsl_cmdbatch describing these commands * timestamp: Timestamp assigned to the command batch * flags: Flags sent by the user * result: Result of the submission attempt * type: Type of context issuing the command * * Wrap the issueibcmds ftrace hook into a function that can be called from the * GPU specific modules. */ void kgsl_trace_issueibcmds(struct kgsl_device *device, int id, struct kgsl_cmdbatch *cmdbatch, unsigned int timestamp, unsigned int flags, int result, unsigned int type) { trace_kgsl_issueibcmds(device, id, cmdbatch, timestamp, flags, result, type); } EXPORT_SYMBOL(kgsl_trace_issueibcmds); /** * kgsl_trace_regwrite - call regwrite ftrace function by proxy * device: KGSL device * offset: dword offset of the register being written * value: Value of the register being written * * Wrap the regwrite ftrace hook into a function that can be called from the * GPU specific modules. 
*/ void kgsl_trace_regwrite(struct kgsl_device *device, unsigned int offset, unsigned int value) { trace_kgsl_regwrite(device, offset, value); } EXPORT_SYMBOL(kgsl_trace_regwrite); int kgsl_memfree_hist_init(void) { void *base; base = kzalloc(KGSL_MEMFREE_HIST_SIZE, GFP_KERNEL); kgsl_driver.memfree_hist.base_hist_rb = base; if (base == NULL) return -ENOMEM; kgsl_driver.memfree_hist.size = KGSL_MEMFREE_HIST_SIZE; kgsl_driver.memfree_hist.wptr = base; return 0; } void kgsl_memfree_hist_exit(void) { kfree(kgsl_driver.memfree_hist.base_hist_rb); kgsl_driver.memfree_hist.base_hist_rb = NULL; } void kgsl_memfree_hist_set_event(unsigned int pid, unsigned int gpuaddr, unsigned int size, int flags) { struct kgsl_memfree_hist_elem *p; void *base = kgsl_driver.memfree_hist.base_hist_rb; int rbsize = kgsl_driver.memfree_hist.size; if (base == NULL) return; mutex_lock(&kgsl_driver.memfree_hist_mutex); p = kgsl_driver.memfree_hist.wptr; p->pid = pid; p->gpuaddr = gpuaddr; p->size = size; p->flags = flags; kgsl_driver.memfree_hist.wptr++; if ((void *)kgsl_driver.memfree_hist.wptr >= base+rbsize) { kgsl_driver.memfree_hist.wptr = (struct kgsl_memfree_hist_elem *)base; } mutex_unlock(&kgsl_driver.memfree_hist_mutex); } /* kgsl_get_mem_entry - get the mem_entry structure for the specified object * @device - Pointer to the device structure * @ptbase - the pagetable base of the object * @gpuaddr - the GPU address of the object * @size - Size of the region to search * * Caller must kgsl_mem_entry_put() the returned entry when finished using it. */ struct kgsl_mem_entry * __must_check kgsl_get_mem_entry(struct kgsl_device *device, phys_addr_t ptbase, unsigned int gpuaddr, unsigned int size) { struct kgsl_process_private *priv; struct kgsl_mem_entry *entry; mutex_lock(&kgsl_driver.process_mutex); list_for_each_entry(priv, &kgsl_driver.process_list, list) { if (!kgsl_mmu_pt_equal(&device->mmu, priv->pagetable, ptbase)) continue; entry = kgsl_sharedmem_find_region(priv, gpuaddr, size); if (entry) { mutex_unlock(&kgsl_driver.process_mutex); return entry; } } mutex_unlock(&kgsl_driver.process_mutex); return NULL; } EXPORT_SYMBOL(kgsl_get_mem_entry); static inline struct kgsl_mem_entry * kgsl_mem_entry_create(void) { struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*entry)); else kref_init(&entry->refcount); return entry; } static void kgsl_destroy_ion(struct kgsl_dma_buf_meta *meta) { if (meta != NULL) { dma_buf_unmap_attachment(meta->attach, meta->table, DMA_FROM_DEVICE); dma_buf_detach(meta->dmabuf, meta->attach); dma_buf_put(meta->dmabuf); kfree(meta); } } void kgsl_mem_entry_destroy(struct kref *kref) { struct kgsl_mem_entry *entry = container_of(kref, struct kgsl_mem_entry, refcount); /* Detach from process list */ kgsl_mem_entry_detach_process(entry); if (entry->memtype != KGSL_MEM_ENTRY_KERNEL) kgsl_driver.stats.mapped -= entry->memdesc.size; /* * Ion takes care of freeing the sglist for us so * clear the sg before freeing the sharedmem so kgsl_sharedmem_free * doesn't try to free it again */ if (entry->memtype == KGSL_MEM_ENTRY_ION) { entry->memdesc.sg = NULL; } kgsl_sharedmem_free(&entry->memdesc); switch (entry->memtype) { case KGSL_MEM_ENTRY_PMEM: case KGSL_MEM_ENTRY_ASHMEM: if (entry->priv_data) fput(entry->priv_data); break; case KGSL_MEM_ENTRY_ION: kgsl_destroy_ion(entry->priv_data); break; } kfree(entry); } EXPORT_SYMBOL(kgsl_mem_entry_destroy); /** * kgsl_mem_entry_track_gpuaddr - Insert a mem_entry in the address tree and * 
assign it with a gpu address space before insertion * @process: the process that owns the memory * @entry: the memory entry * * @returns - 0 on succcess else error code * * Insert the kgsl_mem_entry in to the rb_tree for searching by GPU address. * The assignment of gpu address and insertion into list needs to * happen with the memory lock held to avoid race conditions between * gpu address being selected and some other thread looking through the * rb list in search of memory based on gpuaddr * This function should be called with processes memory spinlock held */ static int kgsl_mem_entry_track_gpuaddr(struct kgsl_process_private *process, struct kgsl_mem_entry *entry) { int ret = 0; struct rb_node **node; struct rb_node *parent = NULL; assert_spin_locked(&process->mem_lock); /* * If cpu=gpu map is used then caller needs to set the * gpu address */ if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) { if (!entry->memdesc.gpuaddr) goto done; } else if (entry->memdesc.gpuaddr) { WARN_ONCE(1, "gpuaddr assigned w/o holding memory lock\n"); ret = -EINVAL; goto done; } if (!kgsl_memdesc_use_cpu_map(&entry->memdesc)) { ret = kgsl_mmu_get_gpuaddr(process->pagetable, &entry->memdesc); if (ret) goto done; } node = &process->mem_rb.rb_node; while (*node) { struct kgsl_mem_entry *cur; parent = *node; cur = rb_entry(parent, struct kgsl_mem_entry, node); if (entry->memdesc.gpuaddr < cur->memdesc.gpuaddr) node = &parent->rb_left; else node = &parent->rb_right; } rb_link_node(&entry->node, parent, node); rb_insert_color(&entry->node, &process->mem_rb); done: return ret; } /** * kgsl_mem_entry_untrack_gpuaddr() - Untrack memory that is previously tracked * process - Pointer to process private to which memory belongs * entry - Memory entry to untrack * * Function just does the opposite of kgsl_mem_entry_track_gpuaddr. Needs to be * called with processes spin lock held */ static void kgsl_mem_entry_untrack_gpuaddr(struct kgsl_process_private *process, struct kgsl_mem_entry *entry) { assert_spin_locked(&process->mem_lock); if (entry->memdesc.gpuaddr) { kgsl_mmu_put_gpuaddr(process->pagetable, &entry->memdesc); rb_erase(&entry->node, &entry->priv->mem_rb); } } /** * kgsl_mem_entry_attach_process - Attach a mem_entry to its owner process * @entry: the memory entry * @process: the owner process * * Attach a newly created mem_entry to its owner process so that * it can be found later. The mem_entry will be added to mem_idr and have * its 'id' field assigned. If the GPU address has been set, the entry * will also be added to the mem_rb tree. * * @returns - 0 on success or error code on failure. 
*/ static int kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry, struct kgsl_device_private *dev_priv) { int ret; struct kgsl_process_private *process = dev_priv->process_priv; ret = kgsl_process_private_get(process); if (!ret) return -EBADF; while (1) { if (idr_pre_get(&process->mem_idr, GFP_KERNEL) == 0) { ret = -ENOMEM; goto err_put_proc_priv; } spin_lock(&process->mem_lock); ret = idr_get_new_above(&process->mem_idr, entry, 1, &entry->id); spin_unlock(&process->mem_lock); if (ret == 0) break; else if (ret != -EAGAIN) goto err_put_proc_priv; } entry->priv = process; entry->dev_priv = dev_priv; spin_lock(&process->mem_lock); ret = kgsl_mem_entry_track_gpuaddr(process, entry); if (ret) idr_remove(&process->mem_idr, entry->id); spin_unlock(&process->mem_lock); if (ret) goto err_put_proc_priv; /* map the memory after unlocking if gpuaddr has been assigned */ if (entry->memdesc.gpuaddr) { ret = kgsl_mmu_map(process->pagetable, &entry->memdesc); if (ret) kgsl_mem_entry_detach_process(entry); } return ret; err_put_proc_priv: kgsl_process_private_put(process); return ret; } /* Detach a memory entry from a process and unmap it from the MMU */ static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry) { if (entry == NULL) return; /* Unmap here so that below we can call kgsl_mmu_put_gpuaddr */ kgsl_mmu_unmap(entry->priv->pagetable, &entry->memdesc); spin_lock(&entry->priv->mem_lock); kgsl_mem_entry_untrack_gpuaddr(entry->priv, entry); if (entry->id != 0) idr_remove(&entry->priv->mem_idr, entry->id); entry->id = 0; entry->priv->stats[entry->memtype].cur -= entry->memdesc.size; spin_unlock(&entry->priv->mem_lock); kgsl_process_private_put(entry->priv); entry->priv = NULL; } /** * kgsl_context_init() - helper to initialize kgsl_context members * @dev_priv: the owner of the context * @context: the newly created context struct, should be allocated by * the device specific drawctxt_create function. * * This is a helper function for the device specific drawctxt_create * function to initialize the common members of its context struct. * If this function succeeds, reference counting is active in the context * struct and the caller should kgsl_context_put() it on error. * If it fails, the caller should just free the context structure * it passed in. */ int kgsl_context_init(struct kgsl_device_private *dev_priv, struct kgsl_context *context) { int ret = 0, id; struct kgsl_device *device = dev_priv->device; while (1) { if (idr_pre_get(&device->context_idr, GFP_KERNEL) == 0) { KGSL_DRV_INFO(device, "idr_pre_get: ENOMEM\n"); ret = -ENOMEM; break; } write_lock(&device->context_lock); ret = idr_get_new_above(&device->context_idr, context, 1, &id); context->id = id; write_unlock(&device->context_lock); if (ret != -EAGAIN) break; } if (ret) goto fail; /* MAX - 1, there is one memdesc in memstore for device info */ if (id >= KGSL_MEMSTORE_MAX) { KGSL_DRV_INFO(device, "cannot have more than %d " "ctxts due to memstore limitation\n", KGSL_MEMSTORE_MAX); ret = -ENOSPC; goto fail_free_id; } kref_init(&context->refcount); /* * Get a refernce to the process private so its not destroyed, until * the context is destroyed. 
This will also prevent the pagetable * from being destroyed */ if (!kgsl_process_private_get(dev_priv->process_priv)) goto fail_free_id; context->device = dev_priv->device; context->dev_priv = dev_priv; context->proc_priv = dev_priv->process_priv; context->pid = task_tgid_nr(current); ret = kgsl_sync_timeline_create(context); if (ret) goto fail_free_id; /* Initialize the pending event list */ INIT_LIST_HEAD(&context->events); /* * Initialize the node that is used to maintain the master list of * contexts with pending events in the device structure. Normally we * wouldn't take the time to initalize a node but at event add time we * call list_empty() on the node as a quick way of determining if the * context is already in the master list so it needs to always be either * active or in an unused but initialized state */ INIT_LIST_HEAD(&context->events_list); return 0; fail_free_id: write_lock(&device->context_lock); idr_remove(&dev_priv->device->context_idr, id); write_unlock(&device->context_lock); fail: return ret; } EXPORT_SYMBOL(kgsl_context_init); /** * kgsl_context_detach() - Release the "master" context reference * @context: The context that will be detached * * This is called when a context becomes unusable, because userspace * has requested for it to be destroyed. The context itself may * exist a bit longer until its reference count goes to zero. * Other code referencing the context can detect that it has been * detached by checking the KGSL_CONTEXT_DETACHED bit in * context->priv. */ int kgsl_context_detach(struct kgsl_context *context) { int ret; if (context == NULL) return -EINVAL; /* * Mark the context as detached to keep others from using * the context before it gets fully removed, and to make sure * we don't try to detach twice. */ if (test_and_set_bit(KGSL_CONTEXT_DETACHED, &context->priv)) return -EINVAL; trace_kgsl_context_detach(context->device, context); ret = context->device->ftbl->drawctxt_detach(context); /* * Cancel events after the device-specific context is * detached, to avoid possibly freeing memory while * it is still in use by the GPU. 
*/ kgsl_context_cancel_events(context->device, context); kgsl_context_put(context); return ret; } void kgsl_context_destroy(struct kref *kref) { struct kgsl_context *context = container_of(kref, struct kgsl_context, refcount); struct kgsl_device *device = context->device; trace_kgsl_context_destroy(device, context); BUG_ON(!kgsl_context_detached(context)); write_lock(&device->context_lock); if (context->id != KGSL_CONTEXT_INVALID) { /* Clear the timestamps in the memstore during destroy */ kgsl_sharedmem_writel(device, &device->memstore, KGSL_MEMSTORE_OFFSET(context->id, soptimestamp), 0); kgsl_sharedmem_writel(device, &device->memstore, KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp), 0); /* clear device power constraint */ if (context->id == device->pwrctrl.constraint.owner_id) { trace_kgsl_constraint(device, device->pwrctrl.constraint.type, device->pwrctrl.active_pwrlevel, 0); device->pwrctrl.constraint.type = KGSL_CONSTRAINT_NONE; } idr_remove(&device->context_idr, context->id); context->id = KGSL_CONTEXT_INVALID; } write_unlock(&device->context_lock); kgsl_sync_timeline_destroy(context); kgsl_process_private_put(context->proc_priv); device->ftbl->drawctxt_destroy(context); } struct kgsl_device *kgsl_get_device(int dev_idx) { int i; struct kgsl_device *ret = NULL; mutex_lock(&kgsl_driver.devlock); for (i = 0; i < KGSL_DEVICE_MAX; i++) { if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->id == dev_idx) { ret = kgsl_driver.devp[i]; break; } } mutex_unlock(&kgsl_driver.devlock); return ret; } EXPORT_SYMBOL(kgsl_get_device); static struct kgsl_device *kgsl_get_minor(int minor) { struct kgsl_device *ret = NULL; if (minor < 0 || minor >= KGSL_DEVICE_MAX) return NULL; mutex_lock(&kgsl_driver.devlock); ret = kgsl_driver.devp[minor]; mutex_unlock(&kgsl_driver.devlock); return ret; } int kgsl_check_timestamp(struct kgsl_device *device, struct kgsl_context *context, unsigned int timestamp) { unsigned int ts_processed; ts_processed = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED); return (timestamp_cmp(ts_processed, timestamp) >= 0); } EXPORT_SYMBOL(kgsl_check_timestamp); static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state) { int status = -EINVAL; struct kgsl_pwrscale_policy *policy_saved; if (!device) return -EINVAL; KGSL_PWR_WARN(device, "suspend start\n"); mutex_lock(&device->mutex); policy_saved = device->pwrscale.policy; device->pwrscale.policy = NULL; kgsl_pwrctrl_request_state(device, KGSL_STATE_SUSPEND); /* Tell the device to drain the submission queue */ device->ftbl->drain(device); /* Wait for the active count to hit zero */ status = kgsl_active_count_wait(device, 0); if (status) goto end; /* * An interrupt could have snuck in and requested NAP in * the meantime, make sure we're on the SUSPEND path. */ kgsl_pwrctrl_request_state(device, KGSL_STATE_SUSPEND); /* Don't let the timer wake us during suspended sleep. */ del_timer_sync(&device->idle_timer); switch (device->state) { case KGSL_STATE_INIT: break; case KGSL_STATE_ACTIVE: case KGSL_STATE_NAP: case KGSL_STATE_SLEEP: /* make sure power is on to stop the device */ kgsl_pwrctrl_enable(device); /* Get the completion ready to be waited upon. 
*/ INIT_COMPLETION(device->hwaccess_gate); device->ftbl->suspend_context(device); device->ftbl->stop(device); pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma, PM_QOS_DEFAULT_VALUE); kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND); break; case KGSL_STATE_SLUMBER: INIT_COMPLETION(device->hwaccess_gate); kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND); break; default: KGSL_PWR_ERR(device, "suspend fail, device %d\n", device->id); goto end; } kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); device->pwrscale.policy = policy_saved; status = 0; end: if (status) { /* On failure, re-resume normal activity */ if (device->ftbl->resume) device->ftbl->resume(device); } mutex_unlock(&device->mutex); KGSL_PWR_WARN(device, "suspend end\n"); return status; } static int kgsl_resume_device(struct kgsl_device *device) { if (!device) return -EINVAL; KGSL_PWR_WARN(device, "resume start\n"); mutex_lock(&device->mutex); if (device->state == KGSL_STATE_SUSPEND) { kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER); complete_all(&device->hwaccess_gate); } else if (device->state != KGSL_STATE_INIT) { /* * This is an error situation,so wait for the device * to idle and then put the device to SLUMBER state. * This will put the device to the right state when * we resume. */ if (device->state == KGSL_STATE_ACTIVE) device->ftbl->idle(device); kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER); kgsl_pwrctrl_sleep(device); KGSL_PWR_ERR(device, "resume invoked without a suspend\n"); } kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); /* Call the GPU specific resume function */ if (device->ftbl->resume) device->ftbl->resume(device); mutex_unlock(&device->mutex); KGSL_PWR_WARN(device, "resume end\n"); return 0; } static int kgsl_suspend(struct device *dev) { pm_message_t arg = {0}; struct kgsl_device *device = dev_get_drvdata(dev); return kgsl_suspend_device(device, arg); } static int kgsl_resume(struct device *dev) { struct kgsl_device *device = dev_get_drvdata(dev); return kgsl_resume_device(device); } static int kgsl_runtime_suspend(struct device *dev) { return 0; } static int kgsl_runtime_resume(struct device *dev) { return 0; } const struct dev_pm_ops kgsl_pm_ops = { .suspend = kgsl_suspend, .resume = kgsl_resume, .runtime_suspend = kgsl_runtime_suspend, .runtime_resume = kgsl_runtime_resume, }; EXPORT_SYMBOL(kgsl_pm_ops); int kgsl_suspend_driver(struct platform_device *pdev, pm_message_t state) { struct kgsl_device *device = dev_get_drvdata(&pdev->dev); return kgsl_suspend_device(device, state); } EXPORT_SYMBOL(kgsl_suspend_driver); int kgsl_resume_driver(struct platform_device *pdev) { struct kgsl_device *device = dev_get_drvdata(&pdev->dev); return kgsl_resume_device(device); } EXPORT_SYMBOL(kgsl_resume_driver); /** * kgsl_destroy_process_private() - Cleanup function to free process private * @kref: - Pointer to object being destroyed's kref struct * Free struct object and all other resources attached to it. * Since the function can be used when not all resources inside process * private have been allocated, there is a check to (before each resource * cleanup) see if the struct member being cleaned is in fact allocated or not. * If the value is not NULL, resource is freed. 
 */
static void kgsl_destroy_process_private(struct kref *kref)
{
	struct kgsl_mem_entry *entry = NULL;
	int next = 0;

	struct kgsl_process_private *private = container_of(kref,
			struct kgsl_process_private, refcount);

	/*
	 * Remove this process from the global process list. We do not acquire
	 * a lock first as it is expected that kgsl_destroy_process_private()
	 * is only going to be called through kref_put(), which is only called
	 * after acquiring the lock.
	 */
	if (!private) {
		KGSL_CORE_ERR("Cannot destroy null process private\n");
		mutex_unlock(&kgsl_driver.process_mutex);
		return;
	}
	list_del(&private->list);
	mutex_unlock(&kgsl_driver.process_mutex);

	if (private->kobj.state_in_sysfs)
		kgsl_process_uninit_sysfs(private);
	if (private->debug_root)
		debugfs_remove_recursive(private->debug_root);

	while (1) {
		spin_lock(&private->mem_lock);
		entry = idr_get_next(&private->mem_idr, &next);
		spin_unlock(&private->mem_lock);
		if (entry == NULL)
			break;
		kgsl_mem_entry_put(entry);
		/*
		 * Always start back at the beginning, to
		 * ensure all entries are removed,
		 * like list_for_each_entry_safe.
		 */
		next = 0;
	}
	idr_destroy(&private->mem_idr);
	kgsl_mmu_putpagetable(private->pagetable);
	kfree(private);
	return;
}

void kgsl_process_private_put(struct kgsl_process_private *private)
{
	mutex_lock(&kgsl_driver.process_mutex);

	/*
	 * kref_put() returns 1 when the refcount has reached 0 and the destroy
	 * function is called. The mutex is released in the destroy function if
	 * it is called, so only release the mutex here if kref_put() returns 0.
	 */
	if (!kref_put(&private->refcount, kgsl_destroy_process_private))
		mutex_unlock(&kgsl_driver.process_mutex);
	return;
}

/**
 * kgsl_find_process_private() - Helper function to search for process private
 * @cur_dev_priv: Pointer to device private structure which contains pointers
 * to device and process_private structs.
 * Returns: Pointer to the found/newly created private struct
 */
static struct kgsl_process_private *
kgsl_find_process_private(struct kgsl_device_private *cur_dev_priv)
{
	struct kgsl_process_private *private;

	/* Search in the process list */
	mutex_lock(&kgsl_driver.process_mutex);
	list_for_each_entry(private, &kgsl_driver.process_list, list) {
		if (private->pid == task_tgid_nr(current)) {
			if (!kgsl_process_private_get(private))
				private = NULL;
			goto done;
		}
	}

	/* no existing process private found for this dev_priv, create one */
	private = kzalloc(sizeof(struct kgsl_process_private), GFP_KERNEL);
	if (private == NULL) {
		KGSL_DRV_ERR(cur_dev_priv->device, "kzalloc(%zu) failed\n",
			sizeof(struct kgsl_process_private));
		goto done;
	}

	kref_init(&private->refcount);

	private->pid = task_tgid_nr(current);
	spin_lock_init(&private->mem_lock);
	mutex_init(&private->process_private_mutex);

	/* Add the newly created process struct obj to the process list */
	list_add(&private->list, &kgsl_driver.process_list);
done:
	mutex_unlock(&kgsl_driver.process_mutex);
	return private;
}

/**
 * kgsl_get_process_private() - Used to find the process private structure
 * @cur_dev_priv: Current device pointer
 * Finds or creates a new process private structure and initializes its members
 * Returns: Pointer to the private process struct obj found/created or
 * NULL if pagetable creation for this process private obj failed.
*/ static struct kgsl_process_private * kgsl_get_process_private(struct kgsl_device_private *cur_dev_priv) { struct kgsl_process_private *private; private = kgsl_find_process_private(cur_dev_priv); if (!private) return NULL; mutex_lock(&private->process_private_mutex); if (test_bit(KGSL_PROCESS_INIT, &private->priv)) goto done; private->mem_rb = RB_ROOT; idr_init(&private->mem_idr); if ((!private->pagetable) && kgsl_mmu_enabled()) { unsigned long pt_name; struct kgsl_mmu *mmu = &cur_dev_priv->device->mmu; pt_name = task_tgid_nr(current); private->pagetable = kgsl_mmu_getpagetable(mmu, pt_name); if (private->pagetable == NULL) goto error; } if (kgsl_process_init_sysfs(cur_dev_priv->device, private)) goto error; if (kgsl_process_init_debugfs(private)) goto error; set_bit(KGSL_PROCESS_INIT, &private->priv); done: mutex_unlock(&private->process_private_mutex); return private; error: mutex_unlock(&private->process_private_mutex); kgsl_process_private_put(private); return NULL; } int kgsl_close_device(struct kgsl_device *device) { int result = 0; device->open_count--; if (device->open_count == 0) { /* Wait for the active count to go to 0 */ kgsl_active_count_wait(device, 0); /* Fail if the wait times out */ BUG_ON(atomic_read(&device->active_cnt) > 0); /* Force power on to do the stop */ kgsl_pwrctrl_enable(device); result = device->ftbl->stop(device); kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT); } return result; } EXPORT_SYMBOL(kgsl_close_device); static int kgsl_release(struct inode *inodep, struct file *filep) { int result = 0; struct kgsl_device_private *dev_priv = filep->private_data; struct kgsl_process_private *private = dev_priv->process_priv; struct kgsl_device *device = dev_priv->device; struct kgsl_context *context; struct kgsl_mem_entry *entry; int next = 0; filep->private_data = NULL; mutex_lock(&device->mutex); while (1) { read_lock(&device->context_lock); context = idr_get_next(&device->context_idr, &next); read_unlock(&device->context_lock); if (context == NULL) break; if (context->dev_priv == dev_priv) { /* * Hold a reference to the context in case somebody * tries to put it while we are detaching */ if (_kgsl_context_get(context)) { kgsl_context_detach(context); kgsl_context_put(context); } } next = next + 1; } next = 0; while (1) { spin_lock(&private->mem_lock); entry = idr_get_next(&private->mem_idr, &next); if (entry == NULL) { spin_unlock(&private->mem_lock); break; } /* * If the free pending flag is not set it means that user space * did not free it's reference to this entry, in that case * free a reference to this entry, other references are from * within kgsl so they will be freed eventually by kgsl */ if (entry->dev_priv == dev_priv && !entry->pending_free) { entry->pending_free = 1; spin_unlock(&private->mem_lock); trace_kgsl_mem_free(entry); kgsl_mem_entry_put(entry); } else { spin_unlock(&private->mem_lock); } next = next + 1; } /* * Clean up any to-be-freed entries that belong to this * process and this device. This is done after the context * are destroyed to avoid possibly freeing memory while * it is still in use by the GPU. 
*/ kgsl_cancel_events(device, dev_priv); result = kgsl_close_device(device); mutex_unlock(&device->mutex); kfree(dev_priv); kgsl_process_private_put(private); pm_runtime_put(device->parentdev); return result; } int kgsl_open_device(struct kgsl_device *device) { int result = 0; if (device->open_count == 0) { /* * active_cnt special case: we are starting up for the first * time, so use this sequence instead of the kgsl_pwrctrl_wake() * which will be called by kgsl_active_count_get(). */ atomic_inc(&device->active_cnt); kgsl_sharedmem_set(device, &device->memstore, 0, 0, device->memstore.size); result = device->ftbl->init(device); if (result) goto err; result = device->ftbl->start(device); if (result) goto err; /* * Make sure the gates are open, so they don't block until * we start suspend or FT. */ complete_all(&device->hwaccess_gate); kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE); kgsl_active_count_put(device); } device->open_count++; err: if (result) atomic_dec(&device->active_cnt); return result; } EXPORT_SYMBOL(kgsl_open_device); static int kgsl_open(struct inode *inodep, struct file *filep) { int result; struct kgsl_device_private *dev_priv; struct kgsl_device *device; unsigned int minor = iminor(inodep); device = kgsl_get_minor(minor); BUG_ON(device == NULL); if (filep->f_flags & O_EXCL) { KGSL_DRV_ERR(device, "O_EXCL not allowed\n"); return -EBUSY; } result = pm_runtime_get_sync(device->parentdev); if (result < 0) { KGSL_DRV_ERR(device, "Runtime PM: Unable to wake up the device, rc = %d\n", result); return result; } result = 0; dev_priv = kzalloc(sizeof(struct kgsl_device_private), GFP_KERNEL); if (dev_priv == NULL) { KGSL_DRV_ERR(device, "kzalloc failed(%d)\n", sizeof(struct kgsl_device_private)); result = -ENOMEM; goto err_pmruntime; } dev_priv->device = device; filep->private_data = dev_priv; mutex_lock(&device->mutex); result = kgsl_open_device(device); if (result) goto err_freedevpriv; mutex_unlock(&device->mutex); /* * Get file (per process) private struct. This must be done * after the first start so that the global pagetable mappings * are set up before we create the per-process pagetable. */ dev_priv->process_priv = kgsl_get_process_private(dev_priv); if (dev_priv->process_priv == NULL) { result = -ENOMEM; goto err_stop; } KGSL_DRV_INFO(device, "Initialized %s: mmu=%s pagetable_count=%d\n", device->name, kgsl_mmu_enabled() ? "on" : "off", kgsl_pagetable_count); return result; err_stop: mutex_lock(&device->mutex); device->open_count--; if (device->open_count == 0) { /* make sure power is on to stop the device */ kgsl_pwrctrl_enable(device); device->ftbl->stop(device); kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT); atomic_dec(&device->active_cnt); } err_freedevpriv: mutex_unlock(&device->mutex); filep->private_data = NULL; kfree(dev_priv); err_pmruntime: pm_runtime_put(device->parentdev); return result; } /** * kgsl_sharedmem_find_region() - Find a gpu memory allocation * * @private: private data for the process to check. * @gpuaddr: start address of the region * @size: size of the region * * Find a gpu allocation. Caller must kgsl_mem_entry_put() * the returned entry when finished using it. 
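 *
 * A minimal sketch of the intended find/put pattern (the caller shown here
 * is illustrative only and not part of this file):
 *
 *	entry = kgsl_sharedmem_find_region(private, gpuaddr, size);
 *	if (entry == NULL)
 *		return -EINVAL;
 *	... operate on entry->memdesc ...
 *	kgsl_mem_entry_put(entry);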
*/ struct kgsl_mem_entry * __must_check kgsl_sharedmem_find_region(struct kgsl_process_private *private, unsigned int gpuaddr, size_t size) { struct rb_node *node; if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, gpuaddr)) return NULL; spin_lock(&private->mem_lock); node = private->mem_rb.rb_node; while (node != NULL) { struct kgsl_mem_entry *entry; entry = rb_entry(node, struct kgsl_mem_entry, node); if (kgsl_gpuaddr_in_memdesc(&entry->memdesc, gpuaddr, size)) { if (!kgsl_mem_entry_get(entry)) break; spin_unlock(&private->mem_lock); return entry; } if (gpuaddr < entry->memdesc.gpuaddr) node = node->rb_left; else if (gpuaddr >= (entry->memdesc.gpuaddr + entry->memdesc.size)) node = node->rb_right; else { spin_unlock(&private->mem_lock); return NULL; } } spin_unlock(&private->mem_lock); return NULL; } EXPORT_SYMBOL(kgsl_sharedmem_find_region); /** * kgsl_sharedmem_find() - Find a gpu memory allocation * * @private: private data for the process to check. * @gpuaddr: start address of the region * * Find a gpu allocation. Caller must kgsl_mem_entry_put() * the returned entry when finished using it. */ static inline struct kgsl_mem_entry * __must_check kgsl_sharedmem_find(struct kgsl_process_private *private, unsigned int gpuaddr) { return kgsl_sharedmem_find_region(private, gpuaddr, 1); } /** * kgsl_sharedmem_region_empty() - Check if an addression region is empty * * @private: private data for the process to check. * @gpuaddr: start address of the region * @size: length of the region. * * Checks that there are no existing allocations within an address * region. This function should be called with processes spin lock * held. */ static int kgsl_sharedmem_region_empty(struct kgsl_process_private *private, unsigned int gpuaddr, size_t size) { int result = 1; unsigned int gpuaddr_end = gpuaddr + size; struct rb_node *node; assert_spin_locked(&private->mem_lock); if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, gpuaddr)) return 0; /* don't overflow */ if (gpuaddr_end < gpuaddr) return 0; node = private->mem_rb.rb_node; while (node != NULL) { struct kgsl_mem_entry *entry; unsigned int memdesc_start, memdesc_end; entry = rb_entry(node, struct kgsl_mem_entry, node); memdesc_start = entry->memdesc.gpuaddr; memdesc_end = memdesc_start + kgsl_memdesc_mmapsize(&entry->memdesc); if (gpuaddr_end <= memdesc_start) node = node->rb_left; else if (memdesc_end <= gpuaddr) node = node->rb_right; else { result = 0; break; } } return result; } /** * kgsl_sharedmem_find_id() - find a memory entry by id * @process: the owning process * @id: id to find * * @returns - the mem_entry or NULL * * Caller must kgsl_mem_entry_put() the returned entry, when finished using * it. */ static inline struct kgsl_mem_entry * __must_check kgsl_sharedmem_find_id(struct kgsl_process_private *process, unsigned int id) { int result = 0; struct kgsl_mem_entry *entry; spin_lock(&process->mem_lock); entry = idr_find(&process->mem_idr, id); if (entry) result = kgsl_mem_entry_get(entry); spin_unlock(&process->mem_lock); if (!result) return NULL; return entry; } /** * kgsl_mem_entry_set_pend() - Set the pending free flag of a memory entry * @entry - The memory entry * * @returns - true if pending flag was 0 else false * * This function will set the pending free flag if it is previously unset. Used * to prevent race condition between ioctls calling free/freememontimestamp * on the same entry. Whichever thread set's the flag first will do the free. 
*/ static inline bool kgsl_mem_entry_set_pend(struct kgsl_mem_entry *entry) { bool ret = false; if (entry == NULL) return false; spin_lock(&entry->priv->mem_lock); if (!entry->pending_free) { entry->pending_free = 1; ret = true; } spin_unlock(&entry->priv->mem_lock); return ret; } /*call all ioctl sub functions with driver locked*/ static long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { int result = 0; struct kgsl_device_getproperty *param = data; switch (param->type) { case KGSL_PROP_VERSION: { struct kgsl_version version; if (param->sizebytes != sizeof(version)) { result = -EINVAL; break; } version.drv_major = KGSL_VERSION_MAJOR; version.drv_minor = KGSL_VERSION_MINOR; version.dev_major = dev_priv->device->ver_major; version.dev_minor = dev_priv->device->ver_minor; if (copy_to_user(param->value, &version, sizeof(version))) result = -EFAULT; break; } case KGSL_PROP_GPU_RESET_STAT: { /* Return reset status of given context and clear it */ uint32_t id; struct kgsl_context *context; if (param->sizebytes != sizeof(unsigned int)) { result = -EINVAL; break; } /* We expect the value passed in to contain the context id */ if (copy_from_user(&id, param->value, sizeof(unsigned int))) { result = -EFAULT; break; } context = kgsl_context_get_owner(dev_priv, id); if (!context) { result = -EINVAL; break; } /* * Copy the reset status to value which also serves as * the out parameter */ if (copy_to_user(param->value, &(context->reset_status), sizeof(unsigned int))) result = -EFAULT; else { /* Clear reset status once its been queried */ context->reset_status = KGSL_CTX_STAT_NO_ERROR; } kgsl_context_put(context); break; } default: result = dev_priv->device->ftbl->getproperty( dev_priv->device, param->type, param->value, param->sizebytes); } return result; } static long kgsl_ioctl_device_setproperty(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { int result = 0; /* The getproperty struct is reused for setproperty too */ struct kgsl_device_getproperty *param = data; if (dev_priv->device->ftbl->setproperty) result = dev_priv->device->ftbl->setproperty( dev_priv, param->type, param->value, param->sizebytes); return result; } static long _device_waittimestamp(struct kgsl_device_private *dev_priv, struct kgsl_context *context, unsigned int timestamp, unsigned int timeout) { int result = 0; struct kgsl_device *device = dev_priv->device; unsigned int context_id = context ? 
context->id : KGSL_MEMSTORE_GLOBAL; trace_kgsl_waittimestamp_entry(device, context_id, kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED), timestamp, timeout); result = device->ftbl->waittimestamp(dev_priv->device, context, timestamp, timeout); trace_kgsl_waittimestamp_exit(device, kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED), result); return result; } static long kgsl_ioctl_device_waittimestamp(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { struct kgsl_device_waittimestamp *param = data; return _device_waittimestamp(dev_priv, NULL, param->timestamp, param->timeout); } static long kgsl_ioctl_device_waittimestamp_ctxtid(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { struct kgsl_device_waittimestamp_ctxtid *param = data; struct kgsl_context *context; long result = -EINVAL; context = kgsl_context_get_owner(dev_priv, param->context_id); if (context) result = _device_waittimestamp(dev_priv, context, param->timestamp, param->timeout); kgsl_context_put(context); return result; } /* * KGSL command batch management * A command batch is a single submission from userland. The cmdbatch * encapsulates everything about the submission : command buffers, flags and * sync points. * * Sync points are events that need to expire before the * cmdbatch can be queued to the hardware. For each sync point a * kgsl_cmdbatch_sync_event struct is created and added to a list in the * cmdbatch. There can be multiple types of events both internal ones (GPU * events) and external triggers. As the events expire the struct is deleted * from the list. The GPU will submit the command batch as soon as the list * goes empty indicating that all the sync points have been met. */ /** * struct kgsl_cmdbatch_sync_event * @type: Syncpoint type * @node: Local list node for the cmdbatch sync point list * @cmdbatch: Pointer to the cmdbatch that owns the sync event * @context: Pointer to the KGSL context that owns the cmdbatch * @timestamp: Pending timestamp for the event * @handle: Pointer to a sync fence handle * @device: Pointer to the KGSL device * @refcount: Allow event to be destroyed asynchronously */ struct kgsl_cmdbatch_sync_event { int type; struct list_head node; struct kgsl_cmdbatch *cmdbatch; struct kgsl_context *context; unsigned int timestamp; struct kgsl_sync_fence_waiter *handle; struct kgsl_device *device; struct kref refcount; }; /** * kgsl_cmdbatch_sync_event_destroy() - Destroy a sync event object * @kref: Pointer to the kref structure for this object * * Actually destroy a sync event object. Called from * kgsl_cmdbatch_sync_event_put. */ static void kgsl_cmdbatch_sync_event_destroy(struct kref *kref) { struct kgsl_cmdbatch_sync_event *event = container_of(kref, struct kgsl_cmdbatch_sync_event, refcount); kgsl_cmdbatch_put(event->cmdbatch); kfree(event); } /** * kgsl_cmdbatch_sync_event_put() - Decrement the refcount for a * sync event object * @event: Pointer to the sync event object */ static inline void kgsl_cmdbatch_sync_event_put( struct kgsl_cmdbatch_sync_event *event) { kref_put(&event->refcount, kgsl_cmdbatch_sync_event_destroy); } /** * kgsl_cmdbatch_destroy_object() - Destroy a cmdbatch object * @kref: Pointer to the kref structure for this object * * Actually destroy a command batch object. 
Called from kgsl_cmdbatch_put */ void kgsl_cmdbatch_destroy_object(struct kref *kref) { struct kgsl_cmdbatch *cmdbatch = container_of(kref, struct kgsl_cmdbatch, refcount); kgsl_context_put(cmdbatch->context); kfree(cmdbatch->ibdesc); kfree(cmdbatch); } EXPORT_SYMBOL(kgsl_cmdbatch_destroy_object); /* * a generic function to retire a pending sync event and (possibly) * kick the dispatcher */ static void kgsl_cmdbatch_sync_expire(struct kgsl_device *device, struct kgsl_cmdbatch_sync_event *event) { struct kgsl_cmdbatch_sync_event *e, *tmp; int sched = 0; int removed = 0; spin_lock(&event->cmdbatch->lock); /* * sync events that are contained by a cmdbatch which has been * destroyed may have already been removed from the synclist */ list_for_each_entry_safe(e, tmp, &event->cmdbatch->synclist, node) { if (e == event) { list_del_init(&event->node); removed = 1; break; } } sched = list_empty(&event->cmdbatch->synclist) ? 1 : 0; spin_unlock(&event->cmdbatch->lock); /* * if this is the last event in the list then tell * the GPU device that the cmdbatch can be submitted */ if (sched && device->ftbl->drawctxt_sched) device->ftbl->drawctxt_sched(device, event->cmdbatch->context); /* Put events that have been removed from the synclist */ if (removed) kgsl_cmdbatch_sync_event_put(event); } /* * This function is called by the GPU event when the sync event timestamp * expires */ static void kgsl_cmdbatch_sync_func(struct kgsl_device *device, void *priv, u32 id, u32 timestamp, u32 type) { struct kgsl_cmdbatch_sync_event *event = priv; kgsl_cmdbatch_sync_expire(device, event); kgsl_context_put(event->context); /* Put events that have signaled */ kgsl_cmdbatch_sync_event_put(event); } /** * kgsl_cmdbatch_destroy() - Destroy a cmdbatch structure * @cmdbatch: Pointer to the command batch object to destroy * * Start the process of destroying a command batch. Cancel any pending events * and decrement the refcount. Asynchronous events can still signal after * kgsl_cmdbatch_destroy has returned. */ void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch) { struct kgsl_cmdbatch_sync_event *event, *tmp; LIST_HEAD(cancel_synclist); /* * Empty the synclist before canceling events */ spin_lock(&cmdbatch->lock); list_splice_init(&cmdbatch->synclist, &cancel_synclist); spin_unlock(&cmdbatch->lock); /* * Finish canceling events outside the cmdbatch spinlock and * require the cancel function to return if the event was * successfully canceled meaning that the event is guaranteed * not to signal the callback. This guarantee ensures that * the reference count for the event and cmdbatch is correct. 
 */
	list_for_each_entry_safe(event, tmp, &cancel_synclist, node) {
		if (event->type == KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP) {
			/*
			 * Timestamp events are guaranteed to signal
			 * when canceled
			 */
			kgsl_cancel_event(cmdbatch->device, event->context,
				event->timestamp, kgsl_cmdbatch_sync_func,
				event);
		} else if (event->type == KGSL_CMD_SYNCPOINT_TYPE_FENCE) {
			/* Put events that are successfully canceled */
			if (kgsl_sync_fence_async_cancel(event->handle))
				kgsl_cmdbatch_sync_event_put(event);
		}

		/* Put events that have been removed from the synclist */
		list_del_init(&event->node);
		kgsl_cmdbatch_sync_event_put(event);
	}

	kgsl_cmdbatch_put(cmdbatch);
}
EXPORT_SYMBOL(kgsl_cmdbatch_destroy);

/*
 * A callback that gets registered with kgsl_sync_fence_async_wait and is fired
 * when a fence expires
 */
static void kgsl_cmdbatch_sync_fence_func(void *priv)
{
	struct kgsl_cmdbatch_sync_event *event = priv;

	kgsl_cmdbatch_sync_expire(event->device, event);
	/* Put events that have signaled */
	kgsl_cmdbatch_sync_event_put(event);
}

/* kgsl_cmdbatch_add_sync_fence() - Add a new sync fence syncpoint
 * @device: KGSL device
 * @cmdbatch: KGSL cmdbatch to add the sync point to
 * @priv: Private structure passed by the user
 *
 * Add a new fence syncpoint to the cmdbatch.
 */
static int kgsl_cmdbatch_add_sync_fence(struct kgsl_device *device,
		struct kgsl_cmdbatch *cmdbatch, void *priv)
{
	struct kgsl_cmd_syncpoint_fence *sync = priv;
	struct kgsl_cmdbatch_sync_event *event;

	event = kzalloc(sizeof(*event), GFP_KERNEL);

	if (event == NULL)
		return -ENOMEM;

	kref_get(&cmdbatch->refcount);

	event->type = KGSL_CMD_SYNCPOINT_TYPE_FENCE;
	event->cmdbatch = cmdbatch;
	event->device = device;
	event->context = NULL;

	/*
	 * Initial kref is to ensure async callback does not free the
	 * event before this function sets the event handle
	 */
	kref_init(&event->refcount);

	/*
	 * Add it to the list first to account for the possibility that the
	 * callback will happen immediately after the call to
	 * kgsl_sync_fence_async_wait. Decrement the event refcount when
	 * removing from the synclist.
	 */
	spin_lock(&cmdbatch->lock);
	kref_get(&event->refcount);
	list_add(&event->node, &cmdbatch->synclist);
	spin_unlock(&cmdbatch->lock);

	/*
	 * Increment the reference count for the async callback.
	 * Decrement when the callback is successfully canceled, when
	 * the callback is signaled or if the async wait fails.
	 */
	kref_get(&event->refcount);
	event->handle = kgsl_sync_fence_async_wait(sync->fd,
		kgsl_cmdbatch_sync_fence_func, event);

	if (IS_ERR_OR_NULL(event->handle)) {
		int ret = PTR_ERR(event->handle);

		/* Failed to add the event to the async callback */
		kgsl_cmdbatch_sync_event_put(event);

		/* Remove event from the synclist */
		spin_lock(&cmdbatch->lock);
		list_del(&event->node);
		kgsl_cmdbatch_sync_event_put(event);
		spin_unlock(&cmdbatch->lock);

		/* Event no longer needed by this function */
		kgsl_cmdbatch_sync_event_put(event);

		return ret;
	}

	/*
	 * Event was successfully added to the synclist, the async
	 * callback and the handle to cancel the event have been set.
	 */
	kgsl_cmdbatch_sync_event_put(event);

	return 0;
}

/* kgsl_cmdbatch_add_sync_timestamp() - Add a new sync point for a cmdbatch
 * @device: KGSL device
 * @cmdbatch: KGSL cmdbatch to add the sync point to
 * @priv: Private structure passed by the user
 *
 * Add a new sync point timestamp event to the cmdbatch.
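 *
 * The @priv blob supplied by the user is interpreted as a struct
 * kgsl_cmd_syncpoint_timestamp (see the cast below). A hypothetical
 * userspace caller would fill it roughly as follows; the variable names
 * here are made up for illustration:
 *
 *	struct kgsl_cmd_syncpoint_timestamp sync = {
 *		.context_id = ctx_id,  - context that retires the timestamp
 *		.timestamp = ts,       - must not be a future timestamp when
 *		                         syncing on the cmdbatch's own context
 *	};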
*/ static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device, struct kgsl_cmdbatch *cmdbatch, void *priv) { struct kgsl_cmd_syncpoint_timestamp *sync = priv; struct kgsl_context *context = kgsl_context_get(cmdbatch->device, sync->context_id); struct kgsl_cmdbatch_sync_event *event; int ret = -EINVAL; if (context == NULL) return -EINVAL; /* * We allow somebody to create a sync point on their own context. * This has the effect of delaying a command from submitting until the * dependent command has cleared. That said we obviously can't let them * create a sync point on a future timestamp. */ if (context == cmdbatch->context) { unsigned int queued = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED); if (timestamp_cmp(sync->timestamp, queued) > 0) { KGSL_DRV_ERR(device, "Cannot create syncpoint for future timestamp %d (current %d)\n", sync->timestamp, queued); goto done; } } event = kzalloc(sizeof(*event), GFP_KERNEL); if (event == NULL) { ret = -ENOMEM; goto done; } kref_get(&cmdbatch->refcount); event->type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP; event->cmdbatch = cmdbatch; event->context = context; event->timestamp = sync->timestamp; /* * Two krefs are required to support events. The first kref is for * the synclist which holds the event in the cmdbatch. The second * kref is for the callback which can be asynchronous and be called * after kgsl_cmdbatch_destroy. The kref should be put when the event * is removed from the synclist, if the callback is successfully * canceled or when the callback is signaled. */ kref_init(&event->refcount); kref_get(&event->refcount); spin_lock(&cmdbatch->lock); list_add(&event->node, &cmdbatch->synclist); spin_unlock(&cmdbatch->lock); mutex_lock(&device->mutex); ret = kgsl_add_event(device, context->id, sync->timestamp, kgsl_cmdbatch_sync_func, event, NULL); mutex_unlock(&device->mutex); if (ret) { spin_lock(&cmdbatch->lock); list_del(&event->node); spin_unlock(&cmdbatch->lock); kgsl_cmdbatch_put(cmdbatch); kfree(event); } done: if (ret) kgsl_context_put(context); return ret; } /** * kgsl_cmdbatch_add_sync() - Add a sync point to a command batch * @device: Pointer to the KGSL device struct for the GPU * @cmdbatch: Pointer to the cmdbatch * @sync: Pointer to the user-specified struct defining the syncpoint * * Create a new sync point in the cmdbatch based on the user specified * parameters */ static int kgsl_cmdbatch_add_sync(struct kgsl_device *device, struct kgsl_cmdbatch *cmdbatch, struct kgsl_cmd_syncpoint *sync) { void *priv; int ret, psize; int (*func)(struct kgsl_device *device, struct kgsl_cmdbatch *cmdbatch, void *priv); switch (sync->type) { case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: psize = sizeof(struct kgsl_cmd_syncpoint_timestamp); func = kgsl_cmdbatch_add_sync_timestamp; break; case KGSL_CMD_SYNCPOINT_TYPE_FENCE: psize = sizeof(struct kgsl_cmd_syncpoint_fence); func = kgsl_cmdbatch_add_sync_fence; break; default: KGSL_DRV_ERR(device, "Invalid sync type 0x%x\n", sync->type); return -EINVAL; } if (sync->size != psize) { KGSL_DRV_ERR(device, "Invalid sync size %d\n", sync->size); return -EINVAL; } priv = kzalloc(sync->size, GFP_KERNEL); if (priv == NULL) return -ENOMEM; if (copy_from_user(priv, sync->priv, sync->size)) { kfree(priv); return -EFAULT; } ret = func(device, cmdbatch, priv); kfree(priv); return ret; } /** * kgsl_cmdbatch_create() - Create a new cmdbatch structure * @device: Pointer to a KGSL device struct * @context: Pointer to a KGSL context struct * @numibs: Number of indirect buffers to make room for in the cmdbatch * * 
Allocate an new cmdbatch structure and add enough room to store the list of * indirect buffers */ static struct kgsl_cmdbatch *kgsl_cmdbatch_create(struct kgsl_device *device, struct kgsl_context *context, unsigned int flags, unsigned int numibs) { struct kgsl_cmdbatch *cmdbatch = kzalloc(sizeof(*cmdbatch), GFP_KERNEL); if (cmdbatch == NULL) return ERR_PTR(-ENOMEM); /* * Increase the reference count on the context so it doesn't disappear * during the lifetime of this command batch */ if (!_kgsl_context_get(context)) { kfree(cmdbatch); return ERR_PTR(-EINVAL); } if (!(flags & KGSL_CONTEXT_SYNC)) { cmdbatch->ibdesc = kzalloc(sizeof(*cmdbatch->ibdesc) * numibs, GFP_KERNEL); if (cmdbatch->ibdesc == NULL) { kgsl_context_put(context); kfree(cmdbatch); return ERR_PTR(-ENOMEM); } } kref_init(&cmdbatch->refcount); INIT_LIST_HEAD(&cmdbatch->synclist); spin_lock_init(&cmdbatch->lock); cmdbatch->device = device; cmdbatch->ibcount = (flags & KGSL_CONTEXT_SYNC) ? 0 : numibs; cmdbatch->context = context; cmdbatch->flags = flags & ~KGSL_CONTEXT_SUBMIT_IB_LIST; return cmdbatch; } /** * _kgsl_cmdbatch_verify() - Perform a quick sanity check on a command batch * @device: Pointer to a KGSL instance that owns the command batch * @pagetable: Pointer to the pagetable for the current process * @cmdbatch: Number of indirect buffers to make room for in the cmdbatch * * Do a quick sanity test on the list of indirect buffers in a command batch * verifying that the size and GPU address */ static bool _kgsl_cmdbatch_verify(struct kgsl_device_private *dev_priv, struct kgsl_cmdbatch *cmdbatch) { int i; struct kgsl_process_private *private = dev_priv->process_priv; for (i = 0; i < cmdbatch->ibcount; i++) { if (cmdbatch->ibdesc[i].sizedwords == 0) { KGSL_DRV_ERR(dev_priv->device, "invalid size ctx %d ib(%d) %X/%X\n", cmdbatch->context->id, i, cmdbatch->ibdesc[i].gpuaddr, cmdbatch->ibdesc[i].sizedwords); return false; } if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, cmdbatch->ibdesc[i].gpuaddr)) { KGSL_DRV_ERR(dev_priv->device, "Invalid address ctx %d ib(%d) %X/%X\n", cmdbatch->context->id, i, cmdbatch->ibdesc[i].gpuaddr, cmdbatch->ibdesc[i].sizedwords); return false; } } return true; } /** * _kgsl_cmdbatch_create_legacy() - Create a cmdbatch from a legacy ioctl struct * @device: Pointer to the KGSL device struct for the GPU * @context: Pointer to the KGSL context that issued the command batch * @param: Pointer to the kgsl_ringbuffer_issueibcmds struct that the user sent * * Create a command batch from the legacy issueibcmds format. */ static struct kgsl_cmdbatch *_kgsl_cmdbatch_create_legacy( struct kgsl_device *device, struct kgsl_context *context, struct kgsl_ringbuffer_issueibcmds *param) { struct kgsl_cmdbatch *cmdbatch = kgsl_cmdbatch_create(device, context, param->flags, 1); if (IS_ERR(cmdbatch)) return cmdbatch; cmdbatch->ibdesc[0].gpuaddr = param->ibdesc_addr; cmdbatch->ibdesc[0].sizedwords = param->numibs; cmdbatch->ibcount = 1; cmdbatch->flags = param->flags; return cmdbatch; } /** * _kgsl_cmdbatch_create() - Create a cmdbatch from a ioctl struct * @device: Pointer to the KGSL device struct for the GPU * @context: Pointer to the KGSL context that issued the command batch * @flags: Flags passed in from the user command * @cmdlist: Pointer to the list of commands from the user * @numcmds: Number of commands in the list * @synclist: Pointer to the list of syncpoints from the user * @numsyncs: Number of syncpoints in the list * * Create a command batch from the standard issueibcmds format sent by the user. 
*/ static struct kgsl_cmdbatch *_kgsl_cmdbatch_create(struct kgsl_device *device, struct kgsl_context *context, unsigned int flags, unsigned int cmdlist, unsigned int numcmds, unsigned int synclist, unsigned int numsyncs) { struct kgsl_cmdbatch *cmdbatch = kgsl_cmdbatch_create(device, context, flags, numcmds); int ret = 0; if (IS_ERR(cmdbatch)) return cmdbatch; if (!(flags & KGSL_CONTEXT_SYNC)) { if (copy_from_user(cmdbatch->ibdesc, (void __user *) cmdlist, sizeof(struct kgsl_ibdesc) * numcmds)) { ret = -EFAULT; goto done; } } if (synclist && numsyncs) { struct kgsl_cmd_syncpoint sync; void __user *uptr = (void __user *) synclist; int i; for (i = 0; i < numsyncs; i++) { memset(&sync, 0, sizeof(sync)); if (copy_from_user(&sync, uptr, sizeof(sync))) { ret = -EFAULT; break; } ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync); if (ret) break; uptr += sizeof(sync); } } done: if (ret) { kgsl_cmdbatch_destroy(cmdbatch); return ERR_PTR(ret); } return cmdbatch; } static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { struct kgsl_ringbuffer_issueibcmds *param = data; struct kgsl_device *device = dev_priv->device; struct kgsl_context *context; struct kgsl_cmdbatch *cmdbatch; long result = -EINVAL; /* The legacy functions don't support synchronization commands */ if (param->flags & KGSL_CONTEXT_SYNC) return -EINVAL; /* Get the context */ context = kgsl_context_get_owner(dev_priv, param->drawctxt_id); if (context == NULL) goto done; if (param->flags & KGSL_CONTEXT_SUBMIT_IB_LIST) { /* * Do a quick sanity check on the number of IBs in the * submission */ if (param->numibs == 0 || param->numibs > KGSL_MAX_NUMIBS) goto done; cmdbatch = _kgsl_cmdbatch_create(device, context, param->flags, param->ibdesc_addr, param->numibs, 0, 0); } else cmdbatch = _kgsl_cmdbatch_create_legacy(device, context, param); if (IS_ERR(cmdbatch)) { result = PTR_ERR(cmdbatch); goto done; } /* Run basic sanity checking on the command */ if (!_kgsl_cmdbatch_verify(dev_priv, cmdbatch)) goto free_cmdbatch; result = dev_priv->device->ftbl->issueibcmds(dev_priv, context, cmdbatch, &param->timestamp); free_cmdbatch: /* * -EPROTO is a "success" error - it just tells the user that the * context had previously faulted */ if (result && result != -EPROTO) kgsl_cmdbatch_destroy(cmdbatch); done: kgsl_context_put(context); return result; } static long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { struct kgsl_submit_commands *param = data; struct kgsl_device *device = dev_priv->device; struct kgsl_context *context; struct kgsl_cmdbatch *cmdbatch; long result = -EINVAL; /* The number of IBs are completely ignored for sync commands */ if (!(param->flags & KGSL_CONTEXT_SYNC)) { if (param->numcmds == 0 || param->numcmds > KGSL_MAX_NUMIBS) return -EINVAL; } else if (param->numcmds != 0) { KGSL_DEV_ERR_ONCE(device, "Commands specified with the SYNC flag. 
They will be ignored\n"); } context = kgsl_context_get_owner(dev_priv, param->context_id); if (context == NULL) return -EINVAL; cmdbatch = _kgsl_cmdbatch_create(device, context, param->flags, (unsigned int) param->cmdlist, param->numcmds, (unsigned int) param->synclist, param->numsyncs); if (IS_ERR(cmdbatch)) { result = PTR_ERR(cmdbatch); goto done; } /* Run basic sanity checking on the command */ if (!_kgsl_cmdbatch_verify(dev_priv, cmdbatch)) goto free_cmdbatch; result = dev_priv->device->ftbl->issueibcmds(dev_priv, context, cmdbatch, &param->timestamp); free_cmdbatch: /* * -EPROTO is a "success" error - it just tells the user that the * context had previously faulted */ if (result && result != -EPROTO) kgsl_cmdbatch_destroy(cmdbatch); done: kgsl_context_put(context); return result; } static long _cmdstream_readtimestamp(struct kgsl_device_private *dev_priv, struct kgsl_context *context, unsigned int type, unsigned int *timestamp) { *timestamp = kgsl_readtimestamp(dev_priv->device, context, type); trace_kgsl_readtimestamp(dev_priv->device, context ? context->id : KGSL_MEMSTORE_GLOBAL, type, *timestamp); return 0; } static long kgsl_ioctl_cmdstream_readtimestamp(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { struct kgsl_cmdstream_readtimestamp *param = data; return _cmdstream_readtimestamp(dev_priv, NULL, param->type, &param->timestamp); } static long kgsl_ioctl_cmdstream_readtimestamp_ctxtid(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { struct kgsl_cmdstream_readtimestamp_ctxtid *param = data; struct kgsl_context *context; long result = -EINVAL; context = kgsl_context_get_owner(dev_priv, param->context_id); if (context) result = _cmdstream_readtimestamp(dev_priv, context, param->type, &param->timestamp); kgsl_context_put(context); return result; } static void kgsl_freemem_event_cb(struct kgsl_device *device, void *priv, u32 id, u32 timestamp, u32 type) { struct kgsl_mem_entry *entry = priv; /* Free the memory for all event types */ trace_kgsl_mem_timestamp_free(device, entry, id, timestamp, 0); kgsl_mem_entry_put(entry); } static long _cmdstream_freememontimestamp(struct kgsl_device_private *dev_priv, unsigned int gpuaddr, struct kgsl_context *context, unsigned int timestamp, unsigned int type) { int result = 0; struct kgsl_mem_entry *entry = NULL; struct kgsl_device *device = dev_priv->device; unsigned int context_id = context ? 
context->id : KGSL_MEMSTORE_GLOBAL; entry = kgsl_sharedmem_find(dev_priv->process_priv, gpuaddr); if (!entry) { KGSL_DRV_ERR(dev_priv->device, "invalid gpuaddr %08x\n", gpuaddr); return -EINVAL; } if (!kgsl_mem_entry_set_pend(entry)) { KGSL_DRV_WARN(dev_priv->device, "Cannot set pending bit for gpuaddr %08x\n", gpuaddr); kgsl_mem_entry_put(entry); return -EBUSY; } trace_kgsl_mem_timestamp_queue(device, entry, context_id, kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED), timestamp); result = kgsl_add_event(dev_priv->device, context_id, timestamp, kgsl_freemem_event_cb, entry, dev_priv); kgsl_mem_entry_put(entry); return result; } static long kgsl_ioctl_cmdstream_freememontimestamp(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { struct kgsl_cmdstream_freememontimestamp *param = data; return _cmdstream_freememontimestamp(dev_priv, param->gpuaddr, NULL, param->timestamp, param->type); } static long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid( struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { struct kgsl_cmdstream_freememontimestamp_ctxtid *param = data; struct kgsl_context *context; long result = -EINVAL; context = kgsl_context_get_owner(dev_priv, param->context_id); if (context) result = _cmdstream_freememontimestamp(dev_priv, param->gpuaddr, context, param->timestamp, param->type); kgsl_context_put(context); return result; } static long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { int result = 0; struct kgsl_drawctxt_create *param = data; struct kgsl_context *context = NULL; struct kgsl_device *device = dev_priv->device; context = device->ftbl->drawctxt_create(dev_priv, &param->flags); if (IS_ERR(context)) { result = PTR_ERR(context); goto done; } trace_kgsl_context_create(dev_priv->device, context, param->flags); param->drawctxt_id = context->id; done: return result; } static long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { struct kgsl_drawctxt_destroy *param = data; struct kgsl_context *context; long result; context = kgsl_context_get_owner(dev_priv, param->drawctxt_id); result = kgsl_context_detach(context); kgsl_context_put(context); return result; } static long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { struct kgsl_sharedmem_free *param = data; struct kgsl_process_private *private = dev_priv->process_priv; struct kgsl_mem_entry *entry = NULL; entry = kgsl_sharedmem_find(private, param->gpuaddr); if (!entry) { KGSL_MEM_INFO(dev_priv->device, "invalid gpuaddr %08x\n", param->gpuaddr); return -EINVAL; } if (!kgsl_mem_entry_set_pend(entry)) { kgsl_mem_entry_put(entry); return -EBUSY; } trace_kgsl_mem_free(entry); kgsl_memfree_hist_set_event(entry->priv->pid, entry->memdesc.gpuaddr, entry->memdesc.size, entry->memdesc.flags); /* * First kgsl_mem_entry_put is for the reference that we took in * this function when calling kgsl_sharedmem_find, second one is * to free the memory since this is a free ioctl */ kgsl_mem_entry_put(entry); kgsl_mem_entry_put(entry); return 0; } static long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { struct kgsl_gpumem_free_id *param = data; struct kgsl_process_private *private = dev_priv->process_priv; struct kgsl_mem_entry *entry = NULL; entry = kgsl_sharedmem_find_id(private, param->id); if (!entry) { KGSL_MEM_INFO(dev_priv->device, "invalid id %d\n", param->id); return -EINVAL; } if 
(!kgsl_mem_entry_set_pend(entry)) { kgsl_mem_entry_put(entry); return -EBUSY; } trace_kgsl_mem_free(entry); kgsl_memfree_hist_set_event(entry->priv->pid, entry->memdesc.gpuaddr, entry->memdesc.size, entry->memdesc.flags); /* * First kgsl_mem_entry_put is for the reference that we took in * this function when calling kgsl_sharedmem_find_id, second one is * to free the memory since this is a free ioctl */ kgsl_mem_entry_put(entry); kgsl_mem_entry_put(entry); return 0; } static struct vm_area_struct *kgsl_get_vma_from_start_addr(unsigned int addr) { struct vm_area_struct *vma; down_read(&current->mm->mmap_sem); vma = find_vma(current->mm, addr); up_read(&current->mm->mmap_sem); if (!vma) KGSL_CORE_ERR("find_vma(%x) failed\n", addr); return vma; } static inline int _check_region(unsigned long start, unsigned long size, uint64_t len) { uint64_t end = ((uint64_t) start) + size; return (end > len); } static int kgsl_get_phys_file(int fd, unsigned long *start, unsigned long *len, unsigned long *vstart, struct file **filep) { struct file *fbfile; int ret = 0; dev_t rdev; struct fb_info *info; *start = 0; *vstart = 0; *len = 0; *filep = NULL; fbfile = fget(fd); if (fbfile == NULL) { KGSL_CORE_ERR("fget_light failed\n"); return -1; } rdev = fbfile->f_dentry->d_inode->i_rdev; info = MAJOR(rdev) == FB_MAJOR ? registered_fb[MINOR(rdev)] : NULL; if (info) { *start = info->fix.smem_start; *len = info->fix.smem_len; *vstart = (unsigned long)__va(info->fix.smem_start); ret = 0; } else { KGSL_CORE_ERR("framebuffer minor %d not found\n", MINOR(rdev)); ret = -1; } fput(fbfile); return ret; } static int kgsl_setup_phys_file(struct kgsl_mem_entry *entry, struct kgsl_pagetable *pagetable, unsigned int fd, unsigned int offset, size_t size) { int ret; unsigned long phys, virt, len; struct file *filep; ret = kgsl_get_phys_file(fd, &phys, &len, &virt, &filep); if (ret) return ret; ret = -ERANGE; if (phys == 0) goto err; /* Make sure the length of the region, the offset and the desired * size are all page aligned or bail */ if ((len & ~PAGE_MASK) || (offset & ~PAGE_MASK) || (size & ~PAGE_MASK)) { KGSL_CORE_ERR("length offset or size is not page aligned\n"); goto err; } /* The size or offset can never be greater than the PMEM length */ if (offset >= len || size > len) goto err; /* If size is 0, then adjust it to default to the size of the region * minus the offset. If size isn't zero, then make sure that it will * fit inside of the region. */ if (size == 0) size = len - offset; else if (_check_region(offset, size, len)) goto err; entry->priv_data = filep; entry->memdesc.pagetable = pagetable; entry->memdesc.size = size; entry->memdesc.physaddr = phys + offset; entry->memdesc.hostptr = (void *) (virt + offset); /* USE_CPU_MAP is not impemented for PMEM. 
*/ entry->memdesc.flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP; ret = memdesc_sg_phys(&entry->memdesc, phys + offset, size); if (ret) goto err; return 0; err: return ret; } static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, unsigned long paddr, int size) { int i; int sglen = PAGE_ALIGN(size) / PAGE_SIZE; memdesc->sg = kgsl_sg_alloc(sglen); if (memdesc->sg == NULL) return -ENOMEM; memdesc->sglen = sglen; memdesc->sglen_alloc = sglen; sg_init_table(memdesc->sg, sglen); spin_lock(&current->mm->page_table_lock); for (i = 0; i < sglen; i++, paddr += PAGE_SIZE) { struct page *page; pmd_t *ppmd; pte_t *ppte; pgd_t *ppgd = pgd_offset(current->mm, paddr); if (pgd_none(*ppgd) || pgd_bad(*ppgd)) goto err; ppmd = pmd_offset(pud_offset(ppgd, paddr), paddr); if (pmd_none(*ppmd) || pmd_bad(*ppmd)) goto err; ppte = pte_offset_map(ppmd, paddr); if (ppte == NULL) goto err; page = pfn_to_page(pte_pfn(*ppte)); if (!page) goto err; sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0); pte_unmap(ppte); } spin_unlock(&current->mm->page_table_lock); return 0; err: spin_unlock(&current->mm->page_table_lock); kgsl_sg_free(memdesc->sg, sglen); memdesc->sg = NULL; return -EINVAL; } static int kgsl_setup_useraddr(struct kgsl_mem_entry *entry, struct kgsl_pagetable *pagetable, unsigned long useraddr, unsigned int offset, size_t size) { struct vm_area_struct *vma; unsigned int len; down_read(&current->mm->mmap_sem); vma = find_vma(current->mm, useraddr); up_read(&current->mm->mmap_sem); if (!vma) { KGSL_CORE_ERR("find_vma(%lx) failed\n", useraddr); return -EINVAL; } /* We don't necessarily start at vma->vm_start */ len = vma->vm_end - useraddr; if (offset >= len) return -EINVAL; if (!KGSL_IS_PAGE_ALIGNED(useraddr) || !KGSL_IS_PAGE_ALIGNED(len)) { KGSL_CORE_ERR("bad alignment: start(%lx) len(%u)\n", useraddr, len); return -EINVAL; } if (size == 0) size = len; /* Adjust the size of the region to account for the offset */ size += offset & ~PAGE_MASK; size = ALIGN(size, PAGE_SIZE); if (_check_region(offset & PAGE_MASK, size, len)) { KGSL_CORE_ERR("Offset (%ld) + size (%d) is larger" "than region length %d\n", offset & PAGE_MASK, size, len); return -EINVAL; } entry->memdesc.pagetable = pagetable; entry->memdesc.size = size; entry->memdesc.useraddr = useraddr + (offset & PAGE_MASK); if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) entry->memdesc.gpuaddr = entry->memdesc.useraddr; return memdesc_sg_virt(&entry->memdesc, entry->memdesc.useraddr, size); } #ifdef CONFIG_ASHMEM static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry, struct kgsl_pagetable *pagetable, int fd, unsigned long useraddr, size_t size) { int ret; struct vm_area_struct *vma; struct file *filep, *vmfile; unsigned long len; vma = kgsl_get_vma_from_start_addr(useraddr); if (vma == NULL) return -EINVAL; if (vma->vm_pgoff || vma->vm_start != useraddr) { KGSL_CORE_ERR("Invalid vma region\n"); return -EINVAL; } len = vma->vm_end - vma->vm_start; if (size == 0) size = len; if (size != len) { KGSL_CORE_ERR("Invalid size %d for vma region %lx\n", size, useraddr); return -EINVAL; } ret = get_ashmem_file(fd, &filep, &vmfile, &len); if (ret) { KGSL_CORE_ERR("get_ashmem_file failed\n"); return ret; } if (vmfile != vma->vm_file) { KGSL_CORE_ERR("ashmem shmem file does not match vma\n"); ret = -EINVAL; goto err; } entry->priv_data = filep; entry->memdesc.pagetable = pagetable; entry->memdesc.size = ALIGN(size, PAGE_SIZE); entry->memdesc.useraddr = useraddr; if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) entry->memdesc.gpuaddr = entry->memdesc.useraddr; ret = 
memdesc_sg_virt(&entry->memdesc, useraddr, size); if (ret) goto err; return 0; err: put_ashmem_file(filep); return ret; } #else static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry, struct kgsl_pagetable *pagetable, int fd, unsigned long useraddr, size_t size) { return -EINVAL; } #endif static int kgsl_setup_ion(struct kgsl_mem_entry *entry, struct kgsl_pagetable *pagetable, void *data, struct kgsl_device *device) { struct scatterlist *s; struct sg_table *sg_table; struct kgsl_map_user_mem *param = data; int fd = param->fd; struct dma_buf *dmabuf; struct dma_buf_attachment *attach = NULL; struct kgsl_dma_buf_meta *meta; int ret = 0; meta = kzalloc(sizeof(*meta), GFP_KERNEL); if (!meta) return -ENOMEM; dmabuf = dma_buf_get(fd); if (IS_ERR_OR_NULL(dmabuf)) { ret = PTR_ERR(dmabuf); goto out; } attach = dma_buf_attach(dmabuf, device->dev); if (IS_ERR_OR_NULL(attach)) { ret = PTR_ERR(attach); goto out; } meta->dmabuf = dmabuf; meta->attach = attach; entry->memtype = KGSL_MEM_ENTRY_ION; entry->memdesc.pagetable = pagetable; entry->memdesc.size = 0; /* USE_CPU_MAP is not impemented for ION. */ entry->memdesc.flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP; sg_table = dma_buf_map_attachment(attach, DMA_TO_DEVICE); if (IS_ERR_OR_NULL(sg_table)) { ret = PTR_ERR(sg_table); goto out; } meta->table = sg_table; entry->priv_data = meta; entry->memdesc.sg = sg_table->sgl; /* Calculate the size of the memdesc from the sglist */ entry->memdesc.sglen = 0; for (s = entry->memdesc.sg; s != NULL; s = sg_next(s)) { entry->memdesc.size += s->length; entry->memdesc.sglen++; } entry->memdesc.size = PAGE_ALIGN(entry->memdesc.size); out: if (ret) { if (!IS_ERR_OR_NULL(attach)) dma_buf_detach(dmabuf, attach); if (!IS_ERR_OR_NULL(dmabuf)) dma_buf_put(dmabuf); kfree(meta); } return ret; } static long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { int result = -EINVAL; struct kgsl_map_user_mem *param = data; struct kgsl_mem_entry *entry = NULL; struct kgsl_process_private *private = dev_priv->process_priv; enum kgsl_user_mem_type memtype; entry = kgsl_mem_entry_create(); if (entry == NULL) return -ENOMEM; if (_IOC_SIZE(cmd) == sizeof(struct kgsl_sharedmem_from_pmem)) memtype = KGSL_USER_MEM_TYPE_PMEM; else memtype = param->memtype; /* * Mask off unknown flags from userspace. This way the caller can * check if a flag is supported by looking at the returned flags. * Note: CACHEMODE is ignored for this call. Caching should be * determined by type of allocation being mapped. 
 */
	param->flags &= KGSL_MEMFLAGS_GPUREADONLY
			| KGSL_MEMTYPE_MASK
			| KGSL_MEMALIGN_MASK
			| KGSL_MEMFLAGS_USE_CPU_MAP;

	entry->memdesc.flags = param->flags;
	if (!kgsl_mmu_use_cpu_map(&dev_priv->device->mmu))
		entry->memdesc.flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;

	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU)
		entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE;

	switch (memtype) {
	case KGSL_USER_MEM_TYPE_PMEM:
		if (param->fd == 0 || param->len == 0)
			break;

		result = kgsl_setup_phys_file(entry, private->pagetable,
					param->fd, param->offset, param->len);
		entry->memtype = KGSL_MEM_ENTRY_PMEM;
		break;

	case KGSL_USER_MEM_TYPE_ADDR:
		KGSL_DEV_ERR_ONCE(dev_priv->device, "User mem type "
				"KGSL_USER_MEM_TYPE_ADDR is deprecated\n");
		if (!kgsl_mmu_enabled()) {
			KGSL_DRV_ERR(dev_priv->device,
				"Cannot map paged memory with the "
				"MMU disabled\n");
			break;
		}

		if (param->hostptr == 0)
			break;

		result = kgsl_setup_useraddr(entry, private->pagetable,
					param->hostptr,
					param->offset, param->len);
		entry->memtype = KGSL_MEM_ENTRY_USER;
		break;

	case KGSL_USER_MEM_TYPE_ASHMEM:
		if (!kgsl_mmu_enabled()) {
			KGSL_DRV_ERR(dev_priv->device,
				"Cannot map paged memory with the "
				"MMU disabled\n");
			break;
		}

		if (param->hostptr == 0)
			break;

		result = kgsl_setup_ashmem(entry, private->pagetable,
					param->fd, param->hostptr,
					param->len);
		entry->memtype = KGSL_MEM_ENTRY_ASHMEM;
		break;
	case KGSL_USER_MEM_TYPE_ION:
		result = kgsl_setup_ion(entry, private->pagetable, data,
					dev_priv->device);
		break;
	default:
		KGSL_CORE_ERR("Invalid memory type: %x\n", memtype);
		break;
	}

	if (result)
		goto error;

	if (entry->memdesc.size >= SZ_1M)
		kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_1M));
	else if (entry->memdesc.size >= SZ_64K)
		kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_64K));

	/* echo back flags */
	param->flags = entry->memdesc.flags;

	result = kgsl_mem_entry_attach_process(entry, dev_priv);
	if (result)
		goto error_attach;

	/* Adjust the returned value for a non 4k aligned offset */
	param->gpuaddr = entry->memdesc.gpuaddr + (param->offset & ~PAGE_MASK);

	KGSL_STATS_ADD(param->len, kgsl_driver.stats.mapped,
		kgsl_driver.stats.mapped_max);

	kgsl_process_add_stats(private, entry->memtype, param->len);

	trace_kgsl_mem_map(entry, param->fd);

	return result;

error_attach:
	switch (entry->memtype) {
	case KGSL_MEM_ENTRY_PMEM:
	case KGSL_MEM_ENTRY_ASHMEM:
		if (entry->priv_data)
			fput(entry->priv_data);
		break;
	case KGSL_MEM_ENTRY_ION:
		kgsl_destroy_ion(entry->priv_data);
		break;
	default:
		break;
	}
error:
	/* Clear gpuaddr here so userspace doesn't get any wrong ideas */
	param->gpuaddr = 0;

	kfree(entry);
	return result;
}

static int _kgsl_gpumem_sync_cache(struct kgsl_mem_entry *entry, int op)
{
	int ret = 0;
	int cacheop;
	int mode;

	/*
	 * Flush is defined as (clean | invalidate).
If both bits are set, then * do a flush, otherwise check for the individual bits and clean or inv * as requested */ if ((op & KGSL_GPUMEM_CACHE_FLUSH) == KGSL_GPUMEM_CACHE_FLUSH) cacheop = KGSL_CACHE_OP_FLUSH; else if (op & KGSL_GPUMEM_CACHE_CLEAN) cacheop = KGSL_CACHE_OP_CLEAN; else if (op & KGSL_GPUMEM_CACHE_INV) cacheop = KGSL_CACHE_OP_INV; else { ret = -EINVAL; goto done; } mode = kgsl_memdesc_get_cachemode(&entry->memdesc); if (mode != KGSL_CACHEMODE_UNCACHED && mode != KGSL_CACHEMODE_WRITECOMBINE) { trace_kgsl_mem_sync_cache(entry, op); kgsl_cache_range_op(&entry->memdesc, cacheop); } done: return ret; } /* New cache sync function - supports both directions (clean and invalidate) */ static long kgsl_ioctl_gpumem_sync_cache(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { struct kgsl_gpumem_sync_cache *param = data; struct kgsl_process_private *private = dev_priv->process_priv; struct kgsl_mem_entry *entry = NULL; long ret; if (param->id != 0) { entry = kgsl_sharedmem_find_id(private, param->id); if (entry == NULL) { KGSL_MEM_INFO(dev_priv->device, "can't find id %d\n", param->id); return -EINVAL; } } else if (param->gpuaddr != 0) { entry = kgsl_sharedmem_find(private, param->gpuaddr); if (entry == NULL) { KGSL_MEM_INFO(dev_priv->device, "can't find gpuaddr %x\n", param->gpuaddr); return -EINVAL; } } else { return -EINVAL; } ret = _kgsl_gpumem_sync_cache(entry, param->op); kgsl_mem_entry_put(entry); return ret; } static int mem_id_cmp(const void *_a, const void *_b) { const unsigned int *a = _a, *b = _b; int cmp = a - b; return (cmp < 0) ? -1 : (cmp > 0); } static long kgsl_ioctl_gpumem_sync_cache_bulk(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { int i; struct kgsl_gpumem_sync_cache_bulk *param = data; struct kgsl_process_private *private = dev_priv->process_priv; unsigned int id, last_id = 0, *id_list = NULL, actual_count = 0; struct kgsl_mem_entry **entries = NULL; long ret = 0; size_t op_size = 0; bool full_flush = false; if (param->id_list == NULL || param->count == 0 || param->count > (PAGE_SIZE / sizeof(unsigned int))) return -EINVAL; id_list = kzalloc(param->count * sizeof(unsigned int), GFP_KERNEL); if (id_list == NULL) return -ENOMEM; entries = kzalloc(param->count * sizeof(*entries), GFP_KERNEL); if (entries == NULL) { ret = -ENOMEM; goto end; } if (copy_from_user(id_list, param->id_list, param->count * sizeof(unsigned int))) { ret = -EFAULT; goto end; } /* sort the ids so we can weed out duplicates */ sort(id_list, param->count, sizeof(int), mem_id_cmp, NULL); for (i = 0; i < param->count; i++) { unsigned int cachemode; struct kgsl_mem_entry *entry = NULL; id = id_list[i]; /* skip 0 ids or duplicates */ if (id == last_id) continue; entry = kgsl_sharedmem_find_id(private, id); if (entry == NULL) continue; /* skip uncached memory */ cachemode = kgsl_memdesc_get_cachemode(&entry->memdesc); if (cachemode != KGSL_CACHEMODE_WRITETHROUGH && cachemode != KGSL_CACHEMODE_WRITEBACK) { kgsl_mem_entry_put(entry); continue; } op_size += entry->memdesc.size; entries[actual_count++] = entry; /* If we exceed the breakeven point, flush the entire cache */ if (kgsl_driver.full_cache_threshold != 0 && op_size >= kgsl_driver.full_cache_threshold && param->op == KGSL_GPUMEM_CACHE_FLUSH) { full_flush = true; break; } last_id = id; } if (full_flush) { trace_kgsl_mem_sync_full_cache(actual_count, op_size, param->op); __cpuc_flush_kern_all(); } for (i = 0; i < actual_count; i++) { if (!full_flush) _kgsl_gpumem_sync_cache(entries[i], param->op); 
kgsl_mem_entry_put(entries[i]); } end: kfree(entries); kfree(id_list); return ret; } /* Legacy cache function, does a flush (clean + invalidate) */ static long kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { struct kgsl_sharedmem_free *param = data; struct kgsl_process_private *private = dev_priv->process_priv; struct kgsl_mem_entry *entry = NULL; long ret; entry = kgsl_sharedmem_find(private, param->gpuaddr); if (entry == NULL) { KGSL_MEM_INFO(dev_priv->device, "can't find gpuaddr %x\n", param->gpuaddr); return -EINVAL; } ret = _kgsl_gpumem_sync_cache(entry, KGSL_GPUMEM_CACHE_FLUSH); kgsl_mem_entry_put(entry); return ret; } /* * The common parts of kgsl_ioctl_gpumem_alloc and kgsl_ioctl_gpumem_alloc_id. */ int _gpumem_alloc(struct kgsl_device_private *dev_priv, struct kgsl_mem_entry **ret_entry, unsigned int size, unsigned int flags) { int result; struct kgsl_process_private *private = dev_priv->process_priv; struct kgsl_mem_entry *entry; int align; /* * Mask off unknown flags from userspace. This way the caller can * check if a flag is supported by looking at the returned flags. */ flags &= KGSL_MEMFLAGS_GPUREADONLY | KGSL_CACHEMODE_MASK | KGSL_MEMTYPE_MASK | KGSL_MEMALIGN_MASK | KGSL_MEMFLAGS_USE_CPU_MAP; /* Cap the alignment bits to the highest number we can handle */ align = (flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT; if (align >= 32) { KGSL_CORE_ERR("Alignment too big, restricting to 2^31\n"); flags &= ~KGSL_MEMALIGN_MASK; flags |= (31 << KGSL_MEMALIGN_SHIFT) & KGSL_MEMALIGN_MASK; } entry = kgsl_mem_entry_create(); if (entry == NULL) return -ENOMEM; if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU) entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE; result = kgsl_allocate_user(dev_priv->device, &entry->memdesc, private->pagetable, size, flags); if (result != 0) goto err; entry->memtype = KGSL_MEM_ENTRY_KERNEL; *ret_entry = entry; return result; err: kfree(entry); *ret_entry = NULL; return result; } static long kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { struct kgsl_process_private *private = dev_priv->process_priv; struct kgsl_gpumem_alloc *param = data; struct kgsl_mem_entry *entry = NULL; int result; param->flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP; result = _gpumem_alloc(dev_priv, &entry, param->size, param->flags); if (result) return result; result = kgsl_mem_entry_attach_process(entry, dev_priv); if (result != 0) goto err; kgsl_process_add_stats(private, entry->memtype, param->size); trace_kgsl_mem_alloc(entry); param->gpuaddr = entry->memdesc.gpuaddr; param->size = entry->memdesc.size; param->flags = entry->memdesc.flags; return result; err: kgsl_sharedmem_free(&entry->memdesc); kfree(entry); return result; } static long kgsl_ioctl_gpumem_alloc_id(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { struct kgsl_process_private *private = dev_priv->process_priv; struct kgsl_device *device = dev_priv->device; struct kgsl_gpumem_alloc_id *param = data; struct kgsl_mem_entry *entry = NULL; int result; if (!kgsl_mmu_use_cpu_map(&device->mmu)) param->flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP; result = _gpumem_alloc(dev_priv, &entry, param->size, param->flags); if (result != 0) goto err; result = kgsl_mem_entry_attach_process(entry, dev_priv); if (result != 0) goto err; kgsl_process_add_stats(private, entry->memtype, param->size); trace_kgsl_mem_alloc(entry); param->id = entry->id; param->flags = entry->memdesc.flags; param->size = entry->memdesc.size; param->mmapsize = 
kgsl_memdesc_mmapsize(&entry->memdesc); param->gpuaddr = entry->memdesc.gpuaddr; return result; err: if (entry) kgsl_sharedmem_free(&entry->memdesc); kfree(entry); return result; } static long kgsl_ioctl_gpumem_get_info(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { struct kgsl_process_private *private = dev_priv->process_priv; struct kgsl_gpumem_get_info *param = data; struct kgsl_mem_entry *entry = NULL; int result = 0; if (param->id != 0) { entry = kgsl_sharedmem_find_id(private, param->id); if (entry == NULL) { KGSL_MEM_INFO(dev_priv->device, "can't find id %d\n", param->id); return -EINVAL; } } else if (param->gpuaddr != 0) { entry = kgsl_sharedmem_find(private, param->gpuaddr); if (entry == NULL) { KGSL_MEM_INFO(dev_priv->device, "can't find gpuaddr %lx\n", param->gpuaddr); return -EINVAL; } } else { return -EINVAL; } param->gpuaddr = entry->memdesc.gpuaddr; param->id = entry->id; param->flags = entry->memdesc.flags; param->size = entry->memdesc.size; param->mmapsize = kgsl_memdesc_mmapsize(&entry->memdesc); param->useraddr = entry->memdesc.useraddr; kgsl_mem_entry_put(entry); return result; } static long kgsl_ioctl_cff_syncmem(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { int result = 0; struct kgsl_cff_syncmem *param = data; struct kgsl_process_private *private = dev_priv->process_priv; struct kgsl_mem_entry *entry = NULL; entry = kgsl_sharedmem_find_region(private, param->gpuaddr, param->len); if (!entry) return -EINVAL; kgsl_cffdump_syncmem(dev_priv->device, &entry->memdesc, param->gpuaddr, param->len, true); kgsl_mem_entry_put(entry); return result; } static long kgsl_ioctl_cff_user_event(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { int result = 0; struct kgsl_cff_user_event *param = data; kgsl_cffdump_user_event(dev_priv->device, param->cff_opcode, param->op1, param->op2, param->op3, param->op4, param->op5); return result; } #ifdef CONFIG_GENLOCK struct kgsl_genlock_event_priv { struct genlock_handle *handle; struct genlock *lock; }; /** * kgsl_genlock_event_cb() - Event callback for a genlock timestamp event * @device: The KGSL device that expired the timestamp * @priv: private data for the event * @context_id: the context id that goes with the timestamp * @timestamp: the timestamp that triggered the event * @type: Type of event that signaled the callback * * Release a genlock lock following the expiration of a timestamp */ static void kgsl_genlock_event_cb(struct kgsl_device *device, void *priv, u32 context_id, u32 timestamp, u32 type) { struct kgsl_genlock_event_priv *ev = priv; int ret; /* Signal the lock for every event type */ ret = genlock_lock(ev->handle, GENLOCK_UNLOCK, 0, 0); if (ret) KGSL_CORE_ERR("Error while unlocking genlock: %d\n", ret); genlock_put_handle(ev->handle); kfree(ev); } /** * kgsl_add_genlock-event - Create a new genlock event * @device - KGSL device to create the event on * @timestamp - Timestamp to trigger the event * @data - User space buffer containing struct kgsl_genlock_event_priv * @len - length of the userspace buffer * @owner - driver instance that owns this event * @returns 0 on success or error code on error * * Attack to a genlock handle and register an event to release the * genlock lock when the timestamp expires */ static int kgsl_add_genlock_event(struct kgsl_device *device, u32 context_id, u32 timestamp, void __user *data, int len, struct kgsl_device_private *owner) { struct kgsl_genlock_event_priv *event; struct kgsl_timestamp_event_genlock priv; int ret; if (len 
!= sizeof(priv)) return -EINVAL; if (copy_from_user(&priv, data, sizeof(priv))) return -EFAULT; event = kzalloc(sizeof(*event), GFP_KERNEL); if (event == NULL) return -ENOMEM; event->handle = genlock_get_handle_fd(priv.handle); if (IS_ERR(event->handle)) { int ret = PTR_ERR(event->handle); kfree(event); return ret; } ret = kgsl_add_event(device, context_id, timestamp, kgsl_genlock_event_cb, event, owner); if (ret) kfree(event); return ret; } #else static long kgsl_add_genlock_event(struct kgsl_device *device, u32 context_id, u32 timestamp, void __user *data, int len, struct kgsl_device_private *owner) { return -EINVAL; } #endif /** * kgsl_ioctl_timestamp_event - Register a new timestamp event from userspace * @dev_priv - pointer to the private device structure * @cmd - the ioctl cmd passed from kgsl_ioctl * @data - the user data buffer from kgsl_ioctl * @returns 0 on success or error code on failure */ static long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { struct kgsl_timestamp_event *param = data; int ret; switch (param->type) { case KGSL_TIMESTAMP_EVENT_GENLOCK: ret = kgsl_add_genlock_event(dev_priv->device, param->context_id, param->timestamp, param->priv, param->len, dev_priv); break; case KGSL_TIMESTAMP_EVENT_FENCE: ret = kgsl_add_fence_event(dev_priv->device, param->context_id, param->timestamp, param->priv, param->len, dev_priv); break; default: ret = -EINVAL; } return ret; } typedef long (*kgsl_ioctl_func_t)(struct kgsl_device_private *, unsigned int, void *); #define KGSL_IOCTL_FUNC(_cmd, _func, _flags) \ [_IOC_NR((_cmd))] = \ { .cmd = (_cmd), .func = (_func), .flags = (_flags) } #define KGSL_IOCTL_LOCK BIT(0) static const struct { unsigned int cmd; kgsl_ioctl_func_t func; unsigned int flags; } kgsl_ioctl_funcs[] = { KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_GETPROPERTY, kgsl_ioctl_device_getproperty, KGSL_IOCTL_LOCK), KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP, kgsl_ioctl_device_waittimestamp, KGSL_IOCTL_LOCK), KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID, kgsl_ioctl_device_waittimestamp_ctxtid, KGSL_IOCTL_LOCK), KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS, kgsl_ioctl_rb_issueibcmds, 0), KGSL_IOCTL_FUNC(IOCTL_KGSL_SUBMIT_COMMANDS, kgsl_ioctl_submit_commands, 0), KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP, kgsl_ioctl_cmdstream_readtimestamp, KGSL_IOCTL_LOCK), KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID, kgsl_ioctl_cmdstream_readtimestamp_ctxtid, KGSL_IOCTL_LOCK), KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP, kgsl_ioctl_cmdstream_freememontimestamp, KGSL_IOCTL_LOCK), KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID, kgsl_ioctl_cmdstream_freememontimestamp_ctxtid, KGSL_IOCTL_LOCK), KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_CREATE, kgsl_ioctl_drawctxt_create, KGSL_IOCTL_LOCK), KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_DESTROY, kgsl_ioctl_drawctxt_destroy, KGSL_IOCTL_LOCK), KGSL_IOCTL_FUNC(IOCTL_KGSL_MAP_USER_MEM, kgsl_ioctl_map_user_mem, 0), KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_PMEM, kgsl_ioctl_map_user_mem, 0), KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FREE, kgsl_ioctl_sharedmem_free, 0), KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE, kgsl_ioctl_sharedmem_flush_cache, 0), KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC, kgsl_ioctl_gpumem_alloc, 0), KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_SYNCMEM, kgsl_ioctl_cff_syncmem, 0), KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_USER_EVENT, kgsl_ioctl_cff_user_event, 0), KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMESTAMP_EVENT, kgsl_ioctl_timestamp_event, KGSL_IOCTL_LOCK), 
KGSL_IOCTL_FUNC(IOCTL_KGSL_SETPROPERTY, kgsl_ioctl_device_setproperty, KGSL_IOCTL_LOCK), KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC_ID, kgsl_ioctl_gpumem_alloc_id, 0), KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_FREE_ID, kgsl_ioctl_gpumem_free_id, 0), KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_GET_INFO, kgsl_ioctl_gpumem_get_info, 0), KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_SYNC_CACHE, kgsl_ioctl_gpumem_sync_cache, 0), KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK, kgsl_ioctl_gpumem_sync_cache_bulk, 0), }; static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { struct kgsl_device_private *dev_priv = filep->private_data; unsigned int nr; kgsl_ioctl_func_t func; int lock, ret; char ustack[64]; void *uptr = NULL; BUG_ON(dev_priv == NULL); /* Workaround for an previously incorrectly defined ioctl code. This helps ensure binary compatability */ if (cmd == IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD) cmd = IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP; else if (cmd == IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD) cmd = IOCTL_KGSL_CMDSTREAM_READTIMESTAMP; else if (cmd == IOCTL_KGSL_TIMESTAMP_EVENT_OLD) cmd = IOCTL_KGSL_TIMESTAMP_EVENT; nr = _IOC_NR(cmd); if (cmd & (IOC_IN | IOC_OUT)) { if (_IOC_SIZE(cmd) < sizeof(ustack)) uptr = ustack; else { uptr = kzalloc(_IOC_SIZE(cmd), GFP_KERNEL); if (uptr == NULL) { KGSL_MEM_ERR(dev_priv->device, "kzalloc(%d) failed\n", _IOC_SIZE(cmd)); ret = -ENOMEM; goto done; } } if (cmd & IOC_IN) { if (copy_from_user(uptr, (void __user *) arg, _IOC_SIZE(cmd))) { ret = -EFAULT; goto done; } } else memset(uptr, 0, _IOC_SIZE(cmd)); } if (nr < ARRAY_SIZE(kgsl_ioctl_funcs) && kgsl_ioctl_funcs[nr].func != NULL) { /* * Make sure that nobody tried to send us a malformed ioctl code * with a valid NR but bogus flags */ if (kgsl_ioctl_funcs[nr].cmd != cmd) { KGSL_DRV_ERR(dev_priv->device, "Malformed ioctl code %08x\n", cmd); ret = -ENOIOCTLCMD; goto done; } func = kgsl_ioctl_funcs[nr].func; lock = kgsl_ioctl_funcs[nr].flags & KGSL_IOCTL_LOCK; } else { func = dev_priv->device->ftbl->ioctl; if (!func) { KGSL_DRV_INFO(dev_priv->device, "invalid ioctl code %08x\n", cmd); ret = -ENOIOCTLCMD; goto done; } lock = 1; } if (lock) mutex_lock(&dev_priv->device->mutex); ret = func(dev_priv, cmd, uptr); if (lock) mutex_unlock(&dev_priv->device->mutex); /* * Still copy back on failure, but assume function took * all necessary precautions sanitizing the return values. */ if (cmd & IOC_OUT) { if (copy_to_user((void __user *) arg, uptr, _IOC_SIZE(cmd))) ret = -EFAULT; } done: if (_IOC_SIZE(cmd) >= sizeof(ustack)) kfree(uptr); return ret; } static int kgsl_mmap_memstore(struct kgsl_device *device, struct vm_area_struct *vma) { struct kgsl_memdesc *memdesc = &device->memstore; int result; unsigned int vma_size = vma->vm_end - vma->vm_start; /* The memstore can only be mapped as read only */ if (vma->vm_flags & VM_WRITE) return -EPERM; if (memdesc->size != vma_size) { KGSL_MEM_ERR(device, "memstore bad size: %d should be %d\n", vma_size, memdesc->size); return -EINVAL; } vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); result = remap_pfn_range(vma, vma->vm_start, device->memstore.physaddr >> PAGE_SHIFT, vma_size, vma->vm_page_prot); if (result != 0) KGSL_MEM_ERR(device, "remap_pfn_range failed: %d\n", result); return result; } /* * kgsl_gpumem_vm_open is called whenever a vma region is copied or split. 
* Increase the refcount to make sure that the accounting stays correct */ static void kgsl_gpumem_vm_open(struct vm_area_struct *vma) { struct kgsl_mem_entry *entry = vma->vm_private_data; if (!kgsl_mem_entry_get(entry)) vma->vm_private_data = NULL; } static int kgsl_gpumem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct kgsl_mem_entry *entry = vma->vm_private_data; if (!entry) return VM_FAULT_SIGBUS; if (!entry->memdesc.ops || !entry->memdesc.ops->vmfault) return VM_FAULT_SIGBUS; return entry->memdesc.ops->vmfault(&entry->memdesc, vma, vmf); } static void kgsl_gpumem_vm_close(struct vm_area_struct *vma) { struct kgsl_mem_entry *entry = vma->vm_private_data; if (!entry) return; entry->memdesc.useraddr = 0; kgsl_mem_entry_put(entry); } static struct vm_operations_struct kgsl_gpumem_vm_ops = { .open = kgsl_gpumem_vm_open, .fault = kgsl_gpumem_vm_fault, .close = kgsl_gpumem_vm_close, }; static int get_mmap_entry(struct kgsl_process_private *private, struct kgsl_mem_entry **out_entry, unsigned long pgoff, unsigned long len) { int ret = 0; struct kgsl_mem_entry *entry; entry = kgsl_sharedmem_find_id(private, pgoff); if (entry == NULL) { entry = kgsl_sharedmem_find(private, pgoff << PAGE_SHIFT); } if (!entry) return -EINVAL; if (!entry->memdesc.ops || !entry->memdesc.ops->vmflags || !entry->memdesc.ops->vmfault) { ret = -EINVAL; goto err_put; } if (entry->memdesc.useraddr != 0) { ret = -EBUSY; goto err_put; } if (len != kgsl_memdesc_mmapsize(&entry->memdesc)) { ret = -ERANGE; goto err_put; } *out_entry = entry; return 0; err_put: kgsl_mem_entry_put(entry); return ret; } static inline bool mmap_range_valid(unsigned long addr, unsigned long len) { return ((ULONG_MAX - addr) > len) && ((addr + len) < TASK_SIZE); } static unsigned long kgsl_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long ret = 0, orig_len = len; unsigned long vma_offset = pgoff << PAGE_SHIFT; struct kgsl_device_private *dev_priv = file->private_data; struct kgsl_process_private *private = dev_priv->process_priv; struct kgsl_device *device = dev_priv->device; struct kgsl_mem_entry *entry = NULL; unsigned int align; unsigned int retry = 0; if (vma_offset == device->memstore.gpuaddr) return get_unmapped_area(NULL, addr, len, pgoff, flags); ret = get_mmap_entry(private, &entry, pgoff, len); if (ret) return ret; if (!kgsl_memdesc_use_cpu_map(&entry->memdesc)) { /* * If we're not going to use the same mapping on the gpu, * any address is fine. * For MAP_FIXED, hopefully the caller knows what they're doing, * but we may fail in mmap() if there is already something * at the virtual address chosen. */ ret = get_unmapped_area(NULL, addr, len, pgoff, flags); goto put; } if (entry->memdesc.gpuaddr != 0) { KGSL_MEM_INFO(device, "pgoff %lx already mapped to gpuaddr %x\n", pgoff, entry->memdesc.gpuaddr); ret = -EBUSY; goto put; } align = kgsl_memdesc_get_align(&entry->memdesc); if (align >= ilog2(SZ_1M)) align = ilog2(SZ_1M); else if (align >= ilog2(SZ_64K)) align = ilog2(SZ_64K); else if (align <= PAGE_SHIFT) align = 0; if (align) len += 1 << align; if (!mmap_range_valid(addr, len)) addr = 0; do { ret = get_unmapped_area(NULL, addr, len, pgoff, flags); if (IS_ERR_VALUE(ret)) { /* * If we are really fragmented, there may not be room * for the alignment padding, so try again without it. 
*/ if (!retry && (ret == (unsigned long)-ENOMEM) && (align > PAGE_SHIFT)) { align = 0; addr = 0; len = orig_len; retry = 1; continue; } break; } if (align) ret = ALIGN(ret, (1 << align)); /*make sure there isn't a GPU only mapping at this address */ spin_lock(&private->mem_lock); if (kgsl_sharedmem_region_empty(private, ret, orig_len)) { int ret_val; /* * We found a free memory map, claim it here with * memory lock held */ entry->memdesc.gpuaddr = ret; /* This should never fail */ ret_val = kgsl_mem_entry_track_gpuaddr(private, entry); spin_unlock(&private->mem_lock); BUG_ON(ret_val); /* map cannot be called with lock held */ ret_val = kgsl_mmu_map(private->pagetable, &entry->memdesc); if (ret_val) { spin_lock(&private->mem_lock); kgsl_mem_entry_untrack_gpuaddr(private, entry); spin_unlock(&private->mem_lock); ret = ret_val; } break; } spin_unlock(&private->mem_lock); trace_kgsl_mem_unmapped_area_collision(entry, addr, orig_len, ret); /* * If we collided, bump the hint address so that * get_umapped_area knows to look somewhere else. */ addr = (addr == 0) ? ret + orig_len : addr + orig_len; /* * The addr hint can be set by userspace to be near * the end of the address space. Make sure we search * the whole address space at least once by wrapping * back around once. */ if (!retry && !mmap_range_valid(addr, len)) { addr = 0; retry = 1; } else { ret = -EBUSY; } } while (!(flags & MAP_FIXED) && mmap_range_valid(addr, len)); if (IS_ERR_VALUE(ret)) KGSL_MEM_ERR(device, "pid %d pgoff %lx len %ld failed error %ld\n", private->pid, pgoff, len, ret); put: kgsl_mem_entry_put(entry); return ret; } static int kgsl_mmap(struct file *file, struct vm_area_struct *vma) { unsigned int ret, cache; unsigned long vma_offset = vma->vm_pgoff << PAGE_SHIFT; struct kgsl_device_private *dev_priv = file->private_data; struct kgsl_process_private *private = dev_priv->process_priv; struct kgsl_mem_entry *entry = NULL; struct kgsl_device *device = dev_priv->device; /* Handle leagacy behavior for memstore */ if (vma_offset == device->memstore.gpuaddr) return kgsl_mmap_memstore(device, vma); /* * The reference count on the entry that we get from * get_mmap_entry() will be held until kgsl_gpumem_vm_close(). 
*/ ret = get_mmap_entry(private, &entry, vma->vm_pgoff, vma->vm_end - vma->vm_start); if (ret) return ret; vma->vm_flags |= entry->memdesc.ops->vmflags; vma->vm_private_data = entry; /* Determine user-side caching policy */ cache = kgsl_memdesc_get_cachemode(&entry->memdesc); switch (cache) { case KGSL_CACHEMODE_UNCACHED: vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); break; case KGSL_CACHEMODE_WRITETHROUGH: vma->vm_page_prot = pgprot_writethroughcache(vma->vm_page_prot); break; case KGSL_CACHEMODE_WRITEBACK: vma->vm_page_prot = pgprot_writebackcache(vma->vm_page_prot); break; case KGSL_CACHEMODE_WRITECOMBINE: default: vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); break; } vma->vm_ops = &kgsl_gpumem_vm_ops; if (cache == KGSL_CACHEMODE_WRITEBACK || cache == KGSL_CACHEMODE_WRITETHROUGH) { struct scatterlist *s; int i; int sglen = entry->memdesc.sglen; unsigned long addr = vma->vm_start; for_each_sg(entry->memdesc.sg, s, sglen, i) { int j; for (j = 0; j < (sg_dma_len(s) >> PAGE_SHIFT); j++) { struct page *page = sg_page(s); page = nth_page(page, j); vm_insert_page(vma, addr, page); addr += PAGE_SIZE; } } } vma->vm_file = file; entry->memdesc.useraddr = vma->vm_start; trace_kgsl_mem_mmap(entry); return 0; } static irqreturn_t kgsl_irq_handler(int irq, void *data) { struct kgsl_device *device = data; return device->ftbl->irq_handler(device); } static const struct file_operations kgsl_fops = { .owner = THIS_MODULE, .release = kgsl_release, .open = kgsl_open, .mmap = kgsl_mmap, .get_unmapped_area = kgsl_get_unmapped_area, .unlocked_ioctl = kgsl_ioctl, }; struct kgsl_driver kgsl_driver = { .process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex), .ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock), .devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock), .memfree_hist_mutex = __MUTEX_INITIALIZER(kgsl_driver.memfree_hist_mutex), /* * Full cache flushes are faster than line by line on at least * 8064 and 8974 once the region to be flushed is > 16mb. 
*/ .full_cache_threshold = SZ_16M, }; EXPORT_SYMBOL(kgsl_driver); static void _unregister_device(struct kgsl_device *device) { int minor; mutex_lock(&kgsl_driver.devlock); for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) { if (device == kgsl_driver.devp[minor]) break; } if (minor != KGSL_DEVICE_MAX) { device_destroy(kgsl_driver.class, MKDEV(MAJOR(kgsl_driver.major), minor)); kgsl_driver.devp[minor] = NULL; } mutex_unlock(&kgsl_driver.devlock); } static int _register_device(struct kgsl_device *device) { int minor, ret; dev_t dev; /* Find a minor for the device */ mutex_lock(&kgsl_driver.devlock); for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) { if (kgsl_driver.devp[minor] == NULL) { kgsl_driver.devp[minor] = device; break; } } mutex_unlock(&kgsl_driver.devlock); if (minor == KGSL_DEVICE_MAX) { KGSL_CORE_ERR("minor devices exhausted\n"); return -ENODEV; } /* Create the device */ dev = MKDEV(MAJOR(kgsl_driver.major), minor); device->dev = device_create(kgsl_driver.class, device->parentdev, dev, device, device->name); if (IS_ERR(device->dev)) { mutex_lock(&kgsl_driver.devlock); kgsl_driver.devp[minor] = NULL; mutex_unlock(&kgsl_driver.devlock); ret = PTR_ERR(device->dev); KGSL_CORE_ERR("device_create(%s): %d\n", device->name, ret); return ret; } dev_set_drvdata(device->parentdev, device); return 0; } int kgsl_device_platform_probe(struct kgsl_device *device) { int result; int status = -EINVAL; struct resource *res; struct platform_device *pdev = container_of(device->parentdev, struct platform_device, dev); status = _register_device(device); if (status) return status; /* Initialize logging first, so that failures below actually print. */ kgsl_device_debugfs_init(device); status = kgsl_pwrctrl_init(device); if (status) goto error; /* Get starting physical address of device registers */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, device->iomemname); if (res == NULL) { KGSL_DRV_ERR(device, "platform_get_resource_byname failed\n"); status = -EINVAL; goto error_pwrctrl_close; } if (res->start == 0 || resource_size(res) == 0) { KGSL_DRV_ERR(device, "dev %d invalid register region\n", device->id); status = -EINVAL; goto error_pwrctrl_close; } device->reg_phys = res->start; device->reg_len = resource_size(res); /* * Check if a shadermemname is defined, and then get shader memory * details including shader memory starting physical address * and shader memory length */ if (device->shadermemname != NULL) { res = platform_get_resource_byname(pdev, IORESOURCE_MEM, device->shadermemname); if (res == NULL) { KGSL_DRV_ERR(device, "Shader memory: platform_get_resource_byname failed\n"); } else { device->shader_mem_phys = res->start; device->shader_mem_len = resource_size(res); } if (!devm_request_mem_region(device->dev, device->shader_mem_phys, device->shader_mem_len, device->name)) { KGSL_DRV_ERR(device, "request_mem_region_failed\n"); } } if (!devm_request_mem_region(device->dev, device->reg_phys, device->reg_len, device->name)) { KGSL_DRV_ERR(device, "request_mem_region failed\n"); status = -ENODEV; goto error_pwrctrl_close; } device->reg_virt = devm_ioremap(device->dev, device->reg_phys, device->reg_len); if (device->reg_virt == NULL) { KGSL_DRV_ERR(device, "ioremap failed\n"); status = -ENODEV; goto error_pwrctrl_close; } /*acquire interrupt */ device->pwrctrl.interrupt_num = platform_get_irq_byname(pdev, device->pwrctrl.irq_name); if (device->pwrctrl.interrupt_num <= 0) { KGSL_DRV_ERR(device, "platform_get_irq_byname failed: %d\n", device->pwrctrl.interrupt_num); status = -EINVAL; goto 
error_pwrctrl_close; } status = devm_request_irq(device->dev, device->pwrctrl.interrupt_num, kgsl_irq_handler, IRQF_TRIGGER_HIGH, device->name, device); if (status) { KGSL_DRV_ERR(device, "request_irq(%d) failed: %d\n", device->pwrctrl.interrupt_num, status); goto error_pwrctrl_close; } disable_irq(device->pwrctrl.interrupt_num); KGSL_DRV_INFO(device, "dev_id %d regs phys 0x%08lx size 0x%08x virt %p\n", device->id, device->reg_phys, device->reg_len, device->reg_virt); rwlock_init(&device->context_lock); result = kgsl_drm_init(pdev); if (result) goto error_pwrctrl_close; setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device); status = kgsl_create_device_workqueue(device); if (status) goto error_pwrctrl_close; status = kgsl_mmu_init(device); if (status != 0) { KGSL_DRV_ERR(device, "kgsl_mmu_init failed %d\n", status); goto error_dest_work_q; } status = kgsl_allocate_contiguous(&device->memstore, KGSL_MEMSTORE_SIZE); if (status != 0) { KGSL_DRV_ERR(device, "kgsl_allocate_contiguous failed %d\n", status); goto error_close_mmu; } pm_qos_add_request(&device->pwrctrl.pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); /* Initalize the snapshot engine */ kgsl_device_snapshot_init(device); /* Initialize common sysfs entries */ kgsl_pwrctrl_init_sysfs(device); return 0; error_close_mmu: kgsl_mmu_close(device); error_dest_work_q: destroy_workqueue(device->work_queue); device->work_queue = NULL; error_pwrctrl_close: kgsl_pwrctrl_close(device); error: _unregister_device(device); return status; } EXPORT_SYMBOL(kgsl_device_platform_probe); int kgsl_postmortem_dump(struct kgsl_device *device, int manual) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; BUG_ON(device == NULL); kgsl_cffdump_hang(device); /* For a manual dump, make sure that the system is idle */ if (manual) { kgsl_active_count_wait(device, 0); if (device->state == KGSL_STATE_ACTIVE) kgsl_idle(device); } if (device->pm_dump_enable) { KGSL_LOG_DUMP(device, "POWER: START_STOP_SLEEP_WAKE = %d\n", pwr->strtstp_sleepwake); KGSL_LOG_DUMP(device, "POWER: FLAGS = %08lX | ACTIVE POWERLEVEL = %08X", pwr->power_flags, pwr->active_pwrlevel); KGSL_LOG_DUMP(device, "POWER: INTERVAL TIMEOUT = %08X ", pwr->interval_timeout); } /* Disable the idle timer so we don't get interrupted */ del_timer_sync(&device->idle_timer); /* Force on the clocks */ kgsl_pwrctrl_wake(device); /* Disable the irq */ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF); /*Call the device specific postmortem dump function*/ device->ftbl->postmortem_dump(device, manual); /* On a manual trigger, turn on the interrupts and put the clocks to sleep. They will recover themselves on the next event. For a hang, leave things as they are until fault tolerance kicks in. 
*/ if (manual) { kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON); /* try to go into a sleep mode until the next event */ kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP); kgsl_pwrctrl_sleep(device); } return 0; } EXPORT_SYMBOL(kgsl_postmortem_dump); void kgsl_device_platform_remove(struct kgsl_device *device) { kgsl_device_snapshot_close(device); kgsl_pwrctrl_uninit_sysfs(device); pm_qos_remove_request(&device->pwrctrl.pm_qos_req_dma); idr_destroy(&device->context_idr); kgsl_sharedmem_free(&device->memstore); kgsl_mmu_close(device); if (device->work_queue) { destroy_workqueue(device->work_queue); device->work_queue = NULL; } kgsl_pwrctrl_close(device); _unregister_device(device); } EXPORT_SYMBOL(kgsl_device_platform_remove); static int __devinit kgsl_ptdata_init(void) { kgsl_driver.ptpool = kgsl_mmu_ptpool_init(kgsl_pagetable_count); if (!kgsl_driver.ptpool) return -ENOMEM; return 0; } static void kgsl_core_exit(void) { kgsl_mmu_ptpool_destroy(kgsl_driver.ptpool); kgsl_driver.ptpool = NULL; kgsl_drm_exit(); kgsl_cffdump_destroy(); kgsl_core_debugfs_close(); /* * We call kgsl_sharedmem_uninit_sysfs() and device_unregister() * only if kgsl_driver.virtdev has been populated. * We check at least one member of kgsl_driver.virtdev to * see if it is not NULL (and thus, has been populated). */ if (kgsl_driver.virtdev.class) { kgsl_sharedmem_uninit_sysfs(); device_unregister(&kgsl_driver.virtdev); } if (kgsl_driver.class) { class_destroy(kgsl_driver.class); kgsl_driver.class = NULL; } kgsl_memfree_hist_exit(); unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX); } static int __init kgsl_core_init(void) { int result = 0; /* alloc major and minor device numbers */ result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX, KGSL_NAME); if (result < 0) { KGSL_CORE_ERR("alloc_chrdev_region failed err = %d\n", result); goto err; } cdev_init(&kgsl_driver.cdev, &kgsl_fops); kgsl_driver.cdev.owner = THIS_MODULE; kgsl_driver.cdev.ops = &kgsl_fops; result = cdev_add(&kgsl_driver.cdev, MKDEV(MAJOR(kgsl_driver.major), 0), KGSL_DEVICE_MAX); if (result) { KGSL_CORE_ERR("kgsl: cdev_add() failed, dev_num= %d," " result= %d\n", kgsl_driver.major, result); goto err; } kgsl_driver.class = class_create(THIS_MODULE, KGSL_NAME); if (IS_ERR(kgsl_driver.class)) { result = PTR_ERR(kgsl_driver.class); KGSL_CORE_ERR("failed to create class %s", KGSL_NAME); goto err; } /* Make a virtual device for managing core related things in sysfs */ kgsl_driver.virtdev.class = kgsl_driver.class; dev_set_name(&kgsl_driver.virtdev, "kgsl"); result = device_register(&kgsl_driver.virtdev); if (result) { KGSL_CORE_ERR("driver_register failed\n"); goto err; } /* Make kobjects in the virtual device for storing statistics */ kgsl_driver.ptkobj = kobject_create_and_add("pagetables", &kgsl_driver.virtdev.kobj); kgsl_driver.prockobj = kobject_create_and_add("proc", &kgsl_driver.virtdev.kobj); kgsl_core_debugfs_init(); kgsl_sharedmem_init_sysfs(); kgsl_cffdump_init(); INIT_LIST_HEAD(&kgsl_driver.process_list); INIT_LIST_HEAD(&kgsl_driver.pagetable_list); kgsl_mmu_set_mmutype(ksgl_mmu_type); if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype()) { result = kgsl_ptdata_init(); if (result) goto err; } if (kgsl_memfree_hist_init()) KGSL_CORE_ERR("failed to init memfree_hist"); return 0; err: kgsl_core_exit(); return result; } module_init(kgsl_core_init); module_exit(kgsl_core_exit); MODULE_AUTHOR("Qualcomm Innovation Center, Inc."); MODULE_DESCRIPTION("MSM GPU driver"); MODULE_LICENSE("GPL");
gpl-2.0
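The kgsl_ioctl() path in the file above is a classic table-driven dispatcher: the command's _IOC_NR() indexes an array of handlers, the argument is staged in a scratch buffer (a small stack array or a kzalloc'd one), and the same buffer is copied back out afterwards. The following is a hypothetical user-space sketch of that pattern in plain C, with memcpy() standing in for copy_from_user()/copy_to_user(); every name in it is invented for illustration and it is not part of the driver.

/* Hypothetical user-space sketch of the table-driven dispatch used by
 * kgsl_ioctl(): handlers are indexed by command number and share a scratch
 * buffer that is copied in before and copied out after the call. */
#include <stdio.h>
#include <string.h>

struct req { int in; int out; };

typedef int (*handler_t)(struct req *r);

static int cmd_double(struct req *r) { r->out = r->in * 2; return 0; }
static int cmd_negate(struct req *r) { r->out = -r->in;   return 0; }

static const struct { unsigned int nr; handler_t func; } table[] = {
	{ 0, cmd_double },
	{ 1, cmd_negate },
};

static int dispatch(unsigned int nr, void *user_buf, size_t len)
{
	struct req scratch;
	int ret;

	if (nr >= sizeof(table) / sizeof(table[0]) || !table[nr].func)
		return -1;			/* -ENOIOCTLCMD in the driver */
	if (len != sizeof(scratch))
		return -1;
	memcpy(&scratch, user_buf, len);	/* stands in for copy_from_user() */
	ret = table[nr].func(&scratch);
	memcpy(user_buf, &scratch, len);	/* stands in for copy_to_user() */
	return ret;
}

int main(void)
{
	struct req r = { .in = 21 };
	dispatch(0, &r, sizeof(r));
	printf("%d\n", r.out);			/* prints 42 */
	return 0;
}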
jwpi/glibc
iconvdata/cp774.c
21
1078
/* Conversion from and to CP774. Copyright (C) 2011-2015 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@gmail.com>, 2011. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ #include <stdint.h> /* Specify the conversion table. */ #define TABLES <cp774.h> #define CHARSET_NAME "CP774//" #define HAS_HOLES 0 /* All 256 characters are defined. */ #include <8bit-gap.c>
gpl-2.0
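The cp774.c module above only supplies a table and a few macros; the shared logic lives in 8bit-gap.c, and the result is normally reached from applications through iconv(3). A minimal user-space sketch follows, assuming a glibc that actually ships the CP774 gconv module (the charset name is otherwise an assumption):

/* Convert a few bytes from the single-byte charset to UTF-8 via iconv(3). */
#include <iconv.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	iconv_t cd = iconv_open("UTF-8", "CP774");
	if (cd == (iconv_t) -1) {
		perror("iconv_open");	/* charset not installed, for example */
		return 1;
	}

	char in[] = "ABC";		/* bytes in the source charset */
	char out[16];
	char *inp = in, *outp = out;
	size_t inleft = strlen(in), outleft = sizeof(out);

	if (iconv(cd, &inp, &inleft, &outp, &outleft) == (size_t) -1)
		perror("iconv");
	else
		printf("%.*s\n", (int)(sizeof(out) - outleft), out);

	iconv_close(cd);
	return 0;
}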
vpeter4/linux-fslc
arch/x86/kernel/nmi.c
21
15018
/* * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs * Copyright (C) 2011 Don Zickus Red Hat, Inc. * * Pentium III FXSR, SSE support * Gareth Hughes <gareth@valinux.com>, May 2000 */ /* * Handle hardware traps and faults. */ #include <linux/spinlock.h> #include <linux/kprobes.h> #include <linux/kdebug.h> #include <linux/nmi.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/hardirq.h> #include <linux/slab.h> #include <linux/export.h> #if defined(CONFIG_EDAC) #include <linux/edac.h> #endif #include <linux/atomic.h> #include <asm/traps.h> #include <asm/mach_traps.h> #include <asm/nmi.h> #include <asm/x86_init.h> #define CREATE_TRACE_POINTS #include <trace/events/nmi.h> struct nmi_desc { spinlock_t lock; struct list_head head; }; static struct nmi_desc nmi_desc[NMI_MAX] = { { .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock), .head = LIST_HEAD_INIT(nmi_desc[0].head), }, { .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock), .head = LIST_HEAD_INIT(nmi_desc[1].head), }, { .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock), .head = LIST_HEAD_INIT(nmi_desc[2].head), }, { .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock), .head = LIST_HEAD_INIT(nmi_desc[3].head), }, }; struct nmi_stats { unsigned int normal; unsigned int unknown; unsigned int external; unsigned int swallow; }; static DEFINE_PER_CPU(struct nmi_stats, nmi_stats); static int ignore_nmis; int unknown_nmi_panic; /* * Prevent NMI reason port (0x61) being accessed simultaneously, can * only be used in NMI handler. */ static DEFINE_RAW_SPINLOCK(nmi_reason_lock); static int __init setup_unknown_nmi_panic(char *str) { unknown_nmi_panic = 1; return 1; } __setup("unknown_nmi_panic", setup_unknown_nmi_panic); #define nmi_to_desc(type) (&nmi_desc[type]) static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC; static int __init nmi_warning_debugfs(void) { debugfs_create_u64("nmi_longest_ns", 0644, arch_debugfs_dir, &nmi_longest_ns); return 0; } fs_initcall(nmi_warning_debugfs); static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b) { struct nmi_desc *desc = nmi_to_desc(type); struct nmiaction *a; int handled=0; rcu_read_lock(); /* * NMIs are edge-triggered, which means if you have enough * of them concurrently, you can lose some because only one * can be latched at any given time. Walk the whole list * to handle those situations. */ list_for_each_entry_rcu(a, &desc->head, list) { u64 before, delta, whole_msecs; int remainder_ns, decimal_msecs, thishandled; before = sched_clock(); thishandled = a->handler(type, regs); handled += thishandled; delta = sched_clock() - before; trace_nmi_handler(a->handler, (int)delta, thishandled); if (delta < nmi_longest_ns) continue; nmi_longest_ns = delta; whole_msecs = delta; remainder_ns = do_div(whole_msecs, (1000 * 1000)); decimal_msecs = remainder_ns / 1000; printk_ratelimited(KERN_INFO "INFO: NMI handler (%ps) took too long to run: " "%lld.%03d msecs\n", a->handler, whole_msecs, decimal_msecs); } rcu_read_unlock(); /* return total number of NMI events handled */ return handled; } int __register_nmi_handler(unsigned int type, struct nmiaction *action) { struct nmi_desc *desc = nmi_to_desc(type); unsigned long flags; if (!action->handler) return -EINVAL; spin_lock_irqsave(&desc->lock, flags); /* * most handlers of type NMI_UNKNOWN never return because * they just assume the NMI is theirs. 
Just a sanity check * to manage expectations */ WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head)); WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head)); WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head)); /* * some handlers need to be executed first otherwise a fake * event confuses some handlers (kdump uses this flag) */ if (action->flags & NMI_FLAG_FIRST) list_add_rcu(&action->list, &desc->head); else list_add_tail_rcu(&action->list, &desc->head); spin_unlock_irqrestore(&desc->lock, flags); return 0; } EXPORT_SYMBOL(__register_nmi_handler); void unregister_nmi_handler(unsigned int type, const char *name) { struct nmi_desc *desc = nmi_to_desc(type); struct nmiaction *n; unsigned long flags; spin_lock_irqsave(&desc->lock, flags); list_for_each_entry_rcu(n, &desc->head, list) { /* * the name passed in to describe the nmi handler * is used as the lookup key */ if (!strcmp(n->name, name)) { WARN(in_nmi(), "Trying to free NMI (%s) from NMI context!\n", n->name); list_del_rcu(&n->list); break; } } spin_unlock_irqrestore(&desc->lock, flags); synchronize_rcu(); } EXPORT_SYMBOL_GPL(unregister_nmi_handler); static __kprobes void pci_serr_error(unsigned char reason, struct pt_regs *regs) { /* check to see if anyone registered against these types of errors */ if (nmi_handle(NMI_SERR, regs, false)) return; pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n", reason, smp_processor_id()); /* * On some machines, PCI SERR line is used to report memory * errors. EDAC makes use of it. */ #if defined(CONFIG_EDAC) if (edac_handler_set()) { edac_atomic_assert_error(); return; } #endif if (panic_on_unrecovered_nmi) panic("NMI: Not continuing"); pr_emerg("Dazed and confused, but trying to continue\n"); /* Clear and disable the PCI SERR error line. */ reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR; outb(reason, NMI_REASON_PORT); } static __kprobes void io_check_error(unsigned char reason, struct pt_regs *regs) { unsigned long i; /* check to see if anyone registered against these types of errors */ if (nmi_handle(NMI_IO_CHECK, regs, false)) return; pr_emerg( "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n", reason, smp_processor_id()); show_regs(regs); if (panic_on_io_nmi) panic("NMI IOCK error: Not continuing"); /* Re-enable the IOCK line, wait for a few seconds */ reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK; outb(reason, NMI_REASON_PORT); i = 20000; while (--i) { touch_nmi_watchdog(); udelay(100); } reason &= ~NMI_REASON_CLEAR_IOCHK; outb(reason, NMI_REASON_PORT); } static __kprobes void unknown_nmi_error(unsigned char reason, struct pt_regs *regs) { int handled; /* * Use 'false' as back-to-back NMIs are dealt with one level up. * Of course this makes having multiple 'unknown' handlers useless * as only the first one is ever run (unless it can actually determine * if it caused the NMI) */ handled = nmi_handle(NMI_UNKNOWN, regs, false); if (handled) { __this_cpu_add(nmi_stats.unknown, handled); return; } __this_cpu_add(nmi_stats.unknown, 1); pr_emerg("Uhhuh. 
NMI received for unknown reason %02x on CPU %d.\n", reason, smp_processor_id()); pr_emerg("Do you have a strange power saving mode enabled?\n"); if (unknown_nmi_panic || panic_on_unrecovered_nmi) panic("NMI: Not continuing"); pr_emerg("Dazed and confused, but trying to continue\n"); } static DEFINE_PER_CPU(bool, swallow_nmi); static DEFINE_PER_CPU(unsigned long, last_nmi_rip); static __kprobes void default_do_nmi(struct pt_regs *regs) { unsigned char reason = 0; int handled; bool b2b = false; /* * CPU-specific NMI must be processed before non-CPU-specific * NMI, otherwise we may lose it, because the CPU-specific * NMI can not be detected/processed on other CPUs. */ /* * Back-to-back NMIs are interesting because they can either * be two NMI or more than two NMIs (any thing over two is dropped * due to NMI being edge-triggered). If this is the second half * of the back-to-back NMI, assume we dropped things and process * more handlers. Otherwise reset the 'swallow' NMI behaviour */ if (regs->ip == __this_cpu_read(last_nmi_rip)) b2b = true; else __this_cpu_write(swallow_nmi, false); __this_cpu_write(last_nmi_rip, regs->ip); handled = nmi_handle(NMI_LOCAL, regs, b2b); __this_cpu_add(nmi_stats.normal, handled); if (handled) { /* * There are cases when a NMI handler handles multiple * events in the current NMI. One of these events may * be queued for in the next NMI. Because the event is * already handled, the next NMI will result in an unknown * NMI. Instead lets flag this for a potential NMI to * swallow. */ if (handled > 1) __this_cpu_write(swallow_nmi, true); return; } /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */ raw_spin_lock(&nmi_reason_lock); reason = x86_platform.get_nmi_reason(); if (reason & NMI_REASON_MASK) { if (reason & NMI_REASON_SERR) pci_serr_error(reason, regs); else if (reason & NMI_REASON_IOCHK) io_check_error(reason, regs); #ifdef CONFIG_X86_32 /* * Reassert NMI in case it became active * meanwhile as it's edge-triggered: */ reassert_nmi(); #endif __this_cpu_add(nmi_stats.external, 1); raw_spin_unlock(&nmi_reason_lock); return; } raw_spin_unlock(&nmi_reason_lock); /* * Only one NMI can be latched at a time. To handle * this we may process multiple nmi handlers at once to * cover the case where an NMI is dropped. The downside * to this approach is we may process an NMI prematurely, * while its real NMI is sitting latched. This will cause * an unknown NMI on the next run of the NMI processing. * * We tried to flag that condition above, by setting the * swallow_nmi flag when we process more than one event. * This condition is also only present on the second half * of a back-to-back NMI, so we flag that condition too. * * If both are true, we assume we already processed this * NMI previously and we swallow it. Otherwise we reset * the logic. * * There are scenarios where we may accidentally swallow * a 'real' unknown NMI. For example, while processing * a perf NMI another perf NMI comes in along with a * 'real' unknown NMI. These two NMIs get combined into * one (as descibed above). When the next NMI gets * processed, it will be flagged by perf as handled, but * noone will know that there was a 'real' unknown NMI sent * also. As a result it gets swallowed. Or if the first * perf NMI returns two events handled then the second * NMI will get eaten by the logic below, again losing a * 'real' unknown NMI. But this is the best we can do * for now. 
*/ if (b2b && __this_cpu_read(swallow_nmi)) __this_cpu_add(nmi_stats.swallow, 1); else unknown_nmi_error(reason, regs); } /* * NMIs can page fault or hit breakpoints which will cause it to lose * its NMI context with the CPU when the breakpoint or page fault does an IRET. * * As a result, NMIs can nest if NMIs get unmasked due an IRET during * NMI processing. On x86_64, the asm glue protects us from nested NMIs * if the outer NMI came from kernel mode, but we can still nest if the * outer NMI came from user mode. * * To handle these nested NMIs, we have three states: * * 1) not running * 2) executing * 3) latched * * When no NMI is in progress, it is in the "not running" state. * When an NMI comes in, it goes into the "executing" state. * Normally, if another NMI is triggered, it does not interrupt * the running NMI and the HW will simply latch it so that when * the first NMI finishes, it will restart the second NMI. * (Note, the latch is binary, thus multiple NMIs triggering, * when one is running, are ignored. Only one NMI is restarted.) * * If an NMI executes an iret, another NMI can preempt it. We do not * want to allow this new NMI to run, but we want to execute it when the * first one finishes. We set the state to "latched", and the exit of * the first NMI will perform a dec_return, if the result is zero * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the * dec_return would have set the state to NMI_EXECUTING (what we want it * to be when we are running). In this case, we simply jump back to * rerun the NMI handler again, and restart the 'latched' NMI. * * No trap (breakpoint or page fault) should be hit before nmi_restart, * thus there is no race between the first check of state for NOT_RUNNING * and setting it to NMI_EXECUTING. The HW will prevent nested NMIs * at this point. * * In case the NMI takes a page fault, we need to save off the CR2 * because the NMI could have preempted another page fault and corrupt * the CR2 that is about to be read. As nested NMIs must be restarted * and they can not take breakpoints or page faults, the update of the * CR2 must be done before converting the nmi state back to NOT_RUNNING. * Otherwise, there would be a race of another nested NMI coming in * after setting state to NOT_RUNNING but before updating the nmi_cr2. */ enum nmi_states { NMI_NOT_RUNNING = 0, NMI_EXECUTING, NMI_LATCHED, }; static DEFINE_PER_CPU(enum nmi_states, nmi_state); static DEFINE_PER_CPU(unsigned long, nmi_cr2); #ifdef CONFIG_X86_64 /* * In x86_64, we need to handle breakpoint -> NMI -> breakpoint. Without * some care, the inner breakpoint will clobber the outer breakpoint's * stack. * * If a breakpoint is being processed, and the debug stack is being * used, if an NMI comes in and also hits a breakpoint, the stack * pointer will be set to the same fixed address as the breakpoint that * was interrupted, causing that stack to be corrupted. To handle this * case, check if the stack that was interrupted is the debug stack, and * if so, change the IDT so that new breakpoints will use the current * stack and not switch to the fixed address. On return of the NMI, * switch back to the original IDT. 
*/ static DEFINE_PER_CPU(int, update_debug_stack); #endif dotraplinkage notrace void do_nmi(struct pt_regs *regs, long error_code) { if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { this_cpu_write(nmi_state, NMI_LATCHED); return; } this_cpu_write(nmi_state, NMI_EXECUTING); this_cpu_write(nmi_cr2, read_cr2()); nmi_restart: #ifdef CONFIG_X86_64 /* * If we interrupted a breakpoint, it is possible that * the nmi handler will have breakpoints too. We need to * change the IDT such that breakpoints that happen here * continue to use the NMI stack. */ if (unlikely(is_debug_stack(regs->sp))) { debug_stack_set_zero(); this_cpu_write(update_debug_stack, 1); } #endif nmi_enter(); inc_irq_stat(__nmi_count); if (!ignore_nmis) default_do_nmi(regs); nmi_exit(); #ifdef CONFIG_X86_64 if (unlikely(this_cpu_read(update_debug_stack))) { debug_stack_reset(); this_cpu_write(update_debug_stack, 0); } #endif if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) write_cr2(this_cpu_read(nmi_cr2)); if (this_cpu_dec_return(nmi_state)) goto nmi_restart; } void stop_nmi(void) { ignore_nmis++; } void restart_nmi(void) { ignore_nmis--; } /* reset the back-to-back NMI logic */ void local_touch_nmi(void) { __this_cpu_write(last_nmi_rip, 0); } EXPORT_SYMBOL_GPL(local_touch_nmi);
gpl-2.0
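Other subsystems attach to this NMI infrastructure through the register_nmi_handler() macro declared in <asm/nmi.h>, which builds the struct nmiaction that __register_nmi_handler() above expects. Below is a sketch of a tiny observer module written under that assumption; the exact interface varies between kernel versions, so treat it as illustrative rather than a drop-in example.

/* Minimal module that counts local NMIs without claiming them. */
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <asm/nmi.h>

static atomic_t nmi_hits = ATOMIC_INIT(0);

static int demo_nmi_handler(unsigned int type, struct pt_regs *regs)
{
	atomic_inc(&nmi_hits);
	return NMI_DONE;	/* we only observe; let other handlers claim it */
}

static int __init demo_init(void)
{
	return register_nmi_handler(NMI_LOCAL, demo_nmi_handler, 0, "nmi_demo");
}

static void __exit demo_exit(void)
{
	unregister_nmi_handler(NMI_LOCAL, "nmi_demo");
	pr_info("nmi_demo saw %d NMIs\n", atomic_read(&nmi_hits));
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");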
FernetMenta/xbmc
xbmc/utils/JSONVariantWriter.cpp
21
2800
/* * Copyright (C) 2015 Team XBMC * http://xbmc.org * * This Program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This Program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with XBMC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * */ #include "JSONVariantWriter.h" #include <rapidjson/prettywriter.h> #include <rapidjson/stringbuffer.h> #include <rapidjson/writer.h> #include "utils/Variant.h" template<class TWriter> bool InternalWrite(TWriter& writer, const CVariant &value) { switch (value.type()) { case CVariant::VariantTypeInteger: return writer.Int64(value.asInteger()); case CVariant::VariantTypeUnsignedInteger: return writer.Uint64(value.asUnsignedInteger()); case CVariant::VariantTypeDouble: return writer.Double(value.asDouble()); case CVariant::VariantTypeBoolean: return writer.Bool(value.asBoolean()); case CVariant::VariantTypeString: return writer.String(value.c_str(), value.size()); case CVariant::VariantTypeArray: if (!writer.StartArray()) return false; for (CVariant::const_iterator_array itr = value.begin_array(); itr != value.end_array(); ++itr) { if (!InternalWrite(writer, *itr)) return false; } return writer.EndArray(value.size()); case CVariant::VariantTypeObject: if (!writer.StartObject()) return false; for (CVariant::const_iterator_map itr = value.begin_map(); itr != value.end_map(); ++itr) { if (!writer.Key(itr->first.c_str()) || !InternalWrite(writer, itr->second)) return false; } return writer.EndObject(value.size()); case CVariant::VariantTypeConstNull: case CVariant::VariantTypeNull: default: return writer.Null(); } return false; } bool CJSONVariantWriter::Write(const CVariant &value, std::string& output, bool compact) { rapidjson::StringBuffer stringBuffer; if (compact) { rapidjson::Writer<rapidjson::StringBuffer> writer(stringBuffer); if (!InternalWrite(writer, value) || !writer.IsComplete()) return false; } else { rapidjson::PrettyWriter<rapidjson::StringBuffer> writer(stringBuffer); writer.SetIndent('\t', 1); if (!InternalWrite(writer, value) || !writer.IsComplete()) return false; } output = stringBuffer.GetString(); return true; }
gpl-2.0
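InternalWrite() above is a straightforward recursive walk of a variant value that forwards each node to a rapidjson writer. To keep that idea visible without the rapidjson dependency, here is a hypothetical, self-contained C sketch of the same recursive serialization over a small tagged union; it deliberately omits string escaping and the other duties a real JSON writer has.

/* Serialize a tiny tagged union as JSON by recursing over its children. */
#include <stdio.h>

enum vtype { V_NULL, V_INT, V_BOOL, V_STR, V_ARRAY };

struct variant {
	enum vtype type;
	long i;
	int b;
	const char *s;
	const struct variant *items;
	int nitems;
};

static void write_json(const struct variant *v, FILE *out)
{
	switch (v->type) {
	case V_INT:
		fprintf(out, "%ld", v->i);
		break;
	case V_BOOL:
		fputs(v->b ? "true" : "false", out);
		break;
	case V_STR:
		fprintf(out, "\"%s\"", v->s);	/* no escaping in this sketch */
		break;
	case V_ARRAY:
		fputc('[', out);
		for (int i = 0; i < v->nitems; i++) {
			if (i)
				fputc(',', out);
			write_json(&v->items[i], out);
		}
		fputc(']', out);
		break;
	case V_NULL:
	default:
		fputs("null", out);
		break;
	}
}

int main(void)
{
	struct variant items[] = {
		{ .type = V_INT,  .i = 42 },
		{ .type = V_STR,  .s = "xbmc" },
		{ .type = V_BOOL, .b = 1 },
	};
	struct variant root = { .type = V_ARRAY, .items = items, .nitems = 3 };

	write_json(&root, stdout);	/* prints [42,"xbmc",true] */
	fputc('\n', stdout);
	return 0;
}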
boa19861105/android_LP5.0.2_kernel_htc_dlxpul
arch/arm/mach-msm/htc_awb_cal.c
21
5050
/* arch/arm/mach-msm/htc_awb_cal.c */ /* Code to extract Camera AWB calibration information from ATAG set up by the bootloader. Copyright (C) 2008 Google, Inc. Author: Dmitry Shmidt <dimitrysh@google.com> This software is licensed under the terms of the GNU General Public License version 2, as published by the Free Software Foundation, and may be copied, distributed, and modified under those terms. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/proc_fs.h> #include <asm/setup.h> /* for outputing file to filesystem : /data/awb_calibration_data_hboot.txt */ #include <linux/fs.h> #include <linux/syscalls.h> /* configuration tags specific to msm */ #define ATAG_MSM_AWB_CAL 0x59504550 /* MSM CAMERA AWB Calibration */ #define AWB_CAL_MAX_SIZE 0x2000U /* 0x1000 = 4096 bytes 0x2000 = 8192 bytes */ struct qct_lsc_struct{ unsigned long int lsc_verify; unsigned long int lsc_fuseid[4]; float pCalcParam[17*13*4]; unsigned long int lsc_checksum; }; struct qct_awb_lsc_struct{ unsigned long int caBuff[8];/* AWB Calibartion */ struct qct_lsc_struct qct_lsc_data;/* LSC Calibration */ /* Andrew_Cheng 20120223 For Flash_Camera MB */ unsigned long int flashcaBuff[8]; //flash_camera /* Andrew_Cheng 20120223 For Flash_Camera ME */ /* HTC_START Horng 20130118 - OIS calibration */ unsigned long int ois_data[8]; /* HTC_END */ }; static unsigned char cam_awb_ram[AWB_CAL_MAX_SIZE]; int gCAM_AWB_CAL_LEN; unsigned char *get_cam_awb_cal(void) { return cam_awb_ram; } EXPORT_SYMBOL(get_cam_awb_cal); /* HTC_START */ /* klocwork */ unsigned char *dummy(unsigned char *p) { return p; } /* HTC_END */ static int __init parse_tag_cam_awb_cal(const struct tag *tag) { unsigned char *dptr = (unsigned char *)(&tag->u); unsigned size; size = min((tag->hdr.size - 2) * sizeof(__u32), AWB_CAL_MAX_SIZE); printk(KERN_INFO "CAM_AWB_CAL Data size = %d , 0x%x, size = %d (%d,%d)\n", tag->hdr.size, tag->hdr.tag, size, ((tag->hdr.size - 2) * sizeof(__u32)), (AWB_CAL_MAX_SIZE)); gCAM_AWB_CAL_LEN = size; memcpy(cam_awb_ram, dummy(dptr), size); /* HTC */ #ifdef ATAG_CAM_AWB_CAL_DEBUG { int *pint, i; printk(KERN_INFO "parse_tag_cam_awb_cal():\n"); pint = (int *)cam_awb_ram; for (i = 0; i < 1024; i++) printk(KERN_INFO "%x\n", pint[i]); } #endif return 0; } __tagtable(ATAG_MSM_AWB_CAL, parse_tag_cam_awb_cal); static ssize_t awb_calibration_show(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t ret = 0; unsigned char *ptr; ptr = get_cam_awb_cal(); /* fixed : workaround because of defined 8 parameters now */ ret = sizeof(struct qct_awb_lsc_struct);/* 8*4; */ //ret = gCAM_AWB_CAL_LEN; printk(KERN_INFO "awb_calibration_show(%d)\n", ret); memcpy(buf, ptr, ret); #ifdef ATAG_CAM_AWB_CAL_DEBUG { int i, *pint; printk(KERN_INFO "awb_calibration_show():\n"); pint = (int *)buf; for (i = 0; i < 914; i++) printk(KERN_INFO "%d-%x\n", i, pint[i]); } #endif return ret; } static ssize_t awb_calibration_front_show(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t ret = 0; unsigned char *ptr; if (gCAM_AWB_CAL_LEN < AWB_CAL_MAX_SIZE) return 0; ptr = get_cam_awb_cal(); /* fixed : workaround because of defined 8 parameters now */ ret = sizeof(struct qct_awb_lsc_struct);/* 8*4; */ //ret = gCAM_AWB_CAL_LEN; 
printk(KERN_INFO "awb_calibration_front_show(%d)\n", ret); memcpy(buf, ptr + 0x1000U, ret); #ifdef ATAG_CAM_AWB_CAL_DEBUG { int i, *pint; printk(KERN_INFO "awb_calibration_front_show():\n"); pint = (int *)buf; for (i = 0; i < 898; i++) printk(KERN_INFO "%x\n", pint[i]); } #endif return ret; } static DEVICE_ATTR(awb_cal, 0444, awb_calibration_show, NULL); static DEVICE_ATTR(awb_cal_front, 0444, awb_calibration_front_show, NULL); static struct kobject *cam_awb_cal; static int cam_get_awb_cal(void) { int ret ; /* Create /sys/android_camera_awb_cal/awb_cal */ cam_awb_cal = kobject_create_and_add("android_camera_awb_cal", NULL); if (cam_awb_cal == NULL) { pr_info("cam_get_awb_cal: subsystem_register failed\n"); ret = -ENOMEM; return ret ; } /* dev_attr_[register_name]<== DEVICE_ATTR(awb_cal, 0444, awb_calibration_show, NULL); */ ret = sysfs_create_file(cam_awb_cal, &dev_attr_awb_cal.attr); if (ret) { pr_info("cam_get_awb_cal:: sysfs_create_file failed\n"); kobject_del(cam_awb_cal); goto end; } if (gCAM_AWB_CAL_LEN < AWB_CAL_MAX_SIZE) goto end; ret = sysfs_create_file(cam_awb_cal, &dev_attr_awb_cal_front.attr); if (ret) { pr_info("cam_get_awb_cal_front:: sysfs_create_file failed\n"); kobject_del(cam_awb_cal); goto end; } end: return 0 ; } late_initcall(cam_get_awb_cal);
gpl-2.0
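Because the calibration blob is exported read-only through sysfs, consuming it from user space is just an open()+read() of the attribute created by cam_get_awb_cal(). A hedged sketch of such a reader follows, with the path and buffer size taken from the driver source above rather than verified on any particular device.

/* Read the AWB/LSC calibration blob exposed by the driver via sysfs. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/android_camera_awb_cal/awb_cal";
	unsigned char buf[8192];	/* AWB_CAL_MAX_SIZE in the driver */
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror(path);
		return EXIT_FAILURE;
	}

	n = read(fd, buf, sizeof(buf));
	close(fd);
	if (n < 0) {
		perror("read");
		return EXIT_FAILURE;
	}

	printf("read %zd bytes of AWB/LSC calibration data\n", n);
	return EXIT_SUCCESS;
}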
sumitn/linux-2.6.24
arch/sparc64/kernel/pci_common.c
21
12885
/* pci_common.c: PCI controller common support. * * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net) */ #include <linux/string.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/device.h> #include <asm/prom.h> #include <asm/of_device.h> #include <asm/oplib.h> #include "pci_impl.h" #include "pci_sun4v.h" static int config_out_of_range(struct pci_pbm_info *pbm, unsigned long bus, unsigned long devfn, unsigned long reg) { if (bus < pbm->pci_first_busno || bus > pbm->pci_last_busno) return 1; return 0; } static void *sun4u_config_mkaddr(struct pci_pbm_info *pbm, unsigned long bus, unsigned long devfn, unsigned long reg) { unsigned long rbits = pbm->config_space_reg_bits; if (config_out_of_range(pbm, bus, devfn, reg)) return NULL; reg = (reg & ((1 << rbits) - 1)); devfn <<= rbits; bus <<= rbits + 8; return (void *) (pbm->config_space | bus | devfn | reg); } /* At least on Sabre, it is necessary to access all PCI host controller * registers at their natural size, otherwise zeros are returned. * Strange but true, and I see no language in the UltraSPARC-IIi * programmer's manual that mentions this even indirectly. */ static int sun4u_read_pci_cfg_host(struct pci_pbm_info *pbm, unsigned char bus, unsigned int devfn, int where, int size, u32 *value) { u32 tmp32, *addr; u16 tmp16; u8 tmp8; addr = sun4u_config_mkaddr(pbm, bus, devfn, where); if (!addr) return PCIBIOS_SUCCESSFUL; switch (size) { case 1: if (where < 8) { unsigned long align = (unsigned long) addr; align &= ~1; pci_config_read16((u16 *)align, &tmp16); if (where & 1) *value = tmp16 >> 8; else *value = tmp16 & 0xff; } else { pci_config_read8((u8 *)addr, &tmp8); *value = (u32) tmp8; } break; case 2: if (where < 8) { pci_config_read16((u16 *)addr, &tmp16); *value = (u32) tmp16; } else { pci_config_read8((u8 *)addr, &tmp8); *value = (u32) tmp8; pci_config_read8(((u8 *)addr) + 1, &tmp8); *value |= ((u32) tmp8) << 8; } break; case 4: tmp32 = 0xffffffff; sun4u_read_pci_cfg_host(pbm, bus, devfn, where, 2, &tmp32); *value = tmp32; tmp32 = 0xffffffff; sun4u_read_pci_cfg_host(pbm, bus, devfn, where + 2, 2, &tmp32); *value |= tmp32 << 16; break; } return PCIBIOS_SUCCESSFUL; } static int sun4u_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, int where, int size, u32 *value) { struct pci_pbm_info *pbm = bus_dev->sysdata; unsigned char bus = bus_dev->number; u32 *addr; u16 tmp16; u8 tmp8; switch (size) { case 1: *value = 0xff; break; case 2: *value = 0xffff; break; case 4: *value = 0xffffffff; break; } if (!bus_dev->number && !PCI_SLOT(devfn)) return sun4u_read_pci_cfg_host(pbm, bus, devfn, where, size, value); addr = sun4u_config_mkaddr(pbm, bus, devfn, where); if (!addr) return PCIBIOS_SUCCESSFUL; switch (size) { case 1: pci_config_read8((u8 *)addr, &tmp8); *value = (u32) tmp8; break; case 2: if (where & 0x01) { printk("pci_read_config_word: misaligned reg [%x]\n", where); return PCIBIOS_SUCCESSFUL; } pci_config_read16((u16 *)addr, &tmp16); *value = (u32) tmp16; break; case 4: if (where & 0x03) { printk("pci_read_config_dword: misaligned reg [%x]\n", where); return PCIBIOS_SUCCESSFUL; } pci_config_read32(addr, value); break; } return PCIBIOS_SUCCESSFUL; } static int sun4u_write_pci_cfg_host(struct pci_pbm_info *pbm, unsigned char bus, unsigned int devfn, int where, int size, u32 value) { u32 *addr; addr = sun4u_config_mkaddr(pbm, bus, devfn, where); if (!addr) return PCIBIOS_SUCCESSFUL; switch (size) { case 1: if (where < 8) { unsigned long align = (unsigned long) addr; u16 tmp16; align &= 
~1; pci_config_read16((u16 *)align, &tmp16); if (where & 1) { tmp16 &= 0x00ff; tmp16 |= value << 8; } else { tmp16 &= 0xff00; tmp16 |= value; } pci_config_write16((u16 *)align, tmp16); } else pci_config_write8((u8 *)addr, value); break; case 2: if (where < 8) { pci_config_write16((u16 *)addr, value); } else { pci_config_write8((u8 *)addr, value & 0xff); pci_config_write8(((u8 *)addr) + 1, value >> 8); } break; case 4: sun4u_write_pci_cfg_host(pbm, bus, devfn, where, 2, value & 0xffff); sun4u_write_pci_cfg_host(pbm, bus, devfn, where + 2, 2, value >> 16); break; } return PCIBIOS_SUCCESSFUL; } static int sun4u_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, int where, int size, u32 value) { struct pci_pbm_info *pbm = bus_dev->sysdata; unsigned char bus = bus_dev->number; u32 *addr; if (!bus_dev->number && !PCI_SLOT(devfn)) return sun4u_write_pci_cfg_host(pbm, bus, devfn, where, size, value); addr = sun4u_config_mkaddr(pbm, bus, devfn, where); if (!addr) return PCIBIOS_SUCCESSFUL; switch (size) { case 1: pci_config_write8((u8 *)addr, value); break; case 2: if (where & 0x01) { printk("pci_write_config_word: misaligned reg [%x]\n", where); return PCIBIOS_SUCCESSFUL; } pci_config_write16((u16 *)addr, value); break; case 4: if (where & 0x03) { printk("pci_write_config_dword: misaligned reg [%x]\n", where); return PCIBIOS_SUCCESSFUL; } pci_config_write32(addr, value); } return PCIBIOS_SUCCESSFUL; } struct pci_ops sun4u_pci_ops = { .read = sun4u_read_pci_cfg, .write = sun4u_write_pci_cfg, }; static int sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, int where, int size, u32 *value) { struct pci_pbm_info *pbm = bus_dev->sysdata; u32 devhandle = pbm->devhandle; unsigned int bus = bus_dev->number; unsigned int device = PCI_SLOT(devfn); unsigned int func = PCI_FUNC(devfn); unsigned long ret; if (!bus && devfn == 0x00) return pci_host_bridge_read_pci_cfg(bus_dev, devfn, where, size, value); if (config_out_of_range(pbm, bus, devfn, where)) { ret = ~0UL; } else { ret = pci_sun4v_config_get(devhandle, HV_PCI_DEVICE_BUILD(bus, device, func), where, size); } switch (size) { case 1: *value = ret & 0xff; break; case 2: *value = ret & 0xffff; break; case 4: *value = ret & 0xffffffff; break; }; return PCIBIOS_SUCCESSFUL; } static int sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, int where, int size, u32 value) { struct pci_pbm_info *pbm = bus_dev->sysdata; u32 devhandle = pbm->devhandle; unsigned int bus = bus_dev->number; unsigned int device = PCI_SLOT(devfn); unsigned int func = PCI_FUNC(devfn); unsigned long ret; if (!bus && devfn == 0x00) return pci_host_bridge_write_pci_cfg(bus_dev, devfn, where, size, value); if (config_out_of_range(pbm, bus, devfn, where)) { /* Do nothing. */ } else { ret = pci_sun4v_config_put(devhandle, HV_PCI_DEVICE_BUILD(bus, device, func), where, size, value); } return PCIBIOS_SUCCESSFUL; } struct pci_ops sun4v_pci_ops = { .read = sun4v_read_pci_cfg, .write = sun4v_write_pci_cfg, }; void pci_get_pbm_props(struct pci_pbm_info *pbm) { const u32 *val = of_get_property(pbm->prom_node, "bus-range", NULL); pbm->pci_first_busno = val[0]; pbm->pci_last_busno = val[1]; val = of_get_property(pbm->prom_node, "ino-bitmap", NULL); if (val) { pbm->ino_bitmap = (((u64)val[1] << 32UL) | ((u64)val[0] << 0UL)); } } static void pci_register_legacy_regions(struct resource *io_res, struct resource *mem_res) { struct resource *p; /* VGA Video RAM. 
*/ p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return; p->name = "Video RAM area"; p->start = mem_res->start + 0xa0000UL; p->end = p->start + 0x1ffffUL; p->flags = IORESOURCE_BUSY; request_resource(mem_res, p); p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return; p->name = "System ROM"; p->start = mem_res->start + 0xf0000UL; p->end = p->start + 0xffffUL; p->flags = IORESOURCE_BUSY; request_resource(mem_res, p); p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return; p->name = "Video ROM"; p->start = mem_res->start + 0xc0000UL; p->end = p->start + 0x7fffUL; p->flags = IORESOURCE_BUSY; request_resource(mem_res, p); } static void pci_register_iommu_region(struct pci_pbm_info *pbm) { const u32 *vdma = of_get_property(pbm->prom_node, "virtual-dma", NULL); if (vdma) { struct resource *rp = kmalloc(sizeof(*rp), GFP_KERNEL); if (!rp) { prom_printf("Cannot allocate IOMMU resource.\n"); prom_halt(); } rp->name = "IOMMU"; rp->start = pbm->mem_space.start + (unsigned long) vdma[0]; rp->end = rp->start + (unsigned long) vdma[1] - 1UL; rp->flags = IORESOURCE_BUSY; request_resource(&pbm->mem_space, rp); } } void pci_determine_mem_io_space(struct pci_pbm_info *pbm) { const struct linux_prom_pci_ranges *pbm_ranges; int i, saw_mem, saw_io; int num_pbm_ranges; saw_mem = saw_io = 0; pbm_ranges = of_get_property(pbm->prom_node, "ranges", &i); if (!pbm_ranges) { prom_printf("PCI: Fatal error, missing PBM ranges property " " for %s\n", pbm->name); prom_halt(); } num_pbm_ranges = i / sizeof(*pbm_ranges); for (i = 0; i < num_pbm_ranges; i++) { const struct linux_prom_pci_ranges *pr = &pbm_ranges[i]; unsigned long a, size; u32 parent_phys_hi, parent_phys_lo; u32 size_hi, size_lo; int type; parent_phys_hi = pr->parent_phys_hi; parent_phys_lo = pr->parent_phys_lo; if (tlb_type == hypervisor) parent_phys_hi &= 0x0fffffff; size_hi = pr->size_hi; size_lo = pr->size_lo; type = (pr->child_phys_hi >> 24) & 0x3; a = (((unsigned long)parent_phys_hi << 32UL) | ((unsigned long)parent_phys_lo << 0UL)); size = (((unsigned long)size_hi << 32UL) | ((unsigned long)size_lo << 0UL)); switch (type) { case 0: /* PCI config space, 16MB */ pbm->config_space = a; break; case 1: /* 16-bit IO space, 16MB */ pbm->io_space.start = a; pbm->io_space.end = a + size - 1UL; pbm->io_space.flags = IORESOURCE_IO; saw_io = 1; break; case 2: /* 32-bit MEM space, 2GB */ pbm->mem_space.start = a; pbm->mem_space.end = a + size - 1UL; pbm->mem_space.flags = IORESOURCE_MEM; saw_mem = 1; break; case 3: /* XXX 64-bit MEM handling XXX */ default: break; }; } if (!saw_io || !saw_mem) { prom_printf("%s: Fatal error, missing %s PBM range.\n", pbm->name, (!saw_io ? "IO" : "MEM")); prom_halt(); } printk("%s: PCI IO[%lx] MEM[%lx]\n", pbm->name, pbm->io_space.start, pbm->mem_space.start); pbm->io_space.name = pbm->mem_space.name = pbm->name; request_resource(&ioport_resource, &pbm->io_space); request_resource(&iomem_resource, &pbm->mem_space); pci_register_legacy_regions(&pbm->io_space, &pbm->mem_space); pci_register_iommu_region(pbm); } /* Generic helper routines for PCI error reporting. 
*/ void pci_scan_for_target_abort(struct pci_pbm_info *pbm, struct pci_bus *pbus) { struct pci_dev *pdev; struct pci_bus *bus; list_for_each_entry(pdev, &pbus->devices, bus_list) { u16 status, error_bits; pci_read_config_word(pdev, PCI_STATUS, &status); error_bits = (status & (PCI_STATUS_SIG_TARGET_ABORT | PCI_STATUS_REC_TARGET_ABORT)); if (error_bits) { pci_write_config_word(pdev, PCI_STATUS, error_bits); printk("%s: Device %s saw Target Abort [%016x]\n", pbm->name, pci_name(pdev), status); } } list_for_each_entry(bus, &pbus->children, node) pci_scan_for_target_abort(pbm, bus); } void pci_scan_for_master_abort(struct pci_pbm_info *pbm, struct pci_bus *pbus) { struct pci_dev *pdev; struct pci_bus *bus; list_for_each_entry(pdev, &pbus->devices, bus_list) { u16 status, error_bits; pci_read_config_word(pdev, PCI_STATUS, &status); error_bits = (status & (PCI_STATUS_REC_MASTER_ABORT)); if (error_bits) { pci_write_config_word(pdev, PCI_STATUS, error_bits); printk("%s: Device %s received Master Abort [%016x]\n", pbm->name, pci_name(pdev), status); } } list_for_each_entry(bus, &pbus->children, node) pci_scan_for_master_abort(pbm, bus); } void pci_scan_for_parity_error(struct pci_pbm_info *pbm, struct pci_bus *pbus) { struct pci_dev *pdev; struct pci_bus *bus; list_for_each_entry(pdev, &pbus->devices, bus_list) { u16 status, error_bits; pci_read_config_word(pdev, PCI_STATUS, &status); error_bits = (status & (PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY)); if (error_bits) { pci_write_config_word(pdev, PCI_STATUS, error_bits); printk("%s: Device %s saw Parity Error [%016x]\n", pbm->name, pci_name(pdev), status); } } list_for_each_entry(bus, &pbus->children, node) pci_scan_for_parity_error(pbm, bus); }
gpl-2.0
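The sun4u config accessors in pci_common.c above pack bus, devfn and register offset into a single config-space address: the register is masked to config_space_reg_bits bits, devfn sits directly above it, and the bus number above that. A minimal user-space sketch of that packing follows; the rbits value (8) and the base address are illustrative assumptions, not values read from real hardware.

#include <stdio.h>
#include <stdint.h>

/* Mirrors sun4u_config_mkaddr()'s offset math: reg is masked to rbits bits,
 * devfn is shifted just above it and the bus number above that (rbits + 8).
 * rbits = 8 and the base address are made-up values for illustration. */
static uint64_t mkaddr(uint64_t base, unsigned int rbits,
		       unsigned int bus, unsigned int devfn, unsigned int reg)
{
	reg &= (1u << rbits) - 1;
	return base | ((uint64_t)bus << (rbits + 8)) |
	       ((uint64_t)devfn << rbits) | reg;
}

int main(void)
{
	/* bus 1, slot 2 function 0 (devfn 0x10), register 0x04 */
	printf("cfg addr: 0x%llx\n",
	       (unsigned long long)mkaddr(0x1fe01000000ULL, 8, 1, 0x10, 0x04));
	return 0;
}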
zarboz/s2w-VilleZ
kernel/time/alarmtimer.c
533
18450
/* * Alarmtimer interface * * This interface provides a timer which is similarto hrtimers, * but triggers a RTC alarm if the box is suspend. * * This interface is influenced by the Android RTC Alarm timer * interface. * * Copyright (C) 2010 IBM Corperation * * Author: John Stultz <john.stultz@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/time.h> #include <linux/hrtimer.h> #include <linux/timerqueue.h> #include <linux/rtc.h> #include <linux/alarmtimer.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/posix-timers.h> #include <linux/workqueue.h> #include <linux/freezer.h> /** * struct alarm_base - Alarm timer bases * @lock: Lock for syncrhonized access to the base * @timerqueue: Timerqueue head managing the list of events * @timer: hrtimer used to schedule events while running * @gettime: Function to read the time correlating to the base * @base_clockid: clockid for the base */ static struct alarm_base { spinlock_t lock; struct timerqueue_head timerqueue; struct hrtimer timer; ktime_t (*gettime)(void); clockid_t base_clockid; } alarm_bases[ALARM_NUMTYPE]; /* freezer delta & lock used to handle clock_nanosleep triggered wakeups */ static ktime_t freezer_delta; static DEFINE_SPINLOCK(freezer_delta_lock); #ifdef CONFIG_RTC_CLASS /* rtc timer and device for setting alarm wakeups at suspend */ static struct rtc_timer rtctimer; static struct rtc_device *rtcdev; static DEFINE_SPINLOCK(rtcdev_lock); /** * has_wakealarm - check rtc device has wakealarm ability * @dev: current device * @name_ptr: name to be returned * * This helper function checks to see if the rtc device can wake * from suspend. */ static int has_wakealarm(struct device *dev, void *name_ptr) { struct rtc_device *candidate = to_rtc_device(dev); if (!candidate->ops->set_alarm) return 0; if (!device_may_wakeup(candidate->dev.parent)) return 0; *(const char **)name_ptr = dev_name(dev); return 1; } /** * alarmtimer_get_rtcdev - Return selected rtcdevice * * This function returns the rtc device to use for wakealarms. * If one has not already been chosen, it checks to see if a * functional rtc device is available. */ static struct rtc_device *alarmtimer_get_rtcdev(void) { struct device *dev; char *str; unsigned long flags; struct rtc_device *ret; spin_lock_irqsave(&rtcdev_lock, flags); if (!rtcdev) { /* Find an rtc device and init the rtc_timer */ dev = class_find_device(rtc_class, NULL, &str, has_wakealarm); /* If we have a device then str is valid. See has_wakealarm() */ if (dev) { rtcdev = rtc_class_open(str); /* * Drop the reference we got in class_find_device, * rtc_open takes its own. */ put_device(dev); rtc_timer_init(&rtctimer, NULL, NULL); } } ret = rtcdev; spin_unlock_irqrestore(&rtcdev_lock, flags); return ret; } #else #define alarmtimer_get_rtcdev() (0) #define rtcdev (0) #endif /** * alarmtimer_enqueue - Adds an alarm timer to an alarm_base timerqueue * @base: pointer to the base where the timer is being run * @alarm: pointer to alarm being enqueued. * * Adds alarm to a alarm_base timerqueue and if necessary sets * an hrtimer to run. * * Must hold base->lock when calling. 
*/ static void alarmtimer_enqueue(struct alarm_base *base, struct alarm *alarm) { timerqueue_add(&base->timerqueue, &alarm->node); if (&alarm->node == timerqueue_getnext(&base->timerqueue)) { hrtimer_try_to_cancel(&base->timer); hrtimer_start(&base->timer, alarm->node.expires, HRTIMER_MODE_ABS); } } /** * alarmtimer_remove - Removes an alarm timer from an alarm_base timerqueue * @base: pointer to the base where the timer is running * @alarm: pointer to alarm being removed * * Removes alarm to a alarm_base timerqueue and if necessary sets * a new timer to run. * * Must hold base->lock when calling. */ static void alarmtimer_remove(struct alarm_base *base, struct alarm *alarm) { struct timerqueue_node *next = timerqueue_getnext(&base->timerqueue); timerqueue_del(&base->timerqueue, &alarm->node); if (next == &alarm->node) { hrtimer_try_to_cancel(&base->timer); next = timerqueue_getnext(&base->timerqueue); if (!next) return; hrtimer_start(&base->timer, next->expires, HRTIMER_MODE_ABS); } } /** * alarmtimer_fired - Handles alarm hrtimer being fired. * @timer: pointer to hrtimer being run * * When a alarm timer fires, this runs through the timerqueue to * see which alarms expired, and runs those. If there are more alarm * timers queued for the future, we set the hrtimer to fire when * when the next future alarm timer expires. */ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer) { struct alarm_base *base = container_of(timer, struct alarm_base, timer); struct timerqueue_node *next; unsigned long flags; ktime_t now; int ret = HRTIMER_NORESTART; spin_lock_irqsave(&base->lock, flags); now = base->gettime(); while ((next = timerqueue_getnext(&base->timerqueue))) { struct alarm *alarm; ktime_t expired = next->expires; if (expired.tv64 >= now.tv64) break; alarm = container_of(next, struct alarm, node); timerqueue_del(&base->timerqueue, &alarm->node); alarm->enabled = 0; /* Re-add periodic timers */ if (alarm->period.tv64) { alarm->node.expires = ktime_add(expired, alarm->period); timerqueue_add(&base->timerqueue, &alarm->node); alarm->enabled = 1; } spin_unlock_irqrestore(&base->lock, flags); if (alarm->function) alarm->function(alarm); spin_lock_irqsave(&base->lock, flags); } if (next) { hrtimer_set_expires(&base->timer, next->expires); ret = HRTIMER_RESTART; } spin_unlock_irqrestore(&base->lock, flags); return ret; } #ifdef CONFIG_RTC_CLASS /** * alarmtimer_suspend - Suspend time callback * @dev: unused * @state: unused * * When we are going into suspend, we look through the bases * to see which is the soonest timer to expire. We then * set an rtc timer to fire that far into the future, which * will wake us from suspend. */ static int alarmtimer_suspend(struct device *dev) { struct rtc_time tm; ktime_t min, now; unsigned long flags; struct rtc_device *rtc; int i; spin_lock_irqsave(&freezer_delta_lock, flags); min = freezer_delta; freezer_delta = ktime_set(0, 0); spin_unlock_irqrestore(&freezer_delta_lock, flags); rtc = rtcdev; /* If we have no rtcdev, just return */ if (!rtc) return 0; /* Find the soonest timer to expire*/ for (i = 0; i < ALARM_NUMTYPE; i++) { struct alarm_base *base = &alarm_bases[i]; struct timerqueue_node *next; ktime_t delta; spin_lock_irqsave(&base->lock, flags); next = timerqueue_getnext(&base->timerqueue); spin_unlock_irqrestore(&base->lock, flags); if (!next) continue; delta = ktime_sub(next->expires, base->gettime()); if (!min.tv64 || (delta.tv64 < min.tv64)) min = delta; } if (min.tv64 == 0) return 0; /* XXX - Should we enforce a minimum sleep time? 
*/ WARN_ON(min.tv64 < NSEC_PER_SEC); /* Setup an rtc timer to fire that far in the future */ rtc_timer_cancel(rtc, &rtctimer); rtc_read_time(rtc, &tm); now = rtc_tm_to_ktime(tm); now = ktime_add(now, min); rtc_timer_start(rtc, &rtctimer, now, ktime_set(0, 0)); return 0; } #else static int alarmtimer_suspend(struct device *dev) { return 0; } #endif static void alarmtimer_freezerset(ktime_t absexp, enum alarmtimer_type type) { ktime_t delta; unsigned long flags; struct alarm_base *base = &alarm_bases[type]; delta = ktime_sub(absexp, base->gettime()); spin_lock_irqsave(&freezer_delta_lock, flags); if (!freezer_delta.tv64 || (delta.tv64 < freezer_delta.tv64)) freezer_delta = delta; spin_unlock_irqrestore(&freezer_delta_lock, flags); } /** * alarm_init - Initialize an alarm structure * @alarm: ptr to alarm to be initialized * @type: the type of the alarm * @function: callback that is run when the alarm fires */ void alarm_init(struct alarm *alarm, enum alarmtimer_type type, void (*function)(struct alarm *)) { timerqueue_init(&alarm->node); alarm->period = ktime_set(0, 0); alarm->function = function; alarm->type = type; alarm->enabled = 0; } /** * alarm_start - Sets an alarm to fire * @alarm: ptr to alarm to set * @start: time to run the alarm * @period: period at which the alarm will recur */ void alarm_start(struct alarm *alarm, ktime_t start, ktime_t period) { struct alarm_base *base = &alarm_bases[alarm->type]; unsigned long flags; spin_lock_irqsave(&base->lock, flags); if (alarm->enabled) alarmtimer_remove(base, alarm); alarm->node.expires = start; alarm->period = period; alarmtimer_enqueue(base, alarm); alarm->enabled = 1; spin_unlock_irqrestore(&base->lock, flags); } /** * alarm_cancel - Tries to cancel an alarm timer * @alarm: ptr to alarm to be canceled */ void alarm_cancel(struct alarm *alarm) { struct alarm_base *base = &alarm_bases[alarm->type]; unsigned long flags; spin_lock_irqsave(&base->lock, flags); if (alarm->enabled) alarmtimer_remove(base, alarm); alarm->enabled = 0; spin_unlock_irqrestore(&base->lock, flags); } /** * clock2alarm - helper that converts from clockid to alarmtypes * @clockid: clockid. */ static enum alarmtimer_type clock2alarm(clockid_t clockid) { if (clockid == CLOCK_REALTIME_ALARM) return ALARM_REALTIME; if (clockid == CLOCK_BOOTTIME_ALARM) return ALARM_BOOTTIME; return -1; } /** * alarm_handle_timer - Callback for posix timers * @alarm: alarm that fired * * Posix timer callback for expired alarm timers. */ static void alarm_handle_timer(struct alarm *alarm) { struct k_itimer *ptr = container_of(alarm, struct k_itimer, it.alarmtimer); if (posix_timer_event(ptr, 0) != 0) ptr->it_overrun++; } /** * alarm_clock_getres - posix getres interface * @which_clock: clockid * @tp: timespec to fill * * Returns the granularity of underlying alarm base clock */ static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp) { clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid; if (!alarmtimer_get_rtcdev()) return -ENOTSUPP; return hrtimer_get_res(baseid, tp); } /** * alarm_clock_get - posix clock_get interface * @which_clock: clockid * @tp: timespec to fill. * * Provides the underlying alarm base time. 
*/ static int alarm_clock_get(clockid_t which_clock, struct timespec *tp) { struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)]; if (!alarmtimer_get_rtcdev()) return -ENOTSUPP; *tp = ktime_to_timespec(base->gettime()); return 0; } /** * alarm_timer_create - posix timer_create interface * @new_timer: k_itimer pointer to manage * * Initializes the k_itimer structure. */ static int alarm_timer_create(struct k_itimer *new_timer) { enum alarmtimer_type type; struct alarm_base *base; if (!alarmtimer_get_rtcdev()) return -ENOTSUPP; if (!capable(CAP_WAKE_ALARM)) return -EPERM; type = clock2alarm(new_timer->it_clock); base = &alarm_bases[type]; alarm_init(&new_timer->it.alarmtimer, type, alarm_handle_timer); return 0; } /** * alarm_timer_get - posix timer_get interface * @new_timer: k_itimer pointer * @cur_setting: itimerspec data to fill * * Copies the itimerspec data out from the k_itimer */ static void alarm_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) { memset(cur_setting, 0, sizeof(struct itimerspec)); cur_setting->it_interval = ktime_to_timespec(timr->it.alarmtimer.period); cur_setting->it_value = ktime_to_timespec(timr->it.alarmtimer.node.expires); return; } /** * alarm_timer_del - posix timer_del interface * @timr: k_itimer pointer to be deleted * * Cancels any programmed alarms for the given timer. */ static int alarm_timer_del(struct k_itimer *timr) { if (!rtcdev) return -ENOTSUPP; alarm_cancel(&timr->it.alarmtimer); return 0; } /** * alarm_timer_set - posix timer_set interface * @timr: k_itimer pointer to be deleted * @flags: timer flags * @new_setting: itimerspec to be used * @old_setting: itimerspec being replaced * * Sets the timer to new_setting, and starts the timer. */ static int alarm_timer_set(struct k_itimer *timr, int flags, struct itimerspec *new_setting, struct itimerspec *old_setting) { if (!rtcdev) return -ENOTSUPP; /* * XXX HACK! Currently we can DOS a system if the interval * period on alarmtimers is too small. Cap the interval here * to 100us and solve this properly in a future patch! -jstultz */ if ((new_setting->it_interval.tv_sec == 0) && (new_setting->it_interval.tv_nsec < 100000)) new_setting->it_interval.tv_nsec = 100000; if (old_setting) alarm_timer_get(timr, old_setting); /* If the timer was already set, cancel it */ alarm_cancel(&timr->it.alarmtimer); /* start the timer */ alarm_start(&timr->it.alarmtimer, timespec_to_ktime(new_setting->it_value), timespec_to_ktime(new_setting->it_interval)); return 0; } /** * alarmtimer_nsleep_wakeup - Wakeup function for alarm_timer_nsleep * @alarm: ptr to alarm that fired * * Wakes up the task that set the alarmtimer */ static void alarmtimer_nsleep_wakeup(struct alarm *alarm) { struct task_struct *task = (struct task_struct *)alarm->data; alarm->data = NULL; if (task) wake_up_process(task); } /** * alarmtimer_do_nsleep - Internal alarmtimer nsleep implementation * @alarm: ptr to alarmtimer * @absexp: absolute expiration time * * Sets the alarm timer and sleeps until it is fired or interrupted. 
*/ static int alarmtimer_do_nsleep(struct alarm *alarm, ktime_t absexp) { alarm->data = (void *)current; do { set_current_state(TASK_INTERRUPTIBLE); alarm_start(alarm, absexp, ktime_set(0, 0)); if (likely(alarm->data)) schedule(); alarm_cancel(alarm); } while (alarm->data && !signal_pending(current)); __set_current_state(TASK_RUNNING); return (alarm->data == NULL); } /** * update_rmtp - Update remaining timespec value * @exp: expiration time * @type: timer type * @rmtp: user pointer to remaining timepsec value * * Helper function that fills in rmtp value with time between * now and the exp value */ static int update_rmtp(ktime_t exp, enum alarmtimer_type type, struct timespec __user *rmtp) { struct timespec rmt; ktime_t rem; rem = ktime_sub(exp, alarm_bases[type].gettime()); if (rem.tv64 <= 0) return 0; rmt = ktime_to_timespec(rem); if (copy_to_user(rmtp, &rmt, sizeof(*rmtp))) return -EFAULT; return 1; } /** * alarm_timer_nsleep_restart - restartblock alarmtimer nsleep * @restart: ptr to restart block * * Handles restarted clock_nanosleep calls */ static long __sched alarm_timer_nsleep_restart(struct restart_block *restart) { enum alarmtimer_type type = restart->nanosleep.clockid; ktime_t exp; struct timespec __user *rmtp; struct alarm alarm; int ret = 0; exp.tv64 = restart->nanosleep.expires; alarm_init(&alarm, type, alarmtimer_nsleep_wakeup); if (alarmtimer_do_nsleep(&alarm, exp)) goto out; if (freezing(current)) alarmtimer_freezerset(exp, type); rmtp = restart->nanosleep.rmtp; if (rmtp) { ret = update_rmtp(exp, type, rmtp); if (ret <= 0) goto out; } /* The other values in restart are already filled in */ ret = -ERESTART_RESTARTBLOCK; out: return ret; } /** * alarm_timer_nsleep - alarmtimer nanosleep * @which_clock: clockid * @flags: determins abstime or relative * @tsreq: requested sleep time (abs or rel) * @rmtp: remaining sleep time saved * * Handles clock_nanosleep calls against _ALARM clockids */ static int alarm_timer_nsleep(const clockid_t which_clock, int flags, struct timespec *tsreq, struct timespec __user *rmtp) { enum alarmtimer_type type = clock2alarm(which_clock); struct alarm alarm; ktime_t exp; int ret = 0; struct restart_block *restart; if (!alarmtimer_get_rtcdev()) return -ENOTSUPP; if (!capable(CAP_WAKE_ALARM)) return -EPERM; alarm_init(&alarm, type, alarmtimer_nsleep_wakeup); exp = timespec_to_ktime(*tsreq); /* Convert (if necessary) to absolute time */ if (flags != TIMER_ABSTIME) { ktime_t now = alarm_bases[type].gettime(); exp = ktime_add(now, exp); } if (alarmtimer_do_nsleep(&alarm, exp)) goto out; if (freezing(current)) alarmtimer_freezerset(exp, type); /* abs timers don't set remaining time or restart */ if (flags == TIMER_ABSTIME) { ret = -ERESTARTNOHAND; goto out; } if (rmtp) { ret = update_rmtp(exp, type, rmtp); if (ret <= 0) goto out; } restart = &current_thread_info()->restart_block; restart->fn = alarm_timer_nsleep_restart; restart->nanosleep.clockid = type; restart->nanosleep.expires = exp.tv64; restart->nanosleep.rmtp = rmtp; ret = -ERESTART_RESTARTBLOCK; out: return ret; } /* Suspend hook structures */ static const struct dev_pm_ops alarmtimer_pm_ops = { .suspend = alarmtimer_suspend, }; static struct platform_driver alarmtimer_driver = { .driver = { .name = "alarmtimer", .pm = &alarmtimer_pm_ops, } }; /** * alarmtimer_init - Initialize alarm timer code * * This function initializes the alarm bases and registers * the posix clock ids. 
*/ static int __init alarmtimer_init(void) { int error = 0; int i; struct k_clock alarm_clock = { .clock_getres = alarm_clock_getres, .clock_get = alarm_clock_get, .timer_create = alarm_timer_create, .timer_set = alarm_timer_set, .timer_del = alarm_timer_del, .timer_get = alarm_timer_get, .nsleep = alarm_timer_nsleep, }; posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock); posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock); /* Initialize alarm bases */ alarm_bases[ALARM_REALTIME].base_clockid = CLOCK_REALTIME; alarm_bases[ALARM_REALTIME].gettime = &ktime_get_real; alarm_bases[ALARM_BOOTTIME].base_clockid = CLOCK_BOOTTIME; alarm_bases[ALARM_BOOTTIME].gettime = &ktime_get_boottime; for (i = 0; i < ALARM_NUMTYPE; i++) { timerqueue_init_head(&alarm_bases[i].timerqueue); spin_lock_init(&alarm_bases[i].lock); hrtimer_init(&alarm_bases[i].timer, alarm_bases[i].base_clockid, HRTIMER_MODE_ABS); alarm_bases[i].timer.function = alarmtimer_fired; } error = platform_driver_register(&alarmtimer_driver); platform_device_register_simple("alarmtimer", -1, NULL, 0); return error; } device_initcall(alarmtimer_init);
gpl-2.0
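alarmtimer.c above wires CLOCK_REALTIME_ALARM and CLOCK_BOOTTIME_ALARM into the posix-timer framework, with clock_nanosleep requests handled by alarm_timer_nsleep(). A small user-space sketch of that path is below; the fallback clockid value is taken from the kernel UAPI headers, and without CAP_WAKE_ALARM the call fails with EPERM, as the hooks above enforce.

#include <stdio.h>
#include <string.h>
#include <time.h>

#ifndef CLOCK_BOOTTIME_ALARM
#define CLOCK_BOOTTIME_ALARM 9	/* UAPI value, in case libc headers lack it */
#endif

int main(void)
{
	struct timespec req = { .tv_sec = 5, .tv_nsec = 0 };
	int err;

	/* Relative 5 s sleep on the BOOTTIME alarm base; if the machine
	 * suspends meanwhile, alarmtimer_suspend() programs the RTC so the
	 * sleep still completes on time.  Needs CAP_WAKE_ALARM. */
	err = clock_nanosleep(CLOCK_BOOTTIME_ALARM, 0, &req, NULL);
	if (err)
		fprintf(stderr, "clock_nanosleep: %s\n", strerror(err));
	else
		puts("woke up");
	return 0;
}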
gompa/linux
net/bluetooth/rfcomm/tty.c
1557
27418
/* RFCOMM implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com> Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* * RFCOMM TTY. */ #include <linux/module.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/rfcomm.h> #define RFCOMM_TTY_MAGIC 0x6d02 /* magic number for rfcomm struct */ #define RFCOMM_TTY_PORTS RFCOMM_MAX_DEV /* whole lotta rfcomm devices */ #define RFCOMM_TTY_MAJOR 216 /* device node major id of the usb/bluetooth.c driver */ #define RFCOMM_TTY_MINOR 0 static DEFINE_MUTEX(rfcomm_ioctl_mutex); static struct tty_driver *rfcomm_tty_driver; struct rfcomm_dev { struct tty_port port; struct list_head list; char name[12]; int id; unsigned long flags; int err; unsigned long status; /* don't export to userspace */ bdaddr_t src; bdaddr_t dst; u8 channel; uint modem_status; struct rfcomm_dlc *dlc; struct device *tty_dev; atomic_t wmem_alloc; struct sk_buff_head pending; }; static LIST_HEAD(rfcomm_dev_list); static DEFINE_MUTEX(rfcomm_dev_lock); static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb); static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err); static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig); /* ---- Device functions ---- */ static void rfcomm_dev_destruct(struct tty_port *port) { struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port); struct rfcomm_dlc *dlc = dev->dlc; BT_DBG("dev %p dlc %p", dev, dlc); rfcomm_dlc_lock(dlc); /* Detach DLC if it's owned by this dev */ if (dlc->owner == dev) dlc->owner = NULL; rfcomm_dlc_unlock(dlc); rfcomm_dlc_put(dlc); if (dev->tty_dev) tty_unregister_device(rfcomm_tty_driver, dev->id); mutex_lock(&rfcomm_dev_lock); list_del(&dev->list); mutex_unlock(&rfcomm_dev_lock); kfree(dev); /* It's safe to call module_put() here because socket still holds reference to this module. 
*/ module_put(THIS_MODULE); } /* device-specific initialization: open the dlc */ static int rfcomm_dev_activate(struct tty_port *port, struct tty_struct *tty) { struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port); int err; err = rfcomm_dlc_open(dev->dlc, &dev->src, &dev->dst, dev->channel); if (err) set_bit(TTY_IO_ERROR, &tty->flags); return err; } /* we block the open until the dlc->state becomes BT_CONNECTED */ static int rfcomm_dev_carrier_raised(struct tty_port *port) { struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port); return (dev->dlc->state == BT_CONNECTED); } /* device-specific cleanup: close the dlc */ static void rfcomm_dev_shutdown(struct tty_port *port) { struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port); if (dev->tty_dev->parent) device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST); /* close the dlc */ rfcomm_dlc_close(dev->dlc, 0); } static const struct tty_port_operations rfcomm_port_ops = { .destruct = rfcomm_dev_destruct, .activate = rfcomm_dev_activate, .shutdown = rfcomm_dev_shutdown, .carrier_raised = rfcomm_dev_carrier_raised, }; static struct rfcomm_dev *__rfcomm_dev_lookup(int id) { struct rfcomm_dev *dev; list_for_each_entry(dev, &rfcomm_dev_list, list) if (dev->id == id) return dev; return NULL; } static struct rfcomm_dev *rfcomm_dev_get(int id) { struct rfcomm_dev *dev; mutex_lock(&rfcomm_dev_lock); dev = __rfcomm_dev_lookup(id); if (dev && !tty_port_get(&dev->port)) dev = NULL; mutex_unlock(&rfcomm_dev_lock); return dev; } static void rfcomm_reparent_device(struct rfcomm_dev *dev) { struct hci_dev *hdev; struct hci_conn *conn; hdev = hci_get_route(&dev->dst, &dev->src); if (!hdev) return; /* The lookup results are unsafe to access without the * hci device lock (FIXME: why is this not documented?) */ hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &dev->dst); /* Just because the acl link is in the hash table is no * guarantee the sysfs device has been added ... 
*/ if (conn && device_is_registered(&conn->dev)) device_move(dev->tty_dev, &conn->dev, DPM_ORDER_DEV_AFTER_PARENT); hci_dev_unlock(hdev); hci_dev_put(hdev); } static ssize_t show_address(struct device *tty_dev, struct device_attribute *attr, char *buf) { struct rfcomm_dev *dev = dev_get_drvdata(tty_dev); return sprintf(buf, "%pMR\n", &dev->dst); } static ssize_t show_channel(struct device *tty_dev, struct device_attribute *attr, char *buf) { struct rfcomm_dev *dev = dev_get_drvdata(tty_dev); return sprintf(buf, "%d\n", dev->channel); } static DEVICE_ATTR(address, S_IRUGO, show_address, NULL); static DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL); static struct rfcomm_dev *__rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) { struct rfcomm_dev *dev, *entry; struct list_head *head = &rfcomm_dev_list; int err = 0; dev = kzalloc(sizeof(struct rfcomm_dev), GFP_KERNEL); if (!dev) return ERR_PTR(-ENOMEM); mutex_lock(&rfcomm_dev_lock); if (req->dev_id < 0) { dev->id = 0; list_for_each_entry(entry, &rfcomm_dev_list, list) { if (entry->id != dev->id) break; dev->id++; head = &entry->list; } } else { dev->id = req->dev_id; list_for_each_entry(entry, &rfcomm_dev_list, list) { if (entry->id == dev->id) { err = -EADDRINUSE; goto out; } if (entry->id > dev->id - 1) break; head = &entry->list; } } if ((dev->id < 0) || (dev->id > RFCOMM_MAX_DEV - 1)) { err = -ENFILE; goto out; } sprintf(dev->name, "rfcomm%d", dev->id); list_add(&dev->list, head); bacpy(&dev->src, &req->src); bacpy(&dev->dst, &req->dst); dev->channel = req->channel; dev->flags = req->flags & ((1 << RFCOMM_RELEASE_ONHUP) | (1 << RFCOMM_REUSE_DLC)); tty_port_init(&dev->port); dev->port.ops = &rfcomm_port_ops; skb_queue_head_init(&dev->pending); rfcomm_dlc_lock(dlc); if (req->flags & (1 << RFCOMM_REUSE_DLC)) { struct sock *sk = dlc->owner; struct sk_buff *skb; BUG_ON(!sk); rfcomm_dlc_throttle(dlc); while ((skb = skb_dequeue(&sk->sk_receive_queue))) { skb_orphan(skb); skb_queue_tail(&dev->pending, skb); atomic_sub(skb->len, &sk->sk_rmem_alloc); } } dlc->data_ready = rfcomm_dev_data_ready; dlc->state_change = rfcomm_dev_state_change; dlc->modem_status = rfcomm_dev_modem_status; dlc->owner = dev; dev->dlc = dlc; rfcomm_dev_modem_status(dlc, dlc->remote_v24_sig); rfcomm_dlc_unlock(dlc); /* It's safe to call __module_get() here because socket already holds reference to this module. 
*/ __module_get(THIS_MODULE); mutex_unlock(&rfcomm_dev_lock); return dev; out: mutex_unlock(&rfcomm_dev_lock); kfree(dev); return ERR_PTR(err); } static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) { struct rfcomm_dev *dev; struct device *tty; BT_DBG("id %d channel %d", req->dev_id, req->channel); dev = __rfcomm_dev_add(req, dlc); if (IS_ERR(dev)) { rfcomm_dlc_put(dlc); return PTR_ERR(dev); } tty = tty_port_register_device(&dev->port, rfcomm_tty_driver, dev->id, NULL); if (IS_ERR(tty)) { tty_port_put(&dev->port); return PTR_ERR(tty); } dev->tty_dev = tty; rfcomm_reparent_device(dev); dev_set_drvdata(dev->tty_dev, dev); if (device_create_file(dev->tty_dev, &dev_attr_address) < 0) BT_ERR("Failed to create address attribute"); if (device_create_file(dev->tty_dev, &dev_attr_channel) < 0) BT_ERR("Failed to create channel attribute"); return dev->id; } /* ---- Send buffer ---- */ static inline unsigned int rfcomm_room(struct rfcomm_dev *dev) { struct rfcomm_dlc *dlc = dev->dlc; /* Limit the outstanding number of packets not yet sent to 40 */ int pending = 40 - atomic_read(&dev->wmem_alloc); return max(0, pending) * dlc->mtu; } static void rfcomm_wfree(struct sk_buff *skb) { struct rfcomm_dev *dev = (void *) skb->sk; atomic_dec(&dev->wmem_alloc); if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags)) tty_port_tty_wakeup(&dev->port); tty_port_put(&dev->port); } static void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev) { tty_port_get(&dev->port); atomic_inc(&dev->wmem_alloc); skb->sk = (void *) dev; skb->destructor = rfcomm_wfree; } static struct sk_buff *rfcomm_wmalloc(struct rfcomm_dev *dev, unsigned long size, gfp_t priority) { struct sk_buff *skb = alloc_skb(size, priority); if (skb) rfcomm_set_owner_w(skb, dev); return skb; } /* ---- Device IOCTLs ---- */ #define NOCAP_FLAGS ((1 << RFCOMM_REUSE_DLC) | (1 << RFCOMM_RELEASE_ONHUP)) static int __rfcomm_create_dev(struct sock *sk, void __user *arg) { struct rfcomm_dev_req req; struct rfcomm_dlc *dlc; int id; if (copy_from_user(&req, arg, sizeof(req))) return -EFAULT; BT_DBG("sk %p dev_id %d flags 0x%x", sk, req.dev_id, req.flags); if (req.flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN)) return -EPERM; if (req.flags & (1 << RFCOMM_REUSE_DLC)) { /* Socket must be connected */ if (sk->sk_state != BT_CONNECTED) return -EBADFD; dlc = rfcomm_pi(sk)->dlc; rfcomm_dlc_hold(dlc); } else { /* Validate the channel is unused */ dlc = rfcomm_dlc_exists(&req.src, &req.dst, req.channel); if (IS_ERR(dlc)) return PTR_ERR(dlc); else if (dlc) { rfcomm_dlc_put(dlc); return -EBUSY; } dlc = rfcomm_dlc_alloc(GFP_KERNEL); if (!dlc) return -ENOMEM; } id = rfcomm_dev_add(&req, dlc); if (id < 0) return id; if (req.flags & (1 << RFCOMM_REUSE_DLC)) { /* DLC is now used by device. 
* Socket must be disconnected */ sk->sk_state = BT_CLOSED; } return id; } static int __rfcomm_release_dev(void __user *arg) { struct rfcomm_dev_req req; struct rfcomm_dev *dev; struct tty_struct *tty; if (copy_from_user(&req, arg, sizeof(req))) return -EFAULT; BT_DBG("dev_id %d flags 0x%x", req.dev_id, req.flags); dev = rfcomm_dev_get(req.dev_id); if (!dev) return -ENODEV; if (dev->flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN)) { tty_port_put(&dev->port); return -EPERM; } /* only release once */ if (test_and_set_bit(RFCOMM_DEV_RELEASED, &dev->status)) { tty_port_put(&dev->port); return -EALREADY; } if (req.flags & (1 << RFCOMM_HANGUP_NOW)) rfcomm_dlc_close(dev->dlc, 0); /* Shut down TTY synchronously before freeing rfcomm_dev */ tty = tty_port_tty_get(&dev->port); if (tty) { tty_vhangup(tty); tty_kref_put(tty); } if (!test_bit(RFCOMM_TTY_OWNED, &dev->status)) tty_port_put(&dev->port); tty_port_put(&dev->port); return 0; } static int rfcomm_create_dev(struct sock *sk, void __user *arg) { int ret; mutex_lock(&rfcomm_ioctl_mutex); ret = __rfcomm_create_dev(sk, arg); mutex_unlock(&rfcomm_ioctl_mutex); return ret; } static int rfcomm_release_dev(void __user *arg) { int ret; mutex_lock(&rfcomm_ioctl_mutex); ret = __rfcomm_release_dev(arg); mutex_unlock(&rfcomm_ioctl_mutex); return ret; } static int rfcomm_get_dev_list(void __user *arg) { struct rfcomm_dev *dev; struct rfcomm_dev_list_req *dl; struct rfcomm_dev_info *di; int n = 0, size, err; u16 dev_num; BT_DBG(""); if (get_user(dev_num, (u16 __user *) arg)) return -EFAULT; if (!dev_num || dev_num > (PAGE_SIZE * 4) / sizeof(*di)) return -EINVAL; size = sizeof(*dl) + dev_num * sizeof(*di); dl = kzalloc(size, GFP_KERNEL); if (!dl) return -ENOMEM; di = dl->dev_info; mutex_lock(&rfcomm_dev_lock); list_for_each_entry(dev, &rfcomm_dev_list, list) { if (!tty_port_get(&dev->port)) continue; (di + n)->id = dev->id; (di + n)->flags = dev->flags; (di + n)->state = dev->dlc->state; (di + n)->channel = dev->channel; bacpy(&(di + n)->src, &dev->src); bacpy(&(di + n)->dst, &dev->dst); tty_port_put(&dev->port); if (++n >= dev_num) break; } mutex_unlock(&rfcomm_dev_lock); dl->dev_num = n; size = sizeof(*dl) + n * sizeof(*di); err = copy_to_user(arg, dl, size); kfree(dl); return err ? 
-EFAULT : 0; } static int rfcomm_get_dev_info(void __user *arg) { struct rfcomm_dev *dev; struct rfcomm_dev_info di; int err = 0; BT_DBG(""); if (copy_from_user(&di, arg, sizeof(di))) return -EFAULT; dev = rfcomm_dev_get(di.id); if (!dev) return -ENODEV; di.flags = dev->flags; di.channel = dev->channel; di.state = dev->dlc->state; bacpy(&di.src, &dev->src); bacpy(&di.dst, &dev->dst); if (copy_to_user(arg, &di, sizeof(di))) err = -EFAULT; tty_port_put(&dev->port); return err; } int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) { BT_DBG("cmd %d arg %p", cmd, arg); switch (cmd) { case RFCOMMCREATEDEV: return rfcomm_create_dev(sk, arg); case RFCOMMRELEASEDEV: return rfcomm_release_dev(arg); case RFCOMMGETDEVLIST: return rfcomm_get_dev_list(arg); case RFCOMMGETDEVINFO: return rfcomm_get_dev_info(arg); } return -EINVAL; } /* ---- DLC callbacks ---- */ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb) { struct rfcomm_dev *dev = dlc->owner; if (!dev) { kfree_skb(skb); return; } if (!skb_queue_empty(&dev->pending)) { skb_queue_tail(&dev->pending, skb); return; } BT_DBG("dlc %p len %d", dlc, skb->len); tty_insert_flip_string(&dev->port, skb->data, skb->len); tty_flip_buffer_push(&dev->port); kfree_skb(skb); } static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err) { struct rfcomm_dev *dev = dlc->owner; if (!dev) return; BT_DBG("dlc %p dev %p err %d", dlc, dev, err); dev->err = err; if (dlc->state == BT_CONNECTED) { rfcomm_reparent_device(dev); wake_up_interruptible(&dev->port.open_wait); } else if (dlc->state == BT_CLOSED) tty_port_tty_hangup(&dev->port, false); } static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig) { struct rfcomm_dev *dev = dlc->owner; if (!dev) return; BT_DBG("dlc %p dev %p v24_sig 0x%02x", dlc, dev, v24_sig); if ((dev->modem_status & TIOCM_CD) && !(v24_sig & RFCOMM_V24_DV)) tty_port_tty_hangup(&dev->port, true); dev->modem_status = ((v24_sig & RFCOMM_V24_RTC) ? (TIOCM_DSR | TIOCM_DTR) : 0) | ((v24_sig & RFCOMM_V24_RTR) ? (TIOCM_RTS | TIOCM_CTS) : 0) | ((v24_sig & RFCOMM_V24_IC) ? TIOCM_RI : 0) | ((v24_sig & RFCOMM_V24_DV) ? TIOCM_CD : 0); } /* ---- TTY functions ---- */ static void rfcomm_tty_copy_pending(struct rfcomm_dev *dev) { struct sk_buff *skb; int inserted = 0; BT_DBG("dev %p", dev); rfcomm_dlc_lock(dev->dlc); while ((skb = skb_dequeue(&dev->pending))) { inserted += tty_insert_flip_string(&dev->port, skb->data, skb->len); kfree_skb(skb); } rfcomm_dlc_unlock(dev->dlc); if (inserted > 0) tty_flip_buffer_push(&dev->port); } /* do the reverse of install, clearing the tty fields and releasing the * reference to tty_port */ static void rfcomm_tty_cleanup(struct tty_struct *tty) { struct rfcomm_dev *dev = tty->driver_data; clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags); rfcomm_dlc_lock(dev->dlc); tty->driver_data = NULL; rfcomm_dlc_unlock(dev->dlc); /* * purge the dlc->tx_queue to avoid circular dependencies * between dev and dlc */ skb_queue_purge(&dev->dlc->tx_queue); tty_port_put(&dev->port); } /* we acquire the tty_port reference since it's here the tty is first used * by setting the termios. 
We also populate the driver_data field and install * the tty port */ static int rfcomm_tty_install(struct tty_driver *driver, struct tty_struct *tty) { struct rfcomm_dev *dev; struct rfcomm_dlc *dlc; int err; dev = rfcomm_dev_get(tty->index); if (!dev) return -ENODEV; dlc = dev->dlc; /* Attach TTY and open DLC */ rfcomm_dlc_lock(dlc); tty->driver_data = dev; rfcomm_dlc_unlock(dlc); set_bit(RFCOMM_TTY_ATTACHED, &dev->flags); /* install the tty_port */ err = tty_port_install(&dev->port, driver, tty); if (err) { rfcomm_tty_cleanup(tty); return err; } /* take over the tty_port reference if the port was created with the * flag RFCOMM_RELEASE_ONHUP. This will force the release of the port * when the last process closes the tty. The behaviour is expected by * userspace. */ if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) { set_bit(RFCOMM_TTY_OWNED, &dev->status); tty_port_put(&dev->port); } return 0; } static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) { struct rfcomm_dev *dev = tty->driver_data; int err; BT_DBG("tty %p id %d", tty, tty->index); BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst, dev->channel, dev->port.count); err = tty_port_open(&dev->port, tty, filp); if (err) return err; /* * FIXME: rfcomm should use proper flow control for * received data. This hack will be unnecessary and can * be removed when that's implemented */ rfcomm_tty_copy_pending(dev); rfcomm_dlc_unthrottle(dev->dlc); return 0; } static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp) { struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc, dev->port.count); tty_port_close(&dev->port, tty, filp); } static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) { struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; struct rfcomm_dlc *dlc = dev->dlc; struct sk_buff *skb; int sent = 0, size; BT_DBG("tty %p count %d", tty, count); while (count) { size = min_t(uint, count, dlc->mtu); skb = rfcomm_wmalloc(dev, size + RFCOMM_SKB_RESERVE, GFP_ATOMIC); if (!skb) break; skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE); memcpy(skb_put(skb, size), buf + sent, size); rfcomm_dlc_send_noerror(dlc, skb); sent += size; count -= size; } return sent; } static int rfcomm_tty_write_room(struct tty_struct *tty) { struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; int room = 0; if (dev && dev->dlc) room = rfcomm_room(dev); BT_DBG("tty %p room %d", tty, room); return room; } static int rfcomm_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { BT_DBG("tty %p cmd 0x%02x", tty, cmd); switch (cmd) { case TCGETS: BT_DBG("TCGETS is not supported"); return -ENOIOCTLCMD; case TCSETS: BT_DBG("TCSETS is not supported"); return -ENOIOCTLCMD; case TIOCMIWAIT: BT_DBG("TIOCMIWAIT"); break; case TIOCGSERIAL: BT_ERR("TIOCGSERIAL is not supported"); return -ENOIOCTLCMD; case TIOCSSERIAL: BT_ERR("TIOCSSERIAL is not supported"); return -ENOIOCTLCMD; case TIOCSERGSTRUCT: BT_ERR("TIOCSERGSTRUCT is not supported"); return -ENOIOCTLCMD; case TIOCSERGETLSR: BT_ERR("TIOCSERGETLSR is not supported"); return -ENOIOCTLCMD; case TIOCSERCONFIG: BT_ERR("TIOCSERCONFIG is not supported"); return -ENOIOCTLCMD; default: return -ENOIOCTLCMD; /* ioctls which we must ignore */ } return -ENOIOCTLCMD; } static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old) { struct ktermios *new = &tty->termios; int old_baud_rate = tty_termios_baud_rate(old); int new_baud_rate = 
tty_termios_baud_rate(new); u8 baud, data_bits, stop_bits, parity, x_on, x_off; u16 changes = 0; struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; BT_DBG("tty %p termios %p", tty, old); if (!dev || !dev->dlc || !dev->dlc->session) return; /* Handle turning off CRTSCTS */ if ((old->c_cflag & CRTSCTS) && !(new->c_cflag & CRTSCTS)) BT_DBG("Turning off CRTSCTS unsupported"); /* Parity on/off and when on, odd/even */ if (((old->c_cflag & PARENB) != (new->c_cflag & PARENB)) || ((old->c_cflag & PARODD) != (new->c_cflag & PARODD))) { changes |= RFCOMM_RPN_PM_PARITY; BT_DBG("Parity change detected."); } /* Mark and space parity are not supported! */ if (new->c_cflag & PARENB) { if (new->c_cflag & PARODD) { BT_DBG("Parity is ODD"); parity = RFCOMM_RPN_PARITY_ODD; } else { BT_DBG("Parity is EVEN"); parity = RFCOMM_RPN_PARITY_EVEN; } } else { BT_DBG("Parity is OFF"); parity = RFCOMM_RPN_PARITY_NONE; } /* Setting the x_on / x_off characters */ if (old->c_cc[VSTOP] != new->c_cc[VSTOP]) { BT_DBG("XOFF custom"); x_on = new->c_cc[VSTOP]; changes |= RFCOMM_RPN_PM_XON; } else { BT_DBG("XOFF default"); x_on = RFCOMM_RPN_XON_CHAR; } if (old->c_cc[VSTART] != new->c_cc[VSTART]) { BT_DBG("XON custom"); x_off = new->c_cc[VSTART]; changes |= RFCOMM_RPN_PM_XOFF; } else { BT_DBG("XON default"); x_off = RFCOMM_RPN_XOFF_CHAR; } /* Handle setting of stop bits */ if ((old->c_cflag & CSTOPB) != (new->c_cflag & CSTOPB)) changes |= RFCOMM_RPN_PM_STOP; /* POSIX does not support 1.5 stop bits and RFCOMM does not * support 2 stop bits. So a request for 2 stop bits gets * translated to 1.5 stop bits */ if (new->c_cflag & CSTOPB) stop_bits = RFCOMM_RPN_STOP_15; else stop_bits = RFCOMM_RPN_STOP_1; /* Handle number of data bits [5-8] */ if ((old->c_cflag & CSIZE) != (new->c_cflag & CSIZE)) changes |= RFCOMM_RPN_PM_DATA; switch (new->c_cflag & CSIZE) { case CS5: data_bits = RFCOMM_RPN_DATA_5; break; case CS6: data_bits = RFCOMM_RPN_DATA_6; break; case CS7: data_bits = RFCOMM_RPN_DATA_7; break; case CS8: data_bits = RFCOMM_RPN_DATA_8; break; default: data_bits = RFCOMM_RPN_DATA_8; break; } /* Handle baudrate settings */ if (old_baud_rate != new_baud_rate) changes |= RFCOMM_RPN_PM_BITRATE; switch (new_baud_rate) { case 2400: baud = RFCOMM_RPN_BR_2400; break; case 4800: baud = RFCOMM_RPN_BR_4800; break; case 7200: baud = RFCOMM_RPN_BR_7200; break; case 9600: baud = RFCOMM_RPN_BR_9600; break; case 19200: baud = RFCOMM_RPN_BR_19200; break; case 38400: baud = RFCOMM_RPN_BR_38400; break; case 57600: baud = RFCOMM_RPN_BR_57600; break; case 115200: baud = RFCOMM_RPN_BR_115200; break; case 230400: baud = RFCOMM_RPN_BR_230400; break; default: /* 9600 is standard accordinag to the RFCOMM specification */ baud = RFCOMM_RPN_BR_9600; break; } if (changes) rfcomm_send_rpn(dev->dlc->session, 1, dev->dlc->dlci, baud, data_bits, stop_bits, parity, RFCOMM_RPN_FLOW_NONE, x_on, x_off, changes); } static void rfcomm_tty_throttle(struct tty_struct *tty) { struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; BT_DBG("tty %p dev %p", tty, dev); rfcomm_dlc_throttle(dev->dlc); } static void rfcomm_tty_unthrottle(struct tty_struct *tty) { struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; BT_DBG("tty %p dev %p", tty, dev); rfcomm_dlc_unthrottle(dev->dlc); } static int rfcomm_tty_chars_in_buffer(struct tty_struct *tty) { struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; BT_DBG("tty %p dev %p", tty, dev); if (!dev || !dev->dlc) return 0; if (!skb_queue_empty(&dev->dlc->tx_queue)) return dev->dlc->mtu; return 
0; } static void rfcomm_tty_flush_buffer(struct tty_struct *tty) { struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; BT_DBG("tty %p dev %p", tty, dev); if (!dev || !dev->dlc) return; skb_queue_purge(&dev->dlc->tx_queue); tty_wakeup(tty); } static void rfcomm_tty_send_xchar(struct tty_struct *tty, char ch) { BT_DBG("tty %p ch %c", tty, ch); } static void rfcomm_tty_wait_until_sent(struct tty_struct *tty, int timeout) { BT_DBG("tty %p timeout %d", tty, timeout); } static void rfcomm_tty_hangup(struct tty_struct *tty) { struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; BT_DBG("tty %p dev %p", tty, dev); tty_port_hangup(&dev->port); } static int rfcomm_tty_tiocmget(struct tty_struct *tty) { struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; BT_DBG("tty %p dev %p", tty, dev); return dev->modem_status; } static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; struct rfcomm_dlc *dlc = dev->dlc; u8 v24_sig; BT_DBG("tty %p dev %p set 0x%02x clear 0x%02x", tty, dev, set, clear); rfcomm_dlc_get_modem_status(dlc, &v24_sig); if (set & TIOCM_DSR || set & TIOCM_DTR) v24_sig |= RFCOMM_V24_RTC; if (set & TIOCM_RTS || set & TIOCM_CTS) v24_sig |= RFCOMM_V24_RTR; if (set & TIOCM_RI) v24_sig |= RFCOMM_V24_IC; if (set & TIOCM_CD) v24_sig |= RFCOMM_V24_DV; if (clear & TIOCM_DSR || clear & TIOCM_DTR) v24_sig &= ~RFCOMM_V24_RTC; if (clear & TIOCM_RTS || clear & TIOCM_CTS) v24_sig &= ~RFCOMM_V24_RTR; if (clear & TIOCM_RI) v24_sig &= ~RFCOMM_V24_IC; if (clear & TIOCM_CD) v24_sig &= ~RFCOMM_V24_DV; rfcomm_dlc_set_modem_status(dlc, v24_sig); return 0; } /* ---- TTY structure ---- */ static const struct tty_operations rfcomm_ops = { .open = rfcomm_tty_open, .close = rfcomm_tty_close, .write = rfcomm_tty_write, .write_room = rfcomm_tty_write_room, .chars_in_buffer = rfcomm_tty_chars_in_buffer, .flush_buffer = rfcomm_tty_flush_buffer, .ioctl = rfcomm_tty_ioctl, .throttle = rfcomm_tty_throttle, .unthrottle = rfcomm_tty_unthrottle, .set_termios = rfcomm_tty_set_termios, .send_xchar = rfcomm_tty_send_xchar, .hangup = rfcomm_tty_hangup, .wait_until_sent = rfcomm_tty_wait_until_sent, .tiocmget = rfcomm_tty_tiocmget, .tiocmset = rfcomm_tty_tiocmset, .install = rfcomm_tty_install, .cleanup = rfcomm_tty_cleanup, }; int __init rfcomm_init_ttys(void) { int error; rfcomm_tty_driver = alloc_tty_driver(RFCOMM_TTY_PORTS); if (!rfcomm_tty_driver) return -ENOMEM; rfcomm_tty_driver->driver_name = "rfcomm"; rfcomm_tty_driver->name = "rfcomm"; rfcomm_tty_driver->major = RFCOMM_TTY_MAJOR; rfcomm_tty_driver->minor_start = RFCOMM_TTY_MINOR; rfcomm_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; rfcomm_tty_driver->subtype = SERIAL_TYPE_NORMAL; rfcomm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; rfcomm_tty_driver->init_termios = tty_std_termios; rfcomm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL; rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON; tty_set_operations(rfcomm_tty_driver, &rfcomm_ops); error = tty_register_driver(rfcomm_tty_driver); if (error) { BT_ERR("Can't register RFCOMM TTY driver"); put_tty_driver(rfcomm_tty_driver); return error; } BT_INFO("RFCOMM TTY layer initialized"); return 0; } void rfcomm_cleanup_ttys(void) { tty_unregister_driver(rfcomm_tty_driver); put_tty_driver(rfcomm_tty_driver); }
gpl-2.0
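rfcomm_dev_modem_status() in tty.c above folds the remote RFCOMM V.24 signals into the TIOCM_* bits that rfcomm_tty_tiocmget() later reports. The stand-alone sketch below repeats that mapping; the RFCOMM_V24_* bit values used here are illustrative stand-ins, the real definitions live in the kernel's net/bluetooth/rfcomm.h.

#include <stdio.h>
#include <sys/ioctl.h>	/* TIOCM_* modem-line bits */

/* Illustrative stand-ins for the V.24 bits defined in net/bluetooth/rfcomm.h. */
#define RFCOMM_V24_RTC 0x04
#define RFCOMM_V24_RTR 0x08
#define RFCOMM_V24_IC  0x40
#define RFCOMM_V24_DV  0x80

/* Same translation rfcomm_dev_modem_status() performs for tiocmget(). */
static unsigned int v24_to_tiocm(unsigned char v24_sig)
{
	return ((v24_sig & RFCOMM_V24_RTC) ? (TIOCM_DSR | TIOCM_DTR) : 0) |
	       ((v24_sig & RFCOMM_V24_RTR) ? (TIOCM_RTS | TIOCM_CTS) : 0) |
	       ((v24_sig & RFCOMM_V24_IC)  ? TIOCM_RI : 0) |
	       ((v24_sig & RFCOMM_V24_DV)  ? TIOCM_CD : 0);
}

int main(void)
{
	printf("modem status: 0x%x\n",
	       v24_to_tiocm(RFCOMM_V24_RTC | RFCOMM_V24_DV));
	return 0;
}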
TheNameIsNigel/android_kernel_huawei_msm8928
drivers/input/misc/pmic8xxx-pwrkey.c
1557
7576
/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/log2.h> #include <linux/mfd/pm8xxx/core.h> #include <linux/input/pmic8xxx-pwrkey.h> #define PON_CNTL_1 0x1C #define PON_CNTL_PULL_UP BIT(7) #define PON_CNTL_TRIG_DELAY_MASK (0x7) /** * struct pmic8xxx_pwrkey - pmic8xxx pwrkey information * @key_press_irq: key press irq number * @pdata: platform data */ struct pmic8xxx_pwrkey { struct input_dev *pwr; int key_press_irq; int key_release_irq; bool press; const struct pm8xxx_pwrkey_platform_data *pdata; }; static irqreturn_t pwrkey_press_irq(int irq, void *_pwrkey) { struct pmic8xxx_pwrkey *pwrkey = _pwrkey; if (pwrkey->press == true) { pwrkey->press = false; return IRQ_HANDLED; } else { pwrkey->press = true; } input_report_key(pwrkey->pwr, KEY_POWER, 1); input_sync(pwrkey->pwr); return IRQ_HANDLED; } static irqreturn_t pwrkey_release_irq(int irq, void *_pwrkey) { struct pmic8xxx_pwrkey *pwrkey = _pwrkey; if (pwrkey->press == false) { input_report_key(pwrkey->pwr, KEY_POWER, 1); input_sync(pwrkey->pwr); pwrkey->press = true; } else { pwrkey->press = false; } input_report_key(pwrkey->pwr, KEY_POWER, 0); input_sync(pwrkey->pwr); return IRQ_HANDLED; } #ifdef CONFIG_PM_SLEEP static int pmic8xxx_pwrkey_suspend(struct device *dev) { struct pmic8xxx_pwrkey *pwrkey = dev_get_drvdata(dev); if (device_may_wakeup(dev)) { enable_irq_wake(pwrkey->key_press_irq); enable_irq_wake(pwrkey->key_release_irq); } return 0; } static int pmic8xxx_pwrkey_resume(struct device *dev) { struct pmic8xxx_pwrkey *pwrkey = dev_get_drvdata(dev); if (device_may_wakeup(dev)) { disable_irq_wake(pwrkey->key_press_irq); disable_irq_wake(pwrkey->key_release_irq); } return 0; } #endif static SIMPLE_DEV_PM_OPS(pm8xxx_pwr_key_pm_ops, pmic8xxx_pwrkey_suspend, pmic8xxx_pwrkey_resume); static int pmic8xxx_set_pon1(struct device *dev, u32 debounce_us, bool pull_up) { int err; u32 delay; u8 pon_cntl; /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. 
*/ if (debounce_us > USEC_PER_SEC * 2 || debounce_us < USEC_PER_SEC / 64) { dev_err(dev, "invalid power key trigger delay\n"); return -EINVAL; } delay = (debounce_us << 6) / USEC_PER_SEC; delay = ilog2(delay); err = pm8xxx_readb(dev->parent, PON_CNTL_1, &pon_cntl); if (err < 0) { dev_err(dev, "failed reading PON_CNTL_1 err=%d\n", err); return err; } pon_cntl &= ~PON_CNTL_TRIG_DELAY_MASK; pon_cntl |= (delay & PON_CNTL_TRIG_DELAY_MASK); if (pull_up) pon_cntl |= PON_CNTL_PULL_UP; else pon_cntl &= ~PON_CNTL_PULL_UP; err = pm8xxx_writeb(dev->parent, PON_CNTL_1, pon_cntl); if (err < 0) { dev_err(dev, "failed writing PON_CNTL_1 err=%d\n", err); return err; } return 0; } static ssize_t pmic8xxx_debounce_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct pmic8xxx_pwrkey *pwrkey = dev_get_drvdata(dev); int err; unsigned long val; if (size > 8) return -EINVAL; err = kstrtoul(buf, 10, &val); if (err < 0) return err; err = pmic8xxx_set_pon1(dev, val, pwrkey->pdata->pull_up); if (err < 0) return err; return size; } static DEVICE_ATTR(debounce_us, 0664, NULL, pmic8xxx_debounce_store); static int __devinit pmic8xxx_pwrkey_probe(struct platform_device *pdev) { struct input_dev *pwr; int key_release_irq = platform_get_irq(pdev, 0); int key_press_irq = platform_get_irq(pdev, 1); int err; struct pmic8xxx_pwrkey *pwrkey; const struct pm8xxx_pwrkey_platform_data *pdata = dev_get_platdata(&pdev->dev); if (!pdata) { dev_err(&pdev->dev, "power key platform data not supplied\n"); return -EINVAL; } pwrkey = kzalloc(sizeof(*pwrkey), GFP_KERNEL); if (!pwrkey) return -ENOMEM; pwrkey->pdata = pdata; pwr = input_allocate_device(); if (!pwr) { dev_dbg(&pdev->dev, "Can't allocate power button\n"); err = -ENOMEM; goto free_pwrkey; } input_set_capability(pwr, EV_KEY, KEY_POWER); pwr->name = "pmic8xxx_pwrkey"; pwr->phys = "pmic8xxx_pwrkey/input0"; pwr->dev.parent = &pdev->dev; err = pmic8xxx_set_pon1(&pdev->dev, pdata->kpd_trigger_delay_us, pdata->pull_up); if (err) { dev_dbg(&pdev->dev, "Can't set PON CTRL1 register: %d\n", err); goto free_input_dev; } err = input_register_device(pwr); if (err) { dev_dbg(&pdev->dev, "Can't register power key: %d\n", err); goto free_input_dev; } pwrkey->key_press_irq = key_press_irq; pwrkey->key_release_irq = key_release_irq; pwrkey->pwr = pwr; platform_set_drvdata(pdev, pwrkey); /* check power key status during boot */ err = pm8xxx_read_irq_stat(pdev->dev.parent, key_press_irq); if (err < 0) { dev_err(&pdev->dev, "reading irq status failed\n"); goto unreg_input_dev; } pwrkey->press = !!err; if (pwrkey->press) { input_report_key(pwrkey->pwr, KEY_POWER, 1); input_sync(pwrkey->pwr); } err = request_any_context_irq(key_press_irq, pwrkey_press_irq, IRQF_TRIGGER_RISING, "pmic8xxx_pwrkey_press", pwrkey); if (err < 0) { dev_dbg(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n", key_press_irq, err); goto unreg_input_dev; } err = request_any_context_irq(key_release_irq, pwrkey_release_irq, IRQF_TRIGGER_RISING, "pmic8xxx_pwrkey_release", pwrkey); if (err < 0) { dev_dbg(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n", key_release_irq, err); goto free_press_irq; } err = device_create_file(&pdev->dev, &dev_attr_debounce_us); if (err < 0) { dev_err(&pdev->dev, "dev file creation for debounce failed: %d\n", err); goto free_rel_irq; } device_init_wakeup(&pdev->dev, pdata->wakeup); return 0; free_rel_irq: free_irq(key_release_irq, pwrkey); free_press_irq: free_irq(key_press_irq, pwrkey); unreg_input_dev: platform_set_drvdata(pdev, NULL); input_unregister_device(pwr); 
pwr = NULL; free_input_dev: input_free_device(pwr); free_pwrkey: kfree(pwrkey); return err; } static int __devexit pmic8xxx_pwrkey_remove(struct platform_device *pdev) { struct pmic8xxx_pwrkey *pwrkey = platform_get_drvdata(pdev); int key_release_irq = platform_get_irq(pdev, 0); int key_press_irq = platform_get_irq(pdev, 1); device_init_wakeup(&pdev->dev, 0); device_remove_file(&pdev->dev, &dev_attr_debounce_us); free_irq(key_press_irq, pwrkey); free_irq(key_release_irq, pwrkey); input_unregister_device(pwrkey->pwr); platform_set_drvdata(pdev, NULL); kfree(pwrkey); return 0; } static struct platform_driver pmic8xxx_pwrkey_driver = { .probe = pmic8xxx_pwrkey_probe, .remove = __devexit_p(pmic8xxx_pwrkey_remove), .driver = { .name = PM8XXX_PWRKEY_DEV_NAME, .owner = THIS_MODULE, .pm = &pm8xxx_pwr_key_pm_ops, }, }; module_platform_driver(pmic8xxx_pwrkey_driver); MODULE_ALIAS("platform:pmic8xxx_pwrkey"); MODULE_DESCRIPTION("PMIC8XXX Power Key driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Trilok Soni <tsoni@codeaurora.org>");
gpl-2.0
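pmic8xxx_set_pon1() above encodes the power-key debounce as a 3-bit trigger-delay field in PON_CNTL_1: the microsecond value is scaled by 64 / USEC_PER_SEC and then reduced to its log2. The self-contained sketch below reproduces that arithmetic for a few sample debounce values, with a hand-rolled ilog2 standing in for the kernel helper.

#include <stdio.h>

#define USEC_PER_SEC 1000000UL

/* Hand-rolled integer log2, standing in for the kernel's ilog2(). */
static unsigned int ilog2_u(unsigned long v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Same encoding pmic8xxx_set_pon1() uses for the 3-bit trigger-delay
 * field of PON_CNTL_1: code = log2(debounce_us * 64 / USEC_PER_SEC). */
static unsigned int pon_delay_code(unsigned long debounce_us)
{
	unsigned long delay = (debounce_us << 6) / USEC_PER_SEC;

	return ilog2_u(delay) & 0x7;
}

int main(void)
{
	printf("15625 us   -> %u\n", pon_delay_code(15625));	/* 1/64 s -> 0 */
	printf("500000 us  -> %u\n", pon_delay_code(500000));	/* 1/2 s  -> 5 */
	printf("2000000 us -> %u\n", pon_delay_code(2000000));	/* 2 s    -> 7 */
	return 0;
}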
dsexton702/sgs4g_gb_kernel
arch/mn10300/mm/cache.c
1557
3031
/* MN10300 Cache flushing routines * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/module.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/threads.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/cacheflush.h> #include <asm/io.h> #include <asm/uaccess.h> EXPORT_SYMBOL(mn10300_icache_inv); EXPORT_SYMBOL(mn10300_dcache_inv); EXPORT_SYMBOL(mn10300_dcache_inv_range); EXPORT_SYMBOL(mn10300_dcache_inv_range2); EXPORT_SYMBOL(mn10300_dcache_inv_page); #ifdef CONFIG_MN10300_CACHE_WBACK EXPORT_SYMBOL(mn10300_dcache_flush); EXPORT_SYMBOL(mn10300_dcache_flush_inv); EXPORT_SYMBOL(mn10300_dcache_flush_inv_range); EXPORT_SYMBOL(mn10300_dcache_flush_inv_range2); EXPORT_SYMBOL(mn10300_dcache_flush_inv_page); EXPORT_SYMBOL(mn10300_dcache_flush_range); EXPORT_SYMBOL(mn10300_dcache_flush_range2); EXPORT_SYMBOL(mn10300_dcache_flush_page); #endif /* * write a page back from the dcache and invalidate the icache so that we can * run code from it that we've just written into it */ void flush_icache_page(struct vm_area_struct *vma, struct page *page) { mn10300_dcache_flush_page(page_to_phys(page)); mn10300_icache_inv(); } EXPORT_SYMBOL(flush_icache_page); /* * write some code we've just written back from the dcache and invalidate the * icache so that we can run that code */ void flush_icache_range(unsigned long start, unsigned long end) { #ifdef CONFIG_MN10300_CACHE_WBACK unsigned long addr, size, off; struct page *page; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *ppte, pte; for (; start < end; start += size) { /* work out how much of the page to flush */ off = start & (PAGE_SIZE - 1); size = end - start; if (size > PAGE_SIZE - off) size = PAGE_SIZE - off; /* get the physical address the page is mapped to from the page * tables */ pgd = pgd_offset(current->mm, start); if (!pgd || !pgd_val(*pgd)) continue; pud = pud_offset(pgd, start); if (!pud || !pud_val(*pud)) continue; pmd = pmd_offset(pud, start); if (!pmd || !pmd_val(*pmd)) continue; ppte = pte_offset_map(pmd, start); if (!ppte) continue; pte = *ppte; pte_unmap(ppte); if (pte_none(pte)) continue; page = pte_page(pte); if (!page) continue; addr = page_to_phys(page); /* flush the dcache and invalidate the icache coverage on that * region */ mn10300_dcache_flush_range2(addr + off, size); } #endif mn10300_icache_inv(); } EXPORT_SYMBOL(flush_icache_range); /* * allow userspace to flush the instruction cache */ asmlinkage long sys_cacheflush(unsigned long start, unsigned long end) { if (end < start) return -EINVAL; flush_icache_range(start, end); return 0; }
gpl-2.0
ND-3500/golden_cm10.2_kernel
drivers/net/wireless/ath/ath9k/ar9002_phy.c
2325
17159
/* * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * DOC: Programming Atheros 802.11n analog front end radios * * AR5416 MAC based PCI devices and AR518 MAC based PCI-Express * devices have either an external AR2133 analog front end radio for single * band 2.4 GHz communication or an AR5133 analog front end radio for dual * band 2.4 GHz / 5 GHz communication. * * All devices after the AR5416 and AR5418 family starting with the AR9280 * have their analog front radios, MAC/BB and host PCIe/USB interface embedded * into a single-chip and require less programming. * * The following single-chips exist with a respective embedded radio: * * AR9280 - 11n dual-band 2x2 MIMO for PCIe * AR9281 - 11n single-band 1x2 MIMO for PCIe * AR9285 - 11n single-band 1x1 for PCIe * AR9287 - 11n single-band 2x2 MIMO for PCIe * * AR9220 - 11n dual-band 2x2 MIMO for PCI * AR9223 - 11n single-band 2x2 MIMO for PCI * * AR9287 - 11n single-band 1x1 MIMO for USB */ #include "hw.h" #include "ar9002_phy.h" /** * ar9002_hw_set_channel - set channel on single-chip device * @ah: atheros hardware structure * @chan: * * This is the function to change channel on single-chip devices, that is * all devices after ar9280. * * This function takes the channel value in MHz and sets * hardware channel value. Assumes writes have been enabled to analog bus. 
* * Actual Expression, * * For 2GHz channel, * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17) * (freq_ref = 40MHz) * * For 5GHz channel, * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10) * (freq_ref = 40MHz/(24>>amodeRefSel)) */ static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan) { u16 bMode, fracMode, aModeRefSel = 0; u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0; struct chan_centers centers; u32 refDivA = 24; ath9k_hw_get_channel_centers(ah, chan, &centers); freq = centers.synth_center; reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL); reg32 &= 0xc0000000; if (freq < 4800) { /* 2 GHz, fractional mode */ u32 txctl; int regWrites = 0; bMode = 1; fracMode = 1; aModeRefSel = 0; channelSel = CHANSEL_2G(freq); if (AR_SREV_9287_11_OR_LATER(ah)) { if (freq == 2484) { /* Enable channel spreading for channel 14 */ REG_WRITE_ARRAY(&ah->iniCckfirJapan2484, 1, regWrites); } else { REG_WRITE_ARRAY(&ah->iniCckfirNormal, 1, regWrites); } } else { txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL); if (freq == 2484) { /* Enable channel spreading for channel 14 */ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL, txctl | AR_PHY_CCK_TX_CTRL_JAPAN); } else { REG_WRITE(ah, AR_PHY_CCK_TX_CTRL, txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN); } } } else { bMode = 0; fracMode = 0; switch (ah->eep_ops->get_eeprom(ah, EEP_FRAC_N_5G)) { case 0: if ((freq % 20) == 0) aModeRefSel = 3; else if ((freq % 10) == 0) aModeRefSel = 2; if (aModeRefSel) break; case 1: default: aModeRefSel = 0; /* * Enable 2G (fractional) mode for channels * which are 5MHz spaced. */ fracMode = 1; refDivA = 1; channelSel = CHANSEL_5G(freq); /* RefDivA setting */ REG_RMW_FIELD(ah, AR_AN_SYNTH9, AR_AN_SYNTH9_REFDIVA, refDivA); } if (!fracMode) { ndiv = (freq * (refDivA >> aModeRefSel)) / 60; channelSel = ndiv & 0x1ff; channelFrac = (ndiv & 0xfffffe00) * 2; channelSel = (channelSel << 17) | channelFrac; } } reg32 = reg32 | (bMode << 29) | (fracMode << 28) | (aModeRefSel << 26) | (channelSel); REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32); ah->curchan = chan; ah->curchan_rad_index = -1; return 0; } /** * ar9002_hw_spur_mitigate - convert baseband spur frequency * @ah: atheros hardware structure * @chan: * * For single-chip solutions. Converts to baseband spur frequency given the * input channel frequency and compute register settings below. 
*/ static void ar9002_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan) { int bb_spur = AR_NO_SPUR; int freq; int bin, cur_bin; int bb_spur_off, spur_subchannel_sd; int spur_freq_sd; int spur_delta_phase; int denominator; int upper, lower, cur_vit_mask; int tmp, newVal; int i; static const int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8, AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60 }; static const int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10, AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60 }; static const int inc[4] = { 0, 100, 0, 0 }; struct chan_centers centers; int8_t mask_m[123]; int8_t mask_p[123]; int8_t mask_amt; int tmp_mask; int cur_bb_spur; bool is2GHz = IS_CHAN_2GHZ(chan); memset(&mask_m, 0, sizeof(int8_t) * 123); memset(&mask_p, 0, sizeof(int8_t) * 123); ath9k_hw_get_channel_centers(ah, chan, &centers); freq = centers.synth_center; ah->config.spurmode = SPUR_ENABLE_EEPROM; for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz); if (AR_NO_SPUR == cur_bb_spur) break; if (is2GHz) cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ; else cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ; cur_bb_spur = cur_bb_spur - freq; if (IS_CHAN_HT40(chan)) { if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) && (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) { bb_spur = cur_bb_spur; break; } } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) && (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) { bb_spur = cur_bb_spur; break; } } if (AR_NO_SPUR == bb_spur) { REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK, AR_PHY_FORCE_CLKEN_CCK_MRC_MUX); return; } else { REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK, AR_PHY_FORCE_CLKEN_CCK_MRC_MUX); } bin = bb_spur * 320; tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0)); ENABLE_REGWRITE_BUFFER(ah); newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI | AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER | AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK | AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK); REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal); newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL | AR_PHY_SPUR_REG_ENABLE_MASK_PPM | AR_PHY_SPUR_REG_MASK_RATE_SELECT | AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI | SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH)); REG_WRITE(ah, AR_PHY_SPUR_REG, newVal); if (IS_CHAN_HT40(chan)) { if (bb_spur < 0) { spur_subchannel_sd = 1; bb_spur_off = bb_spur + 10; } else { spur_subchannel_sd = 0; bb_spur_off = bb_spur - 10; } } else { spur_subchannel_sd = 0; bb_spur_off = bb_spur; } if (IS_CHAN_HT40(chan)) spur_delta_phase = ((bb_spur * 262144) / 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE; else spur_delta_phase = ((bb_spur * 524288) / 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE; denominator = IS_CHAN_2GHZ(chan) ? 
44 : 40; spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff; newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC | SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) | SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE)); REG_WRITE(ah, AR_PHY_TIMING11, newVal); newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S; REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal); cur_bin = -6000; upper = bin + 100; lower = bin - 100; for (i = 0; i < 4; i++) { int pilot_mask = 0; int chan_mask = 0; int bp = 0; for (bp = 0; bp < 30; bp++) { if ((cur_bin > lower) && (cur_bin < upper)) { pilot_mask = pilot_mask | 0x1 << bp; chan_mask = chan_mask | 0x1 << bp; } cur_bin += 100; } cur_bin += inc[i]; REG_WRITE(ah, pilot_mask_reg[i], pilot_mask); REG_WRITE(ah, chan_mask_reg[i], chan_mask); } cur_vit_mask = 6100; upper = bin + 120; lower = bin - 120; for (i = 0; i < 123; i++) { if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) { /* workaround for gcc bug #37014 */ volatile int tmp_v = abs(cur_vit_mask - bin); if (tmp_v < 75) mask_amt = 1; else mask_amt = 0; if (cur_vit_mask < 0) mask_m[abs(cur_vit_mask / 100)] = mask_amt; else mask_p[cur_vit_mask / 100] = mask_amt; } cur_vit_mask -= 100; } tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28) | (mask_m[48] << 26) | (mask_m[49] << 24) | (mask_m[50] << 22) | (mask_m[51] << 20) | (mask_m[52] << 18) | (mask_m[53] << 16) | (mask_m[54] << 14) | (mask_m[55] << 12) | (mask_m[56] << 10) | (mask_m[57] << 8) | (mask_m[58] << 6) | (mask_m[59] << 4) | (mask_m[60] << 2) | (mask_m[61] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask); REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask); tmp_mask = (mask_m[31] << 28) | (mask_m[32] << 26) | (mask_m[33] << 24) | (mask_m[34] << 22) | (mask_m[35] << 20) | (mask_m[36] << 18) | (mask_m[37] << 16) | (mask_m[48] << 14) | (mask_m[39] << 12) | (mask_m[40] << 10) | (mask_m[41] << 8) | (mask_m[42] << 6) | (mask_m[43] << 4) | (mask_m[44] << 2) | (mask_m[45] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask); tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28) | (mask_m[18] << 26) | (mask_m[18] << 24) | (mask_m[20] << 22) | (mask_m[20] << 20) | (mask_m[22] << 18) | (mask_m[22] << 16) | (mask_m[24] << 14) | (mask_m[24] << 12) | (mask_m[25] << 10) | (mask_m[26] << 8) | (mask_m[27] << 6) | (mask_m[28] << 4) | (mask_m[29] << 2) | (mask_m[30] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask); tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28) | (mask_m[2] << 26) | (mask_m[3] << 24) | (mask_m[4] << 22) | (mask_m[5] << 20) | (mask_m[6] << 18) | (mask_m[7] << 16) | (mask_m[8] << 14) | (mask_m[9] << 12) | (mask_m[10] << 10) | (mask_m[11] << 8) | (mask_m[12] << 6) | (mask_m[13] << 4) | (mask_m[14] << 2) | (mask_m[15] << 0); REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask); tmp_mask = (mask_p[15] << 28) | (mask_p[14] << 26) | (mask_p[13] << 24) | (mask_p[12] << 22) | (mask_p[11] << 20) | (mask_p[10] << 18) | (mask_p[9] << 16) | (mask_p[8] << 14) | (mask_p[7] << 12) | (mask_p[6] << 10) | (mask_p[5] << 8) | (mask_p[4] << 6) | (mask_p[3] << 4) | (mask_p[2] << 2) | (mask_p[1] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask); tmp_mask = (mask_p[30] << 28) | (mask_p[29] << 26) | (mask_p[28] << 24) | (mask_p[27] << 22) | (mask_p[26] << 20) | (mask_p[25] << 18) | (mask_p[24] << 16) | (mask_p[23] << 14) | (mask_p[22] << 12) | (mask_p[21] << 10) | (mask_p[20] << 8) | (mask_p[19] << 6) | 
(mask_p[18] << 4) | (mask_p[17] << 2) | (mask_p[16] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask); tmp_mask = (mask_p[45] << 28) | (mask_p[44] << 26) | (mask_p[43] << 24) | (mask_p[42] << 22) | (mask_p[41] << 20) | (mask_p[40] << 18) | (mask_p[39] << 16) | (mask_p[38] << 14) | (mask_p[37] << 12) | (mask_p[36] << 10) | (mask_p[35] << 8) | (mask_p[34] << 6) | (mask_p[33] << 4) | (mask_p[32] << 2) | (mask_p[31] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask); tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28) | (mask_p[59] << 26) | (mask_p[58] << 24) | (mask_p[57] << 22) | (mask_p[56] << 20) | (mask_p[55] << 18) | (mask_p[54] << 16) | (mask_p[53] << 14) | (mask_p[52] << 12) | (mask_p[51] << 10) | (mask_p[50] << 8) | (mask_p[49] << 6) | (mask_p[48] << 4) | (mask_p[47] << 2) | (mask_p[46] << 0); REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask); REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask); REGWRITE_BUFFER_FLUSH(ah); } static void ar9002_olc_init(struct ath_hw *ah) { u32 i; if (!OLC_FOR_AR9280_20_LATER) return; if (OLC_FOR_AR9287_10_LATER) { REG_SET_BIT(ah, AR_PHY_TX_PWRCTRL9, AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL); ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TXPC0, AR9287_AN_TXPC0_TXPCMODE, AR9287_AN_TXPC0_TXPCMODE_S, AR9287_AN_TXPC0_TXPCMODE_TEMPSENSE); udelay(100); } else { for (i = 0; i < AR9280_TX_GAIN_TABLE_SIZE; i++) ah->originalGain[i] = MS(REG_READ(ah, AR_PHY_TX_GAIN_TBL1 + i * 4), AR_PHY_TX_GAIN); ah->PDADCdelta = 0; } } static u32 ar9002_hw_compute_pll_control(struct ath_hw *ah, struct ath9k_channel *chan) { u32 pll; pll = SM(0x5, AR_RTC_9160_PLL_REFDIV); if (chan && IS_CHAN_HALF_RATE(chan)) pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL); else if (chan && IS_CHAN_QUARTER_RATE(chan)) pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL); if (chan && IS_CHAN_5GHZ(chan)) { if (IS_CHAN_A_FAST_CLOCK(ah, chan)) pll = 0x142c; else if (AR_SREV_9280_20(ah)) pll = 0x2850; else pll |= SM(0x28, AR_RTC_9160_PLL_DIV); } else { pll |= SM(0x2c, AR_RTC_9160_PLL_DIV); } return pll; } static void ar9002_hw_do_getnf(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]) { int16_t nf; nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR); nfarray[0] = sign_extend32(nf, 8); nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR9280_PHY_EXT_MINCCA_PWR); if (IS_CHAN_HT40(ah->curchan)) nfarray[3] = sign_extend32(nf, 8); if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) return; nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR); nfarray[1] = sign_extend32(nf, 8); nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR9280_PHY_CH1_EXT_MINCCA_PWR); if (IS_CHAN_HT40(ah->curchan)) nfarray[4] = sign_extend32(nf, 8); } static void ar9002_hw_set_nf_limits(struct ath_hw *ah) { if (AR_SREV_9285(ah)) { ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9285_2GHZ; ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9285_2GHZ; ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9285_2GHZ; } else if (AR_SREV_9287(ah)) { ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9287_2GHZ; ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9287_2GHZ; ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9287_2GHZ; } else if (AR_SREV_9271(ah)) { ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9271_2GHZ; ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9271_2GHZ; ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9271_2GHZ; } else { ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9280_2GHZ; ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9280_2GHZ; ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9280_2GHZ; ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9280_5GHZ; ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9280_5GHZ; 
ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_9280_5GHZ; } } static void ar9002_hw_antdiv_comb_conf_get(struct ath_hw *ah, struct ath_hw_antcomb_conf *antconf) { u32 regval; regval = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL); antconf->main_lna_conf = (regval & AR_PHY_9285_ANT_DIV_MAIN_LNACONF) >> AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S; antconf->alt_lna_conf = (regval & AR_PHY_9285_ANT_DIV_ALT_LNACONF) >> AR_PHY_9285_ANT_DIV_ALT_LNACONF_S; antconf->fast_div_bias = (regval & AR_PHY_9285_FAST_DIV_BIAS) >> AR_PHY_9285_FAST_DIV_BIAS_S; antconf->lna1_lna2_delta = -3; antconf->div_group = 0; } static void ar9002_hw_antdiv_comb_conf_set(struct ath_hw *ah, struct ath_hw_antcomb_conf *antconf) { u32 regval; regval = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL); regval &= ~(AR_PHY_9285_ANT_DIV_MAIN_LNACONF | AR_PHY_9285_ANT_DIV_ALT_LNACONF | AR_PHY_9285_FAST_DIV_BIAS); regval |= ((antconf->main_lna_conf << AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S) & AR_PHY_9285_ANT_DIV_MAIN_LNACONF); regval |= ((antconf->alt_lna_conf << AR_PHY_9285_ANT_DIV_ALT_LNACONF_S) & AR_PHY_9285_ANT_DIV_ALT_LNACONF); regval |= ((antconf->fast_div_bias << AR_PHY_9285_FAST_DIV_BIAS_S) & AR_PHY_9285_FAST_DIV_BIAS); REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval); } void ar9002_hw_attach_phy_ops(struct ath_hw *ah) { struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); struct ath_hw_ops *ops = ath9k_hw_ops(ah); priv_ops->set_rf_regs = NULL; priv_ops->rf_alloc_ext_banks = NULL; priv_ops->rf_free_ext_banks = NULL; priv_ops->rf_set_freq = ar9002_hw_set_channel; priv_ops->spur_mitigate_freq = ar9002_hw_spur_mitigate; priv_ops->olc_init = ar9002_olc_init; priv_ops->compute_pll_control = ar9002_hw_compute_pll_control; priv_ops->do_getnf = ar9002_hw_do_getnf; ops->antdiv_comb_conf_get = ar9002_hw_antdiv_comb_conf_get; ops->antdiv_comb_conf_set = ar9002_hw_antdiv_comb_conf_set; ar9002_hw_set_nf_limits(ah); }
gpl-2.0
rickyzhang82/odroid-linux
arch/mips/mm/tlb-r3k.c
2581
6556
/* * r2300.c: R2000 and R3000 specific mmu/cache code. * * Copyright (C) 1996 David S. Miller (davem@davemloft.net) * * with a lot of changes to make this thing work for R3000s * Tx39XX R4k style caches added. HK * Copyright (C) 1998, 1999, 2000 Harald Koerfgen * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov * Copyright (C) 2002 Ralf Baechle * Copyright (C) 2002 Maciej W. Rozycki */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/mm.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/mmu_context.h> #include <asm/system.h> #include <asm/isadep.h> #include <asm/io.h> #include <asm/bootinfo.h> #include <asm/cpu.h> #undef DEBUG_TLB extern void build_tlb_refill_handler(void); /* CP0 hazard avoidance. */ #define BARRIER \ __asm__ __volatile__( \ ".set push\n\t" \ ".set noreorder\n\t" \ "nop\n\t" \ ".set pop\n\t") int r3k_have_wired_reg; /* should be in cpu_data? */ /* TLB operations. */ void local_flush_tlb_all(void) { unsigned long flags; unsigned long old_ctx; int entry; #ifdef DEBUG_TLB printk("[tlball]"); #endif local_irq_save(flags); old_ctx = read_c0_entryhi() & ASID_MASK; write_c0_entrylo0(0); entry = r3k_have_wired_reg ? read_c0_wired() : 8; for (; entry < current_cpu_data.tlbsize; entry++) { write_c0_index(entry << 8); write_c0_entryhi((entry | 0x80000) << 12); BARRIER; tlb_write_indexed(); } write_c0_entryhi(old_ctx); local_irq_restore(flags); } void local_flush_tlb_mm(struct mm_struct *mm) { int cpu = smp_processor_id(); if (cpu_context(cpu, mm) != 0) { #ifdef DEBUG_TLB printk("[tlbmm<%lu>]", (unsigned long)cpu_context(cpu, mm)); #endif drop_mmu_context(mm, cpu); } } void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; int cpu = smp_processor_id(); if (cpu_context(cpu, mm) != 0) { unsigned long size, flags; #ifdef DEBUG_TLB printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", cpu_context(cpu, mm) & ASID_MASK, start, end); #endif local_irq_save(flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; if (size <= current_cpu_data.tlbsize) { int oldpid = read_c0_entryhi() & ASID_MASK; int newpid = cpu_context(cpu, mm) & ASID_MASK; start &= PAGE_MASK; end += PAGE_SIZE - 1; end &= PAGE_MASK; while (start < end) { int idx; write_c0_entryhi(start | newpid); start += PAGE_SIZE; /* BARRIER */ tlb_probe(); idx = read_c0_index(); write_c0_entrylo0(0); write_c0_entryhi(KSEG0); if (idx < 0) /* BARRIER */ continue; tlb_write_indexed(); } write_c0_entryhi(oldpid); } else { drop_mmu_context(mm, cpu); } local_irq_restore(flags); } } void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) { unsigned long size, flags; #ifdef DEBUG_TLB printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", start, end); #endif local_irq_save(flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; if (size <= current_cpu_data.tlbsize) { int pid = read_c0_entryhi(); start &= PAGE_MASK; end += PAGE_SIZE - 1; end &= PAGE_MASK; while (start < end) { int idx; write_c0_entryhi(start); start += PAGE_SIZE; /* BARRIER */ tlb_probe(); idx = read_c0_index(); write_c0_entrylo0(0); write_c0_entryhi(KSEG0); if (idx < 0) /* BARRIER */ continue; tlb_write_indexed(); } write_c0_entryhi(pid); } else { local_flush_tlb_all(); } local_irq_restore(flags); } void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { int cpu = smp_processor_id(); if (!vma || cpu_context(cpu, vma->vm_mm) != 0) { unsigned long flags; int oldpid, newpid, idx; #ifdef DEBUG_TLB 
printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); #endif newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK; page &= PAGE_MASK; local_irq_save(flags); oldpid = read_c0_entryhi() & ASID_MASK; write_c0_entryhi(page | newpid); BARRIER; tlb_probe(); idx = read_c0_index(); write_c0_entrylo0(0); write_c0_entryhi(KSEG0); if (idx < 0) /* BARRIER */ goto finish; tlb_write_indexed(); finish: write_c0_entryhi(oldpid); local_irq_restore(flags); } } void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) { unsigned long flags; int idx, pid; /* * Handle debugger faulting in for debugee. */ if (current->active_mm != vma->vm_mm) return; pid = read_c0_entryhi() & ASID_MASK; #ifdef DEBUG_TLB if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) { printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n", (cpu_context(cpu, vma->vm_mm)), pid); } #endif local_irq_save(flags); address &= PAGE_MASK; write_c0_entryhi(address | pid); BARRIER; tlb_probe(); idx = read_c0_index(); write_c0_entrylo0(pte_val(pte)); write_c0_entryhi(address | pid); if (idx < 0) { /* BARRIER */ tlb_write_random(); } else { tlb_write_indexed(); } write_c0_entryhi(pid); local_irq_restore(flags); } void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, unsigned long entryhi, unsigned long pagemask) { unsigned long flags; unsigned long old_ctx; static unsigned long wired = 0; if (r3k_have_wired_reg) { /* TX39XX */ unsigned long old_pagemask; unsigned long w; #ifdef DEBUG_TLB printk("[tlbwired<entry lo0 %8x, hi %8x\n, pagemask %8x>]\n", entrylo0, entryhi, pagemask); #endif local_irq_save(flags); /* Save old context and create impossible VPN2 value */ old_ctx = read_c0_entryhi() & ASID_MASK; old_pagemask = read_c0_pagemask(); w = read_c0_wired(); write_c0_wired(w + 1); write_c0_index(w << 8); write_c0_pagemask(pagemask); write_c0_entryhi(entryhi); write_c0_entrylo0(entrylo0); BARRIER; tlb_write_indexed(); write_c0_entryhi(old_ctx); write_c0_pagemask(old_pagemask); local_flush_tlb_all(); local_irq_restore(flags); } else if (wired < 8) { #ifdef DEBUG_TLB printk("[tlbwired<entry lo0 %8x, hi %8x\n>]\n", entrylo0, entryhi); #endif local_irq_save(flags); old_ctx = read_c0_entryhi() & ASID_MASK; write_c0_entrylo0(entrylo0); write_c0_entryhi(entryhi); write_c0_index(wired); wired++; /* BARRIER */ tlb_write_indexed(); write_c0_entryhi(old_ctx); local_flush_tlb_all(); local_irq_restore(flags); } } void __cpuinit tlb_init(void) { local_flush_tlb_all(); build_tlb_refill_handler(); }
gpl-2.0
GalaxyTab4/android_kernel_samsung_matissevewifi
drivers/net/wireless/ti/wl1251/sdio.c
2581
8621
/* * wl12xx SDIO routines * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * * Copyright (C) 2005 Texas Instruments Incorporated * Copyright (C) 2008 Google Inc * Copyright (C) 2009 Bob Copeland (me@bobcopeland.com) */ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/mmc/sdio_func.h> #include <linux/mmc/sdio_ids.h> #include <linux/platform_device.h> #include <linux/wl12xx.h> #include <linux/irq.h> #include <linux/pm_runtime.h> #include "wl1251.h" #ifndef SDIO_VENDOR_ID_TI #define SDIO_VENDOR_ID_TI 0x104c #endif #ifndef SDIO_DEVICE_ID_TI_WL1251 #define SDIO_DEVICE_ID_TI_WL1251 0x9066 #endif struct wl1251_sdio { struct sdio_func *func; u32 elp_val; }; static struct sdio_func *wl_to_func(struct wl1251 *wl) { struct wl1251_sdio *wl_sdio = wl->if_priv; return wl_sdio->func; } static void wl1251_sdio_interrupt(struct sdio_func *func) { struct wl1251 *wl = sdio_get_drvdata(func); wl1251_debug(DEBUG_IRQ, "IRQ"); /* FIXME should be synchronous for sdio */ ieee80211_queue_work(wl->hw, &wl->irq_work); } static const struct sdio_device_id wl1251_devices[] = { { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1251) }, {} }; MODULE_DEVICE_TABLE(sdio, wl1251_devices); static void wl1251_sdio_read(struct wl1251 *wl, int addr, void *buf, size_t len) { int ret; struct sdio_func *func = wl_to_func(wl); sdio_claim_host(func); ret = sdio_memcpy_fromio(func, buf, addr, len); if (ret) wl1251_error("sdio read failed (%d)", ret); sdio_release_host(func); } static void wl1251_sdio_write(struct wl1251 *wl, int addr, void *buf, size_t len) { int ret; struct sdio_func *func = wl_to_func(wl); sdio_claim_host(func); ret = sdio_memcpy_toio(func, addr, buf, len); if (ret) wl1251_error("sdio write failed (%d)", ret); sdio_release_host(func); } static void wl1251_sdio_read_elp(struct wl1251 *wl, int addr, u32 *val) { int ret = 0; struct wl1251_sdio *wl_sdio = wl->if_priv; struct sdio_func *func = wl_sdio->func; /* * The hardware only supports RAW (read after write) access for * reading, regular sdio_readb won't work here (it interprets * the unused bits of CMD52 as write data even if we send read * request). 
*/ sdio_claim_host(func); *val = sdio_writeb_readb(func, wl_sdio->elp_val, addr, &ret); sdio_release_host(func); if (ret) wl1251_error("sdio_readb failed (%d)", ret); } static void wl1251_sdio_write_elp(struct wl1251 *wl, int addr, u32 val) { int ret = 0; struct wl1251_sdio *wl_sdio = wl->if_priv; struct sdio_func *func = wl_sdio->func; sdio_claim_host(func); sdio_writeb(func, val, addr, &ret); sdio_release_host(func); if (ret) wl1251_error("sdio_writeb failed (%d)", ret); else wl_sdio->elp_val = val; } static void wl1251_sdio_reset(struct wl1251 *wl) { } static void wl1251_sdio_enable_irq(struct wl1251 *wl) { struct sdio_func *func = wl_to_func(wl); sdio_claim_host(func); sdio_claim_irq(func, wl1251_sdio_interrupt); sdio_release_host(func); } static void wl1251_sdio_disable_irq(struct wl1251 *wl) { struct sdio_func *func = wl_to_func(wl); sdio_claim_host(func); sdio_release_irq(func); sdio_release_host(func); } /* Interrupts when using dedicated WLAN_IRQ pin */ static irqreturn_t wl1251_line_irq(int irq, void *cookie) { struct wl1251 *wl = cookie; ieee80211_queue_work(wl->hw, &wl->irq_work); return IRQ_HANDLED; } static void wl1251_enable_line_irq(struct wl1251 *wl) { return enable_irq(wl->irq); } static void wl1251_disable_line_irq(struct wl1251 *wl) { return disable_irq(wl->irq); } static int wl1251_sdio_set_power(struct wl1251 *wl, bool enable) { struct sdio_func *func = wl_to_func(wl); int ret; if (enable) { /* * Power is controlled by runtime PM, but we still call board * callback in case it wants to do any additional setup, * for example enabling clock buffer for the module. */ if (wl->set_power) wl->set_power(true); ret = pm_runtime_get_sync(&func->dev); if (ret < 0) { pm_runtime_put_sync(&func->dev); goto out; } sdio_claim_host(func); sdio_enable_func(func); sdio_release_host(func); } else { sdio_claim_host(func); sdio_disable_func(func); sdio_release_host(func); ret = pm_runtime_put_sync(&func->dev); if (ret < 0) goto out; if (wl->set_power) wl->set_power(false); } out: return ret; } static struct wl1251_if_operations wl1251_sdio_ops = { .read = wl1251_sdio_read, .write = wl1251_sdio_write, .write_elp = wl1251_sdio_write_elp, .read_elp = wl1251_sdio_read_elp, .reset = wl1251_sdio_reset, .power = wl1251_sdio_set_power, }; static int wl1251_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) { int ret; struct wl1251 *wl; struct ieee80211_hw *hw; struct wl1251_sdio *wl_sdio; const struct wl12xx_platform_data *wl12xx_board_data; hw = wl1251_alloc_hw(); if (IS_ERR(hw)) return PTR_ERR(hw); wl = hw->priv; wl_sdio = kzalloc(sizeof(*wl_sdio), GFP_KERNEL); if (wl_sdio == NULL) { ret = -ENOMEM; goto out_free_hw; } sdio_claim_host(func); ret = sdio_enable_func(func); if (ret) goto release; sdio_set_block_size(func, 512); sdio_release_host(func); SET_IEEE80211_DEV(hw, &func->dev); wl_sdio->func = func; wl->if_priv = wl_sdio; wl->if_ops = &wl1251_sdio_ops; wl12xx_board_data = wl12xx_get_platform_data(); if (!IS_ERR(wl12xx_board_data)) { wl->set_power = wl12xx_board_data->set_power; wl->irq = wl12xx_board_data->irq; wl->use_eeprom = wl12xx_board_data->use_eeprom; } if (wl->irq) { irq_set_status_flags(wl->irq, IRQ_NOAUTOEN); ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl); if (ret < 0) { wl1251_error("request_irq() failed: %d", ret); goto disable; } irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq; wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq; wl1251_info("using dedicated interrupt line"); } else { 
wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq; wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq; wl1251_info("using SDIO interrupt"); } ret = wl1251_init_ieee80211(wl); if (ret) goto out_free_irq; sdio_set_drvdata(func, wl); /* Tell PM core that we don't need the card to be powered now */ pm_runtime_put_noidle(&func->dev); return ret; out_free_irq: if (wl->irq) free_irq(wl->irq, wl); disable: sdio_claim_host(func); sdio_disable_func(func); release: sdio_release_host(func); kfree(wl_sdio); out_free_hw: wl1251_free_hw(wl); return ret; } static void wl1251_sdio_remove(struct sdio_func *func) { struct wl1251 *wl = sdio_get_drvdata(func); struct wl1251_sdio *wl_sdio = wl->if_priv; /* Undo decrement done above in wl1251_probe */ pm_runtime_get_noresume(&func->dev); if (wl->irq) free_irq(wl->irq, wl); wl1251_free_hw(wl); kfree(wl_sdio); sdio_claim_host(func); sdio_release_irq(func); sdio_disable_func(func); sdio_release_host(func); } static int wl1251_suspend(struct device *dev) { /* * Tell MMC/SDIO core it's OK to power down the card * (if it isn't already), but not to remove it completely. */ return 0; } static int wl1251_resume(struct device *dev) { return 0; } static const struct dev_pm_ops wl1251_sdio_pm_ops = { .suspend = wl1251_suspend, .resume = wl1251_resume, }; static struct sdio_driver wl1251_sdio_driver = { .name = "wl1251_sdio", .id_table = wl1251_devices, .probe = wl1251_sdio_probe, .remove = wl1251_sdio_remove, .drv.pm = &wl1251_sdio_pm_ops, }; static int __init wl1251_sdio_init(void) { int err; err = sdio_register_driver(&wl1251_sdio_driver); if (err) wl1251_error("failed to register sdio driver: %d", err); return err; } static void __exit wl1251_sdio_exit(void) { sdio_unregister_driver(&wl1251_sdio_driver); wl1251_notice("unloaded"); } module_init(wl1251_sdio_init); module_exit(wl1251_sdio_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
gpl-2.0
keily90/tf101-nv-linux
fs/9p/vfs_dentry.c
3093
3580
/*
 * linux/fs/9p/vfs_dentry.c
 *
 * This file contains vfs dentry ops for the 9P2000 protocol.
 *
 * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA 02111-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/namei.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "fid.h"

/**
 * v9fs_dentry_delete - called when dentry refcount equals 0
 * @dentry: dentry in question
 *
 * By returning 1 here we should remove caching of unused
 * dentry components.
 *
 */
static int v9fs_dentry_delete(const struct dentry *dentry)
{
	P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n",
		   dentry->d_name.name, dentry);

	return 1;
}

/**
 * v9fs_cached_dentry_delete - called when dentry refcount equals 0
 * @dentry: dentry in question
 *
 */
static int v9fs_cached_dentry_delete(const struct dentry *dentry)
{
	P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n",
		   dentry->d_name.name, dentry);

	/* Don't cache negative dentries */
	if (!dentry->d_inode)
		return 1;
	return 0;
}

/**
 * v9fs_dentry_release - called when dentry is going to be freed
 * @dentry: dentry that is being released
 *
 */
static void v9fs_dentry_release(struct dentry *dentry)
{
	struct v9fs_dentry *dent;
	struct p9_fid *temp, *current_fid;

	P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n",
		   dentry->d_name.name, dentry);
	dent = dentry->d_fsdata;
	if (dent) {
		list_for_each_entry_safe(current_fid, temp, &dent->fidlist,
					 dlist) {
			p9_client_clunk(current_fid);
		}

		kfree(dent);
		dentry->d_fsdata = NULL;
	}
}

static int v9fs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	struct p9_fid *fid;
	struct inode *inode;
	struct v9fs_inode *v9inode;

	if (nd->flags & LOOKUP_RCU)
		return -ECHILD;

	inode = dentry->d_inode;
	if (!inode)
		goto out_valid;

	v9inode = V9FS_I(inode);
	if (v9inode->cache_validity & V9FS_INO_INVALID_ATTR) {
		int retval;
		struct v9fs_session_info *v9ses;
		fid = v9fs_fid_lookup(dentry);
		if (IS_ERR(fid))
			return PTR_ERR(fid);

		v9ses = v9fs_inode2v9ses(inode);
		if (v9fs_proto_dotl(v9ses))
			retval = v9fs_refresh_inode_dotl(fid, inode);
		else
			retval = v9fs_refresh_inode(fid, inode);
		if (retval == -ENOENT)
			return 0;
		if (retval < 0)
			return retval;
	}
out_valid:
	return 1;
}

const struct dentry_operations v9fs_cached_dentry_operations = {
	.d_revalidate = v9fs_lookup_revalidate,
	.d_delete = v9fs_cached_dentry_delete,
	.d_release = v9fs_dentry_release,
};

const struct dentry_operations v9fs_dentry_operations = {
	.d_delete = v9fs_dentry_delete,
	.d_release = v9fs_dentry_release,
};
gpl-2.0
AOSP-TEAM/android_kernel_google_tuna
drivers/net/tokenring/tmspci.c
3349
6354
/* * tmspci.c: A generic network driver for TMS380-based PCI token ring cards. * * Written 1999 by Adam Fritzler * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * This driver module supports the following cards: * - SysKonnect TR4/16(+) PCI (SK-4590) * - SysKonnect TR4/16 PCI (SK-4591) * - Compaq TR 4/16 PCI * - Thomas-Conrad TC4048 4/16 PCI * - 3Com 3C339 Token Link Velocity * * Maintainer(s): * AF Adam Fritzler * * Modification History: * 30-Dec-99 AF Split off from the tms380tr driver. * 22-Jan-00 AF Updated to use indirect read/writes * 23-Nov-00 JG New PCI API, cleanups * * TODO: * 1. See if we can use MMIO instead of port accesses * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/trdevice.h> #include <asm/system.h> #include <asm/io.h> #include <asm/irq.h> #include "tms380tr.h" static char version[] __devinitdata = "tmspci.c: v1.02 23/11/2000 by Adam Fritzler\n"; #define TMS_PCI_IO_EXTENT 32 struct card_info { unsigned char nselout[2]; /* NSELOUT vals for 4mb([0]) and 16mb([1]) */ char *name; }; static struct card_info card_info_table[] = { { {0x03, 0x01}, "Compaq 4/16 TR PCI"}, { {0x03, 0x01}, "SK NET TR 4/16 PCI"}, { {0x03, 0x01}, "Thomas-Conrad TC4048 PCI 4/16"}, { {0x03, 0x01}, "3Com Token Link Velocity"}, }; static DEFINE_PCI_DEVICE_TABLE(tmspci_pci_tbl) = { { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_TR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, { PCI_VENDOR_ID_TCONRAD, PCI_DEVICE_ID_TCONRAD_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C339, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, tmspci_pci_tbl); MODULE_LICENSE("GPL"); static void tms_pci_read_eeprom(struct net_device *dev); static unsigned short tms_pci_setnselout_pins(struct net_device *dev); static unsigned short tms_pci_sifreadb(struct net_device *dev, unsigned short reg) { return inb(dev->base_addr + reg); } static unsigned short tms_pci_sifreadw(struct net_device *dev, unsigned short reg) { return inw(dev->base_addr + reg); } static void tms_pci_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg) { outb(val, dev->base_addr + reg); } static void tms_pci_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg) { outw(val, dev->base_addr + reg); } static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_device_id *ent) { static int versionprinted; struct net_device *dev; struct net_local *tp; int ret; unsigned int pci_irq_line; unsigned long pci_ioaddr; struct card_info *cardinfo = &card_info_table[ent->driver_data]; if (versionprinted++ == 0) printk("%s", version); if (pci_enable_device(pdev)) return -EIO; /* Remove I/O space marker in bit 0. */ pci_irq_line = pdev->irq; pci_ioaddr = pci_resource_start (pdev, 0); /* At this point we have found a valid card. 
*/ dev = alloc_trdev(sizeof(struct net_local)); if (!dev) return -ENOMEM; if (!request_region(pci_ioaddr, TMS_PCI_IO_EXTENT, dev->name)) { ret = -EBUSY; goto err_out_trdev; } dev->base_addr = pci_ioaddr; dev->irq = pci_irq_line; dev->dma = 0; dev_info(&pdev->dev, "%s\n", cardinfo->name); dev_info(&pdev->dev, " IO: %#4lx IRQ: %d\n", dev->base_addr, dev->irq); tms_pci_read_eeprom(dev); dev_info(&pdev->dev, " Ring Station Address: %pM\n", dev->dev_addr); ret = tmsdev_init(dev, &pdev->dev); if (ret) { dev_info(&pdev->dev, "unable to get memory for dev->priv.\n"); goto err_out_region; } tp = netdev_priv(dev); tp->setnselout = tms_pci_setnselout_pins; tp->sifreadb = tms_pci_sifreadb; tp->sifreadw = tms_pci_sifreadw; tp->sifwriteb = tms_pci_sifwriteb; tp->sifwritew = tms_pci_sifwritew; memcpy(tp->ProductID, cardinfo->name, PROD_ID_SIZE + 1); tp->tmspriv = cardinfo; dev->netdev_ops = &tms380tr_netdev_ops; ret = request_irq(pdev->irq, tms380tr_interrupt, IRQF_SHARED, dev->name, dev); if (ret) goto err_out_tmsdev; pci_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); ret = register_netdev(dev); if (ret) goto err_out_irq; return 0; err_out_irq: free_irq(pdev->irq, dev); err_out_tmsdev: pci_set_drvdata(pdev, NULL); tmsdev_term(dev); err_out_region: release_region(pci_ioaddr, TMS_PCI_IO_EXTENT); err_out_trdev: free_netdev(dev); return ret; } /* * Reads MAC address from adapter RAM, which should've read it from * the onboard ROM. * * Calling this on a board that does not support it can be a very * dangerous thing. The Madge board, for instance, will lock your * machine hard when this is called. Luckily, its supported in a * separate driver. --ASF */ static void tms_pci_read_eeprom(struct net_device *dev) { int i; /* Address: 0000:0000 */ tms_pci_sifwritew(dev, 0, SIFADX); tms_pci_sifwritew(dev, 0, SIFADR); /* Read six byte MAC address data */ dev->addr_len = 6; for(i = 0; i < 6; i++) dev->dev_addr[i] = tms_pci_sifreadw(dev, SIFINC) >> 8; } static unsigned short tms_pci_setnselout_pins(struct net_device *dev) { unsigned short val = 0; struct net_local *tp = netdev_priv(dev); struct card_info *cardinfo = tp->tmspriv; if(tp->DataRate == SPEED_4) val |= cardinfo->nselout[0]; /* Set 4Mbps */ else val |= cardinfo->nselout[1]; /* Set 16Mbps */ return val; } static void __devexit tms_pci_detach (struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); BUG_ON(!dev); unregister_netdev(dev); release_region(dev->base_addr, TMS_PCI_IO_EXTENT); free_irq(dev->irq, dev); tmsdev_term(dev); free_netdev(dev); pci_set_drvdata(pdev, NULL); } static struct pci_driver tms_pci_driver = { .name = "tmspci", .id_table = tmspci_pci_tbl, .probe = tms_pci_attach, .remove = __devexit_p(tms_pci_detach), }; static int __init tms_pci_init (void) { return pci_register_driver(&tms_pci_driver); } static void __exit tms_pci_rmmod (void) { pci_unregister_driver (&tms_pci_driver); } module_init(tms_pci_init); module_exit(tms_pci_rmmod);
gpl-2.0
friedrich420/S4-AEL-GPE-LOLLIPOP
drivers/usb/serial/ark3116.c
3605
23494
/* * Copyright (C) 2009 by Bart Hartgers (bart.hartgers+ark3116@gmail.com) * Original version: * Copyright (C) 2006 * Simon Schulz (ark3116_driver <at> auctionant.de) * * ark3116 * - implements a driver for the arkmicro ark3116 chipset (vendor=0x6547, * productid=0x0232) (used in a datacable called KQ-U8A) * * Supports full modem status lines, break, hardware flow control. Does not * support software flow control, since I do not know how to enable it in hw. * * This driver is a essentially new implementation. I initially dug * into the old ark3116.c driver and suddenly realized the ark3116 is * a 16450 with a USB interface glued to it. See comments at the * bottom of this file. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/ioctl.h> #include <linux/tty.h> #include <linux/slab.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/serial.h> #include <linux/serial_reg.h> #include <linux/uaccess.h> #include <linux/mutex.h> #include <linux/spinlock.h> static bool debug; /* * Version information */ #define DRIVER_VERSION "v0.7" #define DRIVER_AUTHOR "Bart Hartgers <bart.hartgers+ark3116@gmail.com>" #define DRIVER_DESC "USB ARK3116 serial/IrDA driver" #define DRIVER_DEV_DESC "ARK3116 RS232/IrDA" #define DRIVER_NAME "ark3116" /* usb timeout of 1 second */ #define ARK_TIMEOUT (1*HZ) static const struct usb_device_id id_table[] = { { USB_DEVICE(0x6547, 0x0232) }, { USB_DEVICE(0x18ec, 0x3118) }, /* USB to IrDA adapter */ { }, }; MODULE_DEVICE_TABLE(usb, id_table); static int is_irda(struct usb_serial *serial) { struct usb_device *dev = serial->dev; if (le16_to_cpu(dev->descriptor.idVendor) == 0x18ec && le16_to_cpu(dev->descriptor.idProduct) == 0x3118) return 1; return 0; } struct ark3116_private { wait_queue_head_t delta_msr_wait; struct async_icount icount; int irda; /* 1 for irda device */ /* protects hw register updates */ struct mutex hw_lock; int quot; /* baudrate divisor */ __u32 lcr; /* line control register value */ __u32 hcr; /* handshake control register (0x8) * value */ __u32 mcr; /* modem contol register value */ /* protects the status values below */ spinlock_t status_lock; __u32 msr; /* modem status register value */ __u32 lsr; /* line status register value */ }; static int ark3116_write_reg(struct usb_serial *serial, unsigned reg, __u8 val) { int result; /* 0xfe 0x40 are magic values taken from original driver */ result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 0xfe, 0x40, val, reg, NULL, 0, ARK_TIMEOUT); return result; } static int ark3116_read_reg(struct usb_serial *serial, unsigned reg, unsigned char *buf) { int result; /* 0xfe 0xc0 are magic values taken from original driver */ result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 0xfe, 0xc0, 0, reg, buf, 1, ARK_TIMEOUT); if (result < 0) return result; else return buf[0]; } static inline int calc_divisor(int bps) { /* Original ark3116 made some exceptions in rounding here * because windows did the same. Assume that is not really * necessary. * Crystal is 12MHz, probably because of USB, but we divide by 4? 
*/ return (12000000 + 2*bps) / (4*bps); } static int ark3116_attach(struct usb_serial *serial) { struct usb_serial_port *port = serial->port[0]; struct ark3116_private *priv; /* make sure we have our end-points */ if ((serial->num_bulk_in == 0) || (serial->num_bulk_out == 0) || (serial->num_interrupt_in == 0)) { dev_err(&serial->dev->dev, "%s - missing endpoint - " "bulk in: %d, bulk out: %d, int in %d\n", KBUILD_MODNAME, serial->num_bulk_in, serial->num_bulk_out, serial->num_interrupt_in); return -EINVAL; } priv = kzalloc(sizeof(struct ark3116_private), GFP_KERNEL); if (!priv) return -ENOMEM; init_waitqueue_head(&priv->delta_msr_wait); mutex_init(&priv->hw_lock); spin_lock_init(&priv->status_lock); priv->irda = is_irda(serial); usb_set_serial_port_data(port, priv); /* setup the hardware */ ark3116_write_reg(serial, UART_IER, 0); /* disable DMA */ ark3116_write_reg(serial, UART_FCR, 0); /* handshake control */ priv->hcr = 0; ark3116_write_reg(serial, 0x8 , 0); /* modem control */ priv->mcr = 0; ark3116_write_reg(serial, UART_MCR, 0); if (!(priv->irda)) { ark3116_write_reg(serial, 0xb , 0); } else { ark3116_write_reg(serial, 0xb , 1); ark3116_write_reg(serial, 0xc , 0); ark3116_write_reg(serial, 0xd , 0x41); ark3116_write_reg(serial, 0xa , 1); } /* setup baudrate */ ark3116_write_reg(serial, UART_LCR, UART_LCR_DLAB); /* setup for 9600 8N1 */ priv->quot = calc_divisor(9600); ark3116_write_reg(serial, UART_DLL, priv->quot & 0xff); ark3116_write_reg(serial, UART_DLM, (priv->quot>>8) & 0xff); priv->lcr = UART_LCR_WLEN8; ark3116_write_reg(serial, UART_LCR, UART_LCR_WLEN8); ark3116_write_reg(serial, 0xe, 0); if (priv->irda) ark3116_write_reg(serial, 0x9, 0); dev_info(&serial->dev->dev, "%s using %s mode\n", KBUILD_MODNAME, priv->irda ? "IrDA" : "RS232"); return 0; } static void ark3116_release(struct usb_serial *serial) { struct usb_serial_port *port = serial->port[0]; struct ark3116_private *priv = usb_get_serial_port_data(port); /* device is closed, so URBs and DMA should be down */ usb_set_serial_port_data(port, NULL); mutex_destroy(&priv->hw_lock); kfree(priv); } static void ark3116_init_termios(struct tty_struct *tty) { struct ktermios *termios = tty->termios; *termios = tty_std_termios; termios->c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; termios->c_ispeed = 9600; termios->c_ospeed = 9600; } static void ark3116_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios) { struct usb_serial *serial = port->serial; struct ark3116_private *priv = usb_get_serial_port_data(port); struct ktermios *termios = tty->termios; unsigned int cflag = termios->c_cflag; int bps = tty_get_baud_rate(tty); int quot; __u8 lcr, hcr, eval; /* set data bit count */ switch (cflag & CSIZE) { case CS5: lcr = UART_LCR_WLEN5; break; case CS6: lcr = UART_LCR_WLEN6; break; case CS7: lcr = UART_LCR_WLEN7; break; default: case CS8: lcr = UART_LCR_WLEN8; break; } if (cflag & CSTOPB) lcr |= UART_LCR_STOP; if (cflag & PARENB) lcr |= UART_LCR_PARITY; if (!(cflag & PARODD)) lcr |= UART_LCR_EPAR; #ifdef CMSPAR if (cflag & CMSPAR) lcr |= UART_LCR_SPAR; #endif /* handshake control */ hcr = (cflag & CRTSCTS) ? 
0x03 : 0x00; /* calc baudrate */ dbg("%s - setting bps to %d", __func__, bps); eval = 0; switch (bps) { case 0: quot = calc_divisor(9600); break; default: if ((bps < 75) || (bps > 3000000)) bps = 9600; quot = calc_divisor(bps); break; case 460800: eval = 1; quot = calc_divisor(bps); break; case 921600: eval = 2; quot = calc_divisor(bps); break; } /* Update state: synchronize */ mutex_lock(&priv->hw_lock); /* keep old LCR_SBC bit */ lcr |= (priv->lcr & UART_LCR_SBC); dbg("%s - setting hcr:0x%02x,lcr:0x%02x,quot:%d", __func__, hcr, lcr, quot); /* handshake control */ if (priv->hcr != hcr) { priv->hcr = hcr; ark3116_write_reg(serial, 0x8, hcr); } /* baudrate */ if (priv->quot != quot) { priv->quot = quot; priv->lcr = lcr; /* need to write lcr anyway */ /* disable DMA since transmit/receive is * shadowed by UART_DLL */ ark3116_write_reg(serial, UART_FCR, 0); ark3116_write_reg(serial, UART_LCR, lcr|UART_LCR_DLAB); ark3116_write_reg(serial, UART_DLL, quot & 0xff); ark3116_write_reg(serial, UART_DLM, (quot>>8) & 0xff); /* restore lcr */ ark3116_write_reg(serial, UART_LCR, lcr); /* magic baudrate thingy: not sure what it does, * but windows does this as well. */ ark3116_write_reg(serial, 0xe, eval); /* enable DMA */ ark3116_write_reg(serial, UART_FCR, UART_FCR_DMA_SELECT); } else if (priv->lcr != lcr) { priv->lcr = lcr; ark3116_write_reg(serial, UART_LCR, lcr); } mutex_unlock(&priv->hw_lock); /* check for software flow control */ if (I_IXOFF(tty) || I_IXON(tty)) { dev_warn(&serial->dev->dev, "%s: don't know how to do software flow control\n", KBUILD_MODNAME); } /* Don't rewrite B0 */ if (tty_termios_baud_rate(termios)) tty_termios_encode_baud_rate(termios, bps, bps); } static void ark3116_close(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; if (serial->dev) { /* disable DMA */ ark3116_write_reg(serial, UART_FCR, 0); /* deactivate interrupts */ ark3116_write_reg(serial, UART_IER, 0); usb_serial_generic_close(port); if (serial->num_interrupt_in) usb_kill_urb(port->interrupt_in_urb); } } static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port) { struct ark3116_private *priv = usb_get_serial_port_data(port); struct usb_serial *serial = port->serial; unsigned char *buf; int result; buf = kmalloc(1, GFP_KERNEL); if (buf == NULL) return -ENOMEM; result = usb_serial_generic_open(tty, port); if (result) { dbg("%s - usb_serial_generic_open failed: %d", __func__, result); goto err_out; } /* remove any data still left: also clears error state */ ark3116_read_reg(serial, UART_RX, buf); /* read modem status */ priv->msr = ark3116_read_reg(serial, UART_MSR, buf); /* read line status */ priv->lsr = ark3116_read_reg(serial, UART_LSR, buf); result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (result) { dev_err(&port->dev, "submit irq_in urb failed %d\n", result); ark3116_close(port); goto err_out; } /* activate interrupts */ ark3116_write_reg(port->serial, UART_IER, UART_IER_MSI|UART_IER_RLSI); /* enable DMA */ ark3116_write_reg(port->serial, UART_FCR, UART_FCR_DMA_SELECT); /* setup termios */ if (tty) ark3116_set_termios(tty, port, NULL); err_out: kfree(buf); return result; } static int ark3116_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount) { struct usb_serial_port *port = tty->driver_data; struct ark3116_private *priv = usb_get_serial_port_data(port); struct async_icount cnow = priv->icount; icount->cts = cnow.cts; icount->dsr = cnow.dsr; icount->rng = cnow.rng; icount->dcd = cnow.dcd; icount->rx = cnow.rx; icount->tx = 
cnow.tx; icount->frame = cnow.frame; icount->overrun = cnow.overrun; icount->parity = cnow.parity; icount->brk = cnow.brk; icount->buf_overrun = cnow.buf_overrun; return 0; } static int ark3116_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct usb_serial_port *port = tty->driver_data; struct ark3116_private *priv = usb_get_serial_port_data(port); struct serial_struct serstruct; void __user *user_arg = (void __user *)arg; switch (cmd) { case TIOCGSERIAL: /* XXX: Some of these values are probably wrong. */ memset(&serstruct, 0, sizeof(serstruct)); serstruct.type = PORT_16654; serstruct.line = port->serial->minor; serstruct.port = port->number; serstruct.custom_divisor = 0; serstruct.baud_base = 460800; if (copy_to_user(user_arg, &serstruct, sizeof(serstruct))) return -EFAULT; return 0; case TIOCSSERIAL: if (copy_from_user(&serstruct, user_arg, sizeof(serstruct))) return -EFAULT; return 0; case TIOCMIWAIT: for (;;) { struct async_icount prev = priv->icount; interruptible_sleep_on(&priv->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; if ((prev.rng == priv->icount.rng) && (prev.dsr == priv->icount.dsr) && (prev.dcd == priv->icount.dcd) && (prev.cts == priv->icount.cts)) return -EIO; if ((arg & TIOCM_RNG && (prev.rng != priv->icount.rng)) || (arg & TIOCM_DSR && (prev.dsr != priv->icount.dsr)) || (arg & TIOCM_CD && (prev.dcd != priv->icount.dcd)) || (arg & TIOCM_CTS && (prev.cts != priv->icount.cts))) return 0; } break; } return -ENOIOCTLCMD; } static int ark3116_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct ark3116_private *priv = usb_get_serial_port_data(port); __u32 status; __u32 ctrl; unsigned long flags; mutex_lock(&priv->hw_lock); ctrl = priv->mcr; mutex_unlock(&priv->hw_lock); spin_lock_irqsave(&priv->status_lock, flags); status = priv->msr; spin_unlock_irqrestore(&priv->status_lock, flags); return (status & UART_MSR_DSR ? TIOCM_DSR : 0) | (status & UART_MSR_CTS ? TIOCM_CTS : 0) | (status & UART_MSR_RI ? TIOCM_RI : 0) | (status & UART_MSR_DCD ? TIOCM_CD : 0) | (ctrl & UART_MCR_DTR ? TIOCM_DTR : 0) | (ctrl & UART_MCR_RTS ? TIOCM_RTS : 0) | (ctrl & UART_MCR_OUT1 ? TIOCM_OUT1 : 0) | (ctrl & UART_MCR_OUT2 ? 
TIOCM_OUT2 : 0); } static int ark3116_tiocmset(struct tty_struct *tty, unsigned set, unsigned clr) { struct usb_serial_port *port = tty->driver_data; struct ark3116_private *priv = usb_get_serial_port_data(port); /* we need to take the mutex here, to make sure that the value * in priv->mcr is actually the one that is in the hardware */ mutex_lock(&priv->hw_lock); if (set & TIOCM_RTS) priv->mcr |= UART_MCR_RTS; if (set & TIOCM_DTR) priv->mcr |= UART_MCR_DTR; if (set & TIOCM_OUT1) priv->mcr |= UART_MCR_OUT1; if (set & TIOCM_OUT2) priv->mcr |= UART_MCR_OUT2; if (clr & TIOCM_RTS) priv->mcr &= ~UART_MCR_RTS; if (clr & TIOCM_DTR) priv->mcr &= ~UART_MCR_DTR; if (clr & TIOCM_OUT1) priv->mcr &= ~UART_MCR_OUT1; if (clr & TIOCM_OUT2) priv->mcr &= ~UART_MCR_OUT2; ark3116_write_reg(port->serial, UART_MCR, priv->mcr); mutex_unlock(&priv->hw_lock); return 0; } static void ark3116_break_ctl(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct ark3116_private *priv = usb_get_serial_port_data(port); /* LCR is also used for other things: protect access */ mutex_lock(&priv->hw_lock); if (break_state) priv->lcr |= UART_LCR_SBC; else priv->lcr &= ~UART_LCR_SBC; ark3116_write_reg(port->serial, UART_LCR, priv->lcr); mutex_unlock(&priv->hw_lock); } static void ark3116_update_msr(struct usb_serial_port *port, __u8 msr) { struct ark3116_private *priv = usb_get_serial_port_data(port); unsigned long flags; spin_lock_irqsave(&priv->status_lock, flags); priv->msr = msr; spin_unlock_irqrestore(&priv->status_lock, flags); if (msr & UART_MSR_ANY_DELTA) { /* update input line counters */ if (msr & UART_MSR_DCTS) priv->icount.cts++; if (msr & UART_MSR_DDSR) priv->icount.dsr++; if (msr & UART_MSR_DDCD) priv->icount.dcd++; if (msr & UART_MSR_TERI) priv->icount.rng++; wake_up_interruptible(&priv->delta_msr_wait); } } static void ark3116_update_lsr(struct usb_serial_port *port, __u8 lsr) { struct ark3116_private *priv = usb_get_serial_port_data(port); unsigned long flags; spin_lock_irqsave(&priv->status_lock, flags); /* combine bits */ priv->lsr |= lsr; spin_unlock_irqrestore(&priv->status_lock, flags); if (lsr&UART_LSR_BRK_ERROR_BITS) { if (lsr & UART_LSR_BI) priv->icount.brk++; if (lsr & UART_LSR_FE) priv->icount.frame++; if (lsr & UART_LSR_PE) priv->icount.parity++; if (lsr & UART_LSR_OE) priv->icount.overrun++; } } static void ark3116_read_int_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; int status = urb->status; const __u8 *data = urb->transfer_buffer; int result; switch (status) { case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dbg("%s - urb shutting down with status: %d", __func__, status); return; default: dbg("%s - nonzero urb status received: %d", __func__, status); break; case 0: /* success */ /* discovered this by trail and error... */ if ((urb->actual_length == 4) && (data[0] == 0xe8)) { const __u8 id = data[1]&UART_IIR_ID; dbg("%s: iir=%02x", __func__, data[1]); if (id == UART_IIR_MSI) { dbg("%s: msr=%02x", __func__, data[3]); ark3116_update_msr(port, data[3]); break; } else if (id == UART_IIR_RLSI) { dbg("%s: lsr=%02x", __func__, data[2]); ark3116_update_lsr(port, data[2]); break; } } /* * Not sure what this data meant... 
*/ usb_serial_debug_data(debug, &port->dev, __func__, urb->actual_length, urb->transfer_buffer); break; } result = usb_submit_urb(urb, GFP_ATOMIC); if (result) dev_err(&urb->dev->dev, "%s - Error %d submitting interrupt urb\n", __func__, result); } /* Data comes in via the bulk (data) URB, erors/interrupts via the int URB. * This means that we cannot be sure which data byte has an associated error * condition, so we report an error for all data in the next bulk read. * * Actually, there might even be a window between the bulk data leaving the * ark and reading/resetting the lsr in the read_bulk_callback where an * interrupt for the next data block could come in. * Without somekind of ordering on the ark, we would have to report the * error for the next block of data as well... * For now, let's pretend this can't happen. */ static void ark3116_process_read_urb(struct urb *urb) { struct usb_serial_port *port = urb->context; struct ark3116_private *priv = usb_get_serial_port_data(port); struct tty_struct *tty; unsigned char *data = urb->transfer_buffer; char tty_flag = TTY_NORMAL; unsigned long flags; __u32 lsr; /* update line status */ spin_lock_irqsave(&priv->status_lock, flags); lsr = priv->lsr; priv->lsr &= ~UART_LSR_BRK_ERROR_BITS; spin_unlock_irqrestore(&priv->status_lock, flags); if (!urb->actual_length) return; tty = tty_port_tty_get(&port->port); if (!tty) return; if (lsr & UART_LSR_BRK_ERROR_BITS) { if (lsr & UART_LSR_BI) tty_flag = TTY_BREAK; else if (lsr & UART_LSR_PE) tty_flag = TTY_PARITY; else if (lsr & UART_LSR_FE) tty_flag = TTY_FRAME; /* overrun is special, not associated with a char */ if (lsr & UART_LSR_OE) tty_insert_flip_char(tty, 0, TTY_OVERRUN); } tty_insert_flip_string_fixed_flag(tty, data, tty_flag, urb->actual_length); tty_flip_buffer_push(tty); tty_kref_put(tty); } static struct usb_driver ark3116_driver = { .name = "ark3116", .probe = usb_serial_probe, .disconnect = usb_serial_disconnect, .id_table = id_table, }; static struct usb_serial_driver ark3116_device = { .driver = { .owner = THIS_MODULE, .name = "ark3116", }, .id_table = id_table, .num_ports = 1, .attach = ark3116_attach, .release = ark3116_release, .set_termios = ark3116_set_termios, .init_termios = ark3116_init_termios, .ioctl = ark3116_ioctl, .tiocmget = ark3116_tiocmget, .tiocmset = ark3116_tiocmset, .get_icount = ark3116_get_icount, .open = ark3116_open, .close = ark3116_close, .break_ctl = ark3116_break_ctl, .read_int_callback = ark3116_read_int_callback, .process_read_urb = ark3116_process_read_urb, }; static struct usb_serial_driver * const serial_drivers[] = { &ark3116_device, NULL }; module_usb_serial_driver(ark3116_driver, serial_drivers); MODULE_LICENSE("GPL"); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Enable debug"); /* * The following describes what I learned from studying the old * ark3116.c driver, disassembling the windows driver, and some lucky * guesses. Since I do not have any datasheet or other * documentation, inaccuracies are almost guaranteed. * * Some specs for the ARK3116 can be found here: * http://web.archive.org/web/20060318000438/ * www.arkmicro.com/en/products/view.php?id=10 * On that page, 2 GPIO pins are mentioned: I assume these are the * OUT1 and OUT2 pins of the UART, so I added support for those * through the MCR. Since the pins are not available on my hardware, * I could not verify this. * Also, it states there is "on-chip hardware flow control". 
I have * discovered how to enable that. Unfortunately, I do not know how to * enable XON/XOFF (software) flow control, which would need support * from the chip as well to work. Because of the wording on the web * page there is a real possibility the chip simply does not support * software flow control. * * I got my ark3116 as part of a mobile phone adapter cable. On the * PCB, the following numbered contacts are present: * * 1:- +5V * 2:o DTR * 3:i RX * 4:i DCD * 5:o RTS * 6:o TX * 7:i RI * 8:i DSR * 10:- 0V * 11:i CTS * * On my chip, all signals seem to be 3.3V, but 5V tolerant. But that * may be different for the one you have ;-). * * The windows driver limits the registers to 0-F, so I assume there * are actually 16 present on the device. * * On a UART interrupt, 4 bytes of data come in on the interrupt * endpoint. The bytes are 0xe8 IIR LSR MSR. * * The baudrate seems to be generated from the 12MHz crystal, using * 4-times subsampling. So quot=12e6/(4*baud). Also see description * of register E. * * Registers 0-7: * These seem to be the same as for a regular 16450. The FCR is set * to UART_FCR_DMA_SELECT (0x8), I guess to enable transfers between * the UART and the USB bridge/DMA engine. * * Register 8: * By trial and error, I found out that bit 0 enables hardware CTS, * stopping TX when CTS is +5V. Bit 1 does the same for RTS, making * RTS +5V when the 3116 cannot transfer the data to the USB bus * (verified by disabling the reading URB). Note that as far as I can * tell, the windows driver does NOT use this, so there might be some * hardware bug or something. * * According to a patch provided here * (http://lkml.org/lkml/2009/7/26/56), the ARK3116 can also be used * as an IrDA dongle. Since I do not have such a thing, I could not * investigate that aspect. However, I can speculate ;-). * * - IrDA encodes data differently than RS232. Most likely, one of * the bits in registers 9..E enables the IR ENDEC (encoder/decoder). * - Depending on the IR transceiver, the input and output need to be * inverted, so there are probably bits for that as well. * - IrDA is half-duplex, so there should be a bit for selecting that. * * This still leaves at least two registers unaccounted for. Perhaps * the chip can do XON/XOFF or CRC in HW? * * Register 9: * Set to 0x00 for IrDA, when the baudrate is initialised. * * Register A: * Set to 0x01 for IrDA, at init. * * Register B: * Set to 0x01 for IrDA, 0x00 for RS232, at init. * * Register C: * Set to 00 for IrDA, at init. * * Register D: * Set to 0x41 for IrDA, at init. * * Register E: * Some kind of baudrate override. The windows driver seems to set * this to 0x00 for normal baudrates, 0x01 for 460800, 0x02 for 921600. * Since 460800 and 921600 cannot be obtained by dividing 3MHz by an integer, * it could be some kind of subdivisor thingy. * However, it does not seem to do anything: selecting 921600 (divisor 3, * reg E=2), still gets 1 MHz. I also checked if registers 9, C or F would * work, but they don't. * * Register F: unknown */
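/*
 * Illustration only -- a stand-alone sketch, not part of the driver above.
 * It shows the divisor arithmetic described in the note (quot = 12e6 /
 * (4 * baud), i.e. a 3 MHz base clock split into the usual DLL/DLM pair).
 * The round-to-nearest choice is an assumption for the demo; the actual
 * register writes the driver performs are not reproduced here.
 */
#include <stdio.h>

#define ARK_BASE_CLK	12000000UL	/* 12 MHz crystal, per the note above */
#define ARK_SUBSAMPLE	4UL		/* 4-times subsampling */

static unsigned int ark_quot(unsigned long baud)
{
	unsigned long base = ARK_BASE_CLK / ARK_SUBSAMPLE;	/* 3 MHz */

	/* round to nearest divisor; assumption, the hardware may truncate */
	return (unsigned int)((base + baud / 2) / baud);
}

int main(void)
{
	static const unsigned long rates[] = { 9600, 115200, 460800, 921600 };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		unsigned int quot = ark_quot(rates[i]);

		printf("baud %7lu -> quot %5u (DLM=0x%02x DLL=0x%02x)\n",
		       rates[i], quot, (quot >> 8) & 0xff, quot & 0xff);
	}
	return 0;
}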
gpl-2.0
vickylinuxer/at91sam9g35-kernel
drivers/staging/msm/logo.c
3861
2340
/* drivers/video/msm/logo.c * * Show Logo in RLE 565 format * * Copyright (C) 2008 Google Incorporated * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/types.h> #include <linux/fb.h> #include <linux/vt_kern.h> #include <linux/unistd.h> #include <linux/syscalls.h> #include <linux/irq.h> #include <asm/system.h> #define fb_width(fb) ((fb)->var.xres) #define fb_height(fb) ((fb)->var.yres) #define fb_size(fb) ((fb)->var.xres * (fb)->var.yres * 2) static void memset16(void *_ptr, unsigned short val, unsigned count) { unsigned short *ptr = _ptr; count >>= 1; while (count--) *ptr++ = val; } /* 565RLE image format: [count(2 bytes), rle(2 bytes)] */ int load_565rle_image(char *filename) { struct fb_info *info; int fd, err = 0; unsigned count, max; unsigned short *data, *bits, *ptr; info = registered_fb[0]; if (!info) { printk(KERN_WARNING "%s: Can not access framebuffer\n", __func__); return -ENODEV; } fd = sys_open(filename, O_RDONLY, 0); if (fd < 0) { printk(KERN_WARNING "%s: Can not open %s\n", __func__, filename); return -ENOENT; } count = (unsigned)sys_lseek(fd, (off_t)0, 2); if (count == 0) { sys_close(fd); err = -EIO; goto err_logo_close_file; } sys_lseek(fd, (off_t)0, 0); data = kmalloc(count, GFP_KERNEL); if (!data) { printk(KERN_WARNING "%s: Can not alloc data\n", __func__); err = -ENOMEM; goto err_logo_close_file; } if ((unsigned)sys_read(fd, (char *)data, count) != count) { err = -EIO; goto err_logo_free_data; } max = fb_width(info) * fb_height(info); ptr = data; bits = (unsigned short *)(info->screen_base); while (count > 3) { unsigned n = ptr[0]; if (n > max) break; memset16(bits, ptr[1], n << 1); bits += n; max -= n; ptr += 2; count -= 4; } err_logo_free_data: kfree(data); err_logo_close_file: sys_close(fd); return err; } EXPORT_SYMBOL(load_565rle_image);
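/*
 * Illustration only -- a stand-alone sketch, separate from the kernel file
 * above. The loader expects a stream of 16-bit pairs, [run length, RGB565
 * value], which it expands with memset16(). This sketch shows how such a
 * stream could be produced from a raw RGB565 pixel buffer; the caller and
 * buffer-size handling are assumptions, not taken from any real tool.
 */
#include <stddef.h>
#include <stdint.h>

/*
 * Encode 'count' RGB565 pixels into [len, value] pairs.
 * 'out' must hold at least 2 * out_pairs 16-bit words; returns pairs written.
 */
static size_t rle565_encode(const uint16_t *pix, size_t count,
			    uint16_t *out, size_t out_pairs)
{
	size_t i = 0, pairs = 0;

	while (i < count && pairs < out_pairs) {
		uint16_t val = pix[i];
		uint16_t run = 1;

		/* extend the run while the pixel value repeats */
		while (i + run < count && pix[i + run] == val && run < 0xffff)
			run++;

		out[2 * pairs + 0] = run;	/* count word */
		out[2 * pairs + 1] = val;	/* RGB565 value word */
		pairs++;
		i += run;
	}
	return pairs;
}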
gpl-2.0
flar2/evita-ElementalX
drivers/net/plip/plip.c
5141
35132
/* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */ /* PLIP: A parallel port "network" driver for Linux. */ /* This driver is for parallel port with 5-bit cable (LapLink (R) cable). */ /* * Authors: Donald Becker <becker@scyld.com> * Tommy Thorn <thorn@daimi.aau.dk> * Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp> * Alan Cox <gw4pts@gw4pts.ampr.org> * Peter Bauer <100136.3530@compuserve.com> * Niibe Yutaka <gniibe@mri.co.jp> * Nimrod Zimerman <zimerman@mailandnews.com> * * Enhancements: * Modularization and ifreq/ifmap support by Alan Cox. * Rewritten by Niibe Yutaka. * parport-sharing awareness code by Philip Blundell. * SMP locking by Niibe Yutaka. * Support for parallel ports with no IRQ (poll mode), * Modifications to use the parallel port API * by Nimrod Zimerman. * * Fixes: * Niibe Yutaka * - Module initialization. * - MTU fix. * - Make sure other end is OK, before sending a packet. * - Fix immediate timer problem. * * Al Viro * - Changed {enable,disable}_irq handling to make it work * with new ("stack") semantics. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ /* * Original version and the name 'PLIP' from Donald Becker <becker@scyld.com> * inspired by Russ Nelson's parallel port packet driver. * * NOTE: * Tanabe Hiroyasu had changed the protocol, and it was in Linux v1.0. * Because of the necessity to communicate to DOS machines with the * Crynwr packet driver, Peter Bauer changed the protocol again * back to original protocol. * * This version follows original PLIP protocol. * So, this PLIP can't communicate the PLIP of Linux v1.0. */ /* * To use with DOS box, please do (Turn on ARP switch): * # ifconfig plip[0-2] arp */ static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n"; /* Sources: Ideas and protocols came from Russ Nelson's <nelson@crynwr.com> "parallel.asm" parallel port packet driver. The "Crynwr" parallel port standard specifies the following protocol: Trigger by sending nibble '0x8' (this causes interrupt on other end) count-low octet count-high octet ... data octets checksum octet Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)> <wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)> The packet is encapsulated as if it were ethernet. The cable used is a de facto standard parallel null cable -- sold as a "LapLink" cable by various places. You'll need a 12-conductor cable to make one yourself. The wiring is: SLCTIN 17 - 17 GROUND 25 - 25 D0->ERROR 2 - 15 15 - 2 D1->SLCT 3 - 13 13 - 3 D2->PAPOUT 4 - 12 12 - 4 D3->ACK 5 - 10 10 - 5 D4->BUSY 6 - 11 11 - 6 Do not connect the other pins. 
They are D5,D6,D7 are 7,8,9 STROBE is 1, FEED is 14, INIT is 16 extra grounds are 18,19,20,21,22,23,24 */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/if_ether.h> #include <linux/in.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/inetdevice.h> #include <linux/skbuff.h> #include <linux/if_plip.h> #include <linux/workqueue.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/parport.h> #include <linux/bitops.h> #include <net/neighbour.h> #include <asm/irq.h> #include <asm/byteorder.h> /* Maximum number of devices to support. */ #define PLIP_MAX 8 /* Use 0 for production, 1 for verification, >2 for debug */ #ifndef NET_DEBUG #define NET_DEBUG 1 #endif static const unsigned int net_debug = NET_DEBUG; #define ENABLE(irq) if (irq != -1) enable_irq(irq) #define DISABLE(irq) if (irq != -1) disable_irq(irq) /* In micro second */ #define PLIP_DELAY_UNIT 1 /* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */ #define PLIP_TRIGGER_WAIT 500 /* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */ #define PLIP_NIBBLE_WAIT 3000 /* Bottom halves */ static void plip_kick_bh(struct work_struct *work); static void plip_bh(struct work_struct *work); static void plip_timer_bh(struct work_struct *work); /* Interrupt handler */ static void plip_interrupt(void *dev_id); /* Functions for DEV methods */ static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev); static int plip_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len); static int plip_hard_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); static int plip_open(struct net_device *dev); static int plip_close(struct net_device *dev); static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); static int plip_preempt(void *handle); static void plip_wakeup(void *handle); enum plip_connection_state { PLIP_CN_NONE=0, PLIP_CN_RECEIVE, PLIP_CN_SEND, PLIP_CN_CLOSING, PLIP_CN_ERROR }; enum plip_packet_state { PLIP_PK_DONE=0, PLIP_PK_TRIGGER, PLIP_PK_LENGTH_LSB, PLIP_PK_LENGTH_MSB, PLIP_PK_DATA, PLIP_PK_CHECKSUM }; enum plip_nibble_state { PLIP_NB_BEGIN, PLIP_NB_1, PLIP_NB_2, }; struct plip_local { enum plip_packet_state state; enum plip_nibble_state nibble; union { struct { #if defined(__LITTLE_ENDIAN) unsigned char lsb; unsigned char msb; #elif defined(__BIG_ENDIAN) unsigned char msb; unsigned char lsb; #else #error "Please fix the endianness defines in <asm/byteorder.h>" #endif } b; unsigned short h; } length; unsigned short byte; unsigned char checksum; unsigned char data; struct sk_buff *skb; }; struct net_local { struct net_device *dev; struct work_struct immediate; struct delayed_work deferred; struct delayed_work timer; struct plip_local snd_data; struct plip_local rcv_data; struct pardevice *pardev; unsigned long trigger; unsigned long nibble; enum plip_connection_state connection; unsigned short timeout_count; int is_deferred; int port_owner; int should_relinquish; spinlock_t lock; atomic_t kill_timer; struct completion killed_timer_cmp; }; static inline void enable_parport_interrupts (struct net_device *dev) { if (dev->irq != -1) { struct parport *port = ((struct net_local *)netdev_priv(dev))->pardev->port; 
port->ops->enable_irq (port); } } static inline void disable_parport_interrupts (struct net_device *dev) { if (dev->irq != -1) { struct parport *port = ((struct net_local *)netdev_priv(dev))->pardev->port; port->ops->disable_irq (port); } } static inline void write_data (struct net_device *dev, unsigned char data) { struct parport *port = ((struct net_local *)netdev_priv(dev))->pardev->port; port->ops->write_data (port, data); } static inline unsigned char read_status (struct net_device *dev) { struct parport *port = ((struct net_local *)netdev_priv(dev))->pardev->port; return port->ops->read_status (port); } static const struct header_ops plip_header_ops = { .create = plip_hard_header, .cache = plip_hard_header_cache, }; static const struct net_device_ops plip_netdev_ops = { .ndo_open = plip_open, .ndo_stop = plip_close, .ndo_start_xmit = plip_tx_packet, .ndo_do_ioctl = plip_ioctl, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; /* Entry point of PLIP driver. Probe the hardware, and register/initialize the driver. PLIP is rather weird, because of the way it interacts with the parport system. It is _not_ initialised from Space.c. Instead, plip_init() is called, and that function makes up a "struct net_device" for each port, and then calls us here. */ static void plip_init_netdev(struct net_device *dev) { struct net_local *nl = netdev_priv(dev); /* Then, override parts of it */ dev->tx_queue_len = 10; dev->flags = IFF_POINTOPOINT|IFF_NOARP; memset(dev->dev_addr, 0xfc, ETH_ALEN); dev->netdev_ops = &plip_netdev_ops; dev->header_ops = &plip_header_ops; nl->port_owner = 0; /* Initialize constants */ nl->trigger = PLIP_TRIGGER_WAIT; nl->nibble = PLIP_NIBBLE_WAIT; /* Initialize task queue structures */ INIT_WORK(&nl->immediate, plip_bh); INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh); if (dev->irq == -1) INIT_DELAYED_WORK(&nl->timer, plip_timer_bh); spin_lock_init(&nl->lock); } /* Bottom half handler for the delayed request. This routine is kicked by do_timer(). Request `plip_bh' to be invoked. */ static void plip_kick_bh(struct work_struct *work) { struct net_local *nl = container_of(work, struct net_local, deferred.work); if (nl->is_deferred) schedule_work(&nl->immediate); } /* Forward declarations of internal routines */ static int plip_none(struct net_device *, struct net_local *, struct plip_local *, struct plip_local *); static int plip_receive_packet(struct net_device *, struct net_local *, struct plip_local *, struct plip_local *); static int plip_send_packet(struct net_device *, struct net_local *, struct plip_local *, struct plip_local *); static int plip_connection_close(struct net_device *, struct net_local *, struct plip_local *, struct plip_local *); static int plip_error(struct net_device *, struct net_local *, struct plip_local *, struct plip_local *); static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl, struct plip_local *snd, struct plip_local *rcv, int error); #define OK 0 #define TIMEOUT 1 #define ERROR 2 #define HS_TIMEOUT 3 typedef int (*plip_func)(struct net_device *dev, struct net_local *nl, struct plip_local *snd, struct plip_local *rcv); static const plip_func connection_state_table[] = { plip_none, plip_receive_packet, plip_send_packet, plip_connection_close, plip_error }; /* Bottom half handler of PLIP. 
*/ static void plip_bh(struct work_struct *work) { struct net_local *nl = container_of(work, struct net_local, immediate); struct plip_local *snd = &nl->snd_data; struct plip_local *rcv = &nl->rcv_data; plip_func f; int r; nl->is_deferred = 0; f = connection_state_table[nl->connection]; if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK && (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) { nl->is_deferred = 1; schedule_delayed_work(&nl->deferred, 1); } } static void plip_timer_bh(struct work_struct *work) { struct net_local *nl = container_of(work, struct net_local, timer.work); if (!(atomic_read (&nl->kill_timer))) { plip_interrupt (nl->dev); schedule_delayed_work(&nl->timer, 1); } else { complete(&nl->killed_timer_cmp); } } static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl, struct plip_local *snd, struct plip_local *rcv, int error) { unsigned char c0; /* * This is tricky. If we got here from the beginning of send (either * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's * already disabled. With the old variant of {enable,disable}_irq() * extra disable_irq() was a no-op. Now it became mortal - it's * unbalanced and thus we'll never re-enable IRQ (until rmmod plip, * that is). So we have to treat HS_TIMEOUT and ERROR from send * in a special way. */ spin_lock_irq(&nl->lock); if (nl->connection == PLIP_CN_SEND) { if (error != ERROR) { /* Timeout */ nl->timeout_count++; if ((error == HS_TIMEOUT && nl->timeout_count <= 10) || nl->timeout_count <= 3) { spin_unlock_irq(&nl->lock); /* Try again later */ return TIMEOUT; } c0 = read_status(dev); printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n", dev->name, snd->state, c0); } else error = HS_TIMEOUT; dev->stats.tx_errors++; dev->stats.tx_aborted_errors++; } else if (nl->connection == PLIP_CN_RECEIVE) { if (rcv->state == PLIP_PK_TRIGGER) { /* Transmission was interrupted. 
*/ spin_unlock_irq(&nl->lock); return OK; } if (error != ERROR) { /* Timeout */ if (++nl->timeout_count <= 3) { spin_unlock_irq(&nl->lock); /* Try again later */ return TIMEOUT; } c0 = read_status(dev); printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n", dev->name, rcv->state, c0); } dev->stats.rx_dropped++; } rcv->state = PLIP_PK_DONE; if (rcv->skb) { kfree_skb(rcv->skb); rcv->skb = NULL; } snd->state = PLIP_PK_DONE; if (snd->skb) { dev_kfree_skb(snd->skb); snd->skb = NULL; } spin_unlock_irq(&nl->lock); if (error == HS_TIMEOUT) { DISABLE(dev->irq); synchronize_irq(dev->irq); } disable_parport_interrupts (dev); netif_stop_queue (dev); nl->connection = PLIP_CN_ERROR; write_data (dev, 0x00); return TIMEOUT; } static int plip_none(struct net_device *dev, struct net_local *nl, struct plip_local *snd, struct plip_local *rcv) { return OK; } /* PLIP_RECEIVE --- receive a byte(two nibbles) Returns OK on success, TIMEOUT on timeout */ static inline int plip_receive(unsigned short nibble_timeout, struct net_device *dev, enum plip_nibble_state *ns_p, unsigned char *data_p) { unsigned char c0, c1; unsigned int cx; switch (*ns_p) { case PLIP_NB_BEGIN: cx = nibble_timeout; while (1) { c0 = read_status(dev); udelay(PLIP_DELAY_UNIT); if ((c0 & 0x80) == 0) { c1 = read_status(dev); if (c0 == c1) break; } if (--cx == 0) return TIMEOUT; } *data_p = (c0 >> 3) & 0x0f; write_data (dev, 0x10); /* send ACK */ *ns_p = PLIP_NB_1; case PLIP_NB_1: cx = nibble_timeout; while (1) { c0 = read_status(dev); udelay(PLIP_DELAY_UNIT); if (c0 & 0x80) { c1 = read_status(dev); if (c0 == c1) break; } if (--cx == 0) return TIMEOUT; } *data_p |= (c0 << 1) & 0xf0; write_data (dev, 0x00); /* send ACK */ *ns_p = PLIP_NB_BEGIN; case PLIP_NB_2: break; } return OK; } /* * Determine the packet's protocol ID. The rule here is that we * assume 802.3 if the type field is short enough to be a length. * This is normal practice and works for any 'now in use' protocol. * * PLIP is ethernet ish but the daddr might not be valid if unicast. * PLIP fortunately has no bus architecture (its Point-to-point). * * We can't fix the daddr thing as that quirk (more bug) is embedded * in far too many old systems not all even running Linux. */ static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev) { struct ethhdr *eth; unsigned char *rawp; skb_reset_mac_header(skb); skb_pull(skb,dev->hard_header_len); eth = eth_hdr(skb); if(*eth->h_dest&1) { if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0) skb->pkt_type=PACKET_BROADCAST; else skb->pkt_type=PACKET_MULTICAST; } /* * This ALLMULTI check should be redundant by 1.4 * so don't forget to remove it. */ if (ntohs(eth->h_proto) >= 1536) return eth->h_proto; rawp = skb->data; /* * This is a magic hack to spot IPX packets. Older Novell breaks * the protocol design and runs IPX over 802.3 without an 802.2 LLC * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This * won't work for fault tolerant netware but does for the rest. 
*/ if (*(unsigned short *)rawp == 0xFFFF) return htons(ETH_P_802_3); /* * Real 802.2 LLC */ return htons(ETH_P_802_2); } /* PLIP_RECEIVE_PACKET --- receive a packet */ static int plip_receive_packet(struct net_device *dev, struct net_local *nl, struct plip_local *snd, struct plip_local *rcv) { unsigned short nibble_timeout = nl->nibble; unsigned char *lbuf; switch (rcv->state) { case PLIP_PK_TRIGGER: DISABLE(dev->irq); /* Don't need to synchronize irq, as we can safely ignore it */ disable_parport_interrupts (dev); write_data (dev, 0x01); /* send ACK */ if (net_debug > 2) printk(KERN_DEBUG "%s: receive start\n", dev->name); rcv->state = PLIP_PK_LENGTH_LSB; rcv->nibble = PLIP_NB_BEGIN; case PLIP_PK_LENGTH_LSB: if (snd->state != PLIP_PK_DONE) { if (plip_receive(nl->trigger, dev, &rcv->nibble, &rcv->length.b.lsb)) { /* collision, here dev->tbusy == 1 */ rcv->state = PLIP_PK_DONE; nl->is_deferred = 1; nl->connection = PLIP_CN_SEND; schedule_delayed_work(&nl->deferred, 1); enable_parport_interrupts (dev); ENABLE(dev->irq); return OK; } } else { if (plip_receive(nibble_timeout, dev, &rcv->nibble, &rcv->length.b.lsb)) return TIMEOUT; } rcv->state = PLIP_PK_LENGTH_MSB; case PLIP_PK_LENGTH_MSB: if (plip_receive(nibble_timeout, dev, &rcv->nibble, &rcv->length.b.msb)) return TIMEOUT; if (rcv->length.h > dev->mtu + dev->hard_header_len || rcv->length.h < 8) { printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h); return ERROR; } /* Malloc up new buffer. */ rcv->skb = dev_alloc_skb(rcv->length.h + 2); if (rcv->skb == NULL) { printk(KERN_ERR "%s: Memory squeeze.\n", dev->name); return ERROR; } skb_reserve(rcv->skb, 2); /* Align IP on 16 byte boundaries */ skb_put(rcv->skb,rcv->length.h); rcv->skb->dev = dev; rcv->state = PLIP_PK_DATA; rcv->byte = 0; rcv->checksum = 0; case PLIP_PK_DATA: lbuf = rcv->skb->data; do { if (plip_receive(nibble_timeout, dev, &rcv->nibble, &lbuf[rcv->byte])) return TIMEOUT; } while (++rcv->byte < rcv->length.h); do { rcv->checksum += lbuf[--rcv->byte]; } while (rcv->byte); rcv->state = PLIP_PK_CHECKSUM; case PLIP_PK_CHECKSUM: if (plip_receive(nibble_timeout, dev, &rcv->nibble, &rcv->data)) return TIMEOUT; if (rcv->data != rcv->checksum) { dev->stats.rx_crc_errors++; if (net_debug) printk(KERN_DEBUG "%s: checksum error\n", dev->name); return ERROR; } rcv->state = PLIP_PK_DONE; case PLIP_PK_DONE: /* Inform the upper layer for the arrival of a packet. */ rcv->skb->protocol=plip_type_trans(rcv->skb, dev); netif_rx_ni(rcv->skb); dev->stats.rx_bytes += rcv->length.h; dev->stats.rx_packets++; rcv->skb = NULL; if (net_debug > 2) printk(KERN_DEBUG "%s: receive end\n", dev->name); /* Close the connection. 
*/ write_data (dev, 0x00); spin_lock_irq(&nl->lock); if (snd->state != PLIP_PK_DONE) { nl->connection = PLIP_CN_SEND; spin_unlock_irq(&nl->lock); schedule_work(&nl->immediate); enable_parport_interrupts (dev); ENABLE(dev->irq); return OK; } else { nl->connection = PLIP_CN_NONE; spin_unlock_irq(&nl->lock); enable_parport_interrupts (dev); ENABLE(dev->irq); return OK; } } return OK; } /* PLIP_SEND --- send a byte (two nibbles) Returns OK on success, TIMEOUT when timeout */ static inline int plip_send(unsigned short nibble_timeout, struct net_device *dev, enum plip_nibble_state *ns_p, unsigned char data) { unsigned char c0; unsigned int cx; switch (*ns_p) { case PLIP_NB_BEGIN: write_data (dev, data & 0x0f); *ns_p = PLIP_NB_1; case PLIP_NB_1: write_data (dev, 0x10 | (data & 0x0f)); cx = nibble_timeout; while (1) { c0 = read_status(dev); if ((c0 & 0x80) == 0) break; if (--cx == 0) return TIMEOUT; udelay(PLIP_DELAY_UNIT); } write_data (dev, 0x10 | (data >> 4)); *ns_p = PLIP_NB_2; case PLIP_NB_2: write_data (dev, (data >> 4)); cx = nibble_timeout; while (1) { c0 = read_status(dev); if (c0 & 0x80) break; if (--cx == 0) return TIMEOUT; udelay(PLIP_DELAY_UNIT); } *ns_p = PLIP_NB_BEGIN; return OK; } return OK; } /* PLIP_SEND_PACKET --- send a packet */ static int plip_send_packet(struct net_device *dev, struct net_local *nl, struct plip_local *snd, struct plip_local *rcv) { unsigned short nibble_timeout = nl->nibble; unsigned char *lbuf; unsigned char c0; unsigned int cx; if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) { printk(KERN_DEBUG "%s: send skb lost\n", dev->name); snd->state = PLIP_PK_DONE; snd->skb = NULL; return ERROR; } switch (snd->state) { case PLIP_PK_TRIGGER: if ((read_status(dev) & 0xf8) != 0x80) return HS_TIMEOUT; /* Trigger remote rx interrupt. */ write_data (dev, 0x08); cx = nl->trigger; while (1) { udelay(PLIP_DELAY_UNIT); spin_lock_irq(&nl->lock); if (nl->connection == PLIP_CN_RECEIVE) { spin_unlock_irq(&nl->lock); /* Interrupted. */ dev->stats.collisions++; return OK; } c0 = read_status(dev); if (c0 & 0x08) { spin_unlock_irq(&nl->lock); DISABLE(dev->irq); synchronize_irq(dev->irq); if (nl->connection == PLIP_CN_RECEIVE) { /* Interrupted. We don't need to enable irq, as it is soon disabled. */ /* Yes, we do. New variant of {enable,disable}_irq *counts* them. 
-- AV */ ENABLE(dev->irq); dev->stats.collisions++; return OK; } disable_parport_interrupts (dev); if (net_debug > 2) printk(KERN_DEBUG "%s: send start\n", dev->name); snd->state = PLIP_PK_LENGTH_LSB; snd->nibble = PLIP_NB_BEGIN; nl->timeout_count = 0; break; } spin_unlock_irq(&nl->lock); if (--cx == 0) { write_data (dev, 0x00); return HS_TIMEOUT; } } case PLIP_PK_LENGTH_LSB: if (plip_send(nibble_timeout, dev, &snd->nibble, snd->length.b.lsb)) return TIMEOUT; snd->state = PLIP_PK_LENGTH_MSB; case PLIP_PK_LENGTH_MSB: if (plip_send(nibble_timeout, dev, &snd->nibble, snd->length.b.msb)) return TIMEOUT; snd->state = PLIP_PK_DATA; snd->byte = 0; snd->checksum = 0; case PLIP_PK_DATA: do { if (plip_send(nibble_timeout, dev, &snd->nibble, lbuf[snd->byte])) return TIMEOUT; } while (++snd->byte < snd->length.h); do { snd->checksum += lbuf[--snd->byte]; } while (snd->byte); snd->state = PLIP_PK_CHECKSUM; case PLIP_PK_CHECKSUM: if (plip_send(nibble_timeout, dev, &snd->nibble, snd->checksum)) return TIMEOUT; dev->stats.tx_bytes += snd->skb->len; dev_kfree_skb(snd->skb); dev->stats.tx_packets++; snd->state = PLIP_PK_DONE; case PLIP_PK_DONE: /* Close the connection */ write_data (dev, 0x00); snd->skb = NULL; if (net_debug > 2) printk(KERN_DEBUG "%s: send end\n", dev->name); nl->connection = PLIP_CN_CLOSING; nl->is_deferred = 1; schedule_delayed_work(&nl->deferred, 1); enable_parport_interrupts (dev); ENABLE(dev->irq); return OK; } return OK; } static int plip_connection_close(struct net_device *dev, struct net_local *nl, struct plip_local *snd, struct plip_local *rcv) { spin_lock_irq(&nl->lock); if (nl->connection == PLIP_CN_CLOSING) { nl->connection = PLIP_CN_NONE; netif_wake_queue (dev); } spin_unlock_irq(&nl->lock); if (nl->should_relinquish) { nl->should_relinquish = nl->port_owner = 0; parport_release(nl->pardev); } return OK; } /* PLIP_ERROR --- wait till other end settled */ static int plip_error(struct net_device *dev, struct net_local *nl, struct plip_local *snd, struct plip_local *rcv) { unsigned char status; status = read_status(dev); if ((status & 0xf8) == 0x80) { if (net_debug > 2) printk(KERN_DEBUG "%s: reset interface.\n", dev->name); nl->connection = PLIP_CN_NONE; nl->should_relinquish = 0; netif_start_queue (dev); enable_parport_interrupts (dev); ENABLE(dev->irq); netif_wake_queue (dev); } else { nl->is_deferred = 1; schedule_delayed_work(&nl->deferred, 1); } return OK; } /* Handle the parallel port interrupts. */ static void plip_interrupt(void *dev_id) { struct net_device *dev = dev_id; struct net_local *nl; struct plip_local *rcv; unsigned char c0; unsigned long flags; nl = netdev_priv(dev); rcv = &nl->rcv_data; spin_lock_irqsave (&nl->lock, flags); c0 = read_status(dev); if ((c0 & 0xf8) != 0xc0) { if ((dev->irq != -1) && (net_debug > 1)) printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name); spin_unlock_irqrestore (&nl->lock, flags); return; } if (net_debug > 3) printk(KERN_DEBUG "%s: interrupt.\n", dev->name); switch (nl->connection) { case PLIP_CN_CLOSING: netif_wake_queue (dev); case PLIP_CN_NONE: case PLIP_CN_SEND: rcv->state = PLIP_PK_TRIGGER; nl->connection = PLIP_CN_RECEIVE; nl->timeout_count = 0; schedule_work(&nl->immediate); break; case PLIP_CN_RECEIVE: /* May occur because there is race condition around test and set of dev->interrupt. Ignore this interrupt. 
*/ break; case PLIP_CN_ERROR: printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name); break; } spin_unlock_irqrestore(&nl->lock, flags); } static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev) { struct net_local *nl = netdev_priv(dev); struct plip_local *snd = &nl->snd_data; if (netif_queue_stopped(dev)) return NETDEV_TX_BUSY; /* We may need to grab the bus */ if (!nl->port_owner) { if (parport_claim(nl->pardev)) return NETDEV_TX_BUSY; nl->port_owner = 1; } netif_stop_queue (dev); if (skb->len > dev->mtu + dev->hard_header_len) { printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len); netif_start_queue (dev); return NETDEV_TX_BUSY; } if (net_debug > 2) printk(KERN_DEBUG "%s: send request\n", dev->name); spin_lock_irq(&nl->lock); snd->skb = skb; snd->length.h = skb->len; snd->state = PLIP_PK_TRIGGER; if (nl->connection == PLIP_CN_NONE) { nl->connection = PLIP_CN_SEND; nl->timeout_count = 0; } schedule_work(&nl->immediate); spin_unlock_irq(&nl->lock); return NETDEV_TX_OK; } static void plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth) { const struct in_device *in_dev; rcu_read_lock(); in_dev = __in_dev_get_rcu(dev); if (in_dev) { /* Any address will do - we take the first */ const struct in_ifaddr *ifa = in_dev->ifa_list; if (ifa) { memcpy(eth->h_source, dev->dev_addr, 6); memset(eth->h_dest, 0xfc, 2); memcpy(eth->h_dest+2, &ifa->ifa_address, 4); } } rcu_read_unlock(); } static int plip_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len) { int ret; ret = eth_header(skb, dev, type, daddr, saddr, len); if (ret >= 0) plip_rewrite_address (dev, (struct ethhdr *)skb->data); return ret; } static int plip_hard_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type) { int ret; ret = eth_header_cache(neigh, hh, type); if (ret == 0) { struct ethhdr *eth; eth = (struct ethhdr*)(((u8*)hh->hh_data) + HH_DATA_OFF(sizeof(*eth))); plip_rewrite_address (neigh->dev, eth); } return ret; } /* Open/initialize the board. This is called (in the current kernel) sometime after booting when the 'ifconfig' program is run. This routine gets exclusive access to the parallel port by allocating its IRQ line. */ static int plip_open(struct net_device *dev) { struct net_local *nl = netdev_priv(dev); struct in_device *in_dev; /* Grab the port */ if (!nl->port_owner) { if (parport_claim(nl->pardev)) return -EAGAIN; nl->port_owner = 1; } nl->should_relinquish = 0; /* Clear the data port. */ write_data (dev, 0x00); /* Enable rx interrupt. */ enable_parport_interrupts (dev); if (dev->irq == -1) { atomic_set (&nl->kill_timer, 0); schedule_delayed_work(&nl->timer, 1); } /* Initialize the state machine. */ nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE; nl->rcv_data.skb = nl->snd_data.skb = NULL; nl->connection = PLIP_CN_NONE; nl->is_deferred = 0; /* Fill in the MAC-level header. We used to abuse dev->broadcast to store the point-to-point MAC address, but we no longer do it. Instead, we fetch the interface address whenever it is needed, which is cheap enough because we use the hh_cache. Actually, abusing dev->broadcast didn't work, because when using plip_open the point-to-point address isn't yet known. PLIP doesn't have a real MAC address, but we need it to be DOS compatible, and to properly support taps (otherwise, when the device address isn't identical to the address of a received frame, the kernel incorrectly drops it). 
*/ in_dev=__in_dev_get_rtnl(dev); if (in_dev) { /* Any address will do - we take the first. We already have the first two bytes filled with 0xfc, from plip_init_dev(). */ struct in_ifaddr *ifa=in_dev->ifa_list; if (ifa != NULL) { memcpy(dev->dev_addr+2, &ifa->ifa_local, 4); } } netif_start_queue (dev); return 0; } /* The inverse routine to plip_open (). */ static int plip_close(struct net_device *dev) { struct net_local *nl = netdev_priv(dev); struct plip_local *snd = &nl->snd_data; struct plip_local *rcv = &nl->rcv_data; netif_stop_queue (dev); DISABLE(dev->irq); synchronize_irq(dev->irq); if (dev->irq == -1) { init_completion(&nl->killed_timer_cmp); atomic_set (&nl->kill_timer, 1); wait_for_completion(&nl->killed_timer_cmp); } #ifdef NOTDEF outb(0x00, PAR_DATA(dev)); #endif nl->is_deferred = 0; nl->connection = PLIP_CN_NONE; if (nl->port_owner) { parport_release(nl->pardev); nl->port_owner = 0; } snd->state = PLIP_PK_DONE; if (snd->skb) { dev_kfree_skb(snd->skb); snd->skb = NULL; } rcv->state = PLIP_PK_DONE; if (rcv->skb) { kfree_skb(rcv->skb); rcv->skb = NULL; } #ifdef NOTDEF /* Reset. */ outb(0x00, PAR_CONTROL(dev)); #endif return 0; } static int plip_preempt(void *handle) { struct net_device *dev = (struct net_device *)handle; struct net_local *nl = netdev_priv(dev); /* Stand our ground if a datagram is on the wire */ if (nl->connection != PLIP_CN_NONE) { nl->should_relinquish = 1; return 1; } nl->port_owner = 0; /* Remember that we released the bus */ return 0; } static void plip_wakeup(void *handle) { struct net_device *dev = (struct net_device *)handle; struct net_local *nl = netdev_priv(dev); if (nl->port_owner) { /* Why are we being woken up? */ printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name); if (!parport_claim(nl->pardev)) /* bus_owner is already set (but why?) */ printk(KERN_DEBUG "%s: I'm broken.\n", dev->name); else return; } if (!(dev->flags & IFF_UP)) /* Don't need the port when the interface is down */ return; if (!parport_claim(nl->pardev)) { nl->port_owner = 1; /* Clear the data port. */ write_data (dev, 0x00); } } static int plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct net_local *nl = netdev_priv(dev); struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru; if (cmd != SIOCDEVPLIP) return -EOPNOTSUPP; switch(pc->pcmd) { case PLIP_GET_TIMEOUT: pc->trigger = nl->trigger; pc->nibble = nl->nibble; break; case PLIP_SET_TIMEOUT: if(!capable(CAP_NET_ADMIN)) return -EPERM; nl->trigger = pc->trigger; nl->nibble = pc->nibble; break; default: return -EOPNOTSUPP; } return 0; } static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 }; static int timid; module_param_array(parport, int, NULL, 0); module_param(timid, int, 0); MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip"); static struct net_device *dev_plip[PLIP_MAX] = { NULL, }; static inline int plip_searchfor(int list[], int a) { int i; for (i = 0; i < PLIP_MAX && list[i] != -1; i++) { if (list[i] == a) return 1; } return 0; } /* plip_attach() is called (by the parport code) when a port is * available to use. 
*/ static void plip_attach (struct parport *port) { static int unit; struct net_device *dev; struct net_local *nl; char name[IFNAMSIZ]; if ((parport[0] == -1 && (!timid || !port->devices)) || plip_searchfor(parport, port->number)) { if (unit == PLIP_MAX) { printk(KERN_ERR "plip: too many devices\n"); return; } sprintf(name, "plip%d", unit); dev = alloc_etherdev(sizeof(struct net_local)); if (!dev) return; strcpy(dev->name, name); dev->irq = port->irq; dev->base_addr = port->base; if (port->irq == -1) { printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode," "which is fairly inefficient!\n", port->name); } nl = netdev_priv(dev); nl->dev = dev; nl->pardev = parport_register_device(port, dev->name, plip_preempt, plip_wakeup, plip_interrupt, 0, dev); if (!nl->pardev) { printk(KERN_ERR "%s: parport_register failed\n", name); goto err_free_dev; } plip_init_netdev(dev); if (register_netdev(dev)) { printk(KERN_ERR "%s: network register failed\n", name); goto err_parport_unregister; } printk(KERN_INFO "%s", version); if (dev->irq != -1) printk(KERN_INFO "%s: Parallel port at %#3lx, " "using IRQ %d.\n", dev->name, dev->base_addr, dev->irq); else printk(KERN_INFO "%s: Parallel port at %#3lx, " "not using IRQ.\n", dev->name, dev->base_addr); dev_plip[unit++] = dev; } return; err_parport_unregister: parport_unregister_device(nl->pardev); err_free_dev: free_netdev(dev); } /* plip_detach() is called (by the parport code) when a port is * no longer available to use. */ static void plip_detach (struct parport *port) { /* Nothing to do */ } static struct parport_driver plip_driver = { .name = "plip", .attach = plip_attach, .detach = plip_detach }; static void __exit plip_cleanup_module (void) { struct net_device *dev; int i; parport_unregister_driver (&plip_driver); for (i=0; i < PLIP_MAX; i++) { if ((dev = dev_plip[i])) { struct net_local *nl = netdev_priv(dev); unregister_netdev(dev); if (nl->port_owner) parport_release(nl->pardev); parport_unregister_device(nl->pardev); free_netdev(dev); dev_plip[i] = NULL; } } } #ifndef MODULE static int parport_ptr; static int __init plip_setup(char *str) { int ints[4]; str = get_options(str, ARRAY_SIZE(ints), ints); /* Ugh. */ if (!strncmp(str, "parport", 7)) { int n = simple_strtoul(str+7, NULL, 10); if (parport_ptr < PLIP_MAX) parport[parport_ptr++] = n; else printk(KERN_INFO "plip: too many ports, %s ignored.\n", str); } else if (!strcmp(str, "timid")) { timid = 1; } else { if (ints[0] == 0 || ints[1] == 0) { /* disable driver on "plip=" or "plip=0" */ parport[0] = -2; } else { printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n", ints[1]); } } return 1; } __setup("plip=", plip_setup); #endif /* !MODULE */ static int __init plip_init (void) { if (parport[0] == -2) return 0; if (parport[0] != -1 && timid) { printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n"); timid = 0; } if (parport_register_driver (&plip_driver)) { printk (KERN_WARNING "plip: couldn't register driver\n"); return 1; } return 0; } module_init(plip_init); module_exit(plip_cleanup_module); MODULE_LICENSE("GPL");
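/*
 * Illustration only -- a stand-alone sketch, separate from the driver above.
 * The "Crynwr" framing described in the header comment is simply: trigger,
 * length LSB, length MSB, payload bytes, then an 8-bit sum of the payload.
 * This sketch builds that byte sequence for a buffer; the two-nibble
 * handshake per byte (plip_send/plip_receive) is not reproduced, and the
 * sample packet contents are made up for the demo.
 */
#include <stdio.h>
#include <stdint.h>

/* Fill 'frame' with length + payload + checksum; returns bytes written. */
static size_t plip_build_frame(const uint8_t *pkt, uint16_t len, uint8_t *frame)
{
	uint8_t sum = 0;
	uint16_t i;

	frame[0] = len & 0xff;		/* count-low octet */
	frame[1] = len >> 8;		/* count-high octet */
	for (i = 0; i < len; i++) {
		frame[2 + i] = pkt[i];
		sum += pkt[i];		/* same 8-bit sum the driver keeps */
	}
	frame[2 + len] = sum;		/* checksum octet */
	return (size_t)len + 3;
}

int main(void)
{
	uint8_t pkt[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
	uint8_t frame[16];
	size_t n = plip_build_frame(pkt, sizeof(pkt), frame);
	size_t i;

	for (i = 0; i < n; i++)
		printf("%02x ", frame[i]);
	printf("\n");
	return 0;
}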
gpl-2.0
WZeke/m2_kernel
drivers/gpu/drm/mga/mga_warp.c
5653
4832
/* mga_warp.c -- Matrox G200/G400 WARP engine management -*- linux-c -*- * Created: Thu Jan 11 21:29:32 2001 by gareth@valinux.com * * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Gareth Hughes <gareth@valinux.com> */ #include <linux/firmware.h> #include <linux/ihex.h> #include <linux/platform_device.h> #include <linux/module.h> #include "drmP.h" #include "drm.h" #include "mga_drm.h" #include "mga_drv.h" #define FIRMWARE_G200 "matrox/g200_warp.fw" #define FIRMWARE_G400 "matrox/g400_warp.fw" MODULE_FIRMWARE(FIRMWARE_G200); MODULE_FIRMWARE(FIRMWARE_G400); #define MGA_WARP_CODE_ALIGN 256 /* in bytes */ #define WARP_UCODE_SIZE(size) ALIGN(size, MGA_WARP_CODE_ALIGN) int mga_warp_install_microcode(drm_mga_private_t *dev_priv) { unsigned char *vcbase = dev_priv->warp->handle; unsigned long pcbase = dev_priv->warp->offset; const char *firmware_name; struct platform_device *pdev; const struct firmware *fw = NULL; const struct ihex_binrec *rec; unsigned int size; int n_pipes, where; int rc = 0; switch (dev_priv->chipset) { case MGA_CARD_TYPE_G400: case MGA_CARD_TYPE_G550: firmware_name = FIRMWARE_G400; n_pipes = MGA_MAX_G400_PIPES; break; case MGA_CARD_TYPE_G200: firmware_name = FIRMWARE_G200; n_pipes = MGA_MAX_G200_PIPES; break; default: return -EINVAL; } pdev = platform_device_register_simple("mga_warp", 0, NULL, 0); if (IS_ERR(pdev)) { DRM_ERROR("mga: Failed to register microcode\n"); return PTR_ERR(pdev); } rc = request_ihex_firmware(&fw, firmware_name, &pdev->dev); platform_device_unregister(pdev); if (rc) { DRM_ERROR("mga: Failed to load microcode \"%s\"\n", firmware_name); return rc; } size = 0; where = 0; for (rec = (const struct ihex_binrec *)fw->data; rec; rec = ihex_next_binrec(rec)) { size += WARP_UCODE_SIZE(be16_to_cpu(rec->len)); where++; } if (where != n_pipes) { DRM_ERROR("mga: Invalid microcode \"%s\"\n", firmware_name); rc = -EINVAL; goto out; } size = PAGE_ALIGN(size); DRM_DEBUG("MGA ucode size = %d bytes\n", size); if (size > dev_priv->warp->size) { DRM_ERROR("microcode too large! 
(%u > %lu)\n", size, dev_priv->warp->size); rc = -ENOMEM; goto out; } memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys)); where = 0; for (rec = (const struct ihex_binrec *)fw->data; rec; rec = ihex_next_binrec(rec)) { unsigned int src_size, dst_size; DRM_DEBUG(" pcbase = 0x%08lx vcbase = %p\n", pcbase, vcbase); dev_priv->warp_pipe_phys[where] = pcbase; src_size = be16_to_cpu(rec->len); dst_size = WARP_UCODE_SIZE(src_size); memcpy(vcbase, rec->data, src_size); pcbase += dst_size; vcbase += dst_size; where++; } out: release_firmware(fw); return rc; } #define WMISC_EXPECTED (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE) int mga_warp_init(drm_mga_private_t *dev_priv) { u32 wmisc; /* FIXME: Get rid of these damned magic numbers... */ switch (dev_priv->chipset) { case MGA_CARD_TYPE_G400: case MGA_CARD_TYPE_G550: MGA_WRITE(MGA_WIADDR2, MGA_WMODE_SUSPEND); MGA_WRITE(MGA_WGETMSB, 0x00000E00); MGA_WRITE(MGA_WVRTXSZ, 0x00001807); MGA_WRITE(MGA_WACCEPTSEQ, 0x18000000); break; case MGA_CARD_TYPE_G200: MGA_WRITE(MGA_WIADDR, MGA_WMODE_SUSPEND); MGA_WRITE(MGA_WGETMSB, 0x1606); MGA_WRITE(MGA_WVRTXSZ, 7); break; default: return -EINVAL; } MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE | MGA_WCACHEFLUSH_ENABLE)); wmisc = MGA_READ(MGA_WMISC); if (wmisc != WMISC_EXPECTED) { DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n", wmisc, WMISC_EXPECTED); return -EINVAL; } return 0; }
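/*
 * Illustration only -- a stand-alone sketch, separate from the driver above.
 * The loader pads each ihex record out to a 256-byte boundary
 * (WARP_UCODE_SIZE) and then page-aligns the total before comparing it with
 * the WARP buffer size. This sketch repeats that arithmetic for a made-up
 * list of record lengths; the lengths are assumptions, not real firmware.
 */
#include <stdio.h>

#define CODE_ALIGN	256U
#define PAGE_SZ		4096U
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	const unsigned int rec_len[] = { 1003, 987, 1102, 998 }; /* hypothetical */
	unsigned int total = 0, i;

	for (i = 0; i < sizeof(rec_len) / sizeof(rec_len[0]); i++)
		total += ALIGN_UP(rec_len[i], CODE_ALIGN);

	printf("aligned ucode size: %u bytes, page-aligned: %u bytes\n",
	       total, ALIGN_UP(total, PAGE_SZ));
	return 0;
}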
gpl-2.0
gproj-m/lge-kernel-gproj
drivers/gpu/drm/nouveau/nv50_instmem.c
5909
11179
/* * Copyright (C) 2007 Ben Skeggs. * * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * */ #include "drmP.h" #include "drm.h" #include "nouveau_drv.h" #include "nouveau_vm.h" #define BAR1_VM_BASE 0x0020000000ULL #define BAR1_VM_SIZE pci_resource_len(dev->pdev, 1) #define BAR3_VM_BASE 0x0000000000ULL #define BAR3_VM_SIZE pci_resource_len(dev->pdev, 3) struct nv50_instmem_priv { uint32_t save1700[5]; /* 0x1700->0x1710 */ struct nouveau_gpuobj *bar1_dmaobj; struct nouveau_gpuobj *bar3_dmaobj; }; static void nv50_channel_del(struct nouveau_channel **pchan) { struct nouveau_channel *chan; chan = *pchan; *pchan = NULL; if (!chan) return; nouveau_gpuobj_ref(NULL, &chan->ramfc); nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); nouveau_gpuobj_ref(NULL, &chan->vm_pd); if (drm_mm_initialized(&chan->ramin_heap)) drm_mm_takedown(&chan->ramin_heap); nouveau_gpuobj_ref(NULL, &chan->ramin); kfree(chan); } static int nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm, struct nouveau_channel **pchan) { struct drm_nouveau_private *dev_priv = dev->dev_private; u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; u32 fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200; struct nouveau_channel *chan; int ret, i; chan = kzalloc(sizeof(*chan), GFP_KERNEL); if (!chan) return -ENOMEM; chan->dev = dev; ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin); if (ret) { nv50_channel_del(&chan); return ret; } ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size); if (ret) { nv50_channel_del(&chan); return ret; } ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 : chan->ramin->pinst + pgd, chan->ramin->vinst + pgd, 0x4000, NVOBJ_FLAG_ZERO_ALLOC, &chan->vm_pd); if (ret) { nv50_channel_del(&chan); return ret; } for (i = 0; i < 0x4000; i += 8) { nv_wo32(chan->vm_pd, i + 0, 0x00000000); nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe); } ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd); if (ret) { nv50_channel_del(&chan); return ret; } ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? 
~0 : chan->ramin->pinst + fc, chan->ramin->vinst + fc, 0x100, NVOBJ_FLAG_ZERO_ALLOC, &chan->ramfc); if (ret) { nv50_channel_del(&chan); return ret; } *pchan = chan; return 0; } int nv50_instmem_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_instmem_priv *priv; struct nouveau_channel *chan; struct nouveau_vm *vm; int ret, i; u32 tmp; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; dev_priv->engine.instmem.priv = priv; /* Save state, will restore at takedown. */ for (i = 0x1700; i <= 0x1710; i += 4) priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); /* Global PRAMIN heap */ ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size); if (ret) { NV_ERROR(dev, "Failed to init RAMIN heap\n"); goto error; } /* BAR3 */ ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE, &dev_priv->bar3_vm); if (ret) goto error; ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8, 0x1000, NVOBJ_FLAG_DONT_MAP | NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->bar3_vm->pgt[0].obj[0]); if (ret) goto error; dev_priv->bar3_vm->pgt[0].refcount[0] = 1; nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]); ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan); if (ret) goto error; dev_priv->channels.ptr[0] = dev_priv->channels.ptr[127] = chan; ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR3_VM_BASE, BAR3_VM_SIZE, NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM, NV_MEM_TYPE_VM, NV_MEM_COMP_VM, &priv->bar3_dmaobj); if (ret) goto error; nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12)); nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12)); nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4)); dev_priv->engine.instmem.flush(dev); dev_priv->ramin_available = true; tmp = nv_ro32(chan->ramin, 0); nv_wo32(chan->ramin, 0, ~tmp); if (nv_ro32(chan->ramin, 0) != ~tmp) { NV_ERROR(dev, "PRAMIN readback failed\n"); ret = -EIO; goto error; } nv_wo32(chan->ramin, 0, tmp); /* BAR1 */ ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, &vm); if (ret) goto error; ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, chan->vm_pd); if (ret) goto error; nouveau_vm_ref(NULL, &vm, NULL); ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR1_VM_BASE, BAR1_VM_SIZE, NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM, NV_MEM_TYPE_VM, NV_MEM_COMP_VM, &priv->bar1_dmaobj); if (ret) goto error; nv_wr32(dev, 0x001708, 0x80000000 | (priv->bar1_dmaobj->cinst >> 4)); for (i = 0; i < 8; i++) nv_wr32(dev, 0x1900 + (i*4), 0); /* Create shared channel VM, space is reserved at the beginning * to catch "NULL pointer" references */ ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL, &dev_priv->chan_vm); if (ret) return ret; return 0; error: nv50_instmem_takedown(dev); return ret; } void nv50_instmem_takedown(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; struct nouveau_channel *chan = dev_priv->channels.ptr[0]; int i; NV_DEBUG(dev, "\n"); if (!priv) return; dev_priv->ramin_available = false; nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL); for (i = 0x1700; i <= 0x1710; i += 4) nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]); nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj); nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj); nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd); dev_priv->channels.ptr[127] = 0; nv50_channel_del(&dev_priv->channels.ptr[0]); nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]); nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL); 
if (drm_mm_initialized(&dev_priv->ramin_heap)) drm_mm_takedown(&dev_priv->ramin_heap); dev_priv->engine.instmem.priv = NULL; kfree(priv); } int nv50_instmem_suspend(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; dev_priv->ramin_available = false; return 0; } void nv50_instmem_resume(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; struct nouveau_channel *chan = dev_priv->channels.ptr[0]; int i; /* Poke the relevant regs, and pray it works :) */ nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12)); nv_wr32(dev, NV50_PUNK_UNK1710, 0); nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) | NV50_PUNK_BAR_CFG_BASE_VALID); nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->bar1_dmaobj->cinst >> 4) | NV50_PUNK_BAR1_CTXDMA_VALID); nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->bar3_dmaobj->cinst >> 4) | NV50_PUNK_BAR3_CTXDMA_VALID); for (i = 0; i < 8; i++) nv_wr32(dev, 0x1900 + (i*4), 0); dev_priv->ramin_available = true; } struct nv50_gpuobj_node { struct nouveau_mem *vram; struct nouveau_vma chan_vma; u32 align; }; int nv50_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan, u32 size, u32 align) { struct drm_device *dev = gpuobj->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_vram_engine *vram = &dev_priv->engine.vram; struct nv50_gpuobj_node *node = NULL; int ret; node = kzalloc(sizeof(*node), GFP_KERNEL); if (!node) return -ENOMEM; node->align = align; size = (size + 4095) & ~4095; align = max(align, (u32)4096); ret = vram->get(dev, size, align, 0, 0, &node->vram); if (ret) { kfree(node); return ret; } gpuobj->vinst = node->vram->offset; if (gpuobj->flags & NVOBJ_FLAG_VM) { u32 flags = NV_MEM_ACCESS_RW; if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER)) flags |= NV_MEM_ACCESS_SYS; ret = nouveau_vm_get(chan->vm, size, 12, flags, &node->chan_vma); if (ret) { vram->put(dev, &node->vram); kfree(node); return ret; } nouveau_vm_map(&node->chan_vma, node->vram); gpuobj->linst = node->chan_vma.offset; } gpuobj->size = size; gpuobj->node = node; return 0; } void nv50_instmem_put(struct nouveau_gpuobj *gpuobj) { struct drm_device *dev = gpuobj->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_vram_engine *vram = &dev_priv->engine.vram; struct nv50_gpuobj_node *node; node = gpuobj->node; gpuobj->node = NULL; if (node->chan_vma.node) { nouveau_vm_unmap(&node->chan_vma); nouveau_vm_put(&node->chan_vma); } vram->put(dev, &node->vram); kfree(node); } int nv50_instmem_map(struct nouveau_gpuobj *gpuobj) { struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; struct nv50_gpuobj_node *node = gpuobj->node; int ret; ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12, NV_MEM_ACCESS_RW, &node->vram->bar_vma); if (ret) return ret; nouveau_vm_map(&node->vram->bar_vma, node->vram); gpuobj->pinst = node->vram->bar_vma.offset; return 0; } void nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj) { struct nv50_gpuobj_node *node = gpuobj->node; if (node->vram->bar_vma.node) { nouveau_vm_unmap(&node->vram->bar_vma); nouveau_vm_put(&node->vram->bar_vma); } } void nv50_instmem_flush(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; unsigned long flags; spin_lock_irqsave(&dev_priv->vm_lock, flags); nv_wr32(dev, 0x00330c, 0x00000001); if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) NV_ERROR(dev, "PRAMIN flush timeout\n"); spin_unlock_irqrestore(&dev_priv->vm_lock, 
flags); } void nv84_instmem_flush(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; unsigned long flags; spin_lock_irqsave(&dev_priv->vm_lock, flags); nv_wr32(dev, 0x070000, 0x00000001); if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) NV_ERROR(dev, "PRAMIN flush timeout\n"); spin_unlock_irqrestore(&dev_priv->vm_lock, flags); }
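/*
 * Illustration only -- a stand-alone sketch, separate from the driver above.
 * The BAR3 page table is sized as one 8-byte entry per 4 KiB page of the BAR
 * ((size >> 12) * 8). This sketch shows that arithmetic for an assumed
 * 64 MiB BAR; the real size comes from pci_resource_len() at runtime.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long bar_size = 64ULL << 20;		/* assumed 64 MiB BAR3 */
	unsigned long long pgt_bytes = (bar_size >> 12) * 8;	/* 8 bytes per PTE */

	printf("BAR3 %llu MiB -> %llu pages -> %llu KiB of page table\n",
	       bar_size >> 20, bar_size >> 12, pgt_bytes >> 10);
	return 0;
}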
gpl-2.0
naufragoweb/android_kernel_samsung_kyleopen
arch/sh/kernel/cpu/sh5/unwind.c
9237
8532
/* * arch/sh/kernel/cpu/sh5/unwind.c * * Copyright (C) 2004 Paul Mundt * Copyright (C) 2004 Richard Curnow * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kallsyms.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <asm/page.h> #include <asm/ptrace.h> #include <asm/processor.h> #include <asm/io.h> static u8 regcache[63]; /* * Finding the previous stack frame isn't horribly straightforward as it is * on some other platforms. In the sh64 case, we don't have "linked" stack * frames, so we need to do a bit of work to determine the previous frame, * and in turn, the previous r14/r18 pair. * * There are generally a few cases which determine where we can find out * the r14/r18 values. In the general case, this can be determined by poking * around the prologue of the symbol PC is in (note that we absolutely must * have frame pointer support as well as the kernel symbol table mapped, * otherwise we can't even get this far). * * In other cases, such as the interrupt/exception path, we can poke around * the sp/fp. * * Notably, this entire approach is somewhat error prone, and in the event * that the previous frame cannot be determined, that's all we can do. * Either way, this still leaves us with a more correct backtrace then what * we would be able to come up with by walking the stack (which is garbage * for anything beyond the first frame). * -- PFM. */ static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc, unsigned long *pprev_fp, unsigned long *pprev_pc, struct pt_regs *regs) { const char *sym; char namebuf[128]; unsigned long offset; unsigned long prologue = 0; unsigned long fp_displacement = 0; unsigned long fp_prev = 0; unsigned long offset_r14 = 0, offset_r18 = 0; int i, found_prologue_end = 0; sym = kallsyms_lookup(pc, NULL, &offset, NULL, namebuf); if (!sym) return -EINVAL; prologue = pc - offset; if (!prologue) return -EINVAL; /* Validate fp, to avoid risk of dereferencing a bad pointer later. Assume 128Mb since that's the amount of RAM on a Cayman. Modify when there is an SH-5 board with more. */ if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) || (fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) || ((fp & 7) != 0)) { return -EINVAL; } /* * Depth to walk, depth is completely arbitrary. */ for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) { unsigned long op; u8 major, minor; u8 src, dest, disp; op = *(unsigned long *)prologue; major = (op >> 26) & 0x3f; src = (op >> 20) & 0x3f; minor = (op >> 16) & 0xf; disp = (op >> 10) & 0x3f; dest = (op >> 4) & 0x3f; /* * Stack frame creation happens in a number of ways.. 
in the * general case when the stack frame is less than 511 bytes, * it's generally created by an addi or addi.l: * * addi/addi.l r15, -FRAME_SIZE, r15 * * in the event that the frame size is bigger than this, it's * typically created using a movi/sub pair as follows: * * movi FRAME_SIZE, rX * sub r15, rX, r15 */ switch (major) { case (0x00 >> 2): switch (minor) { case 0x8: /* add.l */ case 0x9: /* add */ /* Look for r15, r63, r14 */ if (src == 15 && disp == 63 && dest == 14) found_prologue_end = 1; break; case 0xa: /* sub.l */ case 0xb: /* sub */ if (src != 15 || dest != 15) continue; fp_displacement -= regcache[disp]; fp_prev = fp - fp_displacement; break; } break; case (0xa8 >> 2): /* st.l */ if (src != 15) continue; switch (dest) { case 14: if (offset_r14 || fp_displacement == 0) continue; offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54); offset_r14 *= sizeof(unsigned long); offset_r14 += fp_displacement; break; case 18: if (offset_r18 || fp_displacement == 0) continue; offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54); offset_r18 *= sizeof(unsigned long); offset_r18 += fp_displacement; break; } break; case (0xcc >> 2): /* movi */ if (dest >= 63) { printk(KERN_NOTICE "%s: Invalid dest reg %d " "specified in movi handler. Failed " "opcode was 0x%lx: ", __func__, dest, op); continue; } /* Sign extend */ regcache[dest] = ((((s64)(u64)op >> 10) & 0xffff) << 54) >> 54; break; case (0xd0 >> 2): /* addi */ case (0xd4 >> 2): /* addi.l */ /* Look for r15, -FRAME_SIZE, r15 */ if (src != 15 || dest != 15) continue; /* Sign extended frame size.. */ fp_displacement += (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54); fp_prev = fp - fp_displacement; break; } if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev) break; } if (offset_r14 == 0 || fp_prev == 0) { if (!offset_r14) pr_debug("Unable to find r14 offset\n"); if (!fp_prev) pr_debug("Unable to find previous fp\n"); return -EINVAL; } /* For innermost leaf function, there might not be a offset_r18 */ if (!*pprev_pc && (offset_r18 == 0)) return -EINVAL; *pprev_fp = *(unsigned long *)(fp_prev + offset_r14); if (offset_r18) *pprev_pc = *(unsigned long *)(fp_prev + offset_r18); *pprev_pc &= ~1; return 0; } /* Don't put this on the stack since we'll want to call sh64_unwind * when we're close to underflowing the stack anyway. 
*/ static struct pt_regs here_regs; extern const char syscall_ret; extern const char ret_from_syscall; extern const char ret_from_exception; extern const char ret_from_irq; static void sh64_unwind_inner(struct pt_regs *regs); static void unwind_nested (unsigned long pc, unsigned long fp) { if ((fp >= __MEMORY_START) && ((fp & 7) == 0)) { sh64_unwind_inner((struct pt_regs *) fp); } } static void sh64_unwind_inner(struct pt_regs *regs) { unsigned long pc, fp; int ofs = 0; int first_pass; pc = regs->pc & ~1; fp = regs->regs[14]; first_pass = 1; for (;;) { int cond; unsigned long next_fp, next_pc; if (pc == ((unsigned long) &syscall_ret & ~1)) { printk("SYSCALL\n"); unwind_nested(pc,fp); return; } if (pc == ((unsigned long) &ret_from_syscall & ~1)) { printk("SYSCALL (PREEMPTED)\n"); unwind_nested(pc,fp); return; } /* In this case, the PC is discovered by lookup_prev_stack_frame but it has 4 taken off it to look like the 'caller' */ if (pc == ((unsigned long) &ret_from_exception & ~1)) { printk("EXCEPTION\n"); unwind_nested(pc,fp); return; } if (pc == ((unsigned long) &ret_from_irq & ~1)) { printk("IRQ\n"); unwind_nested(pc,fp); return; } cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) && ((pc & 3) == 0) && ((fp & 7) == 0)); pc -= ofs; printk("[<%08lx>] ", pc); print_symbol("%s\n", pc); if (first_pass) { /* If the innermost frame is a leaf function, it's * possible that r18 is never saved out to the stack. */ next_pc = regs->regs[18]; } else { next_pc = 0; } if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) { ofs = sizeof(unsigned long); pc = next_pc & ~1; fp = next_fp; } else { printk("Unable to lookup previous stack frame\n"); break; } first_pass = 0; } printk("\n"); } void sh64_unwind(struct pt_regs *regs) { if (!regs) { /* * Fetch current regs if we have no other saved state to back * trace from. */ regs = &here_regs; __asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14])); __asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15])); __asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18])); __asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0])); __asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1])); __asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2])); __asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3])); __asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4])); __asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5])); __asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6])); __asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7])); __asm__ __volatile__ ( "pta 0f, tr0\n\t" "blink tr0, %0\n\t" "0: nop" : "=r" (regs->pc) ); } printk("\nCall Trace:\n"); sh64_unwind_inner(regs); }
gpl-2.0
pbystrup/CHIP-linux
arch/sh/kernel/cpu/sh3/clock-sh7709.c
9237
2106
/*
 * arch/sh/kernel/cpu/sh3/clock-sh7709.c
 *
 * SH7709 support for the clock framework
 *
 * Copyright (C) 2005 Andriy Skulysh
 *
 * Based on arch/sh/kernel/cpu/sh3/clock-sh7705.c
 * Copyright (C) 2005 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>

static int stc_multipliers[] = { 1, 2, 4, 8, 3, 6, 1, 1 };
static int ifc_divisors[] = { 1, 2, 4, 1, 3, 1, 1, 1 };
static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 };

static void master_clk_init(struct clk *clk)
{
	int frqcr = __raw_readw(FRQCR);
	int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);

	clk->rate *= pfc_divisors[idx];
}

static struct sh_clk_ops sh7709_master_clk_ops = {
	.init = master_clk_init,
};

static unsigned long module_clk_recalc(struct clk *clk)
{
	int frqcr = __raw_readw(FRQCR);
	int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);

	return clk->parent->rate / pfc_divisors[idx];
}

static struct sh_clk_ops sh7709_module_clk_ops = {
	.recalc = module_clk_recalc,
};

static unsigned long bus_clk_recalc(struct clk *clk)
{
	int frqcr = __raw_readw(FRQCR);
	int idx = (frqcr & 0x0080) ?
		((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4) : 1;

	return clk->parent->rate * stc_multipliers[idx];
}

static struct sh_clk_ops sh7709_bus_clk_ops = {
	.recalc = bus_clk_recalc,
};

static unsigned long cpu_clk_recalc(struct clk *clk)
{
	int frqcr = __raw_readw(FRQCR);
	int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);

	return clk->parent->rate / ifc_divisors[idx];
}

static struct sh_clk_ops sh7709_cpu_clk_ops = {
	.recalc = cpu_clk_recalc,
};

static struct sh_clk_ops *sh7709_clk_ops[] = {
	&sh7709_master_clk_ops,
	&sh7709_module_clk_ops,
	&sh7709_bus_clk_ops,
	&sh7709_cpu_clk_ops,
};

void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
	if (idx < ARRAY_SIZE(sh7709_clk_ops))
		*ops = sh7709_clk_ops[idx];
}
gpl-2.0
CyanogenMod/android_kernel_goldfish
fs/nls/nls_cp1251.c
12565
12751
/* * linux/fs/nls/nls_cp1251.c * * Charset cp1251 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x0402, 0x0403, 0x201a, 0x0453, 0x201e, 0x2026, 0x2020, 0x2021, 0x20ac, 0x2030, 0x0409, 0x2039, 0x040a, 0x040c, 0x040b, 0x040f, /* 0x90*/ 0x0452, 0x2018, 0x2019, 0x201c, 0x201d, 0x2022, 0x2013, 0x2014, 0x0000, 0x2122, 0x0459, 0x203a, 0x045a, 0x045c, 0x045b, 0x045f, /* 0xa0*/ 0x00a0, 0x040e, 0x045e, 0x0408, 0x00a4, 0x0490, 0x00a6, 0x00a7, 0x0401, 0x00a9, 0x0404, 0x00ab, 0x00ac, 0x00ad, 0x00ae, 0x0407, /* 0xb0*/ 0x00b0, 0x00b1, 0x0406, 0x0456, 0x0491, 0x00b5, 0x00b6, 0x00b7, 0x0451, 0x2116, 0x0454, 0x00bb, 0x0458, 0x0405, 0x0455, 0x0457, /* 0xc0*/ 0x0410, 0x0411, 0x0412, 0x0413, 0x0414, 0x0415, 0x0416, 0x0417, 0x0418, 0x0419, 0x041a, 0x041b, 0x041c, 0x041d, 0x041e, 0x041f, /* 0xd0*/ 0x0420, 0x0421, 0x0422, 0x0423, 0x0424, 0x0425, 0x0426, 0x0427, 0x0428, 0x0429, 0x042a, 0x042b, 0x042c, 0x042d, 0x042e, 0x042f, /* 0xe0*/ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437, 0x0438, 0x0439, 0x043a, 0x043b, 0x043c, 0x043d, 0x043e, 0x043f, /* 0xf0*/ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447, 0x0448, 0x0449, 0x044a, 0x044b, 0x044c, 0x044d, 0x044e, 0x044f, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 
0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xa0, 0x00, 0x00, 0x00, 0xa4, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0x00, 0xa9, 0x00, 0xab, 0xac, 0xad, 0xae, 0x00, /* 0xa8-0xaf */ 0xb0, 0xb1, 0x00, 0x00, 0x00, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page04[256] = { 0x00, 0xa8, 0x80, 0x81, 0xaa, 0xbd, 0xb2, 0xaf, /* 0x00-0x07 */ 0xa3, 0x8a, 0x8c, 0x8e, 0x8d, 0x00, 0xa1, 0x8f, /* 0x08-0x0f */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0x10-0x17 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0x18-0x1f */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0x20-0x27 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0x28-0x2f */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x30-0x37 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x38-0x3f */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0x40-0x47 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0x48-0x4f */ 0x00, 0xb8, 0x90, 0x83, 0xba, 0xbe, 0xb3, 0xbf, /* 0x50-0x57 */ 0xbc, 0x9a, 0x9c, 0x9e, 0x9d, 0x00, 0xa2, 0x9f, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0xa5, 0xb4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x96, 0x97, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x91, 0x92, 0x82, 0x00, 0x93, 0x94, 0x84, 0x00, /* 0x18-0x1f */ 0x86, 0x87, 0x95, 0x00, 0x00, 0x00, 0x85, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x89, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x8b, 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ }; static const unsigned char page21[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb9, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ }; static const unsigned char *const page_uni2charset[256] = { page00, NULL, NULL, NULL, page04, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, page21, NULL, NULL, NULL, NULL, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x90, 0x83, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x9a, 0x8b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa2, 0xa2, 0xbc, 0xa4, 0xb4, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xb8, 0xa9, 0xba, 0xab, 0xac, 0xad, 0xae, 0xbf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb3, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbe, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xd0-0xd7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x81, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x80, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x8a, 0x9b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x98-0x9f */ 0xa0, 
0xa1, 0xa1, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb2, 0xa5, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xa8, 0xb9, 0xaa, 0xbb, 0xa3, 0xbd, 0xbd, 0xaf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xf0-0xf7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "cp1251", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_cp1251(void) { return register_nls(&table); } static void __exit exit_nls_cp1251(void) { unregister_nls(&table); } module_init(init_nls_cp1251) module_exit(exit_nls_cp1251) MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
ParanoidAndroid/android_kernel_grouper
lib/crc-itu-t.c
13845
2892
/*
 * crc-itu-t.c
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc-itu-t.h>

/** CRC table for the CRC ITU-T V.41 0x1021 (x^16 + x^12 + x^5 + 1) */
const u16 crc_itu_t_table[256] = {
	0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
	0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
	0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
	0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
	0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
	0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
	0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
	0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
	0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
	0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
	0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
	0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
	0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
	0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
	0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
	0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
	0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
	0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
	0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
	0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
	0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
	0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
	0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
	0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
	0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
	0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
	0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
	0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
	0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
	0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
	0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
	0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0
};
EXPORT_SYMBOL(crc_itu_t_table);

/**
 * crc_itu_t - Compute the CRC-ITU-T for the data buffer
 *
 * @crc: previous CRC value
 * @buffer: data pointer
 * @len: number of bytes in the buffer
 *
 * Returns the updated CRC value
 */
u16 crc_itu_t(u16 crc, const u8 *buffer, size_t len)
{
	while (len--)
		crc = crc_itu_t_byte(crc, *buffer++);
	return crc;
}
EXPORT_SYMBOL(crc_itu_t);

MODULE_DESCRIPTION("CRC ITU-T V.41 calculations");
MODULE_LICENSE("GPL");
gpl-2.0
TeamBliss-Devices/android_kernel_nvidia_shieldtablet
arch/arm/mach-tegra/board-pluto-sdhci.c
22
8477
/* * arch/arm/mach-tegra/board-pluto-sdhci.c * * Copyright (c) 2012-2013 NVIDIA Corporation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/resource.h> #include <linux/platform_device.h> #include <linux/wlan_plat.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/mmc/host.h> #include <linux/wl12xx.h> #include <linux/platform_data/mmc-sdhci-tegra.h> #include "tegra-board-id.h" #include <asm/mach-types.h> #include <mach/irqs.h> #include <mach/gpio-tegra.h> #include "gpio-names.h" #include "board.h" #include "board-pluto.h" #include "dvfs.h" #include "iomap.h" #define PLUTO_WLAN_PWR TEGRA_GPIO_PCC5 #define PLUTO_WLAN_WOW TEGRA_GPIO_PU5 #define PLUTO_SD_CD TEGRA_GPIO_PV2 #define WLAN_PWR_STR "wlan_power" #define WLAN_WOW_STR "bcmsdh_sdmmc" #if defined(CONFIG_BCMDHD_EDP_SUPPORT) /* Wifi power levels */ #define ON 1080 /* 1080 mW */ #define OFF 0 static unsigned int wifi_states[] = {ON, OFF}; #endif static void (*wifi_status_cb)(int card_present, void *dev_id); static void *wifi_status_cb_devid; static int pluto_wifi_status_register(void (*callback)(int , void *), void *); static int pluto_wifi_reset(int on); static int pluto_wifi_power(int on); static int pluto_wifi_set_carddetect(int val); static struct wifi_platform_data pluto_wifi_control = { .set_power = pluto_wifi_power, .set_reset = pluto_wifi_reset, .set_carddetect = pluto_wifi_set_carddetect, }; static struct resource wifi_resource[] = { [0] = { .name = "bcm4329_wlan_irq", .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE, }, }; static struct platform_device pluto_wifi_device = { .name = "bcm4329_wlan", .id = 1, .num_resources = 1, .resource = wifi_resource, .dev = { .platform_data = &pluto_wifi_control, }, }; #ifdef CONFIG_MMC_EMBEDDED_SDIO static struct embedded_sdio_data embedded_sdio_data0 = { .cccr = { .sdio_vsn = 2, .multi_block = 1, .low_speed = 0, .wide_bus = 0, .high_power = 1, .high_speed = 1, }, .cis = { .vendor = 0x02d0, .device = 0x4329, }, }; #endif struct tegra_sdhci_platform_data pluto_tegra_sdhci_platform_data0 = { .mmc_data = { .register_status_notify = pluto_wifi_status_register, #ifdef CONFIG_MMC_EMBEDDED_SDIO .embedded_sdio = &embedded_sdio_data0, #endif .built_in = 0, .ocr_mask = MMC_OCR_1V8_MASK, }, #ifndef CONFIG_MMC_EMBEDDED_SDIO .pm_flags = MMC_PM_KEEP_POWER, #endif .cd_gpio = -1, .wp_gpio = -1, .power_gpio = -1, .tap_delay = 0x2, .trim_delay = 0x2, .ddr_clk_limit = 41000000, .max_clk_limit = 82000000, .uhs_mask = MMC_UHS_MASK_DDR50, .disable_clock_gate = true, }; static struct resource sdhci_resource0[] = { [0] = { .start = INT_SDMMC1, .end = INT_SDMMC1, .flags = IORESOURCE_IRQ, }, [1] = { .start = TEGRA_SDMMC1_BASE, .end = TEGRA_SDMMC1_BASE + TEGRA_SDMMC1_SIZE-1, .flags = IORESOURCE_MEM, }, }; static struct resource sdhci_resource2[] = { [0] = { .start = INT_SDMMC3, .end = INT_SDMMC3, .flags = IORESOURCE_IRQ, }, [1] = { .start = TEGRA_SDMMC3_BASE, .end = TEGRA_SDMMC3_BASE + TEGRA_SDMMC3_SIZE-1, .flags = IORESOURCE_MEM, }, }; static struct resource sdhci_resource3[] = { [0] = { .start = 
INT_SDMMC4, .end = INT_SDMMC4, .flags = IORESOURCE_IRQ, }, [1] = { .start = TEGRA_SDMMC4_BASE, .end = TEGRA_SDMMC4_BASE + TEGRA_SDMMC4_SIZE-1, .flags = IORESOURCE_MEM, }, }; static struct tegra_sdhci_platform_data tegra_sdhci_platform_data2 = { .cd_gpio = PLUTO_SD_CD, .wp_gpio = -1, .power_gpio = -1, .tap_delay = 0x3, .trim_delay = 0x3, .ddr_clk_limit = 41000000, .max_clk_limit = 156000000, .uhs_mask = MMC_UHS_MASK_DDR50, }; static struct tegra_sdhci_platform_data tegra_sdhci_platform_data3 = { .cd_gpio = -1, .wp_gpio = -1, .power_gpio = -1, .is_8bit = 1, .tap_delay = 0x5, .trim_delay = 0xA, .ddr_trim_delay = -1, .ddr_clk_limit = 41000000, .max_clk_limit = 156000000, .mmc_data = { .built_in = 1, .ocr_mask = MMC_OCR_1V8_MASK, } }; static struct platform_device tegra_sdhci_device0 = { .name = "sdhci-tegra", .id = 0, .resource = sdhci_resource0, .num_resources = ARRAY_SIZE(sdhci_resource0), .dev = { .platform_data = &pluto_tegra_sdhci_platform_data0, }, }; static struct platform_device tegra_sdhci_device2 = { .name = "sdhci-tegra", .id = 2, .resource = sdhci_resource2, .num_resources = ARRAY_SIZE(sdhci_resource2), .dev = { .platform_data = &tegra_sdhci_platform_data2, }, }; static struct platform_device tegra_sdhci_device3 = { .name = "sdhci-tegra", .id = 3, .resource = sdhci_resource3, .num_resources = ARRAY_SIZE(sdhci_resource3), .dev = { .platform_data = &tegra_sdhci_platform_data3, }, }; static int pluto_wifi_status_register( void (*callback)(int card_present, void *dev_id), void *dev_id) { if (wifi_status_cb) return -EAGAIN; wifi_status_cb = callback; wifi_status_cb_devid = dev_id; return 0; } static int pluto_wifi_set_carddetect(int val) { pr_debug("%s: %d\n", __func__, val); if (wifi_status_cb) wifi_status_cb(val, wifi_status_cb_devid); else pr_warning("%s: Nobody to notify\n", __func__); return 0; } static int pluto_wifi_power(int on) { pr_debug("%s: %d\n", __func__, on); gpio_set_value(PLUTO_WLAN_PWR, on); mdelay(100); return 0; } static int pluto_wifi_reset(int on) { pr_debug("%s: do nothing\n", __func__); return 0; } static int __init pluto_wifi_init(void) { int rc = 0; /* init wlan_pwr gpio */ rc = gpio_request(PLUTO_WLAN_PWR, WLAN_PWR_STR); /* Due to pre powering, sometimes gpio req returns EBUSY */ if ((rc < 0) && (rc != -EBUSY)) { pr_err("Wifi init: gpio req failed:%d\n", rc); return rc; } /* Due to pre powering, sometimes gpio req returns EBUSY */ rc = gpio_direction_output(PLUTO_WLAN_PWR, 0); if ((rc < 0) && (rc != -EBUSY)) { gpio_free(PLUTO_WLAN_PWR); return rc; } /* init wlan_wow gpio */ rc = gpio_request(PLUTO_WLAN_WOW, WLAN_WOW_STR); if (rc < 0) { pr_err("wifi init: gpio req failed:%d\n", rc); gpio_free(PLUTO_WLAN_PWR); return rc; } rc = gpio_direction_input(PLUTO_WLAN_WOW); if (rc < 0) { gpio_free(PLUTO_WLAN_WOW); gpio_free(PLUTO_WLAN_PWR); return rc; } wifi_resource[0].start = wifi_resource[0].end = gpio_to_irq(PLUTO_WLAN_WOW); platform_device_register(&pluto_wifi_device); return rc; } #ifdef CONFIG_TEGRA_PREPOWER_WIFI static int __init pluto_wifi_prepower(void) { if (!machine_is_tegra_pluto()) return 0; pluto_wifi_power(1); return 0; } subsys_initcall_sync(pluto_wifi_prepower); #endif int __init pluto_sdhci_init(void) { int nominal_core_mv; int min_vcore_override_mv; int boot_vcore_mv; nominal_core_mv = tegra_dvfs_rail_get_nominal_millivolts(tegra_core_rail); if (nominal_core_mv > 0) { pluto_tegra_sdhci_platform_data0.nominal_vcore_mv = nominal_core_mv; tegra_sdhci_platform_data2.nominal_vcore_mv = nominal_core_mv; tegra_sdhci_platform_data3.nominal_vcore_mv = 
nominal_core_mv; } min_vcore_override_mv = tegra_dvfs_rail_get_override_floor(tegra_core_rail); if (min_vcore_override_mv) { pluto_tegra_sdhci_platform_data0.min_vcore_override_mv = min_vcore_override_mv; tegra_sdhci_platform_data2.min_vcore_override_mv = min_vcore_override_mv; tegra_sdhci_platform_data3.min_vcore_override_mv = min_vcore_override_mv; } boot_vcore_mv = tegra_dvfs_rail_get_boot_level(tegra_core_rail); if (boot_vcore_mv) { pluto_tegra_sdhci_platform_data0.boot_vcore_mv = boot_vcore_mv; tegra_sdhci_platform_data2.boot_vcore_mv = boot_vcore_mv; tegra_sdhci_platform_data3.boot_vcore_mv = boot_vcore_mv; } if ((tegra_sdhci_platform_data3.uhs_mask & MMC_MASK_HS200) && (!(tegra_sdhci_platform_data3.uhs_mask & MMC_UHS_MASK_DDR50))) tegra_sdhci_platform_data3.trim_delay = 0; platform_device_register(&tegra_sdhci_device3); platform_device_register(&tegra_sdhci_device2); platform_device_register(&tegra_sdhci_device0); pluto_wifi_init(); return 0; }
gpl-2.0
blueskycoco/rt-thread
bsp/gkipc/libraries/drv/710XS/gh/src/gh_uart2.c
22
37789
/****************************************************************************** ** ** \file gh_uart2.c ** ** \brief UART2. ** ** Copyright: 2012 - 2013 (C) GoKe Microelectronics ShangHai Branch ** ** \attention THIS SAMPLE CODE IS PROVIDED AS IS. GOKE MICROELECTRONICS ** ACCEPTS NO RESPONSIBILITY OR LIABILITY FOR ANY ERRORS OR ** OMMISSIONS. ** ** \note Do not modify this file as it is generated automatically. ** ******************************************************************************/ #include "gh_uart2.h" /*----------------------------------------------------------------------------*/ /* mirror variables */ /*----------------------------------------------------------------------------*/ GH_UART2_THR_S m_uart2_thr; GH_UART2_FCR_S m_uart2_fcr; /*----------------------------------------------------------------------------*/ /* register UART2_RBR (read) */ /*----------------------------------------------------------------------------*/ #if GH_INLINE_LEVEL == 0 U32 GH_UART2_get_RBR(void) { U32 value = (*(volatile U32 *)REG_UART2_RBR); #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_RBR] --> 0x%08x\n", REG_UART2_RBR,value); #endif return value; } U8 GH_UART2_get_RBR_Data(void) { GH_UART2_RBR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_RBR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_RBR_Data] --> 0x%08x\n", REG_UART2_RBR,value); #endif return tmp_value.bitc.data; } #endif /* GH_INLINE_LEVEL == 0 */ /*----------------------------------------------------------------------------*/ /* register UART2_THR (write) */ /*----------------------------------------------------------------------------*/ #if GH_INLINE_LEVEL < 2 void GH_UART2_set_THR(U32 data) { m_uart2_thr.all = data; *(volatile U32 *)REG_UART2_THR = data; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_THR] <-- 0x%08x\n", REG_UART2_THR,data,data); #endif } U32 GH_UART2_getm_THR(void) { #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "[GH_UART2_getm_THR] --> 0x%08x\n", m_uart2_thr.all); #endif return m_uart2_thr.all; } void GH_UART2_set_THR_Data(U8 data) { m_uart2_thr.bitc.data = data; *(volatile U32 *)REG_UART2_THR = m_uart2_thr.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_THR_Data] <-- 0x%08x\n", REG_UART2_THR,m_uart2_thr.all,m_uart2_thr.all); #endif } U8 GH_UART2_getm_THR_Data(void) { #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "[GH_UART2_getm_THR_Data] --> 0x%08x\n", m_uart2_thr.bitc.data); #endif return m_uart2_thr.bitc.data; } #endif /* GH_INLINE_LEVEL < 2 */ /*----------------------------------------------------------------------------*/ /* register UART2_DLL (read/write) */ /*----------------------------------------------------------------------------*/ #if GH_INLINE_LEVEL == 0 void GH_UART2_set_DLL(U32 data) { *(volatile U32 *)REG_UART2_DLL = data; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_DLL] <-- 0x%08x\n", REG_UART2_DLL,data,data); #endif } U32 GH_UART2_get_DLL(void) { U32 value = (*(volatile U32 *)REG_UART2_DLL); #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_DLL] --> 0x%08x\n", REG_UART2_DLL,value); #endif return value; } void GH_UART2_set_DLL_BaudDivint_L(U8 data) { GH_UART2_DLL_S d; d.all = 
*(volatile U32 *)REG_UART2_DLL; d.bitc.bauddivint_l = data; *(volatile U32 *)REG_UART2_DLL = d.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_DLL_BaudDivint_L] <-- 0x%08x\n", REG_UART2_DLL,d.all,d.all); #endif } U8 GH_UART2_get_DLL_BaudDivint_L(void) { GH_UART2_DLL_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_DLL); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_DLL_BaudDivint_L] --> 0x%08x\n", REG_UART2_DLL,value); #endif return tmp_value.bitc.bauddivint_l; } #endif /* GH_INLINE_LEVEL == 0 */ /*----------------------------------------------------------------------------*/ /* register UART2_IER (read/write) */ /*----------------------------------------------------------------------------*/ #if GH_INLINE_LEVEL == 0 void GH_UART2_set_IER(U32 data) { *(volatile U32 *)REG_UART2_IER = data; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_IER] <-- 0x%08x\n", REG_UART2_IER,data,data); #endif } U32 GH_UART2_get_IER(void) { U32 value = (*(volatile U32 *)REG_UART2_IER); #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_IER] --> 0x%08x\n", REG_UART2_IER,value); #endif return value; } void GH_UART2_set_IER_erbfi(U8 data) { GH_UART2_IER_S d; d.all = *(volatile U32 *)REG_UART2_IER; d.bitc.erbfi = data; *(volatile U32 *)REG_UART2_IER = d.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_IER_erbfi] <-- 0x%08x\n", REG_UART2_IER,d.all,d.all); #endif } U8 GH_UART2_get_IER_erbfi(void) { GH_UART2_IER_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_IER); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_IER_erbfi] --> 0x%08x\n", REG_UART2_IER,value); #endif return tmp_value.bitc.erbfi; } void GH_UART2_set_IER_etbei(U8 data) { GH_UART2_IER_S d; d.all = *(volatile U32 *)REG_UART2_IER; d.bitc.etbei = data; *(volatile U32 *)REG_UART2_IER = d.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_IER_etbei] <-- 0x%08x\n", REG_UART2_IER,d.all,d.all); #endif } U8 GH_UART2_get_IER_etbei(void) { GH_UART2_IER_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_IER); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_IER_etbei] --> 0x%08x\n", REG_UART2_IER,value); #endif return tmp_value.bitc.etbei; } void GH_UART2_set_IER_elsi(U8 data) { GH_UART2_IER_S d; d.all = *(volatile U32 *)REG_UART2_IER; d.bitc.elsi = data; *(volatile U32 *)REG_UART2_IER = d.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_IER_elsi] <-- 0x%08x\n", REG_UART2_IER,d.all,d.all); #endif } U8 GH_UART2_get_IER_elsi(void) { GH_UART2_IER_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_IER); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_IER_elsi] --> 0x%08x\n", REG_UART2_IER,value); #endif return tmp_value.bitc.elsi; } void GH_UART2_set_IER_edssi(U8 data) { GH_UART2_IER_S d; d.all = *(volatile U32 *)REG_UART2_IER; d.bitc.edssi = data; *(volatile U32 *)REG_UART2_IER = d.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ 
[GH_UART2_set_IER_edssi] <-- 0x%08x\n", REG_UART2_IER,d.all,d.all); #endif } U8 GH_UART2_get_IER_edssi(void) { GH_UART2_IER_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_IER); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_IER_edssi] --> 0x%08x\n", REG_UART2_IER,value); #endif return tmp_value.bitc.edssi; } #endif /* GH_INLINE_LEVEL == 0 */ /*----------------------------------------------------------------------------*/ /* register UART2_DLH (read/write) */ /*----------------------------------------------------------------------------*/ #if GH_INLINE_LEVEL == 0 void GH_UART2_set_DLH(U32 data) { *(volatile U32 *)REG_UART2_DLH = data; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_DLH] <-- 0x%08x\n", REG_UART2_DLH,data,data); #endif } U32 GH_UART2_get_DLH(void) { U32 value = (*(volatile U32 *)REG_UART2_DLH); #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_DLH] --> 0x%08x\n", REG_UART2_DLH,value); #endif return value; } void GH_UART2_set_DLH_BaudDivint_H(U8 data) { GH_UART2_DLH_S d; d.all = *(volatile U32 *)REG_UART2_DLH; d.bitc.bauddivint_h = data; *(volatile U32 *)REG_UART2_DLH = d.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_DLH_BaudDivint_H] <-- 0x%08x\n", REG_UART2_DLH,d.all,d.all); #endif } U8 GH_UART2_get_DLH_BaudDivint_H(void) { GH_UART2_DLH_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_DLH); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_DLH_BaudDivint_H] --> 0x%08x\n", REG_UART2_DLH,value); #endif return tmp_value.bitc.bauddivint_h; } #endif /* GH_INLINE_LEVEL == 0 */ /*----------------------------------------------------------------------------*/ /* register UART2_IIR (read) */ /*----------------------------------------------------------------------------*/ #if GH_INLINE_LEVEL == 0 U32 GH_UART2_get_IIR(void) { U32 value = (*(volatile U32 *)REG_UART2_IIR); #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_IIR] --> 0x%08x\n", REG_UART2_IIR,value); #endif return value; } U8 GH_UART2_get_IIR_interrupt_id(void) { GH_UART2_IIR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_IIR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_IIR_interrupt_id] --> 0x%08x\n", REG_UART2_IIR,value); #endif return tmp_value.bitc.interrupt_id; } U8 GH_UART2_get_IIR_fifos_enabled(void) { GH_UART2_IIR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_IIR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_IIR_fifos_enabled] --> 0x%08x\n", REG_UART2_IIR,value); #endif return tmp_value.bitc.fifos_enabled; } #endif /* GH_INLINE_LEVEL == 0 */ /*----------------------------------------------------------------------------*/ /* register UART2_FCR (write) */ /*----------------------------------------------------------------------------*/ #if GH_INLINE_LEVEL < 2 void GH_UART2_set_FCR(U32 data) { m_uart2_fcr.all = data; *(volatile U32 *)REG_UART2_FCR = data; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_FCR] <-- 0x%08x\n", REG_UART2_FCR,data,data); #endif } U32 GH_UART2_getm_FCR(void) { #if 
GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "[GH_UART2_getm_FCR] --> 0x%08x\n", m_uart2_fcr.all); #endif return m_uart2_fcr.all; } void GH_UART2_set_FCR_FIFO_Enable(U8 data) { m_uart2_fcr.bitc.fifo_enable = data; *(volatile U32 *)REG_UART2_FCR = m_uart2_fcr.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_FCR_FIFO_Enable] <-- 0x%08x\n", REG_UART2_FCR,m_uart2_fcr.all,m_uart2_fcr.all); #endif } U8 GH_UART2_getm_FCR_FIFO_Enable(void) { #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "[GH_UART2_getm_FCR_FIFO_Enable] --> 0x%08x\n", m_uart2_fcr.bitc.fifo_enable); #endif return m_uart2_fcr.bitc.fifo_enable; } void GH_UART2_set_FCR_RCVR_FIFO_Reset(U8 data) { m_uart2_fcr.bitc.rcvr_fifo_reset = data; *(volatile U32 *)REG_UART2_FCR = m_uart2_fcr.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_FCR_RCVR_FIFO_Reset] <-- 0x%08x\n", REG_UART2_FCR,m_uart2_fcr.all,m_uart2_fcr.all); #endif } U8 GH_UART2_getm_FCR_RCVR_FIFO_Reset(void) { #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "[GH_UART2_getm_FCR_RCVR_FIFO_Reset] --> 0x%08x\n", m_uart2_fcr.bitc.rcvr_fifo_reset); #endif return m_uart2_fcr.bitc.rcvr_fifo_reset; } void GH_UART2_set_FCR_XMIT_FIFO_Reset(U8 data) { m_uart2_fcr.bitc.xmit_fifo_reset = data; *(volatile U32 *)REG_UART2_FCR = m_uart2_fcr.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_FCR_XMIT_FIFO_Reset] <-- 0x%08x\n", REG_UART2_FCR,m_uart2_fcr.all,m_uart2_fcr.all); #endif } U8 GH_UART2_getm_FCR_XMIT_FIFO_Reset(void) { #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "[GH_UART2_getm_FCR_XMIT_FIFO_Reset] --> 0x%08x\n", m_uart2_fcr.bitc.xmit_fifo_reset); #endif return m_uart2_fcr.bitc.xmit_fifo_reset; } void GH_UART2_set_FCR_DMA_Mode(U8 data) { m_uart2_fcr.bitc.dma_mode = data; *(volatile U32 *)REG_UART2_FCR = m_uart2_fcr.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_FCR_DMA_Mode] <-- 0x%08x\n", REG_UART2_FCR,m_uart2_fcr.all,m_uart2_fcr.all); #endif } U8 GH_UART2_getm_FCR_DMA_Mode(void) { #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "[GH_UART2_getm_FCR_DMA_Mode] --> 0x%08x\n", m_uart2_fcr.bitc.dma_mode); #endif return m_uart2_fcr.bitc.dma_mode; } void GH_UART2_set_FCR_TX_Empty_Trigger(U8 data) { m_uart2_fcr.bitc.tx_empty_trigger = data; *(volatile U32 *)REG_UART2_FCR = m_uart2_fcr.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_FCR_TX_Empty_Trigger] <-- 0x%08x\n", REG_UART2_FCR,m_uart2_fcr.all,m_uart2_fcr.all); #endif } U8 GH_UART2_getm_FCR_TX_Empty_Trigger(void) { #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "[GH_UART2_getm_FCR_TX_Empty_Trigger] --> 0x%08x\n", m_uart2_fcr.bitc.tx_empty_trigger); #endif return m_uart2_fcr.bitc.tx_empty_trigger; } void GH_UART2_set_FCR_RCVR_Trigger(U8 data) { m_uart2_fcr.bitc.rcvr_trigger = data; *(volatile U32 *)REG_UART2_FCR = m_uart2_fcr.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_FCR_RCVR_Trigger] <-- 0x%08x\n", REG_UART2_FCR,m_uart2_fcr.all,m_uart2_fcr.all); #endif } U8 GH_UART2_getm_FCR_RCVR_Trigger(void) { #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "[GH_UART2_getm_FCR_RCVR_Trigger] --> 0x%08x\n", m_uart2_fcr.bitc.rcvr_trigger); #endif return m_uart2_fcr.bitc.rcvr_trigger; } 
#endif /* GH_INLINE_LEVEL < 2 */ /*----------------------------------------------------------------------------*/ /* register UART2_LCR (read/write) */ /*----------------------------------------------------------------------------*/ #if GH_INLINE_LEVEL == 0 void GH_UART2_set_LCR(U32 data) { *(volatile U32 *)REG_UART2_LCR = data; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_LCR] <-- 0x%08x\n", REG_UART2_LCR,data,data); #endif } U32 GH_UART2_get_LCR(void) { U32 value = (*(volatile U32 *)REG_UART2_LCR); #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_LCR] --> 0x%08x\n", REG_UART2_LCR,value); #endif return value; } void GH_UART2_set_LCR_cls(U8 data) { GH_UART2_LCR_S d; d.all = *(volatile U32 *)REG_UART2_LCR; d.bitc.cls = data; *(volatile U32 *)REG_UART2_LCR = d.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_LCR_cls] <-- 0x%08x\n", REG_UART2_LCR,d.all,d.all); #endif } U8 GH_UART2_get_LCR_cls(void) { GH_UART2_LCR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_LCR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_LCR_cls] --> 0x%08x\n", REG_UART2_LCR,value); #endif return tmp_value.bitc.cls; } void GH_UART2_set_LCR_stop(U8 data) { GH_UART2_LCR_S d; d.all = *(volatile U32 *)REG_UART2_LCR; d.bitc.stop = data; *(volatile U32 *)REG_UART2_LCR = d.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_LCR_stop] <-- 0x%08x\n", REG_UART2_LCR,d.all,d.all); #endif } U8 GH_UART2_get_LCR_stop(void) { GH_UART2_LCR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_LCR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_LCR_stop] --> 0x%08x\n", REG_UART2_LCR,value); #endif return tmp_value.bitc.stop; } void GH_UART2_set_LCR_pen(U8 data) { GH_UART2_LCR_S d; d.all = *(volatile U32 *)REG_UART2_LCR; d.bitc.pen = data; *(volatile U32 *)REG_UART2_LCR = d.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_LCR_pen] <-- 0x%08x\n", REG_UART2_LCR,d.all,d.all); #endif } U8 GH_UART2_get_LCR_pen(void) { GH_UART2_LCR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_LCR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_LCR_pen] --> 0x%08x\n", REG_UART2_LCR,value); #endif return tmp_value.bitc.pen; } void GH_UART2_set_LCR_eps(U8 data) { GH_UART2_LCR_S d; d.all = *(volatile U32 *)REG_UART2_LCR; d.bitc.eps = data; *(volatile U32 *)REG_UART2_LCR = d.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_LCR_eps] <-- 0x%08x\n", REG_UART2_LCR,d.all,d.all); #endif } U8 GH_UART2_get_LCR_eps(void) { GH_UART2_LCR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_LCR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_LCR_eps] --> 0x%08x\n", REG_UART2_LCR,value); #endif return tmp_value.bitc.eps; } void GH_UART2_set_LCR_sticky_parity(U8 data) { GH_UART2_LCR_S d; d.all = *(volatile U32 *)REG_UART2_LCR; d.bitc.sticky_parity = data; *(volatile U32 *)REG_UART2_LCR = d.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ 
[GH_UART2_set_LCR_sticky_parity] <-- 0x%08x\n", REG_UART2_LCR,d.all,d.all); #endif } U8 GH_UART2_get_LCR_sticky_parity(void) { GH_UART2_LCR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_LCR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_LCR_sticky_parity] --> 0x%08x\n", REG_UART2_LCR,value); #endif return tmp_value.bitc.sticky_parity; } void GH_UART2_set_LCR_breaks(U8 data) { GH_UART2_LCR_S d; d.all = *(volatile U32 *)REG_UART2_LCR; d.bitc.breaks = data; *(volatile U32 *)REG_UART2_LCR = d.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_LCR_breaks] <-- 0x%08x\n", REG_UART2_LCR,d.all,d.all); #endif } U8 GH_UART2_get_LCR_breaks(void) { GH_UART2_LCR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_LCR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_LCR_breaks] --> 0x%08x\n", REG_UART2_LCR,value); #endif return tmp_value.bitc.breaks; } void GH_UART2_set_LCR_dlab(U8 data) { GH_UART2_LCR_S d; d.all = *(volatile U32 *)REG_UART2_LCR; d.bitc.dlab = data; *(volatile U32 *)REG_UART2_LCR = d.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_LCR_dlab] <-- 0x%08x\n", REG_UART2_LCR,d.all,d.all); #endif } U8 GH_UART2_get_LCR_dlab(void) { GH_UART2_LCR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_LCR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_LCR_dlab] --> 0x%08x\n", REG_UART2_LCR,value); #endif return tmp_value.bitc.dlab; } #endif /* GH_INLINE_LEVEL == 0 */ /*----------------------------------------------------------------------------*/ /* register UART2_MCR (read/write) */ /*----------------------------------------------------------------------------*/ #if GH_INLINE_LEVEL == 0 void GH_UART2_set_MCR(U32 data) { *(volatile U32 *)REG_UART2_MCR = data; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_MCR] <-- 0x%08x\n", REG_UART2_MCR,data,data); #endif } U32 GH_UART2_get_MCR(void) { U32 value = (*(volatile U32 *)REG_UART2_MCR); #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_MCR] --> 0x%08x\n", REG_UART2_MCR,value); #endif return value; } void GH_UART2_set_MCR_dtr(U8 data) { GH_UART2_MCR_S d; d.all = *(volatile U32 *)REG_UART2_MCR; d.bitc.dtr = data; *(volatile U32 *)REG_UART2_MCR = d.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_MCR_dtr] <-- 0x%08x\n", REG_UART2_MCR,d.all,d.all); #endif } U8 GH_UART2_get_MCR_dtr(void) { GH_UART2_MCR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_MCR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_MCR_dtr] --> 0x%08x\n", REG_UART2_MCR,value); #endif return tmp_value.bitc.dtr; } void GH_UART2_set_MCR_rts(U8 data) { GH_UART2_MCR_S d; d.all = *(volatile U32 *)REG_UART2_MCR; d.bitc.rts = data; *(volatile U32 *)REG_UART2_MCR = d.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_MCR_rts] <-- 0x%08x\n", REG_UART2_MCR,d.all,d.all); #endif } U8 GH_UART2_get_MCR_rts(void) { GH_UART2_MCR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_MCR); tmp_value.all = value; #if 
GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_MCR_rts] --> 0x%08x\n", REG_UART2_MCR,value); #endif return tmp_value.bitc.rts; } void GH_UART2_set_MCR_out1(U8 data) { GH_UART2_MCR_S d; d.all = *(volatile U32 *)REG_UART2_MCR; d.bitc.out1 = data; *(volatile U32 *)REG_UART2_MCR = d.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_MCR_out1] <-- 0x%08x\n", REG_UART2_MCR,d.all,d.all); #endif } U8 GH_UART2_get_MCR_out1(void) { GH_UART2_MCR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_MCR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_MCR_out1] --> 0x%08x\n", REG_UART2_MCR,value); #endif return tmp_value.bitc.out1; } void GH_UART2_set_MCR_out2(U8 data) { GH_UART2_MCR_S d; d.all = *(volatile U32 *)REG_UART2_MCR; d.bitc.out2 = data; *(volatile U32 *)REG_UART2_MCR = d.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_MCR_out2] <-- 0x%08x\n", REG_UART2_MCR,d.all,d.all); #endif } U8 GH_UART2_get_MCR_out2(void) { GH_UART2_MCR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_MCR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_MCR_out2] --> 0x%08x\n", REG_UART2_MCR,value); #endif return tmp_value.bitc.out2; } void GH_UART2_set_MCR_loopback(U8 data) { GH_UART2_MCR_S d; d.all = *(volatile U32 *)REG_UART2_MCR; d.bitc.loopback = data; *(volatile U32 *)REG_UART2_MCR = d.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_MCR_loopback] <-- 0x%08x\n", REG_UART2_MCR,d.all,d.all); #endif } U8 GH_UART2_get_MCR_loopback(void) { GH_UART2_MCR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_MCR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_MCR_loopback] --> 0x%08x\n", REG_UART2_MCR,value); #endif return tmp_value.bitc.loopback; } void GH_UART2_set_MCR_afce(U8 data) { GH_UART2_MCR_S d; d.all = *(volatile U32 *)REG_UART2_MCR; d.bitc.afce = data; *(volatile U32 *)REG_UART2_MCR = d.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_MCR_afce] <-- 0x%08x\n", REG_UART2_MCR,d.all,d.all); #endif } U8 GH_UART2_get_MCR_afce(void) { GH_UART2_MCR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_MCR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_MCR_afce] --> 0x%08x\n", REG_UART2_MCR,value); #endif return tmp_value.bitc.afce; } void GH_UART2_set_MCR_sire(U8 data) { GH_UART2_MCR_S d; d.all = *(volatile U32 *)REG_UART2_MCR; d.bitc.sire = data; *(volatile U32 *)REG_UART2_MCR = d.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_MCR_sire] <-- 0x%08x\n", REG_UART2_MCR,d.all,d.all); #endif } U8 GH_UART2_get_MCR_sire(void) { GH_UART2_MCR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_MCR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_MCR_sire] --> 0x%08x\n", REG_UART2_MCR,value); #endif return tmp_value.bitc.sire; } #endif /* GH_INLINE_LEVEL == 0 */ /*----------------------------------------------------------------------------*/ /* register UART2_LSR 
(read) */ /*----------------------------------------------------------------------------*/ #if GH_INLINE_LEVEL == 0 U32 GH_UART2_get_LSR(void) { U32 value = (*(volatile U32 *)REG_UART2_LSR); #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_LSR] --> 0x%08x\n", REG_UART2_LSR,value); #endif return value; } U8 GH_UART2_get_LSR_dr(void) { GH_UART2_LSR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_LSR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_LSR_dr] --> 0x%08x\n", REG_UART2_LSR,value); #endif return tmp_value.bitc.dr; } U8 GH_UART2_get_LSR_oe(void) { GH_UART2_LSR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_LSR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_LSR_oe] --> 0x%08x\n", REG_UART2_LSR,value); #endif return tmp_value.bitc.oe; } U8 GH_UART2_get_LSR_pe(void) { GH_UART2_LSR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_LSR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_LSR_pe] --> 0x%08x\n", REG_UART2_LSR,value); #endif return tmp_value.bitc.pe; } U8 GH_UART2_get_LSR_fe(void) { GH_UART2_LSR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_LSR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_LSR_fe] --> 0x%08x\n", REG_UART2_LSR,value); #endif return tmp_value.bitc.fe; } U8 GH_UART2_get_LSR_bi(void) { GH_UART2_LSR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_LSR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_LSR_bi] --> 0x%08x\n", REG_UART2_LSR,value); #endif return tmp_value.bitc.bi; } U8 GH_UART2_get_LSR_temt(void) { GH_UART2_LSR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_LSR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_LSR_temt] --> 0x%08x\n", REG_UART2_LSR,value); #endif return tmp_value.bitc.temt; } #endif /* GH_INLINE_LEVEL == 0 */ /*----------------------------------------------------------------------------*/ /* register UART2_MSR (read) */ /*----------------------------------------------------------------------------*/ #if GH_INLINE_LEVEL == 0 U32 GH_UART2_get_MSR(void) { U32 value = (*(volatile U32 *)REG_UART2_MSR); #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_MSR] --> 0x%08x\n", REG_UART2_MSR,value); #endif return value; } U8 GH_UART2_get_MSR_dcts(void) { GH_UART2_MSR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_MSR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_MSR_dcts] --> 0x%08x\n", REG_UART2_MSR,value); #endif return tmp_value.bitc.dcts; } U8 GH_UART2_get_MSR_ddsr(void) { GH_UART2_MSR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_MSR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_MSR_ddsr] --> 0x%08x\n", REG_UART2_MSR,value); #endif return tmp_value.bitc.ddsr; } U8 GH_UART2_get_MSR_teri(void) { GH_UART2_MSR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_MSR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT 
GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_MSR_teri] --> 0x%08x\n", REG_UART2_MSR,value); #endif return tmp_value.bitc.teri; } U8 GH_UART2_get_MSR_ddcd(void) { GH_UART2_MSR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_MSR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_MSR_ddcd] --> 0x%08x\n", REG_UART2_MSR,value); #endif return tmp_value.bitc.ddcd; } U8 GH_UART2_get_MSR_cts(void) { GH_UART2_MSR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_MSR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_MSR_cts] --> 0x%08x\n", REG_UART2_MSR,value); #endif return tmp_value.bitc.cts; } U8 GH_UART2_get_MSR_dsr(void) { GH_UART2_MSR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_MSR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_MSR_dsr] --> 0x%08x\n", REG_UART2_MSR,value); #endif return tmp_value.bitc.dsr; } U8 GH_UART2_get_MSR_ri(void) { GH_UART2_MSR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_MSR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_MSR_ri] --> 0x%08x\n", REG_UART2_MSR,value); #endif return tmp_value.bitc.ri; } U8 GH_UART2_get_MSR_dcd(void) { GH_UART2_MSR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_MSR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_MSR_dcd] --> 0x%08x\n", REG_UART2_MSR,value); #endif return tmp_value.bitc.dcd; } #endif /* GH_INLINE_LEVEL == 0 */ /*----------------------------------------------------------------------------*/ /* register UART2_SCR (read/write) */ /*----------------------------------------------------------------------------*/ #if GH_INLINE_LEVEL == 0 void GH_UART2_set_SCR(U32 data) { *(volatile U32 *)REG_UART2_SCR = data; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_SCR] <-- 0x%08x\n", REG_UART2_SCR,data,data); #endif } U32 GH_UART2_get_SCR(void) { U32 value = (*(volatile U32 *)REG_UART2_SCR); #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_SCR] --> 0x%08x\n", REG_UART2_SCR,value); #endif return value; } void GH_UART2_set_SCR_scr(U8 data) { GH_UART2_SCR_S d; d.all = *(volatile U32 *)REG_UART2_SCR; d.bitc.scr = data; *(volatile U32 *)REG_UART2_SCR = d.all; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "WRREG(0x%08x,0x%08x); \\\\ [GH_UART2_set_SCR_scr] <-- 0x%08x\n", REG_UART2_SCR,d.all,d.all); #endif } U8 GH_UART2_get_SCR_scr(void) { GH_UART2_SCR_S tmp_value; U32 value = (*(volatile U32 *)REG_UART2_SCR); tmp_value.all = value; #if GH_UART2_ENABLE_DEBUG_PRINT GH_UART2_DEBUG_PRINT_FUNCTION( "value = RDREG(0x%08x); \\\\ [GH_UART2_get_SCR_scr] --> 0x%08x\n", REG_UART2_SCR,value); #endif return tmp_value.bitc.scr; } #endif /* GH_INLINE_LEVEL == 0 */ /*----------------------------------------------------------------------------*/ /* init function */ /*----------------------------------------------------------------------------*/ void GH_UART2_init(void) { GH_UART2_set_THR((U32)0x00000000); GH_UART2_set_DLL((U32)0x00000000); GH_UART2_set_IER((U32)0x00000000); GH_UART2_set_DLH((U32)0x00000000); GH_UART2_set_FCR((U32)0x00000000); 
GH_UART2_set_LCR((U32)0x00000000); GH_UART2_set_MCR((U32)0x00000000); GH_UART2_set_SCR((U32)0x00000000); /* read read-clear registers in order to set mirror variables */ } /*----------------------------------------------------------------------------*/ /* end of file */ /*----------------------------------------------------------------------------*/
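/*
 * Minimal usage sketch for the accessors above (illustrative only, not part
 * of the generated header): poll the line-status TEMT bit until the
 * transmitter is idle, then write one byte to the transmit holding register.
 * The uart2_putc_example() name and the busy-wait policy are assumptions;
 * only GH_UART2_get_LSR_temt() and GH_UART2_set_THR() come from the file above.
 */
static void uart2_putc_example(U8 c)
{
    while (GH_UART2_get_LSR_temt() == 0)
        ;                           /* wait: transmitter not yet empty */
    GH_UART2_set_THR((U32)c);       /* THR: transmit holding register */
}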
gpl-2.0
CirrusLogic/rpi-linux
drivers/s390/cio/chsc_sch.c
1302
23759
/* * Driver for s390 chsc subchannels * * Copyright IBM Corp. 2008, 2011 * * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> * */ #include <linux/slab.h> #include <linux/compat.h> #include <linux/device.h> #include <linux/module.h> #include <linux/uaccess.h> #include <linux/miscdevice.h> #include <linux/kernel_stat.h> #include <asm/compat.h> #include <asm/cio.h> #include <asm/chsc.h> #include <asm/isc.h> #include "cio.h" #include "cio_debug.h" #include "css.h" #include "chsc_sch.h" #include "ioasm.h" static debug_info_t *chsc_debug_msg_id; static debug_info_t *chsc_debug_log_id; static struct chsc_request *on_close_request; static struct chsc_async_area *on_close_chsc_area; static DEFINE_MUTEX(on_close_mutex); #define CHSC_MSG(imp, args...) do { \ debug_sprintf_event(chsc_debug_msg_id, imp , ##args); \ } while (0) #define CHSC_LOG(imp, txt) do { \ debug_text_event(chsc_debug_log_id, imp , txt); \ } while (0) static void CHSC_LOG_HEX(int level, void *data, int length) { while (length > 0) { debug_event(chsc_debug_log_id, level, data, length); length -= chsc_debug_log_id->buf_size; data += chsc_debug_log_id->buf_size; } } MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("driver for s390 chsc subchannels"); MODULE_LICENSE("GPL"); static void chsc_subchannel_irq(struct subchannel *sch) { struct chsc_private *private = dev_get_drvdata(&sch->dev); struct chsc_request *request = private->request; struct irb *irb = this_cpu_ptr(&cio_irb); CHSC_LOG(4, "irb"); CHSC_LOG_HEX(4, irb, sizeof(*irb)); inc_irq_stat(IRQIO_CSC); /* Copy irb to provided request and set done. */ if (!request) { CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n", sch->schid.ssid, sch->schid.sch_no); return; } private->request = NULL; memcpy(&request->irb, irb, sizeof(*irb)); cio_update_schib(sch); complete(&request->completion); put_device(&sch->dev); } static int chsc_subchannel_probe(struct subchannel *sch) { struct chsc_private *private; int ret; CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n", sch->schid.ssid, sch->schid.sch_no); sch->isc = CHSC_SCH_ISC; private = kzalloc(sizeof(*private), GFP_KERNEL); if (!private) return -ENOMEM; dev_set_drvdata(&sch->dev, private); ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch); if (ret) { CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n", sch->schid.ssid, sch->schid.sch_no, ret); dev_set_drvdata(&sch->dev, NULL); kfree(private); } else { if (dev_get_uevent_suppress(&sch->dev)) { dev_set_uevent_suppress(&sch->dev, 0); kobject_uevent(&sch->dev.kobj, KOBJ_ADD); } } return ret; } static int chsc_subchannel_remove(struct subchannel *sch) { struct chsc_private *private; cio_disable_subchannel(sch); private = dev_get_drvdata(&sch->dev); dev_set_drvdata(&sch->dev, NULL); if (private->request) { complete(&private->request->completion); put_device(&sch->dev); } kfree(private); return 0; } static void chsc_subchannel_shutdown(struct subchannel *sch) { cio_disable_subchannel(sch); } static int chsc_subchannel_prepare(struct subchannel *sch) { int cc; struct schib schib; /* * Don't allow suspend while the subchannel is not idle * since we don't have a way to clear the subchannel and * cannot disable it with a request running. 
*/ cc = stsch_err(sch->schid, &schib); if (!cc && scsw_stctl(&schib.scsw)) return -EAGAIN; return 0; } static int chsc_subchannel_freeze(struct subchannel *sch) { return cio_disable_subchannel(sch); } static int chsc_subchannel_restore(struct subchannel *sch) { return cio_enable_subchannel(sch, (u32)(unsigned long)sch); } static struct css_device_id chsc_subchannel_ids[] = { { .match_flags = 0x1, .type =SUBCHANNEL_TYPE_CHSC, }, { /* end of list */ }, }; MODULE_DEVICE_TABLE(css, chsc_subchannel_ids); static struct css_driver chsc_subchannel_driver = { .drv = { .owner = THIS_MODULE, .name = "chsc_subchannel", }, .subchannel_type = chsc_subchannel_ids, .irq = chsc_subchannel_irq, .probe = chsc_subchannel_probe, .remove = chsc_subchannel_remove, .shutdown = chsc_subchannel_shutdown, .prepare = chsc_subchannel_prepare, .freeze = chsc_subchannel_freeze, .thaw = chsc_subchannel_restore, .restore = chsc_subchannel_restore, }; static int __init chsc_init_dbfs(void) { chsc_debug_msg_id = debug_register("chsc_msg", 8, 1, 4 * sizeof(long)); if (!chsc_debug_msg_id) goto out; debug_register_view(chsc_debug_msg_id, &debug_sprintf_view); debug_set_level(chsc_debug_msg_id, 2); chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16); if (!chsc_debug_log_id) goto out; debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view); debug_set_level(chsc_debug_log_id, 2); return 0; out: if (chsc_debug_msg_id) debug_unregister(chsc_debug_msg_id); return -ENOMEM; } static void chsc_remove_dbfs(void) { debug_unregister(chsc_debug_log_id); debug_unregister(chsc_debug_msg_id); } static int __init chsc_init_sch_driver(void) { return css_driver_register(&chsc_subchannel_driver); } static void chsc_cleanup_sch_driver(void) { css_driver_unregister(&chsc_subchannel_driver); } static DEFINE_SPINLOCK(chsc_lock); static int chsc_subchannel_match_next_free(struct device *dev, void *data) { struct subchannel *sch = to_subchannel(dev); return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw); } static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch) { struct device *dev; dev = driver_find_device(&chsc_subchannel_driver.drv, sch ? &sch->dev : NULL, NULL, chsc_subchannel_match_next_free); return dev ? to_subchannel(dev) : NULL; } /** * chsc_async() - try to start a chsc request asynchronously * @chsc_area: request to be started * @request: request structure to associate * * Tries to start a chsc request on one of the existing chsc subchannels. 
* Returns: * %0 if the request was performed synchronously * %-EINPROGRESS if the request was successfully started * %-EBUSY if all chsc subchannels are busy * %-ENODEV if no chsc subchannels are available * Context: * interrupts disabled, chsc_lock held */ static int chsc_async(struct chsc_async_area *chsc_area, struct chsc_request *request) { int cc; struct chsc_private *private; struct subchannel *sch = NULL; int ret = -ENODEV; char dbf[10]; chsc_area->header.key = PAGE_DEFAULT_KEY >> 4; while ((sch = chsc_get_next_subchannel(sch))) { spin_lock(sch->lock); private = dev_get_drvdata(&sch->dev); if (private->request) { spin_unlock(sch->lock); ret = -EBUSY; continue; } chsc_area->header.sid = sch->schid; CHSC_LOG(2, "schid"); CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid)); cc = chsc(chsc_area); snprintf(dbf, sizeof(dbf), "cc:%d", cc); CHSC_LOG(2, dbf); switch (cc) { case 0: ret = 0; break; case 1: sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC; ret = -EINPROGRESS; private->request = request; break; case 2: ret = -EBUSY; break; default: ret = -ENODEV; } spin_unlock(sch->lock); CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n", sch->schid.ssid, sch->schid.sch_no, cc); if (ret == -EINPROGRESS) return -EINPROGRESS; put_device(&sch->dev); if (ret == 0) return 0; } return ret; } static void chsc_log_command(void *chsc_area) { char dbf[10]; snprintf(dbf, sizeof(dbf), "CHSC:%x", ((uint16_t *)chsc_area)[1]); CHSC_LOG(0, dbf); CHSC_LOG_HEX(0, chsc_area, 32); } static int chsc_examine_irb(struct chsc_request *request) { int backed_up; if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND)) return -EIO; backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK; request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK; if (scsw_cstat(&request->irb.scsw) == 0) return 0; if (!backed_up) return 0; if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK) return -EIO; if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK) return -EPERM; if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK) return -EAGAIN; if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK) return -EAGAIN; return -EIO; } static int chsc_ioctl_start(void __user *user_area) { struct chsc_request *request; struct chsc_async_area *chsc_area; int ret; char dbf[10]; if (!css_general_characteristics.dynio) /* It makes no sense to try. 
*/ return -EOPNOTSUPP; chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL); if (!chsc_area) return -ENOMEM; request = kzalloc(sizeof(*request), GFP_KERNEL); if (!request) { ret = -ENOMEM; goto out_free; } init_completion(&request->completion); if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) { ret = -EFAULT; goto out_free; } chsc_log_command(chsc_area); spin_lock_irq(&chsc_lock); ret = chsc_async(chsc_area, request); spin_unlock_irq(&chsc_lock); if (ret == -EINPROGRESS) { wait_for_completion(&request->completion); ret = chsc_examine_irb(request); } /* copy area back to user */ if (!ret) if (copy_to_user(user_area, chsc_area, PAGE_SIZE)) ret = -EFAULT; out_free: snprintf(dbf, sizeof(dbf), "ret:%d", ret); CHSC_LOG(0, dbf); kfree(request); free_page((unsigned long)chsc_area); return ret; } static int chsc_ioctl_on_close_set(void __user *user_area) { char dbf[13]; int ret; mutex_lock(&on_close_mutex); if (on_close_chsc_area) { ret = -EBUSY; goto out_unlock; } on_close_request = kzalloc(sizeof(*on_close_request), GFP_KERNEL); if (!on_close_request) { ret = -ENOMEM; goto out_unlock; } on_close_chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL); if (!on_close_chsc_area) { ret = -ENOMEM; goto out_free_request; } if (copy_from_user(on_close_chsc_area, user_area, PAGE_SIZE)) { ret = -EFAULT; goto out_free_chsc; } ret = 0; goto out_unlock; out_free_chsc: free_page((unsigned long)on_close_chsc_area); on_close_chsc_area = NULL; out_free_request: kfree(on_close_request); on_close_request = NULL; out_unlock: mutex_unlock(&on_close_mutex); snprintf(dbf, sizeof(dbf), "ocsret:%d", ret); CHSC_LOG(0, dbf); return ret; } static int chsc_ioctl_on_close_remove(void) { char dbf[13]; int ret; mutex_lock(&on_close_mutex); if (!on_close_chsc_area) { ret = -ENOENT; goto out_unlock; } free_page((unsigned long)on_close_chsc_area); on_close_chsc_area = NULL; kfree(on_close_request); on_close_request = NULL; ret = 0; out_unlock: mutex_unlock(&on_close_mutex); snprintf(dbf, sizeof(dbf), "ocrret:%d", ret); CHSC_LOG(0, dbf); return ret; } static int chsc_ioctl_start_sync(void __user *user_area) { struct chsc_sync_area *chsc_area; int ret, ccode; chsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!chsc_area) return -ENOMEM; if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) { ret = -EFAULT; goto out_free; } if (chsc_area->header.code & 0x4000) { ret = -EINVAL; goto out_free; } chsc_log_command(chsc_area); ccode = chsc(chsc_area); if (ccode != 0) { ret = -EIO; goto out_free; } if (copy_to_user(user_area, chsc_area, PAGE_SIZE)) ret = -EFAULT; else ret = 0; out_free: free_page((unsigned long)chsc_area); return ret; } static int chsc_ioctl_info_channel_path(void __user *user_cd) { struct chsc_chp_cd *cd; int ret, ccode; struct { struct chsc_header request; u32 : 2; u32 m : 1; u32 : 1; u32 fmt1 : 4; u32 cssid : 8; u32 : 8; u32 first_chpid : 8; u32 : 24; u32 last_chpid : 8; u32 : 32; struct chsc_header response; u8 data[PAGE_SIZE - 20]; } __attribute__ ((packed)) *scpcd_area; scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!scpcd_area) return -ENOMEM; cd = kzalloc(sizeof(*cd), GFP_KERNEL); if (!cd) { ret = -ENOMEM; goto out_free; } if (copy_from_user(cd, user_cd, sizeof(*cd))) { ret = -EFAULT; goto out_free; } scpcd_area->request.length = 0x0010; scpcd_area->request.code = 0x0028; scpcd_area->m = cd->m; scpcd_area->fmt1 = cd->fmt; scpcd_area->cssid = cd->chpid.cssid; scpcd_area->first_chpid = cd->chpid.id; scpcd_area->last_chpid = cd->chpid.id; ccode = chsc(scpcd_area); if (ccode != 
0) { ret = -EIO; goto out_free; } if (scpcd_area->response.code != 0x0001) { ret = -EIO; CHSC_MSG(0, "scpcd: response code=%x\n", scpcd_area->response.code); goto out_free; } memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length); if (copy_to_user(user_cd, cd, sizeof(*cd))) ret = -EFAULT; else ret = 0; out_free: kfree(cd); free_page((unsigned long)scpcd_area); return ret; } static int chsc_ioctl_info_cu(void __user *user_cd) { struct chsc_cu_cd *cd; int ret, ccode; struct { struct chsc_header request; u32 : 2; u32 m : 1; u32 : 1; u32 fmt1 : 4; u32 cssid : 8; u32 : 8; u32 first_cun : 8; u32 : 24; u32 last_cun : 8; u32 : 32; struct chsc_header response; u8 data[PAGE_SIZE - 20]; } __attribute__ ((packed)) *scucd_area; scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!scucd_area) return -ENOMEM; cd = kzalloc(sizeof(*cd), GFP_KERNEL); if (!cd) { ret = -ENOMEM; goto out_free; } if (copy_from_user(cd, user_cd, sizeof(*cd))) { ret = -EFAULT; goto out_free; } scucd_area->request.length = 0x0010; scucd_area->request.code = 0x0028; scucd_area->m = cd->m; scucd_area->fmt1 = cd->fmt; scucd_area->cssid = cd->cssid; scucd_area->first_cun = cd->cun; scucd_area->last_cun = cd->cun; ccode = chsc(scucd_area); if (ccode != 0) { ret = -EIO; goto out_free; } if (scucd_area->response.code != 0x0001) { ret = -EIO; CHSC_MSG(0, "scucd: response code=%x\n", scucd_area->response.code); goto out_free; } memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length); if (copy_to_user(user_cd, cd, sizeof(*cd))) ret = -EFAULT; else ret = 0; out_free: kfree(cd); free_page((unsigned long)scucd_area); return ret; } static int chsc_ioctl_info_sch_cu(void __user *user_cud) { struct chsc_sch_cud *cud; int ret, ccode; struct { struct chsc_header request; u32 : 2; u32 m : 1; u32 : 5; u32 fmt1 : 4; u32 : 2; u32 ssid : 2; u32 first_sch : 16; u32 : 8; u32 cssid : 8; u32 last_sch : 16; u32 : 32; struct chsc_header response; u8 data[PAGE_SIZE - 20]; } __attribute__ ((packed)) *sscud_area; sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!sscud_area) return -ENOMEM; cud = kzalloc(sizeof(*cud), GFP_KERNEL); if (!cud) { ret = -ENOMEM; goto out_free; } if (copy_from_user(cud, user_cud, sizeof(*cud))) { ret = -EFAULT; goto out_free; } sscud_area->request.length = 0x0010; sscud_area->request.code = 0x0006; sscud_area->m = cud->schid.m; sscud_area->fmt1 = cud->fmt; sscud_area->ssid = cud->schid.ssid; sscud_area->first_sch = cud->schid.sch_no; sscud_area->cssid = cud->schid.cssid; sscud_area->last_sch = cud->schid.sch_no; ccode = chsc(sscud_area); if (ccode != 0) { ret = -EIO; goto out_free; } if (sscud_area->response.code != 0x0001) { ret = -EIO; CHSC_MSG(0, "sscud: response code=%x\n", sscud_area->response.code); goto out_free; } memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length); if (copy_to_user(user_cud, cud, sizeof(*cud))) ret = -EFAULT; else ret = 0; out_free: kfree(cud); free_page((unsigned long)sscud_area); return ret; } static int chsc_ioctl_conf_info(void __user *user_ci) { struct chsc_conf_info *ci; int ret, ccode; struct { struct chsc_header request; u32 : 2; u32 m : 1; u32 : 1; u32 fmt1 : 4; u32 cssid : 8; u32 : 6; u32 ssid : 2; u32 : 8; u64 : 64; struct chsc_header response; u8 data[PAGE_SIZE - 20]; } __attribute__ ((packed)) *sci_area; sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!sci_area) return -ENOMEM; ci = kzalloc(sizeof(*ci), GFP_KERNEL); if (!ci) { ret = -ENOMEM; goto out_free; } if (copy_from_user(ci, user_ci, sizeof(*ci))) { ret 
= -EFAULT; goto out_free; } sci_area->request.length = 0x0010; sci_area->request.code = 0x0012; sci_area->m = ci->id.m; sci_area->fmt1 = ci->fmt; sci_area->cssid = ci->id.cssid; sci_area->ssid = ci->id.ssid; ccode = chsc(sci_area); if (ccode != 0) { ret = -EIO; goto out_free; } if (sci_area->response.code != 0x0001) { ret = -EIO; CHSC_MSG(0, "sci: response code=%x\n", sci_area->response.code); goto out_free; } memcpy(&ci->scid, &sci_area->response, sci_area->response.length); if (copy_to_user(user_ci, ci, sizeof(*ci))) ret = -EFAULT; else ret = 0; out_free: kfree(ci); free_page((unsigned long)sci_area); return ret; } static int chsc_ioctl_conf_comp_list(void __user *user_ccl) { struct chsc_comp_list *ccl; int ret, ccode; struct { struct chsc_header request; u32 ctype : 8; u32 : 4; u32 fmt : 4; u32 : 16; u64 : 64; u32 list_parm[2]; u64 : 64; struct chsc_header response; u8 data[PAGE_SIZE - 36]; } __attribute__ ((packed)) *sccl_area; struct { u32 m : 1; u32 : 31; u32 cssid : 8; u32 : 16; u32 chpid : 8; } __attribute__ ((packed)) *chpid_parm; struct { u32 f_cssid : 8; u32 l_cssid : 8; u32 : 16; u32 res; } __attribute__ ((packed)) *cssids_parm; sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!sccl_area) return -ENOMEM; ccl = kzalloc(sizeof(*ccl), GFP_KERNEL); if (!ccl) { ret = -ENOMEM; goto out_free; } if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) { ret = -EFAULT; goto out_free; } sccl_area->request.length = 0x0020; sccl_area->request.code = 0x0030; sccl_area->fmt = ccl->req.fmt; sccl_area->ctype = ccl->req.ctype; switch (sccl_area->ctype) { case CCL_CU_ON_CHP: case CCL_IOP_CHP: chpid_parm = (void *)&sccl_area->list_parm; chpid_parm->m = ccl->req.chpid.m; chpid_parm->cssid = ccl->req.chpid.chp.cssid; chpid_parm->chpid = ccl->req.chpid.chp.id; break; case CCL_CSS_IMG: case CCL_CSS_IMG_CONF_CHAR: cssids_parm = (void *)&sccl_area->list_parm; cssids_parm->f_cssid = ccl->req.cssids.f_cssid; cssids_parm->l_cssid = ccl->req.cssids.l_cssid; break; } ccode = chsc(sccl_area); if (ccode != 0) { ret = -EIO; goto out_free; } if (sccl_area->response.code != 0x0001) { ret = -EIO; CHSC_MSG(0, "sccl: response code=%x\n", sccl_area->response.code); goto out_free; } memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length); if (copy_to_user(user_ccl, ccl, sizeof(*ccl))) ret = -EFAULT; else ret = 0; out_free: kfree(ccl); free_page((unsigned long)sccl_area); return ret; } static int chsc_ioctl_chpd(void __user *user_chpd) { struct chsc_scpd *scpd_area; struct chsc_cpd_info *chpd; int ret; chpd = kzalloc(sizeof(*chpd), GFP_KERNEL); scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!scpd_area || !chpd) { ret = -ENOMEM; goto out_free; } if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) { ret = -EFAULT; goto out_free; } ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt, chpd->rfmt, chpd->c, chpd->m, scpd_area); if (ret) goto out_free; memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length); if (copy_to_user(user_chpd, chpd, sizeof(*chpd))) ret = -EFAULT; out_free: kfree(chpd); free_page((unsigned long)scpd_area); return ret; } static int chsc_ioctl_dcal(void __user *user_dcal) { struct chsc_dcal *dcal; int ret, ccode; struct { struct chsc_header request; u32 atype : 8; u32 : 4; u32 fmt : 4; u32 : 16; u32 res0[2]; u32 list_parm[2]; u32 res1[2]; struct chsc_header response; u8 data[PAGE_SIZE - 36]; } __attribute__ ((packed)) *sdcal_area; sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!sdcal_area) return -ENOMEM; dcal = 
kzalloc(sizeof(*dcal), GFP_KERNEL); if (!dcal) { ret = -ENOMEM; goto out_free; } if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) { ret = -EFAULT; goto out_free; } sdcal_area->request.length = 0x0020; sdcal_area->request.code = 0x0034; sdcal_area->atype = dcal->req.atype; sdcal_area->fmt = dcal->req.fmt; memcpy(&sdcal_area->list_parm, &dcal->req.list_parm, sizeof(sdcal_area->list_parm)); ccode = chsc(sdcal_area); if (ccode != 0) { ret = -EIO; goto out_free; } if (sdcal_area->response.code != 0x0001) { ret = -EIO; CHSC_MSG(0, "sdcal: response code=%x\n", sdcal_area->response.code); goto out_free; } memcpy(&dcal->sdcal, &sdcal_area->response, sdcal_area->response.length); if (copy_to_user(user_dcal, dcal, sizeof(*dcal))) ret = -EFAULT; else ret = 0; out_free: kfree(dcal); free_page((unsigned long)sdcal_area); return ret; } static long chsc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { void __user *argp; CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd); if (is_compat_task()) argp = compat_ptr(arg); else argp = (void __user *)arg; switch (cmd) { case CHSC_START: return chsc_ioctl_start(argp); case CHSC_START_SYNC: return chsc_ioctl_start_sync(argp); case CHSC_INFO_CHANNEL_PATH: return chsc_ioctl_info_channel_path(argp); case CHSC_INFO_CU: return chsc_ioctl_info_cu(argp); case CHSC_INFO_SCH_CU: return chsc_ioctl_info_sch_cu(argp); case CHSC_INFO_CI: return chsc_ioctl_conf_info(argp); case CHSC_INFO_CCL: return chsc_ioctl_conf_comp_list(argp); case CHSC_INFO_CPD: return chsc_ioctl_chpd(argp); case CHSC_INFO_DCAL: return chsc_ioctl_dcal(argp); case CHSC_ON_CLOSE_SET: return chsc_ioctl_on_close_set(argp); case CHSC_ON_CLOSE_REMOVE: return chsc_ioctl_on_close_remove(); default: /* unknown ioctl number */ return -ENOIOCTLCMD; } } static atomic_t chsc_ready_for_use = ATOMIC_INIT(1); static int chsc_open(struct inode *inode, struct file *file) { if (!atomic_dec_and_test(&chsc_ready_for_use)) { atomic_inc(&chsc_ready_for_use); return -EBUSY; } return nonseekable_open(inode, file); } static int chsc_release(struct inode *inode, struct file *filp) { char dbf[13]; int ret; mutex_lock(&on_close_mutex); if (!on_close_chsc_area) goto out_unlock; init_completion(&on_close_request->completion); CHSC_LOG(0, "on_close"); chsc_log_command(on_close_chsc_area); spin_lock_irq(&chsc_lock); ret = chsc_async(on_close_chsc_area, on_close_request); spin_unlock_irq(&chsc_lock); if (ret == -EINPROGRESS) { wait_for_completion(&on_close_request->completion); ret = chsc_examine_irb(on_close_request); } snprintf(dbf, sizeof(dbf), "relret:%d", ret); CHSC_LOG(0, dbf); free_page((unsigned long)on_close_chsc_area); on_close_chsc_area = NULL; kfree(on_close_request); on_close_request = NULL; out_unlock: mutex_unlock(&on_close_mutex); atomic_inc(&chsc_ready_for_use); return 0; } static const struct file_operations chsc_fops = { .owner = THIS_MODULE, .open = chsc_open, .release = chsc_release, .unlocked_ioctl = chsc_ioctl, .compat_ioctl = chsc_ioctl, .llseek = no_llseek, }; static struct miscdevice chsc_misc_device = { .minor = MISC_DYNAMIC_MINOR, .name = "chsc", .fops = &chsc_fops, }; static int __init chsc_misc_init(void) { return misc_register(&chsc_misc_device); } static void chsc_misc_cleanup(void) { misc_deregister(&chsc_misc_device); } static int __init chsc_sch_init(void) { int ret; ret = chsc_init_dbfs(); if (ret) return ret; isc_register(CHSC_SCH_ISC); ret = chsc_init_sch_driver(); if (ret) goto out_dbf; ret = chsc_misc_init(); if (ret) goto out_driver; return ret; out_driver: chsc_cleanup_sch_driver(); 
out_dbf: isc_unregister(CHSC_SCH_ISC); chsc_remove_dbfs(); return ret; } static void __exit chsc_sch_exit(void) { chsc_misc_cleanup(); chsc_cleanup_sch_driver(); isc_unregister(CHSC_SCH_ISC); chsc_remove_dbfs(); } module_init(chsc_sch_init); module_exit(chsc_sch_exit);
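/*
 * Hedged user-space usage sketch (illustrative, not part of this driver):
 * the misc device registered above shows up as /dev/chsc, and the
 * CHSC_START_SYNC ioctl handled by chsc_ioctl_start_sync() expects a
 * one-page buffer laid out as struct chsc_sync_area. The <asm/chsc.h>
 * header path and the run_sync_chsc() helper name are assumptions.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/chsc.h>

static int run_sync_chsc(struct chsc_sync_area *area)
{
	int fd, rc;

	fd = open("/dev/chsc", O_RDWR);		/* driver allows a single opener at a time */
	if (fd < 0)
		return -1;
	rc = ioctl(fd, CHSC_START_SYNC, area);	/* kernel copies one full page in and out */
	close(fd);
	return rc;
}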
gpl-2.0
ziozzang/linux-tcp-ack-fix
kernel/task_work.c
1558
3443
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/tracehook.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: send the notification if true
 *
 * Queue @work for task_work_run() below and notify the @task if @notify.
 * Fails if the @task is exiting/exited and thus it can't process this @work.
 * Otherwise @work->func() will be called when the @task returns from kernel
 * mode or exits.
 *
 * This is like the signal handler which runs in kernel mode, but it doesn't
 * try to wake up the @task.
 *
 * RETURNS:
 * 0 if succeeds or -ESRCH.
 */
int
task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
{
	struct callback_head *head;

	do {
		head = ACCESS_ONCE(task->task_works);
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (cmpxchg(&task->task_works, head, work) != head);

	if (notify)
		set_notify_resume(task);
	return 0;
}

/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the
	 * new entry before this work, we will find it again. Or
	 * we raced with task_work_run(), *pprev == NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = ACCESS_ONCE(*pprev))) {
		smp_read_barrier_depends();
		if (work->func != func)
			pprev = &work->next;
		else if (cmpxchg(pprev, work, work->next) == work)
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}

/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to the user-mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add the
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		do {
			work = ACCESS_ONCE(task->task_works);
			head = !work && (task->flags & PF_EXITING) ?
				&work_exited : NULL;
		} while (cmpxchg(&task->task_works, work, head) != work);

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It can't remove
		 * the first entry == work, cmpxchg(task_works) should
		 * fail, but it can play with *work and other entries.
		 */
		raw_spin_unlock_wait(&task->pi_lock);
		smp_mb();

		/* Reverse the list to run the works in fifo order */
		head = NULL;
		do {
			next = work->next;
			work->next = head;
			head = work;
			work = next;
		} while (work);

		work = head;
		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
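/*
 * Minimal in-kernel usage sketch for the interface above (illustrative,
 * not part of this file): queue a callback against "current" so that it
 * runs on the next return to user mode via task_work_run(). The example_*
 * names are hypothetical; init_task_work() comes from <linux/task_work.h>,
 * which is already included above.
 */
static void example_twork_fn(struct callback_head *head)
{
	/* runs later in the context of the queuing task; may sleep */
}

static struct callback_head example_twork;

static int example_queue_twork(void)
{
	init_task_work(&example_twork, example_twork_fn);
	/* notify == true sets the resume notification so the work runs promptly */
	return task_work_add(current, &example_twork, true);
}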
gpl-2.0
mtmichaelson/LGE_Connect_Kernel
drivers/scsi/aic94xx/aic94xx_task.c
1558
17414
/* * Aic94xx SAS/SATA Tasks * * Copyright (C) 2005 Adaptec, Inc. All rights reserved. * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> * * This file is licensed under GPLv2. * * This file is part of the aic94xx driver. * * The aic94xx driver is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; version 2 of the * License. * * The aic94xx driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with the aic94xx driver; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/spinlock.h> #include "aic94xx.h" #include "aic94xx_sas.h" #include "aic94xx_hwi.h" static void asd_unbuild_ata_ascb(struct asd_ascb *a); static void asd_unbuild_smp_ascb(struct asd_ascb *a); static void asd_unbuild_ssp_ascb(struct asd_ascb *a); static void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num) { unsigned long flags; spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags); asd_ha->seq.can_queue += num; spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); } /* PCI_DMA_... to our direction translation. */ static const u8 data_dir_flags[] = { [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */ [PCI_DMA_TODEVICE] = DATA_DIR_OUT, /* OUTBOUND */ [PCI_DMA_FROMDEVICE] = DATA_DIR_IN, /* INBOUND */ [PCI_DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */ }; static int asd_map_scatterlist(struct sas_task *task, struct sg_el *sg_arr, gfp_t gfp_flags) { struct asd_ascb *ascb = task->lldd_task; struct asd_ha_struct *asd_ha = ascb->ha; struct scatterlist *sc; int num_sg, res; if (task->data_dir == PCI_DMA_NONE) return 0; if (task->num_scatter == 0) { void *p = task->scatter; dma_addr_t dma = pci_map_single(asd_ha->pcidev, p, task->total_xfer_len, task->data_dir); sg_arr[0].bus_addr = cpu_to_le64((u64)dma); sg_arr[0].size = cpu_to_le32(task->total_xfer_len); sg_arr[0].flags |= ASD_SG_EL_LIST_EOL; return 0; } /* STP tasks come from libata which has already mapped * the SG list */ if (sas_protocol_ata(task->task_proto)) num_sg = task->num_scatter; else num_sg = pci_map_sg(asd_ha->pcidev, task->scatter, task->num_scatter, task->data_dir); if (num_sg == 0) return -ENOMEM; if (num_sg > 3) { int i; ascb->sg_arr = asd_alloc_coherent(asd_ha, num_sg*sizeof(struct sg_el), gfp_flags); if (!ascb->sg_arr) { res = -ENOMEM; goto err_unmap; } for_each_sg(task->scatter, sc, num_sg, i) { struct sg_el *sg = &((struct sg_el *)ascb->sg_arr->vaddr)[i]; sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc)); sg->size = cpu_to_le32((u32)sg_dma_len(sc)); if (i == num_sg-1) sg->flags |= ASD_SG_EL_LIST_EOL; } for_each_sg(task->scatter, sc, 2, i) { sg_arr[i].bus_addr = cpu_to_le64((u64)sg_dma_address(sc)); sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc)); } sg_arr[1].next_sg_offs = 2 * sizeof(*sg_arr); sg_arr[1].flags |= ASD_SG_EL_LIST_EOS; memset(&sg_arr[2], 0, sizeof(*sg_arr)); sg_arr[2].bus_addr=cpu_to_le64((u64)ascb->sg_arr->dma_handle); } else { int i; for_each_sg(task->scatter, sc, num_sg, i) { sg_arr[i].bus_addr = cpu_to_le64((u64)sg_dma_address(sc)); sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc)); } sg_arr[i-1].flags |= ASD_SG_EL_LIST_EOL; } return 0; err_unmap: if 
(sas_protocol_ata(task->task_proto)) pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter, task->data_dir); return res; } static void asd_unmap_scatterlist(struct asd_ascb *ascb) { struct asd_ha_struct *asd_ha = ascb->ha; struct sas_task *task = ascb->uldd_task; if (task->data_dir == PCI_DMA_NONE) return; if (task->num_scatter == 0) { dma_addr_t dma = (dma_addr_t) le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr); pci_unmap_single(ascb->ha->pcidev, dma, task->total_xfer_len, task->data_dir); return; } asd_free_coherent(asd_ha, ascb->sg_arr); if (task->task_proto != SAS_PROTOCOL_STP) pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter, task->data_dir); } /* ---------- Task complete tasklet ---------- */ static void asd_get_response_tasklet(struct asd_ascb *ascb, struct done_list_struct *dl) { struct asd_ha_struct *asd_ha = ascb->ha; struct sas_task *task = ascb->uldd_task; struct task_status_struct *ts = &task->task_status; unsigned long flags; struct tc_resp_sb_struct { __le16 index_escb; u8 len_lsb; u8 flags; } __attribute__ ((packed)) *resp_sb = (void *) dl->status_block; /* int size = ((resp_sb->flags & 7) << 8) | resp_sb->len_lsb; */ int edb_id = ((resp_sb->flags & 0x70) >> 4)-1; struct asd_ascb *escb; struct asd_dma_tok *edb; void *r; spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags); escb = asd_tc_index_find(&asd_ha->seq, (int)le16_to_cpu(resp_sb->index_escb)); spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags); if (!escb) { ASD_DPRINTK("Uh-oh! No escb for this dl?!\n"); return; } ts->buf_valid_size = 0; edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index]; r = edb->vaddr; if (task->task_proto == SAS_PROTOCOL_SSP) { struct ssp_response_iu *iu = r + 16 + sizeof(struct ssp_frame_hdr); ts->residual = le32_to_cpu(*(__le32 *)r); sas_ssp_task_response(&asd_ha->pcidev->dev, task, iu); } else { struct ata_task_resp *resp = (void *) &ts->buf[0]; ts->residual = le32_to_cpu(*(__le32 *)r); if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) { resp->frame_len = le16_to_cpu(*(__le16 *)(r+6)); memcpy(&resp->ending_fis[0], r+16, 24); ts->buf_valid_size = sizeof(*resp); } } asd_invalidate_edb(escb, edb_id); } static void asd_task_tasklet_complete(struct asd_ascb *ascb, struct done_list_struct *dl) { struct sas_task *task = ascb->uldd_task; struct task_status_struct *ts = &task->task_status; unsigned long flags; u8 opcode = dl->opcode; asd_can_dequeue(ascb->ha, 1); Again: switch (opcode) { case TC_NO_ERROR: ts->resp = SAS_TASK_COMPLETE; ts->stat = SAM_GOOD; break; case TC_UNDERRUN: ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DATA_UNDERRUN; ts->residual = le32_to_cpu(*(__le32 *)dl->status_block); break; case TC_OVERRUN: ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DATA_OVERRUN; ts->residual = 0; break; case TC_SSP_RESP: case TC_ATA_RESP: ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_PROTO_RESPONSE; asd_get_response_tasklet(ascb, dl); break; case TF_OPEN_REJECT: ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_OPEN_REJECT; if (dl->status_block[1] & 2) ts->open_rej_reason = 1 + dl->status_block[2]; else if (dl->status_block[1] & 1) ts->open_rej_reason = (dl->status_block[2] >> 4)+10; else ts->open_rej_reason = SAS_OREJ_UNKNOWN; break; case TF_OPEN_TO: ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_OPEN_TO; break; case TF_PHY_DOWN: case TU_PHY_DOWN: ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_PHY_DOWN; break; case TI_PHY_DOWN: ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_PHY_DOWN; break; case TI_BREAK: case TI_PROTO_ERR: case TI_NAK: case TI_ACK_NAK_TO: case 
TF_SMP_XMIT_RCV_ERR: case TC_ATA_R_ERR_RECV: ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_INTERRUPTED; break; case TF_BREAK: case TU_BREAK: case TU_ACK_NAK_TO: case TF_SMPRSP_TO: ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_DEV_NO_RESPONSE; break; case TF_NAK_RECV: ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_NAK_R_ERR; break; case TA_I_T_NEXUS_LOSS: opcode = dl->status_block[0]; goto Again; break; case TF_INV_CONN_HANDLE: ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_DEVICE_UNKNOWN; break; case TF_REQUESTED_N_PENDING: ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_PENDING; break; case TC_TASK_CLEARED: case TA_ON_REQ: ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_ABORTED_TASK; break; case TF_NO_SMP_CONN: case TF_TMF_NO_CTX: case TF_TMF_NO_TAG: case TF_TMF_TAG_FREE: case TF_TMF_TASK_DONE: case TF_TMF_NO_CONN_HANDLE: case TF_IRTT_TO: case TF_IU_SHORT: case TF_DATA_OFFS_ERR: ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_DEV_NO_RESPONSE; break; case TC_LINK_ADM_RESP: case TC_CONTROL_PHY: case TC_RESUME: case TC_PARTIAL_SG_LIST: default: ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __func__, opcode); break; } switch (task->task_proto) { case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: asd_unbuild_ata_ascb(ascb); break; case SAS_PROTOCOL_SMP: asd_unbuild_smp_ascb(ascb); break; case SAS_PROTOCOL_SSP: asd_unbuild_ssp_ascb(ascb); default: break; } spin_lock_irqsave(&task->task_state_lock, flags); task->task_state_flags &= ~SAS_TASK_STATE_PENDING; task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; task->task_state_flags |= SAS_TASK_STATE_DONE; if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) { struct completion *completion = ascb->completion; spin_unlock_irqrestore(&task->task_state_lock, flags); ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x " "stat 0x%x but aborted by upper layer!\n", task, opcode, ts->resp, ts->stat); if (completion) complete(completion); } else { spin_unlock_irqrestore(&task->task_state_lock, flags); task->lldd_task = NULL; asd_ascb_free(ascb); mb(); task->task_done(task); } } /* ---------- ATA ---------- */ static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task, gfp_t gfp_flags) { struct domain_device *dev = task->dev; struct scb *scb; u8 flags; int res = 0; scb = ascb->scb; if (unlikely(task->ata_task.device_control_reg_update)) scb->header.opcode = CONTROL_ATA_DEV; else if (dev->sata_dev.command_set == ATA_COMMAND_SET) scb->header.opcode = INITIATE_ATA_TASK; else scb->header.opcode = INITIATE_ATAPI_TASK; scb->ata_task.proto_conn_rate = (1 << 5); /* STP */ if (dev->port->oob_mode == SAS_OOB_MODE) scb->ata_task.proto_conn_rate |= dev->linkrate; scb->ata_task.total_xfer_len = cpu_to_le32(task->total_xfer_len); scb->ata_task.fis = task->ata_task.fis; if (likely(!task->ata_task.device_control_reg_update)) scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */ if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) memcpy(scb->ata_task.atapi_packet, task->ata_task.atapi_packet, 16); scb->ata_task.sister_scb = cpu_to_le16(0xFFFF); scb->ata_task.conn_handle = cpu_to_le16( (u16)(unsigned long)dev->lldd_dev); if (likely(!task->ata_task.device_control_reg_update)) { flags = 0; if (task->ata_task.dma_xfer) flags |= DATA_XFER_MODE_DMA; if (task->ata_task.use_ncq && dev->sata_dev.command_set != ATAPI_COMMAND_SET) flags |= ATA_Q_TYPE_NCQ; flags |= data_dir_flags[task->data_dir]; scb->ata_task.ata_flags = flags; scb->ata_task.retry_count = task->ata_task.retry_count; flags = 0; if 
(task->ata_task.set_affil_pol) flags |= SET_AFFIL_POLICY; if (task->ata_task.stp_affil_pol) flags |= STP_AFFIL_POLICY; scb->ata_task.flags = flags; } ascb->tasklet_complete = asd_task_tasklet_complete; if (likely(!task->ata_task.device_control_reg_update)) res = asd_map_scatterlist(task, scb->ata_task.sg_element, gfp_flags); return res; } static void asd_unbuild_ata_ascb(struct asd_ascb *a) { asd_unmap_scatterlist(a); } /* ---------- SMP ---------- */ static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task, gfp_t gfp_flags) { struct asd_ha_struct *asd_ha = ascb->ha; struct domain_device *dev = task->dev; struct scb *scb; pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1, PCI_DMA_TODEVICE); pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1, PCI_DMA_FROMDEVICE); scb = ascb->scb; scb->header.opcode = INITIATE_SMP_TASK; scb->smp_task.proto_conn_rate = dev->linkrate; scb->smp_task.smp_req.bus_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req)); scb->smp_task.smp_req.size = cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4); scb->smp_task.smp_resp.bus_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp)); scb->smp_task.smp_resp.size = cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4); scb->smp_task.sister_scb = cpu_to_le16(0xFFFF); scb->smp_task.conn_handle = cpu_to_le16((u16) (unsigned long)dev->lldd_dev); ascb->tasklet_complete = asd_task_tasklet_complete; return 0; } static void asd_unbuild_smp_ascb(struct asd_ascb *a) { struct sas_task *task = a->uldd_task; BUG_ON(!task); pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1, PCI_DMA_TODEVICE); pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1, PCI_DMA_FROMDEVICE); } /* ---------- SSP ---------- */ static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task, gfp_t gfp_flags) { struct domain_device *dev = task->dev; struct scb *scb; int res = 0; scb = ascb->scb; scb->header.opcode = INITIATE_SSP_TASK; scb->ssp_task.proto_conn_rate = (1 << 4); /* SSP */ scb->ssp_task.proto_conn_rate |= dev->linkrate; scb->ssp_task.total_xfer_len = cpu_to_le32(task->total_xfer_len); scb->ssp_task.ssp_frame.frame_type = SSP_DATA; memcpy(scb->ssp_task.ssp_frame.hashed_dest_addr, dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); memcpy(scb->ssp_task.ssp_frame.hashed_src_addr, dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); scb->ssp_task.ssp_frame.tptt = cpu_to_be16(0xFFFF); memcpy(scb->ssp_task.ssp_cmd.lun, task->ssp_task.LUN, 8); if (task->ssp_task.enable_first_burst) scb->ssp_task.ssp_cmd.efb_prio_attr |= EFB_MASK; scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_prio << 3); scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_attr & 7); memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cdb, 16); scb->ssp_task.sister_scb = cpu_to_le16(0xFFFF); scb->ssp_task.conn_handle = cpu_to_le16( (u16)(unsigned long)dev->lldd_dev); scb->ssp_task.data_dir = data_dir_flags[task->data_dir]; scb->ssp_task.retry_count = scb->ssp_task.retry_count; ascb->tasklet_complete = asd_task_tasklet_complete; res = asd_map_scatterlist(task, scb->ssp_task.sg_element, gfp_flags); return res; } static void asd_unbuild_ssp_ascb(struct asd_ascb *a) { asd_unmap_scatterlist(a); } /* ---------- Execute Task ---------- */ static int asd_can_queue(struct asd_ha_struct *asd_ha, int num) { int res = 0; unsigned long flags; spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags); if ((asd_ha->seq.can_queue - num) < 0) res = -SAS_QUEUE_FULL; else asd_ha->seq.can_queue -= num; 
spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); return res; } int asd_execute_task(struct sas_task *task, const int num, gfp_t gfp_flags) { int res = 0; LIST_HEAD(alist); struct sas_task *t = task; struct asd_ascb *ascb = NULL, *a; struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; unsigned long flags; res = asd_can_queue(asd_ha, num); if (res) return res; res = num; ascb = asd_ascb_alloc_list(asd_ha, &res, gfp_flags); if (res) { res = -ENOMEM; goto out_err; } __list_add(&alist, ascb->list.prev, &ascb->list); list_for_each_entry(a, &alist, list) { a->uldd_task = t; t->lldd_task = a; t = list_entry(t->list.next, struct sas_task, list); } list_for_each_entry(a, &alist, list) { t = a->uldd_task; a->uldd_timer = 1; if (t->task_proto & SAS_PROTOCOL_STP) t->task_proto = SAS_PROTOCOL_STP; switch (t->task_proto) { case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: res = asd_build_ata_ascb(a, t, gfp_flags); break; case SAS_PROTOCOL_SMP: res = asd_build_smp_ascb(a, t, gfp_flags); break; case SAS_PROTOCOL_SSP: res = asd_build_ssp_ascb(a, t, gfp_flags); break; default: asd_printk("unknown sas_task proto: 0x%x\n", t->task_proto); res = -ENOMEM; break; } if (res) goto out_err_unmap; spin_lock_irqsave(&t->task_state_lock, flags); t->task_state_flags |= SAS_TASK_AT_INITIATOR; spin_unlock_irqrestore(&t->task_state_lock, flags); } list_del_init(&alist); res = asd_post_ascb_list(asd_ha, ascb, num); if (unlikely(res)) { a = NULL; __list_add(&alist, ascb->list.prev, &ascb->list); goto out_err_unmap; } return 0; out_err_unmap: { struct asd_ascb *b = a; list_for_each_entry(a, &alist, list) { if (a == b) break; t = a->uldd_task; spin_lock_irqsave(&t->task_state_lock, flags); t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; spin_unlock_irqrestore(&t->task_state_lock, flags); switch (t->task_proto) { case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: asd_unbuild_ata_ascb(a); break; case SAS_PROTOCOL_SMP: asd_unbuild_smp_ascb(a); break; case SAS_PROTOCOL_SSP: asd_unbuild_ssp_ascb(a); default: break; } t->lldd_task = NULL; } } list_del_init(&alist); out_err: if (ascb) asd_ascb_free_list(ascb); asd_can_dequeue(asd_ha, num); return res; }
gpl-2.0
w1ndy/linux
arch/m68k/coldfire/m5206.c
1558
1605
/***************************************************************************/

/*
 *	m5206.c -- platform support for ColdFire 5206 based boards
 *
 *	Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
 *	Copyright (C) 2000-2001, Lineo Inc. (www.lineo.com)
 */

/***************************************************************************/

#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfclk.h>

/***************************************************************************/

DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);

struct clk *mcf_clks[] = {
	&clk_pll,
	&clk_sys,
	&clk_mcftmr0,
	&clk_mcftmr1,
	&clk_mcfuart0,
	&clk_mcfuart1,
	NULL
};

/***************************************************************************/

void __init config_BSP(char *commandp, int size)
{
#if defined(CONFIG_NETtel)
	/* Copy command line from FLASH to local buffer... */
	memcpy(commandp, (char *) 0xf0004000, size);
	commandp[size-1] = 0;
#endif /* CONFIG_NETtel */

	mach_sched_init = hw_timer_init;

	/* Only support the external interrupts on their primary level */
	mcf_mapirq2imr(25, MCFINTC_EINT1);
	mcf_mapirq2imr(28, MCFINTC_EINT4);
	mcf_mapirq2imr(31, MCFINTC_EINT7);
}

/***************************************************************************/
gpl-2.0
geekboxzone/mmallow_kernel
fs/xfs/xfs_qm_syscalls.c
2070
24785
/* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/capability.h> #include "xfs.h" #include "xfs_fs.h" #include "xfs_bit.h" #include "xfs_log.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_alloc.h" #include "xfs_quota.h" #include "xfs_mount.h" #include "xfs_bmap_btree.h" #include "xfs_inode.h" #include "xfs_inode_item.h" #include "xfs_itable.h" #include "xfs_bmap.h" #include "xfs_rtalloc.h" #include "xfs_error.h" #include "xfs_attr.h" #include "xfs_buf_item.h" #include "xfs_utils.h" #include "xfs_qm.h" #include "xfs_trace.h" #include "xfs_icache.h" STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint); STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *, uint); STATIC uint xfs_qm_export_flags(uint); STATIC uint xfs_qm_export_qtype_flags(uint); /* * Turn off quota accounting and/or enforcement for all udquots and/or * gdquots. Called only at unmount time. * * This assumes that there are no dquots of this file system cached * incore, and modifies the ondisk dquot directly. Therefore, for example, * it is an error to call this twice, without purging the cache. */ int xfs_qm_scall_quotaoff( xfs_mount_t *mp, uint flags) { struct xfs_quotainfo *q = mp->m_quotainfo; uint dqtype; int error; uint inactivate_flags; xfs_qoff_logitem_t *qoffstart; /* * No file system can have quotas enabled on disk but not in core. * Note that quota utilities (like quotaoff) _expect_ * errno == EEXIST here. */ if ((mp->m_qflags & flags) == 0) return XFS_ERROR(EEXIST); error = 0; flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); /* * We don't want to deal with two quotaoffs messing up each other, * so we're going to serialize it. quotaoff isn't exactly a performance * critical thing. * If quotaoff, then we must be dealing with the root filesystem. */ ASSERT(q); mutex_lock(&q->qi_quotaofflock); /* * If we're just turning off quota enforcement, change mp and go. */ if ((flags & XFS_ALL_QUOTA_ACCT) == 0) { mp->m_qflags &= ~(flags); spin_lock(&mp->m_sb_lock); mp->m_sb.sb_qflags = mp->m_qflags; spin_unlock(&mp->m_sb_lock); mutex_unlock(&q->qi_quotaofflock); /* XXX what to do if error ? Revert back to old vals incore ? */ error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS); return (error); } dqtype = 0; inactivate_flags = 0; /* * If accounting is off, we must turn enforcement off, clear the * quota 'CHKD' certificate to make it known that we have to * do a quotacheck the next time this quota is turned on. 
*/ if (flags & XFS_UQUOTA_ACCT) { dqtype |= XFS_QMOPT_UQUOTA; flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD); inactivate_flags |= XFS_UQUOTA_ACTIVE; } if (flags & XFS_GQUOTA_ACCT) { dqtype |= XFS_QMOPT_GQUOTA; flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD); inactivate_flags |= XFS_GQUOTA_ACTIVE; } else if (flags & XFS_PQUOTA_ACCT) { dqtype |= XFS_QMOPT_PQUOTA; flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD); inactivate_flags |= XFS_PQUOTA_ACTIVE; } /* * Nothing to do? Don't complain. This happens when we're just * turning off quota enforcement. */ if ((mp->m_qflags & flags) == 0) goto out_unlock; /* * Write the LI_QUOTAOFF log record, and do SB changes atomically, * and synchronously. If we fail to write, we should abort the * operation as it cannot be recovered safely if we crash. */ error = xfs_qm_log_quotaoff(mp, &qoffstart, flags); if (error) goto out_unlock; /* * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct * to take care of the race between dqget and quotaoff. We don't take * any special locks to reset these bits. All processes need to check * these bits *after* taking inode lock(s) to see if the particular * quota type is in the process of being turned off. If *ACTIVE, it is * guaranteed that all dquot structures and all quotainode ptrs will all * stay valid as long as that inode is kept locked. * * There is no turning back after this. */ mp->m_qflags &= ~inactivate_flags; /* * Give back all the dquot reference(s) held by inodes. * Here we go thru every single incore inode in this file system, and * do a dqrele on the i_udquot/i_gdquot that it may have. * Essentially, as long as somebody has an inode locked, this guarantees * that quotas will not be turned off. This is handy because in a * transaction once we lock the inode(s) and check for quotaon, we can * depend on the quota inodes (and other things) being valid as long as * we keep the lock(s). */ xfs_qm_dqrele_all_inodes(mp, flags); /* * Next we make the changes in the quota flag in the mount struct. * This isn't protected by a particular lock directly, because we * don't want to take a mrlock every time we depend on quotas being on. */ mp->m_qflags &= ~flags; /* * Go through all the dquots of this file system and purge them, * according to what was turned off. */ xfs_qm_dqpurge_all(mp, dqtype); /* * Transactions that had started before ACTIVE state bit was cleared * could have logged many dquots, so they'd have higher LSNs than * the first QUOTAOFF log record does. If we happen to crash when * the tail of the log has gone past the QUOTAOFF record, but * before the last dquot modification, those dquots __will__ * recover, and that's not good. * * So, we have QUOTAOFF start and end logitems; the start * logitem won't get overwritten until the end logitem appears... */ error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags); if (error) { /* We're screwed now. Shutdown is the only option. */ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); goto out_unlock; } /* * If quotas is completely disabled, close shop. */ if (((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET1) || ((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET2)) { mutex_unlock(&q->qi_quotaofflock); xfs_qm_destroy_quotainfo(mp); return (0); } /* * Release our quotainode references if we don't need them anymore. 
*/ if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) { IRELE(q->qi_uquotaip); q->qi_uquotaip = NULL; } if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && q->qi_gquotaip) { IRELE(q->qi_gquotaip); q->qi_gquotaip = NULL; } out_unlock: mutex_unlock(&q->qi_quotaofflock); return error; } STATIC int xfs_qm_scall_trunc_qfile( struct xfs_mount *mp, xfs_ino_t ino) { struct xfs_inode *ip; struct xfs_trans *tp; int error; if (ino == NULLFSINO) return 0; error = xfs_iget(mp, NULL, ino, 0, 0, &ip); if (error) return error; xfs_ilock(ip, XFS_IOLOCK_EXCL); tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE); error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT); if (error) { xfs_trans_cancel(tp, 0); xfs_iunlock(ip, XFS_IOLOCK_EXCL); goto out_put; } xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip, 0); ip->i_d.di_size = 0; xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0); if (error) { xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); goto out_unlock; } ASSERT(ip->i_d.di_nextents == 0); xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); out_unlock: xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); out_put: IRELE(ip); return error; } int xfs_qm_scall_trunc_qfiles( xfs_mount_t *mp, uint flags) { int error = 0, error2 = 0; if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) { xfs_debug(mp, "%s: flags=%x m_qflags=%x\n", __func__, flags, mp->m_qflags); return XFS_ERROR(EINVAL); } if (flags & XFS_DQ_USER) error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino); if (flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino); return error ? error : error2; } /* * Switch on (a given) quota enforcement for a filesystem. This takes * effect immediately. * (Switching on quota accounting must be done at mount time.) */ int xfs_qm_scall_quotaon( xfs_mount_t *mp, uint flags) { int error; uint qf; __int64_t sbflags; flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); /* * Switching on quota accounting must be done at mount time. */ flags &= ~(XFS_ALL_QUOTA_ACCT); sbflags = 0; if (flags == 0) { xfs_debug(mp, "%s: zero flags, m_qflags=%x\n", __func__, mp->m_qflags); return XFS_ERROR(EINVAL); } /* No fs can turn on quotas with a delayed effect */ ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0); /* * Can't enforce without accounting. We check the superblock * qflags here instead of m_qflags because rootfs can have * quota acct on ondisk without m_qflags' knowing. */ if (((flags & XFS_UQUOTA_ACCT) == 0 && (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 && (flags & XFS_UQUOTA_ENFD)) || ((flags & XFS_PQUOTA_ACCT) == 0 && (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 && (flags & XFS_GQUOTA_ACCT) == 0 && (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 && (flags & XFS_OQUOTA_ENFD))) { xfs_debug(mp, "%s: Can't enforce without acct, flags=%x sbflags=%x\n", __func__, flags, mp->m_sb.sb_qflags); return XFS_ERROR(EINVAL); } /* * If everything's up to-date incore, then don't waste time. */ if ((mp->m_qflags & flags) == flags) return XFS_ERROR(EEXIST); /* * Change sb_qflags on disk but not incore mp->qflags * if this is the root filesystem. */ spin_lock(&mp->m_sb_lock); qf = mp->m_sb.sb_qflags; mp->m_sb.sb_qflags = qf | flags; spin_unlock(&mp->m_sb_lock); /* * There's nothing to change if it's the same. 
*/ if ((qf & flags) == flags && sbflags == 0) return XFS_ERROR(EEXIST); sbflags |= XFS_SB_QFLAGS; if ((error = xfs_qm_write_sb_changes(mp, sbflags))) return (error); /* * If we aren't trying to switch on quota enforcement, we are done. */ if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) != (mp->m_qflags & XFS_UQUOTA_ACCT)) || ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) != (mp->m_qflags & XFS_PQUOTA_ACCT)) || ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) != (mp->m_qflags & XFS_GQUOTA_ACCT)) || (flags & XFS_ALL_QUOTA_ENFD) == 0) return (0); if (! XFS_IS_QUOTA_RUNNING(mp)) return XFS_ERROR(ESRCH); /* * Switch on quota enforcement in core. */ mutex_lock(&mp->m_quotainfo->qi_quotaofflock); mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD); mutex_unlock(&mp->m_quotainfo->qi_quotaofflock); return (0); } /* * Return quota status information, such as uquota-off, enforcements, etc. */ int xfs_qm_scall_getqstat( struct xfs_mount *mp, struct fs_quota_stat *out) { struct xfs_quotainfo *q = mp->m_quotainfo; struct xfs_inode *uip, *gip; bool tempuqip, tempgqip; uip = gip = NULL; tempuqip = tempgqip = false; memset(out, 0, sizeof(fs_quota_stat_t)); out->qs_version = FS_QSTAT_VERSION; if (!xfs_sb_version_hasquota(&mp->m_sb)) { out->qs_uquota.qfs_ino = NULLFSINO; out->qs_gquota.qfs_ino = NULLFSINO; return (0); } out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags & (XFS_ALL_QUOTA_ACCT| XFS_ALL_QUOTA_ENFD)); out->qs_pad = 0; out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino; out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino; if (q) { uip = q->qi_uquotaip; gip = q->qi_gquotaip; } if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) { if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &uip) == 0) tempuqip = true; } if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) { if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &gip) == 0) tempgqip = true; } if (uip) { out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks; out->qs_uquota.qfs_nextents = uip->i_d.di_nextents; if (tempuqip) IRELE(uip); } if (gip) { out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks; out->qs_gquota.qfs_nextents = gip->i_d.di_nextents; if (tempgqip) IRELE(gip); } if (q) { out->qs_incoredqs = q->qi_dquots; out->qs_btimelimit = q->qi_btimelimit; out->qs_itimelimit = q->qi_itimelimit; out->qs_rtbtimelimit = q->qi_rtbtimelimit; out->qs_bwarnlimit = q->qi_bwarnlimit; out->qs_iwarnlimit = q->qi_iwarnlimit; } return 0; } #define XFS_DQ_MASK \ (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK) /* * Adjust quota limits, and start/stop timers accordingly. */ int xfs_qm_scall_setqlim( struct xfs_mount *mp, xfs_dqid_t id, uint type, fs_disk_quota_t *newlim) { struct xfs_quotainfo *q = mp->m_quotainfo; struct xfs_disk_dquot *ddq; struct xfs_dquot *dqp; struct xfs_trans *tp; int error; xfs_qcnt_t hard, soft; if (newlim->d_fieldmask & ~XFS_DQ_MASK) return EINVAL; if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0) return 0; /* * We don't want to race with a quotaoff so take the quotaoff lock. * We don't hold an inode lock, so there's nothing else to stop * a quotaoff from happening. */ mutex_lock(&q->qi_quotaofflock); /* * Get the dquot (locked) before we start, as we need to do a * transaction to allocate it if it doesn't exist. Once we have the * dquot, unlock it so we can start the next transaction safely. We hold * a reference to the dquot, so it's safe to do this unlock/lock without * it being reclaimed in the mean time. 
*/ error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp); if (error) { ASSERT(error != ENOENT); goto out_unlock; } xfs_dqunlock(dqp); tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM); error = xfs_trans_reserve(tp, 0, XFS_QM_SETQLIM_LOG_RES(mp), 0, 0, XFS_DEFAULT_LOG_COUNT); if (error) { xfs_trans_cancel(tp, 0); goto out_rele; } xfs_dqlock(dqp); xfs_trans_dqjoin(tp, dqp); ddq = &dqp->q_core; /* * Make sure that hardlimits are >= soft limits before changing. */ hard = (newlim->d_fieldmask & FS_DQ_BHARD) ? (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) : be64_to_cpu(ddq->d_blk_hardlimit); soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ? (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) : be64_to_cpu(ddq->d_blk_softlimit); if (hard == 0 || hard >= soft) { ddq->d_blk_hardlimit = cpu_to_be64(hard); ddq->d_blk_softlimit = cpu_to_be64(soft); xfs_dquot_set_prealloc_limits(dqp); if (id == 0) { q->qi_bhardlimit = hard; q->qi_bsoftlimit = soft; } } else { xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft); } hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ? (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) : be64_to_cpu(ddq->d_rtb_hardlimit); soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ? (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) : be64_to_cpu(ddq->d_rtb_softlimit); if (hard == 0 || hard >= soft) { ddq->d_rtb_hardlimit = cpu_to_be64(hard); ddq->d_rtb_softlimit = cpu_to_be64(soft); if (id == 0) { q->qi_rtbhardlimit = hard; q->qi_rtbsoftlimit = soft; } } else { xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft); } hard = (newlim->d_fieldmask & FS_DQ_IHARD) ? (xfs_qcnt_t) newlim->d_ino_hardlimit : be64_to_cpu(ddq->d_ino_hardlimit); soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ? (xfs_qcnt_t) newlim->d_ino_softlimit : be64_to_cpu(ddq->d_ino_softlimit); if (hard == 0 || hard >= soft) { ddq->d_ino_hardlimit = cpu_to_be64(hard); ddq->d_ino_softlimit = cpu_to_be64(soft); if (id == 0) { q->qi_ihardlimit = hard; q->qi_isoftlimit = soft; } } else { xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft); } /* * Update warnings counter(s) if requested */ if (newlim->d_fieldmask & FS_DQ_BWARNS) ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns); if (newlim->d_fieldmask & FS_DQ_IWARNS) ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns); if (newlim->d_fieldmask & FS_DQ_RTBWARNS) ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns); if (id == 0) { /* * Timelimits for the super user set the relative time * the other users can be over quota for this file system. * If it is zero a default is used. Ditto for the default * soft and hard limit values (already done, above), and * for warnings. */ if (newlim->d_fieldmask & FS_DQ_BTIMER) { q->qi_btimelimit = newlim->d_btimer; ddq->d_btimer = cpu_to_be32(newlim->d_btimer); } if (newlim->d_fieldmask & FS_DQ_ITIMER) { q->qi_itimelimit = newlim->d_itimer; ddq->d_itimer = cpu_to_be32(newlim->d_itimer); } if (newlim->d_fieldmask & FS_DQ_RTBTIMER) { q->qi_rtbtimelimit = newlim->d_rtbtimer; ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer); } if (newlim->d_fieldmask & FS_DQ_BWARNS) q->qi_bwarnlimit = newlim->d_bwarns; if (newlim->d_fieldmask & FS_DQ_IWARNS) q->qi_iwarnlimit = newlim->d_iwarns; if (newlim->d_fieldmask & FS_DQ_RTBWARNS) q->qi_rtbwarnlimit = newlim->d_rtbwarns; } else { /* * If the user is now over quota, start the timelimit. * The user will not be 'warned'. * Note that we keep the timers ticking, whether enforcement * is on or off. We don't really want to bother with iterating * over all ondisk dquots and turning the timers on/off. 
*/ xfs_qm_adjust_dqtimers(mp, ddq); } dqp->dq_flags |= XFS_DQ_DIRTY; xfs_trans_log_dquot(tp, dqp); error = xfs_trans_commit(tp, 0); out_rele: xfs_qm_dqrele(dqp); out_unlock: mutex_unlock(&q->qi_quotaofflock); return error; } STATIC int xfs_qm_log_quotaoff_end( xfs_mount_t *mp, xfs_qoff_logitem_t *startqoff, uint flags) { xfs_trans_t *tp; int error; xfs_qoff_logitem_t *qoffi; tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END); error = xfs_trans_reserve(tp, 0, XFS_QM_QUOTAOFF_END_LOG_RES(mp), 0, 0, XFS_DEFAULT_LOG_COUNT); if (error) { xfs_trans_cancel(tp, 0); return (error); } qoffi = xfs_trans_get_qoff_item(tp, startqoff, flags & XFS_ALL_QUOTA_ACCT); xfs_trans_log_quotaoff_item(tp, qoffi); /* * We have to make sure that the transaction is secure on disk before we * return and actually stop quota accounting. So, make it synchronous. * We don't care about quotoff's performance. */ xfs_trans_set_sync(tp); error = xfs_trans_commit(tp, 0); return (error); } STATIC int xfs_qm_log_quotaoff( xfs_mount_t *mp, xfs_qoff_logitem_t **qoffstartp, uint flags) { xfs_trans_t *tp; int error; xfs_qoff_logitem_t *qoffi=NULL; uint oldsbqflag=0; tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF); error = xfs_trans_reserve(tp, 0, XFS_QM_QUOTAOFF_LOG_RES(mp), 0, 0, XFS_DEFAULT_LOG_COUNT); if (error) goto error0; qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT); xfs_trans_log_quotaoff_item(tp, qoffi); spin_lock(&mp->m_sb_lock); oldsbqflag = mp->m_sb.sb_qflags; mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL; spin_unlock(&mp->m_sb_lock); xfs_mod_sb(tp, XFS_SB_QFLAGS); /* * We have to make sure that the transaction is secure on disk before we * return and actually stop quota accounting. So, make it synchronous. * We don't care about quotoff's performance. */ xfs_trans_set_sync(tp); error = xfs_trans_commit(tp, 0); error0: if (error) { xfs_trans_cancel(tp, 0); /* * No one else is modifying sb_qflags, so this is OK. * We still hold the quotaofflock. */ spin_lock(&mp->m_sb_lock); mp->m_sb.sb_qflags = oldsbqflag; spin_unlock(&mp->m_sb_lock); } *qoffstartp = qoffi; return (error); } int xfs_qm_scall_getquota( struct xfs_mount *mp, xfs_dqid_t id, uint type, struct fs_disk_quota *dst) { struct xfs_dquot *dqp; int error; /* * Try to get the dquot. We don't want it allocated on disk, so * we aren't passing the XFS_QMOPT_DOALLOC flag. If it doesn't * exist, we'll get ENOENT back. */ error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp); if (error) return error; /* * If everything's NULL, this dquot doesn't quite exist as far as * our utility programs are concerned. 
*/ if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) { error = XFS_ERROR(ENOENT); goto out_put; } memset(dst, 0, sizeof(*dst)); dst->d_version = FS_DQUOT_VERSION; dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags); dst->d_id = be32_to_cpu(dqp->q_core.d_id); dst->d_blk_hardlimit = XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit)); dst->d_blk_softlimit = XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit)); dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount); dst->d_icount = dqp->q_res_icount; dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer); dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer); dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns); dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns); dst->d_rtb_hardlimit = XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit)); dst->d_rtb_softlimit = XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit)); dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount); dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer); dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns); /* * Internally, we don't reset all the timers when quota enforcement * gets turned off. No need to confuse the user level code, * so return zeroes in that case. */ if ((!XFS_IS_UQUOTA_ENFORCED(mp) && dqp->q_core.d_flags == XFS_DQ_USER) || (!XFS_IS_OQUOTA_ENFORCED(mp) && (dqp->q_core.d_flags & (XFS_DQ_PROJ | XFS_DQ_GROUP)))) { dst->d_btimer = 0; dst->d_itimer = 0; dst->d_rtbtimer = 0; } #ifdef DEBUG if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) || (XFS_IS_OQUOTA_ENFORCED(mp) && (dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) && dst->d_id != 0) { if ((dst->d_bcount > dst->d_blk_softlimit) && (dst->d_blk_softlimit > 0)) { ASSERT(dst->d_btimer != 0); } if ((dst->d_icount > dst->d_ino_softlimit) && (dst->d_ino_softlimit > 0)) { ASSERT(dst->d_itimer != 0); } } #endif out_put: xfs_qm_dqput(dqp); return error; } STATIC uint xfs_qm_export_qtype_flags( uint flags) { /* * Can't be more than one, or none. */ ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) != (FS_PROJ_QUOTA | FS_USER_QUOTA)); ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) != (FS_PROJ_QUOTA | FS_GROUP_QUOTA)); ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) != (FS_USER_QUOTA | FS_GROUP_QUOTA)); ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0); return (flags & XFS_DQ_USER) ? FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ? FS_PROJ_QUOTA : FS_GROUP_QUOTA; } STATIC uint xfs_qm_export_flags( uint flags) { uint uflags; uflags = 0; if (flags & XFS_UQUOTA_ACCT) uflags |= FS_QUOTA_UDQ_ACCT; if (flags & XFS_PQUOTA_ACCT) uflags |= FS_QUOTA_PDQ_ACCT; if (flags & XFS_GQUOTA_ACCT) uflags |= FS_QUOTA_GDQ_ACCT; if (flags & XFS_UQUOTA_ENFD) uflags |= FS_QUOTA_UDQ_ENFD; if (flags & (XFS_OQUOTA_ENFD)) { uflags |= (flags & XFS_GQUOTA_ACCT) ? 
FS_QUOTA_GDQ_ENFD : FS_QUOTA_PDQ_ENFD; } return (uflags); } STATIC int xfs_dqrele_inode( struct xfs_inode *ip, struct xfs_perag *pag, int flags, void *args) { /* skip quota inodes */ if (ip == ip->i_mount->m_quotainfo->qi_uquotaip || ip == ip->i_mount->m_quotainfo->qi_gquotaip) { ASSERT(ip->i_udquot == NULL); ASSERT(ip->i_gdquot == NULL); return 0; } xfs_ilock(ip, XFS_ILOCK_EXCL); if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) { xfs_qm_dqrele(ip->i_udquot); ip->i_udquot = NULL; } if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) { xfs_qm_dqrele(ip->i_gdquot); ip->i_gdquot = NULL; } xfs_iunlock(ip, XFS_ILOCK_EXCL); return 0; } /* * Go thru all the inodes in the file system, releasing their dquots. * * Note that the mount structure gets modified to indicate that quotas are off * AFTER this, in the case of quotaoff. */ void xfs_qm_dqrele_all_inodes( struct xfs_mount *mp, uint flags) { ASSERT(mp->m_quotainfo); xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, NULL); }
gpl-2.0
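The xfs_qm_scall_setqlim() path in the record above accepts a new block, realtime-block, or inode limit pair only when the hard limit is zero (meaning unlimited) or at least as large as the soft limit; otherwise the pair is reported via xfs_debug() and skipped. A minimal user-space sketch of that acceptance rule, with illustrative names (struct limit_pair, apply_limit_pair) that are not part of the XFS sources:

#include <stdint.h>
#include <stdio.h>

struct limit_pair {
	uint64_t hard;	/* 0 means "no limit" */
	uint64_t soft;
};

/* Same check the quota code applies before committing new limits:
 * take the pair only if the hard limit is unlimited or >= soft. */
static int apply_limit_pair(struct limit_pair *cur, uint64_t hard, uint64_t soft)
{
	if (hard == 0 || hard >= soft) {
		cur->hard = hard;
		cur->soft = soft;
		return 0;
	}
	fprintf(stderr, "rejecting limits: hard %llu < soft %llu\n",
		(unsigned long long)hard, (unsigned long long)soft);
	return -1;
}

int main(void)
{
	struct limit_pair blk = { 0, 0 };

	apply_limit_pair(&blk, 1000, 800);	/* accepted */
	apply_limit_pair(&blk, 500, 800);	/* rejected, blk unchanged */
	printf("hard=%llu soft=%llu\n",
	       (unsigned long long)blk.hard, (unsigned long long)blk.soft);
	return 0;
}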
khusika/android_kernel_samsung_fortuna-common
fs/xfs/xfs_qm_syscalls.c
2070
24785
/* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/capability.h> #include "xfs.h" #include "xfs_fs.h" #include "xfs_bit.h" #include "xfs_log.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_alloc.h" #include "xfs_quota.h" #include "xfs_mount.h" #include "xfs_bmap_btree.h" #include "xfs_inode.h" #include "xfs_inode_item.h" #include "xfs_itable.h" #include "xfs_bmap.h" #include "xfs_rtalloc.h" #include "xfs_error.h" #include "xfs_attr.h" #include "xfs_buf_item.h" #include "xfs_utils.h" #include "xfs_qm.h" #include "xfs_trace.h" #include "xfs_icache.h" STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint); STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *, uint); STATIC uint xfs_qm_export_flags(uint); STATIC uint xfs_qm_export_qtype_flags(uint); /* * Turn off quota accounting and/or enforcement for all udquots and/or * gdquots. Called only at unmount time. * * This assumes that there are no dquots of this file system cached * incore, and modifies the ondisk dquot directly. Therefore, for example, * it is an error to call this twice, without purging the cache. */ int xfs_qm_scall_quotaoff( xfs_mount_t *mp, uint flags) { struct xfs_quotainfo *q = mp->m_quotainfo; uint dqtype; int error; uint inactivate_flags; xfs_qoff_logitem_t *qoffstart; /* * No file system can have quotas enabled on disk but not in core. * Note that quota utilities (like quotaoff) _expect_ * errno == EEXIST here. */ if ((mp->m_qflags & flags) == 0) return XFS_ERROR(EEXIST); error = 0; flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); /* * We don't want to deal with two quotaoffs messing up each other, * so we're going to serialize it. quotaoff isn't exactly a performance * critical thing. * If quotaoff, then we must be dealing with the root filesystem. */ ASSERT(q); mutex_lock(&q->qi_quotaofflock); /* * If we're just turning off quota enforcement, change mp and go. */ if ((flags & XFS_ALL_QUOTA_ACCT) == 0) { mp->m_qflags &= ~(flags); spin_lock(&mp->m_sb_lock); mp->m_sb.sb_qflags = mp->m_qflags; spin_unlock(&mp->m_sb_lock); mutex_unlock(&q->qi_quotaofflock); /* XXX what to do if error ? Revert back to old vals incore ? */ error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS); return (error); } dqtype = 0; inactivate_flags = 0; /* * If accounting is off, we must turn enforcement off, clear the * quota 'CHKD' certificate to make it known that we have to * do a quotacheck the next time this quota is turned on. 
*/ if (flags & XFS_UQUOTA_ACCT) { dqtype |= XFS_QMOPT_UQUOTA; flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD); inactivate_flags |= XFS_UQUOTA_ACTIVE; } if (flags & XFS_GQUOTA_ACCT) { dqtype |= XFS_QMOPT_GQUOTA; flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD); inactivate_flags |= XFS_GQUOTA_ACTIVE; } else if (flags & XFS_PQUOTA_ACCT) { dqtype |= XFS_QMOPT_PQUOTA; flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD); inactivate_flags |= XFS_PQUOTA_ACTIVE; } /* * Nothing to do? Don't complain. This happens when we're just * turning off quota enforcement. */ if ((mp->m_qflags & flags) == 0) goto out_unlock; /* * Write the LI_QUOTAOFF log record, and do SB changes atomically, * and synchronously. If we fail to write, we should abort the * operation as it cannot be recovered safely if we crash. */ error = xfs_qm_log_quotaoff(mp, &qoffstart, flags); if (error) goto out_unlock; /* * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct * to take care of the race between dqget and quotaoff. We don't take * any special locks to reset these bits. All processes need to check * these bits *after* taking inode lock(s) to see if the particular * quota type is in the process of being turned off. If *ACTIVE, it is * guaranteed that all dquot structures and all quotainode ptrs will all * stay valid as long as that inode is kept locked. * * There is no turning back after this. */ mp->m_qflags &= ~inactivate_flags; /* * Give back all the dquot reference(s) held by inodes. * Here we go thru every single incore inode in this file system, and * do a dqrele on the i_udquot/i_gdquot that it may have. * Essentially, as long as somebody has an inode locked, this guarantees * that quotas will not be turned off. This is handy because in a * transaction once we lock the inode(s) and check for quotaon, we can * depend on the quota inodes (and other things) being valid as long as * we keep the lock(s). */ xfs_qm_dqrele_all_inodes(mp, flags); /* * Next we make the changes in the quota flag in the mount struct. * This isn't protected by a particular lock directly, because we * don't want to take a mrlock every time we depend on quotas being on. */ mp->m_qflags &= ~flags; /* * Go through all the dquots of this file system and purge them, * according to what was turned off. */ xfs_qm_dqpurge_all(mp, dqtype); /* * Transactions that had started before ACTIVE state bit was cleared * could have logged many dquots, so they'd have higher LSNs than * the first QUOTAOFF log record does. If we happen to crash when * the tail of the log has gone past the QUOTAOFF record, but * before the last dquot modification, those dquots __will__ * recover, and that's not good. * * So, we have QUOTAOFF start and end logitems; the start * logitem won't get overwritten until the end logitem appears... */ error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags); if (error) { /* We're screwed now. Shutdown is the only option. */ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); goto out_unlock; } /* * If quotas is completely disabled, close shop. */ if (((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET1) || ((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET2)) { mutex_unlock(&q->qi_quotaofflock); xfs_qm_destroy_quotainfo(mp); return (0); } /* * Release our quotainode references if we don't need them anymore. 
*/ if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) { IRELE(q->qi_uquotaip); q->qi_uquotaip = NULL; } if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && q->qi_gquotaip) { IRELE(q->qi_gquotaip); q->qi_gquotaip = NULL; } out_unlock: mutex_unlock(&q->qi_quotaofflock); return error; } STATIC int xfs_qm_scall_trunc_qfile( struct xfs_mount *mp, xfs_ino_t ino) { struct xfs_inode *ip; struct xfs_trans *tp; int error; if (ino == NULLFSINO) return 0; error = xfs_iget(mp, NULL, ino, 0, 0, &ip); if (error) return error; xfs_ilock(ip, XFS_IOLOCK_EXCL); tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE); error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT); if (error) { xfs_trans_cancel(tp, 0); xfs_iunlock(ip, XFS_IOLOCK_EXCL); goto out_put; } xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip, 0); ip->i_d.di_size = 0; xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0); if (error) { xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); goto out_unlock; } ASSERT(ip->i_d.di_nextents == 0); xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); out_unlock: xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); out_put: IRELE(ip); return error; } int xfs_qm_scall_trunc_qfiles( xfs_mount_t *mp, uint flags) { int error = 0, error2 = 0; if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) { xfs_debug(mp, "%s: flags=%x m_qflags=%x\n", __func__, flags, mp->m_qflags); return XFS_ERROR(EINVAL); } if (flags & XFS_DQ_USER) error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino); if (flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino); return error ? error : error2; } /* * Switch on (a given) quota enforcement for a filesystem. This takes * effect immediately. * (Switching on quota accounting must be done at mount time.) */ int xfs_qm_scall_quotaon( xfs_mount_t *mp, uint flags) { int error; uint qf; __int64_t sbflags; flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); /* * Switching on quota accounting must be done at mount time. */ flags &= ~(XFS_ALL_QUOTA_ACCT); sbflags = 0; if (flags == 0) { xfs_debug(mp, "%s: zero flags, m_qflags=%x\n", __func__, mp->m_qflags); return XFS_ERROR(EINVAL); } /* No fs can turn on quotas with a delayed effect */ ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0); /* * Can't enforce without accounting. We check the superblock * qflags here instead of m_qflags because rootfs can have * quota acct on ondisk without m_qflags' knowing. */ if (((flags & XFS_UQUOTA_ACCT) == 0 && (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 && (flags & XFS_UQUOTA_ENFD)) || ((flags & XFS_PQUOTA_ACCT) == 0 && (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 && (flags & XFS_GQUOTA_ACCT) == 0 && (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 && (flags & XFS_OQUOTA_ENFD))) { xfs_debug(mp, "%s: Can't enforce without acct, flags=%x sbflags=%x\n", __func__, flags, mp->m_sb.sb_qflags); return XFS_ERROR(EINVAL); } /* * If everything's up to-date incore, then don't waste time. */ if ((mp->m_qflags & flags) == flags) return XFS_ERROR(EEXIST); /* * Change sb_qflags on disk but not incore mp->qflags * if this is the root filesystem. */ spin_lock(&mp->m_sb_lock); qf = mp->m_sb.sb_qflags; mp->m_sb.sb_qflags = qf | flags; spin_unlock(&mp->m_sb_lock); /* * There's nothing to change if it's the same. 
*/ if ((qf & flags) == flags && sbflags == 0) return XFS_ERROR(EEXIST); sbflags |= XFS_SB_QFLAGS; if ((error = xfs_qm_write_sb_changes(mp, sbflags))) return (error); /* * If we aren't trying to switch on quota enforcement, we are done. */ if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) != (mp->m_qflags & XFS_UQUOTA_ACCT)) || ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) != (mp->m_qflags & XFS_PQUOTA_ACCT)) || ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) != (mp->m_qflags & XFS_GQUOTA_ACCT)) || (flags & XFS_ALL_QUOTA_ENFD) == 0) return (0); if (! XFS_IS_QUOTA_RUNNING(mp)) return XFS_ERROR(ESRCH); /* * Switch on quota enforcement in core. */ mutex_lock(&mp->m_quotainfo->qi_quotaofflock); mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD); mutex_unlock(&mp->m_quotainfo->qi_quotaofflock); return (0); } /* * Return quota status information, such as uquota-off, enforcements, etc. */ int xfs_qm_scall_getqstat( struct xfs_mount *mp, struct fs_quota_stat *out) { struct xfs_quotainfo *q = mp->m_quotainfo; struct xfs_inode *uip, *gip; bool tempuqip, tempgqip; uip = gip = NULL; tempuqip = tempgqip = false; memset(out, 0, sizeof(fs_quota_stat_t)); out->qs_version = FS_QSTAT_VERSION; if (!xfs_sb_version_hasquota(&mp->m_sb)) { out->qs_uquota.qfs_ino = NULLFSINO; out->qs_gquota.qfs_ino = NULLFSINO; return (0); } out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags & (XFS_ALL_QUOTA_ACCT| XFS_ALL_QUOTA_ENFD)); out->qs_pad = 0; out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino; out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino; if (q) { uip = q->qi_uquotaip; gip = q->qi_gquotaip; } if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) { if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &uip) == 0) tempuqip = true; } if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) { if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &gip) == 0) tempgqip = true; } if (uip) { out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks; out->qs_uquota.qfs_nextents = uip->i_d.di_nextents; if (tempuqip) IRELE(uip); } if (gip) { out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks; out->qs_gquota.qfs_nextents = gip->i_d.di_nextents; if (tempgqip) IRELE(gip); } if (q) { out->qs_incoredqs = q->qi_dquots; out->qs_btimelimit = q->qi_btimelimit; out->qs_itimelimit = q->qi_itimelimit; out->qs_rtbtimelimit = q->qi_rtbtimelimit; out->qs_bwarnlimit = q->qi_bwarnlimit; out->qs_iwarnlimit = q->qi_iwarnlimit; } return 0; } #define XFS_DQ_MASK \ (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK) /* * Adjust quota limits, and start/stop timers accordingly. */ int xfs_qm_scall_setqlim( struct xfs_mount *mp, xfs_dqid_t id, uint type, fs_disk_quota_t *newlim) { struct xfs_quotainfo *q = mp->m_quotainfo; struct xfs_disk_dquot *ddq; struct xfs_dquot *dqp; struct xfs_trans *tp; int error; xfs_qcnt_t hard, soft; if (newlim->d_fieldmask & ~XFS_DQ_MASK) return EINVAL; if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0) return 0; /* * We don't want to race with a quotaoff so take the quotaoff lock. * We don't hold an inode lock, so there's nothing else to stop * a quotaoff from happening. */ mutex_lock(&q->qi_quotaofflock); /* * Get the dquot (locked) before we start, as we need to do a * transaction to allocate it if it doesn't exist. Once we have the * dquot, unlock it so we can start the next transaction safely. We hold * a reference to the dquot, so it's safe to do this unlock/lock without * it being reclaimed in the mean time. 
*/ error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp); if (error) { ASSERT(error != ENOENT); goto out_unlock; } xfs_dqunlock(dqp); tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM); error = xfs_trans_reserve(tp, 0, XFS_QM_SETQLIM_LOG_RES(mp), 0, 0, XFS_DEFAULT_LOG_COUNT); if (error) { xfs_trans_cancel(tp, 0); goto out_rele; } xfs_dqlock(dqp); xfs_trans_dqjoin(tp, dqp); ddq = &dqp->q_core; /* * Make sure that hardlimits are >= soft limits before changing. */ hard = (newlim->d_fieldmask & FS_DQ_BHARD) ? (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) : be64_to_cpu(ddq->d_blk_hardlimit); soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ? (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) : be64_to_cpu(ddq->d_blk_softlimit); if (hard == 0 || hard >= soft) { ddq->d_blk_hardlimit = cpu_to_be64(hard); ddq->d_blk_softlimit = cpu_to_be64(soft); xfs_dquot_set_prealloc_limits(dqp); if (id == 0) { q->qi_bhardlimit = hard; q->qi_bsoftlimit = soft; } } else { xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft); } hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ? (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) : be64_to_cpu(ddq->d_rtb_hardlimit); soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ? (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) : be64_to_cpu(ddq->d_rtb_softlimit); if (hard == 0 || hard >= soft) { ddq->d_rtb_hardlimit = cpu_to_be64(hard); ddq->d_rtb_softlimit = cpu_to_be64(soft); if (id == 0) { q->qi_rtbhardlimit = hard; q->qi_rtbsoftlimit = soft; } } else { xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft); } hard = (newlim->d_fieldmask & FS_DQ_IHARD) ? (xfs_qcnt_t) newlim->d_ino_hardlimit : be64_to_cpu(ddq->d_ino_hardlimit); soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ? (xfs_qcnt_t) newlim->d_ino_softlimit : be64_to_cpu(ddq->d_ino_softlimit); if (hard == 0 || hard >= soft) { ddq->d_ino_hardlimit = cpu_to_be64(hard); ddq->d_ino_softlimit = cpu_to_be64(soft); if (id == 0) { q->qi_ihardlimit = hard; q->qi_isoftlimit = soft; } } else { xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft); } /* * Update warnings counter(s) if requested */ if (newlim->d_fieldmask & FS_DQ_BWARNS) ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns); if (newlim->d_fieldmask & FS_DQ_IWARNS) ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns); if (newlim->d_fieldmask & FS_DQ_RTBWARNS) ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns); if (id == 0) { /* * Timelimits for the super user set the relative time * the other users can be over quota for this file system. * If it is zero a default is used. Ditto for the default * soft and hard limit values (already done, above), and * for warnings. */ if (newlim->d_fieldmask & FS_DQ_BTIMER) { q->qi_btimelimit = newlim->d_btimer; ddq->d_btimer = cpu_to_be32(newlim->d_btimer); } if (newlim->d_fieldmask & FS_DQ_ITIMER) { q->qi_itimelimit = newlim->d_itimer; ddq->d_itimer = cpu_to_be32(newlim->d_itimer); } if (newlim->d_fieldmask & FS_DQ_RTBTIMER) { q->qi_rtbtimelimit = newlim->d_rtbtimer; ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer); } if (newlim->d_fieldmask & FS_DQ_BWARNS) q->qi_bwarnlimit = newlim->d_bwarns; if (newlim->d_fieldmask & FS_DQ_IWARNS) q->qi_iwarnlimit = newlim->d_iwarns; if (newlim->d_fieldmask & FS_DQ_RTBWARNS) q->qi_rtbwarnlimit = newlim->d_rtbwarns; } else { /* * If the user is now over quota, start the timelimit. * The user will not be 'warned'. * Note that we keep the timers ticking, whether enforcement * is on or off. We don't really want to bother with iterating * over all ondisk dquots and turning the timers on/off. 
*/ xfs_qm_adjust_dqtimers(mp, ddq); } dqp->dq_flags |= XFS_DQ_DIRTY; xfs_trans_log_dquot(tp, dqp); error = xfs_trans_commit(tp, 0); out_rele: xfs_qm_dqrele(dqp); out_unlock: mutex_unlock(&q->qi_quotaofflock); return error; } STATIC int xfs_qm_log_quotaoff_end( xfs_mount_t *mp, xfs_qoff_logitem_t *startqoff, uint flags) { xfs_trans_t *tp; int error; xfs_qoff_logitem_t *qoffi; tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END); error = xfs_trans_reserve(tp, 0, XFS_QM_QUOTAOFF_END_LOG_RES(mp), 0, 0, XFS_DEFAULT_LOG_COUNT); if (error) { xfs_trans_cancel(tp, 0); return (error); } qoffi = xfs_trans_get_qoff_item(tp, startqoff, flags & XFS_ALL_QUOTA_ACCT); xfs_trans_log_quotaoff_item(tp, qoffi); /* * We have to make sure that the transaction is secure on disk before we * return and actually stop quota accounting. So, make it synchronous. * We don't care about quotoff's performance. */ xfs_trans_set_sync(tp); error = xfs_trans_commit(tp, 0); return (error); } STATIC int xfs_qm_log_quotaoff( xfs_mount_t *mp, xfs_qoff_logitem_t **qoffstartp, uint flags) { xfs_trans_t *tp; int error; xfs_qoff_logitem_t *qoffi=NULL; uint oldsbqflag=0; tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF); error = xfs_trans_reserve(tp, 0, XFS_QM_QUOTAOFF_LOG_RES(mp), 0, 0, XFS_DEFAULT_LOG_COUNT); if (error) goto error0; qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT); xfs_trans_log_quotaoff_item(tp, qoffi); spin_lock(&mp->m_sb_lock); oldsbqflag = mp->m_sb.sb_qflags; mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL; spin_unlock(&mp->m_sb_lock); xfs_mod_sb(tp, XFS_SB_QFLAGS); /* * We have to make sure that the transaction is secure on disk before we * return and actually stop quota accounting. So, make it synchronous. * We don't care about quotoff's performance. */ xfs_trans_set_sync(tp); error = xfs_trans_commit(tp, 0); error0: if (error) { xfs_trans_cancel(tp, 0); /* * No one else is modifying sb_qflags, so this is OK. * We still hold the quotaofflock. */ spin_lock(&mp->m_sb_lock); mp->m_sb.sb_qflags = oldsbqflag; spin_unlock(&mp->m_sb_lock); } *qoffstartp = qoffi; return (error); } int xfs_qm_scall_getquota( struct xfs_mount *mp, xfs_dqid_t id, uint type, struct fs_disk_quota *dst) { struct xfs_dquot *dqp; int error; /* * Try to get the dquot. We don't want it allocated on disk, so * we aren't passing the XFS_QMOPT_DOALLOC flag. If it doesn't * exist, we'll get ENOENT back. */ error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp); if (error) return error; /* * If everything's NULL, this dquot doesn't quite exist as far as * our utility programs are concerned. 
*/ if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) { error = XFS_ERROR(ENOENT); goto out_put; } memset(dst, 0, sizeof(*dst)); dst->d_version = FS_DQUOT_VERSION; dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags); dst->d_id = be32_to_cpu(dqp->q_core.d_id); dst->d_blk_hardlimit = XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit)); dst->d_blk_softlimit = XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit)); dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount); dst->d_icount = dqp->q_res_icount; dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer); dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer); dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns); dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns); dst->d_rtb_hardlimit = XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit)); dst->d_rtb_softlimit = XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit)); dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount); dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer); dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns); /* * Internally, we don't reset all the timers when quota enforcement * gets turned off. No need to confuse the user level code, * so return zeroes in that case. */ if ((!XFS_IS_UQUOTA_ENFORCED(mp) && dqp->q_core.d_flags == XFS_DQ_USER) || (!XFS_IS_OQUOTA_ENFORCED(mp) && (dqp->q_core.d_flags & (XFS_DQ_PROJ | XFS_DQ_GROUP)))) { dst->d_btimer = 0; dst->d_itimer = 0; dst->d_rtbtimer = 0; } #ifdef DEBUG if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) || (XFS_IS_OQUOTA_ENFORCED(mp) && (dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) && dst->d_id != 0) { if ((dst->d_bcount > dst->d_blk_softlimit) && (dst->d_blk_softlimit > 0)) { ASSERT(dst->d_btimer != 0); } if ((dst->d_icount > dst->d_ino_softlimit) && (dst->d_ino_softlimit > 0)) { ASSERT(dst->d_itimer != 0); } } #endif out_put: xfs_qm_dqput(dqp); return error; } STATIC uint xfs_qm_export_qtype_flags( uint flags) { /* * Can't be more than one, or none. */ ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) != (FS_PROJ_QUOTA | FS_USER_QUOTA)); ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) != (FS_PROJ_QUOTA | FS_GROUP_QUOTA)); ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) != (FS_USER_QUOTA | FS_GROUP_QUOTA)); ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0); return (flags & XFS_DQ_USER) ? FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ? FS_PROJ_QUOTA : FS_GROUP_QUOTA; } STATIC uint xfs_qm_export_flags( uint flags) { uint uflags; uflags = 0; if (flags & XFS_UQUOTA_ACCT) uflags |= FS_QUOTA_UDQ_ACCT; if (flags & XFS_PQUOTA_ACCT) uflags |= FS_QUOTA_PDQ_ACCT; if (flags & XFS_GQUOTA_ACCT) uflags |= FS_QUOTA_GDQ_ACCT; if (flags & XFS_UQUOTA_ENFD) uflags |= FS_QUOTA_UDQ_ENFD; if (flags & (XFS_OQUOTA_ENFD)) { uflags |= (flags & XFS_GQUOTA_ACCT) ? 
FS_QUOTA_GDQ_ENFD : FS_QUOTA_PDQ_ENFD; } return (uflags); } STATIC int xfs_dqrele_inode( struct xfs_inode *ip, struct xfs_perag *pag, int flags, void *args) { /* skip quota inodes */ if (ip == ip->i_mount->m_quotainfo->qi_uquotaip || ip == ip->i_mount->m_quotainfo->qi_gquotaip) { ASSERT(ip->i_udquot == NULL); ASSERT(ip->i_gdquot == NULL); return 0; } xfs_ilock(ip, XFS_ILOCK_EXCL); if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) { xfs_qm_dqrele(ip->i_udquot); ip->i_udquot = NULL; } if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) { xfs_qm_dqrele(ip->i_gdquot); ip->i_gdquot = NULL; } xfs_iunlock(ip, XFS_ILOCK_EXCL); return 0; } /* * Go thru all the inodes in the file system, releasing their dquots. * * Note that the mount structure gets modified to indicate that quotas are off * AFTER this, in the case of quotaoff. */ void xfs_qm_dqrele_all_inodes( struct xfs_mount *mp, uint flags) { ASSERT(mp->m_quotainfo); xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, NULL); }
gpl-2.0
honeyx/S3mini_golden_kernel
arch/unicore32/kernel/traps.c
2838
7733
/* * linux/arch/unicore32/kernel/traps.c * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * 'traps.c' handles hardware exceptions after we have saved some state. * Mostly a debugging aid, but will probably kill the offending process. */ #include <linux/module.h> #include <linux/signal.h> #include <linux/spinlock.h> #include <linux/personality.h> #include <linux/kallsyms.h> #include <linux/kdebug.h> #include <linux/uaccess.h> #include <linux/delay.h> #include <linux/hardirq.h> #include <linux/init.h> #include <linux/atomic.h> #include <linux/unistd.h> #include <asm/cacheflush.h> #include <asm/system.h> #include <asm/traps.h> #include "setup.h" static void dump_mem(const char *, const char *, unsigned long, unsigned long); void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame) { #ifdef CONFIG_KALLSYMS printk(KERN_DEFAULT "[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from); #else printk(KERN_DEFAULT "Function entered at [<%08lx>] from [<%08lx>]\n", where, from); #endif } /* * Stack pointers should always be within the kernels view of * physical memory. If it is not there, then we can't dump * out any information relating to the stack. */ static int verify_stack(unsigned long sp) { if (sp < PAGE_OFFSET || (sp > (unsigned long)high_memory && high_memory != NULL)) return -EFAULT; return 0; } /* * Dump out the contents of some memory nicely... */ static void dump_mem(const char *lvl, const char *str, unsigned long bottom, unsigned long top) { unsigned long first; mm_segment_t fs; int i; /* * We need to switch to kernel mode so that we can use __get_user * to safely read from kernel space. Note that we now dump the * code first, just in case the backtrace kills us. */ fs = get_fs(); set_fs(KERNEL_DS); printk(KERN_DEFAULT "%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top); for (first = bottom & ~31; first < top; first += 32) { unsigned long p; char str[sizeof(" 12345678") * 8 + 1]; memset(str, ' ', sizeof(str)); str[sizeof(str) - 1] = '\0'; for (p = first, i = 0; i < 8 && p < top; i++, p += 4) { if (p >= bottom && p < top) { unsigned long val; if (__get_user(val, (unsigned long *)p) == 0) sprintf(str + i * 9, " %08lx", val); else sprintf(str + i * 9, " ????????"); } } printk(KERN_DEFAULT "%s%04lx:%s\n", lvl, first & 0xffff, str); } set_fs(fs); } static void dump_instr(const char *lvl, struct pt_regs *regs) { unsigned long addr = instruction_pointer(regs); const int width = 8; mm_segment_t fs; char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str; int i; /* * We need to switch to kernel mode so that we can use __get_user * to safely read from kernel space. Note that we now dump the * code first, just in case the backtrace kills us. */ fs = get_fs(); set_fs(KERNEL_DS); for (i = -4; i < 1; i++) { unsigned int val, bad; bad = __get_user(val, &((u32 *)addr)[i]); if (!bad) p += sprintf(p, i == 0 ? 
"(%0*x) " : "%0*x ", width, val); else { p += sprintf(p, "bad PC value"); break; } } printk(KERN_DEFAULT "%sCode: %s\n", lvl, str); set_fs(fs); } static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) { unsigned int fp, mode; int ok = 1; printk(KERN_DEFAULT "Backtrace: "); if (!tsk) tsk = current; if (regs) { fp = regs->UCreg_fp; mode = processor_mode(regs); } else if (tsk != current) { fp = thread_saved_fp(tsk); mode = 0x10; } else { asm("mov %0, fp" : "=r" (fp) : : "cc"); mode = 0x10; } if (!fp) { printk("no frame pointer"); ok = 0; } else if (verify_stack(fp)) { printk("invalid frame pointer 0x%08x", fp); ok = 0; } else if (fp < (unsigned long)end_of_stack(tsk)) printk("frame pointer underflow"); printk("\n"); if (ok) c_backtrace(fp, mode); } void dump_stack(void) { dump_backtrace(NULL, NULL); } EXPORT_SYMBOL(dump_stack); void show_stack(struct task_struct *tsk, unsigned long *sp) { dump_backtrace(NULL, tsk); barrier(); } static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs) { struct task_struct *tsk = thread->task; static int die_counter; int ret; printk(KERN_EMERG "Internal error: %s: %x [#%d]\n", str, err, ++die_counter); /* trap and error numbers are mostly meaningless on UniCore */ ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, \ SIGSEGV); if (ret == NOTIFY_STOP) return ret; print_modules(); __show_regs(regs); printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n", TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1); if (!user_mode(regs) || in_interrupt()) { dump_mem(KERN_EMERG, "Stack: ", regs->UCreg_sp, THREAD_SIZE + (unsigned long)task_stack_page(tsk)); dump_backtrace(regs, tsk); dump_instr(KERN_EMERG, regs); } return ret; } DEFINE_SPINLOCK(die_lock); /* * This function is protected against re-entrancy. */ void die(const char *str, struct pt_regs *regs, int err) { struct thread_info *thread = current_thread_info(); int ret; oops_enter(); spin_lock_irq(&die_lock); console_verbose(); bust_spinlocks(1); ret = __die(str, err, thread, regs); bust_spinlocks(0); add_taint(TAINT_DIE); spin_unlock_irq(&die_lock); oops_exit(); if (in_interrupt()) panic("Fatal exception in interrupt"); if (panic_on_oops) panic("Fatal exception"); if (ret != NOTIFY_STOP) do_exit(SIGSEGV); } void uc32_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, unsigned long err, unsigned long trap) { if (user_mode(regs)) { current->thread.error_code = err; current->thread.trap_no = trap; force_sig_info(info->si_signo, info, current); } else die(str, regs, err); } /* * bad_mode handles the impossible case in the vectors. If you see one of * these, then it's extremely serious, and could mean you have buggy hardware. * It never returns, and never tries to sync. We hope that we can at least * dump out some state information... 
*/ asmlinkage void bad_mode(struct pt_regs *regs, unsigned int reason) { console_verbose(); printk(KERN_CRIT "Bad mode detected with reason 0x%x\n", reason); die("Oops - bad mode", regs, 0); local_irq_disable(); panic("bad mode"); } void __pte_error(const char *file, int line, unsigned long val) { printk(KERN_DEFAULT "%s:%d: bad pte %08lx.\n", file, line, val); } void __pmd_error(const char *file, int line, unsigned long val) { printk(KERN_DEFAULT "%s:%d: bad pmd %08lx.\n", file, line, val); } void __pgd_error(const char *file, int line, unsigned long val) { printk(KERN_DEFAULT "%s:%d: bad pgd %08lx.\n", file, line, val); } asmlinkage void __div0(void) { printk(KERN_DEFAULT "Division by zero in kernel.\n"); dump_stack(); } EXPORT_SYMBOL(__div0); void abort(void) { BUG(); /* if that doesn't kill us, halt */ panic("Oops failed to kill thread"); } EXPORT_SYMBOL(abort); void __init trap_init(void) { return; } void __init early_trap_init(void) { unsigned long vectors = VECTORS_BASE; /* * Copy the vectors, stubs (in entry-unicore.S) * into the vector page, mapped at 0xffff0000, and ensure these * are visible to the instruction stream. */ memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start); memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start); early_signal_init(); flush_icache_range(vectors, vectors + PAGE_SIZE); }
gpl-2.0
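dump_mem() in the traps.c record above prints a stack region as rows of up to eight 32-bit words, 32 bytes per line, reading each word through __get_user() so a bad address cannot crash the dump. A rough user-space equivalent over a local buffer, where hexdump32 is a made-up name and no fault handling is needed:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Print 'len' bytes of 'buf' as rows of eight 32-bit words, mimicking
 * the row layout used by the kernel's dump_mem(). */
static void hexdump32(const void *buf, size_t len)
{
	const uint32_t *p = buf;
	size_t words = len / 4;

	for (size_t i = 0; i < words; i += 8) {
		printf("%04zx:", i * 4);
		for (size_t j = i; j < i + 8 && j < words; j++)
			printf(" %08x", p[j]);
		printf("\n");
	}
}

int main(void)
{
	uint32_t sample[16];

	for (int i = 0; i < 16; i++)
		sample[i] = 0x1000 + i;
	hexdump32(sample, sizeof(sample));
	return 0;
}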
nikitines/zte-kernel-roamer2
drivers/media/rc/keymaps/rc-eztv.c
3094
2618
/* eztv.h - Keytable for eztv Remote Controller * * keymap imported from ir-keymaps.c * * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <media/rc-map.h> /* Alfons Geser <a.geser@cox.net> * updates from Job D. R. Borges <jobdrb@ig.com.br> */ static struct rc_map_table eztv[] = { { 0x12, KEY_POWER }, { 0x01, KEY_TV }, /* DVR */ { 0x15, KEY_DVD }, /* DVD */ { 0x17, KEY_AUDIO }, /* music */ /* DVR mode / DVD mode / music mode */ { 0x1b, KEY_MUTE }, /* mute */ { 0x02, KEY_LANGUAGE }, /* MTS/SAP / audio / autoseek */ { 0x1e, KEY_SUBTITLE }, /* closed captioning / subtitle / seek */ { 0x16, KEY_ZOOM }, /* full screen */ { 0x1c, KEY_VIDEO }, /* video source / eject / delall */ { 0x1d, KEY_RESTART }, /* playback / angle / del */ { 0x2f, KEY_SEARCH }, /* scan / menu / playlist */ { 0x30, KEY_CHANNEL }, /* CH surfing / bookmark / memo */ { 0x31, KEY_HELP }, /* help */ { 0x32, KEY_MODE }, /* num/memo */ { 0x33, KEY_ESC }, /* cancel */ { 0x0c, KEY_UP }, /* up */ { 0x10, KEY_DOWN }, /* down */ { 0x08, KEY_LEFT }, /* left */ { 0x04, KEY_RIGHT }, /* right */ { 0x03, KEY_SELECT }, /* select */ { 0x1f, KEY_REWIND }, /* rewind */ { 0x20, KEY_PLAYPAUSE },/* play/pause */ { 0x29, KEY_FORWARD }, /* forward */ { 0x14, KEY_AGAIN }, /* repeat */ { 0x2b, KEY_RECORD }, /* recording */ { 0x2c, KEY_STOP }, /* stop */ { 0x2d, KEY_PLAY }, /* play */ { 0x2e, KEY_CAMERA }, /* snapshot / shuffle */ { 0x00, KEY_0 }, { 0x05, KEY_1 }, { 0x06, KEY_2 }, { 0x07, KEY_3 }, { 0x09, KEY_4 }, { 0x0a, KEY_5 }, { 0x0b, KEY_6 }, { 0x0d, KEY_7 }, { 0x0e, KEY_8 }, { 0x0f, KEY_9 }, { 0x2a, KEY_VOLUMEUP }, { 0x11, KEY_VOLUMEDOWN }, { 0x18, KEY_CHANNELUP },/* CH.tracking up */ { 0x19, KEY_CHANNELDOWN },/* CH.tracking down */ { 0x13, KEY_ENTER }, /* enter */ { 0x21, KEY_DOT }, /* . (decimal dot) */ }; static struct rc_map_list eztv_map = { .map = { .scan = eztv, .size = ARRAY_SIZE(eztv), .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ .name = RC_MAP_EZTV, } }; static int __init init_rc_map_eztv(void) { return rc_map_register(&eztv_map); } static void __exit exit_rc_map_eztv(void) { rc_map_unregister(&eztv_map); } module_init(init_rc_map_eztv) module_exit(exit_rc_map_eztv) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
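An rc-core keymap like the eztv table above is only a scancode-to-keycode array; when the receiver decodes a button press, the framework looks the scancode up and injects the matching input keycode. A toy lookup over a three-entry excerpt, with keycode numbers hard-coded from input-event-codes.h (KEY_MUTE 113, KEY_VOLUMEUP 115, KEY_POWER 116); map_entry and find_keycode are illustrative names, and the kernel's real lookup differs in detail:

#include <stdio.h>

struct map_entry {
	unsigned int scancode;
	unsigned int keycode;
};

/* Tiny excerpt of the eztv table from the record above. */
static const struct map_entry demo_map[] = {
	{ 0x12, 116 },	/* KEY_POWER */
	{ 0x1b, 113 },	/* KEY_MUTE */
	{ 0x2a, 115 },	/* KEY_VOLUMEUP */
};

/* Simple linear scan; returns 0 when the scancode is unknown. */
static unsigned int find_keycode(unsigned int scancode)
{
	for (unsigned int i = 0; i < sizeof(demo_map) / sizeof(demo_map[0]); i++)
		if (demo_map[i].scancode == scancode)
			return demo_map[i].keycode;
	return 0;
}

int main(void)
{
	printf("scancode 0x2a -> keycode %u\n", find_keycode(0x2a));
	return 0;
}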
Ezekeel/GLaDOS-nexus-s-ics
drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c
3094
2450
/* nec-terratec-cinergy-xs.h - Keytable for nec_terratec_cinergy_xs Remote Controller * * keymap imported from ir-keymaps.c * * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <media/rc-map.h> /* Terratec Cinergy Hybrid T USB XS FM Mauro Carvalho Chehab <mchehab@redhat.com> */ static struct rc_map_table nec_terratec_cinergy_xs[] = { { 0x1441, KEY_HOME}, { 0x1401, KEY_POWER2}, { 0x1442, KEY_MENU}, /* DVD menu */ { 0x1443, KEY_SUBTITLE}, { 0x1444, KEY_TEXT}, /* Teletext */ { 0x1445, KEY_DELETE}, { 0x1402, KEY_1}, { 0x1403, KEY_2}, { 0x1404, KEY_3}, { 0x1405, KEY_4}, { 0x1406, KEY_5}, { 0x1407, KEY_6}, { 0x1408, KEY_7}, { 0x1409, KEY_8}, { 0x140a, KEY_9}, { 0x140c, KEY_0}, { 0x140b, KEY_TUNER}, /* AV */ { 0x140d, KEY_MODE}, /* A.B */ { 0x1446, KEY_TV}, { 0x1447, KEY_DVD}, { 0x1449, KEY_VIDEO}, { 0x144a, KEY_RADIO}, /* Music */ { 0x144b, KEY_CAMERA}, /* PIC */ { 0x1410, KEY_UP}, { 0x1411, KEY_LEFT}, { 0x1412, KEY_OK}, { 0x1413, KEY_RIGHT}, { 0x1414, KEY_DOWN}, { 0x140f, KEY_EPG}, { 0x1416, KEY_INFO}, { 0x144d, KEY_BACKSPACE}, { 0x141c, KEY_VOLUMEUP}, { 0x141e, KEY_VOLUMEDOWN}, { 0x144c, KEY_PLAY}, { 0x141d, KEY_MUTE}, { 0x141b, KEY_CHANNELUP}, { 0x141f, KEY_CHANNELDOWN}, { 0x1417, KEY_RED}, { 0x1418, KEY_GREEN}, { 0x1419, KEY_YELLOW}, { 0x141a, KEY_BLUE}, { 0x1458, KEY_RECORD}, { 0x1448, KEY_STOP}, { 0x1440, KEY_PAUSE}, { 0x1454, KEY_LAST}, { 0x144e, KEY_REWIND}, { 0x144f, KEY_FASTFORWARD}, { 0x145c, KEY_NEXT}, }; static struct rc_map_list nec_terratec_cinergy_xs_map = { .map = { .scan = nec_terratec_cinergy_xs, .size = ARRAY_SIZE(nec_terratec_cinergy_xs), .rc_type = RC_TYPE_NEC, .name = RC_MAP_NEC_TERRATEC_CINERGY_XS, } }; static int __init init_rc_map_nec_terratec_cinergy_xs(void) { return rc_map_register(&nec_terratec_cinergy_xs_map); } static void __exit exit_rc_map_nec_terratec_cinergy_xs(void) { rc_map_unregister(&nec_terratec_cinergy_xs_map); } module_init(init_rc_map_nec_terratec_cinergy_xs) module_exit(exit_rc_map_nec_terratec_cinergy_xs) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
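The Terratec Cinergy table above uses NEC scancodes of the form 0x14xx; my reading is that rc-core packs non-extended NEC codes as (address << 8) | command, so 0x14 is the remote's address byte and the low byte is the per-button command. A small sketch that splits a scancode back into those fields (nec_addr and nec_cmd are invented helpers, not kernel API):

#include <stdio.h>
#include <stdint.h>

/* Assumed packing for non-extended NEC: high byte = address, low byte = command. */
static uint8_t nec_addr(uint16_t scancode) { return scancode >> 8; }
static uint8_t nec_cmd(uint16_t scancode) { return scancode & 0xff; }

int main(void)
{
	const uint16_t sample[] = { 0x1441, 0x1401, 0x141d };	/* KEY_HOME, KEY_POWER2, KEY_MUTE */

	for (unsigned int i = 0; i < sizeof(sample) / sizeof(sample[0]); i++)
		printf("scancode 0x%04x -> addr 0x%02x cmd 0x%02x\n",
		       sample[i], nec_addr(sample[i]), nec_cmd(sample[i]));
	return 0;
}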
aranb/linux
drivers/scsi/scsi_netlink.c
3350
3603
/* * scsi_netlink.c - SCSI Transport Netlink Interface * * Copyright (C) 2006 James Smart, Emulex Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/time.h> #include <linux/jiffies.h> #include <linux/security.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/export.h> #include <net/sock.h> #include <net/netlink.h> #include <scsi/scsi_netlink.h> #include "scsi_priv.h" struct sock *scsi_nl_sock = NULL; EXPORT_SYMBOL_GPL(scsi_nl_sock); /** * scsi_nl_rcv_msg - Receive message handler. * @skb: socket receive buffer * * Description: Extracts message from a receive buffer. * Validates message header and calls appropriate transport message handler * * **/ static void scsi_nl_rcv_msg(struct sk_buff *skb) { struct nlmsghdr *nlh; struct scsi_nl_hdr *hdr; u32 rlen; int err, tport; while (skb->len >= NLMSG_HDRLEN) { err = 0; nlh = nlmsg_hdr(skb); if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) || (skb->len < nlh->nlmsg_len)) { printk(KERN_WARNING "%s: discarding partial skb\n", __func__); return; } rlen = NLMSG_ALIGN(nlh->nlmsg_len); if (rlen > skb->len) rlen = skb->len; if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) { err = -EBADMSG; goto next_msg; } hdr = nlmsg_data(nlh); if ((hdr->version != SCSI_NL_VERSION) || (hdr->magic != SCSI_NL_MAGIC)) { err = -EPROTOTYPE; goto next_msg; } if (!netlink_capable(skb, CAP_SYS_ADMIN)) { err = -EPERM; goto next_msg; } if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) { printk(KERN_WARNING "%s: discarding partial message\n", __func__); goto next_msg; } /* * Deliver message to the appropriate transport */ tport = hdr->transport; if (tport == SCSI_NL_TRANSPORT) { switch (hdr->msgtype) { case SCSI_NL_SHOST_VENDOR: /* Locate the driver that corresponds to the message */ err = -ESRCH; break; default: err = -EBADR; break; } if (err) printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n", __func__, hdr->msgtype, err); } else err = -ENOENT; next_msg: if ((err) || (nlh->nlmsg_flags & NLM_F_ACK)) netlink_ack(skb, nlh, err); skb_pull(skb, rlen); } } /** * scsi_netlink_init - Called by SCSI subsystem to initialize * the SCSI transport netlink interface * **/ void scsi_netlink_init(void) { struct netlink_kernel_cfg cfg = { .input = scsi_nl_rcv_msg, .groups = SCSI_NL_GRP_CNT, }; scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT, &cfg); if (!scsi_nl_sock) { printk(KERN_ERR "%s: register of receive handler failed\n", __func__); return; } return; } /** * scsi_netlink_exit - Called by SCSI subsystem to disable the SCSI transport netlink interface * **/ void scsi_netlink_exit(void) { if (scsi_nl_sock) { netlink_kernel_release(scsi_nl_sock); } return; }
gpl-2.0
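scsi_nl_rcv_msg() in the record above is a standard netlink parsing loop: verify that a complete nlmsghdr and its payload are present, check message type, version and magic, then advance by the aligned message length. In user space the same walk is normally written with the NLMSG_OK/NLMSG_NEXT macros; a sketch, where the buffer would ordinarily come from recv() on a netlink socket:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>

/* Visit every complete netlink message in 'buf'. NLMSG_OK checks that the
 * header and full payload fit within 'len'; NLMSG_NEXT advances past the
 * aligned message and decrements 'len' accordingly. */
static void walk_messages(void *buf, int len)
{
	struct nlmsghdr *nlh;

	for (nlh = buf; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
		if (nlh->nlmsg_type == NLMSG_DONE)
			break;
		if (nlh->nlmsg_type == NLMSG_ERROR) {
			fprintf(stderr, "netlink error message\n");
			continue;
		}
		printf("msg type %u, payload %u bytes\n",
		       nlh->nlmsg_type, (unsigned int)NLMSG_PAYLOAD(nlh, 0));
	}
}

int main(void)
{
	/* A single empty NLMSG_DONE message, just to exercise the loop. */
	struct nlmsghdr done = {
		.nlmsg_len = NLMSG_LENGTH(0),
		.nlmsg_type = NLMSG_DONE,
	};

	walk_messages(&done, sizeof(done));
	return 0;
}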
kgp700/nexroid-sgs4ltea-kk
net/netfilter/ipvs/ip_vs_pe_sip.c
3862
4359
#define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <net/ip_vs.h> #include <net/netfilter/nf_conntrack.h> #include <linux/netfilter/nf_conntrack_sip.h> #ifdef CONFIG_IP_VS_DEBUG static const char *ip_vs_dbg_callid(char *buf, size_t buf_len, const char *callid, size_t callid_len, int *idx) { size_t len = min(min(callid_len, (size_t)64), buf_len - *idx - 1); memcpy(buf + *idx, callid, len); buf[*idx+len] = '\0'; *idx += len + 1; return buf + *idx - len; } #define IP_VS_DEBUG_CALLID(callid, len) \ ip_vs_dbg_callid(ip_vs_dbg_buf, sizeof(ip_vs_dbg_buf), \ callid, len, &ip_vs_dbg_idx) #endif static int get_callid(const char *dptr, unsigned int dataoff, unsigned int datalen, unsigned int *matchoff, unsigned int *matchlen) { /* Find callid */ while (1) { int ret = ct_sip_get_header(NULL, dptr, dataoff, datalen, SIP_HDR_CALL_ID, matchoff, matchlen); if (ret > 0) break; if (!ret) return 0; dataoff += *matchoff; } /* Empty callid is useless */ if (!*matchlen) return -EINVAL; /* Too large is useless */ if (*matchlen > IP_VS_PEDATA_MAXLEN) return -EINVAL; /* SIP headers are always followed by a line terminator */ if (*matchoff + *matchlen == datalen) return -EINVAL; /* RFC 2543 allows lines to be terminated with CR, LF or CRLF, * RFC 3261 allows only CRLF, we support both. */ if (*(dptr + *matchoff + *matchlen) != '\r' && *(dptr + *matchoff + *matchlen) != '\n') return -EINVAL; IP_VS_DBG_BUF(9, "SIP callid %s (%d bytes)\n", IP_VS_DEBUG_CALLID(dptr + *matchoff, *matchlen), *matchlen); return 0; } static int ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb) { struct ip_vs_iphdr iph; unsigned int dataoff, datalen, matchoff, matchlen; const char *dptr; int retc; ip_vs_fill_iphdr(p->af, skb_network_header(skb), &iph); /* Only useful with UDP */ if (iph.protocol != IPPROTO_UDP) return -EINVAL; /* No Data ? */ dataoff = iph.len + sizeof(struct udphdr); if (dataoff >= skb->len) return -EINVAL; if ((retc=skb_linearize(skb)) < 0) return retc; dptr = skb->data + dataoff; datalen = skb->len - dataoff; if (get_callid(dptr, dataoff, datalen, &matchoff, &matchlen)) return -EINVAL; /* N.B: pe_data is only set on success, * this allows fallback to the default persistence logic on failure */ p->pe_data = kmemdup(dptr + matchoff, matchlen, GFP_ATOMIC); if (!p->pe_data) return -ENOMEM; p->pe_data_len = matchlen; return 0; } static bool ip_vs_sip_ct_match(const struct ip_vs_conn_param *p, struct ip_vs_conn *ct) { bool ret = false; if (ct->af == p->af && ip_vs_addr_equal(p->af, p->caddr, &ct->caddr) && /* protocol should only be IPPROTO_IP if * d_addr is a fwmark */ ip_vs_addr_equal(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af, p->vaddr, &ct->vaddr) && ct->vport == p->vport && ct->flags & IP_VS_CONN_F_TEMPLATE && ct->protocol == p->protocol && ct->pe_data && ct->pe_data_len == p->pe_data_len && !memcmp(ct->pe_data, p->pe_data, p->pe_data_len)) ret = true; IP_VS_DBG_BUF(9, "SIP template match %s %s->%s:%d %s\n", ip_vs_proto_name(p->protocol), IP_VS_DEBUG_CALLID(p->pe_data, p->pe_data_len), IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport), ret ? 
"hit" : "not hit"); return ret; } static u32 ip_vs_sip_hashkey_raw(const struct ip_vs_conn_param *p, u32 initval, bool inverse) { return jhash(p->pe_data, p->pe_data_len, initval); } static int ip_vs_sip_show_pe_data(const struct ip_vs_conn *cp, char *buf) { memcpy(buf, cp->pe_data, cp->pe_data_len); return cp->pe_data_len; } static struct ip_vs_pe ip_vs_sip_pe = { .name = "sip", .refcnt = ATOMIC_INIT(0), .module = THIS_MODULE, .n_list = LIST_HEAD_INIT(ip_vs_sip_pe.n_list), .fill_param = ip_vs_sip_fill_param, .ct_match = ip_vs_sip_ct_match, .hashkey_raw = ip_vs_sip_hashkey_raw, .show_pe_data = ip_vs_sip_show_pe_data, }; static int __init ip_vs_sip_init(void) { return register_ip_vs_pe(&ip_vs_sip_pe); } static void __exit ip_vs_sip_cleanup(void) { unregister_ip_vs_pe(&ip_vs_sip_pe); } module_init(ip_vs_sip_init); module_exit(ip_vs_sip_cleanup); MODULE_LICENSE("GPL");
gpl-2.0
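The SIP persistence engine in the record above keys connections on the Call-ID header: ip_vs_sip_fill_param() copies the header value into pe_data, and ip_vs_sip_hashkey_raw() feeds those bytes to jhash() so every packet of the same call lands on the same persistence template. A stripped-down sketch of the idea using FNV-1a in place of jhash (the hash choice and names are illustrative; the Call-ID string is the RFC 3261 example):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* FNV-1a stand-in for jhash(): any stable hash over the Call-ID bytes
 * works, since the goal is only that all packets of one call pick the
 * same bucket (and therefore the same real server). */
static uint32_t hash_callid(const char *callid, size_t len)
{
	uint32_t h = 2166136261u;

	for (size_t i = 0; i < len; i++) {
		h ^= (unsigned char)callid[i];
		h *= 16777619u;
	}
	return h;
}

int main(void)
{
	const char *callid = "a84b4c76e66710@pc33.atlanta.com";
	unsigned int nservers = 4;

	printf("Call-ID %s -> bucket %u of %u\n",
	       callid, hash_callid(callid, strlen(callid)) % nservers, nservers);
	return 0;
}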
Kevindeving/android_kernel_lge_gee
drivers/crypto/amcc/crypto4xx_core.c
4886
34408
/** * AMCC SoC PPC4xx Crypto Driver * * Copyright (c) 2008 Applied Micro Circuits Corporation. * All rights reserved. James Hsiao <jhsiao@amcc.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * This file implements AMCC crypto offload Linux device driver for use with * Linux CryptoAPI. */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/spinlock_types.h> #include <linux/random.h> #include <linux/scatterlist.h> #include <linux/crypto.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/init.h> #include <linux/of_platform.h> #include <linux/slab.h> #include <asm/dcr.h> #include <asm/dcr-regs.h> #include <asm/cacheflush.h> #include <crypto/aes.h> #include <crypto/sha.h> #include "crypto4xx_reg_def.h" #include "crypto4xx_core.h" #include "crypto4xx_sa.h" #define PPC4XX_SEC_VERSION_STR "0.5" /** * PPC4xx Crypto Engine Initialization Routine */ static void crypto4xx_hw_init(struct crypto4xx_device *dev) { union ce_ring_size ring_size; union ce_ring_contol ring_ctrl; union ce_part_ring_size part_ring_size; union ce_io_threshold io_threshold; u32 rand_num; union ce_pe_dma_cfg pe_dma_cfg; u32 device_ctrl; writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG); /* setup pe dma, include reset sg, pdr and pe, then release reset */ pe_dma_cfg.w = 0; pe_dma_cfg.bf.bo_sgpd_en = 1; pe_dma_cfg.bf.bo_data_en = 0; pe_dma_cfg.bf.bo_sa_en = 1; pe_dma_cfg.bf.bo_pd_en = 1; pe_dma_cfg.bf.dynamic_sa_en = 1; pe_dma_cfg.bf.reset_sg = 1; pe_dma_cfg.bf.reset_pdr = 1; pe_dma_cfg.bf.reset_pe = 1; writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG); /* un reset pe,sg and pdr */ pe_dma_cfg.bf.pe_mode = 0; pe_dma_cfg.bf.reset_sg = 0; pe_dma_cfg.bf.reset_pdr = 0; pe_dma_cfg.bf.reset_pe = 0; pe_dma_cfg.bf.bo_td_en = 0; writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG); writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE); writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE); writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL); get_random_bytes(&rand_num, sizeof(rand_num)); writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L); get_random_bytes(&rand_num, sizeof(rand_num)); writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H); ring_size.w = 0; ring_size.bf.ring_offset = PPC4XX_PD_SIZE; ring_size.bf.ring_size = PPC4XX_NUM_PD; writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE); ring_ctrl.w = 0; writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL); device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL); device_ctrl |= PPC4XX_DC_3DES_EN; writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL); writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE); writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE); part_ring_size.w = 0; part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE; part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE; writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE); writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG); io_threshold.w = 0; io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD; io_threshold.bf.input_threshold 
= PPC4XX_INPUT_THRESHOLD; writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD); writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR); writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR); writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR); writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR); writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR); writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR); writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR); /* un reset pe,sg and pdr */ pe_dma_cfg.bf.pe_mode = 1; pe_dma_cfg.bf.reset_sg = 0; pe_dma_cfg.bf.reset_pdr = 0; pe_dma_cfg.bf.reset_pe = 0; pe_dma_cfg.bf.bo_td_en = 0; writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG); /*clear all pending interrupt*/ writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR); writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT); writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT); writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG); writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN); } int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size) { ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4, &ctx->sa_in_dma_addr, GFP_ATOMIC); if (ctx->sa_in == NULL) return -ENOMEM; ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4, &ctx->sa_out_dma_addr, GFP_ATOMIC); if (ctx->sa_out == NULL) { dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4, ctx->sa_in, ctx->sa_in_dma_addr); return -ENOMEM; } memset(ctx->sa_in, 0, size * 4); memset(ctx->sa_out, 0, size * 4); ctx->sa_len = size; return 0; } void crypto4xx_free_sa(struct crypto4xx_ctx *ctx) { if (ctx->sa_in != NULL) dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4, ctx->sa_in, ctx->sa_in_dma_addr); if (ctx->sa_out != NULL) dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4, ctx->sa_out, ctx->sa_out_dma_addr); ctx->sa_in_dma_addr = 0; ctx->sa_out_dma_addr = 0; ctx->sa_len = 0; } u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx) { ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device, sizeof(struct sa_state_record), &ctx->state_record_dma_addr, GFP_ATOMIC); if (!ctx->state_record_dma_addr) return -ENOMEM; memset(ctx->state_record, 0, sizeof(struct sa_state_record)); return 0; } void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx) { if (ctx->state_record != NULL) dma_free_coherent(ctx->dev->core_dev->device, sizeof(struct sa_state_record), ctx->state_record, ctx->state_record_dma_addr); ctx->state_record_dma_addr = 0; } /** * alloc memory for the gather ring * no need to alloc buf for the ring * gdr_tail, gdr_head and gdr_count are initialized by this function */ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev) { int i; struct pd_uinfo *pd_uinfo; dev->pdr = dma_alloc_coherent(dev->core_dev->device, sizeof(struct ce_pd) * PPC4XX_NUM_PD, &dev->pdr_pa, GFP_ATOMIC); if (!dev->pdr) return -ENOMEM; dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD, GFP_KERNEL); if (!dev->pdr_uinfo) { dma_free_coherent(dev->core_dev->device, sizeof(struct ce_pd) * PPC4XX_NUM_PD, dev->pdr, dev->pdr_pa); return -ENOMEM; } memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD); dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD, &dev->shadow_sa_pool_pa, GFP_ATOMIC); if (!dev->shadow_sa_pool) return -ENOMEM; dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device, sizeof(struct sa_state_record) * PPC4XX_NUM_PD, &dev->shadow_sr_pool_pa, GFP_ATOMIC); if 
(!dev->shadow_sr_pool) return -ENOMEM; for (i = 0; i < PPC4XX_NUM_PD; i++) { pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo + sizeof(struct pd_uinfo) * i); /* alloc 256 bytes which is enough for any kind of dynamic sa */ pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i; pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i; /* alloc state record */ pd_uinfo->sr_va = dev->shadow_sr_pool + sizeof(struct sa_state_record) * i; pd_uinfo->sr_pa = dev->shadow_sr_pool_pa + sizeof(struct sa_state_record) * i; } return 0; } static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev) { if (dev->pdr != NULL) dma_free_coherent(dev->core_dev->device, sizeof(struct ce_pd) * PPC4XX_NUM_PD, dev->pdr, dev->pdr_pa); if (dev->shadow_sa_pool) dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD, dev->shadow_sa_pool, dev->shadow_sa_pool_pa); if (dev->shadow_sr_pool) dma_free_coherent(dev->core_dev->device, sizeof(struct sa_state_record) * PPC4XX_NUM_PD, dev->shadow_sr_pool, dev->shadow_sr_pool_pa); kfree(dev->pdr_uinfo); } static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev) { u32 retval; u32 tmp; retval = dev->pdr_head; tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD; if (tmp == dev->pdr_tail) return ERING_WAS_FULL; dev->pdr_head = tmp; return retval; } static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx) { struct pd_uinfo *pd_uinfo; unsigned long flags; pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo + sizeof(struct pd_uinfo) * idx); spin_lock_irqsave(&dev->core_dev->lock, flags); if (dev->pdr_tail != PPC4XX_LAST_PD) dev->pdr_tail++; else dev->pdr_tail = 0; pd_uinfo->state = PD_ENTRY_FREE; spin_unlock_irqrestore(&dev->core_dev->lock, flags); return 0; } static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev, dma_addr_t *pd_dma, u32 idx) { *pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx; return dev->pdr + sizeof(struct ce_pd) * idx; } /** * alloc memory for the gather ring * no need to alloc buf for the ring * gdr_tail, gdr_head and gdr_count are initialized by this function */ static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev) { dev->gdr = dma_alloc_coherent(dev->core_dev->device, sizeof(struct ce_gd) * PPC4XX_NUM_GD, &dev->gdr_pa, GFP_ATOMIC); if (!dev->gdr) return -ENOMEM; memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD); return 0; } static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev) { dma_free_coherent(dev->core_dev->device, sizeof(struct ce_gd) * PPC4XX_NUM_GD, dev->gdr, dev->gdr_pa); } /* * when this function is called. 
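* (crypto4xx_build_pd() satisfies this by holding dev->core_dev->lock with * interrupts saved while it reserves gather descriptors here)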
* preemption or interrupt must be disabled */ u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n) { u32 retval; u32 tmp; if (n >= PPC4XX_NUM_GD) return ERING_WAS_FULL; retval = dev->gdr_head; tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD; if (dev->gdr_head > dev->gdr_tail) { if (tmp < dev->gdr_head && tmp >= dev->gdr_tail) return ERING_WAS_FULL; } else if (dev->gdr_head < dev->gdr_tail) { if (tmp < dev->gdr_head || tmp >= dev->gdr_tail) return ERING_WAS_FULL; } dev->gdr_head = tmp; return retval; } static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev) { unsigned long flags; spin_lock_irqsave(&dev->core_dev->lock, flags); if (dev->gdr_tail == dev->gdr_head) { spin_unlock_irqrestore(&dev->core_dev->lock, flags); return 0; } if (dev->gdr_tail != PPC4XX_LAST_GD) dev->gdr_tail++; else dev->gdr_tail = 0; spin_unlock_irqrestore(&dev->core_dev->lock, flags); return 0; } static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev, dma_addr_t *gd_dma, u32 idx) { *gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx; return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx); } /** * alloc memory for the scatter ring * need to alloc buf for the ring * sdr_tail, sdr_head and sdr_count are initialized by this function */ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev) { int i; struct ce_sd *sd_array; /* alloc memory for scatter descriptor ring */ dev->sdr = dma_alloc_coherent(dev->core_dev->device, sizeof(struct ce_sd) * PPC4XX_NUM_SD, &dev->sdr_pa, GFP_ATOMIC); if (!dev->sdr) return -ENOMEM; dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE; dev->scatter_buffer_va = dma_alloc_coherent(dev->core_dev->device, dev->scatter_buffer_size * PPC4XX_NUM_SD, &dev->scatter_buffer_pa, GFP_ATOMIC); if (!dev->scatter_buffer_va) { dma_free_coherent(dev->core_dev->device, sizeof(struct ce_sd) * PPC4XX_NUM_SD, dev->sdr, dev->sdr_pa); return -ENOMEM; } sd_array = dev->sdr; for (i = 0; i < PPC4XX_NUM_SD; i++) { sd_array[i].ptr = dev->scatter_buffer_pa + dev->scatter_buffer_size * i; } return 0; } static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev) { if (dev->sdr != NULL) dma_free_coherent(dev->core_dev->device, sizeof(struct ce_sd) * PPC4XX_NUM_SD, dev->sdr, dev->sdr_pa); if (dev->scatter_buffer_va != NULL) dma_free_coherent(dev->core_dev->device, dev->scatter_buffer_size * PPC4XX_NUM_SD, dev->scatter_buffer_va, dev->scatter_buffer_pa); } /* * when this function is called. 
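* (the checks below handle both the wrapped and non-wrapped ring layout * before the head is advanced)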
* preemption or interrupt must be disabled */ static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n) { u32 retval; u32 tmp; if (n >= PPC4XX_NUM_SD) return ERING_WAS_FULL; retval = dev->sdr_head; tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD; if (dev->sdr_head > dev->gdr_tail) { if (tmp < dev->sdr_head && tmp >= dev->sdr_tail) return ERING_WAS_FULL; } else if (dev->sdr_head < dev->sdr_tail) { if (tmp < dev->sdr_head || tmp >= dev->sdr_tail) return ERING_WAS_FULL; } /* the head = tail, or empty case is already take cared */ dev->sdr_head = tmp; return retval; } static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev) { unsigned long flags; spin_lock_irqsave(&dev->core_dev->lock, flags); if (dev->sdr_tail == dev->sdr_head) { spin_unlock_irqrestore(&dev->core_dev->lock, flags); return 0; } if (dev->sdr_tail != PPC4XX_LAST_SD) dev->sdr_tail++; else dev->sdr_tail = 0; spin_unlock_irqrestore(&dev->core_dev->lock, flags); return 0; } static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev, dma_addr_t *sd_dma, u32 idx) { *sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx; return (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx); } static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev, dma_addr_t *addr, u32 *length, u32 *idx, u32 *offset, u32 *nbytes) { u32 len; if (*length > dev->scatter_buffer_size) { memcpy(phys_to_virt(*addr), dev->scatter_buffer_va + *idx * dev->scatter_buffer_size + *offset, dev->scatter_buffer_size); *offset = 0; *length -= dev->scatter_buffer_size; *nbytes -= dev->scatter_buffer_size; if (*idx == PPC4XX_LAST_SD) *idx = 0; else (*idx)++; *addr = *addr + dev->scatter_buffer_size; return 1; } else if (*length < dev->scatter_buffer_size) { memcpy(phys_to_virt(*addr), dev->scatter_buffer_va + *idx * dev->scatter_buffer_size + *offset, *length); if ((*offset + *length) == dev->scatter_buffer_size) { if (*idx == PPC4XX_LAST_SD) *idx = 0; else (*idx)++; *nbytes -= *length; *offset = 0; } else { *nbytes -= *length; *offset += *length; } return 0; } else { len = (*nbytes <= dev->scatter_buffer_size) ? (*nbytes) : dev->scatter_buffer_size; memcpy(phys_to_virt(*addr), dev->scatter_buffer_va + *idx * dev->scatter_buffer_size + *offset, len); *offset = 0; *nbytes -= len; if (*idx == PPC4XX_LAST_SD) *idx = 0; else (*idx)++; return 0; } } static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev, struct ce_pd *pd, struct pd_uinfo *pd_uinfo, u32 nbytes, struct scatterlist *dst) { dma_addr_t addr; u32 this_sd; u32 offset; u32 len; u32 i; u32 sg_len; struct scatterlist *sg; this_sd = pd_uinfo->first_sd; offset = 0; i = 0; while (nbytes) { sg = &dst[i]; sg_len = sg->length; addr = dma_map_page(dev->core_dev->device, sg_page(sg), sg->offset, sg->length, DMA_TO_DEVICE); if (offset == 0) { len = (nbytes <= sg->length) ? nbytes : sg->length; while (crypto4xx_fill_one_page(dev, &addr, &len, &this_sd, &offset, &nbytes)) ; if (!nbytes) return; i++; } else { len = (nbytes <= (dev->scatter_buffer_size - offset)) ? nbytes : (dev->scatter_buffer_size - offset); len = (sg->length < len) ? 
sg->length : len; while (crypto4xx_fill_one_page(dev, &addr, &len, &this_sd, &offset, &nbytes)) ; if (!nbytes) return; sg_len -= len; if (sg_len) { addr += len; while (crypto4xx_fill_one_page(dev, &addr, &sg_len, &this_sd, &offset, &nbytes)) ; } i++; } } } static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo, struct crypto4xx_ctx *ctx) { struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in; struct sa_state_record *state_record = (struct sa_state_record *) pd_uinfo->sr_va; if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) { memcpy((void *) pd_uinfo->dest_va, state_record->save_digest, SA_HASH_ALG_SHA1_DIGEST_SIZE); } return 0; } static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev, struct pd_uinfo *pd_uinfo) { int i; if (pd_uinfo->num_gd) { for (i = 0; i < pd_uinfo->num_gd; i++) crypto4xx_put_gd_to_gdr(dev); pd_uinfo->first_gd = 0xffffffff; pd_uinfo->num_gd = 0; } if (pd_uinfo->num_sd) { for (i = 0; i < pd_uinfo->num_sd; i++) crypto4xx_put_sd_to_sdr(dev); pd_uinfo->first_sd = 0xffffffff; pd_uinfo->num_sd = 0; } } static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev, struct pd_uinfo *pd_uinfo, struct ce_pd *pd) { struct crypto4xx_ctx *ctx; struct ablkcipher_request *ablk_req; struct scatterlist *dst; dma_addr_t addr; ablk_req = ablkcipher_request_cast(pd_uinfo->async_req); ctx = crypto_tfm_ctx(ablk_req->base.tfm); if (pd_uinfo->using_sd) { crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ablk_req->nbytes, ablk_req->dst); } else { dst = pd_uinfo->dest_va; addr = dma_map_page(dev->core_dev->device, sg_page(dst), dst->offset, dst->length, DMA_FROM_DEVICE); } crypto4xx_ret_sg_desc(dev, pd_uinfo); if (ablk_req->base.complete != NULL) ablk_req->base.complete(&ablk_req->base, 0); return 0; } static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev, struct pd_uinfo *pd_uinfo) { struct crypto4xx_ctx *ctx; struct ahash_request *ahash_req; ahash_req = ahash_request_cast(pd_uinfo->async_req); ctx = crypto_tfm_ctx(ahash_req->base.tfm); crypto4xx_copy_digest_to_dst(pd_uinfo, crypto_tfm_ctx(ahash_req->base.tfm)); crypto4xx_ret_sg_desc(dev, pd_uinfo); /* call user provided callback function x */ if (ahash_req->base.complete != NULL) ahash_req->base.complete(&ahash_req->base, 0); return 0; } static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx) { struct ce_pd *pd; struct pd_uinfo *pd_uinfo; pd = dev->pdr + sizeof(struct ce_pd)*idx; pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo)*idx; if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER) return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd); else return crypto4xx_ahash_done(dev, pd_uinfo); } /** * Note: Only use this function to copy items that is word aligned. 
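* The loop below copies whole 32-bit words; a 1-3 byte tail is zero-padded * and byte-reversed into one final word.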
*/ void crypto4xx_memcpy_le(unsigned int *dst, const unsigned char *buf, int len) { u8 *tmp; for (; len >= 4; buf += 4, len -= 4) *dst++ = cpu_to_le32(*(unsigned int *) buf); tmp = (u8 *)dst; switch (len) { case 3: *tmp++ = 0; *tmp++ = *(buf+2); *tmp++ = *(buf+1); *tmp++ = *buf; break; case 2: *tmp++ = 0; *tmp++ = 0; *tmp++ = *(buf+1); *tmp++ = *buf; break; case 1: *tmp++ = 0; *tmp++ = 0; *tmp++ = 0; *tmp++ = *buf; break; default: break; } } static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev) { crypto4xx_destroy_pdr(core_dev->dev); crypto4xx_destroy_gdr(core_dev->dev); crypto4xx_destroy_sdr(core_dev->dev); dev_set_drvdata(core_dev->device, NULL); iounmap(core_dev->dev->ce_base); kfree(core_dev->dev); kfree(core_dev); } void crypto4xx_return_pd(struct crypto4xx_device *dev, u32 pd_entry, struct ce_pd *pd, struct pd_uinfo *pd_uinfo) { /* irq should be already disabled */ dev->pdr_head = pd_entry; pd->pd_ctl.w = 0; pd->pd_ctl_len.w = 0; pd_uinfo->state = PD_ENTRY_FREE; } /* * derive number of elements in scatterlist * Shamlessly copy from talitos.c */ static int get_sg_count(struct scatterlist *sg_list, int nbytes) { struct scatterlist *sg = sg_list; int sg_nents = 0; while (nbytes) { sg_nents++; if (sg->length > nbytes) break; nbytes -= sg->length; sg = sg_next(sg); } return sg_nents; } static u32 get_next_gd(u32 current) { if (current != PPC4XX_LAST_GD) return current + 1; else return 0; } static u32 get_next_sd(u32 current) { if (current != PPC4XX_LAST_SD) return current + 1; else return 0; } u32 crypto4xx_build_pd(struct crypto_async_request *req, struct crypto4xx_ctx *ctx, struct scatterlist *src, struct scatterlist *dst, unsigned int datalen, void *iv, u32 iv_len) { struct crypto4xx_device *dev = ctx->dev; dma_addr_t addr, pd_dma, sd_dma, gd_dma; struct dynamic_sa_ctl *sa; struct scatterlist *sg; struct ce_gd *gd; struct ce_pd *pd; u32 num_gd, num_sd; u32 fst_gd = 0xffffffff; u32 fst_sd = 0xffffffff; u32 pd_entry; unsigned long flags; struct pd_uinfo *pd_uinfo = NULL; unsigned int nbytes = datalen, idx; unsigned int ivlen = 0; u32 gd_idx = 0; /* figure how many gd is needed */ num_gd = get_sg_count(src, datalen); if (num_gd == 1) num_gd = 0; /* figure how many sd is needed */ if (sg_is_last(dst) || ctx->is_hash) { num_sd = 0; } else { if (datalen > PPC4XX_SD_BUFFER_SIZE) { num_sd = datalen / PPC4XX_SD_BUFFER_SIZE; if (datalen % PPC4XX_SD_BUFFER_SIZE) num_sd++; } else { num_sd = 1; } } /* * The follow section of code needs to be protected * The gather ring and scatter ring needs to be consecutive * In case of run out of any kind of descriptor, the descriptor * already got must be return the original place. 
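* (the error paths below roll gdr_head/sdr_head back to the first * descriptor grabbed, so nothing is leaked before returning -EAGAIN)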
*/ spin_lock_irqsave(&dev->core_dev->lock, flags); if (num_gd) { fst_gd = crypto4xx_get_n_gd(dev, num_gd); if (fst_gd == ERING_WAS_FULL) { spin_unlock_irqrestore(&dev->core_dev->lock, flags); return -EAGAIN; } } if (num_sd) { fst_sd = crypto4xx_get_n_sd(dev, num_sd); if (fst_sd == ERING_WAS_FULL) { if (num_gd) dev->gdr_head = fst_gd; spin_unlock_irqrestore(&dev->core_dev->lock, flags); return -EAGAIN; } } pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev); if (pd_entry == ERING_WAS_FULL) { if (num_gd) dev->gdr_head = fst_gd; if (num_sd) dev->sdr_head = fst_sd; spin_unlock_irqrestore(&dev->core_dev->lock, flags); return -EAGAIN; } spin_unlock_irqrestore(&dev->core_dev->lock, flags); pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo + sizeof(struct pd_uinfo) * pd_entry); pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry); pd_uinfo->async_req = req; pd_uinfo->num_gd = num_gd; pd_uinfo->num_sd = num_sd; if (iv_len || ctx->is_hash) { ivlen = iv_len; pd->sa = pd_uinfo->sa_pa; sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va; if (ctx->direction == DIR_INBOUND) memcpy(sa, ctx->sa_in, ctx->sa_len * 4); else memcpy(sa, ctx->sa_out, ctx->sa_len * 4); memcpy((void *) sa + ctx->offset_to_sr_ptr, &pd_uinfo->sr_pa, 4); if (iv_len) crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len); } else { if (ctx->direction == DIR_INBOUND) { pd->sa = ctx->sa_in_dma_addr; sa = (struct dynamic_sa_ctl *) ctx->sa_in; } else { pd->sa = ctx->sa_out_dma_addr; sa = (struct dynamic_sa_ctl *) ctx->sa_out; } } pd->sa_len = ctx->sa_len; if (num_gd) { /* get first gd we are going to use */ gd_idx = fst_gd; pd_uinfo->first_gd = fst_gd; pd_uinfo->num_gd = num_gd; gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx); pd->src = gd_dma; /* enable gather */ sa->sa_command_0.bf.gather = 1; idx = 0; src = &src[0]; /* walk the sg, and setup gather array */ while (nbytes) { sg = &src[idx]; addr = dma_map_page(dev->core_dev->device, sg_page(sg), sg->offset, sg->length, DMA_TO_DEVICE); gd->ptr = addr; gd->ctl_len.len = sg->length; gd->ctl_len.done = 0; gd->ctl_len.ready = 1; if (sg->length >= nbytes) break; nbytes -= sg->length; gd_idx = get_next_gd(gd_idx); gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx); idx++; } } else { pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src), src->offset, src->length, DMA_TO_DEVICE); /* * Disable gather in sa command */ sa->sa_command_0.bf.gather = 0; /* * Indicate gather array is not used */ pd_uinfo->first_gd = 0xffffffff; pd_uinfo->num_gd = 0; } if (ctx->is_hash || sg_is_last(dst)) { /* * we know application give us dst a whole piece of memory * no need to use scatter ring. * In case of is_hash, the icv is always at end of src data. 
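* In that case pd->dest below points straight at the single destination * buffer instead of at a scatter descriptor chain.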
*/ pd_uinfo->using_sd = 0; pd_uinfo->first_sd = 0xffffffff; pd_uinfo->num_sd = 0; pd_uinfo->dest_va = dst; sa->sa_command_0.bf.scatter = 0; if (ctx->is_hash) pd->dest = virt_to_phys((void *)dst); else pd->dest = (u32)dma_map_page(dev->core_dev->device, sg_page(dst), dst->offset, dst->length, DMA_TO_DEVICE); } else { struct ce_sd *sd = NULL; u32 sd_idx = fst_sd; nbytes = datalen; sa->sa_command_0.bf.scatter = 1; pd_uinfo->using_sd = 1; pd_uinfo->dest_va = dst; pd_uinfo->first_sd = fst_sd; pd_uinfo->num_sd = num_sd; sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx); pd->dest = sd_dma; /* setup scatter descriptor */ sd->ctl.done = 0; sd->ctl.rdy = 1; /* sd->ptr should be setup by sd_init routine*/ idx = 0; if (nbytes >= PPC4XX_SD_BUFFER_SIZE) nbytes -= PPC4XX_SD_BUFFER_SIZE; else nbytes = 0; while (nbytes) { sd_idx = get_next_sd(sd_idx); sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx); /* setup scatter descriptor */ sd->ctl.done = 0; sd->ctl.rdy = 1; if (nbytes >= PPC4XX_SD_BUFFER_SIZE) nbytes -= PPC4XX_SD_BUFFER_SIZE; else /* * SD entry can hold PPC4XX_SD_BUFFER_SIZE, * which is more than nbytes, so done. */ nbytes = 0; } } sa->sa_command_1.bf.hash_crypto_offset = 0; pd->pd_ctl.w = ctx->pd_ctl; pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen; pd_uinfo->state = PD_ENTRY_INUSE; wmb(); /* write any value to push engine to read a pd */ writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD); return -EINPROGRESS; } /** * Algorithm Registration Functions */ static int crypto4xx_alg_init(struct crypto_tfm *tfm) { struct crypto_alg *alg = tfm->__crt_alg; struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg); struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); ctx->dev = amcc_alg->dev; ctx->sa_in = NULL; ctx->sa_out = NULL; ctx->sa_in_dma_addr = 0; ctx->sa_out_dma_addr = 0; ctx->sa_len = 0; switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { default: tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx); break; case CRYPTO_ALG_TYPE_AHASH: crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct crypto4xx_ctx)); break; } return 0; } static void crypto4xx_alg_exit(struct crypto_tfm *tfm) { struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); crypto4xx_free_sa(ctx); crypto4xx_free_state_record(ctx); } int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, struct crypto4xx_alg_common *crypto_alg, int array_size) { struct crypto4xx_alg *alg; int i; int rc = 0; for (i = 0; i < array_size; i++) { alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL); if (!alg) return -ENOMEM; alg->alg = crypto_alg[i]; alg->dev = sec_dev; switch (alg->alg.type) { case CRYPTO_ALG_TYPE_AHASH: rc = crypto_register_ahash(&alg->alg.u.hash); break; default: rc = crypto_register_alg(&alg->alg.u.cipher); break; } if (rc) { list_del(&alg->entry); kfree(alg); } else { list_add_tail(&alg->entry, &sec_dev->alg_list); } } return 0; } static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev) { struct crypto4xx_alg *alg, *tmp; list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) { list_del(&alg->entry); switch (alg->alg.type) { case CRYPTO_ALG_TYPE_AHASH: crypto_unregister_ahash(&alg->alg.u.hash); break; default: crypto_unregister_alg(&alg->alg.u.cipher); } kfree(alg); } } static void crypto4xx_bh_tasklet_cb(unsigned long data) { struct device *dev = (struct device *)data; struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev); struct pd_uinfo *pd_uinfo; struct ce_pd *pd; u32 tail; while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) { tail = core_dev->dev->pdr_tail; pd_uinfo = 
core_dev->dev->pdr_uinfo + sizeof(struct pd_uinfo)*tail; pd = core_dev->dev->pdr + sizeof(struct ce_pd) * tail; if ((pd_uinfo->state == PD_ENTRY_INUSE) && pd->pd_ctl.bf.pe_done && !pd->pd_ctl.bf.host_ready) { pd->pd_ctl.bf.pe_done = 0; crypto4xx_pd_done(core_dev->dev, tail); crypto4xx_put_pd_to_pdr(core_dev->dev, tail); pd_uinfo->state = PD_ENTRY_FREE; } else { /* if tail not done, break */ break; } } } /** * Top Half of isr. */ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data) { struct device *dev = (struct device *)data; struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev); if (core_dev->dev->ce_base == 0) return 0; writel(PPC4XX_INTERRUPT_CLR, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR); tasklet_schedule(&core_dev->tasklet); return IRQ_HANDLED; } /** * Supported Crypto Algorithms */ struct crypto4xx_alg_common crypto4xx_alg[] = { /* Crypto AES modes */ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = { .cra_name = "cbc(aes)", .cra_driver_name = "cbc-aes-ppc4xx", .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypto4xx_ctx), .cra_type = &crypto_ablkcipher_type, .cra_init = crypto4xx_alg_init, .cra_exit = crypto4xx_alg_exit, .cra_module = THIS_MODULE, .cra_u = { .ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_IV_SIZE, .setkey = crypto4xx_setkey_aes_cbc, .encrypt = crypto4xx_encrypt, .decrypt = crypto4xx_decrypt, } } }}, }; /** * Module Initialization Routine */ static int __init crypto4xx_probe(struct platform_device *ofdev) { int rc; struct resource res; struct device *dev = &ofdev->dev; struct crypto4xx_core_device *core_dev; rc = of_address_to_resource(ofdev->dev.of_node, 0, &res); if (rc) return -ENODEV; if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) { mtdcri(SDR0, PPC460EX_SDR0_SRST, mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET); mtdcri(SDR0, PPC460EX_SDR0_SRST, mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET); } else if (of_find_compatible_node(NULL, NULL, "amcc,ppc405ex-crypto")) { mtdcri(SDR0, PPC405EX_SDR0_SRST, mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET); mtdcri(SDR0, PPC405EX_SDR0_SRST, mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET); } else if (of_find_compatible_node(NULL, NULL, "amcc,ppc460sx-crypto")) { mtdcri(SDR0, PPC460SX_SDR0_SRST, mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET); mtdcri(SDR0, PPC460SX_SDR0_SRST, mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET); } else { printk(KERN_ERR "Crypto Function Not supported!\n"); return -EINVAL; } core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL); if (!core_dev) return -ENOMEM; dev_set_drvdata(dev, core_dev); core_dev->ofdev = ofdev; core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL); if (!core_dev->dev) goto err_alloc_dev; core_dev->dev->core_dev = core_dev; core_dev->device = dev; spin_lock_init(&core_dev->lock); INIT_LIST_HEAD(&core_dev->dev->alg_list); rc = crypto4xx_build_pdr(core_dev->dev); if (rc) goto err_build_pdr; rc = crypto4xx_build_gdr(core_dev->dev); if (rc) goto err_build_gdr; rc = crypto4xx_build_sdr(core_dev->dev); if (rc) goto err_build_sdr; /* Init tasklet for bottom half processing */ tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb, (unsigned long) dev); /* Register for Crypto isr, Crypto Engine IRQ */ core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); rc = request_irq(core_dev->irq, 
crypto4xx_ce_interrupt_handler, 0, core_dev->dev->name, dev); if (rc) goto err_request_irq; core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0); if (!core_dev->dev->ce_base) { dev_err(dev, "failed to of_iomap\n"); goto err_iomap; } /* need to setup pdr, rdr, gdr and sdr before this */ crypto4xx_hw_init(core_dev->dev); /* Register security algorithms with Linux CryptoAPI */ rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg, ARRAY_SIZE(crypto4xx_alg)); if (rc) goto err_start_dev; return 0; err_start_dev: iounmap(core_dev->dev->ce_base); err_iomap: free_irq(core_dev->irq, dev); irq_dispose_mapping(core_dev->irq); tasklet_kill(&core_dev->tasklet); err_request_irq: crypto4xx_destroy_sdr(core_dev->dev); err_build_sdr: crypto4xx_destroy_gdr(core_dev->dev); err_build_gdr: crypto4xx_destroy_pdr(core_dev->dev); err_build_pdr: kfree(core_dev->dev); err_alloc_dev: kfree(core_dev); return rc; } static int __exit crypto4xx_remove(struct platform_device *ofdev) { struct device *dev = &ofdev->dev; struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev); free_irq(core_dev->irq, dev); irq_dispose_mapping(core_dev->irq); tasklet_kill(&core_dev->tasklet); /* Un-register with Linux CryptoAPI */ crypto4xx_unregister_alg(core_dev->dev); /* Free all allocated memory */ crypto4xx_stop_all(core_dev); return 0; } static const struct of_device_id crypto4xx_match[] = { { .compatible = "amcc,ppc4xx-crypto",}, { }, }; static struct platform_driver crypto4xx_driver = { .driver = { .name = "crypto4xx", .owner = THIS_MODULE, .of_match_table = crypto4xx_match, }, .probe = crypto4xx_probe, .remove = crypto4xx_remove, }; module_platform_driver(crypto4xx_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>"); MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");
gpl-2.0
kundancool/android_kernel_xiaomi_msm8974
fs/ubifs/tnc_misc.c
4886
12915
/* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: Adrian Hunter * Artem Bityutskiy (Битюцкий Артём) */ /* * This file contains miscelanious TNC-related functions shared betweend * different files. This file does not form any logically separate TNC * sub-system. The file was created because there is a lot of TNC code and * putting it all in one file would make that file too big and unreadable. */ #include "ubifs.h" /** * ubifs_tnc_levelorder_next - next TNC tree element in levelorder traversal. * @zr: root of the subtree to traverse * @znode: previous znode * * This function implements levelorder TNC traversal. The LNC is ignored. * Returns the next element or %NULL if @znode is already the last one. */ struct ubifs_znode *ubifs_tnc_levelorder_next(struct ubifs_znode *zr, struct ubifs_znode *znode) { int level, iip, level_search = 0; struct ubifs_znode *zn; ubifs_assert(zr); if (unlikely(!znode)) return zr; if (unlikely(znode == zr)) { if (znode->level == 0) return NULL; return ubifs_tnc_find_child(zr, 0); } level = znode->level; iip = znode->iip; while (1) { ubifs_assert(znode->level <= zr->level); /* * First walk up until there is a znode with next branch to * look at. */ while (znode->parent != zr && iip >= znode->parent->child_cnt) { znode = znode->parent; iip = znode->iip; } if (unlikely(znode->parent == zr && iip >= znode->parent->child_cnt)) { /* This level is done, switch to the lower one */ level -= 1; if (level_search || level < 0) /* * We were already looking for znode at lower * level ('level_search'). As we are here * again, it just does not exist. Or all levels * were finished ('level < 0'). */ return NULL; level_search = 1; iip = -1; znode = ubifs_tnc_find_child(zr, 0); ubifs_assert(znode); } /* Switch to the next index */ zn = ubifs_tnc_find_child(znode->parent, iip + 1); if (!zn) { /* No more children to look at, we have walk up */ iip = znode->parent->child_cnt; continue; } /* Walk back down to the level we came from ('level') */ while (zn->level != level) { znode = zn; zn = ubifs_tnc_find_child(zn, 0); if (!zn) { /* * This path is not too deep so it does not * reach 'level'. Try next path. */ iip = znode->iip; break; } } if (zn) { ubifs_assert(zn->level >= 0); return zn; } } } /** * ubifs_search_zbranch - search znode branch. * @c: UBIFS file-system description object * @znode: znode to search in * @key: key to search for * @n: znode branch slot number is returned here * * This is a helper function which search branch with key @key in @znode using * binary search. The result of the search may be: * o exact match, then %1 is returned, and the slot number of the branch is * stored in @n; * o no exact match, then %0 is returned and the slot number of the left * closest branch is returned in @n; the slot if all keys in this znode are * greater than @key, then %-1 is returned in @n. 
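* * In a caller this typically looks like (sketch): when %1 is returned, * znode->zbranch[n] is the exact match; when %0 is returned, a new branch * for @key would be inserted after slot @n (i.e. at slot @n + 1).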
*/ int ubifs_search_zbranch(const struct ubifs_info *c, const struct ubifs_znode *znode, const union ubifs_key *key, int *n) { int beg = 0, end = znode->child_cnt, uninitialized_var(mid); int uninitialized_var(cmp); const struct ubifs_zbranch *zbr = &znode->zbranch[0]; ubifs_assert(end > beg); while (end > beg) { mid = (beg + end) >> 1; cmp = keys_cmp(c, key, &zbr[mid].key); if (cmp > 0) beg = mid + 1; else if (cmp < 0) end = mid; else { *n = mid; return 1; } } *n = end - 1; /* The insert point is after *n */ ubifs_assert(*n >= -1 && *n < znode->child_cnt); if (*n == -1) ubifs_assert(keys_cmp(c, key, &zbr[0].key) < 0); else ubifs_assert(keys_cmp(c, key, &zbr[*n].key) > 0); if (*n + 1 < znode->child_cnt) ubifs_assert(keys_cmp(c, key, &zbr[*n + 1].key) < 0); return 0; } /** * ubifs_tnc_postorder_first - find first znode to do postorder tree traversal. * @znode: znode to start at (root of the sub-tree to traverse) * * Find the lowest leftmost znode in a subtree of the TNC tree. The LNC is * ignored. */ struct ubifs_znode *ubifs_tnc_postorder_first(struct ubifs_znode *znode) { if (unlikely(!znode)) return NULL; while (znode->level > 0) { struct ubifs_znode *child; child = ubifs_tnc_find_child(znode, 0); if (!child) return znode; znode = child; } return znode; } /** * ubifs_tnc_postorder_next - next TNC tree element in postorder traversal. * @znode: previous znode * * This function implements postorder TNC traversal. The LNC is ignored. * Returns the next element or %NULL if @znode is already the last one. */ struct ubifs_znode *ubifs_tnc_postorder_next(struct ubifs_znode *znode) { struct ubifs_znode *zn; ubifs_assert(znode); if (unlikely(!znode->parent)) return NULL; /* Switch to the next index in the parent */ zn = ubifs_tnc_find_child(znode->parent, znode->iip + 1); if (!zn) /* This is in fact the last child, return parent */ return znode->parent; /* Go to the first znode in this new subtree */ return ubifs_tnc_postorder_first(zn); } /** * ubifs_destroy_tnc_subtree - destroy all znodes connected to a subtree. * @znode: znode defining subtree to destroy * * This function destroys subtree of the TNC tree. Returns number of clean * znodes in the subtree. */ long ubifs_destroy_tnc_subtree(struct ubifs_znode *znode) { struct ubifs_znode *zn = ubifs_tnc_postorder_first(znode); long clean_freed = 0; int n; ubifs_assert(zn); while (1) { for (n = 0; n < zn->child_cnt; n++) { if (!zn->zbranch[n].znode) continue; if (zn->level > 0 && !ubifs_zn_dirty(zn->zbranch[n].znode)) clean_freed += 1; cond_resched(); kfree(zn->zbranch[n].znode); } if (zn == znode) { if (!ubifs_zn_dirty(zn)) clean_freed += 1; kfree(zn); return clean_freed; } zn = ubifs_tnc_postorder_next(zn); } } /** * read_znode - read an indexing node from flash and fill znode. * @c: UBIFS file-system description object * @lnum: LEB of the indexing node to read * @offs: node offset * @len: node length * @znode: znode to read to * * This function reads an indexing node from the flash media and fills znode * with the read data. Returns zero in case of success and a negative error * code in case of failure. The read indexing node is validated and if anything * is wrong with it, this function prints complaint messages and returns * %-EINVAL. 
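* (each validation failure below sets a distinct err value, 1-7, which the * out_dump path prints to identify the offending check)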
*/ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len, struct ubifs_znode *znode) { int i, err, type, cmp; struct ubifs_idx_node *idx; idx = kmalloc(c->max_idx_node_sz, GFP_NOFS); if (!idx) return -ENOMEM; err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs); if (err < 0) { kfree(idx); return err; } znode->child_cnt = le16_to_cpu(idx->child_cnt); znode->level = le16_to_cpu(idx->level); dbg_tnc("LEB %d:%d, level %d, %d branch", lnum, offs, znode->level, znode->child_cnt); if (znode->child_cnt > c->fanout || znode->level > UBIFS_MAX_LEVELS) { dbg_err("current fanout %d, branch count %d", c->fanout, znode->child_cnt); dbg_err("max levels %d, znode level %d", UBIFS_MAX_LEVELS, znode->level); err = 1; goto out_dump; } for (i = 0; i < znode->child_cnt; i++) { const struct ubifs_branch *br = ubifs_idx_branch(c, idx, i); struct ubifs_zbranch *zbr = &znode->zbranch[i]; key_read(c, &br->key, &zbr->key); zbr->lnum = le32_to_cpu(br->lnum); zbr->offs = le32_to_cpu(br->offs); zbr->len = le32_to_cpu(br->len); zbr->znode = NULL; /* Validate branch */ if (zbr->lnum < c->main_first || zbr->lnum >= c->leb_cnt || zbr->offs < 0 || zbr->offs + zbr->len > c->leb_size || zbr->offs & 7) { dbg_err("bad branch %d", i); err = 2; goto out_dump; } switch (key_type(c, &zbr->key)) { case UBIFS_INO_KEY: case UBIFS_DATA_KEY: case UBIFS_DENT_KEY: case UBIFS_XENT_KEY: break; default: dbg_msg("bad key type at slot %d: %d", i, key_type(c, &zbr->key)); err = 3; goto out_dump; } if (znode->level) continue; type = key_type(c, &zbr->key); if (c->ranges[type].max_len == 0) { if (zbr->len != c->ranges[type].len) { dbg_err("bad target node (type %d) length (%d)", type, zbr->len); dbg_err("have to be %d", c->ranges[type].len); err = 4; goto out_dump; } } else if (zbr->len < c->ranges[type].min_len || zbr->len > c->ranges[type].max_len) { dbg_err("bad target node (type %d) length (%d)", type, zbr->len); dbg_err("have to be in range of %d-%d", c->ranges[type].min_len, c->ranges[type].max_len); err = 5; goto out_dump; } } /* * Ensure that the next key is greater or equivalent to the * previous one. */ for (i = 0; i < znode->child_cnt - 1; i++) { const union ubifs_key *key1, *key2; key1 = &znode->zbranch[i].key; key2 = &znode->zbranch[i + 1].key; cmp = keys_cmp(c, key1, key2); if (cmp > 0) { dbg_err("bad key order (keys %d and %d)", i, i + 1); err = 6; goto out_dump; } else if (cmp == 0 && !is_hash_key(c, key1)) { /* These can only be keys with colliding hash */ dbg_err("keys %d and %d are not hashed but equivalent", i, i + 1); err = 7; goto out_dump; } } kfree(idx); return 0; out_dump: ubifs_err("bad indexing node at LEB %d:%d, error %d", lnum, offs, err); dbg_dump_node(c, idx); kfree(idx); return -EINVAL; } /** * ubifs_load_znode - load znode to TNC cache. * @c: UBIFS file-system description object * @zbr: znode branch * @parent: znode's parent * @iip: index in parent * * This function loads znode pointed to by @zbr into the TNC cache and * returns pointer to it in case of success and a negative error code in case * of failure. */ struct ubifs_znode *ubifs_load_znode(struct ubifs_info *c, struct ubifs_zbranch *zbr, struct ubifs_znode *parent, int iip) { int err; struct ubifs_znode *znode; ubifs_assert(!zbr->znode); /* * A slab cache is not presently used for znodes because the znode size * depends on the fanout which is stored in the superblock. 
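* (hence the plain kzalloc() of c->max_znode_sz just below)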
*/ znode = kzalloc(c->max_znode_sz, GFP_NOFS); if (!znode) return ERR_PTR(-ENOMEM); err = read_znode(c, zbr->lnum, zbr->offs, zbr->len, znode); if (err) goto out; atomic_long_inc(&c->clean_zn_cnt); /* * Increment the global clean znode counter as well. It is OK that * global and per-FS clean znode counters may be inconsistent for some * short time (because we might be preempted at this point), the global * one is only used in shrinker. */ atomic_long_inc(&ubifs_clean_zn_cnt); zbr->znode = znode; znode->parent = parent; znode->time = get_seconds(); znode->iip = iip; return znode; out: kfree(znode); return ERR_PTR(err); } /** * ubifs_tnc_read_node - read a leaf node from the flash media. * @c: UBIFS file-system description object * @zbr: key and position of the node * @node: node is returned here * * This function reads a node defined by @zbr from the flash media. Returns * zero in case of success or a negative error code in case of * failure. */ int ubifs_tnc_read_node(struct ubifs_info *c, struct ubifs_zbranch *zbr, void *node) { union ubifs_key key1, *key = &zbr->key; int err, type = key_type(c, key); struct ubifs_wbuf *wbuf; /* * 'zbr' has to point to on-flash node. The node may sit in a bud and * may even be in a write buffer, so we have to take care about this. */ wbuf = ubifs_get_wbuf(c, zbr->lnum); if (wbuf) err = ubifs_read_node_wbuf(wbuf, node, type, zbr->len, zbr->lnum, zbr->offs); else err = ubifs_read_node(c, node, type, zbr->len, zbr->lnum, zbr->offs); if (err) { dbg_tnck(key, "key "); return err; } /* Make sure the key of the read node is correct */ key_read(c, node + UBIFS_KEY_OFFSET, &key1); if (!keys_eq(c, key, &key1)) { ubifs_err("bad key in node at LEB %d:%d", zbr->lnum, zbr->offs); dbg_tnck(key, "looked for key "); dbg_tnck(&key1, "but found node's key "); dbg_dump_node(c, node); return -EINVAL; } return 0; }
gpl-2.0
syhost/kernel_ef65l_3.4
arch/arm/mach-orion5x/tsx09-common.c
8726
3269
/* * QNAP TS-x09 Boards common functions * * Maintainers: Lennert Buytenhek <buytenh@marvell.com> * Byron Bradley <byron.bbradley@gmail.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/mv643xx_eth.h> #include <linux/timex.h> #include <linux/serial_reg.h> #include <mach/orion5x.h> #include "tsx09-common.h" #include "common.h" /***************************************************************************** * QNAP TS-x09 specific power off method via UART1-attached PIC ****************************************************************************/ #define UART1_REG(x) (UART1_VIRT_BASE + ((UART_##x) << 2)) void qnap_tsx09_power_off(void) { /* 19200 baud divisor */ const unsigned divisor = ((orion5x_tclk + (8 * 19200)) / (16 * 19200)); pr_info("%s: triggering power-off...\n", __func__); /* hijack uart1 and reset into sane state (19200,8n1) */ writel(0x83, UART1_REG(LCR)); writel(divisor & 0xff, UART1_REG(DLL)); writel((divisor >> 8) & 0xff, UART1_REG(DLM)); writel(0x03, UART1_REG(LCR)); writel(0x00, UART1_REG(IER)); writel(0x00, UART1_REG(FCR)); writel(0x00, UART1_REG(MCR)); /* send the power-off command 'A' to PIC */ writel('A', UART1_REG(TX)); } /***************************************************************************** * Ethernet ****************************************************************************/ struct mv643xx_eth_platform_data qnap_tsx09_eth_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(8), }; static int __init qnap_tsx09_parse_hex_nibble(char n) { if (n >= '0' && n <= '9') return n - '0'; if (n >= 'A' && n <= 'F') return n - 'A' + 10; if (n >= 'a' && n <= 'f') return n - 'a' + 10; return -1; } static int __init qnap_tsx09_parse_hex_byte(const char *b) { int hi; int lo; hi = qnap_tsx09_parse_hex_nibble(b[0]); lo = qnap_tsx09_parse_hex_nibble(b[1]); if (hi < 0 || lo < 0) return -1; return (hi << 4) | lo; } static int __init qnap_tsx09_check_mac_addr(const char *addr_str) { u_int8_t addr[6]; int i; for (i = 0; i < 6; i++) { int byte; /* * Enforce "xx:xx:xx:xx:xx:xx\n" format. */ if (addr_str[(i * 3) + 2] != ((i < 5) ? ':' : '\n')) return -1; byte = qnap_tsx09_parse_hex_byte(addr_str + (i * 3)); if (byte < 0) return -1; addr[i] = byte; } printk(KERN_INFO "tsx09: found ethernet mac address "); for (i = 0; i < 6; i++) printk("%.2x%s", addr[i], (i < 5) ? ":" : ".\n"); memcpy(qnap_tsx09_eth_data.mac_addr, addr, 6); return 0; } /* * The 'NAS Config' flash partition has an ext2 filesystem which * contains a file that has the ethernet MAC address in plain text * (format "xx:xx:xx:xx:xx:xx\n"). */ void __init qnap_tsx09_find_mac_addr(u32 mem_base, u32 size) { unsigned long addr; for (addr = mem_base; addr < (mem_base + size); addr += 1024) { char *nor_page; int ret = 0; nor_page = ioremap(addr, 1024); if (nor_page != NULL) { ret = qnap_tsx09_check_mac_addr(nor_page); iounmap(nor_page); } if (ret == 0) break; } }
gpl-2.0
keiranFTW/buzz-kernel-ics
arch/mips/pci/ops-pnx8550.c
9494
6467
/* * * BRIEF MODULE DESCRIPTION * * 2.6 port, Embedded Alley Solutions, Inc * * Based on: * Author: source@mvista.com * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. */ #include <linux/types.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <asm/mach-pnx8550/pci.h> #include <asm/mach-pnx8550/glb.h> static inline void clear_status(void) { unsigned long pci_stat; pci_stat = inl(PCI_BASE | PCI_GPPM_STATUS); outl(pci_stat, PCI_BASE | PCI_GPPM_ICLR); } static inline unsigned int calc_cfg_addr(struct pci_bus *bus, unsigned int devfn, int where) { unsigned int addr; addr = ((bus->number > 0) ? (((bus->number & 0xff) << PCI_CFG_BUS_SHIFT) | 1) : 0); addr |= ((devfn & 0xff) << PCI_CFG_FUNC_SHIFT) | (where & 0xfc); return addr; } static int config_access(unsigned int pci_cmd, struct pci_bus *bus, unsigned int devfn, int where, unsigned int pci_mode, unsigned int *val) { unsigned int flags; unsigned long loops = 0; unsigned long ioaddr = calc_cfg_addr(bus, devfn, where); local_irq_save(flags); /*Clear pending interrupt status */ if (inl(PCI_BASE | PCI_GPPM_STATUS)) { clear_status(); while (!(inl(PCI_BASE | PCI_GPPM_STATUS) == 0)) ; } outl(ioaddr, PCI_BASE | PCI_GPPM_ADDR); if ((pci_cmd == PCI_CMD_IOW) || (pci_cmd == PCI_CMD_CONFIG_WRITE)) outl(*val, PCI_BASE | PCI_GPPM_WDAT); outl(INIT_PCI_CYCLE | pci_cmd | (pci_mode & PCI_BYTE_ENABLE_MASK), PCI_BASE | PCI_GPPM_CTRL); loops = ((loops_per_jiffy * PCI_IO_JIFFIES_TIMEOUT) >> (PCI_IO_JIFFIES_SHIFT)); while (1) { if (inl(PCI_BASE | PCI_GPPM_STATUS) & GPPM_DONE) { if ((pci_cmd == PCI_CMD_IOR) || (pci_cmd == PCI_CMD_CONFIG_READ)) *val = inl(PCI_BASE | PCI_GPPM_RDAT); clear_status(); local_irq_restore(flags); return PCIBIOS_SUCCESSFUL; } else if (inl(PCI_BASE | PCI_GPPM_STATUS) & GPPM_R_MABORT) { break; } loops--; if (loops == 0) { printk("%s : Arbiter Locked.\n", __func__); } } clear_status(); if ((pci_cmd == PCI_CMD_IOR) || (pci_cmd == PCI_CMD_IOW)) { printk("%s timeout (GPPM_CTRL=%X) ioaddr %lX pci_cmd %X\n", __func__, inl(PCI_BASE | PCI_GPPM_CTRL), ioaddr, pci_cmd); } if ((pci_cmd == PCI_CMD_IOR) || (pci_cmd == PCI_CMD_CONFIG_READ)) *val = 0xffffffff; local_irq_restore(flags); return PCIBIOS_DEVICE_NOT_FOUND; } /* * We can't address 8 and 16 bit words directly. Instead we have to * read/write a 32bit word and mask/modify the data we actually want. 
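* For example, a byte read at offset 2 issues a 32-bit config read with * byte-enable mask ~(1 << 2) and returns bits 23:16 of the result.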
*/ static int read_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 * val) { unsigned int data = 0; int err; if (bus == NULL) return -1; err = config_access(PCI_CMD_CONFIG_READ, bus, devfn, where, ~(1 << (where & 3)), &data); switch (where & 0x03) { case 0: *val = (unsigned char)(data & 0x000000ff); break; case 1: *val = (unsigned char)((data & 0x0000ff00) >> 8); break; case 2: *val = (unsigned char)((data & 0x00ff0000) >> 16); break; case 3: *val = (unsigned char)((data & 0xff000000) >> 24); break; } return err; } static int read_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 * val) { unsigned int data = 0; int err; if (bus == NULL) return -1; if (where & 0x01) return PCIBIOS_BAD_REGISTER_NUMBER; err = config_access(PCI_CMD_CONFIG_READ, bus, devfn, where, ~(3 << (where & 3)), &data); switch (where & 0x02) { case 0: *val = (unsigned short)(data & 0x0000ffff); break; case 2: *val = (unsigned short)((data & 0xffff0000) >> 16); break; } return err; } static int read_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 * val) { int err; if (bus == NULL) return -1; if (where & 0x03) return PCIBIOS_BAD_REGISTER_NUMBER; err = config_access(PCI_CMD_CONFIG_READ, bus, devfn, where, 0, val); return err; } static int write_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 val) { unsigned int data = (unsigned int)val; int err; if (bus == NULL) return -1; switch (where & 0x03) { case 1: data = (data << 8); break; case 2: data = (data << 16); break; case 3: data = (data << 24); break; default: break; } err = config_access(PCI_CMD_CONFIG_WRITE, bus, devfn, where, ~(1 << (where & 3)), &data); return err; } static int write_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 val) { unsigned int data = (unsigned int)val; int err; if (bus == NULL) return -1; if (where & 0x01) return PCIBIOS_BAD_REGISTER_NUMBER; switch (where & 0x02) { case 2: data = (data << 16); break; default: break; } err = config_access(PCI_CMD_CONFIG_WRITE, bus, devfn, where, ~(3 << (where & 3)), &data); return err; } static int write_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 val) { int err; if (bus == NULL) return -1; if (where & 0x03) return PCIBIOS_BAD_REGISTER_NUMBER; err = config_access(PCI_CMD_CONFIG_WRITE, bus, devfn, where, 0, &val); return err; } static int config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 * val) { switch (size) { case 1: { u8 _val; int rc = read_config_byte(bus, devfn, where, &_val); *val = _val; return rc; } case 2: { u16 _val; int rc = read_config_word(bus, devfn, where, &_val); *val = _val; return rc; } default: return read_config_dword(bus, devfn, where, val); } } static int config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { switch (size) { case 1: return write_config_byte(bus, devfn, where, (u8) val); case 2: return write_config_word(bus, devfn, where, (u16) val); default: return write_config_dword(bus, devfn, where, val); } } struct pci_ops pnx8550_pci_ops = { config_read, config_write };
gpl-2.0
TeamWin/android_kernel_samsung_zerofltespr
drivers/parport/share.c
10518
29890
/* * Parallel-port resource manager code. * * Authors: David Campbell <campbell@tirian.che.curtin.edu.au> * Tim Waugh <tim@cyberelk.demon.co.uk> * Jose Renau <renau@acm.org> * Philip Blundell <philb@gnu.org> * Andrea Arcangeli * * based on work by Grant Guenther <grant@torque.net> * and Philip Blundell * * Any part of this program may be used in documents licensed under * the GNU Free Documentation License, Version 1.1 or any later version * published by the Free Software Foundation. */ #undef PARPORT_DEBUG_SHARING /* undef for production */ #include <linux/module.h> #include <linux/string.h> #include <linux/threads.h> #include <linux/parport.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/kmod.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <asm/irq.h> #undef PARPORT_PARANOID #define PARPORT_DEFAULT_TIMESLICE (HZ/5) unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE; int parport_default_spintime = DEFAULT_SPIN_TIME; static LIST_HEAD(portlist); static DEFINE_SPINLOCK(parportlist_lock); /* list of all allocated ports, sorted by ->number */ static LIST_HEAD(all_ports); static DEFINE_SPINLOCK(full_list_lock); static LIST_HEAD(drivers); static DEFINE_MUTEX(registration_lock); /* What you can do to a port that's gone away.. */ static void dead_write_lines (struct parport *p, unsigned char b){} static unsigned char dead_read_lines (struct parport *p) { return 0; } static unsigned char dead_frob_lines (struct parport *p, unsigned char b, unsigned char c) { return 0; } static void dead_onearg (struct parport *p){} static void dead_initstate (struct pardevice *d, struct parport_state *s) { } static void dead_state (struct parport *p, struct parport_state *s) { } static size_t dead_write (struct parport *p, const void *b, size_t l, int f) { return 0; } static size_t dead_read (struct parport *p, void *b, size_t l, int f) { return 0; } static struct parport_operations dead_ops = { .write_data = dead_write_lines, /* data */ .read_data = dead_read_lines, .write_control = dead_write_lines, /* control */ .read_control = dead_read_lines, .frob_control = dead_frob_lines, .read_status = dead_read_lines, /* status */ .enable_irq = dead_onearg, /* enable_irq */ .disable_irq = dead_onearg, /* disable_irq */ .data_forward = dead_onearg, /* data_forward */ .data_reverse = dead_onearg, /* data_reverse */ .init_state = dead_initstate, /* init_state */ .save_state = dead_state, .restore_state = dead_state, .epp_write_data = dead_write, /* epp */ .epp_read_data = dead_read, .epp_write_addr = dead_write, .epp_read_addr = dead_read, .ecp_write_data = dead_write, /* ecp */ .ecp_read_data = dead_read, .ecp_write_addr = dead_write, .compat_write_data = dead_write, /* compat */ .nibble_read_data = dead_read, /* nibble */ .byte_read_data = dead_read, /* byte */ .owner = NULL, }; /* Call attach(port) for each registered driver. */ static void attach_driver_chain(struct parport *port) { /* caller has exclusive registration_lock */ struct parport_driver *drv; list_for_each_entry(drv, &drivers, list) drv->attach(port); } /* Call detach(port) for each registered driver. */ static void detach_driver_chain(struct parport *port) { struct parport_driver *drv; /* caller has exclusive registration_lock */ list_for_each_entry(drv, &drivers, list) drv->detach (port); } /* Ask kmod for some lowlevel drivers. 
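* (this is only done when a driver registers while the port list is still * empty; see parport_register_driver() below)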
*/ static void get_lowlevel_driver (void) { /* There is no actual module called this: you should set * up an alias for modutils. */ request_module ("parport_lowlevel"); } /** * parport_register_driver - register a parallel port device driver * @drv: structure describing the driver * * This can be called by a parallel port device driver in order * to receive notifications about ports being found in the * system, as well as ports no longer available. * * The @drv structure is allocated by the caller and must not be * deallocated until after calling parport_unregister_driver(). * * The driver's attach() function may block. The port that * attach() is given will be valid for the duration of the * callback, but if the driver wants to take a copy of the * pointer it must call parport_get_port() to do so. Calling * parport_register_device() on that port will do this for you. * * The driver's detach() function may block. The port that * detach() is given will be valid for the duration of the * callback, but if the driver wants to take a copy of the * pointer it must call parport_get_port() to do so. * * Returns 0 on success. Currently it always succeeds. **/ int parport_register_driver (struct parport_driver *drv) { struct parport *port; if (list_empty(&portlist)) get_lowlevel_driver (); mutex_lock(&registration_lock); list_for_each_entry(port, &portlist, list) drv->attach(port); list_add(&drv->list, &drivers); mutex_unlock(&registration_lock); return 0; } /** * parport_unregister_driver - deregister a parallel port device driver * @drv: structure describing the driver that was given to * parport_register_driver() * * This should be called by a parallel port device driver that * has registered itself using parport_register_driver() when it * is about to be unloaded. * * When it returns, the driver's attach() routine will no longer * be called, and for each port that attach() was called for, the * detach() routine will have been called. * * All the driver's attach() and detach() calls are guaranteed to have * finished by the time this function returns. **/ void parport_unregister_driver (struct parport_driver *drv) { struct parport *port; mutex_lock(&registration_lock); list_del_init(&drv->list); list_for_each_entry(port, &portlist, list) drv->detach(port); mutex_unlock(&registration_lock); } static void free_port (struct parport *port) { int d; spin_lock(&full_list_lock); list_del(&port->full_list); spin_unlock(&full_list_lock); for (d = 0; d < 5; d++) { kfree(port->probe_info[d].class_name); kfree(port->probe_info[d].mfr); kfree(port->probe_info[d].model); kfree(port->probe_info[d].cmdset); kfree(port->probe_info[d].description); } kfree(port->name); kfree(port); } /** * parport_get_port - increment a port's reference count * @port: the port * * This ensures that a struct parport pointer remains valid * until the matching parport_put_port() call. **/ struct parport *parport_get_port (struct parport *port) { atomic_inc (&port->ref_count); return port; } /** * parport_put_port - decrement a port's reference count * @port: the port * * This should be called once for each call to parport_get_port(), * once the port is no longer needed. **/ void parport_put_port (struct parport *port) { if (atomic_dec_and_test (&port->ref_count)) /* Can destroy it now. 
*/ free_port (port); return; } /** * parport_register_port - register a parallel port * @base: base I/O address * @irq: IRQ line * @dma: DMA channel * @ops: pointer to the port driver's port operations structure * * When a parallel port (lowlevel) driver finds a port that * should be made available to parallel port device drivers, it * should call parport_register_port(). The @base, @irq, and * @dma parameters are for the convenience of port drivers, and * for ports where they aren't meaningful needn't be set to * anything special. They can be altered afterwards by adjusting * the relevant members of the parport structure that is returned * and represents the port. They should not be tampered with * after calling parport_announce_port, however. * * If there are parallel port device drivers in the system that * have registered themselves using parport_register_driver(), * they are not told about the port at this time; that is done by * parport_announce_port(). * * The @ops structure is allocated by the caller, and must not be * deallocated before calling parport_remove_port(). * * If there is no memory to allocate a new parport structure, * this function will return %NULL. **/ struct parport *parport_register_port(unsigned long base, int irq, int dma, struct parport_operations *ops) { struct list_head *l; struct parport *tmp; int num; int device; char *name; tmp = kmalloc(sizeof(struct parport), GFP_KERNEL); if (!tmp) { printk(KERN_WARNING "parport: memory squeeze\n"); return NULL; } /* Init our structure */ memset(tmp, 0, sizeof(struct parport)); tmp->base = base; tmp->irq = irq; tmp->dma = dma; tmp->muxport = tmp->daisy = tmp->muxsel = -1; tmp->modes = 0; INIT_LIST_HEAD(&tmp->list); tmp->devices = tmp->cad = NULL; tmp->flags = 0; tmp->ops = ops; tmp->physport = tmp; memset (tmp->probe_info, 0, 5 * sizeof (struct parport_device_info)); rwlock_init(&tmp->cad_lock); spin_lock_init(&tmp->waitlist_lock); spin_lock_init(&tmp->pardevice_lock); tmp->ieee1284.mode = IEEE1284_MODE_COMPAT; tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE; sema_init(&tmp->ieee1284.irq, 0); tmp->spintime = parport_default_spintime; atomic_set (&tmp->ref_count, 1); INIT_LIST_HEAD(&tmp->full_list); name = kmalloc(15, GFP_KERNEL); if (!name) { printk(KERN_ERR "parport: memory squeeze\n"); kfree(tmp); return NULL; } /* Search for the lowest free parport number. */ spin_lock(&full_list_lock); for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) { struct parport *p = list_entry(l, struct parport, full_list); if (p->number != num) break; } tmp->portnum = tmp->number = num; list_add_tail(&tmp->full_list, l); spin_unlock(&full_list_lock); /* * Now that the portnum is known finish doing the Init. */ sprintf(name, "parport%d", tmp->portnum = tmp->number); tmp->name = name; for (device = 0; device < 5; device++) /* assume the worst */ tmp->probe_info[device].class = PARPORT_CLASS_LEGACY; tmp->waithead = tmp->waittail = NULL; return tmp; } /** * parport_announce_port - tell device drivers about a parallel port * @port: parallel port to announce * * After a port driver has registered a parallel port with * parport_register_port, and performed any necessary * initialisation or adjustments, it should call * parport_announce_port() in order to notify all device drivers * that have called parport_register_driver(). Their attach() * functions will be called, with @port as the parameter. **/ void parport_announce_port (struct parport *port) { int i; #ifdef CONFIG_PARPORT_1284 /* Analyse the IEEE1284.3 topology of the port. 
*/ parport_daisy_init(port); #endif if (!port->dev) printk(KERN_WARNING "%s: fix this legacy " "no-device port driver!\n", port->name); parport_proc_register(port); mutex_lock(&registration_lock); spin_lock_irq(&parportlist_lock); list_add_tail(&port->list, &portlist); for (i = 1; i < 3; i++) { struct parport *slave = port->slaves[i-1]; if (slave) list_add_tail(&slave->list, &portlist); } spin_unlock_irq(&parportlist_lock); /* Let drivers know that new port(s) has arrived. */ attach_driver_chain (port); for (i = 1; i < 3; i++) { struct parport *slave = port->slaves[i-1]; if (slave) attach_driver_chain(slave); } mutex_unlock(&registration_lock); } /** * parport_remove_port - deregister a parallel port * @port: parallel port to deregister * * When a parallel port driver is forcibly unloaded, or a * parallel port becomes inaccessible, the port driver must call * this function in order to deal with device drivers that still * want to use it. * * The parport structure associated with the port has its * operations structure replaced with one containing 'null' * operations that return errors or just don't do anything. * * Any drivers that have registered themselves using * parport_register_driver() are notified that the port is no * longer accessible by having their detach() routines called * with @port as the parameter. **/ void parport_remove_port(struct parport *port) { int i; mutex_lock(&registration_lock); /* Spread the word. */ detach_driver_chain (port); #ifdef CONFIG_PARPORT_1284 /* Forget the IEEE1284.3 topology of the port. */ parport_daisy_fini(port); for (i = 1; i < 3; i++) { struct parport *slave = port->slaves[i-1]; if (!slave) continue; detach_driver_chain(slave); parport_daisy_fini(slave); } #endif port->ops = &dead_ops; spin_lock(&parportlist_lock); list_del_init(&port->list); for (i = 1; i < 3; i++) { struct parport *slave = port->slaves[i-1]; if (slave) list_del_init(&slave->list); } spin_unlock(&parportlist_lock); mutex_unlock(&registration_lock); parport_proc_unregister(port); for (i = 1; i < 3; i++) { struct parport *slave = port->slaves[i-1]; if (slave) parport_put_port(slave); } } /** * parport_register_device - register a device on a parallel port * @port: port to which the device is attached * @name: a name to refer to the device * @pf: preemption callback * @kf: kick callback (wake-up) * @irq_func: interrupt handler * @flags: registration flags * @handle: data for callback functions * * This function, called by parallel port device drivers, * declares that a device is connected to a port, and tells the * system all it needs to know. * * The @name is allocated by the caller and must not be * deallocated until the caller calls @parport_unregister_device * for that device. * * The preemption callback function, @pf, is called when this * device driver has claimed access to the port but another * device driver wants to use it. It is given @handle as its * parameter, and should return zero if it is willing for the * system to release the port to another driver on its behalf. * If it wants to keep control of the port it should return * non-zero, and no action will be taken. It is good manners for * the driver to try to release the port at the earliest * opportunity after its preemption callback rejects a preemption * attempt. Note that if a preemption callback is happy for * preemption to go ahead, there is no need to release the port; * it is done automatically. This function may not block, as it * may be called from interrupt context. 
If the device driver * does not support preemption, @pf can be %NULL. * * The wake-up ("kick") callback function, @kf, is called when * the port is available to be claimed for exclusive access; that * is, parport_claim() is guaranteed to succeed when called from * inside the wake-up callback function. If the driver wants to * claim the port it should do so; otherwise, it need not take * any action. This function may not block, as it may be called * from interrupt context. If the device driver does not want to * be explicitly invited to claim the port in this way, @kf can * be %NULL. * * The interrupt handler, @irq_func, is called when an interrupt * arrives from the parallel port. Note that if a device driver * wants to use interrupts it should use parport_enable_irq(), * and can also check the irq member of the parport structure * representing the port. * * The parallel port (lowlevel) driver is the one that has called * request_irq() and whose interrupt handler is called first. * This handler does whatever needs to be done to the hardware to * acknowledge the interrupt (for PC-style ports there is nothing * special to be done). It then tells the IEEE 1284 code about * the interrupt, which may involve reacting to an IEEE 1284 * event depending on the current IEEE 1284 phase. After this, * it calls @irq_func. Needless to say, @irq_func will be called * from interrupt context, and may not block. * * The %PARPORT_DEV_EXCL flag is for preventing port sharing, and * so should only be used when sharing the port with other device * drivers is impossible and would lead to incorrect behaviour. * Use it sparingly! Normally, @flags will be zero. * * This function returns a pointer to a structure that represents * the device on the port, or %NULL if there is not enough memory * to allocate space for that structure. **/ struct pardevice * parport_register_device(struct parport *port, const char *name, int (*pf)(void *), void (*kf)(void *), void (*irq_func)(void *), int flags, void *handle) { struct pardevice *tmp; if (port->physport->flags & PARPORT_FLAG_EXCL) { /* An exclusive device is registered. */ printk (KERN_DEBUG "%s: no more devices allowed\n", port->name); return NULL; } if (flags & PARPORT_DEV_LURK) { if (!pf || !kf) { printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name); return NULL; } } /* We up our own module reference count, and that of the port on which a device is to be registered, to ensure that neither of us gets unloaded while we sleep in (e.g.) kmalloc. */ if (!try_module_get(port->ops->owner)) { return NULL; } parport_get_port (port); tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL); if (tmp == NULL) { printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name); goto out; } tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL); if (tmp->state == NULL) { printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name); goto out_free_pardevice; } tmp->name = name; tmp->port = port; tmp->daisy = -1; tmp->preempt = pf; tmp->wakeup = kf; tmp->private = handle; tmp->flags = flags; tmp->irq_func = irq_func; tmp->waiting = 0; tmp->timeout = 5 * HZ; /* Chain this onto the list */ tmp->prev = NULL; /* * This function must not run from an irq handler so we don' t need * to clear irq on the local CPU. 
-arca */ spin_lock(&port->physport->pardevice_lock); if (flags & PARPORT_DEV_EXCL) { if (port->physport->devices) { spin_unlock (&port->physport->pardevice_lock); printk (KERN_DEBUG "%s: cannot grant exclusive access for " "device %s\n", port->name, name); goto out_free_all; } port->flags |= PARPORT_FLAG_EXCL; } tmp->next = port->physport->devices; wmb(); /* Make sure that tmp->next is written before it's added to the list; see comments marked 'no locking required' */ if (port->physport->devices) port->physport->devices->prev = tmp; port->physport->devices = tmp; spin_unlock(&port->physport->pardevice_lock); init_waitqueue_head(&tmp->wait_q); tmp->timeslice = parport_default_timeslice; tmp->waitnext = tmp->waitprev = NULL; /* * This has to be run as last thing since init_state may need other * pardevice fields. -arca */ port->ops->init_state(tmp, tmp->state); if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) { port->proc_device = tmp; parport_device_proc_register(tmp); } return tmp; out_free_all: kfree(tmp->state); out_free_pardevice: kfree(tmp); out: parport_put_port (port); module_put(port->ops->owner); return NULL; } /** * parport_unregister_device - deregister a device on a parallel port * @dev: pointer to structure representing device * * This undoes the effect of parport_register_device(). **/ void parport_unregister_device(struct pardevice *dev) { struct parport *port; #ifdef PARPORT_PARANOID if (dev == NULL) { printk(KERN_ERR "parport_unregister_device: passed NULL\n"); return; } #endif port = dev->port->physport; if (port->proc_device == dev) { port->proc_device = NULL; clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags); parport_device_proc_unregister(dev); } if (port->cad == dev) { printk(KERN_DEBUG "%s: %s forgot to release port\n", port->name, dev->name); parport_release (dev); } spin_lock(&port->pardevice_lock); if (dev->next) dev->next->prev = dev->prev; if (dev->prev) dev->prev->next = dev->next; else port->devices = dev->next; if (dev->flags & PARPORT_DEV_EXCL) port->flags &= ~PARPORT_FLAG_EXCL; spin_unlock(&port->pardevice_lock); /* Make sure we haven't left any pointers around in the wait * list. */ spin_lock_irq(&port->waitlist_lock); if (dev->waitprev || dev->waitnext || port->waithead == dev) { if (dev->waitprev) dev->waitprev->waitnext = dev->waitnext; else port->waithead = dev->waitnext; if (dev->waitnext) dev->waitnext->waitprev = dev->waitprev; else port->waittail = dev->waitprev; } spin_unlock_irq(&port->waitlist_lock); kfree(dev->state); kfree(dev); module_put(port->ops->owner); parport_put_port (port); } /** * parport_find_number - find a parallel port by number * @number: parallel port number * * This returns the parallel port with the specified number, or * %NULL if there is none. * * There is an implicit parport_get_port() done already; to throw * away the reference to the port that parport_find_number() * gives you, use parport_put_port(). */ struct parport *parport_find_number (int number) { struct parport *port, *result = NULL; if (list_empty(&portlist)) get_lowlevel_driver (); spin_lock (&parportlist_lock); list_for_each_entry(port, &portlist, list) { if (port->number == number) { result = parport_get_port (port); break; } } spin_unlock (&parportlist_lock); return result; } /** * parport_find_base - find a parallel port by base address * @base: base I/O address * * This returns the parallel port with the specified base * address, or %NULL if there is none. 
* * There is an implicit parport_get_port() done already; to throw * away the reference to the port that parport_find_base() * gives you, use parport_put_port(). */ struct parport *parport_find_base (unsigned long base) { struct parport *port, *result = NULL; if (list_empty(&portlist)) get_lowlevel_driver (); spin_lock (&parportlist_lock); list_for_each_entry(port, &portlist, list) { if (port->base == base) { result = parport_get_port (port); break; } } spin_unlock (&parportlist_lock); return result; } /** * parport_claim - claim access to a parallel port device * @dev: pointer to structure representing a device on the port * * This function will not block and so can be used from interrupt * context. If parport_claim() succeeds in claiming access to * the port it returns zero and the port is available to use. It * may fail (returning non-zero) if the port is in use by another * driver and that driver is not willing to relinquish control of * the port. **/ int parport_claim(struct pardevice *dev) { struct pardevice *oldcad; struct parport *port = dev->port->physport; unsigned long flags; if (port->cad == dev) { printk(KERN_INFO "%s: %s already owner\n", dev->port->name,dev->name); return 0; } /* Preempt any current device */ write_lock_irqsave (&port->cad_lock, flags); if ((oldcad = port->cad) != NULL) { if (oldcad->preempt) { if (oldcad->preempt(oldcad->private)) goto blocked; port->ops->save_state(port, dev->state); } else goto blocked; if (port->cad != oldcad) { /* I think we'll actually deadlock rather than get here, but just in case.. */ printk(KERN_WARNING "%s: %s released port when preempted!\n", port->name, oldcad->name); if (port->cad) goto blocked; } } /* Can't fail from now on, so mark ourselves as no longer waiting. */ if (dev->waiting & 1) { dev->waiting = 0; /* Take ourselves out of the wait list again. */ spin_lock_irq (&port->waitlist_lock); if (dev->waitprev) dev->waitprev->waitnext = dev->waitnext; else port->waithead = dev->waitnext; if (dev->waitnext) dev->waitnext->waitprev = dev->waitprev; else port->waittail = dev->waitprev; spin_unlock_irq (&port->waitlist_lock); dev->waitprev = dev->waitnext = NULL; } /* Now we do the change of devices */ port->cad = dev; #ifdef CONFIG_PARPORT_1284 /* If it's a mux port, select it. */ if (dev->port->muxport >= 0) { /* FIXME */ port->muxsel = dev->port->muxport; } /* If it's a daisy chain device, select it. */ if (dev->daisy >= 0) { /* This could be lazier. */ if (!parport_daisy_select (port, dev->daisy, IEEE1284_MODE_COMPAT)) port->daisy = dev->daisy; } #endif /* IEEE1284.3 support */ /* Restore control registers */ port->ops->restore_state(port, dev->state); write_unlock_irqrestore(&port->cad_lock, flags); dev->time = jiffies; return 0; blocked: /* If this is the first time we tried to claim the port, register an interest. This is only allowed for devices sleeping in parport_claim_or_block(), or those with a wakeup function. */ /* The cad_lock is still held for writing here */ if (dev->waiting & 2 || dev->wakeup) { spin_lock (&port->waitlist_lock); if (test_and_set_bit(0, &dev->waiting) == 0) { /* First add ourselves to the end of the wait list. 
*/ dev->waitnext = NULL; dev->waitprev = port->waittail; if (port->waittail) { port->waittail->waitnext = dev; port->waittail = dev; } else port->waithead = port->waittail = dev; } spin_unlock (&port->waitlist_lock); } write_unlock_irqrestore (&port->cad_lock, flags); return -EAGAIN; } /** * parport_claim_or_block - claim access to a parallel port device * @dev: pointer to structure representing a device on the port * * This behaves like parport_claim(), but will block if necessary * to wait for the port to be free. A return value of 1 * indicates that it slept; 0 means that it succeeded without * needing to sleep. A negative error code indicates failure. **/ int parport_claim_or_block(struct pardevice *dev) { int r; /* Signal to parport_claim() that we can wait even without a wakeup function. */ dev->waiting = 2; /* Try to claim the port. If this fails, we need to sleep. */ r = parport_claim(dev); if (r == -EAGAIN) { #ifdef PARPORT_DEBUG_SHARING printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name); #endif /* * FIXME!!! Use the proper locking for dev->waiting, * and make this use the "wait_event_interruptible()" * interfaces. The cli/sti that used to be here * did nothing. * * See also parport_release() */ /* If dev->waiting is clear now, an interrupt gave us the port and we would deadlock if we slept. */ if (dev->waiting) { interruptible_sleep_on (&dev->wait_q); if (signal_pending (current)) { return -EINTR; } r = 1; } else { r = 0; #ifdef PARPORT_DEBUG_SHARING printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n", dev->name); #endif } #ifdef PARPORT_DEBUG_SHARING if (dev->port->physport->cad != dev) printk(KERN_DEBUG "%s: exiting parport_claim_or_block " "but %s owns port!\n", dev->name, dev->port->physport->cad ? dev->port->physport->cad->name:"nobody"); #endif } dev->waiting = 0; return r; } /** * parport_release - give up access to a parallel port device * @dev: pointer to structure representing parallel port device * * This function cannot fail, but it should not be called without * the port claimed. Similarly, if the port is already claimed * you should not try claiming it again. **/ void parport_release(struct pardevice *dev) { struct parport *port = dev->port->physport; struct pardevice *pd; unsigned long flags; /* Make sure that dev is the current device */ write_lock_irqsave(&port->cad_lock, flags); if (port->cad != dev) { write_unlock_irqrestore (&port->cad_lock, flags); printk(KERN_WARNING "%s: %s tried to release parport " "when not owner\n", port->name, dev->name); return; } #ifdef CONFIG_PARPORT_1284 /* If this is on a mux port, deselect it. */ if (dev->port->muxport >= 0) { /* FIXME */ port->muxsel = -1; } /* If this is a daisy device, deselect it. */ if (dev->daisy >= 0) { parport_daisy_deselect_all (port); port->daisy = -1; } #endif port->cad = NULL; write_unlock_irqrestore(&port->cad_lock, flags); /* Save control registers */ port->ops->save_state(port, dev->state); /* If anybody is waiting, find out who's been there longest and then wake them up. (Note: no locking required) */ /* !!! 
LOCKING IS NEEDED HERE */ for (pd = port->waithead; pd; pd = pd->waitnext) { if (pd->waiting & 2) { /* sleeping in claim_or_block */ parport_claim(pd); if (waitqueue_active(&pd->wait_q)) wake_up_interruptible(&pd->wait_q); return; } else if (pd->wakeup) { pd->wakeup(pd->private); if (dev->port->cad) /* racy but no matter */ return; } else { printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name); } } /* Nobody was waiting, so walk the list to see if anyone is interested in being woken up. (Note: no locking required) */ /* !!! LOCKING IS NEEDED HERE */ for (pd = port->devices; (port->cad == NULL) && pd; pd = pd->next) { if (pd->wakeup && pd != dev) pd->wakeup(pd->private); } } irqreturn_t parport_irq_handler(int irq, void *dev_id) { struct parport *port = dev_id; parport_generic_irq(port); return IRQ_HANDLED; } /* Exported symbols for modules. */ EXPORT_SYMBOL(parport_claim); EXPORT_SYMBOL(parport_claim_or_block); EXPORT_SYMBOL(parport_release); EXPORT_SYMBOL(parport_register_port); EXPORT_SYMBOL(parport_announce_port); EXPORT_SYMBOL(parport_remove_port); EXPORT_SYMBOL(parport_register_driver); EXPORT_SYMBOL(parport_unregister_driver); EXPORT_SYMBOL(parport_register_device); EXPORT_SYMBOL(parport_unregister_device); EXPORT_SYMBOL(parport_get_port); EXPORT_SYMBOL(parport_put_port); EXPORT_SYMBOL(parport_find_number); EXPORT_SYMBOL(parport_find_base); EXPORT_SYMBOL(parport_irq_handler); MODULE_LICENSE("GPL");
gpl-2.0
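The kernel-doc comments in the share.c record above describe a complete client API: parport_register_driver(), parport_register_device(), parport_claim_or_block(), parport_release() and parport_unregister_device(). What follows is an editor's illustrative sketch, not part of the dataset record: a minimal client module written against that API exactly as documented above. The example_* names, the single-port assumption and the data-line poke are hypothetical.

/*
 * Editor's sketch only -- how a device driver might use the sharing API
 * documented in the record above.  Names prefixed example_ are invented.
 */
#include <linux/module.h>
#include <linux/parport.h>

static struct pardevice *example_dev;	/* hypothetical single device */

static int example_preempt(void *handle)
{
	return 1;	/* refuse preemption while we hold the port */
}

static void example_attach(struct parport *port)
{
	if (example_dev)
		return;		/* this sketch drives only one port */

	example_dev = parport_register_device(port, "example",
					      example_preempt, NULL, NULL,
					      0, NULL);
	if (!example_dev)
		return;

	if (parport_claim_or_block(example_dev) < 0)
		return;

	/* exclusive access granted: touch the data lines, then release */
	parport_write_data(port, 0xaa);
	parport_release(example_dev);
}

static void example_detach(struct parport *port)
{
	if (example_dev && example_dev->port == port) {
		parport_unregister_device(example_dev);
		example_dev = NULL;
	}
}

static struct parport_driver example_driver = {
	.name	= "example",
	.attach	= example_attach,
	.detach	= example_detach,
};

static int __init example_init(void)
{
	return parport_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
	parport_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Note that the preempt callback above always returns non-zero, which per the documentation in the record means the driver keeps the port when another device asks for it; a better-behaved driver would try to release the port at the earliest opportunity after rejecting a preemption.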
htc-mirror/kingdom-ics-crc-3.0.16-1655ff7
fs/afs/cmservice.c
11030
13555
/* AFS Cache Manager Service * * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/ip.h> #include "internal.h" #include "afs_cm.h" #if 0 struct workqueue_struct *afs_cm_workqueue; #endif /* 0 */ static int afs_deliver_cb_init_call_back_state(struct afs_call *, struct sk_buff *, bool); static int afs_deliver_cb_init_call_back_state3(struct afs_call *, struct sk_buff *, bool); static int afs_deliver_cb_probe(struct afs_call *, struct sk_buff *, bool); static int afs_deliver_cb_callback(struct afs_call *, struct sk_buff *, bool); static int afs_deliver_cb_probe_uuid(struct afs_call *, struct sk_buff *, bool); static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *, struct sk_buff *, bool); static void afs_cm_destructor(struct afs_call *); /* * CB.CallBack operation type */ static const struct afs_call_type afs_SRXCBCallBack = { .name = "CB.CallBack", .deliver = afs_deliver_cb_callback, .abort_to_error = afs_abort_to_error, .destructor = afs_cm_destructor, }; /* * CB.InitCallBackState operation type */ static const struct afs_call_type afs_SRXCBInitCallBackState = { .name = "CB.InitCallBackState", .deliver = afs_deliver_cb_init_call_back_state, .abort_to_error = afs_abort_to_error, .destructor = afs_cm_destructor, }; /* * CB.InitCallBackState3 operation type */ static const struct afs_call_type afs_SRXCBInitCallBackState3 = { .name = "CB.InitCallBackState3", .deliver = afs_deliver_cb_init_call_back_state3, .abort_to_error = afs_abort_to_error, .destructor = afs_cm_destructor, }; /* * CB.Probe operation type */ static const struct afs_call_type afs_SRXCBProbe = { .name = "CB.Probe", .deliver = afs_deliver_cb_probe, .abort_to_error = afs_abort_to_error, .destructor = afs_cm_destructor, }; /* * CB.ProbeUuid operation type */ static const struct afs_call_type afs_SRXCBProbeUuid = { .name = "CB.ProbeUuid", .deliver = afs_deliver_cb_probe_uuid, .abort_to_error = afs_abort_to_error, .destructor = afs_cm_destructor, }; /* * CB.TellMeAboutYourself operation type */ static const struct afs_call_type afs_SRXCBTellMeAboutYourself = { .name = "CB.TellMeAboutYourself", .deliver = afs_deliver_cb_tell_me_about_yourself, .abort_to_error = afs_abort_to_error, .destructor = afs_cm_destructor, }; /* * route an incoming cache manager call * - return T if supported, F if not */ bool afs_cm_incoming_call(struct afs_call *call) { u32 operation_id = ntohl(call->operation_ID); _enter("{CB.OP %u}", operation_id); switch (operation_id) { case CBCallBack: call->type = &afs_SRXCBCallBack; return true; case CBInitCallBackState: call->type = &afs_SRXCBInitCallBackState; return true; case CBInitCallBackState3: call->type = &afs_SRXCBInitCallBackState3; return true; case CBProbe: call->type = &afs_SRXCBProbe; return true; case CBTellMeAboutYourself: call->type = &afs_SRXCBTellMeAboutYourself; return true; default: return false; } } /* * clean up a cache manager call */ static void afs_cm_destructor(struct afs_call *call) { _enter(""); afs_put_server(call->server); call->server = NULL; kfree(call->buffer); call->buffer = NULL; } /* * allow the fileserver to see if the cache manager is still alive */ 
static void SRXAFSCB_CallBack(struct work_struct *work) { struct afs_call *call = container_of(work, struct afs_call, work); _enter(""); /* be sure to send the reply *before* attempting to spam the AFS server * with FSFetchStatus requests on the vnodes with broken callbacks lest * the AFS server get into a vicious cycle of trying to break further * callbacks because it hadn't received completion of the CBCallBack op * yet */ afs_send_empty_reply(call); afs_break_callbacks(call->server, call->count, call->request); _leave(""); } /* * deliver request data to a CB.CallBack call */ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb, bool last) { struct afs_callback *cb; struct afs_server *server; struct in_addr addr; __be32 *bp; u32 tmp; int ret, loop; _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); switch (call->unmarshall) { case 0: call->offset = 0; call->unmarshall++; /* extract the FID array and its count in two steps */ case 1: _debug("extract FID count"); ret = afs_extract_data(call, skb, last, &call->tmp, 4); switch (ret) { case 0: break; case -EAGAIN: return 0; default: return ret; } call->count = ntohl(call->tmp); _debug("FID count: %u", call->count); if (call->count > AFSCBMAX) return -EBADMSG; call->buffer = kmalloc(call->count * 3 * 4, GFP_KERNEL); if (!call->buffer) return -ENOMEM; call->offset = 0; call->unmarshall++; case 2: _debug("extract FID array"); ret = afs_extract_data(call, skb, last, call->buffer, call->count * 3 * 4); switch (ret) { case 0: break; case -EAGAIN: return 0; default: return ret; } _debug("unmarshall FID array"); call->request = kcalloc(call->count, sizeof(struct afs_callback), GFP_KERNEL); if (!call->request) return -ENOMEM; cb = call->request; bp = call->buffer; for (loop = call->count; loop > 0; loop--, cb++) { cb->fid.vid = ntohl(*bp++); cb->fid.vnode = ntohl(*bp++); cb->fid.unique = ntohl(*bp++); cb->type = AFSCM_CB_UNTYPED; } call->offset = 0; call->unmarshall++; /* extract the callback array and its count in two steps */ case 3: _debug("extract CB count"); ret = afs_extract_data(call, skb, last, &call->tmp, 4); switch (ret) { case 0: break; case -EAGAIN: return 0; default: return ret; } tmp = ntohl(call->tmp); _debug("CB count: %u", tmp); if (tmp != call->count && tmp != 0) return -EBADMSG; call->offset = 0; call->unmarshall++; if (tmp == 0) goto empty_cb_array; case 4: _debug("extract CB array"); ret = afs_extract_data(call, skb, last, call->request, call->count * 3 * 4); switch (ret) { case 0: break; case -EAGAIN: return 0; default: return ret; } _debug("unmarshall CB array"); cb = call->request; bp = call->buffer; for (loop = call->count; loop > 0; loop--, cb++) { cb->version = ntohl(*bp++); cb->expiry = ntohl(*bp++); cb->type = ntohl(*bp++); } empty_cb_array: call->offset = 0; call->unmarshall++; case 5: _debug("trailer"); if (skb->len != 0) return -EBADMSG; break; } if (!last) return 0; call->state = AFS_CALL_REPLYING; /* we'll need the file server record as that tells us which set of * vnodes to operate upon */ memcpy(&addr, &ip_hdr(skb)->saddr, 4); server = afs_find_server(&addr); if (!server) return -ENOTCONN; call->server = server; INIT_WORK(&call->work, SRXAFSCB_CallBack); queue_work(afs_wq, &call->work); return 0; } /* * allow the fileserver to request callback state (re-)initialisation */ static void SRXAFSCB_InitCallBackState(struct work_struct *work) { struct afs_call *call = container_of(work, struct afs_call, work); _enter("{%p}", call->server); afs_init_callback_state(call->server); 
afs_send_empty_reply(call); _leave(""); } /* * deliver request data to a CB.InitCallBackState call */ static int afs_deliver_cb_init_call_back_state(struct afs_call *call, struct sk_buff *skb, bool last) { struct afs_server *server; struct in_addr addr; _enter(",{%u},%d", skb->len, last); if (skb->len > 0) return -EBADMSG; if (!last) return 0; /* no unmarshalling required */ call->state = AFS_CALL_REPLYING; /* we'll need the file server record as that tells us which set of * vnodes to operate upon */ memcpy(&addr, &ip_hdr(skb)->saddr, 4); server = afs_find_server(&addr); if (!server) return -ENOTCONN; call->server = server; INIT_WORK(&call->work, SRXAFSCB_InitCallBackState); queue_work(afs_wq, &call->work); return 0; } /* * deliver request data to a CB.InitCallBackState3 call */ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call, struct sk_buff *skb, bool last) { struct afs_server *server; struct in_addr addr; _enter(",{%u},%d", skb->len, last); if (!last) return 0; /* no unmarshalling required */ call->state = AFS_CALL_REPLYING; /* we'll need the file server record as that tells us which set of * vnodes to operate upon */ memcpy(&addr, &ip_hdr(skb)->saddr, 4); server = afs_find_server(&addr); if (!server) return -ENOTCONN; call->server = server; INIT_WORK(&call->work, SRXAFSCB_InitCallBackState); queue_work(afs_wq, &call->work); return 0; } /* * allow the fileserver to see if the cache manager is still alive */ static void SRXAFSCB_Probe(struct work_struct *work) { struct afs_call *call = container_of(work, struct afs_call, work); _enter(""); afs_send_empty_reply(call); _leave(""); } /* * deliver request data to a CB.Probe call */ static int afs_deliver_cb_probe(struct afs_call *call, struct sk_buff *skb, bool last) { _enter(",{%u},%d", skb->len, last); if (skb->len > 0) return -EBADMSG; if (!last) return 0; /* no unmarshalling required */ call->state = AFS_CALL_REPLYING; INIT_WORK(&call->work, SRXAFSCB_Probe); queue_work(afs_wq, &call->work); return 0; } /* * allow the fileserver to quickly find out if the fileserver has been rebooted */ static void SRXAFSCB_ProbeUuid(struct work_struct *work) { struct afs_call *call = container_of(work, struct afs_call, work); struct afs_uuid *r = call->request; struct { __be32 match; } reply; _enter(""); if (memcmp(r, &afs_uuid, sizeof(afs_uuid)) == 0) reply.match = htonl(0); else reply.match = htonl(1); afs_send_simple_reply(call, &reply, sizeof(reply)); _leave(""); } /* * deliver request data to a CB.ProbeUuid call */ static int afs_deliver_cb_probe_uuid(struct afs_call *call, struct sk_buff *skb, bool last) { struct afs_uuid *r; unsigned loop; __be32 *b; int ret; _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); if (skb->len > 0) return -EBADMSG; if (!last) return 0; switch (call->unmarshall) { case 0: call->offset = 0; call->buffer = kmalloc(11 * sizeof(__be32), GFP_KERNEL); if (!call->buffer) return -ENOMEM; call->unmarshall++; case 1: _debug("extract UUID"); ret = afs_extract_data(call, skb, last, call->buffer, 11 * sizeof(__be32)); switch (ret) { case 0: break; case -EAGAIN: return 0; default: return ret; } _debug("unmarshall UUID"); call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL); if (!call->request) return -ENOMEM; b = call->buffer; r = call->request; r->time_low = ntohl(b[0]); r->time_mid = ntohl(b[1]); r->time_hi_and_version = ntohl(b[2]); r->clock_seq_hi_and_reserved = ntohl(b[3]); r->clock_seq_low = ntohl(b[4]); for (loop = 0; loop < 6; loop++) r->node[loop] = ntohl(b[loop + 5]); call->offset = 0; 
call->unmarshall++; case 2: _debug("trailer"); if (skb->len != 0) return -EBADMSG; break; } if (!last) return 0; call->state = AFS_CALL_REPLYING; INIT_WORK(&call->work, SRXAFSCB_ProbeUuid); queue_work(afs_wq, &call->work); return 0; } /* * allow the fileserver to ask about the cache manager's capabilities */ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work) { struct afs_interface *ifs; struct afs_call *call = container_of(work, struct afs_call, work); int loop, nifs; struct { struct /* InterfaceAddr */ { __be32 nifs; __be32 uuid[11]; __be32 ifaddr[32]; __be32 netmask[32]; __be32 mtu[32]; } ia; struct /* Capabilities */ { __be32 capcount; __be32 caps[1]; } cap; } reply; _enter(""); nifs = 0; ifs = kcalloc(32, sizeof(*ifs), GFP_KERNEL); if (ifs) { nifs = afs_get_ipv4_interfaces(ifs, 32, false); if (nifs < 0) { kfree(ifs); ifs = NULL; nifs = 0; } } memset(&reply, 0, sizeof(reply)); reply.ia.nifs = htonl(nifs); reply.ia.uuid[0] = htonl(afs_uuid.time_low); reply.ia.uuid[1] = htonl(afs_uuid.time_mid); reply.ia.uuid[2] = htonl(afs_uuid.time_hi_and_version); reply.ia.uuid[3] = htonl((s8) afs_uuid.clock_seq_hi_and_reserved); reply.ia.uuid[4] = htonl((s8) afs_uuid.clock_seq_low); for (loop = 0; loop < 6; loop++) reply.ia.uuid[loop + 5] = htonl((s8) afs_uuid.node[loop]); if (ifs) { for (loop = 0; loop < nifs; loop++) { reply.ia.ifaddr[loop] = ifs[loop].address.s_addr; reply.ia.netmask[loop] = ifs[loop].netmask.s_addr; reply.ia.mtu[loop] = htonl(ifs[loop].mtu); } kfree(ifs); } reply.cap.capcount = htonl(1); reply.cap.caps[0] = htonl(AFS_CAP_ERROR_TRANSLATION); afs_send_simple_reply(call, &reply, sizeof(reply)); _leave(""); } /* * deliver request data to a CB.TellMeAboutYourself call */ static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call, struct sk_buff *skb, bool last) { _enter(",{%u},%d", skb->len, last); if (skb->len > 0) return -EBADMSG; if (!last) return 0; /* no unmarshalling required */ call->state = AFS_CALL_REPLYING; INIT_WORK(&call->work, SRXAFSCB_TellMeAboutYourself); queue_work(afs_wq, &call->work); return 0; }
gpl-2.0
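afs_deliver_cb_callback() in the record above unmarshals its request with a call->unmarshall phase counter, falling through switch cases and returning early whenever afs_extract_data() says more data is needed. The sketch below is an editor's addition, not part of the record: it reduces that pattern to a self-contained user-space parser. The demo_* names are hypothetical and the wire format (a big-endian 32-bit count followed by that many 32-bit words) is a simplification of the FID and callback arrays handled above.

/*
 * Editor's sketch only -- the staged-unmarshalling pattern in isolation.
 * Input may arrive in arbitrary chunks; each call resumes where the last
 * one stopped and returns 0 while more bytes are still required.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <arpa/inet.h>		/* ntohl() */

struct demo_call {
	int unmarshall;		/* current phase, like call->unmarshall */
	size_t offset;		/* bytes consumed within the current item */
	uint32_t tmp;		/* network-order scratch word */
	uint32_t count;		/* number of records announced by the peer */
	uint32_t *records;	/* decoded host-order records */
};

/* Copy up to `want` bytes into `to`; return 1 once the item is complete. */
static int demo_extract(struct demo_call *c, const uint8_t **p, size_t *len,
			void *to, size_t want)
{
	size_t n = want - c->offset;

	if (n > *len)
		n = *len;
	if (n) {
		memcpy((uint8_t *)to + c->offset, *p, n);
		c->offset += n;
		*p += n;
		*len -= n;
	}
	return c->offset == want;
}

/* Feed one chunk; returns 1 when fully decoded, 0 to wait, -1 on error. */
static int demo_deliver(struct demo_call *c, const uint8_t *p, size_t len)
{
	switch (c->unmarshall) {
	case 0:				/* phase 0: the record count */
		if (!demo_extract(c, &p, &len, &c->tmp, 4))
			return 0;
		c->count = ntohl(c->tmp);
		c->records = calloc(c->count, sizeof(uint32_t));
		if (c->count && !c->records)
			return -1;
		c->offset = 0;
		c->unmarshall++;
		/* fall through */
	case 1:				/* phase 1: the records themselves */
		if (!demo_extract(c, &p, &len, c->records,
				  c->count * sizeof(uint32_t)))
			return 0;
		for (uint32_t i = 0; i < c->count; i++)
			c->records[i] = ntohl(c->records[i]);
		c->unmarshall++;
	}
	return 1;
}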
ElectryDev/android_kernel_lenovo_msm8974
arch/x86/boot/edd.c
12310
4203
/* -*- linux-c -*- ------------------------------------------------------- * * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright 2007 rPath, Inc. - All Rights Reserved * Copyright 2009 Intel Corporation; author H. Peter Anvin * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2. * * ----------------------------------------------------------------------- */ /* * Get EDD BIOS disk information */ #include "boot.h" #include <linux/edd.h> #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) /* * Read the MBR (first sector) from a specific device. */ static int read_mbr(u8 devno, void *buf) { struct biosregs ireg, oreg; initregs(&ireg); ireg.ax = 0x0201; /* Legacy Read, one sector */ ireg.cx = 0x0001; /* Sector 0-0-1 */ ireg.dl = devno; ireg.bx = (size_t)buf; intcall(0x13, &ireg, &oreg); return -(oreg.eflags & X86_EFLAGS_CF); /* 0 or -1 */ } static u32 read_mbr_sig(u8 devno, struct edd_info *ei, u32 *mbrsig) { int sector_size; char *mbrbuf_ptr, *mbrbuf_end; u32 buf_base, mbr_base; extern char _end[]; u16 mbr_magic; sector_size = ei->params.bytes_per_sector; if (!sector_size) sector_size = 512; /* Best available guess */ /* Produce a naturally aligned buffer on the heap */ buf_base = (ds() << 4) + (u32)&_end; mbr_base = (buf_base+sector_size-1) & ~(sector_size-1); mbrbuf_ptr = _end + (mbr_base-buf_base); mbrbuf_end = mbrbuf_ptr + sector_size; /* Make sure we actually have space on the heap... */ if (!(boot_params.hdr.loadflags & CAN_USE_HEAP)) return -1; if (mbrbuf_end > (char *)(size_t)boot_params.hdr.heap_end_ptr) return -1; memset(mbrbuf_ptr, 0, sector_size); if (read_mbr(devno, mbrbuf_ptr)) return -1; *mbrsig = *(u32 *)&mbrbuf_ptr[EDD_MBR_SIG_OFFSET]; mbr_magic = *(u16 *)&mbrbuf_ptr[510]; /* check for valid MBR magic */ return mbr_magic == 0xAA55 ? 0 : -1; } static int get_edd_info(u8 devno, struct edd_info *ei) { struct biosregs ireg, oreg; memset(ei, 0, sizeof *ei); /* Check Extensions Present */ initregs(&ireg); ireg.ah = 0x41; ireg.bx = EDDMAGIC1; ireg.dl = devno; intcall(0x13, &ireg, &oreg); if (oreg.eflags & X86_EFLAGS_CF) return -1; /* No extended information */ if (oreg.bx != EDDMAGIC2) return -1; ei->device = devno; ei->version = oreg.ah; /* EDD version number */ ei->interface_support = oreg.cx; /* EDD functionality subsets */ /* Extended Get Device Parameters */ ei->params.length = sizeof(ei->params); ireg.ah = 0x48; ireg.si = (size_t)&ei->params; intcall(0x13, &ireg, &oreg); /* Get legacy CHS parameters */ /* Ralf Brown recommends setting ES:DI to 0:0 */ ireg.ah = 0x08; ireg.es = 0; intcall(0x13, &ireg, &oreg); if (!(oreg.eflags & X86_EFLAGS_CF)) { ei->legacy_max_cylinder = oreg.ch + ((oreg.cl & 0xc0) << 2); ei->legacy_max_head = oreg.dh; ei->legacy_sectors_per_track = oreg.cl & 0x3f; } return 0; } void query_edd(void) { char eddarg[8]; int do_mbr = 1; #ifdef CONFIG_EDD_OFF int do_edd = 0; #else int do_edd = 1; #endif int be_quiet; int devno; struct edd_info ei, *edp; u32 *mbrptr; if (cmdline_find_option("edd", eddarg, sizeof eddarg) > 0) { if (!strcmp(eddarg, "skipmbr") || !strcmp(eddarg, "skip")) { do_edd = 1; do_mbr = 0; } else if (!strcmp(eddarg, "off")) do_edd = 0; else if (!strcmp(eddarg, "on")) do_edd = 1; } be_quiet = cmdline_find_option_bool("quiet"); edp = boot_params.eddbuf; mbrptr = boot_params.edd_mbr_sig_buffer; if (!do_edd) return; /* Bugs in OnBoard or AddOnCards Bios may hang the EDD probe, * so give a hint if this happens. */ if (!be_quiet) printf("Probing EDD (edd=off to disable)... 
"); for (devno = 0x80; devno < 0x80+EDD_MBR_SIG_MAX; devno++) { /* * Scan the BIOS-supported hard disks and query EDD * information... */ if (!get_edd_info(devno, &ei) && boot_params.eddbuf_entries < EDDMAXNR) { memcpy(edp, &ei, sizeof ei); edp++; boot_params.eddbuf_entries++; } if (do_mbr && !read_mbr_sig(devno, &ei, mbrptr++)) boot_params.edd_mbr_sig_buf_entries = devno-0x80+1; } if (!be_quiet) printf("ok\n"); } #endif
gpl-2.0
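read_mbr_sig() in the record above rounds a heap address up to the sector size to obtain a naturally aligned buffer, then accepts the sector only if it ends in the 0xAA55 boot signature. The fragment below is an editor's note, not part of the record, showing those two checks in isolation; the align_up and has_mbr_magic names are invented.

/*
 * Editor's sketch only -- the alignment idiom and MBR magic test used by
 * read_mbr_sig() above, stated on their own.
 */
#include <stdint.h>

/* Round `addr` up to the next `align` boundary (align is a power of two). */
static uint32_t align_up(uint32_t addr, uint32_t align)
{
	return (addr + align - 1) & ~(align - 1);
}

/* True if a 512-byte (or larger) sector ends with the 0xAA55 signature. */
static int has_mbr_magic(const uint8_t *sector)
{
	return sector[510] == 0x55 && sector[511] == 0xAA;
}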
cphelps76/kernel_google_msm
fs/nls/nls_cp863.c
12566
17182
/* * linux/fs/nls/nls_cp863.c * * Charset cp863 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x00c7, 0x00fc, 0x00e9, 0x00e2, 0x00c2, 0x00e0, 0x00b6, 0x00e7, 0x00ea, 0x00eb, 0x00e8, 0x00ef, 0x00ee, 0x2017, 0x00c0, 0x00a7, /* 0x90*/ 0x00c9, 0x00c8, 0x00ca, 0x00f4, 0x00cb, 0x00cf, 0x00fb, 0x00f9, 0x00a4, 0x00d4, 0x00dc, 0x00a2, 0x00a3, 0x00d9, 0x00db, 0x0192, /* 0xa0*/ 0x00a6, 0x00b4, 0x00f3, 0x00fa, 0x00a8, 0x00b8, 0x00b3, 0x00af, 0x00ce, 0x2310, 0x00ac, 0x00bd, 0x00bc, 0x00be, 0x00ab, 0x00bb, /* 0xb0*/ 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556, 0x2555, 0x2563, 0x2551, 0x2557, 0x255d, 0x255c, 0x255b, 0x2510, /* 0xc0*/ 0x2514, 0x2534, 0x252c, 0x251c, 0x2500, 0x253c, 0x255e, 0x255f, 0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256c, 0x2567, /* 0xd0*/ 0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256b, 0x256a, 0x2518, 0x250c, 0x2588, 0x2584, 0x258c, 0x2590, 0x2580, /* 0xe0*/ 0x03b1, 0x00df, 0x0393, 0x03c0, 0x03a3, 0x03c3, 0x00b5, 0x03c4, 0x03a6, 0x0398, 0x03a9, 0x03b4, 0x221e, 0x03c6, 0x03b5, 0x2229, /* 0xf0*/ 0x2261, 0x00b1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00f7, 0x2248, 0x00b0, 0x2219, 0x00b7, 0x221a, 0x207f, 0x00b2, 0x25a0, 0x00a0, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f 
*/ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xff, 0x00, 0x9b, 0x9c, 0x98, 0x00, 0xa0, 0x8f, /* 0xa0-0xa7 */ 0xa4, 0x00, 0x00, 0xae, 0xaa, 0x00, 0x00, 0xa7, /* 0xa8-0xaf */ 0xf8, 0xf1, 0xfd, 0xa6, 0xa1, 0xe6, 0x86, 0xfa, /* 0xb0-0xb7 */ 0xa5, 0x00, 0x00, 0xaf, 0xac, 0xab, 0xad, 0x00, /* 0xb8-0xbf */ 0x8e, 0x00, 0x84, 0x00, 0x00, 0x00, 0x00, 0x80, /* 0xc0-0xc7 */ 0x91, 0x90, 0x92, 0x94, 0x00, 0x00, 0xa8, 0x95, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x99, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x9d, 0x00, 0x9e, 0x9a, 0x00, 0x00, 0xe1, /* 0xd8-0xdf */ 0x85, 0x00, 0x83, 0x00, 0x00, 0x00, 0x00, 0x87, /* 0xe0-0xe7 */ 0x8a, 0x82, 0x88, 0x89, 0x00, 0x00, 0x8c, 0x8b, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0xa2, 0x93, 0x00, 0x00, 0xf6, /* 0xf0-0xf7 */ 0x00, 0x97, 0xa3, 0x96, 0x81, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page01[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ }; static const unsigned char page03[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0xe2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0xe4, 0x00, 0x00, 0xe8, 0x00, /* 0xa0-0xa7 */ 0x00, 0xea, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0xe0, 0x00, 0x00, 0xeb, 0xee, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0xe3, 0x00, 0x00, 0xe5, 0xe7, 0x00, 0xed, 0x00, /* 0xc0-0xc7 */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8d, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, /* 0x78-0x7f */ }; static const unsigned char page22[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0xf9, 0xfb, 0x00, 0x00, 0x00, 0xec, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0xf0, 0x00, 0x00, 0xf3, 0xf2, 0x00, 0x00, /* 0x60-0x67 */ }; static const unsigned char page23[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0xa9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0xf4, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ }; static const unsigned char page25[256] = { 0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 
0xbb, /* 0x50-0x57 */ 0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, /* 0x58-0x5f */ 0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, /* 0x60-0x67 */ 0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ }; static const unsigned char *const page_uni2charset[256] = { page00, page01, NULL, page03, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, NULL, page22, page23, NULL, page25, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x87, 0x81, 0x82, 0x83, 0x83, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x85, 0x8f, /* 0x88-0x8f */ 0x82, 0x8a, 0x88, 0x93, 0x89, 0x8b, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x93, 0x81, 0x9b, 0x9c, 0x97, 0x96, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0x8c, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0x00, 0xe3, 0xe5, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xed, 0x00, 0x00, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 
0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x9a, 0x90, 0x84, 0x84, 0x8e, 0x86, 0x80, /* 0x80-0x87 */ 0x92, 0x94, 0x91, 0x95, 0xa8, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x99, 0x94, 0x95, 0x9e, 0x9d, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x00, /* 0x98-0x9f */ 0xa0, 0xa1, 0x00, 0x00, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0x00, 0xe1, 0xe2, 0x00, 0xe4, 0xe4, 0x00, 0x00, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0x00, 0xec, 0xe8, 0x00, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "cp863", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_cp863(void) { return register_nls(&table); } static void __exit exit_nls_cp863(void) { unregister_nls(&table); } module_init(init_nls_cp863) module_exit(exit_nls_cp863) MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
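uni2char() and char2uni() in the record above implement a two-level lookup: the high byte of a code point selects a page_uni2charset[] page, the low byte indexes within it, and a NULL page or zero entry means the character has no exact cp863 mapping. The fragment below is an editor's illustration, not part of the record; it assumes it were placed inside the same file, where those static helpers are visible, and the demo_roundtrip name is hypothetical. As a worked case, é (U+00E9) resolves through page00[0xe9] to the cp863 byte 0x82, and charset2uni[0x82] takes it back again.

/*
 * Editor's sketch only -- exercising the two-level table lookup above.
 */
static void demo_roundtrip(void)
{
	unsigned char c;
	wchar_t uni;

	if (uni2char(0x00e9, &c, 1) == 1)		/* é -> 0x82 */
		pr_info("cp863 byte: 0x%02x\n", c);
	if (char2uni(&c, 1, &uni) == 1)			/* 0x82 -> U+00E9 */
		pr_info("unicode: U+%04x\n", (unsigned int)uni);
}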
CyanogenMod/android_kernel_sony_msm7x27a
fs/nls/nls_cp855.c
12566
12424
/* * linux/fs/nls/nls_cp855.c * * Charset cp855 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x0452, 0x0402, 0x0453, 0x0403, 0x0451, 0x0401, 0x0454, 0x0404, 0x0455, 0x0405, 0x0456, 0x0406, 0x0457, 0x0407, 0x0458, 0x0408, /* 0x90*/ 0x0459, 0x0409, 0x045a, 0x040a, 0x045b, 0x040b, 0x045c, 0x040c, 0x045e, 0x040e, 0x045f, 0x040f, 0x044e, 0x042e, 0x044a, 0x042a, /* 0xa0*/ 0x0430, 0x0410, 0x0431, 0x0411, 0x0446, 0x0426, 0x0434, 0x0414, 0x0435, 0x0415, 0x0444, 0x0424, 0x0433, 0x0413, 0x00ab, 0x00bb, /* 0xb0*/ 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x0445, 0x0425, 0x0438, 0x0418, 0x2563, 0x2551, 0x2557, 0x255d, 0x0439, 0x0419, 0x2510, /* 0xc0*/ 0x2514, 0x2534, 0x252c, 0x251c, 0x2500, 0x253c, 0x043a, 0x041a, 0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256c, 0x00a4, /* 0xd0*/ 0x043b, 0x041b, 0x043c, 0x041c, 0x043d, 0x041d, 0x043e, 0x041e, 0x043f, 0x2518, 0x250c, 0x2588, 0x2584, 0x041f, 0x044f, 0x2580, /* 0xe0*/ 0x042f, 0x0440, 0x0420, 0x0441, 0x0421, 0x0442, 0x0422, 0x0443, 0x0423, 0x0436, 0x0416, 0x0432, 0x0412, 0x044c, 0x042c, 0x2116, /* 0xf0*/ 0x00ad, 0x044b, 0x042b, 0x0437, 0x0417, 0x0448, 0x0428, 0x044d, 0x042d, 0x0449, 0x0429, 0x0447, 0x0427, 0x00a7, 0x25a0, 0x00a0, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f 
*/ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xff, 0x00, 0x00, 0x00, 0xcf, 0x00, 0x00, 0xfd, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0xae, 0x00, 0xf0, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0xaf, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ }; static const unsigned char page04[256] = { 0x00, 0x85, 0x81, 0x83, 0x87, 0x89, 0x8b, 0x8d, /* 0x00-0x07 */ 0x8f, 0x91, 0x93, 0x95, 0x97, 0x00, 0x99, 0x9b, /* 0x08-0x0f */ 0xa1, 0xa3, 0xec, 0xad, 0xa7, 0xa9, 0xea, 0xf4, /* 0x10-0x17 */ 0xb8, 0xbe, 0xc7, 0xd1, 0xd3, 0xd5, 0xd7, 0xdd, /* 0x18-0x1f */ 0xe2, 0xe4, 0xe6, 0xe8, 0xab, 0xb6, 0xa5, 0xfc, /* 0x20-0x27 */ 0xf6, 0xfa, 0x9f, 0xf2, 0xee, 0xf8, 0x9d, 0xe0, /* 0x28-0x2f */ 0xa0, 0xa2, 0xeb, 0xac, 0xa6, 0xa8, 0xe9, 0xf3, /* 0x30-0x37 */ 0xb7, 0xbd, 0xc6, 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, /* 0x38-0x3f */ 0xe1, 0xe3, 0xe5, 0xe7, 0xaa, 0xb5, 0xa4, 0xfb, /* 0x40-0x47 */ 0xf5, 0xf9, 0x9e, 0xf1, 0xed, 0xf7, 0x9c, 0xde, /* 0x48-0x4f */ 0x00, 0x84, 0x80, 0x82, 0x86, 0x88, 0x8a, 0x8c, /* 0x50-0x57 */ 0x8e, 0x90, 0x92, 0x94, 0x96, 0x00, 0x98, 0x9a, /* 0x58-0x5f */ }; static const unsigned char page21[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xef, 0x00, /* 0x10-0x17 */ }; static const unsigned char page25[256] = { 0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0xcd, 0xba, 0x00, 0x00, 0xc9, 0x00, 0x00, 0xbb, /* 0x50-0x57 */ 0x00, 0x00, 0xc8, 0x00, 0x00, 0xbc, 0x00, 0x00, /* 0x58-0x5f */ 0xcc, 0x00, 0x00, 0xb9, 0x00, 0x00, 0xcb, 0x00, /* 0x60-0x67 */ 0x00, 0xca, 0x00, 0x00, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ }; static const unsigned char *const page_uni2charset[256] = { page00, NULL, NULL, NULL, page04, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page21, NULL, NULL, NULL, page25, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 
0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x80, 0x82, 0x82, 0x84, 0x84, 0x86, 0x86, /* 0x80-0x87 */ 0x88, 0x88, 0x8a, 0x8a, 0x8c, 0x8c, 0x8e, 0x8e, /* 0x88-0x8f */ 0x90, 0x90, 0x92, 0x92, 0x94, 0x94, 0x96, 0x96, /* 0x90-0x97 */ 0x98, 0x98, 0x9a, 0x9a, 0x9c, 0x9c, 0x9e, 0x9e, /* 0x98-0x9f */ 0xa0, 0xa0, 0xa2, 0xa2, 0xa4, 0xa4, 0xa6, 0xa6, /* 0xa0-0xa7 */ 0xa8, 0xa8, 0xaa, 0xaa, 0xac, 0xac, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb5, 0xb7, /* 0xb0-0xb7 */ 0xb7, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbd, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc6, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd0, 0xd2, 0xd2, 0xd4, 0xd4, 0xd6, 0xd6, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xd8, 0xde, 0xdf, /* 0xd8-0xdf */ 0xde, 0xe1, 0xe1, 0xe3, 0xe3, 0xe5, 0xe5, 0xe7, /* 0xe0-0xe7 */ 0xe7, 0xe9, 0xe9, 0xeb, 0xeb, 0xed, 0xed, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf1, 0xf3, 0xf3, 0xf5, 0xf5, 0xf7, /* 0xf0-0xf7 */ 0xf7, 0xf9, 0xf9, 0xfb, 0xfb, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x81, 0x81, 0x83, 0x83, 0x85, 0x85, 0x87, 0x87, /* 0x80-0x87 */ 0x89, 0x89, 0x8b, 0x8b, 0x8d, 0x8d, 0x8f, 0x8f, /* 0x88-0x8f */ 0x91, 0x91, 0x93, 0x93, 0x95, 0x95, 0x97, 0x97, /* 0x90-0x97 */ 0x99, 0x99, 0x9b, 0x9b, 0x9d, 0x9d, 0x9f, 0x9f, /* 0x98-0x9f */ 0xa1, 0xa1, 0xa3, 0xa3, 0xa5, 0xa5, 0xa7, 0xa7, /* 0xa0-0xa7 */ 0xa9, 0xa9, 0xab, 0xab, 0xad, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb6, 0xb6, 0xb8, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbe, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 
0xc4, 0xc5, 0xc7, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd1, 0xd1, 0xd3, 0xd3, 0xd5, 0xd5, 0xd7, 0xd7, /* 0xd0-0xd7 */ 0xdd, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xe0, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe2, 0xe2, 0xe4, 0xe4, 0xe6, 0xe6, 0xe8, /* 0xe0-0xe7 */ 0xe8, 0xea, 0xea, 0xec, 0xec, 0xee, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf2, 0xf2, 0xf4, 0xf4, 0xf6, 0xf6, 0xf8, /* 0xf0-0xf7 */ 0xf8, 0xfa, 0xfa, 0xfc, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "cp855", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_cp855(void) { return register_nls(&table); } static void __exit exit_nls_cp855(void) { unregister_nls(&table); } module_init(init_nls_cp855) module_exit(exit_nls_cp855) MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
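
The uni2char() helper at the end of the cp855 listing above shows the two-level lookup every generated NLS table uses: the high byte of the Unicode code point selects one of the page_uni2charset pages, the low byte indexes into that page, and a zero entry means "no exact mapping". The following is a minimal stand-alone sketch of the same scheme; the page contents here are made up for illustration and are not the kernel's cp855 data.

#include <stdio.h>

/* Hypothetical page for U+04xx: low byte -> charset byte, 0 = unmapped. */
static const unsigned char example_page04[256] = {
	[0x10] = 0xa1,	/* U+0410 maps to 0xa1 in this toy table */
	[0x30] = 0xa0,	/* U+0430 maps to 0xa0 */
};

/* One slot per high byte; NULL means the whole page is unmapped. */
static const unsigned char *const example_pages[256] = {
	[0x04] = example_page04,
};

static int to_charset(unsigned int uni, unsigned char *out)
{
	unsigned char lo = uni & 0xff;
	unsigned char hi = (uni >> 8) & 0xff;
	const unsigned char *page = example_pages[hi];

	if (!page || !page[lo])
		return -1;	/* no exact mapping; the caller must substitute */
	*out = page[lo];
	return 0;
}

int main(void)
{
	unsigned char c;

	if (to_charset(0x0410, &c) == 0)
		printf("U+0410 -> 0x%02x\n", c);
	if (to_charset(0x20ac, &c) != 0)
		printf("U+20AC has no mapping in this toy table\n");
	return 0;
}
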
ShinySide/HispAsian_5.1.1
fs/nls/nls_iso8859-13.c
12566
11786
/* * linux/fs/nls/nls_iso8859-13.c * * Charset iso8859-13 translation tables. * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087, 0x0088, 0x0089, 0x008a, 0x008b, 0x008c, 0x008d, 0x008e, 0x008f, /* 0x90*/ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097, 0x0098, 0x0099, 0x009a, 0x009b, 0x009c, 0x009d, 0x009e, 0x009f, /* 0xa0*/ 0x00a0, 0x201d, 0x00a2, 0x00a3, 0x00a4, 0x201e, 0x00a6, 0x00a7, 0x00d8, 0x00a9, 0x0156, 0x00ab, 0x00ac, 0x00ad, 0x00ae, 0x00c6, /* 0xb0*/ 0x00b0, 0x00b1, 0x00b2, 0x00b3, 0x201c, 0x00b5, 0x00b6, 0x00b7, 0x00f8, 0x00b9, 0x0157, 0x00bb, 0x00bc, 0x00bd, 0x00be, 0x00e6, /* 0xc0*/ 0x0104, 0x012e, 0x0100, 0x0106, 0x00c4, 0x00c5, 0x0118, 0x0112, 0x010c, 0x00c9, 0x0179, 0x0116, 0x0122, 0x0136, 0x012a, 0x013b, /* 0xd0*/ 0x0160, 0x0143, 0x0145, 0x00d3, 0x014c, 0x00d5, 0x00d6, 0x00d7, 0x0172, 0x0141, 0x015a, 0x016a, 0x00dc, 0x017b, 0x017d, 0x00df, /* 0xe0*/ 0x0105, 0x012f, 0x0101, 0x0107, 0x00e4, 0x00e5, 0x0119, 0x0113, 0x010d, 0x00e9, 0x017a, 0x0117, 0x0123, 0x0137, 0x012b, 0x013c, /* 0xf0*/ 0x0161, 0x0144, 0x0146, 0x00f3, 0x014d, 0x00f5, 0x00f6, 0x00f7, 0x0173, 0x0142, 0x015b, 0x016b, 0x00fc, 0x017c, 0x017e, 0x2019, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 
0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0x00, 0xa2, 0xa3, 0xa4, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0x00, 0xa9, 0x00, 0xab, 0xac, 0xad, 0xae, 0x00, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0x00, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0x00, 0xb9, 0x00, 0xbb, 0xbc, 0xbd, 0xbe, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0xc4, 0xc5, 0xaf, 0x00, /* 0xc0-0xc7 */ 0x00, 0xc9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0xd3, 0x00, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xa8, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0xdf, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0xe4, 0xe5, 0xbf, 0x00, /* 0xe0-0xe7 */ 0x00, 0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0xf3, 0x00, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xb8, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page01[256] = { 0xc2, 0xe2, 0x00, 0x00, 0xc0, 0xe0, 0xc3, 0xe3, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xc8, 0xe8, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0xc7, 0xe7, 0x00, 0x00, 0xcb, 0xeb, /* 0x10-0x17 */ 0xc6, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0xcc, 0xec, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0xce, 0xee, 0x00, 0x00, 0xc1, 0xe1, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcd, 0xed, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0xcf, 0xef, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0xd9, 0xf9, 0xd1, 0xf1, 0xd2, 0xf2, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0xd4, 0xf4, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa, 0xba, /* 0x50-0x57 */ 0x00, 0x00, 0xda, 0xfa, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0xd0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0xdb, 0xfb, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0xd8, 0xf8, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0xca, 0xea, 0xdd, 0xfd, 0xde, 0xfe, 0x00, /* 0x78-0x7f */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0xff, 0x00, 0x00, 0xb4, 0xa1, 0xa5, 0x00, /* 0x18-0x1f */ }; static const unsigned char *const page_uni2charset[256] = { page00, page01, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xb1, 0xa2, 0xb3, 0xa4, 0xb5, 0xb6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xb9, 0xba, 0xbb, 0xbc, 0xad, 0xbe, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbf, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, /* 0xd0-0xd7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xa1, 0xb2, 0xa3, 0xb4, 0xa5, 0xa6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xa9, 0xaa, 0xab, 0xac, 0xbd, 0xae, 0xbd, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, /* 0xf0-0xf7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xff, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) 
>> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "iso8859-13", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_iso8859_13(void) { return register_nls(&table); } static void __exit exit_nls_iso8859_13(void) { unregister_nls(&table); } module_init(init_nls_iso8859_13) module_exit(exit_nls_iso8859_13) MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
Flinny/kernel_htc_msm8994
fs/nls/nls_cp866.c
12566
12666
/* * linux/fs/nls/nls_cp866.c * * Charset cp866 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x0410, 0x0411, 0x0412, 0x0413, 0x0414, 0x0415, 0x0416, 0x0417, 0x0418, 0x0419, 0x041a, 0x041b, 0x041c, 0x041d, 0x041e, 0x041f, /* 0x90*/ 0x0420, 0x0421, 0x0422, 0x0423, 0x0424, 0x0425, 0x0426, 0x0427, 0x0428, 0x0429, 0x042a, 0x042b, 0x042c, 0x042d, 0x042e, 0x042f, /* 0xa0*/ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437, 0x0438, 0x0439, 0x043a, 0x043b, 0x043c, 0x043d, 0x043e, 0x043f, /* 0xb0*/ 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556, 0x2555, 0x2563, 0x2551, 0x2557, 0x255d, 0x255c, 0x255b, 0x2510, /* 0xc0*/ 0x2514, 0x2534, 0x252c, 0x251c, 0x2500, 0x253c, 0x255e, 0x255f, 0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256c, 0x2567, /* 0xd0*/ 0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256b, 0x256a, 0x2518, 0x250c, 0x2588, 0x2584, 0x258c, 0x2590, 0x2580, /* 0xe0*/ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447, 0x0448, 0x0449, 0x044a, 0x044b, 0x044c, 0x044d, 0x044e, 0x044f, /* 0xf0*/ 0x0401, 0x0451, 0x0404, 0x0454, 0x0407, 0x0457, 0x040e, 0x045e, 0x00b0, 0x2219, 0x00b7, 0x221a, 0x2116, 0x00a4, 0x25a0, 0x00a0, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f 
*/ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xff, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa, /* 0xb0-0xb7 */ }; static const unsigned char page04[256] = { 0x00, 0xf0, 0x00, 0x00, 0xf2, 0x00, 0x00, 0xf4, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0x00, /* 0x08-0x0f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x10-0x17 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x18-0x1f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x20-0x27 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x28-0x2f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0x30-0x37 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0x38-0x3f */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x40-0x47 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x48-0x4f */ 0x00, 0xf1, 0x00, 0x00, 0xf3, 0x00, 0x00, 0xf5, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf7, 0x00, /* 0x58-0x5f */ }; static const unsigned char page21[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x00, /* 0x10-0x17 */ }; static const unsigned char page22[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0xf9, 0xfb, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ }; static const unsigned char page25[256] = { 0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, /* 0x50-0x57 */ 0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, /* 0x58-0x5f */ 0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, /* 0x60-0x67 */ 0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ }; static const unsigned char *const page_uni2charset[256] = { page00, NULL, NULL, NULL, page04, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 
NULL, NULL, NULL, NULL, NULL, NULL, page21, page22, NULL, NULL, page25, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0x80-0x87 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0x88-0x8f */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x90-0x97 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf1, 0xf1, 0xf3, 0xf3, 0xf5, 0xf5, 0xf7, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 
0x86, 0x87, /* 0xa0-0xa7 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0xe0-0xe7 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0xe8-0xef */ 0xf0, 0xf0, 0xf2, 0xf2, 0xf4, 0xf4, 0xf6, 0xf6, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "cp866", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_cp866(void) { return register_nls(&table); } static void __exit exit_nls_cp866(void) { unregister_nls(&table); } module_init(init_nls_cp866) module_exit(exit_nls_cp866) MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
SunliyMonkey/linux
drivers/staging/rdma/amso1100/c2_vq.c
12566
7714
/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "c2_vq.h"
#include "c2_provider.h"

/*
 * Verbs Request Objects:
 *
 * VQ Request Objects are allocated by the kernel verbs handlers.
 * They contain a wait object, a refcnt, an atomic bool indicating that the
 * adapter has replied, and a copy of the verb reply work request.
 * A pointer to the VQ Request Object is passed down in the context
 * field of the work request message, and reflected back by the adapter
 * in the verbs reply message.  The function handle_vq() in the interrupt
 * path will use this pointer to:
 * 1) append a copy of the verbs reply message
 * 2) mark that the reply is ready
 * 3) wake up the kernel verbs handler blocked awaiting the reply.
 *
 *
 * The kernel verbs handlers do a "get" to put a 2nd reference on the
 * VQ Request object.  If the kernel verbs handler exits before the adapter
 * can respond, this extra reference will keep the VQ Request object around
 * until the adapter's reply can be processed.  The reason we need this is
 * because a pointer to this object is stuffed into the context field of
 * the verbs work request message, and reflected back in the reply message.
 * It is used in the interrupt handler (handle_vq()) to wake up the
 * appropriate kernel verb handler that is blocked awaiting the verb reply.
 * So handle_vq() will do a "put" on the object when it's done accessing it.
 * NOTE: If we guarantee that the kernel verb handler will never bail before
 * getting the reply, then we don't need these refcnts.
 *
 *
 * VQ Request objects are freed by the kernel verbs handlers only
 * after the verb has been processed, or when the adapter fails and
 * does not reply.
 *
 *
 * Verbs Reply Buffers:
 *
 * VQ Reply bufs are local host memory copies of an outstanding Verb Request
 * reply message.  They are always allocated by the kernel verbs handlers,
 * and _may_ be freed by either the kernel verbs handler -or- the interrupt
 * handler.  The kernel verbs handler _must_ free the repbuf, then free the
 * vq request object, in that order.
 */
int vq_init(struct c2_dev *c2dev)
{
	sprintf(c2dev->vq_cache_name, "c2-vq:dev%c",
		(char) ('0' + c2dev->devnum));

	c2dev->host_msg_cache =
	    kmem_cache_create(c2dev->vq_cache_name, c2dev->rep_vq.msg_size, 0,
			      SLAB_HWCACHE_ALIGN, NULL);
	if (c2dev->host_msg_cache == NULL) {
		return -ENOMEM;
	}
	return 0;
}

void vq_term(struct c2_dev *c2dev)
{
	kmem_cache_destroy(c2dev->host_msg_cache);
}

/* vq_req_alloc - allocate a VQ Request Object and initialize it.
 * The refcnt is set to 1.
 */
struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev)
{
	struct c2_vq_req *r;

	r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL);
	if (r) {
		init_waitqueue_head(&r->wait_object);
		r->reply_msg = 0;
		r->event = 0;
		r->cm_id = NULL;
		r->qp = NULL;
		atomic_set(&r->refcnt, 1);
		atomic_set(&r->reply_ready, 0);
	}
	return r;
}

/* vq_req_free - free the VQ Request Object.  It is assumed the verbs handler
 * has already freed the VQ Reply Buffer if it existed.
 */
void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r)
{
	r->reply_msg = 0;
	if (atomic_dec_and_test(&r->refcnt)) {
		kfree(r);
	}
}

/* vq_req_get - reference a VQ Request Object.  Done
 * only in the kernel verbs handlers.
 */
void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *r)
{
	atomic_inc(&r->refcnt);
}

/* vq_req_put - dereference and potentially free a VQ Request Object.
 *
 * This is only called by handle_vq() on the
 * interrupt when it is done processing
 * a verb reply message.  If the associated
 * kernel verbs handler has already bailed,
 * then this put will actually free the VQ
 * Request object _and_ the VQ Reply Buffer
 * if it exists.
 */
void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r)
{
	if (atomic_dec_and_test(&r->refcnt)) {
		if (r->reply_msg != 0)
			vq_repbuf_free(c2dev,
				       (void *) (unsigned long) r->reply_msg);
		kfree(r);
	}
}

/*
 * vq_repbuf_alloc - allocate a VQ Reply Buffer.
 */
void *vq_repbuf_alloc(struct c2_dev *c2dev)
{
	return kmem_cache_alloc(c2dev->host_msg_cache, GFP_ATOMIC);
}

/*
 * vq_send_wr - post a verbs request message to the Verbs Request Queue.
 * If a message is not available in the MQ, then block until one is available.
 * NOTE: handle_mq() on the interrupt context will wake up threads blocked here.
 * When the adapter drains the Verbs Request Queue,
 * it inserts MQ index 0 into the
 * adapter->host activity fifo and interrupts the host.
 */
int vq_send_wr(struct c2_dev *c2dev, union c2wr *wr)
{
	void *msg;
	wait_queue_t __wait;

	/*
	 * grab adapter vq lock
	 */
	spin_lock(&c2dev->vqlock);

	/*
	 * allocate msg
	 */
	msg = c2_mq_alloc(&c2dev->req_vq);

	/*
	 * If we cannot get a msg, then we'll wait.
	 * When messages become available, the int handler will wake_up()
	 * any waiters.
	 */
	while (msg == NULL) {
		pr_debug("%s:%d no available msg in VQ, waiting...\n",
			 __func__, __LINE__);
		init_waitqueue_entry(&__wait, current);
		add_wait_queue(&c2dev->req_vq_wo, &__wait);
		spin_unlock(&c2dev->vqlock);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!c2_mq_full(&c2dev->req_vq)) {
				break;
			}
			if (!signal_pending(current)) {
				schedule_timeout(1 * HZ);	/* 1 second... */
				continue;
			}
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&c2dev->req_vq_wo, &__wait);
			return -EINTR;
		}
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&c2dev->req_vq_wo, &__wait);
		spin_lock(&c2dev->vqlock);
		msg = c2_mq_alloc(&c2dev->req_vq);
	}

	/*
	 * copy wr into adapter msg
	 */
	memcpy(msg, wr, c2dev->req_vq.msg_size);

	/*
	 * post msg
	 */
	c2_mq_produce(&c2dev->req_vq);

	/*
	 * release adapter vq lock
	 */
	spin_unlock(&c2dev->vqlock);
	return 0;
}

/*
 * vq_wait_for_reply - block until the adapter posts a Verb Reply Message.
 */
int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req)
{
	if (!wait_event_timeout(req->wait_object,
				atomic_read(&req->reply_ready),
				60*HZ))
		return -ETIMEDOUT;

	return 0;
}

/*
 * vq_repbuf_free - Free a Verbs Reply Buffer.
 */
void vq_repbuf_free(struct c2_dev *c2dev, void *reply)
{
	kmem_cache_free(c2dev->host_msg_cache, reply);
}
gpl-2.0
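
The long comment at the top of c2_vq.c describes a reference-counting handshake between the kernel verbs handlers and the interrupt path: the handler takes a second reference before posting the request, so the object survives even if the handler bails out before the adapter replies, and handle_vq() drops that reference when it is done. The sketch below shows the call sequence a verbs handler would follow using the vq_* helpers above; it is illustration only, and the work-request context field (wr->hdr.context) and the exact error handling are assumptions, not copied from the driver.

/*
 * Sketch only: how a verbs handler is expected to drive the vq_* helpers.
 */
static int example_post_verb(struct c2_dev *c2dev, union c2wr *wr)
{
	struct c2_vq_req *vq_req;
	void *reply;
	int err;

	vq_req = vq_req_alloc(c2dev);		/* refcnt == 1 */
	if (!vq_req)
		return -ENOMEM;

	/* Reflected back by the adapter so handle_vq() can find us. */
	wr->hdr.context = (u64) (unsigned long) vq_req;

	vq_req_get(c2dev, vq_req);		/* 2nd ref for the reply path */

	err = vq_send_wr(c2dev, wr);
	if (err) {
		vq_req_put(c2dev, vq_req);	/* the reply will never come */
		goto out;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto out;			/* timed out; the reply path keeps its ref */

	reply = (void *) (unsigned long) vq_req->reply_msg;
	if (reply)
		vq_repbuf_free(c2dev, reply);	/* repbuf first ... */
out:
	vq_req_free(c2dev, vq_req);		/* ... then the request object */
	return err;
}
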
metacloud/linux
fs/nls/nls_cp862.c
12566
19506
/* * linux/fs/nls/nls_cp862.c * * Charset cp862 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x05d0, 0x05d1, 0x05d2, 0x05d3, 0x05d4, 0x05d5, 0x05d6, 0x05d7, 0x05d8, 0x05d9, 0x05da, 0x05db, 0x05dc, 0x05dd, 0x05de, 0x05df, /* 0x90*/ 0x05e0, 0x05e1, 0x05e2, 0x05e3, 0x05e4, 0x05e5, 0x05e6, 0x05e7, 0x05e8, 0x05e9, 0x05ea, 0x00a2, 0x00a3, 0x00a5, 0x20a7, 0x0192, /* 0xa0*/ 0x00e1, 0x00ed, 0x00f3, 0x00fa, 0x00f1, 0x00d1, 0x00aa, 0x00ba, 0x00bf, 0x2310, 0x00ac, 0x00bd, 0x00bc, 0x00a1, 0x00ab, 0x00bb, /* 0xb0*/ 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556, 0x2555, 0x2563, 0x2551, 0x2557, 0x255d, 0x255c, 0x255b, 0x2510, /* 0xc0*/ 0x2514, 0x2534, 0x252c, 0x251c, 0x2500, 0x253c, 0x255e, 0x255f, 0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256c, 0x2567, /* 0xd0*/ 0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256b, 0x256a, 0x2518, 0x250c, 0x2588, 0x2584, 0x258c, 0x2590, 0x2580, /* 0xe0*/ 0x03b1, 0x00df, 0x0393, 0x03c0, 0x03a3, 0x03c3, 0x00b5, 0x03c4, 0x03a6, 0x0398, 0x03a9, 0x03b4, 0x221e, 0x03c6, 0x03b5, 0x2229, /* 0xf0*/ 0x2261, 0x00b1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00f7, 0x2248, 0x00b0, 0x2219, 0x00b7, 0x221a, 0x207f, 0x00b2, 0x25a0, 0x00a0, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f 
*/ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xff, 0xad, 0x9b, 0x9c, 0x00, 0x9d, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0xa6, 0xae, 0xaa, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0xf8, 0xf1, 0xfd, 0x00, 0x00, 0xe6, 0x00, 0xfa, /* 0xb0-0xb7 */ 0x00, 0x00, 0xa7, 0xaf, 0xac, 0xab, 0x00, 0xa8, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe1, /* 0xd8-0xdf */ 0x00, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa1, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0xa4, 0x00, 0xa2, 0x00, 0x00, 0x00, 0xf6, /* 0xf0-0xf7 */ 0x00, 0x00, 0xa3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page01[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ }; static const unsigned char page03[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0xe2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0xe4, 0x00, 0x00, 0xe8, 0x00, /* 0xa0-0xa7 */ 0x00, 0xea, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0xe0, 0x00, 0x00, 0xeb, 0xee, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0xe3, 0x00, 0x00, 0xe5, 0xe7, 0x00, 0xed, 0x00, /* 0xc0-0xc7 */ }; static const unsigned char page05[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0xd0-0xd7 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0xd8-0xdf */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0xe0-0xe7 */ 0x98, 0x99, 0x9a, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, /* 0x78-0x7f */ 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, /* 0xa0-0xa7 */ }; static const unsigned char page22[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0xf9, 0xfb, 0x00, 0x00, 0x00, 0xec, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0xf0, 0x00, 0x00, 0xf3, 0xf2, 0x00, 0x00, /* 0x60-0x67 */ }; static const unsigned char page23[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0xa9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0xf4, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ }; static const unsigned char page25[256] = { 0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, /* 0x50-0x57 */ 0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, /* 0x58-0x5f */ 0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, /* 0x60-0x67 */ 0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ }; static const unsigned char *const page_uni2charset[256] = { page00, page01, NULL, page03, NULL, page05, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, NULL, page22, page23, NULL, page25, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 
0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa4, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0x00, 0xe3, 0xe5, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xed, 0x00, 0x00, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0xa5, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 
0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0x00, 0xe1, 0xe2, 0x00, 0xe4, 0xe4, 0x00, 0x00, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0x00, 0xec, 0xe8, 0x00, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "cp862", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_cp862(void) { return register_nls(&table); } static void __exit exit_nls_cp862(void) { unregister_nls(&table); } module_init(init_nls_cp862) module_exit(exit_nls_cp862) MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
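The cp862 entry above is, like other kernel NLS modules, essentially three 256-entry lookup tables plus two thin accessors. As a rough sketch of how such per-byte case tables are consumed by callers, here is a small standalone C program; the demo_charset2upper table and the demo_toupper() helper are invented for the example and only cover ASCII, they are not the cp862 data above. A 0 entry is treated as "no mapping, keep the byte", which is the convention the real tables follow.

#include <stdio.h>

/* Illustrative 256-entry upper-case table: identity except for ASCII 'a'-'z'.
 * This is NOT the cp862 table from the module above, just a stand-in. */
static unsigned char demo_charset2upper[256];

static void demo_init(void)
{
	int i;
	for (i = 0; i < 256; i++)
		demo_charset2upper[i] = (unsigned char)i;
	for (i = 'a'; i <= 'z'; i++)
		demo_charset2upper[i] = (unsigned char)(i - 'a' + 'A');
	demo_charset2upper[0x9f] = 0;	/* pretend this byte has no upper-case form */
}

/* A 0 entry means "no mapping": return the byte unchanged. */
static unsigned char demo_toupper(unsigned char c)
{
	unsigned char nc = demo_charset2upper[c];
	return nc ? nc : c;
}

int main(void)
{
	const unsigned char in[] = "shalom";
	unsigned char out[sizeof(in)];
	size_t i;

	demo_init();
	for (i = 0; i < sizeof(in); i++)
		out[i] = demo_toupper(in[i]);
	printf("%s -> %s\n", in, out);
	return 0;
}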
X-ROM/android_kernel_motorola_msm8226
sound/aoa/core/gpio-pmf.c
13078
6267
/* * Apple Onboard Audio pmf GPIOs * * Copyright 2006 Johannes Berg <johannes@sipsolutions.net> * * GPL v2, can be found in COPYING. */ #include <linux/slab.h> #include <asm/pmac_feature.h> #include <asm/pmac_pfunc.h> #include "../aoa.h" #define PMF_GPIO(name, bit) \ static void pmf_gpio_set_##name(struct gpio_runtime *rt, int on)\ { \ struct pmf_args args = { .count = 1, .u[0].v = !on }; \ int rc; \ \ if (unlikely(!rt)) return; \ rc = pmf_call_function(rt->node, #name "-mute", &args); \ if (rc && rc != -ENODEV) \ printk(KERN_WARNING "pmf_gpio_set_" #name \ " failed, rc: %d\n", rc); \ rt->implementation_private &= ~(1<<bit); \ rt->implementation_private |= (!!on << bit); \ } \ static int pmf_gpio_get_##name(struct gpio_runtime *rt) \ { \ if (unlikely(!rt)) return 0; \ return (rt->implementation_private>>bit)&1; \ } PMF_GPIO(headphone, 0); PMF_GPIO(amp, 1); PMF_GPIO(lineout, 2); static void pmf_gpio_set_hw_reset(struct gpio_runtime *rt, int on) { struct pmf_args args = { .count = 1, .u[0].v = !!on }; int rc; if (unlikely(!rt)) return; rc = pmf_call_function(rt->node, "hw-reset", &args); if (rc) printk(KERN_WARNING "pmf_gpio_set_hw_reset" " failed, rc: %d\n", rc); } static void pmf_gpio_all_amps_off(struct gpio_runtime *rt) { int saved; if (unlikely(!rt)) return; saved = rt->implementation_private; pmf_gpio_set_headphone(rt, 0); pmf_gpio_set_amp(rt, 0); pmf_gpio_set_lineout(rt, 0); rt->implementation_private = saved; } static void pmf_gpio_all_amps_restore(struct gpio_runtime *rt) { int s; if (unlikely(!rt)) return; s = rt->implementation_private; pmf_gpio_set_headphone(rt, (s>>0)&1); pmf_gpio_set_amp(rt, (s>>1)&1); pmf_gpio_set_lineout(rt, (s>>2)&1); } static void pmf_handle_notify(struct work_struct *work) { struct gpio_notification *notif = container_of(work, struct gpio_notification, work.work); mutex_lock(&notif->mutex); if (notif->notify) notif->notify(notif->data); mutex_unlock(&notif->mutex); } static void pmf_gpio_init(struct gpio_runtime *rt) { pmf_gpio_all_amps_off(rt); rt->implementation_private = 0; INIT_DELAYED_WORK(&rt->headphone_notify.work, pmf_handle_notify); INIT_DELAYED_WORK(&rt->line_in_notify.work, pmf_handle_notify); INIT_DELAYED_WORK(&rt->line_out_notify.work, pmf_handle_notify); mutex_init(&rt->headphone_notify.mutex); mutex_init(&rt->line_in_notify.mutex); mutex_init(&rt->line_out_notify.mutex); } static void pmf_gpio_exit(struct gpio_runtime *rt) { pmf_gpio_all_amps_off(rt); rt->implementation_private = 0; if (rt->headphone_notify.gpio_private) pmf_unregister_irq_client(rt->headphone_notify.gpio_private); if (rt->line_in_notify.gpio_private) pmf_unregister_irq_client(rt->line_in_notify.gpio_private); if (rt->line_out_notify.gpio_private) pmf_unregister_irq_client(rt->line_out_notify.gpio_private); /* make sure no work is pending before freeing * all things */ cancel_delayed_work_sync(&rt->headphone_notify.work); cancel_delayed_work_sync(&rt->line_in_notify.work); cancel_delayed_work_sync(&rt->line_out_notify.work); mutex_destroy(&rt->headphone_notify.mutex); mutex_destroy(&rt->line_in_notify.mutex); mutex_destroy(&rt->line_out_notify.mutex); kfree(rt->headphone_notify.gpio_private); kfree(rt->line_in_notify.gpio_private); kfree(rt->line_out_notify.gpio_private); } static void pmf_handle_notify_irq(void *data) { struct gpio_notification *notif = data; schedule_delayed_work(&notif->work, 0); } static int pmf_set_notify(struct gpio_runtime *rt, enum notify_type type, notify_func_t notify, void *data) { struct gpio_notification *notif; notify_func_t old; struct 
pmf_irq_client *irq_client; char *name; int err = -EBUSY; switch (type) { case AOA_NOTIFY_HEADPHONE: notif = &rt->headphone_notify; name = "headphone-detect"; break; case AOA_NOTIFY_LINE_IN: notif = &rt->line_in_notify; name = "linein-detect"; break; case AOA_NOTIFY_LINE_OUT: notif = &rt->line_out_notify; name = "lineout-detect"; break; default: return -EINVAL; } mutex_lock(&notif->mutex); old = notif->notify; if (!old && !notify) { err = 0; goto out_unlock; } if (old && notify) { if (old == notify && notif->data == data) err = 0; goto out_unlock; } if (old && !notify) { irq_client = notif->gpio_private; pmf_unregister_irq_client(irq_client); kfree(irq_client); notif->gpio_private = NULL; } if (!old && notify) { irq_client = kzalloc(sizeof(struct pmf_irq_client), GFP_KERNEL); if (!irq_client) { err = -ENOMEM; goto out_unlock; } irq_client->data = notif; irq_client->handler = pmf_handle_notify_irq; irq_client->owner = THIS_MODULE; err = pmf_register_irq_client(rt->node, name, irq_client); if (err) { printk(KERN_ERR "snd-aoa: gpio layer failed to" " register %s irq (%d)\n", name, err); kfree(irq_client); goto out_unlock; } notif->gpio_private = irq_client; } notif->notify = notify; notif->data = data; err = 0; out_unlock: mutex_unlock(&notif->mutex); return err; } static int pmf_get_detect(struct gpio_runtime *rt, enum notify_type type) { char *name; int err = -EBUSY, ret; struct pmf_args args = { .count = 1, .u[0].p = &ret }; switch (type) { case AOA_NOTIFY_HEADPHONE: name = "headphone-detect"; break; case AOA_NOTIFY_LINE_IN: name = "linein-detect"; break; case AOA_NOTIFY_LINE_OUT: name = "lineout-detect"; break; default: return -EINVAL; } err = pmf_call_function(rt->node, name, &args); if (err) return err; return ret; } static struct gpio_methods methods = { .init = pmf_gpio_init, .exit = pmf_gpio_exit, .all_amps_off = pmf_gpio_all_amps_off, .all_amps_restore = pmf_gpio_all_amps_restore, .set_headphone = pmf_gpio_set_headphone, .set_speakers = pmf_gpio_set_amp, .set_lineout = pmf_gpio_set_lineout, .set_hw_reset = pmf_gpio_set_hw_reset, .get_headphone = pmf_gpio_get_headphone, .get_speakers = pmf_gpio_get_amp, .get_lineout = pmf_gpio_get_lineout, .set_notify = pmf_set_notify, .get_detect = pmf_get_detect, }; struct gpio_methods *pmf_gpio_methods = &methods; EXPORT_SYMBOL_GPL(pmf_gpio_methods);
gpl-2.0
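gpio-pmf.c above keeps the headphone, amp and lineout switch states packed into single bits of rt->implementation_private, so that all_amps_off() can mute everything while remembering what was on and all_amps_restore() can replay it later. The standalone sketch below reproduces only that bit-packing pattern; struct demo_rt and the demo_* functions are invented names, and where the real driver calls pmf_call_function() this version just prints.

#include <stdio.h>

/* Stand-in for struct gpio_runtime: only the packed state word matters here. */
struct demo_rt {
	int state;	/* bit 0: headphone, bit 1: amp, bit 2: lineout */
};

static void demo_set(struct demo_rt *rt, int bit, int on)
{
	rt->state &= ~(1 << bit);
	rt->state |= (!!on) << bit;
	printf("gpio %d -> %d\n", bit, !!on);
}

static int demo_get(const struct demo_rt *rt, int bit)
{
	return (rt->state >> bit) & 1;
}

/* Mute everything but keep the remembered bits, like pmf_gpio_all_amps_off(). */
static void demo_all_off(struct demo_rt *rt)
{
	int saved = rt->state;
	demo_set(rt, 0, 0);
	demo_set(rt, 1, 0);
	demo_set(rt, 2, 0);
	rt->state = saved;
}

/* Replay the remembered bits, like pmf_gpio_all_amps_restore(). */
static void demo_all_restore(struct demo_rt *rt)
{
	int s = rt->state;
	demo_set(rt, 0, (s >> 0) & 1);
	demo_set(rt, 1, (s >> 1) & 1);
	demo_set(rt, 2, (s >> 2) & 1);
}

int main(void)
{
	struct demo_rt rt = { 0 };

	demo_set(&rt, 0, 1);	/* headphone on */
	demo_set(&rt, 2, 1);	/* lineout on */
	demo_all_off(&rt);
	demo_all_restore(&rt);
	printf("headphone=%d amp=%d lineout=%d\n",
	       demo_get(&rt, 0), demo_get(&rt, 1), demo_get(&rt, 2));
	return 0;
}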
F4uzan/mono_hima
net/netfilter/xt_CONNSECMARK.c
13590
3666
/* * This module is used to copy security markings from packets * to connections, and restore security markings from connections * back to packets. This would normally be performed in conjunction * with the SECMARK target and state match. * * Based somewhat on CONNMARK: * Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com> * by Henrik Nordstrom <hno@marasystems.com> * * (C) 2006,2008 Red Hat, Inc., James Morris <jmorris@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/skbuff.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_CONNSECMARK.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_ecache.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("James Morris <jmorris@redhat.com>"); MODULE_DESCRIPTION("Xtables: target for copying between connection and security mark"); MODULE_ALIAS("ipt_CONNSECMARK"); MODULE_ALIAS("ip6t_CONNSECMARK"); /* * If the packet has a security mark and the connection does not, copy * the security mark from the packet to the connection. */ static void secmark_save(const struct sk_buff *skb) { if (skb->secmark) { struct nf_conn *ct; enum ip_conntrack_info ctinfo; ct = nf_ct_get(skb, &ctinfo); if (ct && !ct->secmark) { ct->secmark = skb->secmark; nf_conntrack_event_cache(IPCT_SECMARK, ct); } } } /* * If packet has no security mark, and the connection does, restore the * security mark from the connection to the packet. */ static void secmark_restore(struct sk_buff *skb) { if (!skb->secmark) { const struct nf_conn *ct; enum ip_conntrack_info ctinfo; ct = nf_ct_get(skb, &ctinfo); if (ct && ct->secmark) skb->secmark = ct->secmark; } } static unsigned int connsecmark_tg(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_connsecmark_target_info *info = par->targinfo; switch (info->mode) { case CONNSECMARK_SAVE: secmark_save(skb); break; case CONNSECMARK_RESTORE: secmark_restore(skb); break; default: BUG(); } return XT_CONTINUE; } static int connsecmark_tg_check(const struct xt_tgchk_param *par) { const struct xt_connsecmark_target_info *info = par->targinfo; int ret; if (strcmp(par->table, "mangle") != 0 && strcmp(par->table, "security") != 0) { pr_info("target only valid in the \'mangle\' " "or \'security\' tables, not \'%s\'.\n", par->table); return -EINVAL; } switch (info->mode) { case CONNSECMARK_SAVE: case CONNSECMARK_RESTORE: break; default: pr_info("invalid mode: %hu\n", info->mode); return -EINVAL; } ret = nf_ct_l3proto_try_module_get(par->family); if (ret < 0) pr_info("cannot load conntrack support for proto=%u\n", par->family); return ret; } static void connsecmark_tg_destroy(const struct xt_tgdtor_param *par) { nf_ct_l3proto_module_put(par->family); } static struct xt_target connsecmark_tg_reg __read_mostly = { .name = "CONNSECMARK", .revision = 0, .family = NFPROTO_UNSPEC, .checkentry = connsecmark_tg_check, .destroy = connsecmark_tg_destroy, .target = connsecmark_tg, .targetsize = sizeof(struct xt_connsecmark_target_info), .me = THIS_MODULE, }; static int __init connsecmark_tg_init(void) { return xt_register_target(&connsecmark_tg_reg); } static void __exit connsecmark_tg_exit(void) { xt_unregister_target(&connsecmark_tg_reg); } module_init(connsecmark_tg_init); module_exit(connsecmark_tg_exit);
gpl-2.0
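The CONNSECMARK target above only ever moves a single u32 between skb->secmark and ct->secmark, in whichever direction the rule selects, and only when the destination is still unset. The mock below shows that copy-if-unset logic in isolation; demo_skb and demo_conn are invented stand-ins, and the real code resolves the conntrack entry with nf_ct_get() and emits an IPCT_SECMARK event on save. In practice this pairs with a SECMARK rule: SECMARK labels the first packet, the --save mode copies the label onto the connection, and --restore puts it back on later packets of the same flow.

#include <stdio.h>

typedef unsigned int u32;

struct demo_skb  { u32 secmark; };
struct demo_conn { u32 secmark; };

/* SAVE: packet has a mark, connection does not -> copy packet to connection. */
static void demo_secmark_save(const struct demo_skb *skb, struct demo_conn *ct)
{
	if (skb->secmark && ct && !ct->secmark)
		ct->secmark = skb->secmark;
}

/* RESTORE: packet has no mark, connection does -> copy connection to packet. */
static void demo_secmark_restore(struct demo_skb *skb, const struct demo_conn *ct)
{
	if (!skb->secmark && ct && ct->secmark)
		skb->secmark = ct->secmark;
}

int main(void)
{
	struct demo_conn ct = { 0 };
	struct demo_skb first = { .secmark = 42 };	/* labelled by SECMARK earlier */
	struct demo_skb reply = { 0 };			/* later packet on the same flow */

	demo_secmark_save(&first, &ct);		/* connection inherits 42 */
	demo_secmark_restore(&reply, &ct);	/* reply packet gets 42 back */
	printf("ct=%u reply=%u\n", ct.secmark, reply.secmark);
	return 0;
}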
stribika/poppler
qt5/tests/check_optcontent.cpp
23
17553
#include <QtTest/QtTest> #include "PDFDoc.h" #include "GlobalParams.h" #include <poppler-qt5.h> class TestOptionalContent: public QObject { Q_OBJECT private slots: void checkVisPolicy(); void checkNestedLayers(); void checkNoOptionalContent(); void checkIsVisible(); void checkVisibilitySetting(); void checkRadioButtons(); }; void TestOptionalContent::checkVisPolicy() { Poppler::Document *doc; doc = Poppler::Document::load(TESTDATADIR "/unittestcases/vis_policy_test.pdf"); QVERIFY( doc ); QVERIFY( doc->hasOptionalContent() ); Poppler::OptContentModel *optContent = doc->optionalContentModel(); QModelIndex index; index = optContent->index( 0, 0, QModelIndex() ); QCOMPARE( optContent->data( index, Qt::DisplayRole ).toString(), QString( "A" ) ); QCOMPARE( static_cast<Qt::CheckState>( optContent->data( index, Qt::CheckStateRole ).toInt() ), Qt::Checked ); index = optContent->index( 1, 0, QModelIndex() ); QCOMPARE( optContent->data( index, Qt::DisplayRole ).toString(), QString( "B" ) ); QCOMPARE( static_cast<Qt::CheckState>( optContent->data( index, Qt::CheckStateRole ).toInt() ), Qt::Checked ); delete doc; } void TestOptionalContent::checkNestedLayers() { Poppler::Document *doc; doc = Poppler::Document::load(TESTDATADIR "/unittestcases/NestedLayers.pdf"); QVERIFY( doc ); QVERIFY( doc->hasOptionalContent() ); Poppler::OptContentModel *optContent = doc->optionalContentModel(); QModelIndex index; index = optContent->index( 0, 0, QModelIndex() ); QCOMPARE( optContent->data( index, Qt::DisplayRole ).toString(), QString( "Black Text and Green Snow" ) ); QCOMPARE( static_cast<Qt::CheckState>( optContent->data( index, Qt::CheckStateRole ).toInt() ), Qt::Unchecked ); index = optContent->index( 1, 0, QModelIndex() ); QCOMPARE( optContent->data( index, Qt::DisplayRole ).toString(), QString( "Mountains and Image" ) ); QCOMPARE( static_cast<Qt::CheckState>( optContent->data( index, Qt::CheckStateRole ).toInt() ), Qt::Checked ); // This is a sub-item of "Mountains and Image" QModelIndex subindex = optContent->index( 0, 0, index ); QCOMPARE( optContent->data( subindex, Qt::DisplayRole ).toString(), QString( "Image" ) ); QCOMPARE( static_cast<Qt::CheckState>( optContent->data( index, Qt::CheckStateRole ).toInt() ), Qt::Checked ); index = optContent->index( 2, 0, QModelIndex() ); QCOMPARE( optContent->data( index, Qt::DisplayRole ).toString(), QString( "Starburst" ) ); QCOMPARE( static_cast<Qt::CheckState>( optContent->data( index, Qt::CheckStateRole ).toInt() ), Qt::Checked ); index = optContent->index( 3, 0, QModelIndex() ); QCOMPARE( optContent->data( index, Qt::DisplayRole ).toString(), QString( "Watermark" ) ); QCOMPARE( static_cast<Qt::CheckState>( optContent->data( index, Qt::CheckStateRole ).toInt() ), Qt::Unchecked ); delete doc; } void TestOptionalContent::checkNoOptionalContent() { Poppler::Document *doc; doc = Poppler::Document::load(TESTDATADIR "/unittestcases/orientation.pdf"); QVERIFY( doc ); QCOMPARE( doc->hasOptionalContent(), false ); delete doc; } void TestOptionalContent::checkIsVisible() { GooString *fileName = new GooString(TESTDATADIR "/unittestcases/vis_policy_test.pdf"); globalParams = new GlobalParams(); PDFDoc *doc = new PDFDoc( fileName ); QVERIFY( doc ); OCGs *ocgs = doc->getOptContentConfig(); QVERIFY( ocgs ); XRef *xref = doc->getXRef(); Object obj; // In this test, both Ref(21,0) and Ref(2,0) are set to On // AnyOn, one element array: // 22 0 obj<</Type/OCMD/OCGs[21 0 R]/P/AnyOn>>endobj xref->fetch( 22, 0, &obj ); QVERIFY( obj.isDict() ); QVERIFY( ocgs->optContentIsVisible( &obj ) 
); obj.free(); // Same again, looking for any leaks or dubious free()'s xref->fetch( 22, 0, &obj ); QVERIFY( obj.isDict() ); QVERIFY( ocgs->optContentIsVisible( &obj ) ); obj.free(); // AnyOff, one element array: // 29 0 obj<</Type/OCMD/OCGs[21 0 R]/P/AnyOff>>endobj xref->fetch( 29, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), false ); obj.free(); // AllOn, one element array: // 36 0 obj<</Type/OCMD/OCGs[28 0 R]/P/AllOn>>endobj xref->fetch( 36, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), true ); obj.free(); // AllOff, one element array: // 43 0 obj<</Type/OCMD/OCGs[28 0 R]/P/AllOff>>endobj xref->fetch( 43, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), false ); obj.free(); // AnyOn, multi-element array: // 50 0 obj<</Type/OCMD/OCGs[21 0 R 28 0 R]/P/AnyOn>>endobj xref->fetch( 50, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), true ); obj.free(); // AnyOff, multi-element array: // 57 0 obj<</Type/OCMD/P/AnyOff/OCGs[21 0 R 28 0 R]>>endobj xref->fetch( 57, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), false ); obj.free(); // AllOn, multi-element array: // 64 0 obj<</Type/OCMD/P/AllOn/OCGs[21 0 R 28 0 R]>>endobj xref->fetch( 64, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), true ); obj.free(); // AllOff, multi-element array: // 71 0 obj<</Type/OCMD/P/AllOff/OCGs[21 0 R 28 0 R]>>endobj xref->fetch( 71, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), false ); obj.free(); delete doc; delete globalParams; } void TestOptionalContent::checkVisibilitySetting() { globalParams = new GlobalParams(); GooString *fileName = new GooString(TESTDATADIR "/unittestcases/vis_policy_test.pdf"); PDFDoc *doc = new PDFDoc( fileName ); QVERIFY( doc ); OCGs *ocgs = doc->getOptContentConfig(); QVERIFY( ocgs ); XRef *xref = doc->getXRef(); Object obj; // In this test, both Ref(21,0) and Ref(28,0) start On, // based on the file settings Object ref21obj; ref21obj.initRef( 21, 0 ); Ref ref21 = ref21obj.getRef(); OptionalContentGroup *ocgA = ocgs->findOcgByRef( ref21 ); QVERIFY( ocgA ); QVERIFY( (ocgA->getName()->cmp("A")) == 0 ); QCOMPARE( ocgA->getState(), OptionalContentGroup::On ); Object ref28obj; ref28obj.initRef( 28, 0 ); Ref ref28 = ref28obj.getRef(); OptionalContentGroup *ocgB = ocgs->findOcgByRef( ref28 ); QVERIFY( ocgB ); QVERIFY( (ocgB->getName()->cmp("B")) == 0 ); QCOMPARE( ocgB->getState(), OptionalContentGroup::On ); // turn one Off ocgA->setState( OptionalContentGroup::Off ); // AnyOn, one element array: // 22 0 obj<</Type/OCMD/OCGs[21 0 R]/P/AnyOn>>endobj xref->fetch( 22, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), false ); obj.free(); // Same again, looking for any leaks or dubious free()'s xref->fetch( 22, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), false ); obj.free(); // AnyOff, one element array: // 29 0 obj<</Type/OCMD/OCGs[21 0 R]/P/AnyOff>>endobj xref->fetch( 29, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), true ); obj.free(); // AllOn, one element array: // 36 0 obj<</Type/OCMD/OCGs[28 0 R]/P/AllOn>>endobj xref->fetch( 36, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), true ); obj.free(); // AllOff, one element array: // 43 0 obj<</Type/OCMD/OCGs[28 0 R]/P/AllOff>>endobj xref->fetch( 43, 0, &obj ); QVERIFY( 
obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), false ); obj.free(); // AnyOn, multi-element array: // 50 0 obj<</Type/OCMD/OCGs[21 0 R 28 0 R]/P/AnyOn>>endobj xref->fetch( 50, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), true ); obj.free(); // AnyOff, multi-element array: // 57 0 obj<</Type/OCMD/P/AnyOff/OCGs[21 0 R 28 0 R]>>endobj xref->fetch( 57, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), true ); obj.free(); // AllOn, multi-element array: // 64 0 obj<</Type/OCMD/P/AllOn/OCGs[21 0 R 28 0 R]>>endobj xref->fetch( 64, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), false ); obj.free(); // AllOff, multi-element array: // 71 0 obj<</Type/OCMD/P/AllOff/OCGs[21 0 R 28 0 R]>>endobj xref->fetch( 71, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), false ); obj.free(); // Turn the other one off as well (i.e. both are Off) ocgB->setState(OptionalContentGroup::Off); // AnyOn, one element array: // 22 0 obj<</Type/OCMD/OCGs[21 0 R]/P/AnyOn>>endobj xref->fetch( 22, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), false ); obj.free(); // Same again, looking for any leaks or dubious free()'s xref->fetch( 22, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), false ); obj.free(); // AnyOff, one element array: // 29 0 obj<</Type/OCMD/OCGs[21 0 R]/P/AnyOff>>endobj xref->fetch( 29, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), true ); obj.free(); // AllOn, one element array: // 36 0 obj<</Type/OCMD/OCGs[28 0 R]/P/AllOn>>endobj xref->fetch( 36, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), false ); obj.free(); // AllOff, one element array: // 43 0 obj<</Type/OCMD/OCGs[28 0 R]/P/AllOff>>endobj xref->fetch( 43, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), true ); obj.free(); // AnyOn, multi-element array: // 50 0 obj<</Type/OCMD/OCGs[21 0 R 28 0 R]/P/AnyOn>>endobj xref->fetch( 50, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), false ); obj.free(); // AnyOff, multi-element array: // 57 0 obj<</Type/OCMD/P/AnyOff/OCGs[21 0 R 28 0 R]>>endobj xref->fetch( 57, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), true ); obj.free(); // AllOn, multi-element array: // 64 0 obj<</Type/OCMD/P/AllOn/OCGs[21 0 R 28 0 R]>>endobj xref->fetch( 64, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), false ); obj.free(); // AllOff, multi-element array: // 71 0 obj<</Type/OCMD/P/AllOff/OCGs[21 0 R 28 0 R]>>endobj xref->fetch( 71, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), true ); obj.free(); // Turn the first one on again (21 is On, 28 is Off) ocgA->setState(OptionalContentGroup::On); // AnyOn, one element array: // 22 0 obj<</Type/OCMD/OCGs[21 0 R]/P/AnyOn>>endobj xref->fetch( 22, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), true ); obj.free(); // Same again, looking for any leaks or dubious free()'s xref->fetch( 22, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), true ); obj.free(); // AnyOff, one element array: // 29 0 obj<</Type/OCMD/OCGs[21 0 R]/P/AnyOff>>endobj xref->fetch( 29, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), false ); obj.free(); // AllOn, one element array: // 36 0 
obj<</Type/OCMD/OCGs[28 0 R]/P/AllOn>>endobj xref->fetch( 36, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), false ); obj.free(); // AllOff, one element array: // 43 0 obj<</Type/OCMD/OCGs[28 0 R]/P/AllOff>>endobj xref->fetch( 43, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), true ); obj.free(); // AnyOn, multi-element array: // 50 0 obj<</Type/OCMD/OCGs[21 0 R 28 0 R]/P/AnyOn>>endobj xref->fetch( 50, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), true ); obj.free(); // AnyOff, multi-element array: // 57 0 obj<</Type/OCMD/P/AnyOff/OCGs[21 0 R 28 0 R]>>endobj xref->fetch( 57, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), true ); obj.free(); // AllOn, multi-element array: // 64 0 obj<</Type/OCMD/P/AllOn/OCGs[21 0 R 28 0 R]>>endobj xref->fetch( 64, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), false ); obj.free(); // AllOff, multi-element array: // 71 0 obj<</Type/OCMD/P/AllOff/OCGs[21 0 R 28 0 R]>>endobj xref->fetch( 71, 0, &obj ); QVERIFY( obj.isDict() ); QCOMPARE( ocgs->optContentIsVisible( &obj ), false ); obj.free(); delete doc; delete globalParams; } void TestOptionalContent::checkRadioButtons() { Poppler::Document *doc; doc = Poppler::Document::load(TESTDATADIR "/unittestcases/ClarityOCGs.pdf"); QVERIFY( doc ); QVERIFY( doc->hasOptionalContent() ); Poppler::OptContentModel *optContent = doc->optionalContentModel(); QModelIndex index; index = optContent->index( 0, 0, QModelIndex() ); QCOMPARE( optContent->data( index, Qt::DisplayRole ).toString(), QString( "Languages" ) ); QCOMPARE( static_cast<Qt::CheckState>( optContent->data( index, Qt::CheckStateRole ).toInt() ), Qt::Unchecked ); // These are sub-items of the "Languages" label QModelIndex subindex = optContent->index( 0, 0, index ); QCOMPARE( optContent->data( subindex, Qt::DisplayRole ).toString(), QString( "English" ) ); QCOMPARE( static_cast<Qt::CheckState>( optContent->data( subindex, Qt::CheckStateRole ).toInt() ), Qt::Checked ); subindex = optContent->index( 1, 0, index ); QCOMPARE( optContent->data( subindex, Qt::DisplayRole ).toString(), QString( "French" ) ); QCOMPARE( static_cast<Qt::CheckState>( optContent->data( subindex, Qt::CheckStateRole ).toInt() ), Qt::Unchecked ); subindex = optContent->index( 2, 0, index ); QCOMPARE( optContent->data( subindex, Qt::DisplayRole ).toString(), QString( "Japanese" ) ); QCOMPARE( static_cast<Qt::CheckState>( optContent->data( subindex, Qt::CheckStateRole ).toInt() ), Qt::Unchecked ); // RBGroup of languages, so turning on Japanese should turn off English QVERIFY( optContent->setData( subindex, QVariant( true ), Qt::CheckStateRole ) ); subindex = optContent->index( 0, 0, index ); QCOMPARE( optContent->data( subindex, Qt::DisplayRole ).toString(), QString( "English" ) ); QCOMPARE( static_cast<Qt::CheckState>( optContent->data( subindex, Qt::CheckStateRole ).toInt() ), Qt::Unchecked ); subindex = optContent->index( 2, 0, index ); QCOMPARE( optContent->data( subindex, Qt::DisplayRole ).toString(), QString( "Japanese" ) ); QCOMPARE( static_cast<Qt::CheckState>( optContent->data( subindex, Qt::CheckStateRole ).toInt() ), Qt::Checked ); subindex = optContent->index( 1, 0, index ); QCOMPARE( optContent->data( subindex, Qt::DisplayRole ).toString(), QString( "French" ) ); QCOMPARE( static_cast<Qt::CheckState>( optContent->data( subindex, Qt::CheckStateRole ).toInt() ), Qt::Unchecked ); // and turning on French should turn 
off Japanese QVERIFY( optContent->setData( subindex, QVariant( true ), Qt::CheckStateRole ) ); subindex = optContent->index( 0, 0, index ); QCOMPARE( optContent->data( subindex, Qt::DisplayRole ).toString(), QString( "English" ) ); QCOMPARE( static_cast<Qt::CheckState>( optContent->data( subindex, Qt::CheckStateRole ).toInt() ), Qt::Unchecked ); subindex = optContent->index( 2, 0, index ); QCOMPARE( optContent->data( subindex, Qt::DisplayRole ).toString(), QString( "Japanese" ) ); QCOMPARE( static_cast<Qt::CheckState>( optContent->data( subindex, Qt::CheckStateRole ).toInt() ), Qt::Unchecked ); subindex = optContent->index( 1, 0, index ); QCOMPARE( optContent->data( subindex, Qt::DisplayRole ).toString(), QString( "French" ) ); QCOMPARE( static_cast<Qt::CheckState>( optContent->data( subindex, Qt::CheckStateRole ).toInt() ), Qt::Checked ); // and turning off French should leave them all off QVERIFY( optContent->setData( subindex, QVariant( false ), Qt::CheckStateRole ) ); subindex = optContent->index( 0, 0, index ); QCOMPARE( optContent->data( subindex, Qt::DisplayRole ).toString(), QString( "English" ) ); QCOMPARE( static_cast<Qt::CheckState>( optContent->data( subindex, Qt::CheckStateRole ).toInt() ), Qt::Unchecked ); subindex = optContent->index( 2, 0, index ); QCOMPARE( optContent->data( subindex, Qt::DisplayRole ).toString(), QString( "Japanese" ) ); QCOMPARE( static_cast<Qt::CheckState>( optContent->data( subindex, Qt::CheckStateRole ).toInt() ), Qt::Unchecked ); subindex = optContent->index( 1, 0, index ); QCOMPARE( optContent->data( subindex, Qt::DisplayRole ).toString(), QString( "French" ) ); QCOMPARE( static_cast<Qt::CheckState>( optContent->data( subindex, Qt::CheckStateRole ).toInt() ), Qt::Unchecked ); delete doc; } QTEST_MAIN(TestOptionalContent) #include "check_optcontent.moc"
gpl-2.0
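The Qt test above keeps re-checking the four PDF optional-content visibility policies (AnyOn, AnyOff, AllOn, AllOff) as group states change. The expected truth table is compact enough to state directly; the standalone C sketch below (demo_* names are invented, this is not the poppler API) evaluates a policy over an array of group states and, for the "A on, B off" case, reproduces the AnyOn=true, AnyOff=true, AllOn=false, AllOff=false pattern the assertions expect.

#include <stdio.h>

enum demo_policy { ANY_ON, ANY_OFF, ALL_ON, ALL_OFF };

/* states[i] != 0 means group i is On. Returns 1 if the content is visible. */
static int demo_visible(enum demo_policy p, const int *states, int n)
{
	int i, on = 0, off = 0;

	for (i = 0; i < n; i++) {
		if (states[i])
			on++;
		else
			off++;
	}

	switch (p) {
	case ANY_ON:  return on  > 0;	/* visible if at least one group is on  */
	case ANY_OFF: return off > 0;	/* visible if at least one group is off */
	case ALL_ON:  return off == 0;	/* visible only if every group is on    */
	case ALL_OFF: return on  == 0;	/* visible only if every group is off   */
	}
	return 1;
}

int main(void)
{
	int a_on_b_off[] = { 1, 0 };	/* like OCG "A" on, "B" off in the test */

	printf("AnyOn=%d AnyOff=%d AllOn=%d AllOff=%d\n",
	       demo_visible(ANY_ON,  a_on_b_off, 2),
	       demo_visible(ANY_OFF, a_on_b_off, 2),
	       demo_visible(ALL_ON,  a_on_b_off, 2),
	       demo_visible(ALL_OFF, a_on_b_off, 2));
	return 0;
}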
wingrime/android_kernel_swift
arch/x86/kernel/aperture_64.c
791
14455
/* * Firmware replacement code. * * Work around broken BIOSes that don't set an aperture, only set the * aperture in the AGP bridge, or set too small aperture. * * If all fails map the aperture over some low memory. This is cheaper than * doing bounce buffering. The memory is lost. This is done at early boot * because only the bootmem allocator can allocate 32+MB. * * Copyright 2002 Andi Kleen, SuSE Labs. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/mmzone.h> #include <linux/pci_ids.h> #include <linux/pci.h> #include <linux/bitops.h> #include <linux/ioport.h> #include <linux/suspend.h> #include <linux/kmemleak.h> #include <asm/e820.h> #include <asm/io.h> #include <asm/iommu.h> #include <asm/gart.h> #include <asm/pci-direct.h> #include <asm/dma.h> #include <asm/k8.h> #include <asm/x86_init.h> int gart_iommu_aperture; int gart_iommu_aperture_disabled __initdata; int gart_iommu_aperture_allowed __initdata; int fallback_aper_order __initdata = 1; /* 64MB */ int fallback_aper_force __initdata; int fix_aperture __initdata = 1; struct bus_dev_range { int bus; int dev_base; int dev_limit; }; static struct bus_dev_range bus_dev_ranges[] __initdata = { { 0x00, 0x18, 0x20}, { 0xff, 0x00, 0x20}, { 0xfe, 0x00, 0x20} }; static struct resource gart_resource = { .name = "GART", .flags = IORESOURCE_MEM, }; static void __init insert_aperture_resource(u32 aper_base, u32 aper_size) { gart_resource.start = aper_base; gart_resource.end = aper_base + aper_size - 1; insert_resource(&iomem_resource, &gart_resource); } /* This code runs before the PCI subsystem is initialized, so just access the northbridge directly. */ static u32 __init allocate_aperture(void) { u32 aper_size; void *p; /* aper_size should <= 1G */ if (fallback_aper_order > 5) fallback_aper_order = 5; aper_size = (32 * 1024 * 1024) << fallback_aper_order; /* * Aperture has to be naturally aligned. This means a 2GB aperture * won't have much chance of finding a place in the lower 4GB of * memory. Unfortunately we cannot move it up because that would * make the IOMMU useless. */ /* * using 512M as goal, in case kexec will load kernel_big * that will do the on position decompress, and could overlap with * that positon with gart that is used. * sequende: * kernel_small * ==> kexec (with kdump trigger path or previous doesn't shutdown gart) * ==> kernel_small(gart area become e820_reserved) * ==> kexec (with kdump trigger path or previous doesn't shutdown gart) * ==> kerne_big (uncompressed size will be big than 64M or 128M) * so don't use 512M below as gart iommu, leave the space for kernel * code for safe */ p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20); /* * Kmemleak should not scan this block as it may not be mapped via the * kernel direct mapping. 
*/ kmemleak_ignore(p); if (!p || __pa(p)+aper_size > 0xffffffff) { printk(KERN_ERR "Cannot allocate aperture memory hole (%p,%uK)\n", p, aper_size>>10); if (p) free_bootmem(__pa(p), aper_size); return 0; } printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n", aper_size >> 10, __pa(p)); insert_aperture_resource((u32)__pa(p), aper_size); register_nosave_region((u32)__pa(p) >> PAGE_SHIFT, (u32)__pa(p+aper_size) >> PAGE_SHIFT); return (u32)__pa(p); } /* Find a PCI capability */ static u32 __init find_cap(int bus, int slot, int func, int cap) { int bytes; u8 pos; if (!(read_pci_config_16(bus, slot, func, PCI_STATUS) & PCI_STATUS_CAP_LIST)) return 0; pos = read_pci_config_byte(bus, slot, func, PCI_CAPABILITY_LIST); for (bytes = 0; bytes < 48 && pos >= 0x40; bytes++) { u8 id; pos &= ~3; id = read_pci_config_byte(bus, slot, func, pos+PCI_CAP_LIST_ID); if (id == 0xff) break; if (id == cap) return pos; pos = read_pci_config_byte(bus, slot, func, pos+PCI_CAP_LIST_NEXT); } return 0; } /* Read a standard AGPv3 bridge header */ static u32 __init read_agp(int bus, int slot, int func, int cap, u32 *order) { u32 apsize; u32 apsizereg; int nbits; u32 aper_low, aper_hi; u64 aper; u32 old_order; printk(KERN_INFO "AGP bridge at %02x:%02x:%02x\n", bus, slot, func); apsizereg = read_pci_config_16(bus, slot, func, cap + 0x14); if (apsizereg == 0xffffffff) { printk(KERN_ERR "APSIZE in AGP bridge unreadable\n"); return 0; } /* old_order could be the value from NB gart setting */ old_order = *order; apsize = apsizereg & 0xfff; /* Some BIOS use weird encodings not in the AGPv3 table. */ if (apsize & 0xff) apsize |= 0xf00; nbits = hweight16(apsize); *order = 7 - nbits; if ((int)*order < 0) /* < 32MB */ *order = 0; aper_low = read_pci_config(bus, slot, func, 0x10); aper_hi = read_pci_config(bus, slot, func, 0x14); aper = (aper_low & ~((1<<22)-1)) | ((u64)aper_hi << 32); /* * On some sick chips, APSIZE is 0. It means it wants 4G * so let double check that order, and lets trust AMD NB settings: */ printk(KERN_INFO "Aperture from AGP @ %Lx old size %u MB\n", aper, 32 << old_order); if (aper + (32ULL<<(20 + *order)) > 0x100000000ULL) { printk(KERN_INFO "Aperture size %u MB (APSIZE %x) is not right, using settings from NB\n", 32 << *order, apsizereg); *order = old_order; } printk(KERN_INFO "Aperture from AGP @ %Lx size %u MB (APSIZE %x)\n", aper, 32 << *order, apsizereg); if (!aperture_valid(aper, (32*1024*1024) << *order, 32<<20)) return 0; return (u32)aper; } /* * Look for an AGP bridge. Windows only expects the aperture in the * AGP bridge and some BIOS forget to initialize the Northbridge too. * Work around this here. * * Do an PCI bus scan by hand because we're running before the PCI * subsystem. * * All K8 AGP bridges are AGPv3 compliant, so we can do this scan * generically. It's probably overkill to always scan all slots because * the AGP bridges should be always an own bus on the HT hierarchy, * but do it here for future safety. */ static u32 __init search_agp_bridge(u32 *order, int *valid_agp) { int bus, slot, func; /* Poor man's PCI discovery */ for (bus = 0; bus < 256; bus++) { for (slot = 0; slot < 32; slot++) { for (func = 0; func < 8; func++) { u32 class, cap; u8 type; class = read_pci_config(bus, slot, func, PCI_CLASS_REVISION); if (class == 0xffffffff) break; switch (class >> 16) { case PCI_CLASS_BRIDGE_HOST: case PCI_CLASS_BRIDGE_OTHER: /* needed? */ /* AGP bridge? 
*/ cap = find_cap(bus, slot, func, PCI_CAP_ID_AGP); if (!cap) break; *valid_agp = 1; return read_agp(bus, slot, func, cap, order); } /* No multi-function device? */ type = read_pci_config_byte(bus, slot, func, PCI_HEADER_TYPE); if (!(type & 0x80)) break; } } } printk(KERN_INFO "No AGP bridge found\n"); return 0; } static int gart_fix_e820 __initdata = 1; static int __init parse_gart_mem(char *p) { if (!p) return -EINVAL; if (!strncmp(p, "off", 3)) gart_fix_e820 = 0; else if (!strncmp(p, "on", 2)) gart_fix_e820 = 1; return 0; } early_param("gart_fix_e820", parse_gart_mem); void __init early_gart_iommu_check(void) { /* * in case it is enabled before, esp for kexec/kdump, * previous kernel already enable that. memset called * by allocate_aperture/__alloc_bootmem_nopanic cause restart. * or second kernel have different position for GART hole. and new * kernel could use hole as RAM that is still used by GART set by * first kernel * or BIOS forget to put that in reserved. * try to update e820 to make that region as reserved. */ u32 agp_aper_base = 0, agp_aper_order = 0; int i, fix, slot, valid_agp = 0; u32 ctl; u32 aper_size = 0, aper_order = 0, last_aper_order = 0; u64 aper_base = 0, last_aper_base = 0; int aper_enabled = 0, last_aper_enabled = 0, last_valid = 0; if (!early_pci_allowed()) return; /* This is mostly duplicate of iommu_hole_init */ agp_aper_base = search_agp_bridge(&agp_aper_order, &valid_agp); fix = 0; for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) { int bus; int dev_base, dev_limit; bus = bus_dev_ranges[i].bus; dev_base = bus_dev_ranges[i].dev_base; dev_limit = bus_dev_ranges[i].dev_limit; for (slot = dev_base; slot < dev_limit; slot++) { if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00))) continue; ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL); aper_enabled = ctl & AMD64_GARTEN; aper_order = (ctl >> 1) & 7; aper_size = (32 * 1024 * 1024) << aper_order; aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff; aper_base <<= 25; if (last_valid) { if ((aper_order != last_aper_order) || (aper_base != last_aper_base) || (aper_enabled != last_aper_enabled)) { fix = 1; break; } } last_aper_order = aper_order; last_aper_base = aper_base; last_aper_enabled = aper_enabled; last_valid = 1; } } if (!fix && !aper_enabled) return; if (!aper_base || !aper_size || aper_base + aper_size > 0x100000000UL) fix = 1; if (gart_fix_e820 && !fix && aper_enabled) { if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) { /* reserve it, so we can reuse it in second kernel */ printk(KERN_INFO "update e820 for GART\n"); e820_add_region(aper_base, aper_size, E820_RESERVED); update_e820(); } } if (valid_agp) return; /* disable them all at first */ for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) { int bus; int dev_base, dev_limit; bus = bus_dev_ranges[i].bus; dev_base = bus_dev_ranges[i].dev_base; dev_limit = bus_dev_ranges[i].dev_limit; for (slot = dev_base; slot < dev_limit; slot++) { if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00))) continue; ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL); ctl &= ~AMD64_GARTEN; write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl); } } } static int __initdata printed_gart_size_msg; void __init gart_iommu_hole_init(void) { u32 agp_aper_base = 0, agp_aper_order = 0; u32 aper_size, aper_alloc = 0, aper_order = 0, last_aper_order = 0; u64 aper_base, last_aper_base = 0; int fix, slot, valid_agp = 0; int i, node; if (gart_iommu_aperture_disabled || !fix_aperture || !early_pci_allowed()) return; 
printk(KERN_INFO "Checking aperture...\n"); if (!fallback_aper_force) agp_aper_base = search_agp_bridge(&agp_aper_order, &valid_agp); fix = 0; node = 0; for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) { int bus; int dev_base, dev_limit; u32 ctl; bus = bus_dev_ranges[i].bus; dev_base = bus_dev_ranges[i].dev_base; dev_limit = bus_dev_ranges[i].dev_limit; for (slot = dev_base; slot < dev_limit; slot++) { if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00))) continue; iommu_detected = 1; gart_iommu_aperture = 1; x86_init.iommu.iommu_init = gart_iommu_init; ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL); /* * Before we do anything else disable the GART. It may * still be enabled if we boot into a crash-kernel here. * Reconfiguring the GART while it is enabled could have * unknown side-effects. */ ctl &= ~GARTEN; write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl); aper_order = (ctl >> 1) & 7; aper_size = (32 * 1024 * 1024) << aper_order; aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff; aper_base <<= 25; printk(KERN_INFO "Node %d: aperture @ %Lx size %u MB\n", node, aper_base, aper_size >> 20); node++; if (!aperture_valid(aper_base, aper_size, 64<<20)) { if (valid_agp && agp_aper_base && agp_aper_base == aper_base && agp_aper_order == aper_order) { /* the same between two setting from NB and agp */ if (!no_iommu && max_pfn > MAX_DMA32_PFN && !printed_gart_size_msg) { printk(KERN_ERR "you are using iommu with agp, but GART size is less than 64M\n"); printk(KERN_ERR "please increase GART size in your BIOS setup\n"); printk(KERN_ERR "if BIOS doesn't have that option, contact your HW vendor!\n"); printed_gart_size_msg = 1; } } else { fix = 1; goto out; } } if ((last_aper_order && aper_order != last_aper_order) || (last_aper_base && aper_base != last_aper_base)) { fix = 1; goto out; } last_aper_order = aper_order; last_aper_base = aper_base; } } out: if (!fix && !fallback_aper_force) { if (last_aper_base) { unsigned long n = (32 * 1024 * 1024) << last_aper_order; insert_aperture_resource((u32)last_aper_base, n); } return; } if (!fallback_aper_force) { aper_alloc = agp_aper_base; aper_order = agp_aper_order; } if (aper_alloc) { /* Got the aperture from the AGP bridge */ } else if ((!no_iommu && max_pfn > MAX_DMA32_PFN) || force_iommu || valid_agp || fallback_aper_force) { printk(KERN_INFO "Your BIOS doesn't leave a aperture memory hole\n"); printk(KERN_INFO "Please enable the IOMMU option in the BIOS setup\n"); printk(KERN_INFO "This costs you %d MB of RAM\n", 32 << fallback_aper_order); aper_order = fallback_aper_order; aper_alloc = allocate_aperture(); if (!aper_alloc) { /* * Could disable AGP and IOMMU here, but it's * probably not worth it. But the later users * cannot deal with bad apertures and turning * on the aperture over memory causes very * strange problems, so it's better to panic * early. */ panic("Not enough memory for aperture"); } } else { return; } /* Fix up the north bridges */ for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) { int bus; int dev_base, dev_limit; bus = bus_dev_ranges[i].bus; dev_base = bus_dev_ranges[i].dev_base; dev_limit = bus_dev_ranges[i].dev_limit; for (slot = dev_base; slot < dev_limit; slot++) { if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00))) continue; /* Don't enable translation yet. That is done later. 
Assume this BIOS didn't initialise the GART so just overwrite all previous bits */ write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, aper_order << 1); write_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE, aper_alloc >> 25); } } set_up_gart_resume(aper_order, aper_alloc); }
gpl-2.0
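read_agp() above turns the AGPv3 APSIZE register into an aperture order by counting set bits: the value is masked to 12 bits, padded to the 0xf00 pattern for the odd BIOS encodings, and then order = 7 - hweight16(apsize), giving an aperture of (32 MB << order). That arithmetic is easy to check in isolation; the sketch below is a plain user-space program with a hand-rolled popcount, not kernel code, and the register values fed to it are arbitrary inputs chosen to exercise the math rather than claimed hardware encodings.

#include <stdio.h>

/* Portable stand-in for the kernel's hweight16(). */
static int demo_hweight16(unsigned int v)
{
	int n = 0;
	v &= 0xffff;
	while (v) {
		n += v & 1;
		v >>= 1;
	}
	return n;
}

/* Mirrors the APSIZE handling in read_agp(): returns the aperture order. */
static int demo_apsize_to_order(unsigned int apsizereg)
{
	unsigned int apsize = apsizereg & 0xfff;
	int order;

	if (apsize & 0xff)		/* pad the odd BIOS encodings */
		apsize |= 0xf00;
	order = 7 - demo_hweight16(apsize);
	return order < 0 ? 0 : order;	/* clamp anything below 32 MB */
}

int main(void)
{
	unsigned int regs[] = { 0x000, 0xf00, 0xf3f };
	size_t i;

	for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
		int order = demo_apsize_to_order(regs[i]);
		printf("APSIZE 0x%03x -> order %d -> %u MB\n",
		       regs[i], order, 32u << order);
	}
	return 0;
}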
scotthartbti/cm_samsung_kernel_dempsey
drivers/dma/dw_dmac.c
1047
38230
/* * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on * AVR32 systems.) * * Copyright (C) 2007-2008 Atmel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include "dw_dmac_regs.h" /* * This supports the Synopsys "DesignWare AHB Central DMA Controller", * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all * of which use ARM any more). See the "Databook" from Synopsys for * information beyond what licensees probably provide. * * The driver has currently been tested only with the Atmel AT32AP7000, * which does not support descriptor writeback. */ /* NOTE: DMS+SMS is system-specific. We should get this information * from the platform code somehow. */ #define DWC_DEFAULT_CTLLO (DWC_CTLL_DST_MSIZE(0) \ | DWC_CTLL_SRC_MSIZE(0) \ | DWC_CTLL_DMS(0) \ | DWC_CTLL_SMS(1) \ | DWC_CTLL_LLP_D_EN \ | DWC_CTLL_LLP_S_EN) /* * This is configuration-dependent and usually a funny size like 4095. * Let's round it down to the nearest power of two. * * Note that this is a transfer count, i.e. if we transfer 32-bit * words, we can do 8192 bytes per descriptor. * * This parameter is also system-specific. */ #define DWC_MAX_COUNT 2048U /* * Number of descriptors to allocate for each channel. This should be * made configurable somehow; preferably, the clients (at least the * ones using slave transfers) should be able to give us a hint. */ #define NR_DESCS_PER_CHANNEL 64 /*----------------------------------------------------------------------*/ /* * Because we're not relying on writeback from the controller (it may not * even be configured into the core!) we don't need to use dma_pool. These * descriptors -- and associated data -- are cacheable. We do need to make * sure their dcache entries are written back before handing them off to * the controller, though. 
*/ static struct device *chan2dev(struct dma_chan *chan) { return &chan->dev->device; } static struct device *chan2parent(struct dma_chan *chan) { return chan->dev->device.parent; } static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) { return list_entry(dwc->active_list.next, struct dw_desc, desc_node); } static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc) { return list_entry(dwc->queue.next, struct dw_desc, desc_node); } static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) { struct dw_desc *desc, *_desc; struct dw_desc *ret = NULL; unsigned int i = 0; spin_lock_bh(&dwc->lock); list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { if (async_tx_test_ack(&desc->txd)) { list_del(&desc->desc_node); ret = desc; break; } dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); i++; } spin_unlock_bh(&dwc->lock); dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); return ret; } static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc) { struct dw_desc *child; list_for_each_entry(child, &desc->tx_list, desc_node) dma_sync_single_for_cpu(chan2parent(&dwc->chan), child->txd.phys, sizeof(child->lli), DMA_TO_DEVICE); dma_sync_single_for_cpu(chan2parent(&dwc->chan), desc->txd.phys, sizeof(desc->lli), DMA_TO_DEVICE); } /* * Move a descriptor, including any children, to the free list. * `desc' must not be on any lists. */ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) { if (desc) { struct dw_desc *child; dwc_sync_desc_for_cpu(dwc, desc); spin_lock_bh(&dwc->lock); list_for_each_entry(child, &desc->tx_list, desc_node) dev_vdbg(chan2dev(&dwc->chan), "moving child desc %p to freelist\n", child); list_splice_init(&desc->tx_list, &dwc->free_list); dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); list_add(&desc->desc_node, &dwc->free_list); spin_unlock_bh(&dwc->lock); } } /* Called with dwc->lock held and bh disabled */ static dma_cookie_t dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc) { dma_cookie_t cookie = dwc->chan.cookie; if (++cookie < 0) cookie = 1; dwc->chan.cookie = cookie; desc->txd.cookie = cookie; return cookie; } /*----------------------------------------------------------------------*/ /* Called with dwc->lock held and bh disabled */ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) { struct dw_dma *dw = to_dw_dma(dwc->chan.device); /* ASSERT: channel is idle */ if (dma_readl(dw, CH_EN) & dwc->mask) { dev_err(chan2dev(&dwc->chan), "BUG: Attempted to start non-idle channel\n"); dev_err(chan2dev(&dwc->chan), " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", channel_readl(dwc, SAR), channel_readl(dwc, DAR), channel_readl(dwc, LLP), channel_readl(dwc, CTL_HI), channel_readl(dwc, CTL_LO)); /* The tasklet will hopefully advance the queue... 
*/ return; } channel_writel(dwc, LLP, first->txd.phys); channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); channel_writel(dwc, CTL_HI, 0); channel_set_bit(dw, CH_EN, dwc->mask); } /*----------------------------------------------------------------------*/ static void dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) { dma_async_tx_callback callback; void *param; struct dma_async_tx_descriptor *txd = &desc->txd; dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); dwc->completed = txd->cookie; callback = txd->callback; param = txd->callback_param; dwc_sync_desc_for_cpu(dwc, desc); list_splice_init(&desc->tx_list, &dwc->free_list); list_move(&desc->desc_node, &dwc->free_list); if (!dwc->chan.private) { struct device *parent = chan2parent(&dwc->chan); if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) dma_unmap_single(parent, desc->lli.dar, desc->len, DMA_FROM_DEVICE); else dma_unmap_page(parent, desc->lli.dar, desc->len, DMA_FROM_DEVICE); } if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) dma_unmap_single(parent, desc->lli.sar, desc->len, DMA_TO_DEVICE); else dma_unmap_page(parent, desc->lli.sar, desc->len, DMA_TO_DEVICE); } } /* * The API requires that no submissions are done from a * callback, so we don't need to drop the lock here */ if (callback) callback(param); } static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) { struct dw_desc *desc, *_desc; LIST_HEAD(list); if (dma_readl(dw, CH_EN) & dwc->mask) { dev_err(chan2dev(&dwc->chan), "BUG: XFER bit set, but channel not idle!\n"); /* Try to continue after resetting the channel... */ channel_clear_bit(dw, CH_EN, dwc->mask); while (dma_readl(dw, CH_EN) & dwc->mask) cpu_relax(); } /* * Submit queued descriptors ASAP, i.e. before we go through * the completed ones. */ if (!list_empty(&dwc->queue)) dwc_dostart(dwc, dwc_first_queued(dwc)); list_splice_init(&dwc->active_list, &list); list_splice_init(&dwc->queue, &dwc->active_list); list_for_each_entry_safe(desc, _desc, &list, desc_node) dwc_descriptor_complete(dwc, desc); } static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) { dma_addr_t llp; struct dw_desc *desc, *_desc; struct dw_desc *child; u32 status_xfer; /* * Clear block interrupt flag before scanning so that we don't * miss any, and read LLP before RAW_XFER to ensure it is * valid if we decide to scan the list. */ dma_writel(dw, CLEAR.BLOCK, dwc->mask); llp = channel_readl(dwc, LLP); status_xfer = dma_readl(dw, RAW.XFER); if (status_xfer & dwc->mask) { /* Everything we've submitted is done */ dma_writel(dw, CLEAR.XFER, dwc->mask); dwc_complete_all(dw, dwc); return; } dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { if (desc->lli.llp == llp) /* This one is currently in progress */ return; list_for_each_entry(child, &desc->tx_list, desc_node) if (child->lli.llp == llp) /* Currently in progress */ return; /* * No descriptors so far seem to be in progress, i.e. * this one must be done. */ dwc_descriptor_complete(dwc, desc); } dev_err(chan2dev(&dwc->chan), "BUG: All descriptors done, but channel not idle!\n"); /* Try to continue after resetting the channel... 
*/ channel_clear_bit(dw, CH_EN, dwc->mask); while (dma_readl(dw, CH_EN) & dwc->mask) cpu_relax(); if (!list_empty(&dwc->queue)) { dwc_dostart(dwc, dwc_first_queued(dwc)); list_splice_init(&dwc->queue, &dwc->active_list); } } static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) { dev_printk(KERN_CRIT, chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo); } static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) { struct dw_desc *bad_desc; struct dw_desc *child; dwc_scan_descriptors(dw, dwc); /* * The descriptor currently at the head of the active list is * borked. Since we don't have any way to report errors, we'll * just have to scream loudly and try to carry on. */ bad_desc = dwc_first_active(dwc); list_del_init(&bad_desc->desc_node); list_splice_init(&dwc->queue, dwc->active_list.prev); /* Clear the error flag and try to restart the controller */ dma_writel(dw, CLEAR.ERROR, dwc->mask); if (!list_empty(&dwc->active_list)) dwc_dostart(dwc, dwc_first_active(dwc)); /* * KERN_CRITICAL may seem harsh, but since this only happens * when someone submits a bad physical address in a * descriptor, we should consider ourselves lucky that the * controller flagged an error instead of scribbling over * random memory locations. */ dev_printk(KERN_CRIT, chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"); dev_printk(KERN_CRIT, chan2dev(&dwc->chan), " cookie: %d\n", bad_desc->txd.cookie); dwc_dump_lli(dwc, &bad_desc->lli); list_for_each_entry(child, &bad_desc->tx_list, desc_node) dwc_dump_lli(dwc, &child->lli); /* Pretend the descriptor completed successfully */ dwc_descriptor_complete(dwc, bad_desc); } /* --------------------- Cyclic DMA API extensions -------------------- */ inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); return channel_readl(dwc, SAR); } EXPORT_SYMBOL(dw_dma_get_src_addr); inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); return channel_readl(dwc, DAR); } EXPORT_SYMBOL(dw_dma_get_dst_addr); /* called with dwc->lock held and all DMAC interrupts disabled */ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, u32 status_block, u32 status_err, u32 status_xfer) { if (status_block & dwc->mask) { void (*callback)(void *param); void *callback_param; dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", channel_readl(dwc, LLP)); dma_writel(dw, CLEAR.BLOCK, dwc->mask); callback = dwc->cdesc->period_callback; callback_param = dwc->cdesc->period_callback_param; if (callback) { spin_unlock(&dwc->lock); callback(callback_param); spin_lock(&dwc->lock); } } /* * Error and transfer complete are highly unlikely, and will most * likely be due to a configuration error by the user. */ if (unlikely(status_err & dwc->mask) || unlikely(status_xfer & dwc->mask)) { int i; dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " "interrupt, stopping DMA transfer\n", status_xfer ? 
"xfer" : "error"); dev_err(chan2dev(&dwc->chan), " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", channel_readl(dwc, SAR), channel_readl(dwc, DAR), channel_readl(dwc, LLP), channel_readl(dwc, CTL_HI), channel_readl(dwc, CTL_LO)); channel_clear_bit(dw, CH_EN, dwc->mask); while (dma_readl(dw, CH_EN) & dwc->mask) cpu_relax(); /* make sure DMA does not restart by loading a new list */ channel_writel(dwc, LLP, 0); channel_writel(dwc, CTL_LO, 0); channel_writel(dwc, CTL_HI, 0); dma_writel(dw, CLEAR.BLOCK, dwc->mask); dma_writel(dw, CLEAR.ERROR, dwc->mask); dma_writel(dw, CLEAR.XFER, dwc->mask); for (i = 0; i < dwc->cdesc->periods; i++) dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); } } /* ------------------------------------------------------------------------- */ static void dw_dma_tasklet(unsigned long data) { struct dw_dma *dw = (struct dw_dma *)data; struct dw_dma_chan *dwc; u32 status_block; u32 status_xfer; u32 status_err; int i; status_block = dma_readl(dw, RAW.BLOCK); status_xfer = dma_readl(dw, RAW.XFER); status_err = dma_readl(dw, RAW.ERROR); dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n", status_block, status_err); for (i = 0; i < dw->dma.chancnt; i++) { dwc = &dw->chan[i]; spin_lock(&dwc->lock); if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) dwc_handle_cyclic(dw, dwc, status_block, status_err, status_xfer); else if (status_err & (1 << i)) dwc_handle_error(dw, dwc); else if ((status_block | status_xfer) & (1 << i)) dwc_scan_descriptors(dw, dwc); spin_unlock(&dwc->lock); } /* * Re-enable interrupts. Block Complete interrupts are only * enabled if the INT_EN bit in the descriptor is set. This * will trigger a scan before the whole list is done. */ channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask); channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); } static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) { struct dw_dma *dw = dev_id; u32 status; dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n", dma_readl(dw, STATUS_INT)); /* * Just disable the interrupts. We'll turn them back on in the * softirq handler. */ channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); status = dma_readl(dw, STATUS_INT); if (status) { dev_err(dw->dma.dev, "BUG: Unexpected interrupts pending: 0x%x\n", status); /* Try to recover */ channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1); channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); } tasklet_schedule(&dw->tasklet); return IRQ_HANDLED; } /*----------------------------------------------------------------------*/ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) { struct dw_desc *desc = txd_to_dw_desc(tx); struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); dma_cookie_t cookie; spin_lock_bh(&dwc->lock); cookie = dwc_assign_cookie(dwc, desc); /* * REVISIT: We should attempt to chain as many descriptors as * possible, perhaps even appending to those already submitted * for DMA. But this is hard to do in a race-free manner. 
*/ if (list_empty(&dwc->active_list)) { dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", desc->txd.cookie); dwc_dostart(dwc, desc); list_add_tail(&desc->desc_node, &dwc->active_list); } else { dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", desc->txd.cookie); list_add_tail(&desc->desc_node, &dwc->queue); } spin_unlock_bh(&dwc->lock); return cookie; } static struct dma_async_tx_descriptor * dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_desc *desc; struct dw_desc *first; struct dw_desc *prev; size_t xfer_count; size_t offset; unsigned int src_width; unsigned int dst_width; u32 ctllo; dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n", dest, src, len, flags); if (unlikely(!len)) { dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); return NULL; } /* * We can be a lot more clever here, but this should take care * of the most common optimization. */ if (!((src | dest | len) & 3)) src_width = dst_width = 2; else if (!((src | dest | len) & 1)) src_width = dst_width = 1; else src_width = dst_width = 0; ctllo = DWC_DEFAULT_CTLLO | DWC_CTLL_DST_WIDTH(dst_width) | DWC_CTLL_SRC_WIDTH(src_width) | DWC_CTLL_DST_INC | DWC_CTLL_SRC_INC | DWC_CTLL_FC_M2M; prev = first = NULL; for (offset = 0; offset < len; offset += xfer_count << src_width) { xfer_count = min_t(size_t, (len - offset) >> src_width, DWC_MAX_COUNT); desc = dwc_desc_get(dwc); if (!desc) goto err_desc_get; desc->lli.sar = src + offset; desc->lli.dar = dest + offset; desc->lli.ctllo = ctllo; desc->lli.ctlhi = xfer_count; if (!first) { first = desc; } else { prev->lli.llp = desc->txd.phys; dma_sync_single_for_device(chan2parent(chan), prev->txd.phys, sizeof(prev->lli), DMA_TO_DEVICE); list_add_tail(&desc->desc_node, &first->tx_list); } prev = desc; } if (flags & DMA_PREP_INTERRUPT) /* Trigger interrupt after last block */ prev->lli.ctllo |= DWC_CTLL_INT_EN; prev->lli.llp = 0; dma_sync_single_for_device(chan2parent(chan), prev->txd.phys, sizeof(prev->lli), DMA_TO_DEVICE); first->txd.flags = flags; first->len = len; return &first->txd; err_desc_get: dwc_desc_put(dwc, first); return NULL; } static struct dma_async_tx_descriptor * dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_data_direction direction, unsigned long flags) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_dma_slave *dws = chan->private; struct dw_desc *prev; struct dw_desc *first; u32 ctllo; dma_addr_t reg; unsigned int reg_width; unsigned int mem_width; unsigned int i; struct scatterlist *sg; size_t total_len = 0; dev_vdbg(chan2dev(chan), "prep_dma_slave\n"); if (unlikely(!dws || !sg_len)) return NULL; reg_width = dws->reg_width; prev = first = NULL; switch (direction) { case DMA_TO_DEVICE: ctllo = (DWC_DEFAULT_CTLLO | DWC_CTLL_DST_WIDTH(reg_width) | DWC_CTLL_DST_FIX | DWC_CTLL_SRC_INC | DWC_CTLL_FC_M2P); reg = dws->tx_reg; for_each_sg(sgl, sg, sg_len, i) { struct dw_desc *desc; u32 len; u32 mem; desc = dwc_desc_get(dwc); if (!desc) { dev_err(chan2dev(chan), "not enough descriptors available\n"); goto err_desc_get; } mem = sg_phys(sg); len = sg_dma_len(sg); mem_width = 2; if (unlikely(mem & 3 || len & 3)) mem_width = 0; desc->lli.sar = mem; desc->lli.dar = reg; desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); desc->lli.ctlhi = len >> mem_width; if (!first) { first = desc; } else { prev->lli.llp = desc->txd.phys; 
dma_sync_single_for_device(chan2parent(chan), prev->txd.phys, sizeof(prev->lli), DMA_TO_DEVICE); list_add_tail(&desc->desc_node, &first->tx_list); } prev = desc; total_len += len; } break; case DMA_FROM_DEVICE: ctllo = (DWC_DEFAULT_CTLLO | DWC_CTLL_SRC_WIDTH(reg_width) | DWC_CTLL_DST_INC | DWC_CTLL_SRC_FIX | DWC_CTLL_FC_P2M); reg = dws->rx_reg; for_each_sg(sgl, sg, sg_len, i) { struct dw_desc *desc; u32 len; u32 mem; desc = dwc_desc_get(dwc); if (!desc) { dev_err(chan2dev(chan), "not enough descriptors available\n"); goto err_desc_get; } mem = sg_phys(sg); len = sg_dma_len(sg); mem_width = 2; if (unlikely(mem & 3 || len & 3)) mem_width = 0; desc->lli.sar = reg; desc->lli.dar = mem; desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); desc->lli.ctlhi = len >> reg_width; if (!first) { first = desc; } else { prev->lli.llp = desc->txd.phys; dma_sync_single_for_device(chan2parent(chan), prev->txd.phys, sizeof(prev->lli), DMA_TO_DEVICE); list_add_tail(&desc->desc_node, &first->tx_list); } prev = desc; total_len += len; } break; default: return NULL; } if (flags & DMA_PREP_INTERRUPT) /* Trigger interrupt after last block */ prev->lli.ctllo |= DWC_CTLL_INT_EN; prev->lli.llp = 0; dma_sync_single_for_device(chan2parent(chan), prev->txd.phys, sizeof(prev->lli), DMA_TO_DEVICE); first->len = total_len; return &first->txd; err_desc_get: dwc_desc_put(dwc, first); return NULL; } static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_dma *dw = to_dw_dma(chan->device); struct dw_desc *desc, *_desc; LIST_HEAD(list); /* Only supports DMA_TERMINATE_ALL */ if (cmd != DMA_TERMINATE_ALL) return -ENXIO; /* * This is only called when something went wrong elsewhere, so * we don't really care about the data. Just disable the * channel. We still have to poll the channel enable bit due * to AHB/HSB limitations. 
*/ spin_lock_bh(&dwc->lock); channel_clear_bit(dw, CH_EN, dwc->mask); while (dma_readl(dw, CH_EN) & dwc->mask) cpu_relax(); /* active_list entries will end up before queued entries */ list_splice_init(&dwc->queue, &list); list_splice_init(&dwc->active_list, &list); spin_unlock_bh(&dwc->lock); /* Flush all pending and queued descriptors */ list_for_each_entry_safe(desc, _desc, &list, desc_node) dwc_descriptor_complete(dwc, desc); return 0; } static enum dma_status dwc_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); dma_cookie_t last_used; dma_cookie_t last_complete; int ret; last_complete = dwc->completed; last_used = chan->cookie; ret = dma_async_is_complete(cookie, last_complete, last_used); if (ret != DMA_SUCCESS) { dwc_scan_descriptors(to_dw_dma(chan->device), dwc); last_complete = dwc->completed; last_used = chan->cookie; ret = dma_async_is_complete(cookie, last_complete, last_used); } dma_set_tx_state(txstate, last_complete, last_used, 0); return ret; } static void dwc_issue_pending(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); spin_lock_bh(&dwc->lock); if (!list_empty(&dwc->queue)) dwc_scan_descriptors(to_dw_dma(chan->device), dwc); spin_unlock_bh(&dwc->lock); } static int dwc_alloc_chan_resources(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_dma *dw = to_dw_dma(chan->device); struct dw_desc *desc; struct dw_dma_slave *dws; int i; u32 cfghi; u32 cfglo; dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); /* ASSERT: channel is idle */ if (dma_readl(dw, CH_EN) & dwc->mask) { dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); return -EIO; } dwc->completed = chan->cookie = 1; cfghi = DWC_CFGH_FIFO_MODE; cfglo = 0; dws = chan->private; if (dws) { /* * We need controller-specific data to set up slave * transfers. */ BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); cfghi = dws->cfg_hi; cfglo = dws->cfg_lo; } channel_writel(dwc, CFG_LO, cfglo); channel_writel(dwc, CFG_HI, cfghi); /* * NOTE: some controllers may have additional features that we * need to initialize here, like "scatter-gather" (which * doesn't mean what you think it means), and status writeback. 
*/ spin_lock_bh(&dwc->lock); i = dwc->descs_allocated; while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { spin_unlock_bh(&dwc->lock); desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); if (!desc) { dev_info(chan2dev(chan), "only allocated %d descriptors\n", i); spin_lock_bh(&dwc->lock); break; } INIT_LIST_HEAD(&desc->tx_list); dma_async_tx_descriptor_init(&desc->txd, chan); desc->txd.tx_submit = dwc_tx_submit; desc->txd.flags = DMA_CTRL_ACK; desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli, sizeof(desc->lli), DMA_TO_DEVICE); dwc_desc_put(dwc, desc); spin_lock_bh(&dwc->lock); i = ++dwc->descs_allocated; } /* Enable interrupts */ channel_set_bit(dw, MASK.XFER, dwc->mask); channel_set_bit(dw, MASK.BLOCK, dwc->mask); channel_set_bit(dw, MASK.ERROR, dwc->mask); spin_unlock_bh(&dwc->lock); dev_dbg(chan2dev(chan), "alloc_chan_resources allocated %d descriptors\n", i); return i; } static void dwc_free_chan_resources(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_dma *dw = to_dw_dma(chan->device); struct dw_desc *desc, *_desc; LIST_HEAD(list); dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n", dwc->descs_allocated); /* ASSERT: channel is idle */ BUG_ON(!list_empty(&dwc->active_list)); BUG_ON(!list_empty(&dwc->queue)); BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); spin_lock_bh(&dwc->lock); list_splice_init(&dwc->free_list, &list); dwc->descs_allocated = 0; /* Disable interrupts */ channel_clear_bit(dw, MASK.XFER, dwc->mask); channel_clear_bit(dw, MASK.BLOCK, dwc->mask); channel_clear_bit(dw, MASK.ERROR, dwc->mask); spin_unlock_bh(&dwc->lock); list_for_each_entry_safe(desc, _desc, &list, desc_node) { dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); dma_unmap_single(chan2parent(chan), desc->txd.phys, sizeof(desc->lli), DMA_TO_DEVICE); kfree(desc); } dev_vdbg(chan2dev(chan), "free_chan_resources done\n"); } /* --------------------- Cyclic DMA API extensions -------------------- */ /** * dw_dma_cyclic_start - start the cyclic DMA transfer * @chan: the DMA channel to start * * Must be called with soft interrupts disabled. Returns zero on success or * -errno on failure. */ int dw_dma_cyclic_start(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_dma *dw = to_dw_dma(dwc->chan.device); if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); return -ENODEV; } spin_lock(&dwc->lock); /* assert channel is idle */ if (dma_readl(dw, CH_EN) & dwc->mask) { dev_err(chan2dev(&dwc->chan), "BUG: Attempted to start non-idle channel\n"); dev_err(chan2dev(&dwc->chan), " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", channel_readl(dwc, SAR), channel_readl(dwc, DAR), channel_readl(dwc, LLP), channel_readl(dwc, CTL_HI), channel_readl(dwc, CTL_LO)); spin_unlock(&dwc->lock); return -EBUSY; } dma_writel(dw, CLEAR.BLOCK, dwc->mask); dma_writel(dw, CLEAR.ERROR, dwc->mask); dma_writel(dw, CLEAR.XFER, dwc->mask); /* setup DMAC channel registers */ channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); channel_writel(dwc, CTL_HI, 0); channel_set_bit(dw, CH_EN, dwc->mask); spin_unlock(&dwc->lock); return 0; } EXPORT_SYMBOL(dw_dma_cyclic_start); /** * dw_dma_cyclic_stop - stop the cyclic DMA transfer * @chan: the DMA channel to stop * * Must be called with soft interrupts disabled. 
*/ void dw_dma_cyclic_stop(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_dma *dw = to_dw_dma(dwc->chan.device); spin_lock(&dwc->lock); channel_clear_bit(dw, CH_EN, dwc->mask); while (dma_readl(dw, CH_EN) & dwc->mask) cpu_relax(); spin_unlock(&dwc->lock); } EXPORT_SYMBOL(dw_dma_cyclic_stop); /** * dw_dma_cyclic_prep - prepare the cyclic DMA transfer * @chan: the DMA channel to prepare * @buf_addr: physical DMA address where the buffer starts * @buf_len: total number of bytes for the entire buffer * @period_len: number of bytes for each period * @direction: transfer direction, to or from device * * Must be called before trying to start the transfer. Returns a valid struct * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful. */ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_data_direction direction) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_cyclic_desc *cdesc; struct dw_cyclic_desc *retval = NULL; struct dw_desc *desc; struct dw_desc *last = NULL; struct dw_dma_slave *dws = chan->private; unsigned long was_cyclic; unsigned int reg_width; unsigned int periods; unsigned int i; spin_lock_bh(&dwc->lock); if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { spin_unlock_bh(&dwc->lock); dev_dbg(chan2dev(&dwc->chan), "queue and/or active list are not empty\n"); return ERR_PTR(-EBUSY); } was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); spin_unlock_bh(&dwc->lock); if (was_cyclic) { dev_dbg(chan2dev(&dwc->chan), "channel already prepared for cyclic DMA\n"); return ERR_PTR(-EBUSY); } retval = ERR_PTR(-EINVAL); reg_width = dws->reg_width; periods = buf_len / period_len; /* Check for too big/unaligned periods and unaligned DMA buffer. 
*/ if (period_len > (DWC_MAX_COUNT << reg_width)) goto out_err; if (unlikely(period_len & ((1 << reg_width) - 1))) goto out_err; if (unlikely(buf_addr & ((1 << reg_width) - 1))) goto out_err; if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE)))) goto out_err; retval = ERR_PTR(-ENOMEM); if (periods > NR_DESCS_PER_CHANNEL) goto out_err; cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); if (!cdesc) goto out_err; cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); if (!cdesc->desc) goto out_err_alloc; for (i = 0; i < periods; i++) { desc = dwc_desc_get(dwc); if (!desc) goto out_err_desc_get; switch (direction) { case DMA_TO_DEVICE: desc->lli.dar = dws->tx_reg; desc->lli.sar = buf_addr + (period_len * i); desc->lli.ctllo = (DWC_DEFAULT_CTLLO | DWC_CTLL_DST_WIDTH(reg_width) | DWC_CTLL_SRC_WIDTH(reg_width) | DWC_CTLL_DST_FIX | DWC_CTLL_SRC_INC | DWC_CTLL_FC_M2P | DWC_CTLL_INT_EN); break; case DMA_FROM_DEVICE: desc->lli.dar = buf_addr + (period_len * i); desc->lli.sar = dws->rx_reg; desc->lli.ctllo = (DWC_DEFAULT_CTLLO | DWC_CTLL_SRC_WIDTH(reg_width) | DWC_CTLL_DST_WIDTH(reg_width) | DWC_CTLL_DST_INC | DWC_CTLL_SRC_FIX | DWC_CTLL_FC_P2M | DWC_CTLL_INT_EN); break; default: break; } desc->lli.ctlhi = (period_len >> reg_width); cdesc->desc[i] = desc; if (last) { last->lli.llp = desc->txd.phys; dma_sync_single_for_device(chan2parent(chan), last->txd.phys, sizeof(last->lli), DMA_TO_DEVICE); } last = desc; } /* lets make a cyclic list */ last->lli.llp = cdesc->desc[0]->txd.phys; dma_sync_single_for_device(chan2parent(chan), last->txd.phys, sizeof(last->lli), DMA_TO_DEVICE); dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu " "period %zu periods %d\n", buf_addr, buf_len, period_len, periods); cdesc->periods = periods; dwc->cdesc = cdesc; return cdesc; out_err_desc_get: while (i--) dwc_desc_put(dwc, cdesc->desc[i]); out_err_alloc: kfree(cdesc); out_err: clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); return (struct dw_cyclic_desc *)retval; } EXPORT_SYMBOL(dw_dma_cyclic_prep); /** * dw_dma_cyclic_free - free a prepared cyclic DMA transfer * @chan: the DMA channel to free */ void dw_dma_cyclic_free(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_dma *dw = to_dw_dma(dwc->chan.device); struct dw_cyclic_desc *cdesc = dwc->cdesc; int i; dev_dbg(chan2dev(&dwc->chan), "cyclic free\n"); if (!cdesc) return; spin_lock_bh(&dwc->lock); channel_clear_bit(dw, CH_EN, dwc->mask); while (dma_readl(dw, CH_EN) & dwc->mask) cpu_relax(); dma_writel(dw, CLEAR.BLOCK, dwc->mask); dma_writel(dw, CLEAR.ERROR, dwc->mask); dma_writel(dw, CLEAR.XFER, dwc->mask); spin_unlock_bh(&dwc->lock); for (i = 0; i < cdesc->periods; i++) dwc_desc_put(dwc, cdesc->desc[i]); kfree(cdesc->desc); kfree(cdesc); clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); } EXPORT_SYMBOL(dw_dma_cyclic_free); /*----------------------------------------------------------------------*/ static void dw_dma_off(struct dw_dma *dw) { dma_writel(dw, CFG, 0); channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) cpu_relax(); } static int __init dw_probe(struct platform_device *pdev) { struct dw_dma_platform_data *pdata; struct resource *io; struct dw_dma *dw; size_t size; int irq; int err; int i; pdata = pdev->dev.platform_data; if (!pdata 
|| pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) return -EINVAL; io = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!io) return -EINVAL; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; size = sizeof(struct dw_dma); size += pdata->nr_channels * sizeof(struct dw_dma_chan); dw = kzalloc(size, GFP_KERNEL); if (!dw) return -ENOMEM; if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) { err = -EBUSY; goto err_kfree; } dw->regs = ioremap(io->start, DW_REGLEN); if (!dw->regs) { err = -ENOMEM; goto err_release_r; } dw->clk = clk_get(&pdev->dev, "hclk"); if (IS_ERR(dw->clk)) { err = PTR_ERR(dw->clk); goto err_clk; } clk_enable(dw->clk); /* force dma off, just in case */ dw_dma_off(dw); err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw); if (err) goto err_irq; platform_set_drvdata(pdev, dw); tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); dw->all_chan_mask = (1 << pdata->nr_channels) - 1; INIT_LIST_HEAD(&dw->dma.channels); for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) { struct dw_dma_chan *dwc = &dw->chan[i]; dwc->chan.device = &dw->dma; dwc->chan.cookie = dwc->completed = 1; dwc->chan.chan_id = i; list_add_tail(&dwc->chan.device_node, &dw->dma.channels); dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; spin_lock_init(&dwc->lock); dwc->mask = 1 << i; INIT_LIST_HEAD(&dwc->active_list); INIT_LIST_HEAD(&dwc->queue); INIT_LIST_HEAD(&dwc->free_list); channel_clear_bit(dw, CH_EN, dwc->mask); } /* Clear/disable all interrupts on all channels. */ dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); dw->dma.dev = &pdev->dev; dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; dw->dma.device_free_chan_resources = dwc_free_chan_resources; dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; dw->dma.device_control = dwc_control; dw->dma.device_tx_status = dwc_tx_status; dw->dma.device_issue_pending = dwc_issue_pending; dma_writel(dw, CFG, DW_CFG_DMA_EN); printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", dev_name(&pdev->dev), dw->dma.chancnt); dma_async_device_register(&dw->dma); return 0; err_irq: clk_disable(dw->clk); clk_put(dw->clk); err_clk: iounmap(dw->regs); dw->regs = NULL; err_release_r: release_resource(io); err_kfree: kfree(dw); return err; } static int __exit dw_remove(struct platform_device *pdev) { struct dw_dma *dw = platform_get_drvdata(pdev); struct dw_dma_chan *dwc, *_dwc; struct resource *io; dw_dma_off(dw); dma_async_device_unregister(&dw->dma); free_irq(platform_get_irq(pdev, 0), dw); tasklet_kill(&dw->tasklet); list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, chan.device_node) { list_del(&dwc->chan.device_node); channel_clear_bit(dw, CH_EN, dwc->mask); } clk_disable(dw->clk); clk_put(dw->clk); iounmap(dw->regs); dw->regs = NULL; io = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(io->start, DW_REGLEN); kfree(dw); return 0; } static void dw_shutdown(struct platform_device *pdev) { struct 
dw_dma *dw = platform_get_drvdata(pdev); dw_dma_off(platform_get_drvdata(pdev)); clk_disable(dw->clk); } static int dw_suspend_noirq(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct dw_dma *dw = platform_get_drvdata(pdev); dw_dma_off(platform_get_drvdata(pdev)); clk_disable(dw->clk); return 0; } static int dw_resume_noirq(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct dw_dma *dw = platform_get_drvdata(pdev); clk_enable(dw->clk); dma_writel(dw, CFG, DW_CFG_DMA_EN); return 0; } static const struct dev_pm_ops dw_dev_pm_ops = { .suspend_noirq = dw_suspend_noirq, .resume_noirq = dw_resume_noirq, }; static struct platform_driver dw_driver = { .remove = __exit_p(dw_remove), .shutdown = dw_shutdown, .driver = { .name = "dw_dmac", .pm = &dw_dev_pm_ops, }, }; static int __init dw_init(void) { return platform_driver_probe(&dw_driver, dw_probe); } module_init(dw_init); static void __exit dw_exit(void) { platform_driver_unregister(&dw_driver); } module_exit(dw_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");
gpl-2.0
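The dw_dmac driver above exposes, besides the usual dmaengine entry points, a small cyclic-transfer API (dw_dma_cyclic_prep/start/stop/free) meant for ring-buffer style peripheral I/O. Below is a minimal sketch of a caller, not taken from the driver: it assumes the DMA channel has already been requested and that chan->private points at a configured struct dw_dma_slave (the prep routine dereferences it for reg_width and rx_reg); the header path, function names and buffer/period sizes are illustrative assumptions.

/* Hypothetical client of the cyclic API; names and sizes are placeholders. */
#include <linux/err.h>
#include <linux/dmaengine.h>
#include <linux/dw_dmac.h>	/* assumed header location for this kernel era */

static int start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
			   size_t buf_len, size_t period_len)
{
	struct dw_cyclic_desc *cdesc;

	/* Build a ring of buf_len/period_len blocks over the buffer. */
	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
				   DMA_FROM_DEVICE);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	/* Both start and stop must be called with softirqs disabled. */
	return dw_dma_cyclic_start(chan);
}

static void stop_cyclic_rx(struct dma_chan *chan)
{
	dw_dma_cyclic_stop(chan);	/* halt the channel */
	dw_dma_cyclic_free(chan);	/* release the descriptor ring */
}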
gotoco/linux
drivers/gpu/drm/i915/dvo_sil164.c
1303
6735
/************************************************************************** Copyright © 2006 Dave Airlie All Rights Reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sub license, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice (including the next paragraph) shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. **************************************************************************/ #include "dvo.h" #define SIL164_VID 0x0001 #define SIL164_DID 0x0006 #define SIL164_VID_LO 0x00 #define SIL164_VID_HI 0x01 #define SIL164_DID_LO 0x02 #define SIL164_DID_HI 0x03 #define SIL164_REV 0x04 #define SIL164_RSVD 0x05 #define SIL164_FREQ_LO 0x06 #define SIL164_FREQ_HI 0x07 #define SIL164_REG8 0x08 #define SIL164_8_VEN (1<<5) #define SIL164_8_HEN (1<<4) #define SIL164_8_DSEL (1<<3) #define SIL164_8_BSEL (1<<2) #define SIL164_8_EDGE (1<<1) #define SIL164_8_PD (1<<0) #define SIL164_REG9 0x09 #define SIL164_9_VLOW (1<<7) #define SIL164_9_MSEL_MASK (0x7<<4) #define SIL164_9_TSEL (1<<3) #define SIL164_9_RSEN (1<<2) #define SIL164_9_HTPLG (1<<1) #define SIL164_9_MDI (1<<0) #define SIL164_REGC 0x0c struct sil164_priv { //I2CDevRec d; bool quiet; }; #define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr)) static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) { struct sil164_priv *sil = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; u8 out_buf[2]; u8 in_buf[2]; struct i2c_msg msgs[] = { { .addr = dvo->slave_addr, .flags = 0, .len = 1, .buf = out_buf, }, { .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = in_buf, } }; out_buf[0] = addr; out_buf[1] = 0; if (i2c_transfer(adapter, msgs, 2) == 2) { *ch = in_buf[0]; return true; } if (!sil->quiet) { DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", addr, adapter->name, dvo->slave_addr); } return false; } static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) { struct sil164_priv *sil = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; uint8_t out_buf[2]; struct i2c_msg msg = { .addr = dvo->slave_addr, .flags = 0, .len = 2, .buf = out_buf, }; out_buf[0] = addr; out_buf[1] = ch; if (i2c_transfer(adapter, &msg, 1) == 1) return true; if (!sil->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", addr, adapter->name, dvo->slave_addr); } return false; } /* Silicon Image 164 driver for chip on i2c bus */ static bool sil164_init(struct intel_dvo_device *dvo, struct i2c_adapter *adapter) { /* this will detect the SIL164 chip on the specified i2c bus */ struct sil164_priv *sil; unsigned char ch; sil = kzalloc(sizeof(struct sil164_priv), GFP_KERNEL); if (sil == NULL) return false; dvo->i2c_bus = adapter; dvo->dev_priv = sil; sil->quiet = true; 
if (!sil164_readb(dvo, SIL164_VID_LO, &ch)) goto out; if (ch != (SIL164_VID & 0xff)) { DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n", ch, adapter->name, dvo->slave_addr); goto out; } if (!sil164_readb(dvo, SIL164_DID_LO, &ch)) goto out; if (ch != (SIL164_DID & 0xff)) { DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n", ch, adapter->name, dvo->slave_addr); goto out; } sil->quiet = false; DRM_DEBUG_KMS("init sil164 dvo controller successfully!\n"); return true; out: kfree(sil); return false; } static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo) { uint8_t reg9; sil164_readb(dvo, SIL164_REG9, &reg9); if (reg9 & SIL164_9_HTPLG) return connector_status_connected; else return connector_status_disconnected; } static enum drm_mode_status sil164_mode_valid(struct intel_dvo_device *dvo, struct drm_display_mode *mode) { return MODE_OK; } static void sil164_mode_set(struct intel_dvo_device *dvo, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { /* As long as the basics are set up, since we don't have clock * dependencies in the mode setup, we can just leave the * registers alone and everything will work fine. */ /* recommended programming sequence from doc */ /*sil164_writeb(sil, 0x08, 0x30); sil164_writeb(sil, 0x09, 0x00); sil164_writeb(sil, 0x0a, 0x90); sil164_writeb(sil, 0x0c, 0x89); sil164_writeb(sil, 0x08, 0x31);*/ /* don't do much */ return; } /* set the SIL164 power state */ static void sil164_dpms(struct intel_dvo_device *dvo, bool enable) { int ret; unsigned char ch; ret = sil164_readb(dvo, SIL164_REG8, &ch); if (ret == false) return; if (enable) ch |= SIL164_8_PD; else ch &= ~SIL164_8_PD; sil164_writeb(dvo, SIL164_REG8, ch); return; } static bool sil164_get_hw_state(struct intel_dvo_device *dvo) { int ret; unsigned char ch; ret = sil164_readb(dvo, SIL164_REG8, &ch); if (ret == false) return false; if (ch & SIL164_8_PD) return true; else return false; } static void sil164_dump_regs(struct intel_dvo_device *dvo) { uint8_t val; sil164_readb(dvo, SIL164_FREQ_LO, &val); DRM_DEBUG_KMS("SIL164_FREQ_LO: 0x%02x\n", val); sil164_readb(dvo, SIL164_FREQ_HI, &val); DRM_DEBUG_KMS("SIL164_FREQ_HI: 0x%02x\n", val); sil164_readb(dvo, SIL164_REG8, &val); DRM_DEBUG_KMS("SIL164_REG8: 0x%02x\n", val); sil164_readb(dvo, SIL164_REG9, &val); DRM_DEBUG_KMS("SIL164_REG9: 0x%02x\n", val); sil164_readb(dvo, SIL164_REGC, &val); DRM_DEBUG_KMS("SIL164_REGC: 0x%02x\n", val); } static void sil164_destroy(struct intel_dvo_device *dvo) { struct sil164_priv *sil = dvo->dev_priv; if (sil) { kfree(sil); dvo->dev_priv = NULL; } } struct intel_dvo_dev_ops sil164_ops = { .init = sil164_init, .detect = sil164_detect, .mode_valid = sil164_mode_valid, .mode_set = sil164_mode_set, .dpms = sil164_dpms, .get_hw_state = sil164_get_hw_state, .dump_regs = sil164_dump_regs, .destroy = sil164_destroy, };
gpl-2.0
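The sil164 DVO module above is consumed through its exported intel_dvo_dev_ops table rather than called directly. A short sketch of how a caller might probe for the chip and read its hot-plug state follows; the wrapper function and its use of an already-populated intel_dvo_device (slave_addr set by the caller) are assumptions for illustration, not code from the i915 driver.

/* Hypothetical probe helper; assumes dvo->slave_addr was filled in. */
static bool probe_and_detect_sil164(struct intel_dvo_device *dvo,
				    struct i2c_adapter *adapter)
{
	/* init() verifies the vendor/device ID registers over i2c. */
	if (!sil164_ops.init(dvo, adapter))
		return false;

	/* detect() reports hot-plug state via the SIL164_9_HTPLG bit. */
	return sil164_ops.detect(dvo) == connector_status_connected;
}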
ciwrl/android_kernel_huawei_msm8939
kernel/power/user.c
2327
9883
/* * linux/kernel/power/user.c * * This file provides the user space interface for software suspend/resume. * * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> * * This file is released under the GPLv2. * */ #include <linux/suspend.h> #include <linux/syscalls.h> #include <linux/reboot.h> #include <linux/string.h> #include <linux/device.h> #include <linux/miscdevice.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/pm.h> #include <linux/fs.h> #include <linux/compat.h> #include <linux/console.h> #include <linux/cpu.h> #include <linux/freezer.h> #include <asm/uaccess.h> #include "power.h" #define SNAPSHOT_MINOR 231 static struct snapshot_data { struct snapshot_handle handle; int swap; int mode; char frozen; char ready; char platform_support; } snapshot_state; atomic_t snapshot_device_available = ATOMIC_INIT(1); static int snapshot_open(struct inode *inode, struct file *filp) { struct snapshot_data *data; int error; lock_system_sleep(); if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { error = -EBUSY; goto Unlock; } if ((filp->f_flags & O_ACCMODE) == O_RDWR) { atomic_inc(&snapshot_device_available); error = -ENOSYS; goto Unlock; } if(create_basic_memory_bitmaps()) { atomic_inc(&snapshot_device_available); error = -ENOMEM; goto Unlock; } nonseekable_open(inode, filp); data = &snapshot_state; filp->private_data = data; memset(&data->handle, 0, sizeof(struct snapshot_handle)); if ((filp->f_flags & O_ACCMODE) == O_RDONLY) { /* Hibernating. The image device should be accessible. */ data->swap = swsusp_resume_device ? swap_type_of(swsusp_resume_device, 0, NULL) : -1; data->mode = O_RDONLY; error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE); if (error) pm_notifier_call_chain(PM_POST_HIBERNATION); } else { /* * Resuming. We may need to wait for the image device to * appear. */ wait_for_device_probe(); data->swap = -1; data->mode = O_WRONLY; error = pm_notifier_call_chain(PM_RESTORE_PREPARE); if (error) pm_notifier_call_chain(PM_POST_RESTORE); } if (error) { free_basic_memory_bitmaps(); atomic_inc(&snapshot_device_available); } data->frozen = 0; data->ready = 0; data->platform_support = 0; Unlock: unlock_system_sleep(); return error; } static int snapshot_release(struct inode *inode, struct file *filp) { struct snapshot_data *data; lock_system_sleep(); swsusp_free(); free_basic_memory_bitmaps(); data = filp->private_data; free_all_swap_pages(data->swap); if (data->frozen) { pm_restore_gfp_mask(); thaw_processes(); } pm_notifier_call_chain(data->mode == O_RDONLY ? PM_POST_HIBERNATION : PM_POST_RESTORE); atomic_inc(&snapshot_device_available); unlock_system_sleep(); return 0; } static ssize_t snapshot_read(struct file *filp, char __user *buf, size_t count, loff_t *offp) { struct snapshot_data *data; ssize_t res; loff_t pg_offp = *offp & ~PAGE_MASK; lock_system_sleep(); data = filp->private_data; if (!data->ready) { res = -ENODATA; goto Unlock; } if (!pg_offp) { /* on page boundary? 
*/ res = snapshot_read_next(&data->handle); if (res <= 0) goto Unlock; } else { res = PAGE_SIZE - pg_offp; } res = simple_read_from_buffer(buf, count, &pg_offp, data_of(data->handle), res); if (res > 0) *offp += res; Unlock: unlock_system_sleep(); return res; } static ssize_t snapshot_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp) { struct snapshot_data *data; ssize_t res; loff_t pg_offp = *offp & ~PAGE_MASK; lock_system_sleep(); data = filp->private_data; if (!pg_offp) { res = snapshot_write_next(&data->handle); if (res <= 0) goto unlock; } else { res = PAGE_SIZE - pg_offp; } res = simple_write_to_buffer(data_of(data->handle), res, &pg_offp, buf, count); if (res > 0) *offp += res; unlock: unlock_system_sleep(); return res; } static long snapshot_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int error = 0; struct snapshot_data *data; loff_t size; sector_t offset; if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC) return -ENOTTY; if (_IOC_NR(cmd) > SNAPSHOT_IOC_MAXNR) return -ENOTTY; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!mutex_trylock(&pm_mutex)) return -EBUSY; data = filp->private_data; switch (cmd) { case SNAPSHOT_FREEZE: if (data->frozen) break; printk("Syncing filesystems ... "); sys_sync(); printk("done.\n"); error = freeze_processes(); if (!error) data->frozen = 1; break; case SNAPSHOT_UNFREEZE: if (!data->frozen || data->ready) break; pm_restore_gfp_mask(); thaw_processes(); data->frozen = 0; break; case SNAPSHOT_CREATE_IMAGE: if (data->mode != O_RDONLY || !data->frozen || data->ready) { error = -EPERM; break; } pm_restore_gfp_mask(); error = hibernation_snapshot(data->platform_support); if (!error) { error = put_user(in_suspend, (int __user *)arg); data->ready = !freezer_test_done && !error; freezer_test_done = false; } break; case SNAPSHOT_ATOMIC_RESTORE: snapshot_write_finalize(&data->handle); if (data->mode != O_WRONLY || !data->frozen || !snapshot_image_loaded(&data->handle)) { error = -EPERM; break; } error = hibernation_restore(data->platform_support); break; case SNAPSHOT_FREE: swsusp_free(); memset(&data->handle, 0, sizeof(struct snapshot_handle)); data->ready = 0; /* * It is necessary to thaw kernel threads here, because * SNAPSHOT_CREATE_IMAGE may be invoked directly after * SNAPSHOT_FREE. In that case, if kernel threads were not * thawed, the preallocation of memory carried out by * hibernation_snapshot() might run into problems (i.e. it * might fail or even deadlock). 
*/ thaw_kernel_threads(); break; case SNAPSHOT_PREF_IMAGE_SIZE: image_size = arg; break; case SNAPSHOT_GET_IMAGE_SIZE: if (!data->ready) { error = -ENODATA; break; } size = snapshot_get_image_size(); size <<= PAGE_SHIFT; error = put_user(size, (loff_t __user *)arg); break; case SNAPSHOT_AVAIL_SWAP_SIZE: size = count_swap_pages(data->swap, 1); size <<= PAGE_SHIFT; error = put_user(size, (loff_t __user *)arg); break; case SNAPSHOT_ALLOC_SWAP_PAGE: if (data->swap < 0 || data->swap >= MAX_SWAPFILES) { error = -ENODEV; break; } offset = alloc_swapdev_block(data->swap); if (offset) { offset <<= PAGE_SHIFT; error = put_user(offset, (loff_t __user *)arg); } else { error = -ENOSPC; } break; case SNAPSHOT_FREE_SWAP_PAGES: if (data->swap < 0 || data->swap >= MAX_SWAPFILES) { error = -ENODEV; break; } free_all_swap_pages(data->swap); break; case SNAPSHOT_S2RAM: if (!data->frozen) { error = -EPERM; break; } /* * Tasks are frozen and the notifiers have been called with * PM_HIBERNATION_PREPARE */ error = suspend_devices_and_enter(PM_SUSPEND_MEM); data->ready = 0; break; case SNAPSHOT_PLATFORM_SUPPORT: data->platform_support = !!arg; break; case SNAPSHOT_POWER_OFF: if (data->platform_support) error = hibernation_platform_enter(); break; case SNAPSHOT_SET_SWAP_AREA: if (swsusp_swap_in_use()) { error = -EPERM; } else { struct resume_swap_area swap_area; dev_t swdev; error = copy_from_user(&swap_area, (void __user *)arg, sizeof(struct resume_swap_area)); if (error) { error = -EFAULT; break; } /* * User space encodes device types as two-byte values, * so we need to recode them */ swdev = new_decode_dev(swap_area.dev); if (swdev) { offset = swap_area.offset; data->swap = swap_type_of(swdev, offset, NULL); if (data->swap < 0) error = -ENODEV; } else { data->swap = -1; error = -EINVAL; } } break; default: error = -ENOTTY; } mutex_unlock(&pm_mutex); return error; } #ifdef CONFIG_COMPAT struct compat_resume_swap_area { compat_loff_t offset; u32 dev; } __packed; static long snapshot_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { BUILD_BUG_ON(sizeof(loff_t) != sizeof(compat_loff_t)); switch (cmd) { case SNAPSHOT_GET_IMAGE_SIZE: case SNAPSHOT_AVAIL_SWAP_SIZE: case SNAPSHOT_ALLOC_SWAP_PAGE: { compat_loff_t __user *uoffset = compat_ptr(arg); loff_t offset; mm_segment_t old_fs; int err; old_fs = get_fs(); set_fs(KERNEL_DS); err = snapshot_ioctl(file, cmd, (unsigned long) &offset); set_fs(old_fs); if (!err && put_user(offset, uoffset)) err = -EFAULT; return err; } case SNAPSHOT_CREATE_IMAGE: return snapshot_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); case SNAPSHOT_SET_SWAP_AREA: { struct compat_resume_swap_area __user *u_swap_area = compat_ptr(arg); struct resume_swap_area swap_area; mm_segment_t old_fs; int err; err = get_user(swap_area.offset, &u_swap_area->offset); err |= get_user(swap_area.dev, &u_swap_area->dev); if (err) return -EFAULT; old_fs = get_fs(); set_fs(KERNEL_DS); err = snapshot_ioctl(file, SNAPSHOT_SET_SWAP_AREA, (unsigned long) &swap_area); set_fs(old_fs); return err; } default: return snapshot_ioctl(file, cmd, arg); } } #endif /* CONFIG_COMPAT */ static const struct file_operations snapshot_fops = { .open = snapshot_open, .release = snapshot_release, .read = snapshot_read, .write = snapshot_write, .llseek = no_llseek, .unlocked_ioctl = snapshot_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = snapshot_compat_ioctl, #endif }; static struct miscdevice snapshot_device = { .minor = SNAPSHOT_MINOR, .name = "snapshot", .fops = &snapshot_fops, }; static int __init 
snapshot_device_init(void) { return misc_register(&snapshot_device); }; device_initcall(snapshot_device_init);
gpl-2.0
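kernel/power/user.c above implements the /dev/snapshot character device that user-space hibernation tools drive through ioctls. A minimal user-space sketch of the image-creation path follows; the device node path is the conventional one and error handling is trimmed, so read it as an illustration of the SNAPSHOT_FREEZE/SNAPSHOT_CREATE_IMAGE ordering rather than a complete tool.

/* Hypothetical user-space caller; error paths abbreviated. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/suspend_ioctls.h>

int create_hibernation_image(void)
{
	int in_suspend = 0;
	int fd = open("/dev/snapshot", O_RDONLY);	/* O_RDONLY = hibernate side */

	if (fd < 0)
		return -1;
	if (ioctl(fd, SNAPSHOT_FREEZE, 0) ||		/* sync + freeze tasks */
	    ioctl(fd, SNAPSHOT_CREATE_IMAGE, &in_suspend)) {
		close(fd);
		return -1;
	}
	/* in_suspend != 0 means the image exists; a real tool would now
	 * read() it from fd page by page before releasing the device. */
	return fd;	/* caller keeps fd open to read the image */
}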
asturel/android_tegra3_grouper
arch/s390/oprofile/hwsampler.c
2583
25418
/** * arch/s390/oprofile/hwsampler.c * * Copyright IBM Corp. 2010 * Author: Heinz Graalfs <graalfs@de.ibm.com> */ #include <linux/kernel_stat.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/smp.h> #include <linux/errno.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/semaphore.h> #include <linux/oom.h> #include <linux/oprofile.h> #include <asm/lowcore.h> #include <asm/irq.h> #include "hwsampler.h" #define MAX_NUM_SDB 511 #define MIN_NUM_SDB 1 #define ALERT_REQ_MASK 0x4000000000000000ul #define BUFFER_FULL_MASK 0x8000000000000000ul #define EI_IEA (1 << 31) /* invalid entry address */ #define EI_ISE (1 << 30) /* incorrect SDBT entry */ #define EI_PRA (1 << 29) /* program request alert */ #define EI_SACA (1 << 23) /* sampler authorization change alert */ #define EI_LSDA (1 << 22) /* loss of sample data alert */ DECLARE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer); struct hws_execute_parms { void *buffer; signed int rc; }; DEFINE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer); EXPORT_PER_CPU_SYMBOL(sampler_cpu_buffer); static DEFINE_MUTEX(hws_sem); static DEFINE_MUTEX(hws_sem_oom); static unsigned char hws_flush_all; static unsigned int hws_oom; static struct workqueue_struct *hws_wq; static unsigned int hws_state; enum { HWS_INIT = 1, HWS_DEALLOCATED, HWS_STOPPED, HWS_STARTED, HWS_STOPPING }; /* set to 1 if called by kernel during memory allocation */ static unsigned char oom_killer_was_active; /* size of SDBT and SDB as of allocate API */ static unsigned long num_sdbt = 100; static unsigned long num_sdb = 511; /* sampling interval (machine cycles) */ static unsigned long interval; static unsigned long min_sampler_rate; static unsigned long max_sampler_rate; static int ssctl(void *buffer) { int cc; /* set in order to detect a program check */ cc = 1; asm volatile( "0: .insn s,0xB2870000,0(%1)\n" "1: ipm %0\n" " srl %0,28\n" "2:\n" EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) : "+d" (cc), "+a" (buffer) : "m" (*((struct hws_ssctl_request_block *)buffer)) : "cc", "memory"); return cc ? -EINVAL : 0 ; } static int qsi(void *buffer) { int cc; cc = 1; asm volatile( "0: .insn s,0xB2860000,0(%1)\n" "1: lhi %0,0\n" "2:\n" EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) : "=d" (cc), "+a" (buffer) : "m" (*((struct hws_qsi_info_block *)buffer)) : "cc", "memory"); return cc ? 
-EINVAL : 0; } static void execute_qsi(void *parms) { struct hws_execute_parms *ep = parms; ep->rc = qsi(ep->buffer); } static void execute_ssctl(void *parms) { struct hws_execute_parms *ep = parms; ep->rc = ssctl(ep->buffer); } static int smp_ctl_ssctl_stop(int cpu) { int rc; struct hws_execute_parms ep; struct hws_cpu_buffer *cb; cb = &per_cpu(sampler_cpu_buffer, cpu); cb->ssctl.es = 0; cb->ssctl.cs = 0; ep.buffer = &cb->ssctl; smp_call_function_single(cpu, execute_ssctl, &ep, 1); rc = ep.rc; if (rc) { printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu); dump_stack(); } ep.buffer = &cb->qsi; smp_call_function_single(cpu, execute_qsi, &ep, 1); if (cb->qsi.es || cb->qsi.cs) { printk(KERN_EMERG "CPUMF sampling did not stop properly.\n"); dump_stack(); } return rc; } static int smp_ctl_ssctl_deactivate(int cpu) { int rc; struct hws_execute_parms ep; struct hws_cpu_buffer *cb; cb = &per_cpu(sampler_cpu_buffer, cpu); cb->ssctl.es = 1; cb->ssctl.cs = 0; ep.buffer = &cb->ssctl; smp_call_function_single(cpu, execute_ssctl, &ep, 1); rc = ep.rc; if (rc) printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu); ep.buffer = &cb->qsi; smp_call_function_single(cpu, execute_qsi, &ep, 1); if (cb->qsi.cs) printk(KERN_EMERG "CPUMF sampling was not set inactive.\n"); return rc; } static int smp_ctl_ssctl_enable_activate(int cpu, unsigned long interval) { int rc; struct hws_execute_parms ep; struct hws_cpu_buffer *cb; cb = &per_cpu(sampler_cpu_buffer, cpu); cb->ssctl.h = 1; cb->ssctl.tear = cb->first_sdbt; cb->ssctl.dear = *(unsigned long *) cb->first_sdbt; cb->ssctl.interval = interval; cb->ssctl.es = 1; cb->ssctl.cs = 1; ep.buffer = &cb->ssctl; smp_call_function_single(cpu, execute_ssctl, &ep, 1); rc = ep.rc; if (rc) printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu); ep.buffer = &cb->qsi; smp_call_function_single(cpu, execute_qsi, &ep, 1); if (ep.rc) printk(KERN_ERR "hwsampler: CPU %d CPUMF QSI failed.\n", cpu); return rc; } static int smp_ctl_qsi(int cpu) { struct hws_execute_parms ep; struct hws_cpu_buffer *cb; cb = &per_cpu(sampler_cpu_buffer, cpu); ep.buffer = &cb->qsi; smp_call_function_single(cpu, execute_qsi, &ep, 1); return ep.rc; } static inline unsigned long *trailer_entry_ptr(unsigned long v) { void *ret; ret = (void *)v; ret += PAGE_SIZE; ret -= sizeof(struct hws_trailer_entry); return (unsigned long *) ret; } /* prototypes for external interrupt handler and worker */ static void hws_ext_handler(unsigned int ext_int_code, unsigned int param32, unsigned long param64); static void worker(struct work_struct *work); static void add_samples_to_oprofile(unsigned cpu, unsigned long *, unsigned long *dear); static void init_all_cpu_buffers(void) { int cpu; struct hws_cpu_buffer *cb; for_each_online_cpu(cpu) { cb = &per_cpu(sampler_cpu_buffer, cpu); memset(cb, 0, sizeof(struct hws_cpu_buffer)); } } static int is_link_entry(unsigned long *s) { return *s & 0x1ul ? 
1 : 0; } static unsigned long *get_next_sdbt(unsigned long *s) { return (unsigned long *) (*s & ~0x1ul); } static int prepare_cpu_buffers(void) { int cpu; int rc; struct hws_cpu_buffer *cb; rc = 0; for_each_online_cpu(cpu) { cb = &per_cpu(sampler_cpu_buffer, cpu); atomic_set(&cb->ext_params, 0); cb->worker_entry = 0; cb->sample_overflow = 0; cb->req_alert = 0; cb->incorrect_sdbt_entry = 0; cb->invalid_entry_address = 0; cb->loss_of_sample_data = 0; cb->sample_auth_change_alert = 0; cb->finish = 0; cb->oom = 0; cb->stop_mode = 0; } return rc; } /* * allocate_sdbt() - allocate sampler memory * @cpu: the cpu for which sampler memory is allocated * * A 4K page is allocated for each requested SDBT. * A maximum of 511 4K pages are allocated for the SDBs in each of the SDBTs. * Set ALERT_REQ mask in each SDBs trailer. * Returns zero if successful, <0 otherwise. */ static int allocate_sdbt(int cpu) { int j, k, rc; unsigned long *sdbt; unsigned long sdb; unsigned long *tail; unsigned long *trailer; struct hws_cpu_buffer *cb; cb = &per_cpu(sampler_cpu_buffer, cpu); if (cb->first_sdbt) return -EINVAL; sdbt = NULL; tail = sdbt; for (j = 0; j < num_sdbt; j++) { sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL); mutex_lock(&hws_sem_oom); /* OOM killer might have been activated */ barrier(); if (oom_killer_was_active || !sdbt) { if (sdbt) free_page((unsigned long)sdbt); goto allocate_sdbt_error; } if (cb->first_sdbt == 0) cb->first_sdbt = (unsigned long)sdbt; /* link current page to tail of chain */ if (tail) *tail = (unsigned long)(void *)sdbt + 1; mutex_unlock(&hws_sem_oom); for (k = 0; k < num_sdb; k++) { /* get and set SDB page */ sdb = get_zeroed_page(GFP_KERNEL); mutex_lock(&hws_sem_oom); /* OOM killer might have been activated */ barrier(); if (oom_killer_was_active || !sdb) { if (sdb) free_page(sdb); goto allocate_sdbt_error; } *sdbt = sdb; trailer = trailer_entry_ptr(*sdbt); *trailer = ALERT_REQ_MASK; sdbt++; mutex_unlock(&hws_sem_oom); } tail = sdbt; } mutex_lock(&hws_sem_oom); if (oom_killer_was_active) goto allocate_sdbt_error; rc = 0; if (tail) *tail = (unsigned long) ((void *)cb->first_sdbt) + 1; allocate_sdbt_exit: mutex_unlock(&hws_sem_oom); return rc; allocate_sdbt_error: rc = -ENOMEM; goto allocate_sdbt_exit; } /* * deallocate_sdbt() - deallocate all sampler memory * * For each online CPU all SDBT trees are deallocated. * Returns the number of freed pages. */ static int deallocate_sdbt(void) { int cpu; int counter; counter = 0; for_each_online_cpu(cpu) { unsigned long start; unsigned long sdbt; unsigned long *curr; struct hws_cpu_buffer *cb; cb = &per_cpu(sampler_cpu_buffer, cpu); if (!cb->first_sdbt) continue; sdbt = cb->first_sdbt; curr = (unsigned long *) sdbt; start = sdbt; /* we'll free the SDBT after all SDBs are processed... 
*/ while (1) { if (!*curr || !sdbt) break; /* watch for link entry reset if found */ if (is_link_entry(curr)) { curr = get_next_sdbt(curr); if (sdbt) free_page(sdbt); /* we are done if we reach the start */ if ((unsigned long) curr == start) break; else sdbt = (unsigned long) curr; } else { /* process SDB pointer */ if (*curr) { free_page(*curr); curr++; } } counter++; } cb->first_sdbt = 0; } return counter; } static int start_sampling(int cpu) { int rc; struct hws_cpu_buffer *cb; cb = &per_cpu(sampler_cpu_buffer, cpu); rc = smp_ctl_ssctl_enable_activate(cpu, interval); if (rc) { printk(KERN_INFO "hwsampler: CPU %d ssctl failed.\n", cpu); goto start_exit; } rc = -EINVAL; if (!cb->qsi.es) { printk(KERN_INFO "hwsampler: CPU %d ssctl not enabled.\n", cpu); goto start_exit; } if (!cb->qsi.cs) { printk(KERN_INFO "hwsampler: CPU %d ssctl not active.\n", cpu); goto start_exit; } printk(KERN_INFO "hwsampler: CPU %d, CPUMF Sampling started, interval %lu.\n", cpu, interval); rc = 0; start_exit: return rc; } static int stop_sampling(int cpu) { unsigned long v; int rc; struct hws_cpu_buffer *cb; rc = smp_ctl_qsi(cpu); WARN_ON(rc); cb = &per_cpu(sampler_cpu_buffer, cpu); if (!rc && !cb->qsi.es) printk(KERN_INFO "hwsampler: CPU %d, already stopped.\n", cpu); rc = smp_ctl_ssctl_stop(cpu); if (rc) { printk(KERN_INFO "hwsampler: CPU %d, ssctl stop error %d.\n", cpu, rc); goto stop_exit; } printk(KERN_INFO "hwsampler: CPU %d, CPUMF Sampling stopped.\n", cpu); stop_exit: v = cb->req_alert; if (v) printk(KERN_ERR "hwsampler: CPU %d CPUMF Request alert," " count=%lu.\n", cpu, v); v = cb->loss_of_sample_data; if (v) printk(KERN_ERR "hwsampler: CPU %d CPUMF Loss of sample data," " count=%lu.\n", cpu, v); v = cb->invalid_entry_address; if (v) printk(KERN_ERR "hwsampler: CPU %d CPUMF Invalid entry address," " count=%lu.\n", cpu, v); v = cb->incorrect_sdbt_entry; if (v) printk(KERN_ERR "hwsampler: CPU %d CPUMF Incorrect SDBT address," " count=%lu.\n", cpu, v); v = cb->sample_auth_change_alert; if (v) printk(KERN_ERR "hwsampler: CPU %d CPUMF Sample authorization change," " count=%lu.\n", cpu, v); return rc; } static int check_hardware_prerequisites(void) { if (!test_facility(68)) return -EOPNOTSUPP; return 0; } /* * hws_oom_callback() - the OOM callback function * * In case the callback is invoked during memory allocation for the * hw sampler, all obtained memory is deallocated and a flag is set * so main sampler memory allocation can exit with a failure code. * In case the callback is invoked during sampling the hw sampler * is deactivated for all CPUs. */ static int hws_oom_callback(struct notifier_block *nfb, unsigned long dummy, void *parm) { unsigned long *freed; int cpu; struct hws_cpu_buffer *cb; freed = parm; mutex_lock(&hws_sem_oom); if (hws_state == HWS_DEALLOCATED) { /* during memory allocation */ if (oom_killer_was_active == 0) { oom_killer_was_active = 1; *freed += deallocate_sdbt(); } } else { int i; cpu = get_cpu(); cb = &per_cpu(sampler_cpu_buffer, cpu); if (!cb->oom) { for_each_online_cpu(i) { smp_ctl_ssctl_deactivate(i); cb->oom = 1; } cb->finish = 1; printk(KERN_INFO "hwsampler: CPU %d, OOM notify during CPUMF Sampling.\n", cpu); } } mutex_unlock(&hws_sem_oom); return NOTIFY_OK; } static struct notifier_block hws_oom_notifier = { .notifier_call = hws_oom_callback }; static int hws_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { /* We do not have sampler space available for all possible CPUs. All CPUs should be online when hw sampling is activated. 
*/ return (hws_state <= HWS_DEALLOCATED) ? NOTIFY_OK : NOTIFY_BAD; } static struct notifier_block hws_cpu_notifier = { .notifier_call = hws_cpu_callback }; /** * hwsampler_deactivate() - set hardware sampling temporarily inactive * @cpu: specifies the CPU to be set inactive. * * Returns 0 on success, !0 on failure. */ int hwsampler_deactivate(unsigned int cpu) { /* * Deactivate hw sampling temporarily and flush the buffer * by pushing all the pending samples to oprofile buffer. * * This function can be called under one of the following conditions: * Memory unmap, task is exiting. */ int rc; struct hws_cpu_buffer *cb; rc = 0; mutex_lock(&hws_sem); cb = &per_cpu(sampler_cpu_buffer, cpu); if (hws_state == HWS_STARTED) { rc = smp_ctl_qsi(cpu); WARN_ON(rc); if (cb->qsi.cs) { rc = smp_ctl_ssctl_deactivate(cpu); if (rc) { printk(KERN_INFO "hwsampler: CPU %d, CPUMF Deactivation failed.\n", cpu); cb->finish = 1; hws_state = HWS_STOPPING; } else { hws_flush_all = 1; /* Add work to queue to read pending samples.*/ queue_work_on(cpu, hws_wq, &cb->worker); } } } mutex_unlock(&hws_sem); if (hws_wq) flush_workqueue(hws_wq); return rc; } /** * hwsampler_activate() - activate/resume hardware sampling which was deactivated * @cpu: specifies the CPU to be set active. * * Returns 0 on success, !0 on failure. */ int hwsampler_activate(unsigned int cpu) { /* * Re-activate hw sampling. This should be called in pair with * hwsampler_deactivate(). */ int rc; struct hws_cpu_buffer *cb; rc = 0; mutex_lock(&hws_sem); cb = &per_cpu(sampler_cpu_buffer, cpu); if (hws_state == HWS_STARTED) { rc = smp_ctl_qsi(cpu); WARN_ON(rc); if (!cb->qsi.cs) { hws_flush_all = 0; rc = smp_ctl_ssctl_enable_activate(cpu, interval); if (rc) { printk(KERN_ERR "CPU %d, CPUMF activate sampling failed.\n", cpu); } } } mutex_unlock(&hws_sem); return rc; } static void hws_ext_handler(unsigned int ext_int_code, unsigned int param32, unsigned long param64) { struct hws_cpu_buffer *cb; kstat_cpu(smp_processor_id()).irqs[EXTINT_CPM]++; cb = &__get_cpu_var(sampler_cpu_buffer); atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32); if (hws_wq) queue_work(hws_wq, &cb->worker); } static int check_qsi_on_setup(void) { int rc; unsigned int cpu; struct hws_cpu_buffer *cb; for_each_online_cpu(cpu) { cb = &per_cpu(sampler_cpu_buffer, cpu); rc = smp_ctl_qsi(cpu); WARN_ON(rc); if (rc) return -EOPNOTSUPP; if (!cb->qsi.as) { printk(KERN_INFO "hwsampler: CPUMF sampling is not authorized.\n"); return -EINVAL; } if (cb->qsi.es) { printk(KERN_WARNING "hwsampler: CPUMF is still enabled.\n"); rc = smp_ctl_ssctl_stop(cpu); if (rc) return -EINVAL; printk(KERN_INFO "CPU %d, CPUMF Sampling stopped now.\n", cpu); } } return 0; } static int check_qsi_on_start(void) { unsigned int cpu; int rc; struct hws_cpu_buffer *cb; for_each_online_cpu(cpu) { cb = &per_cpu(sampler_cpu_buffer, cpu); rc = smp_ctl_qsi(cpu); WARN_ON(rc); if (!cb->qsi.as) return -EINVAL; if (cb->qsi.es) return -EINVAL; if (cb->qsi.cs) return -EINVAL; } return 0; } static void worker_on_start(unsigned int cpu) { struct hws_cpu_buffer *cb; cb = &per_cpu(sampler_cpu_buffer, cpu); cb->worker_entry = cb->first_sdbt; } static int worker_check_error(unsigned int cpu, int ext_params) { int rc; unsigned long *sdbt; struct hws_cpu_buffer *cb; rc = 0; cb = &per_cpu(sampler_cpu_buffer, cpu); sdbt = (unsigned long *) cb->worker_entry; if (!sdbt || !*sdbt) return -EINVAL; if (ext_params & EI_PRA) cb->req_alert++; if (ext_params & EI_LSDA) cb->loss_of_sample_data++; if (ext_params & EI_IEA) { 
cb->invalid_entry_address++; rc = -EINVAL; } if (ext_params & EI_ISE) { cb->incorrect_sdbt_entry++; rc = -EINVAL; } if (ext_params & EI_SACA) { cb->sample_auth_change_alert++; rc = -EINVAL; } return rc; } static void worker_on_finish(unsigned int cpu) { int rc, i; struct hws_cpu_buffer *cb; cb = &per_cpu(sampler_cpu_buffer, cpu); if (cb->finish) { rc = smp_ctl_qsi(cpu); WARN_ON(rc); if (cb->qsi.es) { printk(KERN_INFO "hwsampler: CPU %d, CPUMF Stop/Deactivate sampling.\n", cpu); rc = smp_ctl_ssctl_stop(cpu); if (rc) printk(KERN_INFO "hwsampler: CPU %d, CPUMF Deactivation failed.\n", cpu); for_each_online_cpu(i) { if (i == cpu) continue; if (!cb->finish) { cb->finish = 1; queue_work_on(i, hws_wq, &cb->worker); } } } } } static void worker_on_interrupt(unsigned int cpu) { unsigned long *sdbt; unsigned char done; struct hws_cpu_buffer *cb; cb = &per_cpu(sampler_cpu_buffer, cpu); sdbt = (unsigned long *) cb->worker_entry; done = 0; /* do not proceed if stop was entered, * forget the buffers not yet processed */ while (!done && !cb->stop_mode) { unsigned long *trailer; struct hws_trailer_entry *te; unsigned long *dear = 0; trailer = trailer_entry_ptr(*sdbt); /* leave loop if no more work to do */ if (!(*trailer & BUFFER_FULL_MASK)) { done = 1; if (!hws_flush_all) continue; } te = (struct hws_trailer_entry *)trailer; cb->sample_overflow += te->overflow; add_samples_to_oprofile(cpu, sdbt, dear); /* reset trailer */ xchg((unsigned char *) te, 0x40); /* advance to next sdb slot in current sdbt */ sdbt++; /* in case link bit is set use address w/o link bit */ if (is_link_entry(sdbt)) sdbt = get_next_sdbt(sdbt); cb->worker_entry = (unsigned long)sdbt; } } static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, unsigned long *dear) { struct hws_data_entry *sample_data_ptr; unsigned long *trailer; trailer = trailer_entry_ptr(*sdbt); if (dear) { if (dear > trailer) return; trailer = dear; } sample_data_ptr = (struct hws_data_entry *)(*sdbt); while ((unsigned long *)sample_data_ptr < trailer) { struct pt_regs *regs = NULL; struct task_struct *tsk = NULL; /* * Check sampling mode, 1 indicates basic (=customer) sampling * mode. */ if (sample_data_ptr->def != 1) { /* sample slot is not yet written */ break; } else { /* make sure we don't use it twice, * the next time the sampler will set it again */ sample_data_ptr->def = 0; } /* Get pt_regs. */ if (sample_data_ptr->P == 1) { /* userspace sample */ unsigned int pid = sample_data_ptr->prim_asn; rcu_read_lock(); tsk = pid_task(find_vpid(pid), PIDTYPE_PID); if (tsk) regs = task_pt_regs(tsk); rcu_read_unlock(); } else { /* kernelspace sample */ regs = task_pt_regs(current); } mutex_lock(&hws_sem); oprofile_add_ext_hw_sample(sample_data_ptr->ia, regs, 0, !sample_data_ptr->P, tsk); mutex_unlock(&hws_sem); sample_data_ptr++; } } static void worker(struct work_struct *work) { unsigned int cpu; int ext_params; struct hws_cpu_buffer *cb; cb = container_of(work, struct hws_cpu_buffer, worker); cpu = smp_processor_id(); ext_params = atomic_xchg(&cb->ext_params, 0); if (!cb->worker_entry) worker_on_start(cpu); if (worker_check_error(cpu, ext_params)) return; if (!cb->finish) worker_on_interrupt(cpu); if (cb->finish) worker_on_finish(cpu); } /** * hwsampler_allocate() - allocate memory for the hardware sampler * @sdbt: number of SDBTs per online CPU (must be > 0) * @sdb: number of SDBs per SDBT (minimum 1, maximum 511) * * Returns 0 on success, !0 on failure. 
*/ int hwsampler_allocate(unsigned long sdbt, unsigned long sdb) { int cpu, rc; mutex_lock(&hws_sem); rc = -EINVAL; if (hws_state != HWS_DEALLOCATED) goto allocate_exit; if (sdbt < 1) goto allocate_exit; if (sdb > MAX_NUM_SDB || sdb < MIN_NUM_SDB) goto allocate_exit; num_sdbt = sdbt; num_sdb = sdb; oom_killer_was_active = 0; register_oom_notifier(&hws_oom_notifier); for_each_online_cpu(cpu) { if (allocate_sdbt(cpu)) { unregister_oom_notifier(&hws_oom_notifier); goto allocate_error; } } unregister_oom_notifier(&hws_oom_notifier); if (oom_killer_was_active) goto allocate_error; hws_state = HWS_STOPPED; rc = 0; allocate_exit: mutex_unlock(&hws_sem); return rc; allocate_error: rc = -ENOMEM; printk(KERN_ERR "hwsampler: CPUMF Memory allocation failed.\n"); goto allocate_exit; } /** * hwsampler_deallocate() - deallocate hardware sampler memory * * Returns 0 on success, !0 on failure. */ int hwsampler_deallocate() { int rc; mutex_lock(&hws_sem); rc = -EINVAL; if (hws_state != HWS_STOPPED) goto deallocate_exit; ctl_clear_bit(0, 5); /* set bit 58 CR0 off */ deallocate_sdbt(); hws_state = HWS_DEALLOCATED; rc = 0; deallocate_exit: mutex_unlock(&hws_sem); return rc; } unsigned long hwsampler_query_min_interval(void) { return min_sampler_rate; } unsigned long hwsampler_query_max_interval(void) { return max_sampler_rate; } unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu) { struct hws_cpu_buffer *cb; cb = &per_cpu(sampler_cpu_buffer, cpu); return cb->sample_overflow; } int hwsampler_setup() { int rc; int cpu; struct hws_cpu_buffer *cb; mutex_lock(&hws_sem); rc = -EINVAL; if (hws_state) goto setup_exit; hws_state = HWS_INIT; init_all_cpu_buffers(); rc = check_hardware_prerequisites(); if (rc) goto setup_exit; rc = check_qsi_on_setup(); if (rc) goto setup_exit; rc = -EINVAL; hws_wq = create_workqueue("hwsampler"); if (!hws_wq) goto setup_exit; register_cpu_notifier(&hws_cpu_notifier); for_each_online_cpu(cpu) { cb = &per_cpu(sampler_cpu_buffer, cpu); INIT_WORK(&cb->worker, worker); rc = smp_ctl_qsi(cpu); WARN_ON(rc); if (min_sampler_rate != cb->qsi.min_sampl_rate) { if (min_sampler_rate) { printk(KERN_WARNING "hwsampler: different min sampler rate values.\n"); if (min_sampler_rate < cb->qsi.min_sampl_rate) min_sampler_rate = cb->qsi.min_sampl_rate; } else min_sampler_rate = cb->qsi.min_sampl_rate; } if (max_sampler_rate != cb->qsi.max_sampl_rate) { if (max_sampler_rate) { printk(KERN_WARNING "hwsampler: different max sampler rate values.\n"); if (max_sampler_rate > cb->qsi.max_sampl_rate) max_sampler_rate = cb->qsi.max_sampl_rate; } else max_sampler_rate = cb->qsi.max_sampl_rate; } } register_external_interrupt(0x1407, hws_ext_handler); hws_state = HWS_DEALLOCATED; rc = 0; setup_exit: mutex_unlock(&hws_sem); return rc; } int hwsampler_shutdown() { int rc; mutex_lock(&hws_sem); rc = -EINVAL; if (hws_state == HWS_DEALLOCATED || hws_state == HWS_STOPPED) { mutex_unlock(&hws_sem); if (hws_wq) flush_workqueue(hws_wq); mutex_lock(&hws_sem); if (hws_state == HWS_STOPPED) { ctl_clear_bit(0, 5); /* set bit 58 CR0 off */ deallocate_sdbt(); } if (hws_wq) { destroy_workqueue(hws_wq); hws_wq = NULL; } unregister_external_interrupt(0x1407, hws_ext_handler); hws_state = HWS_INIT; rc = 0; } mutex_unlock(&hws_sem); unregister_cpu_notifier(&hws_cpu_notifier); return rc; } /** * hwsampler_start_all() - start hardware sampling on all online CPUs * @rate: specifies the used interval when samples are taken * * Returns 0 on success, !0 on failure. 
*/ int hwsampler_start_all(unsigned long rate) { int rc, cpu; mutex_lock(&hws_sem); hws_oom = 0; rc = -EINVAL; if (hws_state != HWS_STOPPED) goto start_all_exit; interval = rate; /* fail if rate is not valid */ if (interval < min_sampler_rate || interval > max_sampler_rate) goto start_all_exit; rc = check_qsi_on_start(); if (rc) goto start_all_exit; rc = prepare_cpu_buffers(); if (rc) goto start_all_exit; for_each_online_cpu(cpu) { rc = start_sampling(cpu); if (rc) break; } if (rc) { for_each_online_cpu(cpu) { stop_sampling(cpu); } goto start_all_exit; } hws_state = HWS_STARTED; rc = 0; start_all_exit: mutex_unlock(&hws_sem); if (rc) return rc; register_oom_notifier(&hws_oom_notifier); hws_oom = 1; hws_flush_all = 0; /* now let them in, 1407 CPUMF external interrupts */ ctl_set_bit(0, 5); /* set CR0 bit 58 */ return 0; } /** * hwsampler_stop_all() - stop hardware sampling on all online CPUs * * Returns 0 on success, !0 on failure. */ int hwsampler_stop_all() { int tmp_rc, rc, cpu; struct hws_cpu_buffer *cb; mutex_lock(&hws_sem); rc = 0; if (hws_state == HWS_INIT) { mutex_unlock(&hws_sem); return rc; } hws_state = HWS_STOPPING; mutex_unlock(&hws_sem); for_each_online_cpu(cpu) { cb = &per_cpu(sampler_cpu_buffer, cpu); cb->stop_mode = 1; tmp_rc = stop_sampling(cpu); if (tmp_rc) rc = tmp_rc; } if (hws_wq) flush_workqueue(hws_wq); mutex_lock(&hws_sem); if (hws_oom) { unregister_oom_notifier(&hws_oom_notifier); hws_oom = 0; } hws_state = HWS_STOPPED; mutex_unlock(&hws_sem); return rc; }
gpl-2.0
mingit/mstcp
drivers/media/pci/bt8xx/bttv-input.c
2583
14061
/* * * Copyright (c) 2003 Gerd Knorr * Copyright (c) 2003 Pavel Machek * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/input.h> #include <linux/slab.h> #include "bttv.h" #include "bttvp.h" static int ir_debug; module_param(ir_debug, int, 0644); static int ir_rc5_remote_gap = 885; module_param(ir_rc5_remote_gap, int, 0644); #undef dprintk #define dprintk(fmt, ...) \ do { \ if (ir_debug >= 1) \ pr_info(fmt, ##__VA_ARGS__); \ } while (0) #define DEVNAME "bttv-input" #define MODULE_NAME "bttv" /* ---------------------------------------------------------------------- */ static void ir_handle_key(struct bttv *btv) { struct bttv_ir *ir = btv->remote; u32 gpio,data; /* read gpio value */ gpio = bttv_gpio_read(&btv->c); if (ir->polling) { if (ir->last_gpio == gpio) return; ir->last_gpio = gpio; } /* extract data */ data = ir_extract_bits(gpio, ir->mask_keycode); dprintk("irq gpio=0x%x code=%d | %s%s%s\n", gpio, data, ir->polling ? "poll" : "irq", (gpio & ir->mask_keydown) ? " down" : "", (gpio & ir->mask_keyup) ? " up" : ""); if ((ir->mask_keydown && (gpio & ir->mask_keydown)) || (ir->mask_keyup && !(gpio & ir->mask_keyup))) { rc_keydown_notimeout(ir->dev, data, 0); } else { /* HACK: Probably, ir->mask_keydown is missing for this board */ if (btv->c.type == BTTV_BOARD_WINFAST2000) rc_keydown_notimeout(ir->dev, data, 0); rc_keyup(ir->dev); } } static void ir_enltv_handle_key(struct bttv *btv) { struct bttv_ir *ir = btv->remote; u32 gpio, data, keyup; /* read gpio value */ gpio = bttv_gpio_read(&btv->c); /* extract data */ data = ir_extract_bits(gpio, ir->mask_keycode); /* Check if it is keyup */ keyup = (gpio & ir->mask_keyup) ? 1 << 31 : 0; if ((ir->last_gpio & 0x7f) != data) { dprintk("gpio=0x%x code=%d | %s\n", gpio, data, (gpio & ir->mask_keyup) ? " up" : "up/down"); rc_keydown_notimeout(ir->dev, data, 0); if (keyup) rc_keyup(ir->dev); } else { if ((ir->last_gpio & 1 << 31) == keyup) return; dprintk("(cnt) gpio=0x%x code=%d | %s\n", gpio, data, (gpio & ir->mask_keyup) ? " up" : "down"); if (keyup) rc_keyup(ir->dev); else rc_keydown_notimeout(ir->dev, data, 0); } ir->last_gpio = data | keyup; } static int bttv_rc5_irq(struct bttv *btv); void bttv_input_irq(struct bttv *btv) { struct bttv_ir *ir = btv->remote; if (ir->rc5_gpio) bttv_rc5_irq(btv); else if (!ir->polling) ir_handle_key(btv); } static void bttv_input_timer(unsigned long data) { struct bttv *btv = (struct bttv*)data; struct bttv_ir *ir = btv->remote; if (btv->c.type == BTTV_BOARD_ENLTV_FM_2) ir_enltv_handle_key(btv); else ir_handle_key(btv); mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling)); } /* * FIXME: Nebula digi uses the legacy way to decode RC5, instead of relying * on the rc-core way. 
As we need to be sure that both IRQ transitions are * properly triggered, Better to touch it only with this hardware for * testing. */ #define RC5_START(x) (((x) >> 12) & 3) #define RC5_TOGGLE(x) (((x) >> 11) & 1) #define RC5_ADDR(x) (((x) >> 6) & 31) #define RC5_INSTR(x) ((x) & 63) /* decode raw bit pattern to RC5 code */ static u32 bttv_rc5_decode(unsigned int code) { unsigned int org_code = code; unsigned int pair; unsigned int rc5 = 0; int i; for (i = 0; i < 14; ++i) { pair = code & 0x3; code >>= 2; rc5 <<= 1; switch (pair) { case 0: case 2: break; case 1: rc5 |= 1; break; case 3: dprintk("rc5_decode(%x) bad code\n", org_code); return 0; } } dprintk("code=%x, rc5=%x, start=%x, toggle=%x, address=%x, " "instr=%x\n", rc5, org_code, RC5_START(rc5), RC5_TOGGLE(rc5), RC5_ADDR(rc5), RC5_INSTR(rc5)); return rc5; } static void bttv_rc5_timer_end(unsigned long data) { struct bttv_ir *ir = (struct bttv_ir *)data; struct timeval tv; u32 gap; u32 rc5 = 0; /* get time */ do_gettimeofday(&tv); /* avoid overflow with gap >1s */ if (tv.tv_sec - ir->base_time.tv_sec > 1) { gap = 200000; } else { gap = 1000000 * (tv.tv_sec - ir->base_time.tv_sec) + tv.tv_usec - ir->base_time.tv_usec; } /* signal we're ready to start a new code */ ir->active = false; /* Allow some timer jitter (RC5 is ~24ms anyway so this is ok) */ if (gap < 28000) { dprintk("spurious timer_end\n"); return; } if (ir->last_bit < 20) { /* ignore spurious codes (caused by light/other remotes) */ dprintk("short code: %x\n", ir->code); } else { ir->code = (ir->code << ir->shift_by) | 1; rc5 = bttv_rc5_decode(ir->code); /* two start bits? */ if (RC5_START(rc5) != ir->start) { pr_info(DEVNAME ":" " rc5 start bits invalid: %u\n", RC5_START(rc5)); /* right address? */ } else if (RC5_ADDR(rc5) == ir->addr) { u32 toggle = RC5_TOGGLE(rc5); u32 instr = RC5_INSTR(rc5); /* Good code */ rc_keydown(ir->dev, instr, toggle); dprintk("instruction %x, toggle %x\n", instr, toggle); } } } static int bttv_rc5_irq(struct bttv *btv) { struct bttv_ir *ir = btv->remote; struct timeval tv; u32 gpio; u32 gap; unsigned long current_jiffies; /* read gpio port */ gpio = bttv_gpio_read(&btv->c); /* get time of bit */ current_jiffies = jiffies; do_gettimeofday(&tv); /* avoid overflow with gap >1s */ if (tv.tv_sec - ir->base_time.tv_sec > 1) { gap = 200000; } else { gap = 1000000 * (tv.tv_sec - ir->base_time.tv_sec) + tv.tv_usec - ir->base_time.tv_usec; } dprintk("RC5 IRQ: gap %d us for %s\n", gap, (gpio & 0x20) ? "mark" : "space"); /* remote IRQ? 
*/ if (!(gpio & 0x20)) return 0; /* active code => add bit */ if (ir->active) { /* only if in the code (otherwise spurious IRQ or timer late) */ if (ir->last_bit < 28) { ir->last_bit = (gap - ir_rc5_remote_gap / 2) / ir_rc5_remote_gap; ir->code |= 1 << ir->last_bit; } /* starting new code */ } else { ir->active = true; ir->code = 0; ir->base_time = tv; ir->last_bit = 0; mod_timer(&ir->timer, current_jiffies + msecs_to_jiffies(30)); } /* toggle GPIO pin 4 to reset the irq */ bttv_gpio_write(&btv->c, gpio & ~(1 << 4)); bttv_gpio_write(&btv->c, gpio | (1 << 4)); return 1; } /* ---------------------------------------------------------------------- */ static void bttv_ir_start(struct bttv *btv, struct bttv_ir *ir) { if (ir->polling) { setup_timer(&ir->timer, bttv_input_timer, (unsigned long)btv); ir->timer.expires = jiffies + msecs_to_jiffies(1000); add_timer(&ir->timer); } else if (ir->rc5_gpio) { /* set timer_end for code completion */ setup_timer(&ir->timer, bttv_rc5_timer_end, (unsigned long)ir); ir->shift_by = 1; ir->start = 3; ir->addr = 0x0; ir->rc5_remote_gap = ir_rc5_remote_gap; } } static void bttv_ir_stop(struct bttv *btv) { if (btv->remote->polling) del_timer_sync(&btv->remote->timer); if (btv->remote->rc5_gpio) { u32 gpio; del_timer_sync(&btv->remote->timer); gpio = bttv_gpio_read(&btv->c); bttv_gpio_write(&btv->c, gpio & ~(1 << 4)); } } /* * Get_key functions used by I2C remotes */ static int get_key_pv951(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw) { unsigned char b; /* poll IR chip */ if (1 != i2c_master_recv(ir->c, &b, 1)) { dprintk("read error\n"); return -EIO; } /* ignore 0xaa */ if (b==0xaa) return 0; dprintk("key %02x\n", b); /* * NOTE: * lirc_i2c maps the pv951 code as: * addr = 0x61D6 * cmd = bit_reverse (b) * So, it seems that this device uses NEC extended * I decided to not fix the table, due to two reasons: * 1) Without the actual device, this is only a guess; * 2) As the addr is not reported via I2C, nor can be changed, * the device is bound to the vendor-provided RC. */ *ir_key = b; *ir_raw = b; return 1; } /* Instantiate the I2C IR receiver device, if present */ void init_bttv_i2c_ir(struct bttv *btv) { const unsigned short addr_list[] = { 0x1a, 0x18, 0x64, 0x30, 0x71, I2C_CLIENT_END }; struct i2c_board_info info; struct i2c_client *i2c_dev; if (0 != btv->i2c_rc) return; memset(&info, 0, sizeof(struct i2c_board_info)); memset(&btv->init_data, 0, sizeof(btv->init_data)); strlcpy(info.type, "ir_video", I2C_NAME_SIZE); switch (btv->c.type) { case BTTV_BOARD_PV951: btv->init_data.name = "PV951"; btv->init_data.get_key = get_key_pv951; btv->init_data.ir_codes = RC_MAP_PV951; info.addr = 0x4b; break; } if (btv->init_data.name) { info.platform_data = &btv->init_data; i2c_dev = i2c_new_device(&btv->c.i2c_adap, &info); } else { /* * The external IR receiver is at i2c address 0x34 (0x35 for * reads). Future Hauppauge cards will have an internal * receiver at 0x30 (0x31 for reads). In theory, both can be * fitted, and Hauppauge suggest an external overrides an * internal. * That's why we probe 0x1a (~0x34) first. 
CB */ i2c_dev = i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list, NULL); } if (NULL == i2c_dev) return; #if defined(CONFIG_MODULES) && defined(MODULE) request_module("ir-kbd-i2c"); #endif } int bttv_input_init(struct bttv *btv) { struct bttv_ir *ir; char *ir_codes = NULL; struct rc_dev *rc; int err = -ENOMEM; if (!btv->has_remote) return -ENODEV; ir = kzalloc(sizeof(*ir),GFP_KERNEL); rc = rc_allocate_device(); if (!ir || !rc) goto err_out_free; /* detect & configure */ switch (btv->c.type) { case BTTV_BOARD_AVERMEDIA: case BTTV_BOARD_AVPHONE98: case BTTV_BOARD_AVERMEDIA98: ir_codes = RC_MAP_AVERMEDIA; ir->mask_keycode = 0xf88000; ir->mask_keydown = 0x010000; ir->polling = 50; // ms break; case BTTV_BOARD_AVDVBT_761: case BTTV_BOARD_AVDVBT_771: ir_codes = RC_MAP_AVERMEDIA_DVBT; ir->mask_keycode = 0x0f00c0; ir->mask_keydown = 0x000020; ir->polling = 50; // ms break; case BTTV_BOARD_PXELVWPLTVPAK: ir_codes = RC_MAP_PIXELVIEW; ir->mask_keycode = 0x003e00; ir->mask_keyup = 0x010000; ir->polling = 50; // ms break; case BTTV_BOARD_PV_M4900: case BTTV_BOARD_PV_BT878P_9B: case BTTV_BOARD_PV_BT878P_PLUS: ir_codes = RC_MAP_PIXELVIEW; ir->mask_keycode = 0x001f00; ir->mask_keyup = 0x008000; ir->polling = 50; // ms break; case BTTV_BOARD_WINFAST2000: ir_codes = RC_MAP_WINFAST; ir->mask_keycode = 0x1f8; break; case BTTV_BOARD_MAGICTVIEW061: case BTTV_BOARD_MAGICTVIEW063: ir_codes = RC_MAP_WINFAST; ir->mask_keycode = 0x0008e000; ir->mask_keydown = 0x00200000; break; case BTTV_BOARD_APAC_VIEWCOMP: ir_codes = RC_MAP_APAC_VIEWCOMP; ir->mask_keycode = 0x001f00; ir->mask_keyup = 0x008000; ir->polling = 50; // ms break; case BTTV_BOARD_ASKEY_CPH03X: case BTTV_BOARD_CONCEPTRONIC_CTVFMI2: case BTTV_BOARD_CONTVFMI: ir_codes = RC_MAP_PIXELVIEW; ir->mask_keycode = 0x001F00; ir->mask_keyup = 0x006000; ir->polling = 50; // ms break; case BTTV_BOARD_NEBULA_DIGITV: ir_codes = RC_MAP_NEBULA; ir->rc5_gpio = true; break; case BTTV_BOARD_MACHTV_MAGICTV: ir_codes = RC_MAP_APAC_VIEWCOMP; ir->mask_keycode = 0x001F00; ir->mask_keyup = 0x004000; ir->polling = 50; /* ms */ break; case BTTV_BOARD_KOZUMI_KTV_01C: ir_codes = RC_MAP_PCTV_SEDNA; ir->mask_keycode = 0x001f00; ir->mask_keyup = 0x006000; ir->polling = 50; /* ms */ break; case BTTV_BOARD_ENLTV_FM_2: ir_codes = RC_MAP_ENCORE_ENLTV2; ir->mask_keycode = 0x00fd00; ir->mask_keyup = 0x000080; ir->polling = 1; /* ms */ ir->last_gpio = ir_extract_bits(bttv_gpio_read(&btv->c), ir->mask_keycode); break; } if (NULL == ir_codes) { dprintk("Ooops: IR config error [card=%d]\n", btv->c.type); err = -ENODEV; goto err_out_free; } if (ir->rc5_gpio) { u32 gpio; /* enable remote irq */ bttv_gpio_inout(&btv->c, (1 << 4), 1 << 4); gpio = bttv_gpio_read(&btv->c); bttv_gpio_write(&btv->c, gpio & ~(1 << 4)); bttv_gpio_write(&btv->c, gpio | (1 << 4)); } else { /* init hardware-specific stuff */ bttv_gpio_inout(&btv->c, ir->mask_keycode | ir->mask_keydown, 0); } /* init input device */ ir->dev = rc; snprintf(ir->name, sizeof(ir->name), "bttv IR (card=%d)", btv->c.type); snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0", pci_name(btv->c.pci)); rc->input_name = ir->name; rc->input_phys = ir->phys; rc->input_id.bustype = BUS_PCI; rc->input_id.version = 1; if (btv->c.pci->subsystem_vendor) { rc->input_id.vendor = btv->c.pci->subsystem_vendor; rc->input_id.product = btv->c.pci->subsystem_device; } else { rc->input_id.vendor = btv->c.pci->vendor; rc->input_id.product = btv->c.pci->device; } rc->dev.parent = &btv->c.pci->dev; rc->map_name = ir_codes; rc->driver_name = MODULE_NAME; btv->remote = ir; 
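/* arm the polling timer or set up the RC5 GPIO decoder before the device is registered with rc-core */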
bttv_ir_start(btv, ir); /* all done */ err = rc_register_device(rc); if (err) goto err_out_stop; return 0; err_out_stop: bttv_ir_stop(btv); btv->remote = NULL; err_out_free: rc_free_device(rc); kfree(ir); return err; } void bttv_input_fini(struct bttv *btv) { if (btv->remote == NULL) return; bttv_ir_stop(btv); rc_unregister_device(btv->remote->dev); kfree(btv->remote); btv->remote = NULL; }
gpl-2.0
luciang/linux-2.6-new-sysctl
arch/mn10300/kernel/smp.c
2839
28219
/* SMP support routines. * * Copyright (C) 2006-2008 Panasonic Corporation * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/jiffies.h> #include <linux/cpumask.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/profile.h> #include <linux/smp.h> #include <asm/tlbflush.h> #include <asm/system.h> #include <asm/bitops.h> #include <asm/processor.h> #include <asm/bug.h> #include <asm/exceptions.h> #include <asm/hardirq.h> #include <asm/fpu.h> #include <asm/mmu_context.h> #include <asm/thread_info.h> #include <asm/cpu-regs.h> #include <asm/intctl-regs.h> #include "internal.h" #ifdef CONFIG_HOTPLUG_CPU #include <linux/cpu.h> #include <asm/cacheflush.h> static unsigned long sleep_mode[NR_CPUS]; static void run_sleep_cpu(unsigned int cpu); static void run_wakeup_cpu(unsigned int cpu); #endif /* CONFIG_HOTPLUG_CPU */ /* * Debug Message function */ #undef DEBUG_SMP #ifdef DEBUG_SMP #define Dprintk(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__) #else #define Dprintk(fmt, ...) no_printk(KERN_DEBUG fmt, ##__VA_ARGS__) #endif /* timeout value in msec for smp_nmi_call_function. zero is no timeout. */ #define CALL_FUNCTION_NMI_IPI_TIMEOUT 0 /* * Structure and data for smp_nmi_call_function(). */ struct nmi_call_data_struct { smp_call_func_t func; void *info; cpumask_t started; cpumask_t finished; int wait; char size_alignment[0] __attribute__ ((__aligned__(SMP_CACHE_BYTES))); } __attribute__ ((__aligned__(SMP_CACHE_BYTES))); static DEFINE_SPINLOCK(smp_nmi_call_lock); static struct nmi_call_data_struct *nmi_call_data; /* * Data structures and variables */ static cpumask_t cpu_callin_map; /* Bitmask of callin CPUs */ static cpumask_t cpu_callout_map; /* Bitmask of callout CPUs */ cpumask_t cpu_boot_map; /* Bitmask of boot APs */ unsigned long start_stack[NR_CPUS - 1]; /* * Per CPU parameters */ struct mn10300_cpuinfo cpu_data[NR_CPUS] __cacheline_aligned; static int cpucount; /* The count of boot CPUs */ static cpumask_t smp_commenced_mask; cpumask_t cpu_initialized __initdata = CPU_MASK_NONE; /* * Function Prototypes */ static int do_boot_cpu(int); static void smp_show_cpu_info(int cpu_id); static void smp_callin(void); static void smp_online(void); static void smp_store_cpu_info(int); static void smp_cpu_init(void); static void smp_tune_scheduling(void); static void send_IPI_mask(const cpumask_t *cpumask, int irq); static void init_ipi(void); /* * IPI Initialization interrupt definitions */ static void mn10300_ipi_disable(unsigned int irq); static void mn10300_ipi_enable(unsigned int irq); static void mn10300_ipi_chip_disable(struct irq_data *d); static void mn10300_ipi_chip_enable(struct irq_data *d); static void mn10300_ipi_ack(struct irq_data *d); static void mn10300_ipi_nop(struct irq_data *d); static struct irq_chip mn10300_ipi_type = { .name = "cpu_ipi", .irq_disable = mn10300_ipi_chip_disable, .irq_enable = mn10300_ipi_chip_enable, .irq_ack = mn10300_ipi_ack, .irq_eoi = mn10300_ipi_nop }; static irqreturn_t smp_reschedule_interrupt(int irq, 
void *dev_id); static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id); static struct irqaction reschedule_ipi = { .handler = smp_reschedule_interrupt, .name = "smp reschedule IPI" }; static struct irqaction call_function_ipi = { .handler = smp_call_function_interrupt, .name = "smp call function IPI" }; #if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id); static struct irqaction local_timer_ipi = { .handler = smp_ipi_timer_interrupt, .flags = IRQF_DISABLED, .name = "smp local timer IPI" }; #endif /** * init_ipi - Initialise the IPI mechanism */ static void init_ipi(void) { unsigned long flags; u16 tmp16; /* set up the reschedule IPI */ irq_set_chip_and_handler(RESCHEDULE_IPI, &mn10300_ipi_type, handle_percpu_irq); setup_irq(RESCHEDULE_IPI, &reschedule_ipi); set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV); mn10300_ipi_enable(RESCHEDULE_IPI); /* set up the call function IPI */ irq_set_chip_and_handler(CALL_FUNC_SINGLE_IPI, &mn10300_ipi_type, handle_percpu_irq); setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi); set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV); mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI); /* set up the local timer IPI */ #if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \ defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) irq_set_chip_and_handler(LOCAL_TIMER_IPI, &mn10300_ipi_type, handle_percpu_irq); setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi); set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV); mn10300_ipi_enable(LOCAL_TIMER_IPI); #endif #ifdef CONFIG_MN10300_CACHE_ENABLED /* set up the cache flush IPI */ flags = arch_local_cli_save(); __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(FLUSH_CACHE_GxICR_LV), mn10300_low_ipi_handler); GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT; mn10300_ipi_enable(FLUSH_CACHE_IPI); arch_local_irq_restore(flags); #endif /* set up the NMI call function IPI */ flags = arch_local_cli_save(); GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT; tmp16 = GxICR(CALL_FUNCTION_NMI_IPI); arch_local_irq_restore(flags); /* set up the SMP boot IPI */ flags = arch_local_cli_save(); __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(SMP_BOOT_GxICR_LV), mn10300_low_ipi_handler); arch_local_irq_restore(flags); } /** * mn10300_ipi_shutdown - Shut down handling of an IPI * @irq: The IPI to be shut down. */ static void mn10300_ipi_shutdown(unsigned int irq) { unsigned long flags; u16 tmp; flags = arch_local_cli_save(); tmp = GxICR(irq); GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT; tmp = GxICR(irq); arch_local_irq_restore(flags); } /** * mn10300_ipi_enable - Enable an IPI * @irq: The IPI to be enabled. */ static void mn10300_ipi_enable(unsigned int irq) { unsigned long flags; u16 tmp; flags = arch_local_cli_save(); tmp = GxICR(irq); GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE; tmp = GxICR(irq); arch_local_irq_restore(flags); } static void mn10300_ipi_chip_enable(struct irq_data *d) { mn10300_ipi_enable(d->irq); } /** * mn10300_ipi_disable - Disable an IPI * @irq: The IPI to be disabled. */ static void mn10300_ipi_disable(unsigned int irq) { unsigned long flags; u16 tmp; flags = arch_local_cli_save(); tmp = GxICR(irq); GxICR(irq) = tmp & GxICR_LEVEL; tmp = GxICR(irq); arch_local_irq_restore(flags); } static void mn10300_ipi_chip_disable(struct irq_data *d) { mn10300_ipi_disable(d->irq); } /** * mn10300_ipi_ack - Acknowledge an IPI interrupt in the PIC * @irq: The IPI to be acknowledged. 
* * Clear the interrupt detection flag for the IPI on the appropriate interrupt * channel in the PIC. */ static void mn10300_ipi_ack(struct irq_data *d) { unsigned int irq = d->irq; unsigned long flags; u16 tmp; flags = arch_local_cli_save(); GxICR_u8(irq) = GxICR_DETECT; tmp = GxICR(irq); arch_local_irq_restore(flags); } /** * mn10300_ipi_nop - Dummy IPI action * @irq: The IPI to be acted upon. */ static void mn10300_ipi_nop(struct irq_data *d) { } /** * send_IPI_mask - Send IPIs to all CPUs in list * @cpumask: The list of CPUs to target. * @irq: The IPI request to be sent. * * Send the specified IPI to all the CPUs in the list, not waiting for them to * finish before returning. The caller is responsible for synchronisation if * that is needed. */ static void send_IPI_mask(const cpumask_t *cpumask, int irq) { int i; u16 tmp; for (i = 0; i < NR_CPUS; i++) { if (cpumask_test_cpu(i, cpumask)) { /* send IPI */ tmp = CROSS_GxICR(irq, i); CROSS_GxICR(irq, i) = tmp | GxICR_REQUEST | GxICR_DETECT; tmp = CROSS_GxICR(irq, i); /* flush write buffer */ } } } /** * send_IPI_self - Send an IPI to this CPU. * @irq: The IPI request to be sent. * * Send the specified IPI to the current CPU. */ void send_IPI_self(int irq) { send_IPI_mask(cpumask_of(smp_processor_id()), irq); } /** * send_IPI_allbutself - Send IPIs to all the other CPUs. * @irq: The IPI request to be sent. * * Send the specified IPI to all CPUs in the system barring the current one, * not waiting for them to finish before returning. The caller is responsible * for synchronisation if that is needed. */ void send_IPI_allbutself(int irq) { cpumask_t cpumask; cpumask_copy(&cpumask, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &cpumask); send_IPI_mask(&cpumask, irq); } void arch_send_call_function_ipi_mask(const struct cpumask *mask) { BUG(); /*send_IPI_mask(mask, CALL_FUNCTION_IPI);*/ } void arch_send_call_function_single_ipi(int cpu) { send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI); } /** * smp_send_reschedule - Send reschedule IPI to a CPU * @cpu: The CPU to target. */ void smp_send_reschedule(int cpu) { send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI); } /** * smp_nmi_call_function - Send a call function NMI IPI to all CPUs * @func: The function to ask to be run. * @info: The context data to pass to that function. * @wait: If true, wait (atomically) until function is run on all CPUs. * * Send a non-maskable request to all CPUs in the system, requesting them to * run the specified function with the given context data, and, potentially, to * wait for completion of that function on all CPUs. * * Returns 0 if successful, -ETIMEDOUT if we were asked to wait, but hit the * timeout. 
*/ int smp_nmi_call_function(smp_call_func_t func, void *info, int wait) { struct nmi_call_data_struct data; unsigned long flags; unsigned int cnt; int cpus, ret = 0; cpus = num_online_cpus() - 1; if (cpus < 1) return 0; data.func = func; data.info = info; cpumask_copy(&data.started, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &data.started); data.wait = wait; if (wait) data.finished = data.started; spin_lock_irqsave(&smp_nmi_call_lock, flags); nmi_call_data = &data; smp_mb(); /* Send a message to all other CPUs and wait for them to respond */ send_IPI_allbutself(CALL_FUNCTION_NMI_IPI); /* Wait for response */ if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) { for (cnt = 0; cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT && !cpumask_empty(&data.started); cnt++) mdelay(1); if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) { for (cnt = 0; cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT && !cpumask_empty(&data.finished); cnt++) mdelay(1); } if (cnt >= CALL_FUNCTION_NMI_IPI_TIMEOUT) ret = -ETIMEDOUT; } else { /* If timeout value is zero, wait until cpumask has been * cleared */ while (!cpumask_empty(&data.started)) barrier(); if (wait) while (!cpumask_empty(&data.finished)) barrier(); } spin_unlock_irqrestore(&smp_nmi_call_lock, flags); return ret; } /** * smp_jump_to_debugger - Make other CPUs enter the debugger by sending an IPI * * Send a non-maskable request to all other CPUs in the system, instructing * them to jump into the debugger. The caller is responsible for checking that * the other CPUs responded to the instruction. * * The caller should make sure that this CPU's debugger IPI is disabled. */ void smp_jump_to_debugger(void) { if (num_online_cpus() > 1) /* Send a message to all other CPUs */ send_IPI_allbutself(DEBUGGER_NMI_IPI); } /** * stop_this_cpu - Callback to stop a CPU. * @unused: Callback context (ignored). */ void stop_this_cpu(void *unused) { static volatile int stopflag; unsigned long flags; #ifdef CONFIG_GDBSTUB /* In case of single stepping smp_send_stop by other CPU, * clear procindebug to avoid deadlock. */ atomic_set(&procindebug[smp_processor_id()], 0); #endif /* CONFIG_GDBSTUB */ flags = arch_local_cli_save(); set_cpu_online(smp_processor_id(), false); while (!stopflag) cpu_relax(); set_cpu_online(smp_processor_id(), true); arch_local_irq_restore(flags); } /** * smp_send_stop - Send a stop request to all CPUs. */ void smp_send_stop(void) { smp_nmi_call_function(stop_this_cpu, NULL, 0); } /** * smp_reschedule_interrupt - Reschedule IPI handler * @irq: The interrupt number. * @dev_id: The device ID. * * Returns IRQ_HANDLED to indicate we handled the interrupt successfully. */ static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id) { scheduler_ipi(); return IRQ_HANDLED; } /** * smp_call_function_interrupt - Call function IPI handler * @irq: The interrupt number. * @dev_id: The device ID. * * Returns IRQ_HANDLED to indicate we handled the interrupt successfully. 
*/ static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id) { /* generic_smp_call_function_interrupt(); */ generic_smp_call_function_single_interrupt(); return IRQ_HANDLED; } /** * smp_nmi_call_function_interrupt - Non-maskable call function IPI handler */ void smp_nmi_call_function_interrupt(void) { smp_call_func_t func = nmi_call_data->func; void *info = nmi_call_data->info; int wait = nmi_call_data->wait; /* Notify the initiating CPU that I've grabbed the data and am about to * execute the function */ smp_mb(); cpumask_clear_cpu(smp_processor_id(), &nmi_call_data->started); (*func)(info); if (wait) { smp_mb(); cpumask_clear_cpu(smp_processor_id(), &nmi_call_data->finished); } } #if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \ defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) /** * smp_ipi_timer_interrupt - Local timer IPI handler * @irq: The interrupt number. * @dev_id: The device ID. * * Returns IRQ_HANDLED to indicate we handled the interrupt successfully. */ static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id) { return local_timer_interrupt(); } #endif void __init smp_init_cpus(void) { int i; for (i = 0; i < NR_CPUS; i++) { set_cpu_possible(i, true); set_cpu_present(i, true); } } /** * smp_cpu_init - Initialise AP in start_secondary. * * For this Application Processor, set up init_mm, initialise FPU and set * interrupt level 0-6 setting. */ static void __init smp_cpu_init(void) { unsigned long flags; int cpu_id = smp_processor_id(); u16 tmp16; if (test_and_set_bit(cpu_id, &cpu_initialized)) { printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id); for (;;) local_irq_enable(); } printk(KERN_INFO "Initializing CPU#%d\n", cpu_id); atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; BUG_ON(current->mm); enter_lazy_tlb(&init_mm, current); /* Force FPU initialization */ clear_using_fpu(current); GxICR(CALL_FUNC_SINGLE_IPI) = CALL_FUNCTION_GxICR_LV | GxICR_DETECT; mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI); GxICR(LOCAL_TIMER_IPI) = LOCAL_TIMER_GxICR_LV | GxICR_DETECT; mn10300_ipi_enable(LOCAL_TIMER_IPI); GxICR(RESCHEDULE_IPI) = RESCHEDULE_GxICR_LV | GxICR_DETECT; mn10300_ipi_enable(RESCHEDULE_IPI); #ifdef CONFIG_MN10300_CACHE_ENABLED GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT; mn10300_ipi_enable(FLUSH_CACHE_IPI); #endif mn10300_ipi_shutdown(SMP_BOOT_IRQ); /* Set up the non-maskable call function IPI */ flags = arch_local_cli_save(); GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT; tmp16 = GxICR(CALL_FUNCTION_NMI_IPI); arch_local_irq_restore(flags); } /** * smp_prepare_cpu_init - Initialise CPU in startup_secondary * * Set interrupt level 0-6 setting and init ICR of the kernel debugger. */ void smp_prepare_cpu_init(void) { int loop; /* Set the interrupt vector registers */ IVAR0 = EXCEP_IRQ_LEVEL0; IVAR1 = EXCEP_IRQ_LEVEL1; IVAR2 = EXCEP_IRQ_LEVEL2; IVAR3 = EXCEP_IRQ_LEVEL3; IVAR4 = EXCEP_IRQ_LEVEL4; IVAR5 = EXCEP_IRQ_LEVEL5; IVAR6 = EXCEP_IRQ_LEVEL6; /* Disable all interrupts and set to priority 6 (lowest) */ for (loop = 0; loop < GxICR_NUM_IRQS; loop++) GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT; #ifdef CONFIG_KERNEL_DEBUGGER /* initialise the kernel debugger interrupt */ do { unsigned long flags; u16 tmp16; flags = arch_local_cli_save(); GxICR(DEBUGGER_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT; tmp16 = GxICR(DEBUGGER_NMI_IPI); arch_local_irq_restore(flags); } while (0); #endif } /** * start_secondary - Activate a secondary CPU (AP) * @unused: Thread parameter (ignored). 
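* * Initialise this application processor, report in via smp_callin(), then spin until the boot CPU sets smp_commenced_mask before going online and entering the idle loop.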
*/ int __init start_secondary(void *unused) { smp_cpu_init(); smp_callin(); while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask)) cpu_relax(); local_flush_tlb(); preempt_disable(); smp_online(); #ifdef CONFIG_GENERIC_CLOCKEVENTS init_clockevents(); #endif cpu_idle(); return 0; } /** * smp_prepare_cpus - Boot up secondary CPUs (APs) * @max_cpus: Maximum number of CPUs to boot. * * Call do_boot_cpu, and boot up APs. */ void __init smp_prepare_cpus(unsigned int max_cpus) { int phy_id; /* Setup boot CPU information */ smp_store_cpu_info(0); smp_tune_scheduling(); init_ipi(); /* If SMP should be disabled, then finish */ if (max_cpus == 0) { printk(KERN_INFO "SMP mode deactivated.\n"); goto smp_done; } /* Boot secondary CPUs (for which phy_id > 0) */ for (phy_id = 0; phy_id < NR_CPUS; phy_id++) { /* Don't boot primary CPU */ if (max_cpus <= cpucount + 1) continue; if (phy_id != 0) do_boot_cpu(phy_id); set_cpu_possible(phy_id, true); smp_show_cpu_info(phy_id); } smp_done: Dprintk("Boot done.\n"); } /** * smp_store_cpu_info - Save a CPU's information * @cpu: The CPU to save for. * * Save boot_cpu_data and jiffy for the specified CPU. */ static void __init smp_store_cpu_info(int cpu) { struct mn10300_cpuinfo *ci = &cpu_data[cpu]; *ci = boot_cpu_data; ci->loops_per_jiffy = loops_per_jiffy; ci->type = CPUREV; } /** * smp_tune_scheduling - Set time slice value * * Nothing to do here. */ static void __init smp_tune_scheduling(void) { } /** * do_boot_cpu: Boot up one CPU * @phy_id: Physical ID of CPU to boot. * * Send an IPI to a secondary CPU to boot it. Returns 0 on success, 1 * otherwise. */ static int __init do_boot_cpu(int phy_id) { struct task_struct *idle; unsigned long send_status, callin_status; int timeout, cpu_id; send_status = GxICR_REQUEST; callin_status = 0; timeout = 0; cpu_id = phy_id; cpucount++; /* Create idle thread for this CPU */ idle = fork_idle(cpu_id); if (IS_ERR(idle)) panic("Failed fork for CPU#%d.", cpu_id); idle->thread.pc = (unsigned long)start_secondary; printk(KERN_NOTICE "Booting CPU#%d\n", cpu_id); start_stack[cpu_id - 1] = idle->thread.sp; task_thread_info(idle)->cpu = cpu_id; /* Send boot IPI to AP */ send_IPI_mask(cpumask_of(phy_id), SMP_BOOT_IRQ); Dprintk("Waiting for send to finish...\n"); /* Wait for AP's IPI receive in 100[ms] */ do { udelay(1000); send_status = CROSS_GxICR(SMP_BOOT_IRQ, phy_id) & GxICR_REQUEST; } while (send_status == GxICR_REQUEST && timeout++ < 100); Dprintk("Waiting for cpu_callin_map.\n"); if (send_status == 0) { /* Allow AP to start initializing */ cpumask_set_cpu(cpu_id, &cpu_callout_map); /* Wait for setting cpu_callin_map */ timeout = 0; do { udelay(1000); callin_status = cpumask_test_cpu(cpu_id, &cpu_callin_map); } while (callin_status == 0 && timeout++ < 5000); if (callin_status == 0) Dprintk("Not responding.\n"); } else { printk(KERN_WARNING "IPI not delivered.\n"); } if (send_status == GxICR_REQUEST || callin_status == 0) { cpumask_clear_cpu(cpu_id, &cpu_callout_map); cpumask_clear_cpu(cpu_id, &cpu_callin_map); cpumask_clear_cpu(cpu_id, &cpu_initialized); cpucount--; return 1; } return 0; } /** * smp_show_cpu_info - Show SMP CPU information * @cpu: The CPU of interest. 
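* * Print the ioclk speed and BogoMIPS value recorded for @cpu.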
*/ static void __init smp_show_cpu_info(int cpu) { struct mn10300_cpuinfo *ci = &cpu_data[cpu]; printk(KERN_INFO "CPU#%d : ioclk speed: %lu.%02luMHz : bogomips : %lu.%02lu\n", cpu, MN10300_IOCLK / 1000000, (MN10300_IOCLK / 10000) % 100, ci->loops_per_jiffy / (500000 / HZ), (ci->loops_per_jiffy / (5000 / HZ)) % 100); } /** * smp_callin - Set cpu_callin_map of the current CPU ID */ static void __init smp_callin(void) { unsigned long timeout; int cpu; cpu = smp_processor_id(); timeout = jiffies + (2 * HZ); if (cpumask_test_cpu(cpu, &cpu_callin_map)) { printk(KERN_ERR "CPU#%d already present.\n", cpu); BUG(); } Dprintk("CPU#%d waiting for CALLOUT\n", cpu); /* Wait for AP startup 2s total */ while (time_before(jiffies, timeout)) { if (cpumask_test_cpu(cpu, &cpu_callout_map)) break; cpu_relax(); } if (!time_before(jiffies, timeout)) { printk(KERN_ERR "BUG: CPU#%d started up but did not get a callout!\n", cpu); BUG(); } #ifdef CONFIG_CALIBRATE_DELAY calibrate_delay(); /* Get our bogomips */ #endif /* Save our processor parameters */ smp_store_cpu_info(cpu); /* Allow the boot processor to continue */ cpumask_set_cpu(cpu, &cpu_callin_map); } /** * smp_online - Set cpu_online_mask */ static void __init smp_online(void) { int cpu; cpu = smp_processor_id(); local_irq_enable(); set_cpu_online(cpu, true); smp_wmb(); } /** * smp_cpus_done - * @max_cpus: Maximum CPU count. * * Do nothing. */ void __init smp_cpus_done(unsigned int max_cpus) { } /* * smp_prepare_boot_cpu - Set up stuff for the boot processor. * * Set up the cpu_online_mask, cpu_callout_map and cpu_callin_map of the boot * processor (CPU 0). */ void __devinit smp_prepare_boot_cpu(void) { cpumask_set_cpu(0, &cpu_callout_map); cpumask_set_cpu(0, &cpu_callin_map); current_thread_info()->cpu = 0; } /* * initialize_secondary - Initialise a secondary CPU (Application Processor). * * Set SP register and jump to thread's PC address. */ void initialize_secondary(void) { asm volatile ( "mov %0,sp \n" "jmp (%1) \n" : : "a"(current->thread.sp), "a"(current->thread.pc)); } /** * __cpu_up - Set smp_commenced_mask for the nominated CPU * @cpu: The target CPU. */ int __devinit __cpu_up(unsigned int cpu) { int timeout; #ifdef CONFIG_HOTPLUG_CPU if (num_online_cpus() == 1) disable_hlt(); if (sleep_mode[cpu]) run_wakeup_cpu(cpu); #endif /* CONFIG_HOTPLUG_CPU */ cpumask_set_cpu(cpu, &smp_commenced_mask); /* Wait 5s total for a response */ for (timeout = 0 ; timeout < 5000 ; timeout++) { if (cpu_online(cpu)) break; udelay(1000); } BUG_ON(!cpu_online(cpu)); return 0; } /** * setup_profiling_timer - Set up the profiling timer * @multiplier - The frequency multiplier to use * * The frequency of the profiling timer can be changed by writing a multiplier * value into /proc/profile. 
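* Changing the multiplier is not supported on this architecture, so -EINVAL is always returned.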
*/ int setup_profiling_timer(unsigned int multiplier) { return -EINVAL; } /* * CPU hotplug routines */ #ifdef CONFIG_HOTPLUG_CPU static DEFINE_PER_CPU(struct cpu, cpu_devices); static int __init topology_init(void) { int cpu, ret; for_each_cpu(cpu) { ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL); if (ret) printk(KERN_WARNING "topology_init: register_cpu %d failed (%d)\n", cpu, ret); } return 0; } subsys_initcall(topology_init); int __cpu_disable(void) { int cpu = smp_processor_id(); if (cpu == 0) return -EBUSY; migrate_irqs(); cpumask_clear_cpu(cpu, &mm_cpumask(current->active_mm)); return 0; } void __cpu_die(unsigned int cpu) { run_sleep_cpu(cpu); if (num_online_cpus() == 1) enable_hlt(); } #ifdef CONFIG_MN10300_CACHE_ENABLED static inline void hotplug_cpu_disable_cache(void) { int tmp; asm volatile( " movhu (%1),%0 \n" " and %2,%0 \n" " movhu %0,(%1) \n" "1: movhu (%1),%0 \n" " btst %3,%0 \n" " bne 1b \n" : "=&r"(tmp) : "a"(&CHCTR), "i"(~(CHCTR_ICEN | CHCTR_DCEN)), "i"(CHCTR_ICBUSY | CHCTR_DCBUSY) : "memory", "cc"); } static inline void hotplug_cpu_enable_cache(void) { int tmp; asm volatile( "movhu (%1),%0 \n" "or %2,%0 \n" "movhu %0,(%1) \n" : "=&r"(tmp) : "a"(&CHCTR), "i"(CHCTR_ICEN | CHCTR_DCEN) : "memory", "cc"); } static inline void hotplug_cpu_invalidate_cache(void) { int tmp; asm volatile ( "movhu (%1),%0 \n" "or %2,%0 \n" "movhu %0,(%1) \n" : "=&r"(tmp) : "a"(&CHCTR), "i"(CHCTR_ICINV | CHCTR_DCINV) : "cc"); } #else /* CONFIG_MN10300_CACHE_ENABLED */ #define hotplug_cpu_disable_cache() do {} while (0) #define hotplug_cpu_enable_cache() do {} while (0) #define hotplug_cpu_invalidate_cache() do {} while (0) #endif /* CONFIG_MN10300_CACHE_ENABLED */ /** * hotplug_cpu_nmi_call_function - Call a function on other CPUs for hotplug * @cpumask: List of target CPUs. * @func: The function to call on those CPUs. * @info: The context data for the function to be called. * @wait: Whether to wait for the calls to complete. * * Non-maskably call a function on another CPU for hotplug purposes. * * This function must be called with maskable interrupts disabled. */ static int hotplug_cpu_nmi_call_function(cpumask_t cpumask, smp_call_func_t func, void *info, int wait) { /* * The address and the size of nmi_call_func_mask_data * need to be aligned on L1_CACHE_BYTES. 
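* The dcache flush/invalidate calls below are issued over exactly this range.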
*/ static struct nmi_call_data_struct nmi_call_func_mask_data __cacheline_aligned; unsigned long start, end; start = (unsigned long)&nmi_call_func_mask_data; end = start + sizeof(struct nmi_call_data_struct); nmi_call_func_mask_data.func = func; nmi_call_func_mask_data.info = info; nmi_call_func_mask_data.started = cpumask; nmi_call_func_mask_data.wait = wait; if (wait) nmi_call_func_mask_data.finished = cpumask; spin_lock(&smp_nmi_call_lock); nmi_call_data = &nmi_call_func_mask_data; mn10300_local_dcache_flush_range(start, end); smp_wmb(); send_IPI_mask(cpumask, CALL_FUNCTION_NMI_IPI); do { mn10300_local_dcache_inv_range(start, end); barrier(); } while (!cpumask_empty(&nmi_call_func_mask_data.started)); if (wait) { do { mn10300_local_dcache_inv_range(start, end); barrier(); } while (!cpumask_empty(&nmi_call_func_mask_data.finished)); } spin_unlock(&smp_nmi_call_lock); return 0; } static void restart_wakeup_cpu(void) { unsigned int cpu = smp_processor_id(); cpumask_set_cpu(cpu, &cpu_callin_map); local_flush_tlb(); set_cpu_online(cpu, true); smp_wmb(); } static void prepare_sleep_cpu(void *unused) { sleep_mode[smp_processor_id()] = 1; smp_mb(); mn10300_local_dcache_flush_inv(); hotplug_cpu_disable_cache(); hotplug_cpu_invalidate_cache(); } /* when this function called, IE=0, NMID=0. */ static void sleep_cpu(void *unused) { unsigned int cpu_id = smp_processor_id(); /* * CALL_FUNCTION_NMI_IPI for wakeup_cpu() shall not be requested, * before this cpu goes in SLEEP mode. */ do { smp_mb(); __sleep_cpu(); } while (sleep_mode[cpu_id]); restart_wakeup_cpu(); } static void run_sleep_cpu(unsigned int cpu) { unsigned long flags; cpumask_t cpumask; cpumask_copy(&cpumask, &cpumask_of(cpu)); flags = arch_local_cli_save(); hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1); hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0); udelay(1); /* delay for the cpu to sleep. */ arch_local_irq_restore(flags); } static void wakeup_cpu(void) { hotplug_cpu_invalidate_cache(); hotplug_cpu_enable_cache(); smp_mb(); sleep_mode[smp_processor_id()] = 0; } static void run_wakeup_cpu(unsigned int cpu) { unsigned long flags; flags = arch_local_cli_save(); #if NR_CPUS == 2 mn10300_local_dcache_flush_inv(); #else /* * Before waking up the cpu, * all online cpus should stop and flush D-Cache for global data. */ #error not support NR_CPUS > 2, when CONFIG_HOTPLUG_CPU=y. #endif hotplug_cpu_nmi_call_function(cpumask_of(cpu), wakeup_cpu, NULL, 1); arch_local_irq_restore(flags); } #endif /* CONFIG_HOTPLUG_CPU */
gpl-2.0
flwh/KK_mt6589_iq451
kernel/drivers/net/ethernet/amd/7990.c
5143
21760
/* * 7990.c -- LANCE ethernet IC generic routines. * This is an attempt to separate out the bits of various ethernet * drivers that are common because they all use the AMD 7990 LANCE * (Local Area Network Controller for Ethernet) chip. * * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk> * * Most of this stuff was obtained by looking at other LANCE drivers, * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful. * NB: this was made easy by the fact that Jes Sorensen had cleaned up * most of a2025 and sunlance with the aim of merging them, so the * common code was pretty obvious. */ #include <linux/crc32.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/route.h> #include <linux/string.h> #include <linux/skbuff.h> #include <asm/irq.h> /* Used for the temporal inet entries and routing */ #include <linux/socket.h> #include <linux/bitops.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/pgtable.h> #ifdef CONFIG_HP300 #include <asm/blinken.h> #endif #include "7990.h" #define WRITERAP(lp,x) out_be16(lp->base + LANCE_RAP, (x)) #define WRITERDP(lp,x) out_be16(lp->base + LANCE_RDP, (x)) #define READRDP(lp) in_be16(lp->base + LANCE_RDP) #if defined(CONFIG_HPLANCE) || defined(CONFIG_HPLANCE_MODULE) #include "hplance.h" #undef WRITERAP #undef WRITERDP #undef READRDP #if defined(CONFIG_MVME147_NET) || defined(CONFIG_MVME147_NET_MODULE) /* Lossage Factor Nine, Mr Sulu. */ #define WRITERAP(lp,x) (lp->writerap(lp,x)) #define WRITERDP(lp,x) (lp->writerdp(lp,x)) #define READRDP(lp) (lp->readrdp(lp)) #else /* These inlines can be used if only CONFIG_HPLANCE is defined */ static inline void WRITERAP(struct lance_private *lp, __u16 value) { do { out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value); } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); } static inline void WRITERDP(struct lance_private *lp, __u16 value) { do { out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value); } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); } static inline __u16 READRDP(struct lance_private *lp) { __u16 value; do { value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP); } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); return value; } #endif #endif /* CONFIG_HPLANCE || CONFIG_HPLANCE_MODULE */ /* debugging output macros, various flavours */ /* #define TEST_HITS */ #ifdef UNDEF #define PRINT_RINGS() \ do { \ int t; \ for (t=0; t < RX_RING_SIZE; t++) { \ printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n",\ t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0,\ ib->brx_ring[t].length,\ ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits);\ }\ for (t=0; t < TX_RING_SIZE; t++) { \ printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n",\ t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0,\ ib->btx_ring[t].length,\ ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits);\ }\ } while (0) #else #define PRINT_RINGS() #endif /* Load the CSR registers. The LANCE has to be STOPped when we do this! 
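CSR1 and CSR2 take the low and high halves of the init block address and CSR3 the bus-master control value; CSR0 is reselected afterwards so later RDP accesses hit the status register.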
*/ static void load_csrs (struct lance_private *lp) { volatile struct lance_init_block *aib = lp->lance_init_block; int leptr; leptr = LANCE_ADDR (aib); WRITERAP(lp, LE_CSR1); /* load address of init block */ WRITERDP(lp, leptr & 0xFFFF); WRITERAP(lp, LE_CSR2); WRITERDP(lp, leptr >> 16); WRITERAP(lp, LE_CSR3); WRITERDP(lp, lp->busmaster_regval); /* set byteswap/ALEctrl/byte ctrl */ /* Point back to csr0 */ WRITERAP(lp, LE_CSR0); } /* #define to 0 or 1 appropriately */ #define DEBUG_IRING 0 /* Set up the Lance Rx and Tx rings and the init block */ static void lance_init_ring (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */ int leptr; int i; aib = lp->lance_init_block; lp->rx_new = lp->tx_new = 0; lp->rx_old = lp->tx_old = 0; ib->mode = LE_MO_PROM; /* normal, enable Tx & Rx */ /* Copy the ethernet address to the lance init block * Notice that we do a byteswap if we're big endian. * [I think this is the right criterion; at least, sunlance, * a2065 and atarilance do the byteswap and lance.c (PC) doesn't. * However, the datasheet says that the BSWAP bit doesn't affect * the init block, so surely it should be low byte first for * everybody? Um.] * We could define the ib->physaddr as three 16bit values and * use (addr[1] << 8) | addr[0] & co, but this is more efficient. */ #ifdef __BIG_ENDIAN ib->phys_addr [0] = dev->dev_addr [1]; ib->phys_addr [1] = dev->dev_addr [0]; ib->phys_addr [2] = dev->dev_addr [3]; ib->phys_addr [3] = dev->dev_addr [2]; ib->phys_addr [4] = dev->dev_addr [5]; ib->phys_addr [5] = dev->dev_addr [4]; #else for (i=0; i<6; i++) ib->phys_addr[i] = dev->dev_addr[i]; #endif if (DEBUG_IRING) printk ("TX rings:\n"); lp->tx_full = 0; /* Setup the Tx ring entries */ for (i = 0; i < (1<<lp->lance_log_tx_bufs); i++) { leptr = LANCE_ADDR(&aib->tx_buf[i][0]); ib->btx_ring [i].tmd0 = leptr; ib->btx_ring [i].tmd1_hadr = leptr >> 16; ib->btx_ring [i].tmd1_bits = 0; ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */ ib->btx_ring [i].misc = 0; if (DEBUG_IRING) printk ("%d: 0x%8.8x\n", i, leptr); } /* Setup the Rx ring entries */ if (DEBUG_IRING) printk ("RX rings:\n"); for (i = 0; i < (1<<lp->lance_log_rx_bufs); i++) { leptr = LANCE_ADDR(&aib->rx_buf[i][0]); ib->brx_ring [i].rmd0 = leptr; ib->brx_ring [i].rmd1_hadr = leptr >> 16; ib->brx_ring [i].rmd1_bits = LE_R1_OWN; /* 0xf000 == bits that must be one (reserved, presumably) */ ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000; ib->brx_ring [i].mblength = 0; if (DEBUG_IRING) printk ("%d: 0x%8.8x\n", i, leptr); } /* Setup the initialization block */ /* Setup rx descriptor pointer */ leptr = LANCE_ADDR(&aib->brx_ring); ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16); ib->rx_ptr = leptr; if (DEBUG_IRING) printk ("RX ptr: %8.8x\n", leptr); /* Setup tx descriptor pointer */ leptr = LANCE_ADDR(&aib->btx_ring); ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16); ib->tx_ptr = leptr; if (DEBUG_IRING) printk ("TX ptr: %8.8x\n", leptr); /* Clear the multicast filter */ ib->filter [0] = 0; ib->filter [1] = 0; PRINT_RINGS(); } /* LANCE must be STOPped before we do this, too... 
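Kick off initialisation from the init block, wait for IDON (or an error), then ack IDON and start the chip with interrupts enabled.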
*/ static int init_restart_lance (struct lance_private *lp) { int i; WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_INIT); /* Need a hook here for sunlance ledma stuff */ /* Wait for the lance to complete initialization */ for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++) barrier(); if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) { printk ("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp)); return -1; } /* Clear IDON by writing a "1", enable interrupts and start lance */ WRITERDP(lp, LE_C0_IDON); WRITERDP(lp, LE_C0_INEA | LE_C0_STRT); return 0; } static int lance_reset (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int status; /* Stop the lance */ WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_STOP); load_csrs (lp); lance_init_ring (dev); dev->trans_start = jiffies; /* prevent tx timeout */ status = init_restart_lance (lp); #ifdef DEBUG_DRIVER printk ("Lance restart=%d\n", status); #endif return status; } static int lance_rx (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; volatile struct lance_rx_desc *rd; unsigned char bits; #ifdef TEST_HITS int i; #endif #ifdef TEST_HITS printk ("["); for (i = 0; i < RX_RING_SIZE; i++) { if (i == lp->rx_new) printk ("%s", ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X"); else printk ("%s", ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1"); } printk ("]"); #endif #ifdef CONFIG_HP300 blinken_leds(0x40, 0); #endif WRITERDP(lp, LE_C0_RINT | LE_C0_INEA); /* ack Rx int, reenable ints */ for (rd = &ib->brx_ring [lp->rx_new]; /* For each Rx ring we own... */ !((bits = rd->rmd1_bits) & LE_R1_OWN); rd = &ib->brx_ring [lp->rx_new]) { /* We got an incomplete frame? */ if ((bits & LE_R1_POK) != LE_R1_POK) { dev->stats.rx_over_errors++; dev->stats.rx_errors++; continue; } else if (bits & LE_R1_ERR) { /* Count only the end frame as a rx error, * not the beginning */ if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; if (bits & LE_R1_EOP) dev->stats.rx_errors++; } else { int len = (rd->mblength & 0xfff) - 4; struct sk_buff *skb = netdev_alloc_skb(dev, len + 2); if (!skb) { printk ("%s: Memory squeeze, deferring packet.\n", dev->name); dev->stats.rx_dropped++; rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; return 0; } skb_reserve (skb, 2); /* 16 byte align */ skb_put (skb, len); /* make room */ skb_copy_to_linear_data(skb, (unsigned char *)&(ib->rx_buf [lp->rx_new][0]), len); skb->protocol = eth_type_trans (skb, dev); netif_rx (skb); dev->stats.rx_packets++; dev->stats.rx_bytes += len; } /* Return the packet to the pool */ rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; } return 0; } static int lance_tx (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; volatile struct lance_tx_desc *td; int i, j; int status; #ifdef CONFIG_HP300 blinken_leds(0x80, 0); #endif /* csr0 is 2f3 */ WRITERDP(lp, LE_C0_TINT | LE_C0_INEA); /* csr0 is 73 */ j = lp->tx_old; for (i = j; i != lp->tx_new; i = j) { td = &ib->btx_ring [i]; /* If we hit a packet not owned by us, stop */ if (td->tmd1_bits & LE_T1_OWN) break; if (td->tmd1_bits & LE_T1_ERR) { status = td->misc; dev->stats.tx_errors++; if (status & LE_T3_RTY) 
dev->stats.tx_aborted_errors++; if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; if (status & LE_T3_CLOS) { dev->stats.tx_carrier_errors++; if (lp->auto_select) { lp->tpe = 1 - lp->tpe; printk("%s: Carrier Lost, trying %s\n", dev->name, lp->tpe?"TPE":"AUI"); /* Stop the lance */ WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_STOP); lance_init_ring (dev); load_csrs (lp); init_restart_lance (lp); return 0; } } /* buffer errors and underflows turn off the transmitter */ /* Restart the adapter */ if (status & (LE_T3_BUF|LE_T3_UFL)) { dev->stats.tx_fifo_errors++; printk ("%s: Tx: ERR_BUF|ERR_UFL, restarting\n", dev->name); /* Stop the lance */ WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_STOP); lance_init_ring (dev); load_csrs (lp); init_restart_lance (lp); return 0; } } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) { /* * So we don't count the packet more than once. */ td->tmd1_bits &= ~(LE_T1_POK); /* One collision before packet was sent. */ if (td->tmd1_bits & LE_T1_EONE) dev->stats.collisions++; /* More than one collision, be optimistic. */ if (td->tmd1_bits & LE_T1_EMORE) dev->stats.collisions += 2; dev->stats.tx_packets++; } j = (j + 1) & lp->tx_ring_mod_mask; } lp->tx_old = j; WRITERDP(lp, LE_C0_TINT | LE_C0_INEA); return 0; } static irqreturn_t lance_interrupt (int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct lance_private *lp = netdev_priv(dev); int csr0; spin_lock (&lp->devlock); WRITERAP(lp, LE_CSR0); /* LANCE Controller Status */ csr0 = READRDP(lp); PRINT_RINGS(); if (!(csr0 & LE_C0_INTR)) { /* Check if any interrupt has */ spin_unlock (&lp->devlock); return IRQ_NONE; /* been generated by the Lance. */ } /* Acknowledge all the interrupt sources ASAP */ WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT)); if ((csr0 & LE_C0_ERR)) { /* Clear the error condition */ WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA); } if (csr0 & LE_C0_RINT) lance_rx (dev); if (csr0 & LE_C0_TINT) lance_tx (dev); /* Log misc errors. */ if (csr0 & LE_C0_BABL) dev->stats.tx_errors++; /* Tx babble. */ if (csr0 & LE_C0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */ if (csr0 & LE_C0_MERR) { printk("%s: Bus master arbitration failure, status %4.4x.\n", dev->name, csr0); /* Restart the chip. */ WRITERDP(lp, LE_C0_STRT); } if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) { lp->tx_full = 0; netif_wake_queue (dev); } WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA); spin_unlock (&lp->devlock); return IRQ_HANDLED; } int lance_open (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int res; /* Install the Interrupt handler. Or we could shunt this out to specific drivers? 
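The line is requested with IRQF_SHARED since it may be shared with other devices.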
*/ if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev)) return -EAGAIN; res = lance_reset(dev); spin_lock_init(&lp->devlock); netif_start_queue (dev); return res; } EXPORT_SYMBOL_GPL(lance_open); int lance_close (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); netif_stop_queue (dev); /* Stop the LANCE */ WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_STOP); free_irq(lp->irq, dev); return 0; } EXPORT_SYMBOL_GPL(lance_close); void lance_tx_timeout(struct net_device *dev) { printk("lance_tx_timeout\n"); lance_reset(dev); dev->trans_start = jiffies; /* prevent tx timeout */ netif_wake_queue (dev); } EXPORT_SYMBOL_GPL(lance_tx_timeout); int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; int entry, skblen, len; static int outs; unsigned long flags; if (!TX_BUFFS_AVAIL) return NETDEV_TX_LOCKED; netif_stop_queue (dev); skblen = skb->len; #ifdef DEBUG_DRIVER /* dump the packet */ { int i; for (i = 0; i < 64; i++) { if ((i % 16) == 0) printk ("\n"); printk ("%2.2x ", skb->data [i]); } } #endif len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen; entry = lp->tx_new & lp->tx_ring_mod_mask; ib->btx_ring [entry].length = (-len) | 0xf000; ib->btx_ring [entry].misc = 0; if (skb->len < ETH_ZLEN) memset((void *)&ib->tx_buf[entry][0], 0, ETH_ZLEN); skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen); /* Now, give the packet to the lance */ ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN); lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask; outs++; /* Kick the lance: transmit now */ WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD); dev_kfree_skb (skb); spin_lock_irqsave (&lp->devlock, flags); if (TX_BUFFS_AVAIL) netif_start_queue (dev); else lp->tx_full = 1; spin_unlock_irqrestore (&lp->devlock, flags); return NETDEV_TX_OK; } EXPORT_SYMBOL_GPL(lance_start_xmit); /* taken from the depca driver via a2065.c */ static void lance_load_multicast (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; volatile u16 *mcast_table = (u16 *)&ib->filter; struct netdev_hw_addr *ha; u32 crc; /* set all multicast bits */ if (dev->flags & IFF_ALLMULTI){ ib->filter [0] = 0xffffffff; ib->filter [1] = 0xffffffff; return; } /* clear the multicast filter */ ib->filter [0] = 0; ib->filter [1] = 0; /* Add addresses */ netdev_for_each_mc_addr(ha, dev) { crc = ether_crc_le(6, ha->addr); crc = crc >> 26; mcast_table [crc >> 4] |= 1 << (crc & 0xf); } } void lance_set_multicast (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; int stopped; stopped = netif_queue_stopped(dev); if (!stopped) netif_stop_queue (dev); while (lp->tx_old != lp->tx_new) schedule(); WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_STOP); lance_init_ring (dev); if (dev->flags & IFF_PROMISC) { ib->mode |= LE_MO_PROM; } else { ib->mode &= ~LE_MO_PROM; lance_load_multicast (dev); } load_csrs (lp); init_restart_lance (lp); if (!stopped) netif_start_queue (dev); } EXPORT_SYMBOL_GPL(lance_set_multicast); #ifdef CONFIG_NET_POLL_CONTROLLER void lance_poll(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); spin_lock (&lp->devlock); WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_STRT); spin_unlock (&lp->devlock); lance_interrupt(dev->irq, dev); } #endif MODULE_LICENSE("GPL");
gpl-2.0
davtse/i9505
drivers/block/cryptoloop.c
12311
5005
/* Linux loop encryption enabling module Copyright (C) 2002 Herbert Valerio Riedel <hvr@gnu.org> Copyright (C) 2003 Fruhwirth Clemens <clemens@endorphin.org> This module is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This module is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this module; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/init.h> #include <linux/string.h> #include <linux/crypto.h> #include <linux/blkdev.h> #include <linux/loop.h> #include <linux/scatterlist.h> #include <asm/uaccess.h> MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("loop blockdevice transferfunction adaptor / CryptoAPI"); MODULE_AUTHOR("Herbert Valerio Riedel <hvr@gnu.org>"); #define LOOP_IV_SECTOR_BITS 9 #define LOOP_IV_SECTOR_SIZE (1 << LOOP_IV_SECTOR_BITS) static int cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info) { int err = -EINVAL; int cipher_len; int mode_len; char cms[LO_NAME_SIZE]; /* cipher-mode string */ char *cipher; char *mode; char *cmsp = cms; /* c-m string pointer */ struct crypto_blkcipher *tfm; /* encryption breaks for non sector aligned offsets */ if (info->lo_offset % LOOP_IV_SECTOR_SIZE) goto out; strncpy(cms, info->lo_crypt_name, LO_NAME_SIZE); cms[LO_NAME_SIZE - 1] = 0; cipher = cmsp; cipher_len = strcspn(cmsp, "-"); mode = cmsp + cipher_len; mode_len = 0; if (*mode) { mode++; mode_len = strcspn(mode, "-"); } if (!mode_len) { mode = "cbc"; mode_len = 3; } if (cipher_len + mode_len + 3 > LO_NAME_SIZE) return -EINVAL; memmove(cms, mode, mode_len); cmsp = cms + mode_len; *cmsp++ = '('; memcpy(cmsp, info->lo_crypt_name, cipher_len); cmsp += cipher_len; *cmsp++ = ')'; *cmsp = 0; tfm = crypto_alloc_blkcipher(cms, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) return PTR_ERR(tfm); err = crypto_blkcipher_setkey(tfm, info->lo_encrypt_key, info->lo_encrypt_key_size); if (err != 0) goto out_free_tfm; lo->key_data = tfm; return 0; out_free_tfm: crypto_free_blkcipher(tfm); out: return err; } typedef int (*encdec_cbc_t)(struct blkcipher_desc *desc, struct scatterlist *sg_out, struct scatterlist *sg_in, unsigned int nsg); static int cryptoloop_transfer(struct loop_device *lo, int cmd, struct page *raw_page, unsigned raw_off, struct page *loop_page, unsigned loop_off, int size, sector_t IV) { struct crypto_blkcipher *tfm = lo->key_data; struct blkcipher_desc desc = { .tfm = tfm, .flags = CRYPTO_TFM_REQ_MAY_SLEEP, }; struct scatterlist sg_out; struct scatterlist sg_in; encdec_cbc_t encdecfunc; struct page *in_page, *out_page; unsigned in_offs, out_offs; int err; sg_init_table(&sg_out, 1); sg_init_table(&sg_in, 1); if (cmd == READ) { in_page = raw_page; in_offs = raw_off; out_page = loop_page; out_offs = loop_off; encdecfunc = crypto_blkcipher_crt(tfm)->decrypt; } else { in_page = loop_page; in_offs = loop_off; out_page = raw_page; out_offs = raw_off; encdecfunc = crypto_blkcipher_crt(tfm)->encrypt; } while (size > 0) { const int sz = min(size, LOOP_IV_SECTOR_SIZE); u32 iv[4] = { 0, }; iv[0] = cpu_to_le32(IV & 0xffffffff); sg_set_page(&sg_in, in_page, sz, in_offs); 
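/* map the matching destination slot; each 512-byte sector is then en/decrypted with the low 32 bits of its sector number as IV */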
sg_set_page(&sg_out, out_page, sz, out_offs); desc.info = iv; err = encdecfunc(&desc, &sg_out, &sg_in, sz); if (err) return err; IV++; size -= sz; in_offs += sz; out_offs += sz; } return 0; } static int cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg) { return -EINVAL; } static int cryptoloop_release(struct loop_device *lo) { struct crypto_blkcipher *tfm = lo->key_data; if (tfm != NULL) { crypto_free_blkcipher(tfm); lo->key_data = NULL; return 0; } printk(KERN_ERR "cryptoloop_release(): tfm == NULL?\n"); return -EINVAL; } static struct loop_func_table cryptoloop_funcs = { .number = LO_CRYPT_CRYPTOAPI, .init = cryptoloop_init, .ioctl = cryptoloop_ioctl, .transfer = cryptoloop_transfer, .release = cryptoloop_release, .owner = THIS_MODULE }; static int __init init_cryptoloop(void) { int rc = loop_register_transfer(&cryptoloop_funcs); if (rc) printk(KERN_ERR "cryptoloop: loop_register_transfer failed\n"); return rc; } static void __exit cleanup_cryptoloop(void) { if (loop_unregister_transfer(LO_CRYPT_CRYPTOAPI)) printk(KERN_ERR "cryptoloop: loop_unregister_transfer failed\n"); } module_init(init_cryptoloop); module_exit(cleanup_cryptoloop);
gpl-2.0
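The interesting step in cryptoloop_init() above is the name mangling: the loop layer hands over a "cipher-mode" string such as "aes-cbc", and the module rewrites it into the "mode(cipher)" spelling ("cbc(aes)") that crypto_alloc_blkcipher() expects, defaulting the mode to cbc when none is given. The userspace sketch below mirrors that parsing; build_crypto_name and the use of snprintf are our own illustration, not kernel code.

#include <stdio.h>
#include <string.h>

#define LO_NAME_SIZE 64

static int build_crypto_name(const char *lo_crypt_name, char *out, size_t outlen)
{
	char cms[LO_NAME_SIZE];
	const char *mode;
	size_t cipher_len, mode_len;

	strncpy(cms, lo_crypt_name, LO_NAME_SIZE);
	cms[LO_NAME_SIZE - 1] = 0;

	cipher_len = strcspn(cms, "-");
	mode = cms + cipher_len;
	mode_len = 0;
	if (*mode) {                    /* skip the '-' separator */
		mode++;
		mode_len = strcspn(mode, "-");
	}
	if (!mode_len) {                /* no mode given: default to cbc */
		mode = "cbc";
		mode_len = 3;
	}
	if (cipher_len + mode_len + 3 > outlen)
		return -1;

	/* Assemble "mode(cipher)". */
	snprintf(out, outlen, "%.*s(%.*s)", (int)mode_len, mode,
		 (int)cipher_len, lo_crypt_name);
	return 0;
}

int main(void)
{
	char name[LO_NAME_SIZE];

	if (build_crypto_name("aes-cbc", name, sizeof(name)) == 0)
		printf("%s\n", name);   /* prints "cbc(aes)" */
	return 0;
}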
scue/LenovoK860i_4.2_opensource_kernel
fs/nls/nls_cp1250.c
12567
15419
/* * linux/fs/nls/nls_cp1250.c * * Charset cp1250 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x20ac, 0x0000, 0x201a, 0x0000, 0x201e, 0x2026, 0x2020, 0x2021, 0x0000, 0x2030, 0x0160, 0x2039, 0x015a, 0x0164, 0x017d, 0x0179, /* 0x90*/ 0x0000, 0x2018, 0x2019, 0x201c, 0x201d, 0x2022, 0x2013, 0x2014, 0x0000, 0x2122, 0x0161, 0x203a, 0x015b, 0x0165, 0x017e, 0x017a, /* 0xa0*/ 0x00a0, 0x02c7, 0x02d8, 0x0141, 0x00a4, 0x0104, 0x00a6, 0x00a7, 0x00a8, 0x00a9, 0x015e, 0x00ab, 0x00ac, 0x00ad, 0x00ae, 0x017b, /* 0xb0*/ 0x00b0, 0x00b1, 0x02db, 0x0142, 0x00b4, 0x00b5, 0x00b6, 0x00b7, 0x00b8, 0x0105, 0x015f, 0x00bb, 0x013d, 0x02dd, 0x013e, 0x017c, /* 0xc0*/ 0x0154, 0x00c1, 0x00c2, 0x0102, 0x00c4, 0x0139, 0x0106, 0x00c7, 0x010c, 0x00c9, 0x0118, 0x00cb, 0x011a, 0x00cd, 0x00ce, 0x010e, /* 0xd0*/ 0x0110, 0x0143, 0x0147, 0x00d3, 0x00d4, 0x0150, 0x00d6, 0x00d7, 0x0158, 0x016e, 0x00da, 0x0170, 0x00dc, 0x00dd, 0x0162, 0x00df, /* 0xe0*/ 0x0155, 0x00e1, 0x00e2, 0x0103, 0x00e4, 0x013a, 0x0107, 0x00e7, 0x010d, 0x00e9, 0x0119, 0x00eb, 0x011b, 0x00ed, 0x00ee, 0x010f, /* 0xf0*/ 0x0111, 0x0144, 0x0148, 0x00f3, 0x00f4, 0x0151, 0x00f6, 0x00f7, 0x0159, 0x016f, 0x00fa, 0x0171, 0x00fc, 0x00fd, 0x0163, 0x02d9, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 
0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xa0, 0x00, 0x00, 0x00, 0xa4, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0x00, 0xab, 0xac, 0xad, 0xae, 0x00, /* 0xa8-0xaf */ 0xb0, 0xb1, 0x00, 0x00, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0xc1, 0xc2, 0x00, 0xc4, 0x00, 0x00, 0xc7, /* 0xc0-0xc7 */ 0x00, 0xc9, 0x00, 0xcb, 0x00, 0xcd, 0xce, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0xd3, 0xd4, 0x00, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0x00, 0x00, 0xda, 0x00, 0xdc, 0xdd, 0x00, 0xdf, /* 0xd8-0xdf */ 0x00, 0xe1, 0xe2, 0x00, 0xe4, 0x00, 0x00, 0xe7, /* 0xe0-0xe7 */ 0x00, 0xe9, 0x00, 0xeb, 0x00, 0xed, 0xee, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0xf3, 0xf4, 0x00, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0x00, 0x00, 0xfa, 0x00, 0xfc, 0xfd, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page01[256] = { 0x00, 0x00, 0xc3, 0xe3, 0xa5, 0xb9, 0xc6, 0xe6, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xc8, 0xe8, 0xcf, 0xef, /* 0x08-0x0f */ 0xd0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xca, 0xea, 0xcc, 0xec, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0xc5, 0xe5, 0x00, 0x00, 0xbc, 0xbe, 0x00, /* 0x38-0x3f */ 0x00, 0xa3, 0xb3, 0xd1, 0xf1, 0x00, 0x00, 0xd2, /* 0x40-0x47 */ 0xf2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0xd5, 0xf5, 0x00, 0x00, 0xc0, 0xe0, 0x00, 0x00, /* 0x50-0x57 */ 0xd8, 0xf8, 0x8c, 0x9c, 0x00, 0x00, 0xaa, 0xba, /* 0x58-0x5f */ 0x8a, 0x9a, 0xde, 0xfe, 0x8d, 0x9d, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd9, 0xf9, /* 0x68-0x6f */ 0xdb, 0xfb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x8f, 0x9f, 0xaf, 0xbf, 0x8e, 0x9e, 0x00, /* 0x78-0x7f */ }; static const unsigned char page02[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 
0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa1, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0xa2, 0xff, 0x00, 0xb2, 0x00, 0xbd, 0x00, 0x00, /* 0xd8-0xdf */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x96, 0x97, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x91, 0x92, 0x82, 0x00, 0x93, 0x94, 0x84, 0x00, /* 0x18-0x1f */ 0x86, 0x87, 0x95, 0x00, 0x00, 0x00, 0x85, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x89, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x8b, 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ }; static const unsigned char page21[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ }; static const unsigned char *const page_uni2charset[256] = { page00, page01, page02, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, page21, NULL, NULL, NULL, NULL, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 
0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x00, 0x82, 0x00, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x00, 0x89, 0x9a, 0x8b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x88-0x8f */ 0x00, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x00, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xb3, 0xa4, 0xb9, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xba, 0xab, 0xac, 0xad, 0xae, 0xbf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbe, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, /* 0xd0-0xd7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x00, 0x82, 0x00, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x00, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x00, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x00, 0x99, 0x8a, 0x9b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xa3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xa5, 0xaa, 0xbb, 0xbc, 0xbd, 0xbc, 0xaf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0x00, /* 0xd8-0xdf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, /* 0xf0-0xf7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xff, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = 
page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "cp1250", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, .owner = THIS_MODULE, }; static int __init init_nls_cp1250(void) { return register_nls(&table); } static void __exit exit_nls_cp1250(void) { unregister_nls(&table); } module_init(init_nls_cp1250) module_exit(exit_nls_cp1250) MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
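uni2char() above does a two-level lookup: the high byte of the Unicode code point selects one of the page tables (page00, page01, page20, ...), the low byte indexes into it, and a zero entry means the code point has no cp1250 representation. The standalone sketch below shows the same pattern with a made-up one-entry page (only U+2013, the en dash); names prefixed demo_ are illustrative and not part of the module.

#include <stdio.h>

typedef unsigned short wchar16;

/* Hypothetical page fragment: maps U+2013 (en dash) to cp1250 0x96. */
static const unsigned char demo_page20[256] = {
	[0x13] = 0x96,
};

static const unsigned char *const demo_pages[256] = {
	[0x20] = demo_page20,
};

static int demo_uni2char(wchar16 uni, unsigned char *out)
{
	const unsigned char *page = demo_pages[(uni >> 8) & 0xff];
	unsigned char cl = uni & 0xff;

	if (page && page[cl]) {
		*out = page[cl];
		return 1;               /* one byte written */
	}
	return -1;                      /* -EINVAL in the real module */
}

int main(void)
{
	unsigned char c;

	if (demo_uni2char(0x2013, &c) == 1)
		printf("U+2013 -> 0x%02x\n", c);        /* 0x96 in cp1250 */
	return 0;
}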
gabriel-fernandez/kernel
arch/ia64/sn/kernel/bte_error.c
13847
7665
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved. */ #include <linux/types.h> #include <asm/sn/sn_sal.h> #include "ioerror.h" #include <asm/sn/addrs.h> #include <asm/sn/shubio.h> #include <asm/sn/geo.h> #include "xtalk/xwidgetdev.h" #include "xtalk/hubdev.h" #include <asm/sn/bte.h> #include <asm/param.h> /* * Bte error handling is done in two parts. The first captures * any crb related errors. Since there can be multiple crbs per * interface and multiple interfaces active, we need to wait until * all active crbs are completed. This is the first job of the * second part error handler. When all bte related CRBs are cleanly * completed, it resets the interfaces and gets them ready for new * transfers to be queued. */ void bte_error_handler(unsigned long); /* * Wait until all BTE related CRBs are completed * and then reset the interfaces. */ int shub1_bte_error_handler(unsigned long _nodepda) { struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda; struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer; nasid_t nasid; int i; int valid_crbs; ii_imem_u_t imem; /* II IMEM Register */ ii_icrb0_d_u_t icrbd; /* II CRB Register D */ ii_ibcr_u_t ibcr; ii_icmr_u_t icmr; ii_ieclr_u_t ieclr; BTE_PRINTK(("shub1_bte_error_handler(%p) - %d\n", err_nodepda, smp_processor_id())); if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) && (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) { BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda, smp_processor_id())); return 1; } /* Determine information about our hub */ nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode); /* * A BTE transfer can use multiple CRBs. We need to make sure * that all the BTE CRBs are complete (or timed out) before * attempting to clean up the error. Resetting the BTE while * there are still BTE CRBs active will hang the BTE. * We should look at all the CRBs to see if they are allocated * to the BTE and see if they are still active. When none * are active, we can continue with the cleanup. * * We also want to make sure that the local NI port is up. * When a router resets the NI port can go down, while it * goes through the LLP handshake, but then comes back up. 
*/ icmr.ii_icmr_regval = REMOTE_HUB_L(nasid, IIO_ICMR); if (icmr.ii_icmr_fld_s.i_crb_mark != 0) { /* * There are errors which still need to be cleaned up by * hubiio_crb_error_handler */ mod_timer(recovery_timer, jiffies + (HZ * 5)); BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda, smp_processor_id())); return 1; } if (icmr.ii_icmr_fld_s.i_crb_vld != 0) { valid_crbs = icmr.ii_icmr_fld_s.i_crb_vld; for (i = 0; i < IIO_NUM_CRBS; i++) { if (!((1 << i) & valid_crbs)) { /* This crb was not marked as valid, ignore */ continue; } icrbd.ii_icrb0_d_regval = REMOTE_HUB_L(nasid, IIO_ICRB_D(i)); if (icrbd.d_bteop) { mod_timer(recovery_timer, jiffies + (HZ * 5)); BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n", err_nodepda, smp_processor_id(), i)); return 1; } } } BTE_PRINTK(("eh:%p:%d Cleaning up\n", err_nodepda, smp_processor_id())); /* Re-enable both bte interfaces */ imem.ii_imem_regval = REMOTE_HUB_L(nasid, IIO_IMEM); imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1; REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval); /* Clear BTE0/1 error bits */ ieclr.ii_ieclr_regval = 0; if (err_nodepda->bte_if[0].bh_error != BTE_SUCCESS) ieclr.ii_ieclr_fld_s.i_e_bte_0 = 1; if (err_nodepda->bte_if[1].bh_error != BTE_SUCCESS) ieclr.ii_ieclr_fld_s.i_e_bte_1 = 1; REMOTE_HUB_S(nasid, IIO_IECLR, ieclr.ii_ieclr_regval); /* Reinitialize both BTE state machines. */ ibcr.ii_ibcr_regval = REMOTE_HUB_L(nasid, IIO_IBCR); ibcr.ii_ibcr_fld_s.i_soft_reset = 1; REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval); del_timer(recovery_timer); return 0; } /* * Wait until all BTE related CRBs are completed * and then reset the interfaces. */ int shub2_bte_error_handler(unsigned long _nodepda) { struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda; struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer; struct bteinfo_s *bte; nasid_t nasid; u64 status; int i; nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode); /* * Verify that all the BTEs are complete */ for (i = 0; i < BTES_PER_NODE; i++) { bte = &err_nodepda->bte_if[i]; status = BTE_LNSTAT_LOAD(bte); if (status & IBLS_ERROR) { bte->bh_error = BTE_SHUB2_ERROR(status); continue; } if (!(status & IBLS_BUSY)) continue; mod_timer(recovery_timer, jiffies + (HZ * 5)); BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda, smp_processor_id())); return 1; } if (ia64_sn_bte_recovery(nasid)) panic("bte_error_handler(): Fatal BTE Error"); del_timer(recovery_timer); return 0; } /* * Wait until all BTE related CRBs are completed * and then reset the interfaces. */ void bte_error_handler(unsigned long _nodepda) { struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda; spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock; int i; unsigned long irq_flags; volatile u64 *notify; bte_result_t bh_error; BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda, smp_processor_id())); spin_lock_irqsave(recovery_lock, irq_flags); /* * Lock all interfaces on this node to prevent new transfers * from being queued. 
*/ for (i = 0; i < BTES_PER_NODE; i++) { if (err_nodepda->bte_if[i].cleanup_active) { continue; } spin_lock(&err_nodepda->bte_if[i].spinlock); BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda, smp_processor_id(), i)); err_nodepda->bte_if[i].cleanup_active = 1; } if (is_shub1()) { if (shub1_bte_error_handler(_nodepda)) { spin_unlock_irqrestore(recovery_lock, irq_flags); return; } } else { if (shub2_bte_error_handler(_nodepda)) { spin_unlock_irqrestore(recovery_lock, irq_flags); return; } } for (i = 0; i < BTES_PER_NODE; i++) { bh_error = err_nodepda->bte_if[i].bh_error; if (bh_error != BTE_SUCCESS) { /* There is an error which needs to be notified */ notify = err_nodepda->bte_if[i].most_rcnt_na; BTE_PRINTK(("cnode %d bte %d error=0x%lx\n", err_nodepda->bte_if[i].bte_cnode, err_nodepda->bte_if[i].bte_num, IBLS_ERROR | (u64) bh_error)); *notify = IBLS_ERROR | bh_error; err_nodepda->bte_if[i].bh_error = BTE_SUCCESS; } err_nodepda->bte_if[i].cleanup_active = 0; BTE_PRINTK(("eh:%p:%d Unlocked %d\n", err_nodepda, smp_processor_id(), i)); spin_unlock(&err_nodepda->bte_if[i].spinlock); } spin_unlock_irqrestore(recovery_lock, irq_flags); } /* * First part error handler. This is called whenever any error CRB interrupt * is generated by the II. */ void bte_crb_error_handler(cnodeid_t cnode, int btenum, int crbnum, ioerror_t * ioe, int bteop) { struct bteinfo_s *bte; bte = &(NODEPDA(cnode)->bte_if[btenum]); /* * The caller has already figured out the error type, we save that * in the bte handle structure for the thread exercising the * interface to consume. */ bte->bh_error = ioe->ie_errortype + BTEFAIL_OFFSET; bte->bte_error_count++; BTE_PRINTK(("Got an error on cnode %d bte %d: HW error type 0x%x\n", bte->bte_cnode, bte->bte_num, ioe->ie_errortype)); bte_error_handler((unsigned long) NODEPDA(cnode)); }
gpl-2.0
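The per-interface decision taken by shub2_bte_error_handler() above boils down to decoding one status word: an interface showing IBLS_ERROR has its error latched in bh_error and can be cleaned up, one still showing IBLS_BUSY forces the handler to re-arm the five-second recovery timer and bail out, and only when every interface is idle does the reset proceed. The sketch below restates that decision in isolation; the DEMO_ bit positions are placeholders, not the real <asm/sn/bte.h> values.

#include <stdio.h>
#include <stdint.h>

#define DEMO_IBLS_ERROR (1ULL << 62)    /* placeholder bit positions */
#define DEMO_IBLS_BUSY  (1ULL << 63)

enum demo_action { DEMO_RECORD_ERROR, DEMO_RETRY_LATER, DEMO_DONE };

static enum demo_action demo_classify(uint64_t status)
{
	if (status & DEMO_IBLS_ERROR)
		return DEMO_RECORD_ERROR;       /* latch bh_error, continue  */
	if (status & DEMO_IBLS_BUSY)
		return DEMO_RETRY_LATER;        /* mod_timer(jiffies + 5*HZ) */
	return DEMO_DONE;                       /* idle: safe to reset       */
}

int main(void)
{
	uint64_t samples[] = { 0, DEMO_IBLS_BUSY, DEMO_IBLS_ERROR | 0x7 };

	for (unsigned i = 0; i < 3; i++)
		printf("status %u -> action %d\n", i, demo_classify(samples[i]));
	return 0;
}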
TeamICS/heroc-kernel-2.6.35-ics
drivers/net/wireless/tiwlan1251/common/src/TNETW_Driver/MacServices/src/PowerAuthorization/PowerAuthorization.c
24
13135
/**************************************************************************** **+-----------------------------------------------------------------------+** **| |** **| Copyright(c) 1998 - 2008 Texas Instruments. All rights reserved. |** **| All rights reserved. |** **| |** **| Redistribution and use in source and binary forms, with or without |** **| modification, are permitted provided that the following conditions |** **| are met: |** **| |** **| * Redistributions of source code must retain the above copyright |** **| notice, this list of conditions and the following disclaimer. |** **| * Redistributions in binary form must reproduce the above copyright |** **| notice, this list of conditions and the following disclaimer in |** **| the documentation and/or other materials provided with the |** **| distribution. |** **| * Neither the name Texas Instruments nor the names of its |** **| contributors may be used to endorse or promote products derived |** **| from this software without specific prior written permission. |** **| |** **| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |** **| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |** **| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |** **| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |** **| OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |** **| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |** **| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |** **| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |** **| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |** **| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |** **| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
|** **| |** **+-----------------------------------------------------------------------+** ****************************************************************************/ /**********************************************************************************/ /* */ /* MODULE: PowerAuthorization.c */ /* PURPOSE: PowerAuthorization implementation module */ /* */ /**********************************************************************************/ #include "whalCtrl_api.h" #include "report.h" #include "MacServices_api.h" #include "MacServices.h" #include "PowerAuthorization.h" #include "PowerAuthorization_internal.h" /**************************************************************************** * powerAutho_Create() **************************************************************************** * DESCRIPTION: * * INPUTS: hOs - the handle to the OS layer * hReport - the handle to the report module * hELPCtrl - the handle to the ELPCtrl module * * * OUTPUT: the context of the PowerAuthorization module * * RETURNS: the context of the PowerAuthorization module (NULL if error) ****************************************************************************/ TI_HANDLE powerAutho_Create(TI_HANDLE hOs) { powerAutho_t *pObj; pObj = os_memoryAlloc(hOs, sizeof(powerAutho_t)); if (pObj == NULL) { WLAN_OS_REPORT(("FATAL ERROR: powerAutho_Create(): Error allocating context\n")); return NULL; } os_memoryZero(hOs, pObj, sizeof(powerAutho_t)); pObj->hOs = hOs; pObj->hReport = NULL; pObj->hHalCtrl = NULL; /* set as 'before init complete' */ pObj->initComplete = FALSE; pObj->m_AwakeRequired = 0; pObj->m_PowerPolicy = POWERAUTHO_POLICY_PD; pObj->m_MinPowerLevel = POWERAUTHO_POLICY_PD; return pObj; } /**************************************************************************** * powerAutho_Destroy() **************************************************************************** * DESCRIPTION: * * INPUTS: hPowerAutho - the handle to the PowerAuthorization module. * * * OUTPUT: * * RETURNS: OK ****************************************************************************/ int powerAutho_Destroy(TI_HANDLE hPowerAutho) { powerAutho_t *pPowerAutho = (powerAutho_t*)hPowerAutho; if (pPowerAutho) os_memoryFree(pPowerAutho->hOs, pPowerAutho, sizeof(powerAutho_t)); return OK; } /**************************************************************************** * powerAutho_Configure() **************************************************************************** * DESCRIPTION: * * INPUTS: hPowerAutho - the handle to the PowerAuthorization module. * aPowerPolicy - the power policy to configure. 
* * * OUTPUT: * * RETURNS: OK ****************************************************************************/ int powerAutho_Configure(TI_HANDLE hPowerAutho, TI_HANDLE hReport, TI_HANDLE hHalCtrl, powerAutho_PowerPolicy_e aPowerPolicy) { powerAutho_t *pPowerAutho = (powerAutho_t*)hPowerAutho; pPowerAutho->m_PowerPolicy = aPowerPolicy; pPowerAutho->m_ElpCtrl_Mode_LUT[POWERAUTHO_POLICY_AWAKE] = ELPCTRL_MODE_KEEP_AWAKE; pPowerAutho->m_ElpCtrl_Mode_LUT[POWERAUTHO_POLICY_PD] = ELPCTRL_MODE_KEEP_AWAKE; pPowerAutho->m_ElpCtrl_Mode_LUT[POWERAUTHO_POLICY_ELP] = ELPCTRL_MODE_NORMAL; pPowerAutho->hReport = hReport; pPowerAutho->hHalCtrl = hHalCtrl; return OK; } /**************************************************************************** * powerAutho_PowerPolicyUpdate() **************************************************************************** * DESCRIPTION: updates the PowerPolicy and calcs the new MinPowerPolicy of the sustem * * INPUTS: hMacServices - the handle to the MacServices module. * aPowerPolicy - the new power policy. * * * OUTPUT: none * * RETURNS: OK or NOK ****************************************************************************/ int MacServices_powerAutho_PowerPolicyUpdate(TI_HANDLE hMacServices, powerAutho_PowerPolicy_e aPowerPolicy) { powerAutho_t *pPowerAutho = (powerAutho_t*)(((MacServices_t*)hMacServices)->hPowerAutho); WLAN_REPORT_INFORMATION (pPowerAutho->hReport,ELP_MODULE_LOG, ("MacServices_powerAutho_PowerPolicyUpdate: PowerPolicy = %d\n",aPowerPolicy)); pPowerAutho->m_PowerPolicy = aPowerPolicy; return powerAutho_CalcMinPowerLevel(pPowerAutho); } /**************************************************************************** * powerAutho_AwakeRequiredUpdate() **************************************************************************** * DESCRIPTION: updates the AwakeRequired and calcs the new MinPowerPolicy of the sustem * * INPUTS: hMacServices - the handle to the MacServices module. * aAwakeRequired - the awake required parameter, * can be according to the enum required or not_required. * aAwakeReason - the reason that the HW is required * * OUTPUT: none * * RETURNS: OK or NOK ****************************************************************************/ int MacServices_powerAutho_AwakeRequiredUpdate(TI_HANDLE hMacServices, MacServices_powerAutho_AwakeRequired_e aAwakeRequired, MacServices_powerAutho_AwakeReason_e aAwakeReason) { powerAutho_t *pPowerAutho = (powerAutho_t*)(((MacServices_t*)hMacServices)->hPowerAutho); if(aAwakeRequired == POWERAUTHO_AWAKE_REQUIRED) { pPowerAutho->m_AwakeRequired |= (1<<aAwakeReason); } else { /* aAwakeRequired == POWERAUTHO_AWAKE_NOT_REQUIRED*/ pPowerAutho->m_AwakeRequired &= ~(1<<aAwakeReason); } WLAN_REPORT_INFORMATION (pPowerAutho->hReport,ELP_MODULE_LOG, ("MacServices_powerAutho_AwakeRequiredUpdate: awake required sent %d (reason %d) and the updated is %d\n", aAwakeRequired, aAwakeReason, pPowerAutho->m_AwakeRequired)); return powerAutho_CalcMinPowerLevel(pPowerAutho); } /**************************************************************************** * powerAutho_CalcMinPowerLevel() **************************************************************************** * DESCRIPTION: calculate the min power level * * INPUTS: hPowerAutho - the handle to the PowerAuthorization module. 
* * OUTPUT: none * * RETURNS: OK or NOK ****************************************************************************/ int powerAutho_CalcMinPowerLevel(TI_HANDLE hPowerAutho) { powerAutho_t *pPowerAutho = (powerAutho_t*)hPowerAutho; powerAutho_PowerPolicy_e newMinPowerLevel; /* calc the new MinPowerLevel */ if(pPowerAutho->m_AwakeRequired > 0) newMinPowerLevel = POWERAUTHO_POLICY_AWAKE; else newMinPowerLevel = pPowerAutho->m_PowerPolicy; /* check if the MinPowerLevel changed */ if(pPowerAutho->m_MinPowerLevel != newMinPowerLevel) { WLAN_REPORT_INFORMATION (pPowerAutho->hReport,ELP_MODULE_LOG, ("powerAutho_CalcMinPowerLevel - new MinPowerLevel is = %d\n",newMinPowerLevel)); pPowerAutho->m_MinPowerLevel = newMinPowerLevel; /* we do the update of the FW only after the init complete*/ if(pPowerAutho->initComplete == TRUE) { /* Update interface mode */ whalCtrl_ElpCtrl_SetMode(pPowerAutho->hHalCtrl, pPowerAutho->m_ElpCtrl_Mode_LUT[newMinPowerLevel]); /* Send MIB with PowerPolicy */ whalCtrl_SetMinPowerLevel(pPowerAutho->hHalCtrl, newMinPowerLevel); return OK; } } return TNETWIF_COMPLETE; } /**************************************************************************** * powerAutho_PowerPolicyUpdate() **************************************************************************** * DESCRIPTION: send the min power level to the FW for the first time * * INPUTS: hMacServices - the handle to the MacServices module. * * * OUTPUT: none * * RETURNS: OK or NOK ****************************************************************************/ int MacServices_powerAutho_ExitFromInit(TI_HANDLE hMacServices) { powerAutho_t *pPowerAutho = (powerAutho_t*)(((MacServices_t*)hMacServices)->hPowerAutho); whalParamInfo_t ParamInfo; WLAN_REPORT_INFORMATION (pPowerAutho->hReport,ELP_MODULE_LOG, ("MacServices_powerAutho_ExitFromInit: PowerPolicy = %d\n",pPowerAutho->m_MinPowerLevel )); /* set as 'after init complete' */ pPowerAutho->initComplete = TRUE; /* Update interface mode */ whalCtrl_ElpCtrl_SetMode(pPowerAutho->hHalCtrl, pPowerAutho->m_ElpCtrl_Mode_LUT[pPowerAutho->m_MinPowerLevel]); /* Send MIB with PowerPolicy */ ParamInfo.paramType = (UINT32)HAL_CTRL_MIN_POWER_LEVEL; ParamInfo.paramLength = sizeof(powerAutho_PowerPolicy_e); ParamInfo.content.minPowerPolicy = pPowerAutho->m_MinPowerLevel; whalCtrl_SetParam(pPowerAutho->hHalCtrl, &ParamInfo); return OK; } /**************************************************************************** * MacServices_powerAutho_Endrecovery() **************************************************************************** * DESCRIPTION: initialize module after recovery * * INPUTS: hMacServices - the handle to the MacServices module. * * * OUTPUT: none * * RETURNS: OK or NOK ****************************************************************************/ int powerAutho_Restart(TI_HANDLE hMacServices) { powerAutho_t *pPowerAutho = (powerAutho_t*)(((MacServices_t*)hMacServices)->hPowerAutho); /* set as 'before init complete' */ pPowerAutho->initComplete = FALSE; pPowerAutho->m_PowerPolicy = POWERAUTHO_POLICY_AWAKE; pPowerAutho->m_MinPowerLevel = POWERAUTHO_POLICY_AWAKE; return pPowerAutho->m_MinPowerLevel; } /**************************************************************************** * MacServices_powerAutho_Endrecovery() **************************************************************************** * DESCRIPTION: updates the PowerPolicy and calcs the new MinPowerPolicy of the sustem * * INPUTS: hMacServices - the handle to the MacServices module. 
* * * OUTPUT: none * * RETURNS: OK or NOK ****************************************************************************/ int MacServices_powerAutho_EndRecovery(TI_HANDLE hMacServices) { powerAutho_t *pPowerAutho = (powerAutho_t*)(((MacServices_t*)hMacServices)->hPowerAutho); WLAN_REPORT_INFORMATION (pPowerAutho->hReport,ELP_MODULE_LOG, ("MacServices_powerAutho_Endrecovery: PowerPolicy = %d\n",pPowerAutho->m_PowerPolicy)); return powerAutho_CalcMinPowerLevel(pPowerAutho); }
gpl-2.0
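The arbitration in powerAutho_CalcMinPowerLevel() above is simple once the bookkeeping is visible: every awake reason owns one bit in m_AwakeRequired, and while any bit is set the effective level is forced to AWAKE; otherwise the configured power policy applies. A minimal standalone restatement, with illustrative demo_ names in place of the driver's enums:

#include <stdio.h>

enum demo_policy { DEMO_POLICY_AWAKE, DEMO_POLICY_PD, DEMO_POLICY_ELP };

struct demo_autho {
	unsigned int awake_required;    /* bitmask, one bit per reason      */
	enum demo_policy power_policy;  /* policy requested by power manager */
};

static void demo_awake_update(struct demo_autho *a, int reason, int required)
{
	if (required)
		a->awake_required |= 1u << reason;
	else
		a->awake_required &= ~(1u << reason);
}

static enum demo_policy demo_min_level(const struct demo_autho *a)
{
	return a->awake_required ? DEMO_POLICY_AWAKE : a->power_policy;
}

int main(void)
{
	struct demo_autho a = { 0, DEMO_POLICY_ELP };

	demo_awake_update(&a, 2, 1);    /* e.g. a scan holds the HW awake */
	printf("min level = %d\n", demo_min_level(&a));  /* AWAKE        */
	demo_awake_update(&a, 2, 0);    /* reason cleared                 */
	printf("min level = %d\n", demo_min_level(&a));  /* back to ELP  */
	return 0;
}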
llubu/Glibc_Linker
rt/tst-aio6.c
24
2559
/* Test for timeout handling. Copyright (C) 2000, 2002 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ #include <aio.h> #include <errno.h> #include <stdio.h> #include <unistd.h> #include <sys/time.h> /* We expect to wait for 3 seconds so we have to increase the timeout. */ #define TIMEOUT 10 /* sec */ #define TEST_FUNCTION do_test () static int do_test (void) { struct aiocb *arr[1]; struct aiocb cb; char buf[100]; struct timeval before; struct timeval after; struct timespec timeout; int fd[2]; int result = 0; if (pipe (fd) != 0) { printf ("cannot create pipe: %m\n"); return 1; } arr[0] = &cb; cb.aio_fildes = fd[0]; cb.aio_lio_opcode = LIO_WRITE; cb.aio_reqprio = 0; cb.aio_buf = (void *) buf; cb.aio_nbytes = sizeof (buf) - 1; cb.aio_offset = 0; cb.aio_sigevent.sigev_notify = SIGEV_NONE; /* Try to read from stdin where nothing will be available. */ if (aio_read (arr[0]) < 0) { if (errno == ENOSYS) { puts ("no aio support in this configuration"); return 0; } printf ("aio_read failed: %m\n"); return 1; } /* Get the current time. */ gettimeofday (&before, NULL); /* Wait for input which is unsuccessful and therefore the function will time out. */ timeout.tv_sec = 3; timeout.tv_nsec = 0; if (aio_suspend ((const struct aiocb *const*) arr, 1, &timeout) != -1) { puts ("aio_suspend() didn't return -1"); result = 1; } else if (errno != EAGAIN) { puts ("error not set to EAGAIN"); result = 1; } else { gettimeofday (&after, NULL); if (after.tv_sec < before.tv_sec + 1) { puts ("timeout came too early"); result = 1; } } return result; } #include "../test-skeleton.c"
gpl-2.0
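One limitation worth noting in the test above: the "timeout came too early" check compares whole seconds only (after.tv_sec < before.tv_sec + 1), so a timeout that fired well short of the requested three seconds could still pass as long as at least one second ticked over. A microsecond-resolution comparison can be built from the same struct timeval values; the helper below is our own sketch, not part of the glibc test.

#include <stdio.h>
#include <sys/time.h>

static long elapsed_usec(const struct timeval *before, const struct timeval *after)
{
	return (after->tv_sec - before->tv_sec) * 1000000L
	       + (after->tv_usec - before->tv_usec);
}

int main(void)
{
	struct timeval t0 = { 10, 900000 }, t1 = { 14, 100000 };

	/* 3.2 s elapsed: meets a 3 s timeout request. */
	printf("elapsed: %ld us (timeout ok: %d)\n",
	       elapsed_usec(&t0, &t1), elapsed_usec(&t0, &t1) >= 3000000L);
	return 0;
}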
ariev7x/S7270_foxkernel
arch/arm/mach-hawaii/hawaii.c
24
7529
/************************************************************************************************/ /* */ /* Copyright 2010 Broadcom Corporation */ /* */ /* Unless you and Broadcom execute a separate written software license agreement governing */ /* use of this software, this software is licensed to you under the terms of the GNU */ /* General Public License version 2 (the GPL), available at */ /* */ /* http://www.broadcom.com/licenses/GPLv2.php */ /* */ /* with the following added to such license: */ /* */ /* As a special exception, the copyright holders of this software give you permission to */ /* link this software with independent modules, and to copy and distribute the resulting */ /* executable under terms of your choice, provided that you also meet, for each linked */ /* independent module, the terms and conditions of the license of that module. */ /* An independent module is a module which is not derived from this software. The special */ /* exception does not apply to any modifications of the software. */ /* */ /* Notwithstanding the above, under no circumstances may you combine this software in any */ /* way with any other Broadcom software provided under a license other than the GPL, */ /* without Broadcom's express prior written consent. */ /* */ /************************************************************************************************/ #include <linux/init.h> #include <linux/kernel.h> #include <linux/cpumask.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/reboot.h> #include <linux/mfd/bcm590xx/core.h> #include <linux/mfd/bcmpmu.h> #include <asm/io.h> #include <asm/mach/map.h> #include <asm/system_misc.h> #include <asm/hardware/cache-l2x0.h> #include <mach/io_map.h> #include <mach/clock.h> #include <mach/memory.h> #include <mach/system.h> #include <mach/gpio.h> #include <mach/pinmux.h> #include <mach/kona.h> #include <mach/timer.h> #include <mach/profile_timer.h> #ifdef CONFIG_HAWAII_L2X0_PREFETCH #include <mach/cache-l2x0.h> #endif #include <mach/cpu.h> #include <plat/scu.h> #include <plat/kona_reset_reason.h> #include <mach/sec_api.h> #include <mach/cdebugger.h> extern int reset_pwm_padcntrl(void); static void hawaii_poweroff(void) { #ifdef CONFIG_MFD_BCM_PMU590XX bcm590xx_shutdown(); #endif #if defined(CONFIG_MFD_BCMPMU) || defined(CONFIG_MFD_BCM_PMU59xxx) bcmpmu_client_power_off(); #endif mdelay(5); pr_err("Failed power off!!!\n"); while (1) ; } void hawaii_restart(char mode, const char *cmd) { #if defined(CONFIG_MFD_BCMPMU) || defined(CONFIG_MFD_BCM_PMU59xxx) int ret = 0; if (hard_reset_reason) { ret = bcmpmu_client_hard_reset(hard_reset_reason); BUG_ON(ret); } else { switch (mode) { case 's': /* Jump into X address. Unused. * Kept to catch wrong mode*/ soft_restart(0); break; case 'h': default: /* Clear the magic key when reboot is required */ if (cmd == NULL) cdebugger_set_upload_magic(0x00); ret = reset_pwm_padcntrl(); if (ret) pr_err("%s Failed to reset PADCNTRL"\ "pin for PWM2 to GPIO24:%d\n",\ __func__, ret); kona_reset(mode, cmd); break; } } #else switch (mode) { case 's': /* Jump into X address. Unused. 
* Kept to catch wrong mode*/ soft_restart(0); break; case 'h': default: /* Clear the magic key when reboot is required */ if (cmd == NULL) cdebugger_set_upload_magic(0x00); kona_reset(mode, cmd); break; } #endif } EXPORT_SYMBOL(hawaii_restart); #ifdef CONFIG_CACHE_L2X0 static void __init hawaii_l2x0_init(void) { void __iomem *l2cache_base = (void __iomem *)(KONA_L2C_VA); u32 val; u32 aux_val = 0x00050000; u32 aux_mask = 0xfff0ffff; /* * Enable L2 if it is not already enabled by the ROM code. */ val = readl(l2cache_base + L2X0_CTRL); val = val & 0x1; if (val == 0) { /* TURN ON THE L2 CACHE */ #ifdef CONFIG_MOBICORE_DRIVER secure_api_call(SMC_CMD_L2X0SETUP2, 0, aux_val, aux_mask, 0); secure_api_call(SMC_CMD_L2X0INVALL, 0, 0, 0, 0); secure_api_call(SMC_CMD_L2X0CTRL, 1, 0, 0, 0); #else //secure_api_call_init(); secure_api_call(SSAPI_ENABLE_L2_CACHE, 0, 0, 0, 0); #endif } /* * 32KB way size, 16-way associativity */ l2x0_init(l2cache_base, aux_val, aux_mask); } #endif static int __init hawaii_postcore_init(void) { int ret = 0; pr_info("Secure service initialized\n"); secure_api_call_init(); return ret; } postcore_initcall(hawaii_postcore_init); static int __init hawaii_arch_init(void) { int ret = 0; #ifdef CONFIG_CACHE_L2X0 hawaii_l2x0_init(); #endif return ret; } arch_initcall(hawaii_arch_init); void __init hawaii_timer_init(void) { struct gp_timer_setup gpt_setup; /* * IMPORTANT: * If we have to use slave-timer as system timer, two modifications are required * 1) modify the name of timer as, gpt_setup.name = "slave-timer"; * 2) By default when the clock manager comes up it disables most of * the clock. So if we switch to slave-timer we should prevent the * clock manager from doing this. So, modify plat-kona/include/mach/clock.h * * By default aon-timer as system timer the following is the config * #define BCM2165x_CLK_TIMERS_FLAGS (TYPE_PERI_CLK | SW_GATE | DISABLE_ON_INIT) * #define BCM2165x_CLK_HUB_TIMER_FLAGS (TYPE_PERI_CLK | SW_GATE) * * change it as follows to use slave timer as system timer * * #define BCM2165x_CLK_TIMERS_FLAGS (TYPE_PERI_CLK | SW_GATE) * #define BCM2165x_CLK_HUB_TIMER_FLAGS (TYPE_PERI_CLK | SW_GATE | DISABLE_ON_INIT) */ gpt_setup.name = "aon-timer"; gpt_setup.ch_num = 3; gpt_setup.rate = CLOCK_TICK_RATE; /* Call the init function of timer module */ gp_timer_init(&gpt_setup); profile_timer_init(IOMEM(KONA_PROFTMR_VA)); } struct sys_timer kona_timer = { .init = hawaii_timer_init, }; #ifdef CONFIG_KONA_ATAG_DT /* hawaii has 4 banks of GPIO pins */ uint32_t dt_pinmux_gpio_mask[4] = { 0, 0, 0, 0 }; uint32_t dt_gpio[128]; #endif static void cpu_info_verbose(void) { if (cpu_is_hawaii_A0()) pr_info("Hawaii CHIPID-A0\n"); } static int __init hawaii_init(void) { pm_power_off = hawaii_poweroff; cpu_info_verbose(); pinmux_init(); #ifdef CONFIG_KONA_ATAG_DT printk(KERN_INFO "pinmux_gpio_mask: 0x%x, 0x%x, 0x%x, 0x%x\n", dt_pinmux_gpio_mask[0], dt_pinmux_gpio_mask[1], dt_pinmux_gpio_mask[2], dt_pinmux_gpio_mask[3]); #endif #ifdef CONFIG_GPIOLIB /* hawaii has 4 banks of GPIO pins */ kona_gpio_init(4); #endif scu_init((void __iomem *)KONA_SCU_VA); return 0; } early_initcall(hawaii_init);
gpl-2.0
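hawaii_l2x0_init() above hands l2x0_init() an aux_val/aux_mask pair; the generic L2 cache code combines them with the ROM-programmed auxiliary control register by keeping the bits selected by the mask and overwriting the rest with aux_val, so mask 0xfff0ffff and value 0x00050000 replace only the way-size/associativity field in bits [19:16]. The sketch below shows that combination on an example register value; the boot-ROM value is made up.

#include <stdio.h>

static unsigned int apply_aux(unsigned int current, unsigned int aux_val,
			      unsigned int aux_mask)
{
	return (current & aux_mask) | aux_val;
}

int main(void)
{
	unsigned int rom_value = 0x3e020000;    /* example boot-ROM setting */
	unsigned int aux_val  = 0x00050000;     /* values used by hawaii.c  */
	unsigned int aux_mask = 0xfff0ffff;

	/* Only bits [19:16] (way size / associativity) are replaced. */
	printf("aux ctrl: 0x%08x -> 0x%08x\n", rom_value,
	       apply_aux(rom_value, aux_val, aux_mask));
	return 0;
}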
weizhenwei/fastsocket
kernel/drivers/scsi/libfc/fc_frame.c
24
2400
/* * Copyright(c) 2007 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Maintained at www.Open-FCoE.org */ /* * Frame allocation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/crc32.h> #include <scsi/fc_frame.h> /* * Check the CRC in a frame. */ u32 fc_frame_crc_check(struct fc_frame *fp) { u32 crc; u32 error; const u8 *bp; unsigned int len; WARN_ON(!fc_frame_is_linear(fp)); fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; len = (fr_len(fp) + 3) & ~3; /* round up length to include fill */ bp = (const u8 *) fr_hdr(fp); crc = ~crc32(~0, bp, len); error = crc ^ fr_crc(fp); return error; } EXPORT_SYMBOL(fc_frame_crc_check); /* * Allocate a frame intended to be sent. * Get an sk_buff for the frame and set the length. */ struct fc_frame *_fc_frame_alloc(size_t len) { struct fc_frame *fp; struct sk_buff *skb; WARN_ON((len % sizeof(u32)) != 0); len += sizeof(struct fc_frame_header); skb = alloc_skb_fclone(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM + NET_SKB_PAD, GFP_ATOMIC); if (!skb) return NULL; skb_reserve(skb, NET_SKB_PAD + FC_FRAME_HEADROOM); fp = (struct fc_frame *) skb; fc_frame_init(fp); skb_put(skb, len); return fp; } EXPORT_SYMBOL(_fc_frame_alloc); struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len) { struct fc_frame *fp; size_t fill; fill = payload_len % 4; if (fill != 0) fill = 4 - fill; fp = _fc_frame_alloc(payload_len + fill); if (fp) { memset((char *) fr_hdr(fp) + payload_len, 0, fill); /* trim is OK, we just allocated it so there are no fragments */ skb_trim(fp_skb(fp), payload_len + sizeof(struct fc_frame_header)); } return fp; } EXPORT_SYMBOL(fc_frame_alloc_fill);
gpl-2.0
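fc_frame_alloc_fill() above rounds every payload up to a whole number of 32-bit words: fill = payload_len % 4, and if that is non-zero the frame is allocated 4 - fill bytes larger and the pad is zeroed (the CRC check likewise rounds the length up before hashing). The arithmetic in isolation, as a small userspace sketch:

#include <stdio.h>
#include <stddef.h>

static size_t fc_fill_bytes(size_t payload_len)
{
	size_t fill = payload_len % 4;

	return fill ? 4 - fill : 0;
}

int main(void)
{
	size_t lens[] = { 60, 61, 62, 63, 64 };

	for (size_t i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("payload %zu -> %zu fill byte(s)\n",
		       lens[i], fc_fill_bytes(lens[i]));
	return 0;
}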
dreamer7/ZOPO-TSN
mediatek/platform/mt6589/kernel/drivers/smi/mau.c
24
8023
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/cdev.h>
#include <linux/interrupt.h>
#include <linux/sched.h>        /* wake_up_process() */
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/earlysuspend.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kthread.h>      /* kthread_create(), kthread_run() */
#include <mach/irqs.h>
#include <asm/io.h>
#include <mach/mt_smi.h>
#include "smi_reg.h"
#include "smi_common.h"
#include <mach/mt_typedefs.h>
#include <mach/mt_reg_base.h>
#include <mach/mt_clkmgr.h>
#include <mach/mt_irq.h>
#include <mach/m4u.h>

#define SMI_LOG_TAG "MAU"

/* Enable the MAU violation interrupt of one local arbiter (larb). */
int mau_enable_interrupt(int larb)
{
    M4U_WriteReg32(gLarbBaseAddr[larb], SMI_LARB_CON_SET, F_SMI_LARB_CON_MAU_IRQ_EN(1));
    return 0;
}

/* Disable the MAU violation interrupt of one local arbiter (larb). */
int mau_disable_interrupt(int larb)
{
    M4U_WriteReg32(gLarbBaseAddr[larb], SMI_LARB_CON_CLR, F_SMI_LARB_CON_MAU_IRQ_EN(1));
    return 0;
}

/* MAU interrupt handler: report and clear any monitor-entry violation
 * on the larb that raised the IRQ.
 */
static irqreturn_t mau_isr(int irq, void *dev_id)
{
    int larb, i;
    unsigned int larb_base;
    unsigned int regval;

    switch (irq) {
    case MT_SMI_LARB0_IRQ_ID: larb = 0; break;
    case MT_SMI_LARB1_IRQ_ID: larb = 1; break;
    case MT_SMI_LARB2_IRQ_ID: larb = 2; break;
    case MT_SMI_LARB3_IRQ_ID: larb = 3; break;
    case MT_SMI_LARB4_IRQ_ID: larb = 4; break;
    default:
        larb = 0;
        SMIERR("unknown irq(%d)\n", irq);
    }

    larb_clock_on(larb);
    larb_base = gLarbBaseAddr[larb];

    /* dump interrupt debug information */
    for (i = 0; i < MAU_ENTRY_NR; i++) {
        regval = M4U_ReadReg32(larb_base, SMI_MAU_ENTR_STAT(i));
        if (F_MAU_STAT_ASSERT(regval)) {
            /* violation happens in this entry */
            int port = F_MAU_STAT_ID(regval);

            SMIMSG("[MAU] larb=%d, entry=%d, port=%d\n", larb, i, port);

            regval = M4U_ReadReg32(larb_base, SMI_MAU_ENTR_START(i));
            SMIMSG("start_addr=0x%x, read_en=%d, write_en=%d\n",
                   F_MAU_START_ADDR_VAL(regval), F_MAU_START_IS_RD(regval),
                   F_MAU_START_IS_WR(regval));

            regval = M4U_ReadReg32(larb_base, SMI_MAU_ENTR_END(i));
            SMIMSG("end_addr=0x%x, virtual=%d\n",
                   F_MAU_END_ADDR_VAL(regval), F_MAU_END_IS_VIR(regval));

            /* smi_aee_print("violation by %s\n", smi_port_name[larb][port]); */
            SMIMSG("violation by %s\n", smi_port_name[larb][port]);
        }
        /* clear interrupt status */
        regval = M4U_ReadReg32(larb_base, SMI_MAU_ENTR_STAT(i));
        M4U_WriteReg32(larb_base, SMI_MAU_ENTR_STAT(i), regval);
    }

    larb_clock_off(larb);
    return IRQ_HANDLED;
}

/* Program one MAU monitor entry with an address range, access type and port mask. */
int mau_start_monitor(int larb, int entry, int rd, int wr, int vir,
                      unsigned int start, unsigned int end, unsigned int port_msk)
{
    unsigned int regval;
    unsigned int larb_base = gLarbBaseAddr[larb];

    /* mau entry start address */
    regval = F_MAU_START_WR(wr) | F_MAU_START_RD(rd) | F_MAU_START_ADD(start);
    M4U_WriteReg32(larb_base, SMI_MAU_ENTR_START(entry), regval);

    regval = F_MAU_END_VIR(vir) | F_MAU_END_ADD(end);
    M4U_WriteReg32(larb_base, SMI_MAU_ENTR_END(entry), regval);

    /* start monitor */
    regval = M4U_ReadReg32(larb_base, SMI_MAU_ENTR_GID(entry));
    M4U_WriteReg32(larb_base, SMI_MAU_ENTR_GID(entry), regval | port_msk);

    return 0;
}

/* Validate a user-supplied MAU configuration and program the requested entry. */
int mau_config(MTK_MAU_CONFIG *pMauConf)
{
    /* valid indexes are 0..SMI_LARB_NR-1 and 0..MAU_ENTRY_NR-1 */
    if (pMauConf->larb >= SMI_LARB_NR || pMauConf->entry >= MAU_ENTRY_NR) {
        SMIERR("config:larb=%d,entry=%d\n", pMauConf->larb, pMauConf->entry);
        SMIMSG("mau config error: larb=%d,entry=%d,rd=%d,wr=%d,vir=%d,start=0x%x,end=0x%x,port_msk=0x%x\n",
               pMauConf->larb, pMauConf->entry, pMauConf->monitor_read,
               pMauConf->monitor_write, pMauConf->virt, pMauConf->start,
               pMauConf->end, pMauConf->port_msk);
        return -1;
    }

    SMIMSG("mau config: larb=%d,entry=%d,rd=%d,wr=%d,vir=%d,start=0x%x,end=0x%x,port_msk=0x%x\n",
           pMauConf->larb, pMauConf->entry, pMauConf->monitor_read,
           pMauConf->monitor_write, pMauConf->virt, pMauConf->start,
           pMauConf->end, pMauConf->port_msk);

    larb_clock_on(pMauConf->larb);
    mau_start_monitor(pMauConf->larb, pMauConf->entry,
                      !!(pMauConf->monitor_read), !!(pMauConf->monitor_write),
                      !!(pMauConf->virt), pMauConf->start, pMauConf->end,
                      pMauConf->port_msk);
    larb_clock_off(pMauConf->larb);

    return 0;
}

/* Dump the configuration and violation status of every MAU entry of a larb. */
int mau_dump_status(int larb)
{
    unsigned int larb_base;
    unsigned int regval;
    int i;

    larb_clock_on(larb);
    larb_base = gLarbBaseAddr[larb];

    /* dump interrupt debug information */
    for (i = 0; i < MAU_ENTRY_NR; i++) {
        regval = M4U_ReadReg32(larb_base, SMI_MAU_ENTR_GID(i));
        if (regval != 0) {
            SMIMSG("larb(%d), entry(%d)=========>\n", larb, i);
            SMIMSG("port mask = 0x%x\n", regval);

            regval = M4U_ReadReg32(larb_base, SMI_MAU_ENTR_START(i));
            SMIMSG("start_addr=0x%x, read_en=%d, write_en=%d\n",
                   F_MAU_START_ADDR_VAL(regval), F_MAU_START_IS_RD(regval),
                   F_MAU_START_IS_WR(regval));

            regval = M4U_ReadReg32(larb_base, SMI_MAU_ENTR_END(i));
            SMIMSG("end_addr=0x%x, virtual=%d\n",
                   F_MAU_END_ADDR_VAL(regval), F_MAU_END_IS_VIR(regval));
        } else {
            SMIMSG("larb(%d), entry(%d) is free\n", larb, i);
        }

        /* dump interrupt debug information */
        regval = M4U_ReadReg32(larb_base, SMI_MAU_ENTR_STAT(i));
        if (F_MAU_STAT_ASSERT(regval)) {
            /* violation happens in this entry */
            int port = F_MAU_STAT_ID(regval);

            SMIMSG("[MAU] larb=%d, entry=%d, port=%d\n", larb, i, port);

            regval = M4U_ReadReg32(larb_base, SMI_MAU_ENTR_START(i));
            SMIMSG("start_addr=0x%x, read_en=%d, write_en=%d\n",
                   F_MAU_START_ADDR_VAL(regval), F_MAU_START_IS_RD(regval),
                   F_MAU_START_IS_WR(regval));

            regval = M4U_ReadReg32(larb_base, SMI_MAU_ENTR_END(i));
            SMIMSG("end_addr=0x%x, virtual=%d\n",
                   F_MAU_END_ADDR_VAL(regval), F_MAU_END_IS_VIR(regval));

            SMIMSG("violation by %s\n", smi_port_name[larb][port]);
        } else {
            SMIMSG("no violation of entry %d\n", i);
        }

        /* clear interrupt status */
        regval = M4U_ReadReg32(larb_base, SMI_MAU_ENTR_STAT(i));
        M4U_WriteReg32(larb_base, SMI_MAU_ENTR_STAT(i), regval);
    }

    larb_clock_off(larb);
    return 0;
}

/* Register the per-larb MAU interrupt lines and enable the MAU interrupt on each larb. */
int mau_init(void)
{
    int i;

    if (request_irq(MT_SMI_LARB0_IRQ_ID, (irq_handler_t)mau_isr, IRQF_TRIGGER_LOW, "MAU0", NULL)) {
        SMIERR("request MAU0 IRQ line failed");
        return -ENODEV;
    }
    if (request_irq(MT_SMI_LARB1_IRQ_ID, (irq_handler_t)mau_isr, IRQF_TRIGGER_LOW, "MAU1", NULL)) {
        SMIERR("request MAU1 IRQ line failed");
        return -ENODEV;
    }
    if (request_irq(MT_SMI_LARB2_IRQ_ID, (irq_handler_t)mau_isr, IRQF_TRIGGER_LOW, "MAU2", NULL)) {
        SMIERR("request MAU2 IRQ line failed");
        return -ENODEV;
    }
    if (request_irq(MT_SMI_LARB3_IRQ_ID, (irq_handler_t)mau_isr, IRQF_TRIGGER_LOW, "MAU3", NULL)) {
        SMIERR("request MAU3 IRQ line failed");
        return -ENODEV;
    }
    if (request_irq(MT_SMI_LARB4_IRQ_ID, (irq_handler_t)mau_isr, IRQF_TRIGGER_LOW, "MAU4", NULL)) {
        SMIERR("request MAU4 IRQ line failed");
        return -ENODEV;
    }

    for (i = 0; i < SMI_LARB_NR; i++) {
        larb_clock_on(i);
        mau_enable_interrupt(i);
        larb_clock_off(i);
    }

    return 0;
}
gpl-2.0
vasubabu/kernel
kernel/trace/trace_sched_wakeup.c
24
9135
/* * trace task wakeup timings * * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> * * Based on code from the latency_tracer, that is: * * Copyright (C) 2004-2006 Ingo Molnar * Copyright (C) 2004 William Lee Irwin III */ #include <linux/module.h> #include <linux/fs.h> #include <linux/debugfs.h> #include <linux/kallsyms.h> #include <linux/uaccess.h> #include <linux/ftrace.h> #include <trace/events/sched.h> #include "trace.h" static struct trace_array *wakeup_trace; static int __read_mostly tracer_enabled; static struct task_struct *wakeup_task; static int wakeup_cpu; static unsigned wakeup_prio = -1; static int wakeup_rt; static raw_spinlock_t wakeup_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; static void __wakeup_reset(struct trace_array *tr); static int save_lat_flag; #ifdef CONFIG_FUNCTION_TRACER /* * irqsoff uses its own tracer function to keep the overhead down: */ static void wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) { struct trace_array *tr = wakeup_trace; struct trace_array_cpu *data; unsigned long flags; long disabled; int resched; int cpu; int pc; if (likely(!wakeup_task)) return; pc = preempt_count(); resched = ftrace_preempt_disable(); cpu = raw_smp_processor_id(); data = tr->data[cpu]; disabled = atomic_inc_return(&data->disabled); if (unlikely(disabled != 1)) goto out; local_irq_save(flags); __raw_spin_lock(&wakeup_lock); if (unlikely(!wakeup_task)) goto unlock; /* * The task can't disappear because it needs to * wake up first, and we have the wakeup_lock. */ if (task_cpu(wakeup_task) != cpu) goto unlock; trace_function(tr, ip, parent_ip, flags, pc); unlock: __raw_spin_unlock(&wakeup_lock); local_irq_restore(flags); out: atomic_dec(&data->disabled); ftrace_preempt_enable(resched); } static struct ftrace_ops trace_ops __read_mostly = { .func = wakeup_tracer_call, }; #endif /* CONFIG_FUNCTION_TRACER */ /* * Should this new latency be reported/recorded? */ static int report_latency(cycle_t delta) { if (tracing_thresh) { if (delta < tracing_thresh) return 0; } else { if (delta <= tracing_max_latency) return 0; } return 1; } static void notrace probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) { unsigned long latency = 0, t0 = 0, t1 = 0; struct trace_array_cpu *data; cycle_t T0, T1, delta; unsigned long flags; long disabled; int cpu; int pc; tracing_record_cmdline(prev); if (unlikely(!tracer_enabled)) return; /* * When we start a new trace, we set wakeup_task to NULL * and then set tracer_enabled = 1. We want to make sure * that another CPU does not see the tracer_enabled = 1 * and the wakeup_task with an older task, that might * actually be the same as next. 
*/ smp_rmb(); if (next != wakeup_task) return; pc = preempt_count(); /* disable local data, not wakeup_cpu data */ cpu = raw_smp_processor_id(); disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled); if (likely(disabled != 1)) goto out; local_irq_save(flags); __raw_spin_lock(&wakeup_lock); /* We could race with grabbing wakeup_lock */ if (unlikely(!tracer_enabled || next != wakeup_task)) goto out_unlock; /* The task we are waiting for is waking up */ data = wakeup_trace->data[wakeup_cpu]; trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc); tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc); /* * usecs conversion is slow so we try to delay the conversion * as long as possible: */ T0 = data->preempt_timestamp; T1 = ftrace_now(cpu); delta = T1-T0; if (!report_latency(delta)) goto out_unlock; latency = nsecs_to_usecs(delta); tracing_max_latency = delta; t0 = nsecs_to_usecs(T0); t1 = nsecs_to_usecs(T1); update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu); out_unlock: __wakeup_reset(wakeup_trace); __raw_spin_unlock(&wakeup_lock); local_irq_restore(flags); out: atomic_dec(&wakeup_trace->data[cpu]->disabled); } static void __wakeup_reset(struct trace_array *tr) { int cpu; for_each_possible_cpu(cpu) tracing_reset(tr, cpu); wakeup_cpu = -1; wakeup_prio = -1; if (wakeup_task) put_task_struct(wakeup_task); wakeup_task = NULL; } static void wakeup_reset(struct trace_array *tr) { unsigned long flags; local_irq_save(flags); __raw_spin_lock(&wakeup_lock); __wakeup_reset(tr); __raw_spin_unlock(&wakeup_lock); local_irq_restore(flags); } static void probe_wakeup(struct rq *rq, struct task_struct *p, int success) { struct trace_array_cpu *data; int cpu = smp_processor_id(); unsigned long flags; long disabled; int pc; if (likely(!tracer_enabled)) return; tracing_record_cmdline(p); tracing_record_cmdline(current); if ((wakeup_rt && !rt_task(p)) || p->prio >= wakeup_prio || p->prio >= current->prio) return; pc = preempt_count(); disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled); if (unlikely(disabled != 1)) goto out; /* interrupts should be off from try_to_wake_up */ __raw_spin_lock(&wakeup_lock); /* check for races. */ if (!tracer_enabled || p->prio >= wakeup_prio) goto out_locked; /* reset the trace */ __wakeup_reset(wakeup_trace); wakeup_cpu = task_cpu(p); wakeup_prio = p->prio; wakeup_task = p; get_task_struct(wakeup_task); local_save_flags(flags); data = wakeup_trace->data[wakeup_cpu]; data->preempt_timestamp = ftrace_now(cpu); tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc); /* * We must be careful in using CALLER_ADDR2. But since wake_up * is not called by an assembly function (where as schedule is) * it should be safe to use it here. 
*/ trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc); out_locked: __raw_spin_unlock(&wakeup_lock); out: atomic_dec(&wakeup_trace->data[cpu]->disabled); } static void start_wakeup_tracer(struct trace_array *tr) { int ret; ret = register_trace_sched_wakeup(probe_wakeup); if (ret) { pr_info("wakeup trace: Couldn't activate tracepoint" " probe to kernel_sched_wakeup\n"); return; } ret = register_trace_sched_wakeup_new(probe_wakeup); if (ret) { pr_info("wakeup trace: Couldn't activate tracepoint" " probe to kernel_sched_wakeup_new\n"); goto fail_deprobe; } ret = register_trace_sched_switch(probe_wakeup_sched_switch); if (ret) { pr_info("sched trace: Couldn't activate tracepoint" " probe to kernel_sched_switch\n"); goto fail_deprobe_wake_new; } wakeup_reset(tr); /* * Don't let the tracer_enabled = 1 show up before * the wakeup_task is reset. This may be overkill since * wakeup_reset does a spin_unlock after setting the * wakeup_task to NULL, but I want to be safe. * This is a slow path anyway. */ smp_wmb(); register_ftrace_function(&trace_ops); if (tracing_is_enabled()) tracer_enabled = 1; else tracer_enabled = 0; return; fail_deprobe_wake_new: unregister_trace_sched_wakeup_new(probe_wakeup); fail_deprobe: unregister_trace_sched_wakeup(probe_wakeup); } static void stop_wakeup_tracer(struct trace_array *tr) { tracer_enabled = 0; unregister_ftrace_function(&trace_ops); unregister_trace_sched_switch(probe_wakeup_sched_switch); unregister_trace_sched_wakeup_new(probe_wakeup); unregister_trace_sched_wakeup(probe_wakeup); } static int __wakeup_tracer_init(struct trace_array *tr) { save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT; trace_flags |= TRACE_ITER_LATENCY_FMT; tracing_max_latency = 0; wakeup_trace = tr; start_wakeup_tracer(tr); return 0; } static int wakeup_tracer_init(struct trace_array *tr) { wakeup_rt = 0; return __wakeup_tracer_init(tr); } static int wakeup_rt_tracer_init(struct trace_array *tr) { wakeup_rt = 1; return __wakeup_tracer_init(tr); } static void wakeup_tracer_reset(struct trace_array *tr) { stop_wakeup_tracer(tr); /* make sure we put back any tasks we are tracing */ wakeup_reset(tr); if (!save_lat_flag) trace_flags &= ~TRACE_ITER_LATENCY_FMT; } static void wakeup_tracer_start(struct trace_array *tr) { wakeup_reset(tr); tracer_enabled = 1; } static void wakeup_tracer_stop(struct trace_array *tr) { tracer_enabled = 0; } static struct tracer wakeup_tracer __read_mostly = { .name = "wakeup", .init = wakeup_tracer_init, .reset = wakeup_tracer_reset, .start = wakeup_tracer_start, .stop = wakeup_tracer_stop, .print_max = 1, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_wakeup, #endif }; static struct tracer wakeup_rt_tracer __read_mostly = { .name = "wakeup_rt", .init = wakeup_rt_tracer_init, .reset = wakeup_tracer_reset, .start = wakeup_tracer_start, .stop = wakeup_tracer_stop, .wait_pipe = poll_wait_pipe, .print_max = 1, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_wakeup, #endif }; __init static int init_wakeup_tracer(void) { int ret; ret = register_tracer(&wakeup_tracer); if (ret) return ret; ret = register_tracer(&wakeup_rt_tracer); if (ret) return ret; return 0; } device_initcall(init_wakeup_tracer);
gpl-2.0
JamesAng/lx-std
net/sched/act_police.c
280
9949
/* * net/sched/police.c Input police filter. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * J Hadi Salim (action changes) */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/init.h> #include <linux/slab.h> #include <net/act_api.h> #include <net/netlink.h> #define L2T(p, L) qdisc_l2t((p)->tcfp_R_tab, L) #define L2T_P(p, L) qdisc_l2t((p)->tcfp_P_tab, L) #define POL_TAB_MASK 15 static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1]; static u32 police_idx_gen; static DEFINE_RWLOCK(police_lock); static struct tcf_hashinfo police_hash_info = { .htab = tcf_police_ht, .hmask = POL_TAB_MASK, .lock = &police_lock, }; /* old policer structure from before tc actions */ struct tc_police_compat { u32 index; int action; u32 limit; u32 burst; u32 mtu; struct tc_ratespec rate; struct tc_ratespec peakrate; }; /* Each policer is serialized by its individual spinlock */ static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb, int type, struct tc_action *a) { struct tcf_common *p; int err = 0, index = -1, i = 0, s_i = 0, n_i = 0; struct nlattr *nest; read_lock_bh(&police_lock); s_i = cb->args[0]; for (i = 0; i < (POL_TAB_MASK + 1); i++) { p = tcf_police_ht[tcf_hash(i, POL_TAB_MASK)]; for (; p; p = p->tcfc_next) { index++; if (index < s_i) continue; a->priv = p; a->order = index; nest = nla_nest_start(skb, a->order); if (nest == NULL) goto nla_put_failure; if (type == RTM_DELACTION) err = tcf_action_dump_1(skb, a, 0, 1); else err = tcf_action_dump_1(skb, a, 0, 0); if (err < 0) { index--; nla_nest_cancel(skb, nest); goto done; } nla_nest_end(skb, nest); n_i++; } } done: read_unlock_bh(&police_lock); if (n_i) cb->args[0] += n_i; return n_i; nla_put_failure: nla_nest_cancel(skb, nest); goto done; } static void tcf_police_destroy(struct tcf_police *p) { unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK); struct tcf_common **p1p; for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->tcfc_next) { if (*p1p == &p->common) { write_lock_bh(&police_lock); *p1p = p->tcf_next; write_unlock_bh(&police_lock); gen_kill_estimator(&p->tcf_bstats, &p->tcf_rate_est); if (p->tcfp_R_tab) qdisc_put_rtab(p->tcfp_R_tab); if (p->tcfp_P_tab) qdisc_put_rtab(p->tcfp_P_tab); /* * gen_estimator est_timer() might access p->tcf_lock * or bstats, wait a RCU grace period before freeing p */ kfree_rcu(p, tcf_rcu); return; } } WARN_ON(1); } static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = { [TCA_POLICE_RATE] = { .len = TC_RTAB_SIZE }, [TCA_POLICE_PEAKRATE] = { .len = TC_RTAB_SIZE }, [TCA_POLICE_AVRATE] = { .type = NLA_U32 }, [TCA_POLICE_RESULT] = { .type = NLA_U32 }, }; static int tcf_act_police_locate(struct nlattr *nla, struct nlattr *est, struct tc_action *a, int ovr, int bind) { unsigned int h; int ret = 0, err; struct nlattr *tb[TCA_POLICE_MAX + 1]; struct tc_police *parm; struct tcf_police *police; struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL; int size; if (nla == NULL) return -EINVAL; err = nla_parse_nested(tb, TCA_POLICE_MAX, nla, police_policy); if (err < 0) return err; if (tb[TCA_POLICE_TBF] == NULL) return -EINVAL; size = nla_len(tb[TCA_POLICE_TBF]); if (size != sizeof(*parm) && 
size != sizeof(struct tc_police_compat)) return -EINVAL; parm = nla_data(tb[TCA_POLICE_TBF]); if (parm->index) { struct tcf_common *pc; pc = tcf_hash_lookup(parm->index, &police_hash_info); if (pc != NULL) { a->priv = pc; police = to_police(pc); if (bind) { police->tcf_bindcnt += 1; police->tcf_refcnt += 1; } if (ovr) goto override; return ret; } } police = kzalloc(sizeof(*police), GFP_KERNEL); if (police == NULL) return -ENOMEM; ret = ACT_P_CREATED; police->tcf_refcnt = 1; spin_lock_init(&police->tcf_lock); if (bind) police->tcf_bindcnt = 1; override: if (parm->rate.rate) { err = -ENOMEM; R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE]); if (R_tab == NULL) goto failure; if (parm->peakrate.rate) { P_tab = qdisc_get_rtab(&parm->peakrate, tb[TCA_POLICE_PEAKRATE]); if (P_tab == NULL) goto failure; } } spin_lock_bh(&police->tcf_lock); if (est) { err = gen_replace_estimator(&police->tcf_bstats, &police->tcf_rate_est, &police->tcf_lock, est); if (err) goto failure_unlock; } else if (tb[TCA_POLICE_AVRATE] && (ret == ACT_P_CREATED || !gen_estimator_active(&police->tcf_bstats, &police->tcf_rate_est))) { err = -EINVAL; goto failure_unlock; } /* No failure allowed after this point */ if (R_tab != NULL) { qdisc_put_rtab(police->tcfp_R_tab); police->tcfp_R_tab = R_tab; } if (P_tab != NULL) { qdisc_put_rtab(police->tcfp_P_tab); police->tcfp_P_tab = P_tab; } if (tb[TCA_POLICE_RESULT]) police->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]); police->tcfp_toks = police->tcfp_burst = parm->burst; police->tcfp_mtu = parm->mtu; if (police->tcfp_mtu == 0) { police->tcfp_mtu = ~0; if (police->tcfp_R_tab) police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log; } if (police->tcfp_P_tab) police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu); police->tcf_action = parm->action; if (tb[TCA_POLICE_AVRATE]) police->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]); spin_unlock_bh(&police->tcf_lock); if (ret != ACT_P_CREATED) return ret; police->tcfp_t_c = psched_get_time(); police->tcf_index = parm->index ? 
parm->index : tcf_hash_new_index(&police_idx_gen, &police_hash_info); h = tcf_hash(police->tcf_index, POL_TAB_MASK); write_lock_bh(&police_lock); police->tcf_next = tcf_police_ht[h]; tcf_police_ht[h] = &police->common; write_unlock_bh(&police_lock); a->priv = police; return ret; failure_unlock: spin_unlock_bh(&police->tcf_lock); failure: if (P_tab) qdisc_put_rtab(P_tab); if (R_tab) qdisc_put_rtab(R_tab); if (ret == ACT_P_CREATED) kfree(police); return err; } static int tcf_act_police_cleanup(struct tc_action *a, int bind) { struct tcf_police *p = a->priv; int ret = 0; if (p != NULL) { if (bind) p->tcf_bindcnt--; p->tcf_refcnt--; if (p->tcf_refcnt <= 0 && !p->tcf_bindcnt) { tcf_police_destroy(p); ret = 1; } } return ret; } static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_police *police = a->priv; psched_time_t now; long toks; long ptoks = 0; spin_lock(&police->tcf_lock); bstats_update(&police->tcf_bstats, skb); if (police->tcfp_ewma_rate && police->tcf_rate_est.bps >= police->tcfp_ewma_rate) { police->tcf_qstats.overlimits++; if (police->tcf_action == TC_ACT_SHOT) police->tcf_qstats.drops++; spin_unlock(&police->tcf_lock); return police->tcf_action; } if (qdisc_pkt_len(skb) <= police->tcfp_mtu) { if (police->tcfp_R_tab == NULL) { spin_unlock(&police->tcf_lock); return police->tcfp_result; } now = psched_get_time(); toks = psched_tdiff_bounded(now, police->tcfp_t_c, police->tcfp_burst); if (police->tcfp_P_tab) { ptoks = toks + police->tcfp_ptoks; if (ptoks > (long)L2T_P(police, police->tcfp_mtu)) ptoks = (long)L2T_P(police, police->tcfp_mtu); ptoks -= L2T_P(police, qdisc_pkt_len(skb)); } toks += police->tcfp_toks; if (toks > (long)police->tcfp_burst) toks = police->tcfp_burst; toks -= L2T(police, qdisc_pkt_len(skb)); if ((toks|ptoks) >= 0) { police->tcfp_t_c = now; police->tcfp_toks = toks; police->tcfp_ptoks = ptoks; spin_unlock(&police->tcf_lock); return police->tcfp_result; } } police->tcf_qstats.overlimits++; if (police->tcf_action == TC_ACT_SHOT) police->tcf_qstats.drops++; spin_unlock(&police->tcf_lock); return police->tcf_action; } static int tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { unsigned char *b = skb_tail_pointer(skb); struct tcf_police *police = a->priv; struct tc_police opt = { .index = police->tcf_index, .action = police->tcf_action, .mtu = police->tcfp_mtu, .burst = police->tcfp_burst, .refcnt = police->tcf_refcnt - ref, .bindcnt = police->tcf_bindcnt - bind, }; if (police->tcfp_R_tab) opt.rate = police->tcfp_R_tab->rate; if (police->tcfp_P_tab) opt.peakrate = police->tcfp_P_tab->rate; if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt)) goto nla_put_failure; if (police->tcfp_result && nla_put_u32(skb, TCA_POLICE_RESULT, police->tcfp_result)) goto nla_put_failure; if (police->tcfp_ewma_rate && nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate)) goto nla_put_failure; return skb->len; nla_put_failure: nlmsg_trim(skb, b); return -1; } MODULE_AUTHOR("Alexey Kuznetsov"); MODULE_DESCRIPTION("Policing actions"); MODULE_LICENSE("GPL"); static struct tc_action_ops act_police_ops = { .kind = "police", .hinfo = &police_hash_info, .type = TCA_ID_POLICE, .capab = TCA_CAP_NONE, .owner = THIS_MODULE, .act = tcf_act_police, .dump = tcf_act_police_dump, .cleanup = tcf_act_police_cleanup, .lookup = tcf_hash_search, .init = tcf_act_police_locate, .walk = tcf_act_police_walker }; static int __init police_init_module(void) { return tcf_register_action(&act_police_ops); } static void 
__exit police_cleanup_module(void) { tcf_unregister_action(&act_police_ops); } module_init(police_init_module); module_exit(police_cleanup_module);
gpl-2.0
YijingWang/linux-pci
arch/blackfin/mach-common/ints-priority.c
536
31744
/* * Set up the interrupt priorities * * Copyright 2004-2009 Analog Devices Inc. * 2003 Bas Vermeulen <bas@buyways.nl> * 2002 Arcturus Networks Inc. MaTed <mated@sympatico.ca> * 2000-2001 Lineo, Inc. D. Jefff Dionne <jeff@lineo.ca> * 1999 D. Jeff Dionne <jeff@uclinux.org> * 1996 Roman Zippel * * Licensed under the GPL-2 */ #include <linux/module.h> #include <linux/kernel_stat.h> #include <linux/seq_file.h> #include <linux/irq.h> #include <linux/sched.h> #include <linux/syscore_ops.h> #include <asm/delay.h> #ifdef CONFIG_IPIPE #include <linux/ipipe.h> #endif #include <asm/traps.h> #include <asm/blackfin.h> #include <asm/gpio.h> #include <asm/irq_handler.h> #include <asm/dpmc.h> #include <asm/traps.h> /* * NOTES: * - we have separated the physical Hardware interrupt from the * levels that the LINUX kernel sees (see the description in irq.h) * - */ #ifndef CONFIG_SMP /* Initialize this to an actual value to force it into the .data * section so that we know it is properly initialized at entry into * the kernel but before bss is initialized to zero (which is where * it would live otherwise). The 0x1f magic represents the IRQs we * cannot actually mask out in hardware. */ unsigned long bfin_irq_flags = 0x1f; EXPORT_SYMBOL(bfin_irq_flags); #endif #ifdef CONFIG_PM unsigned long bfin_sic_iwr[3]; /* Up to 3 SIC_IWRx registers */ unsigned vr_wakeup; #endif #ifndef SEC_GCTL static struct ivgx { /* irq number for request_irq, available in mach-bf5xx/irq.h */ unsigned int irqno; /* corresponding bit in the SIC_ISR register */ unsigned int isrflag; } ivg_table[NR_PERI_INTS]; static struct ivg_slice { /* position of first irq in ivg_table for given ivg */ struct ivgx *ifirst; struct ivgx *istop; } ivg7_13[IVG13 - IVG7 + 1]; /* * Search SIC_IAR and fill tables with the irqvalues * and their positions in the SIC_ISR register. */ static void __init search_IAR(void) { unsigned ivg, irq_pos = 0; for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) { int irqN; ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos]; for (irqN = 0; irqN < NR_PERI_INTS; irqN += 4) { int irqn; u32 iar = bfin_read32((unsigned long *)SIC_IAR0 + #if defined(CONFIG_BF51x) || defined(CONFIG_BF52x) || \ defined(CONFIG_BF538) || defined(CONFIG_BF539) ((irqN % 32) >> 3) + ((irqN / 32) * ((SIC_IAR4 - SIC_IAR0) / 4)) #else (irqN >> 3) #endif ); for (irqn = irqN; irqn < irqN + 4; ++irqn) { int iar_shift = (irqn & 7) * 4; if (ivg == (0xf & (iar >> iar_shift))) { ivg_table[irq_pos].irqno = IVG7 + irqn; ivg_table[irq_pos].isrflag = 1 << (irqn % 32); ivg7_13[ivg].istop++; irq_pos++; } } } } } #endif /* * This is for core internal IRQs */ void bfin_ack_noop(struct irq_data *d) { /* Dummy function. */ } static void bfin_core_mask_irq(struct irq_data *d) { bfin_irq_flags &= ~(1 << d->irq); if (!hard_irqs_disabled()) hard_local_irq_enable(); } static void bfin_core_unmask_irq(struct irq_data *d) { bfin_irq_flags |= 1 << d->irq; /* * If interrupts are enabled, IMASK must contain the same value * as bfin_irq_flags. Make sure that invariant holds. If interrupts * are currently disabled we need not do anything; one of the * callers will take care of setting IMASK to the proper value * when reenabling interrupts. * local_irq_enable just does "STI bfin_irq_flags", so it's exactly * what we need. 
*/ if (!hard_irqs_disabled()) hard_local_irq_enable(); return; } #ifndef SEC_GCTL void bfin_internal_mask_irq(unsigned int irq) { unsigned long flags = hard_local_irq_save(); #ifdef SIC_IMASK0 unsigned mask_bank = BFIN_SYSIRQ(irq) / 32; unsigned mask_bit = BFIN_SYSIRQ(irq) % 32; bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) & ~(1 << mask_bit)); # if defined(CONFIG_SMP) || defined(CONFIG_ICC) bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) & ~(1 << mask_bit)); # endif #else bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() & ~(1 << BFIN_SYSIRQ(irq))); #endif /* end of SIC_IMASK0 */ hard_local_irq_restore(flags); } static void bfin_internal_mask_irq_chip(struct irq_data *d) { bfin_internal_mask_irq(d->irq); } #ifdef CONFIG_SMP void bfin_internal_unmask_irq_affinity(unsigned int irq, const struct cpumask *affinity) #else void bfin_internal_unmask_irq(unsigned int irq) #endif { unsigned long flags = hard_local_irq_save(); #ifdef SIC_IMASK0 unsigned mask_bank = BFIN_SYSIRQ(irq) / 32; unsigned mask_bit = BFIN_SYSIRQ(irq) % 32; # ifdef CONFIG_SMP if (cpumask_test_cpu(0, affinity)) # endif bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) | (1 << mask_bit)); # ifdef CONFIG_SMP if (cpumask_test_cpu(1, affinity)) bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) | (1 << mask_bit)); # endif #else bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() | (1 << BFIN_SYSIRQ(irq))); #endif hard_local_irq_restore(flags); } #ifdef CONFIG_SMP static void bfin_internal_unmask_irq_chip(struct irq_data *d) { bfin_internal_unmask_irq_affinity(d->irq, d->affinity); } static int bfin_internal_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force) { bfin_internal_mask_irq(d->irq); bfin_internal_unmask_irq_affinity(d->irq, mask); return 0; } #else static void bfin_internal_unmask_irq_chip(struct irq_data *d) { bfin_internal_unmask_irq(d->irq); } #endif #if defined(CONFIG_PM) int bfin_internal_set_wake(unsigned int irq, unsigned int state) { u32 bank, bit, wakeup = 0; unsigned long flags; bank = BFIN_SYSIRQ(irq) / 32; bit = BFIN_SYSIRQ(irq) % 32; switch (irq) { #ifdef IRQ_RTC case IRQ_RTC: wakeup |= WAKE; break; #endif #ifdef IRQ_CAN0_RX case IRQ_CAN0_RX: wakeup |= CANWE; break; #endif #ifdef IRQ_CAN1_RX case IRQ_CAN1_RX: wakeup |= CANWE; break; #endif #ifdef IRQ_USB_INT0 case IRQ_USB_INT0: wakeup |= USBWE; break; #endif #ifdef CONFIG_BF54x case IRQ_CNT: wakeup |= ROTWE; break; #endif default: break; } flags = hard_local_irq_save(); if (state) { bfin_sic_iwr[bank] |= (1 << bit); vr_wakeup |= wakeup; } else { bfin_sic_iwr[bank] &= ~(1 << bit); vr_wakeup &= ~wakeup; } hard_local_irq_restore(flags); return 0; } static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state) { return bfin_internal_set_wake(d->irq, state); } #else inline int bfin_internal_set_wake(unsigned int irq, unsigned int state) { return 0; } # define bfin_internal_set_wake_chip NULL #endif #else /* SEC_GCTL */ static void bfin_sec_preflow_handler(struct irq_data *d) { unsigned long flags = hard_local_irq_save(); unsigned int sid = BFIN_SYSIRQ(d->irq); bfin_write_SEC_SCI(0, SEC_CSID, sid); hard_local_irq_restore(flags); } static void bfin_sec_mask_ack_irq(struct irq_data *d) { unsigned long flags = hard_local_irq_save(); unsigned int sid = BFIN_SYSIRQ(d->irq); bfin_write_SEC_SCI(0, SEC_CSID, sid); hard_local_irq_restore(flags); } static void bfin_sec_unmask_irq(struct irq_data *d) { unsigned long flags = hard_local_irq_save(); unsigned int sid = BFIN_SYSIRQ(d->irq); 
bfin_write32(SEC_END, sid); hard_local_irq_restore(flags); } static void bfin_sec_enable_ssi(unsigned int sid) { unsigned long flags = hard_local_irq_save(); uint32_t reg_sctl = bfin_read_SEC_SCTL(sid); reg_sctl |= SEC_SCTL_SRC_EN; bfin_write_SEC_SCTL(sid, reg_sctl); hard_local_irq_restore(flags); } static void bfin_sec_disable_ssi(unsigned int sid) { unsigned long flags = hard_local_irq_save(); uint32_t reg_sctl = bfin_read_SEC_SCTL(sid); reg_sctl &= ((uint32_t)~SEC_SCTL_SRC_EN); bfin_write_SEC_SCTL(sid, reg_sctl); hard_local_irq_restore(flags); } static void bfin_sec_set_ssi_coreid(unsigned int sid, unsigned int coreid) { unsigned long flags = hard_local_irq_save(); uint32_t reg_sctl = bfin_read_SEC_SCTL(sid); reg_sctl &= ((uint32_t)~SEC_SCTL_CTG); bfin_write_SEC_SCTL(sid, reg_sctl | ((coreid << 20) & SEC_SCTL_CTG)); hard_local_irq_restore(flags); } static void bfin_sec_enable_sci(unsigned int sid) { unsigned long flags = hard_local_irq_save(); uint32_t reg_sctl = bfin_read_SEC_SCTL(sid); if (sid == BFIN_SYSIRQ(IRQ_WATCH0)) reg_sctl |= SEC_SCTL_FAULT_EN; else reg_sctl |= SEC_SCTL_INT_EN; bfin_write_SEC_SCTL(sid, reg_sctl); hard_local_irq_restore(flags); } static void bfin_sec_disable_sci(unsigned int sid) { unsigned long flags = hard_local_irq_save(); uint32_t reg_sctl = bfin_read_SEC_SCTL(sid); reg_sctl &= ((uint32_t)~SEC_SCTL_INT_EN); bfin_write_SEC_SCTL(sid, reg_sctl); hard_local_irq_restore(flags); } static void bfin_sec_enable(struct irq_data *d) { unsigned long flags = hard_local_irq_save(); unsigned int sid = BFIN_SYSIRQ(d->irq); bfin_sec_enable_sci(sid); bfin_sec_enable_ssi(sid); hard_local_irq_restore(flags); } static void bfin_sec_disable(struct irq_data *d) { unsigned long flags = hard_local_irq_save(); unsigned int sid = BFIN_SYSIRQ(d->irq); bfin_sec_disable_sci(sid); bfin_sec_disable_ssi(sid); hard_local_irq_restore(flags); } static void bfin_sec_set_priority(unsigned int sec_int_levels, u8 *sec_int_priority) { unsigned long flags = hard_local_irq_save(); uint32_t reg_sctl; int i; bfin_write_SEC_SCI(0, SEC_CPLVL, sec_int_levels); for (i = 0; i < SYS_IRQS - BFIN_IRQ(0); i++) { reg_sctl = bfin_read_SEC_SCTL(i) & ~SEC_SCTL_PRIO; reg_sctl |= sec_int_priority[i] << SEC_SCTL_PRIO_OFFSET; bfin_write_SEC_SCTL(i, reg_sctl); } hard_local_irq_restore(flags); } void bfin_sec_raise_irq(unsigned int irq) { unsigned long flags = hard_local_irq_save(); unsigned int sid = BFIN_SYSIRQ(irq); bfin_write32(SEC_RAISE, sid); hard_local_irq_restore(flags); } static void init_software_driven_irq(void) { bfin_sec_set_ssi_coreid(34, 0); bfin_sec_set_ssi_coreid(35, 1); bfin_sec_enable_sci(35); bfin_sec_enable_ssi(35); bfin_sec_set_ssi_coreid(36, 0); bfin_sec_set_ssi_coreid(37, 1); bfin_sec_enable_sci(37); bfin_sec_enable_ssi(37); } void handle_sec_sfi_fault(uint32_t gstat) { } void handle_sec_sci_fault(uint32_t gstat) { uint32_t core_id; uint32_t cstat; core_id = gstat & SEC_GSTAT_SCI; cstat = bfin_read_SEC_SCI(core_id, SEC_CSTAT); if (cstat & SEC_CSTAT_ERR) { switch (cstat & SEC_CSTAT_ERRC) { case SEC_CSTAT_ACKERR: printk(KERN_DEBUG "sec ack err\n"); break; default: printk(KERN_DEBUG "sec sci unknown err\n"); } } } void handle_sec_ssi_fault(uint32_t gstat) { uint32_t sid; uint32_t sstat; sid = gstat & SEC_GSTAT_SID; sstat = bfin_read_SEC_SSTAT(sid); } void handle_sec_fault(uint32_t sec_gstat) { if (sec_gstat & SEC_GSTAT_ERR) { switch (sec_gstat & SEC_GSTAT_ERRC) { case 0: handle_sec_sfi_fault(sec_gstat); break; case SEC_GSTAT_SCIERR: handle_sec_sci_fault(sec_gstat); break; case SEC_GSTAT_SSIERR: 
handle_sec_ssi_fault(sec_gstat); break; } } } static struct irqaction bfin_fault_irq = { .name = "Blackfin fault", }; static irqreturn_t bfin_fault_routine(int irq, void *data) { struct pt_regs *fp = get_irq_regs(); switch (irq) { case IRQ_C0_DBL_FAULT: double_fault_c(fp); break; case IRQ_C0_HW_ERR: dump_bfin_process(fp); dump_bfin_mem(fp); show_regs(fp); printk(KERN_NOTICE "Kernel Stack\n"); show_stack(current, NULL); print_modules(); panic("Core 0 hardware error"); break; case IRQ_C0_NMI_L1_PARITY_ERR: panic("Core 0 NMI L1 parity error"); break; case IRQ_SEC_ERR: pr_err("SEC error\n"); handle_sec_fault(bfin_read32(SEC_GSTAT)); break; default: panic("Unknown fault %d", irq); } return IRQ_HANDLED; } #endif /* SEC_GCTL */ static struct irq_chip bfin_core_irqchip = { .name = "CORE", .irq_mask = bfin_core_mask_irq, .irq_unmask = bfin_core_unmask_irq, }; #ifndef SEC_GCTL static struct irq_chip bfin_internal_irqchip = { .name = "INTN", .irq_mask = bfin_internal_mask_irq_chip, .irq_unmask = bfin_internal_unmask_irq_chip, .irq_disable = bfin_internal_mask_irq_chip, .irq_enable = bfin_internal_unmask_irq_chip, #ifdef CONFIG_SMP .irq_set_affinity = bfin_internal_set_affinity, #endif .irq_set_wake = bfin_internal_set_wake_chip, }; #else static struct irq_chip bfin_sec_irqchip = { .name = "SEC", .irq_mask_ack = bfin_sec_mask_ack_irq, .irq_mask = bfin_sec_mask_ack_irq, .irq_unmask = bfin_sec_unmask_irq, .irq_eoi = bfin_sec_unmask_irq, .irq_disable = bfin_sec_disable, .irq_enable = bfin_sec_enable, }; #endif void bfin_handle_irq(unsigned irq) { #ifdef CONFIG_IPIPE struct pt_regs regs; /* Contents not used. */ ipipe_trace_irq_entry(irq); __ipipe_handle_irq(irq, &regs); ipipe_trace_irq_exit(irq); #else /* !CONFIG_IPIPE */ generic_handle_irq(irq); #endif /* !CONFIG_IPIPE */ } #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) static int mac_stat_int_mask; static void bfin_mac_status_ack_irq(unsigned int irq) { switch (irq) { case IRQ_MAC_MMCINT: bfin_write_EMAC_MMC_TIRQS( bfin_read_EMAC_MMC_TIRQE() & bfin_read_EMAC_MMC_TIRQS()); bfin_write_EMAC_MMC_RIRQS( bfin_read_EMAC_MMC_RIRQE() & bfin_read_EMAC_MMC_RIRQS()); break; case IRQ_MAC_RXFSINT: bfin_write_EMAC_RX_STKY( bfin_read_EMAC_RX_IRQE() & bfin_read_EMAC_RX_STKY()); break; case IRQ_MAC_TXFSINT: bfin_write_EMAC_TX_STKY( bfin_read_EMAC_TX_IRQE() & bfin_read_EMAC_TX_STKY()); break; case IRQ_MAC_WAKEDET: bfin_write_EMAC_WKUP_CTL( bfin_read_EMAC_WKUP_CTL() | MPKS | RWKS); break; default: /* These bits are W1C */ bfin_write_EMAC_SYSTAT(1L << (irq - IRQ_MAC_PHYINT)); break; } } static void bfin_mac_status_mask_irq(struct irq_data *d) { unsigned int irq = d->irq; mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT)); #ifdef BF537_FAMILY switch (irq) { case IRQ_MAC_PHYINT: bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE); break; default: break; } #else if (!mac_stat_int_mask) bfin_internal_mask_irq(IRQ_MAC_ERROR); #endif bfin_mac_status_ack_irq(irq); } static void bfin_mac_status_unmask_irq(struct irq_data *d) { unsigned int irq = d->irq; #ifdef BF537_FAMILY switch (irq) { case IRQ_MAC_PHYINT: bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE); break; default: break; } #else if (!mac_stat_int_mask) bfin_internal_unmask_irq(IRQ_MAC_ERROR); #endif mac_stat_int_mask |= 1L << (irq - IRQ_MAC_PHYINT); } #ifdef CONFIG_PM int bfin_mac_status_set_wake(struct irq_data *d, unsigned int state) { #ifdef BF537_FAMILY return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state); #else return bfin_internal_set_wake(IRQ_MAC_ERROR, state); #endif } #else # 
define bfin_mac_status_set_wake NULL #endif static struct irq_chip bfin_mac_status_irqchip = { .name = "MACST", .irq_mask = bfin_mac_status_mask_irq, .irq_unmask = bfin_mac_status_unmask_irq, .irq_set_wake = bfin_mac_status_set_wake, }; void bfin_demux_mac_status_irq(unsigned int int_err_irq, struct irq_desc *inta_desc) { int i, irq = 0; u32 status = bfin_read_EMAC_SYSTAT(); for (i = 0; i <= (IRQ_MAC_STMDONE - IRQ_MAC_PHYINT); i++) if (status & (1L << i)) { irq = IRQ_MAC_PHYINT + i; break; } if (irq) { if (mac_stat_int_mask & (1L << (irq - IRQ_MAC_PHYINT))) { bfin_handle_irq(irq); } else { bfin_mac_status_ack_irq(irq); pr_debug("IRQ %d:" " MASKED MAC ERROR INTERRUPT ASSERTED\n", irq); } } else printk(KERN_ERR "%s : %s : LINE %d :\nIRQ ?: MAC ERROR" " INTERRUPT ASSERTED BUT NO SOURCE FOUND" "(EMAC_SYSTAT=0x%X)\n", __func__, __FILE__, __LINE__, status); } #endif static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle) { #ifdef CONFIG_IPIPE handle = handle_level_irq; #endif __irq_set_handler_locked(irq, handle); } #ifdef CONFIG_GPIO_ADI static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS); static void bfin_gpio_ack_irq(struct irq_data *d) { /* AFAIK ack_irq in case mask_ack is provided * get's only called for edge sense irqs */ set_gpio_data(irq_to_gpio(d->irq), 0); } static void bfin_gpio_mask_ack_irq(struct irq_data *d) { unsigned int irq = d->irq; u32 gpionr = irq_to_gpio(irq); if (!irqd_is_level_type(d)) set_gpio_data(gpionr, 0); set_gpio_maska(gpionr, 0); } static void bfin_gpio_mask_irq(struct irq_data *d) { set_gpio_maska(irq_to_gpio(d->irq), 0); } static void bfin_gpio_unmask_irq(struct irq_data *d) { set_gpio_maska(irq_to_gpio(d->irq), 1); } static unsigned int bfin_gpio_irq_startup(struct irq_data *d) { u32 gpionr = irq_to_gpio(d->irq); if (__test_and_set_bit(gpionr, gpio_enabled)) bfin_gpio_irq_prepare(gpionr); bfin_gpio_unmask_irq(d); return 0; } static void bfin_gpio_irq_shutdown(struct irq_data *d) { u32 gpionr = irq_to_gpio(d->irq); bfin_gpio_mask_irq(d); __clear_bit(gpionr, gpio_enabled); bfin_gpio_irq_free(gpionr); } static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type) { unsigned int irq = d->irq; int ret; char buf[16]; u32 gpionr = irq_to_gpio(irq); if (type == IRQ_TYPE_PROBE) { /* only probe unenabled GPIO interrupt lines */ if (test_bit(gpionr, gpio_enabled)) return 0; type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; } if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) { snprintf(buf, 16, "gpio-irq%d", irq); ret = bfin_gpio_irq_request(gpionr, buf); if (ret) return ret; if (__test_and_set_bit(gpionr, gpio_enabled)) bfin_gpio_irq_prepare(gpionr); } else { __clear_bit(gpionr, gpio_enabled); return 0; } set_gpio_inen(gpionr, 0); set_gpio_dir(gpionr, 0); if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) set_gpio_both(gpionr, 1); else set_gpio_both(gpionr, 0); if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW))) set_gpio_polar(gpionr, 1); /* low or falling edge denoted by one */ else set_gpio_polar(gpionr, 0); /* high or rising edge denoted by zero */ if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) { set_gpio_edge(gpionr, 1); set_gpio_inen(gpionr, 1); set_gpio_data(gpionr, 0); } else { set_gpio_edge(gpionr, 0); set_gpio_inen(gpionr, 1); } if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) bfin_set_irq_handler(irq, handle_edge_irq); else bfin_set_irq_handler(irq, handle_level_irq); return 0; } 
static void bfin_demux_gpio_block(unsigned int irq) { unsigned int gpio, mask; gpio = irq_to_gpio(irq); mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio); while (mask) { if (mask & 1) bfin_handle_irq(irq); irq++; mask >>= 1; } } void bfin_demux_gpio_irq(unsigned int inta_irq, struct irq_desc *desc) { unsigned int irq; switch (inta_irq) { #if defined(BF537_FAMILY) case IRQ_PF_INTA_PG_INTA: bfin_demux_gpio_block(IRQ_PF0); irq = IRQ_PG0; break; case IRQ_PH_INTA_MAC_RX: irq = IRQ_PH0; break; #elif defined(BF533_FAMILY) case IRQ_PROG_INTA: irq = IRQ_PF0; break; #elif defined(BF538_FAMILY) case IRQ_PORTF_INTA: irq = IRQ_PF0; break; #elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x) case IRQ_PORTF_INTA: irq = IRQ_PF0; break; case IRQ_PORTG_INTA: irq = IRQ_PG0; break; case IRQ_PORTH_INTA: irq = IRQ_PH0; break; #elif defined(CONFIG_BF561) case IRQ_PROG0_INTA: irq = IRQ_PF0; break; case IRQ_PROG1_INTA: irq = IRQ_PF16; break; case IRQ_PROG2_INTA: irq = IRQ_PF32; break; #endif default: BUG(); return; } bfin_demux_gpio_block(irq); } #ifdef CONFIG_PM static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state) { return bfin_gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state); } #else # define bfin_gpio_set_wake NULL #endif static struct irq_chip bfin_gpio_irqchip = { .name = "GPIO", .irq_ack = bfin_gpio_ack_irq, .irq_mask = bfin_gpio_mask_irq, .irq_mask_ack = bfin_gpio_mask_ack_irq, .irq_unmask = bfin_gpio_unmask_irq, .irq_disable = bfin_gpio_mask_irq, .irq_enable = bfin_gpio_unmask_irq, .irq_set_type = bfin_gpio_irq_type, .irq_startup = bfin_gpio_irq_startup, .irq_shutdown = bfin_gpio_irq_shutdown, .irq_set_wake = bfin_gpio_set_wake, }; #endif #ifdef CONFIG_PM #ifdef SEC_GCTL static u32 save_pint_sec_ctl[NR_PINT_SYS_IRQS]; static int sec_suspend(void) { u32 bank; for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) save_pint_sec_ctl[bank] = bfin_read_SEC_SCTL(bank + BFIN_SYSIRQ(IRQ_PINT0)); return 0; } static void sec_resume(void) { u32 bank; bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET); udelay(100); bfin_write_SEC_GCTL(SEC_GCTL_EN); bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN); for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) bfin_write_SEC_SCTL(bank + BFIN_SYSIRQ(IRQ_PINT0), save_pint_sec_ctl[bank]); } static struct syscore_ops sec_pm_syscore_ops = { .suspend = sec_suspend, .resume = sec_resume, }; #endif #endif void init_exception_vectors(void) { /* cannot program in software: * evt0 - emulation (jtag) * evt1 - reset */ bfin_write_EVT2(evt_nmi); bfin_write_EVT3(trap); bfin_write_EVT5(evt_ivhw); bfin_write_EVT6(evt_timer); bfin_write_EVT7(evt_evt7); bfin_write_EVT8(evt_evt8); bfin_write_EVT9(evt_evt9); bfin_write_EVT10(evt_evt10); bfin_write_EVT11(evt_evt11); bfin_write_EVT12(evt_evt12); bfin_write_EVT13(evt_evt13); bfin_write_EVT14(evt_evt14); bfin_write_EVT15(evt_system_call); CSYNC(); } #ifndef SEC_GCTL /* * This function should be called during kernel startup to initialize * the BFin IRQ handling routines. 
*/ int __init init_arch_irq(void) { int irq; unsigned long ilat = 0; /* Disable all the peripheral intrs - page 4-29 HW Ref manual */ #ifdef SIC_IMASK0 bfin_write_SIC_IMASK0(SIC_UNMASK_ALL); bfin_write_SIC_IMASK1(SIC_UNMASK_ALL); # ifdef SIC_IMASK2 bfin_write_SIC_IMASK2(SIC_UNMASK_ALL); # endif # if defined(CONFIG_SMP) || defined(CONFIG_ICC) bfin_write_SICB_IMASK0(SIC_UNMASK_ALL); bfin_write_SICB_IMASK1(SIC_UNMASK_ALL); # endif #else bfin_write_SIC_IMASK(SIC_UNMASK_ALL); #endif local_irq_disable(); for (irq = 0; irq <= SYS_IRQS; irq++) { if (irq <= IRQ_CORETMR) irq_set_chip(irq, &bfin_core_irqchip); else irq_set_chip(irq, &bfin_internal_irqchip); switch (irq) { #if !BFIN_GPIO_PINT #if defined(BF537_FAMILY) case IRQ_PH_INTA_MAC_RX: case IRQ_PF_INTA_PG_INTA: #elif defined(BF533_FAMILY) case IRQ_PROG_INTA: #elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x) case IRQ_PORTF_INTA: case IRQ_PORTG_INTA: case IRQ_PORTH_INTA: #elif defined(CONFIG_BF561) case IRQ_PROG0_INTA: case IRQ_PROG1_INTA: case IRQ_PROG2_INTA: #elif defined(BF538_FAMILY) case IRQ_PORTF_INTA: #endif irq_set_chained_handler(irq, bfin_demux_gpio_irq); break; #endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) case IRQ_MAC_ERROR: irq_set_chained_handler(irq, bfin_demux_mac_status_irq); break; #endif #if defined(CONFIG_SMP) || defined(CONFIG_ICC) case IRQ_SUPPLE_0: case IRQ_SUPPLE_1: irq_set_handler(irq, handle_percpu_irq); break; #endif #ifdef CONFIG_TICKSOURCE_CORETMR case IRQ_CORETMR: # ifdef CONFIG_SMP irq_set_handler(irq, handle_percpu_irq); # else irq_set_handler(irq, handle_simple_irq); # endif break; #endif #ifdef CONFIG_TICKSOURCE_GPTMR0 case IRQ_TIMER0: irq_set_handler(irq, handle_simple_irq); break; #endif default: #ifdef CONFIG_IPIPE irq_set_handler(irq, handle_level_irq); #else irq_set_handler(irq, handle_simple_irq); #endif break; } } init_mach_irq(); #if (defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)) for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++) irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip, handle_level_irq); #endif /* if configured as edge, then will be changed to do_edge_IRQ */ #ifdef CONFIG_GPIO_ADI for (irq = GPIO_IRQ_BASE; irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++) irq_set_chip_and_handler(irq, &bfin_gpio_irqchip, handle_level_irq); #endif bfin_write_IMASK(0); CSYNC(); ilat = bfin_read_ILAT(); CSYNC(); bfin_write_ILAT(ilat); CSYNC(); printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n"); /* IMASK=xxx is equivalent to STI xx or bfin_irq_flags=xx, * local_irq_enable() */ program_IAR(); /* Therefore it's better to setup IARs before interrupts enabled */ search_IAR(); /* Enable interrupts IVG7-15 */ bfin_irq_flags |= IMASK_IVG15 | IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 | IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW; /* This implicitly covers ANOMALY_05000171 * Boot-ROM code modifies SICA_IWRx wakeup registers */ #ifdef SIC_IWR0 bfin_write_SIC_IWR0(IWR_DISABLE_ALL); # ifdef SIC_IWR1 /* BF52x/BF51x system reset does not properly reset SIC_IWR1 which * will screw up the bootrom as it relies on MDMA0/1 waking it * up from IDLE instructions. 
See this report for more info: * http://blackfin.uclinux.org/gf/tracker/4323 */ if (ANOMALY_05000435) bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11)); else bfin_write_SIC_IWR1(IWR_DISABLE_ALL); # endif # ifdef SIC_IWR2 bfin_write_SIC_IWR2(IWR_DISABLE_ALL); # endif #else bfin_write_SIC_IWR(IWR_DISABLE_ALL); #endif return 0; } #ifdef CONFIG_DO_IRQ_L1 __attribute__((l1_text)) #endif static int vec_to_irq(int vec) { struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst; struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop; unsigned long sic_status[3]; if (likely(vec == EVT_IVTMR_P)) return IRQ_CORETMR; #ifdef SIC_ISR sic_status[0] = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR(); #else if (smp_processor_id()) { # ifdef SICB_ISR0 /* This will be optimized out in UP mode. */ sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0(); sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1(); # endif } else { sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0(); sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1(); } #endif #ifdef SIC_ISR2 sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2(); #endif for (;; ivg++) { if (ivg >= ivg_stop) return -1; #ifdef SIC_ISR if (sic_status[0] & ivg->isrflag) #else if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag) #endif return ivg->irqno; } } #else /* SEC_GCTL */ /* * This function should be called during kernel startup to initialize * the BFin IRQ handling routines. */ int __init init_arch_irq(void) { int irq; unsigned long ilat = 0; bfin_write_SEC_GCTL(SEC_GCTL_RESET); local_irq_disable(); for (irq = 0; irq <= SYS_IRQS; irq++) { if (irq <= IRQ_CORETMR) { irq_set_chip_and_handler(irq, &bfin_core_irqchip, handle_simple_irq); #if defined(CONFIG_TICKSOURCE_CORETMR) && defined(CONFIG_SMP) if (irq == IRQ_CORETMR) irq_set_handler(irq, handle_percpu_irq); #endif } else if (irq >= BFIN_IRQ(34) && irq <= BFIN_IRQ(37)) { irq_set_chip_and_handler(irq, &bfin_sec_irqchip, handle_percpu_irq); } else { irq_set_chip(irq, &bfin_sec_irqchip); irq_set_handler(irq, handle_fasteoi_irq); __irq_set_preflow_handler(irq, bfin_sec_preflow_handler); } } bfin_write_IMASK(0); CSYNC(); ilat = bfin_read_ILAT(); CSYNC(); bfin_write_ILAT(ilat); CSYNC(); printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n"); bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority); /* Enable interrupts IVG7-15 */ bfin_irq_flags |= IMASK_IVG15 | IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 | IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW; bfin_write_SEC_FCTL(SEC_FCTL_EN | SEC_FCTL_SYSRST_EN | SEC_FCTL_FLTIN_EN); bfin_sec_enable_sci(BFIN_SYSIRQ(IRQ_WATCH0)); bfin_sec_enable_ssi(BFIN_SYSIRQ(IRQ_WATCH0)); bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET); udelay(100); bfin_write_SEC_GCTL(SEC_GCTL_EN); bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN); bfin_write_SEC_SCI(1, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN); init_software_driven_irq(); #ifdef CONFIG_PM register_syscore_ops(&sec_pm_syscore_ops); #endif bfin_fault_irq.handler = bfin_fault_routine; #ifdef CONFIG_L1_PARITY_CHECK setup_irq(IRQ_C0_NMI_L1_PARITY_ERR, &bfin_fault_irq); #endif setup_irq(IRQ_C0_DBL_FAULT, &bfin_fault_irq); setup_irq(IRQ_SEC_ERR, &bfin_fault_irq); return 0; } #ifdef CONFIG_DO_IRQ_L1 __attribute__((l1_text)) #endif static int vec_to_irq(int vec) { if (likely(vec == EVT_IVTMR_P)) return IRQ_CORETMR; return BFIN_IRQ(bfin_read_SEC_SCI(0, SEC_CSID)); } #endif /* SEC_GCTL */ #ifdef CONFIG_DO_IRQ_L1 __attribute__((l1_text)) #endif void 
do_irq(int vec, struct pt_regs *fp) { int irq = vec_to_irq(vec); if (irq == -1) return; asm_do_IRQ(irq, fp); } #ifdef CONFIG_IPIPE int __ipipe_get_irq_priority(unsigned irq) { int ient, prio; if (irq <= IRQ_CORETMR) return irq; #ifdef SEC_GCTL if (irq >= BFIN_IRQ(0)) return IVG11; #else for (ient = 0; ient < NR_PERI_INTS; ient++) { struct ivgx *ivg = ivg_table + ient; if (ivg->irqno == irq) { for (prio = 0; prio <= IVG13-IVG7; prio++) { if (ivg7_13[prio].ifirst <= ivg && ivg7_13[prio].istop > ivg) return IVG7 + prio; } } } #endif return IVG15; } /* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */ #ifdef CONFIG_DO_IRQ_L1 __attribute__((l1_text)) #endif asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs) { struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr(); struct ipipe_domain *this_domain = __ipipe_current_domain; int irq, s = 0; irq = vec_to_irq(vec); if (irq == -1) return 0; if (irq == IRQ_SYSTMR) { #if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0) bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */ #endif /* This is basically what we need from the register frame. */ __this_cpu_write(__ipipe_tick_regs.ipend, regs->ipend); __this_cpu_write(__ipipe_tick_regs.pc, regs->pc); if (this_domain != ipipe_root_domain) __this_cpu_and(__ipipe_tick_regs.ipend, ~0x10); else __this_cpu_or(__ipipe_tick_regs.ipend, 0x10); } /* * We don't want Linux interrupt handlers to run at the * current core priority level (i.e. < EVT15), since this * might delay other interrupts handled by a high priority * domain. Here is what we do instead: * * - we raise the SYNCDEFER bit to prevent * __ipipe_handle_irq() to sync the pipeline for the root * stage for the incoming interrupt. Upon return, that IRQ is * pending in the interrupt log. * * - we raise the TIF_IRQ_SYNC bit for the current thread, so * that _schedule_and_signal_from_int will eventually sync the * pipeline from EVT15. */ if (this_domain == ipipe_root_domain) { s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status); barrier(); } ipipe_trace_irq_entry(irq); __ipipe_handle_irq(irq, regs); ipipe_trace_irq_exit(irq); if (user_mode(regs) && !ipipe_test_foreign_stack() && (current->ipipe_flags & PF_EVTRET) != 0) { /* * Testing for user_regs() does NOT fully eliminate * foreign stack contexts, because of the forged * interrupt returns we do through * __ipipe_call_irqtail. In that case, we might have * preempted a foreign stack context in a high * priority domain, with a single interrupt level now * pending after the irqtail unwinding is done. In * which case user_mode() is now true, and the event * gets dispatched spuriously. */ current->ipipe_flags &= ~PF_EVTRET; __ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs); } if (this_domain == ipipe_root_domain) { set_thread_flag(TIF_IRQ_SYNC); if (!s) { __clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status); return !test_bit(IPIPE_STALL_FLAG, &p->status); } } return 0; } #endif /* CONFIG_IPIPE */
gpl-2.0
francescosganga/remixos-kernel
fs/gfs2/aops.c
536
32049
/* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License version 2. */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/mpage.h> #include <linux/fs.h> #include <linux/writeback.h> #include <linux/swap.h> #include <linux/gfs2_ondisk.h> #include <linux/backing-dev.h> #include <linux/uio.h> #include <trace/events/writeback.h> #include "gfs2.h" #include "incore.h" #include "bmap.h" #include "glock.h" #include "inode.h" #include "log.h" #include "meta_io.h" #include "quota.h" #include "trans.h" #include "rgrp.h" #include "super.h" #include "util.h" #include "glops.h" static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page, unsigned int from, unsigned int to) { struct buffer_head *head = page_buffers(page); unsigned int bsize = head->b_size; struct buffer_head *bh; unsigned int start, end; for (bh = head, start = 0; bh != head || !start; bh = bh->b_this_page, start = end) { end = start + bsize; if (end <= from || start >= to) continue; if (gfs2_is_jdata(ip)) set_buffer_uptodate(bh); gfs2_trans_add_data(ip->i_gl, bh); } } /** * gfs2_get_block_noalloc - Fills in a buffer head with details about a block * @inode: The inode * @lblock: The block number to look up * @bh_result: The buffer head to return the result in * @create: Non-zero if we may add block to the file * * Returns: errno */ static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock, struct buffer_head *bh_result, int create) { int error; error = gfs2_block_map(inode, lblock, bh_result, 0); if (error) return error; if (!buffer_mapped(bh_result)) return -EIO; return 0; } static int gfs2_get_block_direct(struct inode *inode, sector_t lblock, struct buffer_head *bh_result, int create) { return gfs2_block_map(inode, lblock, bh_result, 0); } /** * gfs2_writepage_common - Common bits of writepage * @page: The page to be written * @wbc: The writeback control * * Returns: 1 if writepage is ok, otherwise an error code or zero if no error. */ static int gfs2_writepage_common(struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); loff_t i_size = i_size_read(inode); pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; unsigned offset; if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) goto out; if (current->journal_info) goto redirty; /* Is the page fully outside i_size? 
(truncate in progress) */ offset = i_size & (PAGE_CACHE_SIZE-1); if (page->index > end_index || (page->index == end_index && !offset)) { page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); goto out; } return 1; redirty: redirty_page_for_writepage(wbc, page); out: unlock_page(page); return 0; } /** * gfs2_writepage - Write page for writeback mappings * @page: The page * @wbc: The writeback control * */ static int gfs2_writepage(struct page *page, struct writeback_control *wbc) { int ret; ret = gfs2_writepage_common(page, wbc); if (ret <= 0) return ret; return nobh_writepage(page, gfs2_get_block_noalloc, wbc); } /** * __gfs2_jdata_writepage - The core of jdata writepage * @page: The page to write * @wbc: The writeback control * * This is shared between writepage and writepages and implements the * core of the writepage operation. If a transaction is required then * PageChecked will have been set and the transaction will have * already been started before this is called. */ static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); if (PageChecked(page)) { ClearPageChecked(page); if (!page_has_buffers(page)) { create_empty_buffers(page, inode->i_sb->s_blocksize, (1 << BH_Dirty)|(1 << BH_Uptodate)); } gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1); } return block_write_full_page(page, gfs2_get_block_noalloc, wbc); } /** * gfs2_jdata_writepage - Write complete page * @page: Page to write * @wbc: The writeback control * * Returns: errno * */ static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; struct gfs2_sbd *sdp = GFS2_SB(inode); int ret; int done_trans = 0; if (PageChecked(page)) { if (wbc->sync_mode != WB_SYNC_ALL) goto out_ignore; ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0); if (ret) goto out_ignore; done_trans = 1; } ret = gfs2_writepage_common(page, wbc); if (ret > 0) ret = __gfs2_jdata_writepage(page, wbc); if (done_trans) gfs2_trans_end(sdp); return ret; out_ignore: redirty_page_for_writepage(wbc, page); unlock_page(page); return 0; } /** * gfs2_writepages - Write a bunch of dirty pages back to disk * @mapping: The mapping to write * @wbc: Write-back control * * Used for both ordered and writeback modes. */ static int gfs2_writepages(struct address_space *mapping, struct writeback_control *wbc) { return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc); } /** * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages * @mapping: The mapping * @wbc: The writeback control * @pvec: The vector of pages * @nr_pages: The number of pages to write * @end: End position * @done_index: Page index * * Returns: non-zero if loop should terminate, zero otherwise */ static int gfs2_write_jdata_pagevec(struct address_space *mapping, struct writeback_control *wbc, struct pagevec *pvec, int nr_pages, pgoff_t end, pgoff_t *done_index) { struct inode *inode = mapping->host; struct gfs2_sbd *sdp = GFS2_SB(inode); unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize); int i; int ret; ret = gfs2_trans_begin(sdp, nrblocks, nrblocks); if (ret < 0) return ret; for(i = 0; i < nr_pages; i++) { struct page *page = pvec->pages[i]; /* * At this point, the page may be truncated or * invalidated (changing page->mapping to NULL), or * even swizzled back from swapper_space to tmpfs file * mapping. 
However, page->index will not change * because we have a reference on the page. */ if (page->index > end) { /* * can't be range_cyclic (1st pass) because * end == -1 in that case. */ ret = 1; break; } *done_index = page->index; lock_page(page); if (unlikely(page->mapping != mapping)) { continue_unlock: unlock_page(page); continue; } if (!PageDirty(page)) { /* someone wrote it for us */ goto continue_unlock; } if (PageWriteback(page)) { if (wbc->sync_mode != WB_SYNC_NONE) wait_on_page_writeback(page); else goto continue_unlock; } BUG_ON(PageWriteback(page)); if (!clear_page_dirty_for_io(page)) goto continue_unlock; trace_wbc_writepage(wbc, inode_to_bdi(inode)); ret = __gfs2_jdata_writepage(page, wbc); if (unlikely(ret)) { if (ret == AOP_WRITEPAGE_ACTIVATE) { unlock_page(page); ret = 0; } else { /* * done_index is set past this page, * so media errors will not choke * background writeout for the entire * file. This has consequences for * range_cyclic semantics (ie. it may * not be suitable for data integrity * writeout). */ *done_index = page->index + 1; ret = 1; break; } } /* * We stop writing back only if we are not doing * integrity sync. In case of integrity sync we have to * keep going until we have written all the pages * we tagged for writeback prior to entering this loop. */ if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) { ret = 1; break; } } gfs2_trans_end(sdp); return ret; } /** * gfs2_write_cache_jdata - Like write_cache_pages but different * @mapping: The mapping to write * @wbc: The writeback control * * The reason that we use our own function here is that we need to * start transactions before we grab page locks. This allows us * to get the ordering right. */ static int gfs2_write_cache_jdata(struct address_space *mapping, struct writeback_control *wbc) { int ret = 0; int done = 0; struct pagevec pvec; int nr_pages; pgoff_t uninitialized_var(writeback_index); pgoff_t index; pgoff_t end; pgoff_t done_index; int cycled; int range_whole = 0; int tag; pagevec_init(&pvec, 0); if (wbc->range_cyclic) { writeback_index = mapping->writeback_index; /* prev offset */ index = writeback_index; if (index == 0) cycled = 1; else cycled = 0; end = -1; } else { index = wbc->range_start >> PAGE_CACHE_SHIFT; end = wbc->range_end >> PAGE_CACHE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; cycled = 1; /* ignore range_cyclic tests */ } if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag = PAGECACHE_TAG_TOWRITE; else tag = PAGECACHE_TAG_DIRTY; retry: if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag_pages_for_writeback(mapping, index, end); done_index = index; while (!done && (index <= end)) { nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); if (nr_pages == 0) break; ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end, &done_index); if (ret) done = 1; if (ret > 0) ret = 0; pagevec_release(&pvec); cond_resched(); } if (!cycled && !done) { /* * range_cyclic: * We hit the last page and there is more work to be done: wrap * back to the start of the file */ cycled = 1; index = 0; end = writeback_index - 1; goto retry; } if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = done_index; return ret; } /** * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk * @mapping: The mapping to write * @wbc: The writeback control * */ static int gfs2_jdata_writepages(struct address_space *mapping, struct 
writeback_control *wbc) { struct gfs2_inode *ip = GFS2_I(mapping->host); struct gfs2_sbd *sdp = GFS2_SB(mapping->host); int ret; ret = gfs2_write_cache_jdata(mapping, wbc); if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) { gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH); ret = gfs2_write_cache_jdata(mapping, wbc); } return ret; } /** * stuffed_readpage - Fill in a Linux page with stuffed file data * @ip: the inode * @page: the page * * Returns: errno */ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page) { struct buffer_head *dibh; u64 dsize = i_size_read(&ip->i_inode); void *kaddr; int error; /* * Due to the order of unstuffing files and ->fault(), we can be * asked for a zero page in the case of a stuffed file being extended, * so we need to supply one here. It doesn't happen often. */ if (unlikely(page->index)) { zero_user(page, 0, PAGE_CACHE_SIZE); SetPageUptodate(page); return 0; } error = gfs2_meta_inode_buffer(ip, &dibh); if (error) return error; kaddr = kmap_atomic(page); if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode))) dsize = (dibh->b_size - sizeof(struct gfs2_dinode)); memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize); memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize); kunmap_atomic(kaddr); flush_dcache_page(page); brelse(dibh); SetPageUptodate(page); return 0; } /** * __gfs2_readpage - readpage * @file: The file to read a page for * @page: The page to read * * This is the core of gfs2's readpage. It's used by the internal file * reading code, as in that case we already hold the glock. Also it's * called by gfs2_readpage() once the required lock has been granted. * */ static int __gfs2_readpage(void *file, struct page *page) { struct gfs2_inode *ip = GFS2_I(page->mapping->host); struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); int error; if (gfs2_is_stuffed(ip)) { error = stuffed_readpage(ip, page); unlock_page(page); } else { error = mpage_readpage(page, gfs2_block_map); } if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) return -EIO; return error; } /** * gfs2_readpage - read a page of a file * @file: The file to read * @page: The page of the file * * This deals with the locking required. We have to unlock and * relock the page in order to get the locking in the right * order. 
*/ static int gfs2_readpage(struct file *file, struct page *page) { struct address_space *mapping = page->mapping; struct gfs2_inode *ip = GFS2_I(mapping->host); struct gfs2_holder gh; int error; unlock_page(page); gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); error = gfs2_glock_nq(&gh); if (unlikely(error)) goto out; error = AOP_TRUNCATED_PAGE; lock_page(page); if (page->mapping == mapping && !PageUptodate(page)) error = __gfs2_readpage(file, page); else unlock_page(page); gfs2_glock_dq(&gh); out: gfs2_holder_uninit(&gh); if (error && error != AOP_TRUNCATED_PAGE) lock_page(page); return error; } /** * gfs2_internal_read - read an internal file * @ip: The gfs2 inode * @buf: The buffer to fill * @pos: The file position * @size: The amount to read * */ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos, unsigned size) { struct address_space *mapping = ip->i_inode.i_mapping; unsigned long index = *pos / PAGE_CACHE_SIZE; unsigned offset = *pos & (PAGE_CACHE_SIZE - 1); unsigned copied = 0; unsigned amt; struct page *page; void *p; do { amt = size - copied; if (offset + size > PAGE_CACHE_SIZE) amt = PAGE_CACHE_SIZE - offset; page = read_cache_page(mapping, index, __gfs2_readpage, NULL); if (IS_ERR(page)) return PTR_ERR(page); p = kmap_atomic(page); memcpy(buf + copied, p + offset, amt); kunmap_atomic(p); page_cache_release(page); copied += amt; index++; offset = 0; } while(copied < size); (*pos) += size; return size; } /** * gfs2_readpages - Read a bunch of pages at once * @file: The file to read from * @mapping: Address space info * @pages: List of pages to read * @nr_pages: Number of pages to read * * Some notes: * 1. This is only for readahead, so we can simply ignore anything * which is slightly inconvenient (such as locking conflicts between * the page lock and the glock) and return having done no I/O. It's * obviously not something we'd want to do on too regular a basis. * Any I/O we ignore at this time will be done via readpage later. * 2. We don't handle stuffed files here; we let readpage do the honours. * 3. mpage_readpages() does most of the heavy lifting in the common case. * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places. 
*/ static int gfs2_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { struct inode *inode = mapping->host; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_holder gh; int ret; gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); ret = gfs2_glock_nq(&gh); if (unlikely(ret)) goto out_uninit; if (!gfs2_is_stuffed(ip)) ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map); gfs2_glock_dq(&gh); out_uninit: gfs2_holder_uninit(&gh); if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) ret = -EIO; return ret; } /** * gfs2_write_begin - Begin to write to a file * @file: The file to write to * @mapping: The mapping in which to write * @pos: The file offset at which to start writing * @len: Length of the write * @flags: Various flags * @pagep: Pointer to return the page * @fsdata: Pointer to return fs data (unused by GFS2) * * Returns: errno */ static int gfs2_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { struct gfs2_inode *ip = GFS2_I(mapping->host); struct gfs2_sbd *sdp = GFS2_SB(mapping->host); struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); unsigned int data_blocks = 0, ind_blocks = 0, rblocks; unsigned requested = 0; int alloc_required; int error = 0; pgoff_t index = pos >> PAGE_CACHE_SHIFT; unsigned from = pos & (PAGE_CACHE_SIZE - 1); struct page *page; gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh); error = gfs2_glock_nq(&ip->i_gh); if (unlikely(error)) goto out_uninit; if (&ip->i_inode == sdp->sd_rindex) { error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE, &m_ip->i_gh); if (unlikely(error)) { gfs2_glock_dq(&ip->i_gh); goto out_uninit; } } alloc_required = gfs2_write_alloc_required(ip, pos, len); if (alloc_required || gfs2_is_jdata(ip)) gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks); if (alloc_required) { struct gfs2_alloc_parms ap = { .aflags = 0, }; requested = data_blocks + ind_blocks; ap.target = requested; error = gfs2_quota_lock_check(ip, &ap); if (error) goto out_unlock; error = gfs2_inplace_reserve(ip, &ap); if (error) goto out_qunlock; } rblocks = RES_DINODE + ind_blocks; if (gfs2_is_jdata(ip)) rblocks += data_blocks ? 
data_blocks : 1; if (ind_blocks || data_blocks) rblocks += RES_STATFS + RES_QUOTA; if (&ip->i_inode == sdp->sd_rindex) rblocks += 2 * RES_STATFS; if (alloc_required) rblocks += gfs2_rg_blocks(ip, requested); error = gfs2_trans_begin(sdp, rblocks, PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); if (error) goto out_trans_fail; error = -ENOMEM; flags |= AOP_FLAG_NOFS; page = grab_cache_page_write_begin(mapping, index, flags); *pagep = page; if (unlikely(!page)) goto out_endtrans; if (gfs2_is_stuffed(ip)) { error = 0; if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) { error = gfs2_unstuff_dinode(ip, page); if (error == 0) goto prepare_write; } else if (!PageUptodate(page)) { error = stuffed_readpage(ip, page); } goto out; } prepare_write: error = __block_write_begin(page, from, len, gfs2_block_map); out: if (error == 0) return 0; unlock_page(page); page_cache_release(page); gfs2_trans_end(sdp); if (pos + len > ip->i_inode.i_size) gfs2_trim_blocks(&ip->i_inode); goto out_trans_fail; out_endtrans: gfs2_trans_end(sdp); out_trans_fail: if (alloc_required) { gfs2_inplace_release(ip); out_qunlock: gfs2_quota_unlock(ip); } out_unlock: if (&ip->i_inode == sdp->sd_rindex) { gfs2_glock_dq(&m_ip->i_gh); gfs2_holder_uninit(&m_ip->i_gh); } gfs2_glock_dq(&ip->i_gh); out_uninit: gfs2_holder_uninit(&ip->i_gh); return error; } /** * adjust_fs_space - Adjusts the free space available due to gfs2_grow * @inode: the rindex inode */ static void adjust_fs_space(struct inode *inode) { struct gfs2_sbd *sdp = inode->i_sb->s_fs_info; struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; struct buffer_head *m_bh, *l_bh; u64 fs_total, new_free; /* Total up the file system space, according to the latest rindex. */ fs_total = gfs2_ri_total(sdp); if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0) return; spin_lock(&sdp->sd_statfs_spin); gfs2_statfs_change_in(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode)); if (fs_total > (m_sc->sc_total + l_sc->sc_total)) new_free = fs_total - (m_sc->sc_total + l_sc->sc_total); else new_free = 0; spin_unlock(&sdp->sd_statfs_spin); fs_warn(sdp, "File system extended by %llu blocks.\n", (unsigned long long)new_free); gfs2_statfs_change(sdp, new_free, new_free, 0); if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0) goto out; update_statfs(sdp, m_bh, l_bh); brelse(l_bh); out: brelse(m_bh); } /** * gfs2_stuffed_write_end - Write end for stuffed files * @inode: The inode * @dibh: The buffer_head containing the on-disk inode * @pos: The file position * @len: The length of the write * @copied: How much was actually copied by the VFS * @page: The page * * This copies the data from the page into the inode block after * the inode data structure itself. 
* * Returns: errno */ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh, loff_t pos, unsigned len, unsigned copied, struct page *page) { struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); u64 to = pos + copied; void *kaddr; unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode); BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode))); kaddr = kmap_atomic(page); memcpy(buf + pos, kaddr + pos, copied); memset(kaddr + pos + copied, 0, len - copied); flush_dcache_page(page); kunmap_atomic(kaddr); if (!PageUptodate(page)) SetPageUptodate(page); unlock_page(page); page_cache_release(page); if (copied) { if (inode->i_size < to) i_size_write(inode, to); mark_inode_dirty(inode); } if (inode == sdp->sd_rindex) { adjust_fs_space(inode); sdp->sd_rindex_uptodate = 0; } brelse(dibh); gfs2_trans_end(sdp); if (inode == sdp->sd_rindex) { gfs2_glock_dq(&m_ip->i_gh); gfs2_holder_uninit(&m_ip->i_gh); } gfs2_glock_dq(&ip->i_gh); gfs2_holder_uninit(&ip->i_gh); return copied; } /** * gfs2_write_end * @file: The file to write to * @mapping: The address space to write to * @pos: The file position * @len: The length of the data * @copied: How much was actually copied by the VFS * @page: The page that has been written * @fsdata: The fsdata (unused in GFS2) * * The main write_end function for GFS2. We have a separate one for * stuffed files as they are slightly different; otherwise we just * put our locking around the VFS provided functions. * * Returns: errno */ static int gfs2_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { struct inode *inode = page->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); struct buffer_head *dibh; unsigned int from = pos & (PAGE_CACHE_SIZE - 1); unsigned int to = from + len; int ret; struct gfs2_trans *tr = current->journal_info; BUG_ON(!tr); BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL); ret = gfs2_meta_inode_buffer(ip, &dibh); if (unlikely(ret)) { unlock_page(page); page_cache_release(page); goto failed; } if (gfs2_is_stuffed(ip)) return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page); if (!gfs2_is_writeback(ip)) gfs2_page_add_databufs(ip, page, from, to); ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); if (tr->tr_num_buf_new) __mark_inode_dirty(inode, I_DIRTY_DATASYNC); else gfs2_trans_add_meta(ip->i_gl, dibh); if (inode == sdp->sd_rindex) { adjust_fs_space(inode); sdp->sd_rindex_uptodate = 0; } brelse(dibh); failed: gfs2_trans_end(sdp); gfs2_inplace_release(ip); if (ip->i_res->rs_qa_qd_num) gfs2_quota_unlock(ip); if (inode == sdp->sd_rindex) { gfs2_glock_dq(&m_ip->i_gh); gfs2_holder_uninit(&m_ip->i_gh); } gfs2_glock_dq(&ip->i_gh); gfs2_holder_uninit(&ip->i_gh); return ret; } /** * gfs2_set_page_dirty - Page dirtying function * @page: The page to dirty * * Returns: 1 if it dirtied the page, or 0 otherwise */ static int gfs2_set_page_dirty(struct page *page) { SetPageChecked(page); return __set_page_dirty_buffers(page); } /** * gfs2_bmap - Block map function * @mapping: Address space info * @lblock: The block to map * * Returns: The disk address for the block or 0 on hole or error */ static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock) { struct gfs2_inode *ip = GFS2_I(mapping->host); struct gfs2_holder i_gh; 
sector_t dblock = 0; int error; error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); if (error) return 0; if (!gfs2_is_stuffed(ip)) dblock = generic_block_bmap(mapping, lblock, gfs2_block_map); gfs2_glock_dq_uninit(&i_gh); return dblock; } static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh) { struct gfs2_bufdata *bd; lock_buffer(bh); gfs2_log_lock(sdp); clear_buffer_dirty(bh); bd = bh->b_private; if (bd) { if (!list_empty(&bd->bd_list) && !buffer_pinned(bh)) list_del_init(&bd->bd_list); else gfs2_remove_from_journal(bh, current->journal_info, 0); } bh->b_bdev = NULL; clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); gfs2_log_unlock(sdp); unlock_buffer(bh); } static void gfs2_invalidatepage(struct page *page, unsigned int offset, unsigned int length) { struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); unsigned int stop = offset + length; int partial_page = (offset || length < PAGE_CACHE_SIZE); struct buffer_head *bh, *head; unsigned long pos = 0; BUG_ON(!PageLocked(page)); if (!partial_page) ClearPageChecked(page); if (!page_has_buffers(page)) goto out; bh = head = page_buffers(page); do { if (pos + bh->b_size > stop) return; if (offset <= pos) gfs2_discard(sdp, bh); pos += bh->b_size; bh = bh->b_this_page; } while (bh != head); out: if (!partial_page) try_to_release_page(page, 0); } /** * gfs2_ok_for_dio - check that dio is valid on this file * @ip: The inode * @offset: The offset at which we are reading or writing * * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o) * 1 (to accept the i/o request) */ static int gfs2_ok_for_dio(struct gfs2_inode *ip, loff_t offset) { /* * Should we return an error here? I can't see that O_DIRECT for * a stuffed file makes any sense. For now we'll silently fall * back to buffered I/O. */ if (gfs2_is_stuffed(ip)) return 0; if (offset >= i_size_read(&ip->i_inode)) return 0; return 1; } static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; struct address_space *mapping = inode->i_mapping; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_holder gh; int rv; /* * Deferred lock, even if it's a write, since we do no allocation * on this path. All we need to change is atime, and this lock mode * ensures that other nodes have flushed their buffered read caches * (i.e. their page cache entries for this inode). We do not, * unfortunately, have the option of only flushing a range like * the VFS does. */ gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh); rv = gfs2_glock_nq(&gh); if (rv) return rv; rv = gfs2_ok_for_dio(ip, offset); if (rv != 1) goto out; /* dio not valid, fall back to buffered i/o */ /* * Now since we are holding a deferred (CW) lock at this point, you * might be wondering why this is ever needed. There is a case, however, * where we've granted a deferred local lock against a cached exclusive * glock. That is ok provided all granted local locks are deferred, but * it also means that it is possible to encounter pages which are * cached and possibly also mapped. So here we check for that and sort * them out ahead of the dio. The glock state machine will take care of * everything else. * * If in fact the cached glock state (gl->gl_state) is deferred (CW) in * the first place, mapping->nrpages will always be zero. 
*/ if (mapping->nrpages) { loff_t lstart = offset & (PAGE_CACHE_SIZE - 1); loff_t len = iov_iter_count(iter); loff_t end = PAGE_ALIGN(offset + len) - 1; rv = 0; if (len == 0) goto out; if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags)) unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len); rv = filemap_write_and_wait_range(mapping, lstart, end); if (rv) goto out; if (iov_iter_rw(iter) == WRITE) truncate_inode_pages_range(mapping, lstart, end); } rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, offset, gfs2_get_block_direct, NULL, NULL, 0); out: gfs2_glock_dq(&gh); gfs2_holder_uninit(&gh); return rv; } /** * gfs2_releasepage - free the metadata associated with a page * @page: the page that's being released * @gfp_mask: passed from Linux VFS, ignored by us * * Call try_to_free_buffers() if the buffers in this page can be * released. * * Returns: 0 */ int gfs2_releasepage(struct page *page, gfp_t gfp_mask) { struct address_space *mapping = page->mapping; struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); struct buffer_head *bh, *head; struct gfs2_bufdata *bd; if (!page_has_buffers(page)) return 0; gfs2_log_lock(sdp); spin_lock(&sdp->sd_ail_lock); head = bh = page_buffers(page); do { if (atomic_read(&bh->b_count)) goto cannot_release; bd = bh->b_private; if (bd && bd->bd_tr) goto cannot_release; if (buffer_pinned(bh) || buffer_dirty(bh)) goto not_possible; bh = bh->b_this_page; } while(bh != head); spin_unlock(&sdp->sd_ail_lock); head = bh = page_buffers(page); do { bd = bh->b_private; if (bd) { gfs2_assert_warn(sdp, bd->bd_bh == bh); if (!list_empty(&bd->bd_list)) list_del_init(&bd->bd_list); bd->bd_bh = NULL; bh->b_private = NULL; kmem_cache_free(gfs2_bufdata_cachep, bd); } bh = bh->b_this_page; } while (bh != head); gfs2_log_unlock(sdp); return try_to_free_buffers(page); not_possible: /* Should never happen */ WARN_ON(buffer_dirty(bh)); WARN_ON(buffer_pinned(bh)); cannot_release: spin_unlock(&sdp->sd_ail_lock); gfs2_log_unlock(sdp); return 0; } static const struct address_space_operations gfs2_writeback_aops = { .writepage = gfs2_writepage, .writepages = gfs2_writepages, .readpage = gfs2_readpage, .readpages = gfs2_readpages, .write_begin = gfs2_write_begin, .write_end = gfs2_write_end, .bmap = gfs2_bmap, .invalidatepage = gfs2_invalidatepage, .releasepage = gfs2_releasepage, .direct_IO = gfs2_direct_IO, .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, }; static const struct address_space_operations gfs2_ordered_aops = { .writepage = gfs2_writepage, .writepages = gfs2_writepages, .readpage = gfs2_readpage, .readpages = gfs2_readpages, .write_begin = gfs2_write_begin, .write_end = gfs2_write_end, .set_page_dirty = gfs2_set_page_dirty, .bmap = gfs2_bmap, .invalidatepage = gfs2_invalidatepage, .releasepage = gfs2_releasepage, .direct_IO = gfs2_direct_IO, .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, }; static const struct address_space_operations gfs2_jdata_aops = { .writepage = gfs2_jdata_writepage, .writepages = gfs2_jdata_writepages, .readpage = gfs2_readpage, .readpages = gfs2_readpages, .write_begin = gfs2_write_begin, .write_end = gfs2_write_end, .set_page_dirty = gfs2_set_page_dirty, .bmap = gfs2_bmap, .invalidatepage = gfs2_invalidatepage, .releasepage = gfs2_releasepage, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, }; void 
gfs2_set_aops(struct inode *inode) { struct gfs2_inode *ip = GFS2_I(inode); if (gfs2_is_writeback(ip)) inode->i_mapping->a_ops = &gfs2_writeback_aops; else if (gfs2_is_ordered(ip)) inode->i_mapping->a_ops = &gfs2_ordered_aops; else if (gfs2_is_jdata(ip)) inode->i_mapping->a_ops = &gfs2_jdata_aops; else BUG(); }
gpl-2.0
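The stuffed-file read path in the GFS2 code above (stuffed_readpage) reduces to a copy-then-zero pattern: copy at most (block size minus dinode header) bytes of inline data into the page, then zero-fill the remainder so no stale data leaks out. The standalone C sketch below illustrates only that pattern with ordinary userspace buffers; the DEMO_* sizes and the demo_stuffed_read name are assumptions invented for illustration, not the GFS2 on-disk layout or any kernel API.

/*
 * Illustrative sketch only: mimics the copy-then-zero pattern of
 * stuffed_readpage() with plain userspace buffers. The sizes and the
 * fake "dinode header" are assumptions for the demo, not GFS2's layout.
 */
#include <stdio.h>
#include <string.h>

#define DEMO_BLOCK_SIZE   4096   /* stand-in for dibh->b_size */
#define DEMO_HEADER_SIZE   232   /* stand-in for sizeof(struct gfs2_dinode) */
#define DEMO_PAGE_SIZE    4096   /* stand-in for PAGE_CACHE_SIZE */

static void demo_stuffed_read(const unsigned char *block, size_t isize,
			      unsigned char *page)
{
	size_t dsize = isize;

	/* Never copy more than the inline area that follows the header. */
	if (dsize > DEMO_BLOCK_SIZE - DEMO_HEADER_SIZE)
		dsize = DEMO_BLOCK_SIZE - DEMO_HEADER_SIZE;

	memcpy(page, block + DEMO_HEADER_SIZE, dsize);
	/* Zero-fill the tail so the page never exposes stale data. */
	memset(page + dsize, 0, DEMO_PAGE_SIZE - dsize);
}

int main(void)
{
	unsigned char block[DEMO_BLOCK_SIZE];
	unsigned char page[DEMO_PAGE_SIZE];

	memset(block, 0xab, sizeof(block));
	memcpy(block + DEMO_HEADER_SIZE, "hello, stuffed file", 19);

	demo_stuffed_read(block, 19, page);
	printf("%.19s\n", page);	/* prints the inline data */
	return 0;
}

gfs2_stuffed_write_end above is the mirror image of this: it copies from the page back into the inline area and zero-fills the part of the page that was not copied.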
LinTeX9527/linux
drivers/spi/spi-xlp.c
536
11699
/* * Copyright (C) 2003-2015 Broadcom Corporation * All Rights Reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 (GPL v2) * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/clk.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/of.h> #include <linux/interrupt.h> /* SPI Configuration Register */ #define XLP_SPI_CONFIG 0x00 #define XLP_SPI_CPHA BIT(0) #define XLP_SPI_CPOL BIT(1) #define XLP_SPI_CS_POL BIT(2) #define XLP_SPI_TXMISO_EN BIT(3) #define XLP_SPI_TXMOSI_EN BIT(4) #define XLP_SPI_RXMISO_EN BIT(5) #define XLP_SPI_CS_LSBFE BIT(10) #define XLP_SPI_RXCAP_EN BIT(11) /* SPI Frequency Divider Register */ #define XLP_SPI_FDIV 0x04 /* SPI Command Register */ #define XLP_SPI_CMD 0x08 #define XLP_SPI_CMD_IDLE_MASK 0x0 #define XLP_SPI_CMD_TX_MASK 0x1 #define XLP_SPI_CMD_RX_MASK 0x2 #define XLP_SPI_CMD_TXRX_MASK 0x3 #define XLP_SPI_CMD_CONT BIT(4) #define XLP_SPI_XFR_BITCNT_SHIFT 16 /* SPI Status Register */ #define XLP_SPI_STATUS 0x0c #define XLP_SPI_XFR_PENDING BIT(0) #define XLP_SPI_XFR_DONE BIT(1) #define XLP_SPI_TX_INT BIT(2) #define XLP_SPI_RX_INT BIT(3) #define XLP_SPI_TX_UF BIT(4) #define XLP_SPI_RX_OF BIT(5) #define XLP_SPI_STAT_MASK 0x3f /* SPI Interrupt Enable Register */ #define XLP_SPI_INTR_EN 0x10 #define XLP_SPI_INTR_DONE BIT(0) #define XLP_SPI_INTR_TXTH BIT(1) #define XLP_SPI_INTR_RXTH BIT(2) #define XLP_SPI_INTR_TXUF BIT(3) #define XLP_SPI_INTR_RXOF BIT(4) /* SPI FIFO Threshold Register */ #define XLP_SPI_FIFO_THRESH 0x14 /* SPI FIFO Word Count Register */ #define XLP_SPI_FIFO_WCNT 0x18 #define XLP_SPI_RXFIFO_WCNT_MASK 0xf #define XLP_SPI_TXFIFO_WCNT_MASK 0xf0 #define XLP_SPI_TXFIFO_WCNT_SHIFT 4 /* SPI Transmit Data FIFO Register */ #define XLP_SPI_TXDATA_FIFO 0x1c /* SPI Receive Data FIFO Register */ #define XLP_SPI_RXDATA_FIFO 0x20 /* SPI System Control Register */ #define XLP_SPI_SYSCTRL 0x100 #define XLP_SPI_SYS_RESET BIT(0) #define XLP_SPI_SYS_CLKDIS BIT(1) #define XLP_SPI_SYS_PMEN BIT(8) #define SPI_CS_OFFSET 0x40 #define XLP_SPI_TXRXTH 0x80 #define XLP_SPI_FIFO_SIZE 8 #define XLP_SPI_MAX_CS 4 #define XLP_SPI_DEFAULT_FREQ 133333333 #define XLP_SPI_FDIV_MIN 4 #define XLP_SPI_FDIV_MAX 65535 /* * The controller can transfer only 28 bytes reliably at a time, so * split larger transfers into chunks of at most 28 bytes. 
*/ #define XLP_SPI_XFER_SIZE 28 struct xlp_spi_priv { struct device dev; /* device structure */ void __iomem *base; /* spi registers base address */ const u8 *tx_buf; /* tx data buffer */ u8 *rx_buf; /* rx data buffer */ int tx_len; /* tx xfer length */ int rx_len; /* rx xfer length */ int txerrors; /* TXFIFO underflow count */ int rxerrors; /* RXFIFO overflow count */ int cs; /* slave device chip select */ u32 spi_clk; /* spi clock frequency */ bool cmd_cont; /* cs active */ struct completion done; /* completion notification */ }; static inline u32 xlp_spi_reg_read(struct xlp_spi_priv *priv, int cs, int regoff) { return readl(priv->base + regoff + cs * SPI_CS_OFFSET); } static inline void xlp_spi_reg_write(struct xlp_spi_priv *priv, int cs, int regoff, u32 val) { writel(val, priv->base + regoff + cs * SPI_CS_OFFSET); } static inline void xlp_spi_sysctl_write(struct xlp_spi_priv *priv, int regoff, u32 val) { writel(val, priv->base + regoff); } /* * Setup global SPI_SYSCTRL register for all SPI channels. */ static void xlp_spi_sysctl_setup(struct xlp_spi_priv *xspi) { int cs; for (cs = 0; cs < XLP_SPI_MAX_CS; cs++) xlp_spi_sysctl_write(xspi, XLP_SPI_SYSCTRL, XLP_SPI_SYS_RESET << cs); xlp_spi_sysctl_write(xspi, XLP_SPI_SYSCTRL, XLP_SPI_SYS_PMEN); } static int xlp_spi_setup(struct spi_device *spi) { struct xlp_spi_priv *xspi; u32 fdiv, cfg; int cs; xspi = spi_master_get_devdata(spi->master); cs = spi->chip_select; /* * The value of fdiv must be between 4 and 65535. */ fdiv = DIV_ROUND_UP(xspi->spi_clk, spi->max_speed_hz); if (fdiv > XLP_SPI_FDIV_MAX) fdiv = XLP_SPI_FDIV_MAX; else if (fdiv < XLP_SPI_FDIV_MIN) fdiv = XLP_SPI_FDIV_MIN; xlp_spi_reg_write(xspi, cs, XLP_SPI_FDIV, fdiv); xlp_spi_reg_write(xspi, cs, XLP_SPI_FIFO_THRESH, XLP_SPI_TXRXTH); cfg = xlp_spi_reg_read(xspi, cs, XLP_SPI_CONFIG); if (spi->mode & SPI_CPHA) cfg |= XLP_SPI_CPHA; else cfg &= ~XLP_SPI_CPHA; if (spi->mode & SPI_CPOL) cfg |= XLP_SPI_CPOL; else cfg &= ~XLP_SPI_CPOL; if (!(spi->mode & SPI_CS_HIGH)) cfg |= XLP_SPI_CS_POL; else cfg &= ~XLP_SPI_CS_POL; if (spi->mode & SPI_LSB_FIRST) cfg |= XLP_SPI_CS_LSBFE; else cfg &= ~XLP_SPI_CS_LSBFE; cfg |= XLP_SPI_TXMOSI_EN | XLP_SPI_RXMISO_EN; if (fdiv == 4) cfg |= XLP_SPI_RXCAP_EN; xlp_spi_reg_write(xspi, cs, XLP_SPI_CONFIG, cfg); return 0; } static void xlp_spi_read_rxfifo(struct xlp_spi_priv *xspi) { u32 rx_data, rxfifo_cnt; int i, j, nbytes; rxfifo_cnt = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_FIFO_WCNT); rxfifo_cnt &= XLP_SPI_RXFIFO_WCNT_MASK; while (rxfifo_cnt) { rx_data = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_RXDATA_FIFO); j = 0; nbytes = min(xspi->rx_len, 4); for (i = nbytes - 1; i >= 0; i--, j++) xspi->rx_buf[i] = (rx_data >> (j * 8)) & 0xff; xspi->rx_len -= nbytes; xspi->rx_buf += nbytes; rxfifo_cnt--; } } static void xlp_spi_fill_txfifo(struct xlp_spi_priv *xspi) { u32 tx_data, txfifo_cnt; int i, j, nbytes; txfifo_cnt = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_FIFO_WCNT); txfifo_cnt &= XLP_SPI_TXFIFO_WCNT_MASK; txfifo_cnt >>= XLP_SPI_TXFIFO_WCNT_SHIFT; while (xspi->tx_len && (txfifo_cnt < XLP_SPI_FIFO_SIZE)) { j = 0; tx_data = 0; nbytes = min(xspi->tx_len, 4); for (i = nbytes - 1; i >= 0; i--, j++) tx_data |= xspi->tx_buf[i] << (j * 8); xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_TXDATA_FIFO, tx_data); xspi->tx_len -= nbytes; xspi->tx_buf += nbytes; txfifo_cnt++; } } static irqreturn_t xlp_spi_interrupt(int irq, void *dev_id) { struct xlp_spi_priv *xspi = dev_id; u32 stat; stat = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_STATUS) & XLP_SPI_STAT_MASK; if (!stat) return 
IRQ_NONE; if (stat & XLP_SPI_TX_INT) { if (xspi->tx_len) xlp_spi_fill_txfifo(xspi); if (stat & XLP_SPI_TX_UF) xspi->txerrors++; } if (stat & XLP_SPI_RX_INT) { if (xspi->rx_len) xlp_spi_read_rxfifo(xspi); if (stat & XLP_SPI_RX_OF) xspi->rxerrors++; } /* write status back to clear interrupts */ xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_STATUS, stat); if (stat & XLP_SPI_XFR_DONE) complete(&xspi->done); return IRQ_HANDLED; } static void xlp_spi_send_cmd(struct xlp_spi_priv *xspi, int xfer_len, int cmd_cont) { u32 cmd = 0; if (xspi->tx_buf) cmd |= XLP_SPI_CMD_TX_MASK; if (xspi->rx_buf) cmd |= XLP_SPI_CMD_RX_MASK; if (cmd_cont) cmd |= XLP_SPI_CMD_CONT; cmd |= ((xfer_len * 8 - 1) << XLP_SPI_XFR_BITCNT_SHIFT); xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_CMD, cmd); } static int xlp_spi_xfer_block(struct xlp_spi_priv *xs, const unsigned char *tx_buf, unsigned char *rx_buf, int xfer_len, int cmd_cont) { int timeout; u32 intr_mask = 0; xs->tx_buf = tx_buf; xs->rx_buf = rx_buf; xs->tx_len = (xs->tx_buf == NULL) ? 0 : xfer_len; xs->rx_len = (xs->rx_buf == NULL) ? 0 : xfer_len; xs->txerrors = xs->rxerrors = 0; /* fill TXDATA_FIFO, then send the CMD */ if (xs->tx_len) xlp_spi_fill_txfifo(xs); xlp_spi_send_cmd(xs, xfer_len, cmd_cont); /* * We are getting some spurious tx interrupts, so avoid enabling * tx interrupts when only rx is in process. * Enable all the interrupts in tx case. */ if (xs->tx_len) intr_mask |= XLP_SPI_INTR_TXTH | XLP_SPI_INTR_TXUF | XLP_SPI_INTR_RXTH | XLP_SPI_INTR_RXOF; else intr_mask |= XLP_SPI_INTR_RXTH | XLP_SPI_INTR_RXOF; intr_mask |= XLP_SPI_INTR_DONE; xlp_spi_reg_write(xs, xs->cs, XLP_SPI_INTR_EN, intr_mask); timeout = wait_for_completion_timeout(&xs->done, msecs_to_jiffies(1000)); /* Disable interrupts */ xlp_spi_reg_write(xs, xs->cs, XLP_SPI_INTR_EN, 0x0); if (!timeout) { dev_err(&xs->dev, "xfer timedout!\n"); goto out; } if (xs->txerrors || xs->rxerrors) dev_err(&xs->dev, "Over/Underflow rx %d tx %d xfer %d!\n", xs->rxerrors, xs->txerrors, xfer_len); return xfer_len; out: return -ETIMEDOUT; } static int xlp_spi_txrx_bufs(struct xlp_spi_priv *xs, struct spi_transfer *t) { int bytesleft, sz; unsigned char *rx_buf; const unsigned char *tx_buf; tx_buf = t->tx_buf; rx_buf = t->rx_buf; bytesleft = t->len; while (bytesleft) { if (bytesleft > XLP_SPI_XFER_SIZE) sz = xlp_spi_xfer_block(xs, tx_buf, rx_buf, XLP_SPI_XFER_SIZE, 1); else sz = xlp_spi_xfer_block(xs, tx_buf, rx_buf, bytesleft, xs->cmd_cont); if (sz < 0) return sz; bytesleft -= sz; if (tx_buf) tx_buf += sz; if (rx_buf) rx_buf += sz; } return bytesleft; } static int xlp_spi_transfer_one(struct spi_master *master, struct spi_device *spi, struct spi_transfer *t) { struct xlp_spi_priv *xspi = spi_master_get_devdata(master); int ret = 0; xspi->cs = spi->chip_select; xspi->dev = spi->dev; if (spi_transfer_is_last(master, t)) xspi->cmd_cont = 0; else xspi->cmd_cont = 1; if (xlp_spi_txrx_bufs(xspi, t)) ret = -EIO; spi_finalize_current_transfer(master); return ret; } static int xlp_spi_probe(struct platform_device *pdev) { struct spi_master *master; struct xlp_spi_priv *xspi; struct resource *res; struct clk *clk; int irq, err; xspi = devm_kzalloc(&pdev->dev, sizeof(*xspi), GFP_KERNEL); if (!xspi) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); xspi->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(xspi->base)) return PTR_ERR(xspi->base); irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "no IRQ resource found\n"); return -EINVAL; } err = devm_request_irq(&pdev->dev, irq, 
xlp_spi_interrupt, 0, pdev->name, xspi); if (err) { dev_err(&pdev->dev, "unable to request irq %d\n", irq); return err; } clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { dev_err(&pdev->dev, "could not get spi clock\n"); return -ENODEV; } xspi->spi_clk = clk_get_rate(clk); master = spi_alloc_master(&pdev->dev, 0); if (!master) { dev_err(&pdev->dev, "could not alloc master\n"); return -ENOMEM; } master->bus_num = 0; master->num_chipselect = XLP_SPI_MAX_CS; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; master->setup = xlp_spi_setup; master->transfer_one = xlp_spi_transfer_one; master->dev.of_node = pdev->dev.of_node; init_completion(&xspi->done); spi_master_set_devdata(master, xspi); xlp_spi_sysctl_setup(xspi); /* register spi controller */ err = devm_spi_register_master(&pdev->dev, master); if (err) { dev_err(&pdev->dev, "spi register master failed!\n"); spi_master_put(master); return err; } return 0; } static const struct of_device_id xlp_spi_dt_id[] = { { .compatible = "netlogic,xlp832-spi" }, { }, }; static struct platform_driver xlp_spi_driver = { .probe = xlp_spi_probe, .driver = { .name = "xlp-spi", .of_match_table = xlp_spi_dt_id, }, }; module_platform_driver(xlp_spi_driver); MODULE_AUTHOR("Kamlakant Patel <kamlakant.patel@broadcom.com>"); MODULE_DESCRIPTION("Netlogic XLP SPI controller driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
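xlp_spi_fill_txfifo() and xlp_spi_read_rxfifo() in the driver above move data through the 32-bit FIFO registers up to four bytes at a time, with the first byte of each group landing in the most significant byte of the word. The userspace sketch below reproduces that packing and the matching unpacking so the byte ordering is easy to see; demo_pack_word and demo_unpack_word are invented names for illustration only, not part of the driver.

/*
 * Illustrative sketch only: repeats, in plain C, the byte-to-word
 * packing used by the driver's FIFO fill/drain loops. Names and the
 * sample bytes are made up for the demo.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t demo_pack_word(const uint8_t *buf, int nbytes)
{
	uint32_t word = 0;
	int i, j;

	/* First byte of the group ends up in the most significant byte. */
	for (i = nbytes - 1, j = 0; i >= 0; i--, j++)
		word |= (uint32_t)buf[i] << (j * 8);
	return word;
}

static void demo_unpack_word(uint32_t word, uint8_t *buf, int nbytes)
{
	int i, j;

	for (i = nbytes - 1, j = 0; i >= 0; i--, j++)
		buf[i] = (word >> (j * 8)) & 0xff;
}

int main(void)
{
	const uint8_t tx[4] = { 0x12, 0x34, 0x56, 0x78 };
	uint8_t rx[4];
	uint32_t w = demo_pack_word(tx, 4);

	demo_unpack_word(w, rx, 4);
	printf("word = 0x%08x, round trip %02x %02x %02x %02x\n",
	       (unsigned int)w, rx[0], rx[1], rx[2], rx[3]);
	return 0;
}

Packing 0x12 0x34 0x56 0x78 this way yields the word 0x12345678, matching what the driver's loops compute before the word is written to XLP_SPI_TXDATA_FIFO.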
pocketbook-free/kernel_622
net/rose/rose_subr.c
792
11840
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/slab.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <net/tcp_states.h> #include <asm/system.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <net/rose.h> static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose); /* * This routine purges all of the queues of frames. */ void rose_clear_queues(struct sock *sk) { skb_queue_purge(&sk->sk_write_queue); skb_queue_purge(&rose_sk(sk)->ack_queue); } /* * This routine purges the input queue of those frames that have been * acknowledged. This replaces the boxes labelled "V(a) <- N(r)" on the * SDL diagram. */ void rose_frames_acked(struct sock *sk, unsigned short nr) { struct sk_buff *skb; struct rose_sock *rose = rose_sk(sk); /* * Remove all the ack-ed frames from the ack queue. */ if (rose->va != nr) { while (skb_peek(&rose->ack_queue) != NULL && rose->va != nr) { skb = skb_dequeue(&rose->ack_queue); kfree_skb(skb); rose->va = (rose->va + 1) % ROSE_MODULUS; } } } void rose_requeue_frames(struct sock *sk) { struct sk_buff *skb, *skb_prev = NULL; /* * Requeue all the un-ack-ed frames on the output queue to be picked * up by rose_kick. This arrangement handles the possibility of an * empty output queue. */ while ((skb = skb_dequeue(&rose_sk(sk)->ack_queue)) != NULL) { if (skb_prev == NULL) skb_queue_head(&sk->sk_write_queue, skb); else skb_append(skb_prev, skb, &sk->sk_write_queue); skb_prev = skb; } } /* * Validate that the value of nr is between va and vs. Return true or * false for testing. */ int rose_validate_nr(struct sock *sk, unsigned short nr) { struct rose_sock *rose = rose_sk(sk); unsigned short vc = rose->va; while (vc != rose->vs) { if (nr == vc) return 1; vc = (vc + 1) % ROSE_MODULUS; } return nr == rose->vs; } /* * This routine is called when the packet layer internally generates a * control frame. */ void rose_write_internal(struct sock *sk, int frametype) { struct rose_sock *rose = rose_sk(sk); struct sk_buff *skb; unsigned char *dptr; unsigned char lci1, lci2; char buffer[100]; int len, faclen = 0; len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1; switch (frametype) { case ROSE_CALL_REQUEST: len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN; faclen = rose_create_facilities(buffer, rose); len += faclen; break; case ROSE_CALL_ACCEPTED: case ROSE_CLEAR_REQUEST: case ROSE_RESET_REQUEST: len += 2; break; } if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL) return; /* * Space for AX.25 header and PID. 
*/ skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1); dptr = skb_put(skb, skb_tailroom(skb)); lci1 = (rose->lci >> 8) & 0x0F; lci2 = (rose->lci >> 0) & 0xFF; switch (frametype) { case ROSE_CALL_REQUEST: *dptr++ = ROSE_GFI | lci1; *dptr++ = lci2; *dptr++ = frametype; *dptr++ = 0xAA; memcpy(dptr, &rose->dest_addr, ROSE_ADDR_LEN); dptr += ROSE_ADDR_LEN; memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN); dptr += ROSE_ADDR_LEN; memcpy(dptr, buffer, faclen); dptr += faclen; break; case ROSE_CALL_ACCEPTED: *dptr++ = ROSE_GFI | lci1; *dptr++ = lci2; *dptr++ = frametype; *dptr++ = 0x00; /* Address length */ *dptr++ = 0; /* Facilities length */ break; case ROSE_CLEAR_REQUEST: *dptr++ = ROSE_GFI | lci1; *dptr++ = lci2; *dptr++ = frametype; *dptr++ = rose->cause; *dptr++ = rose->diagnostic; break; case ROSE_RESET_REQUEST: *dptr++ = ROSE_GFI | lci1; *dptr++ = lci2; *dptr++ = frametype; *dptr++ = ROSE_DTE_ORIGINATED; *dptr++ = 0; break; case ROSE_RR: case ROSE_RNR: *dptr++ = ROSE_GFI | lci1; *dptr++ = lci2; *dptr = frametype; *dptr++ |= (rose->vr << 5) & 0xE0; break; case ROSE_CLEAR_CONFIRMATION: case ROSE_RESET_CONFIRMATION: *dptr++ = ROSE_GFI | lci1; *dptr++ = lci2; *dptr++ = frametype; break; default: printk(KERN_ERR "ROSE: rose_write_internal - invalid frametype %02X\n", frametype); kfree_skb(skb); return; } rose_transmit_link(skb, rose->neighbour); } int rose_decode(struct sk_buff *skb, int *ns, int *nr, int *q, int *d, int *m) { unsigned char *frame; frame = skb->data; *ns = *nr = *q = *d = *m = 0; switch (frame[2]) { case ROSE_CALL_REQUEST: case ROSE_CALL_ACCEPTED: case ROSE_CLEAR_REQUEST: case ROSE_CLEAR_CONFIRMATION: case ROSE_RESET_REQUEST: case ROSE_RESET_CONFIRMATION: return frame[2]; default: break; } if ((frame[2] & 0x1F) == ROSE_RR || (frame[2] & 0x1F) == ROSE_RNR) { *nr = (frame[2] >> 5) & 0x07; return frame[2] & 0x1F; } if ((frame[2] & 0x01) == ROSE_DATA) { *q = (frame[0] & ROSE_Q_BIT) == ROSE_Q_BIT; *d = (frame[0] & ROSE_D_BIT) == ROSE_D_BIT; *m = (frame[2] & ROSE_M_BIT) == ROSE_M_BIT; *nr = (frame[2] >> 5) & 0x07; *ns = (frame[2] >> 1) & 0x07; return ROSE_DATA; } return ROSE_ILLEGAL; } static int rose_parse_national(unsigned char *p, struct rose_facilities_struct *facilities, int len) { unsigned char *pt; unsigned char l, lg, n = 0; int fac_national_digis_received = 0; do { switch (*p & 0xC0) { case 0x00: p += 2; n += 2; len -= 2; break; case 0x40: if (*p == FAC_NATIONAL_RAND) facilities->rand = ((p[1] << 8) & 0xFF00) + ((p[2] << 0) & 0x00FF); p += 3; n += 3; len -= 3; break; case 0x80: p += 4; n += 4; len -= 4; break; case 0xC0: l = p[1]; if (*p == FAC_NATIONAL_DEST_DIGI) { if (!fac_national_digis_received) { memcpy(&facilities->source_digis[0], p + 2, AX25_ADDR_LEN); facilities->source_ndigis = 1; } } else if (*p == FAC_NATIONAL_SRC_DIGI) { if (!fac_national_digis_received) { memcpy(&facilities->dest_digis[0], p + 2, AX25_ADDR_LEN); facilities->dest_ndigis = 1; } } else if (*p == FAC_NATIONAL_FAIL_CALL) { memcpy(&facilities->fail_call, p + 2, AX25_ADDR_LEN); } else if (*p == FAC_NATIONAL_FAIL_ADD) { memcpy(&facilities->fail_addr, p + 3, ROSE_ADDR_LEN); } else if (*p == FAC_NATIONAL_DIGIS) { fac_national_digis_received = 1; facilities->source_ndigis = 0; facilities->dest_ndigis = 0; for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) { if (pt[6] & AX25_HBIT) memcpy(&facilities->dest_digis[facilities->dest_ndigis++], pt, AX25_ADDR_LEN); else memcpy(&facilities->source_digis[facilities->source_ndigis++], pt, AX25_ADDR_LEN); } } p += l + 2; n += l + 
2; len -= l + 2; break; } } while (*p != 0x00 && len > 0); return n; } static int rose_parse_ccitt(unsigned char *p, struct rose_facilities_struct *facilities, int len) { unsigned char l, n = 0; char callsign[11]; do { switch (*p & 0xC0) { case 0x00: p += 2; n += 2; len -= 2; break; case 0x40: p += 3; n += 3; len -= 3; break; case 0x80: p += 4; n += 4; len -= 4; break; case 0xC0: l = p[1]; if (*p == FAC_CCITT_DEST_NSAP) { memcpy(&facilities->source_addr, p + 7, ROSE_ADDR_LEN); memcpy(callsign, p + 12, l - 10); callsign[l - 10] = '\0'; asc2ax(&facilities->source_call, callsign); } if (*p == FAC_CCITT_SRC_NSAP) { memcpy(&facilities->dest_addr, p + 7, ROSE_ADDR_LEN); memcpy(callsign, p + 12, l - 10); callsign[l - 10] = '\0'; asc2ax(&facilities->dest_call, callsign); } p += l + 2; n += l + 2; len -= l + 2; break; } } while (*p != 0x00 && len > 0); return n; } int rose_parse_facilities(unsigned char *p, struct rose_facilities_struct *facilities) { int facilities_len, len; facilities_len = *p++; if (facilities_len == 0) return 0; while (facilities_len > 0) { if (*p == 0x00) { facilities_len--; p++; switch (*p) { case FAC_NATIONAL: /* National */ len = rose_parse_national(p + 1, facilities, facilities_len - 1); facilities_len -= len + 1; p += len + 1; break; case FAC_CCITT: /* CCITT */ len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1); facilities_len -= len + 1; p += len + 1; break; default: printk(KERN_DEBUG "ROSE: rose_parse_facilities - unknown facilities family %02X\n", *p); facilities_len--; p++; break; } } else break; /* Error in facilities format */ } return 1; } static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose) { unsigned char *p = buffer + 1; char *callsign; char buf[11]; int len, nb; /* National Facilities */ if (rose->rand != 0 || rose->source_ndigis == 1 || rose->dest_ndigis == 1) { *p++ = 0x00; *p++ = FAC_NATIONAL; if (rose->rand != 0) { *p++ = FAC_NATIONAL_RAND; *p++ = (rose->rand >> 8) & 0xFF; *p++ = (rose->rand >> 0) & 0xFF; } /* Sent before older facilities */ if ((rose->source_ndigis > 0) || (rose->dest_ndigis > 0)) { int maxdigi = 0; *p++ = FAC_NATIONAL_DIGIS; *p++ = AX25_ADDR_LEN * (rose->source_ndigis + rose->dest_ndigis); for (nb = 0 ; nb < rose->source_ndigis ; nb++) { if (++maxdigi >= ROSE_MAX_DIGIS) break; memcpy(p, &rose->source_digis[nb], AX25_ADDR_LEN); p[6] |= AX25_HBIT; p += AX25_ADDR_LEN; } for (nb = 0 ; nb < rose->dest_ndigis ; nb++) { if (++maxdigi >= ROSE_MAX_DIGIS) break; memcpy(p, &rose->dest_digis[nb], AX25_ADDR_LEN); p[6] &= ~AX25_HBIT; p += AX25_ADDR_LEN; } } /* For compatibility */ if (rose->source_ndigis > 0) { *p++ = FAC_NATIONAL_SRC_DIGI; *p++ = AX25_ADDR_LEN; memcpy(p, &rose->source_digis[0], AX25_ADDR_LEN); p += AX25_ADDR_LEN; } /* For compatibility */ if (rose->dest_ndigis > 0) { *p++ = FAC_NATIONAL_DEST_DIGI; *p++ = AX25_ADDR_LEN; memcpy(p, &rose->dest_digis[0], AX25_ADDR_LEN); p += AX25_ADDR_LEN; } } *p++ = 0x00; *p++ = FAC_CCITT; *p++ = FAC_CCITT_DEST_NSAP; callsign = ax2asc(buf, &rose->dest_call); *p++ = strlen(callsign) + 10; *p++ = (strlen(callsign) + 9) * 2; /* ??? */ *p++ = 0x47; *p++ = 0x00; *p++ = 0x11; *p++ = ROSE_ADDR_LEN * 2; memcpy(p, &rose->dest_addr, ROSE_ADDR_LEN); p += ROSE_ADDR_LEN; memcpy(p, callsign, strlen(callsign)); p += strlen(callsign); *p++ = FAC_CCITT_SRC_NSAP; callsign = ax2asc(buf, &rose->source_call); *p++ = strlen(callsign) + 10; *p++ = (strlen(callsign) + 9) * 2; /* ??? 
*/ *p++ = 0x47; *p++ = 0x00; *p++ = 0x11; *p++ = ROSE_ADDR_LEN * 2; memcpy(p, &rose->source_addr, ROSE_ADDR_LEN); p += ROSE_ADDR_LEN; memcpy(p, callsign, strlen(callsign)); p += strlen(callsign); len = p - buffer; buffer[0] = len - 1; return len; } void rose_disconnect(struct sock *sk, int reason, int cause, int diagnostic) { struct rose_sock *rose = rose_sk(sk); rose_stop_timer(sk); rose_stop_idletimer(sk); rose_clear_queues(sk); rose->lci = 0; rose->state = ROSE_STATE_0; if (cause != -1) rose->cause = cause; if (diagnostic != -1) rose->diagnostic = diagnostic; sk->sk_state = TCP_CLOSE; sk->sk_err = reason; sk->sk_shutdown |= SEND_SHUTDOWN; if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); } }
gpl-2.0
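rose_parse_national() and rose_parse_ccitt() above both rely on the X.25-style facilities encoding in which the top two bits of a parameter code select its length: 0x00 means one value byte, 0x40 two, 0x80 three, and 0xC0 a variable-length field whose size follows in the next byte. The sketch below walks a small invented facilities buffer using only that length rule; it is an illustration of the length classes, not a parser for real ROSE frames, and the sample bytes are made up.

/*
 * Illustrative sketch only: shows how the top two bits of a
 * facilities parameter code select its size, mirroring the skip
 * logic of the parsers above. The sample buffer is invented.
 */
#include <stdio.h>

static int demo_param_len(const unsigned char *p)
{
	switch (*p & 0xC0) {
	case 0x00: return 2;            /* code + one value byte */
	case 0x40: return 3;            /* code + two value bytes */
	case 0x80: return 4;            /* code + three value bytes */
	default:   return p[1] + 2;     /* 0xC0: code + length byte + data */
	}
}

int main(void)
{
	/* one single-byte param, one two-byte param, one variable param */
	const unsigned char facs[] = { 0x01, 0xaa,
				       0x42, 0xde, 0xad,
				       0xc3, 0x02, 0x11, 0x22 };
	size_t pos = 0;

	while (pos < sizeof(facs)) {
		int len = demo_param_len(&facs[pos]);

		printf("param 0x%02x occupies %d bytes\n", facs[pos], len);
		pos += len;
	}
	return 0;
}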
arjen75/ics-lge-kernel-msm7x27-chick
drivers/w1/masters/ds1wm.c
1048
11715
/* * 1-wire busmaster driver for DS1WM and ASICs with embedded DS1WMs * such as HP iPAQs (including h5xxx, h2200, and devices with ASIC3 * like hx4700). * * Copyright (c) 2004-2005, Szabolcs Gyurko <szabolcs.gyurko@tlt.hu> * Copyright (c) 2004-2007, Matt Reimer <mreimer@vpop.net> * * Use consistent with the GNU GPL is permitted, * provided that this copyright notice is * preserved in its entirety in all copies and derived works. */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/pm.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/mfd/core.h> #include <linux/mfd/ds1wm.h> #include <linux/slab.h> #include <asm/io.h> #include "../w1.h" #include "../w1_int.h" #define DS1WM_CMD 0x00 /* R/W 4 bits command */ #define DS1WM_DATA 0x01 /* R/W 8 bits, transmit/receive buffer */ #define DS1WM_INT 0x02 /* R/W interrupt status */ #define DS1WM_INT_EN 0x03 /* R/W interrupt enable */ #define DS1WM_CLKDIV 0x04 /* R/W 5 bits of divisor and pre-scale */ #define DS1WM_CMD_1W_RESET (1 << 0) /* force reset on 1-wire bus */ #define DS1WM_CMD_SRA (1 << 1) /* enable Search ROM accelerator mode */ #define DS1WM_CMD_DQ_OUTPUT (1 << 2) /* write only - forces bus low */ #define DS1WM_CMD_DQ_INPUT (1 << 3) /* read only - reflects state of bus */ #define DS1WM_CMD_RST (1 << 5) /* software reset */ #define DS1WM_CMD_OD (1 << 7) /* overdrive */ #define DS1WM_INT_PD (1 << 0) /* presence detect */ #define DS1WM_INT_PDR (1 << 1) /* presence detect result */ #define DS1WM_INT_TBE (1 << 2) /* tx buffer empty */ #define DS1WM_INT_TSRE (1 << 3) /* tx shift register empty */ #define DS1WM_INT_RBF (1 << 4) /* rx buffer full */ #define DS1WM_INT_RSRF (1 << 5) /* rx shift register full */ #define DS1WM_INTEN_EPD (1 << 0) /* enable presence detect int */ #define DS1WM_INTEN_IAS (1 << 1) /* INTR active state */ #define DS1WM_INTEN_ETBE (1 << 2) /* enable tx buffer empty int */ #define DS1WM_INTEN_ETMT (1 << 3) /* enable tx shift register empty int */ #define DS1WM_INTEN_ERBF (1 << 4) /* enable rx buffer full int */ #define DS1WM_INTEN_ERSRF (1 << 5) /* enable rx shift register full int */ #define DS1WM_INTEN_DQO (1 << 6) /* enable direct bus driving ops */ #define DS1WM_TIMEOUT (HZ * 5) static struct { unsigned long freq; unsigned long divisor; } freq[] = { { 4000000, 0x8 }, { 5000000, 0x2 }, { 6000000, 0x5 }, { 7000000, 0x3 }, { 8000000, 0xc }, { 10000000, 0x6 }, { 12000000, 0x9 }, { 14000000, 0x7 }, { 16000000, 0x10 }, { 20000000, 0xa }, { 24000000, 0xd }, { 28000000, 0xb }, { 32000000, 0x14 }, { 40000000, 0xe }, { 48000000, 0x11 }, { 56000000, 0xf }, { 64000000, 0x18 }, { 80000000, 0x12 }, { 96000000, 0x15 }, { 112000000, 0x13 }, { 128000000, 0x1c }, }; struct ds1wm_data { void __iomem *map; int bus_shift; /* # of shifts to calc register offsets */ struct platform_device *pdev; struct mfd_cell *cell; int irq; int active_high; int slave_present; void *reset_complete; void *read_complete; void *write_complete; u8 read_byte; /* last byte received */ }; static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg, u8 val) { __raw_writeb(val, ds1wm_data->map + (reg << ds1wm_data->bus_shift)); } static inline u8 ds1wm_read_register(struct ds1wm_data *ds1wm_data, u32 reg) { return __raw_readb(ds1wm_data->map + (reg << ds1wm_data->bus_shift)); } static irqreturn_t ds1wm_isr(int isr, void *data) { struct ds1wm_data *ds1wm_data = data; u8 intr = ds1wm_read_register(ds1wm_data, DS1WM_INT); ds1wm_data->slave_present = (intr & 
DS1WM_INT_PDR) ? 0 : 1; if ((intr & DS1WM_INT_PD) && ds1wm_data->reset_complete) complete(ds1wm_data->reset_complete); if ((intr & DS1WM_INT_TSRE) && ds1wm_data->write_complete) complete(ds1wm_data->write_complete); if (intr & DS1WM_INT_RBF) { ds1wm_data->read_byte = ds1wm_read_register(ds1wm_data, DS1WM_DATA); if (ds1wm_data->read_complete) complete(ds1wm_data->read_complete); } return IRQ_HANDLED; } static int ds1wm_reset(struct ds1wm_data *ds1wm_data) { unsigned long timeleft; DECLARE_COMPLETION_ONSTACK(reset_done); ds1wm_data->reset_complete = &reset_done; ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, DS1WM_INTEN_EPD | (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0)); ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_1W_RESET); timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT); ds1wm_data->reset_complete = NULL; if (!timeleft) { dev_err(&ds1wm_data->pdev->dev, "reset failed\n"); return 1; } /* Wait for the end of the reset. According to the specs, the time * from when the interrupt is asserted to the end of the reset is: * tRSTH - tPDH - tPDL - tPDI * 625 us - 60 us - 240 us - 100 ns = 324.9 us * * We'll wait a bit longer just to be sure. * Was udelay(500), but if it is going to busywait the cpu that long, * might as well come back later. */ msleep(1); ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, DS1WM_INTEN_ERBF | DS1WM_INTEN_ETMT | DS1WM_INTEN_EPD | (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0)); if (!ds1wm_data->slave_present) { dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n"); return 1; } return 0; } static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data) { DECLARE_COMPLETION_ONSTACK(write_done); ds1wm_data->write_complete = &write_done; ds1wm_write_register(ds1wm_data, DS1WM_DATA, data); wait_for_completion_timeout(&write_done, DS1WM_TIMEOUT); ds1wm_data->write_complete = NULL; return 0; } static int ds1wm_read(struct ds1wm_data *ds1wm_data, unsigned char write_data) { DECLARE_COMPLETION_ONSTACK(read_done); ds1wm_data->read_complete = &read_done; ds1wm_write(ds1wm_data, write_data); wait_for_completion_timeout(&read_done, DS1WM_TIMEOUT); ds1wm_data->read_complete = NULL; return ds1wm_data->read_byte; } static int ds1wm_find_divisor(int gclk) { int i; for (i = 0; i < ARRAY_SIZE(freq); i++) if (gclk <= freq[i].freq) return freq[i].divisor; return 0; } static void ds1wm_up(struct ds1wm_data *ds1wm_data) { int divisor; struct ds1wm_driver_data *plat = ds1wm_data->cell->driver_data; if (ds1wm_data->cell->enable) ds1wm_data->cell->enable(ds1wm_data->pdev); divisor = ds1wm_find_divisor(plat->clock_rate); if (divisor == 0) { dev_err(&ds1wm_data->pdev->dev, "no suitable divisor for %dHz clock\n", plat->clock_rate); return; } ds1wm_write_register(ds1wm_data, DS1WM_CLKDIV, divisor); /* Let the w1 clock stabilize. */ msleep(1); ds1wm_reset(ds1wm_data); } static void ds1wm_down(struct ds1wm_data *ds1wm_data) { ds1wm_reset(ds1wm_data); /* Disable interrupts. */ ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, ds1wm_data->active_high ? 
DS1WM_INTEN_IAS : 0); if (ds1wm_data->cell->disable) ds1wm_data->cell->disable(ds1wm_data->pdev); } /* --------------------------------------------------------------------- */ /* w1 methods */ static u8 ds1wm_read_byte(void *data) { struct ds1wm_data *ds1wm_data = data; return ds1wm_read(ds1wm_data, 0xff); } static void ds1wm_write_byte(void *data, u8 byte) { struct ds1wm_data *ds1wm_data = data; ds1wm_write(ds1wm_data, byte); } static u8 ds1wm_reset_bus(void *data) { struct ds1wm_data *ds1wm_data = data; ds1wm_reset(ds1wm_data); return 0; } static void ds1wm_search(void *data, struct w1_master *master_dev, u8 search_type, w1_slave_found_callback slave_found) { struct ds1wm_data *ds1wm_data = data; int i; unsigned long long rom_id; /* XXX We need to iterate for multiple devices per the DS1WM docs. * See http://www.maxim-ic.com/appnotes.cfm/appnote_number/120. */ if (ds1wm_reset(ds1wm_data)) return; ds1wm_write(ds1wm_data, search_type); ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_SRA); for (rom_id = 0, i = 0; i < 16; i++) { unsigned char resp, r, d; resp = ds1wm_read(ds1wm_data, 0x00); r = ((resp & 0x02) >> 1) | ((resp & 0x08) >> 2) | ((resp & 0x20) >> 3) | ((resp & 0x80) >> 4); d = ((resp & 0x01) >> 0) | ((resp & 0x04) >> 1) | ((resp & 0x10) >> 2) | ((resp & 0x40) >> 3); rom_id |= (unsigned long long) r << (i * 4); } dev_dbg(&ds1wm_data->pdev->dev, "found 0x%08llX\n", rom_id); ds1wm_write_register(ds1wm_data, DS1WM_CMD, ~DS1WM_CMD_SRA); ds1wm_reset(ds1wm_data); slave_found(master_dev, rom_id); } /* --------------------------------------------------------------------- */ static struct w1_bus_master ds1wm_master = { .read_byte = ds1wm_read_byte, .write_byte = ds1wm_write_byte, .reset_bus = ds1wm_reset_bus, .search = ds1wm_search, }; static int ds1wm_probe(struct platform_device *pdev) { struct ds1wm_data *ds1wm_data; struct ds1wm_driver_data *plat; struct resource *res; struct mfd_cell *cell; int ret; if (!pdev) return -ENODEV; cell = pdev->dev.platform_data; if (!cell) return -ENODEV; ds1wm_data = kzalloc(sizeof(*ds1wm_data), GFP_KERNEL); if (!ds1wm_data) return -ENOMEM; platform_set_drvdata(pdev, ds1wm_data); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { ret = -ENXIO; goto err0; } ds1wm_data->map = ioremap(res->start, resource_size(res)); if (!ds1wm_data->map) { ret = -ENOMEM; goto err0; } plat = cell->driver_data; /* calculate bus shift from mem resource */ ds1wm_data->bus_shift = resource_size(res) >> 3; ds1wm_data->pdev = pdev; ds1wm_data->cell = cell; res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { ret = -ENXIO; goto err1; } ds1wm_data->irq = res->start; ds1wm_data->active_high = plat->active_high; if (res->flags & IORESOURCE_IRQ_HIGHEDGE) set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING); if (res->flags & IORESOURCE_IRQ_LOWEDGE) set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING); ret = request_irq(ds1wm_data->irq, ds1wm_isr, IRQF_DISABLED, "ds1wm", ds1wm_data); if (ret) goto err1; ds1wm_up(ds1wm_data); ds1wm_master.data = (void *)ds1wm_data; ret = w1_add_master_device(&ds1wm_master); if (ret) goto err2; return 0; err2: ds1wm_down(ds1wm_data); free_irq(ds1wm_data->irq, ds1wm_data); err1: iounmap(ds1wm_data->map); err0: kfree(ds1wm_data); return ret; } #ifdef CONFIG_PM static int ds1wm_suspend(struct platform_device *pdev, pm_message_t state) { struct ds1wm_data *ds1wm_data = platform_get_drvdata(pdev); ds1wm_down(ds1wm_data); return 0; } static int ds1wm_resume(struct platform_device *pdev) { struct ds1wm_data *ds1wm_data = 
platform_get_drvdata(pdev); ds1wm_up(ds1wm_data); return 0; } #else #define ds1wm_suspend NULL #define ds1wm_resume NULL #endif static int ds1wm_remove(struct platform_device *pdev) { struct ds1wm_data *ds1wm_data = platform_get_drvdata(pdev); w1_remove_master_device(&ds1wm_master); ds1wm_down(ds1wm_data); free_irq(ds1wm_data->irq, ds1wm_data); iounmap(ds1wm_data->map); kfree(ds1wm_data); return 0; } static struct platform_driver ds1wm_driver = { .driver = { .name = "ds1wm", }, .probe = ds1wm_probe, .remove = ds1wm_remove, .suspend = ds1wm_suspend, .resume = ds1wm_resume }; static int __init ds1wm_init(void) { printk("DS1WM w1 busmaster driver - (c) 2004 Szabolcs Gyurko\n"); return platform_driver_register(&ds1wm_driver); } static void __exit ds1wm_exit(void) { platform_driver_unregister(&ds1wm_driver); } module_init(ds1wm_init); module_exit(ds1wm_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>, " "Matt Reimer <mreimer@vpop.net>"); MODULE_DESCRIPTION("DS1WM w1 busmaster driver");
gpl-2.0
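In ds1wm_search() above, each byte read back in Search ROM Accelerator mode interleaves four ROM-id bits (the odd bit positions) with four discrepancy bits (the even positions), and sixteen such reads assemble the 64-bit ROM id four bits at a time. The standalone sketch below de-interleaves one response byte the same way the driver does; the sample byte and the demo_* names are made up for illustration and are not part of the driver.

/*
 * Illustrative sketch only: de-interleaves one DS1WM Search ROM
 * Accelerator response byte exactly as ds1wm_search() does. On the
 * real hardware, 16 such bytes build the 64-bit ROM id.
 */
#include <stdio.h>

static void demo_deinterleave(unsigned char resp,
			      unsigned char *rom_nibble,
			      unsigned char *discrepancy_nibble)
{
	/* ROM bits live in the odd bit positions of the response... */
	*rom_nibble = ((resp & 0x02) >> 1) | ((resp & 0x08) >> 2) |
		      ((resp & 0x20) >> 3) | ((resp & 0x80) >> 4);
	/* ...and the discrepancy flags in the even positions. */
	*discrepancy_nibble = ((resp & 0x01) >> 0) | ((resp & 0x04) >> 1) |
			      ((resp & 0x10) >> 2) | ((resp & 0x40) >> 3);
}

int main(void)
{
	unsigned char rom, disc;

	demo_deinterleave(0xa5, &rom, &disc);	/* 1010 0101, interleaved */
	printf("rom nibble 0x%x, discrepancy nibble 0x%x\n", rom, disc);
	return 0;
}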