Dataset schema (one row per source file):

repo_name: string (length 5-85)
path: string (length 3-252)
copies: string (length 1-5)
size: string (length 4-6)
content: string (length 922-999k)
license: string (one of 15 classes)
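For orientation, here is a minimal sketch of how one might load and iterate a dataset with this schema using the Hugging Face `datasets` library. The dataset identifier "org/source-code" is a placeholder assumption, not the actual name of this dataset.

    # A minimal sketch, assuming the Hugging Face `datasets` library is
    # installed and the dataset is published under the placeholder name
    # "org/source-code".
    from datasets import load_dataset

    ds = load_dataset("org/source-code", split="train", streaming=True)

    for row in ds:
        # Each row carries the six fields described by the schema above.
        print(row["repo_name"], row["path"], row["license"], row["size"])
        # "content" holds the raw file text; "copies" appears to be a
        # de-duplication count stored as a string.
        first_line = row["content"].splitlines()[0] if row["content"] else ""
        print(first_line)
        break  # inspect only the first row

Streaming mode avoids downloading the full dataset up front, which matters here since the content column runs to hundreds of kilobytes per row.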
repo_name: lostemp/kernel-2.6.30
path: fs/gfs2/super.c
copies: 29
size: 10062
content:
/* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License version 2. */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/crc32.h> #include <linux/gfs2_ondisk.h> #include <linux/bio.h> #include "gfs2.h" #include "incore.h" #include "bmap.h" #include "dir.h" #include "glock.h" #include "glops.h" #include "inode.h" #include "log.h" #include "meta_io.h" #include "quota.h" #include "recovery.h" #include "rgrp.h" #include "super.h" #include "trans.h" #include "util.h" /** * gfs2_jindex_free - Clear all the journal index information * @sdp: The GFS2 superblock * */ void gfs2_jindex_free(struct gfs2_sbd *sdp) { struct list_head list, *head; struct gfs2_jdesc *jd; struct gfs2_journal_extent *jext; spin_lock(&sdp->sd_jindex_spin); list_add(&list, &sdp->sd_jindex_list); list_del_init(&sdp->sd_jindex_list); sdp->sd_journals = 0; spin_unlock(&sdp->sd_jindex_spin); while (!list_empty(&list)) { jd = list_entry(list.next, struct gfs2_jdesc, jd_list); head = &jd->extent_list; while (!list_empty(head)) { jext = list_entry(head->next, struct gfs2_journal_extent, extent_list); list_del(&jext->extent_list); kfree(jext); } list_del(&jd->jd_list); iput(jd->jd_inode); kfree(jd); } } static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid) { struct gfs2_jdesc *jd; int found = 0; list_for_each_entry(jd, head, jd_list) { if (jd->jd_jid == jid) { found = 1; break; } } if (!found) jd = NULL; return jd; } struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid) { struct gfs2_jdesc *jd; spin_lock(&sdp->sd_jindex_spin); jd = jdesc_find_i(&sdp->sd_jindex_list, jid); spin_unlock(&sdp->sd_jindex_spin); return jd; } int gfs2_jdesc_check(struct gfs2_jdesc *jd) { struct gfs2_inode *ip = GFS2_I(jd->jd_inode); struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); int ar; int error; if (ip->i_disksize < (8 << 20) || ip->i_disksize > (1 << 30) || (ip->i_disksize & (sdp->sd_sb.sb_bsize - 1))) { gfs2_consist_inode(ip); return -EIO; } jd->jd_blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift; error = gfs2_write_alloc_required(ip, 0, ip->i_disksize, &ar); if (!error && ar) { gfs2_consist_inode(ip); error = -EIO; } return error; } /** * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one * @sdp: the filesystem * * Returns: errno */ int gfs2_make_fs_rw(struct gfs2_sbd *sdp) { struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); struct gfs2_glock *j_gl = ip->i_gl; struct gfs2_holder t_gh; struct gfs2_log_header_host head; int error; error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &t_gh); if (error) return error; j_gl->gl_ops->go_inval(j_gl, DIO_METADATA); error = gfs2_find_jhead(sdp->sd_jdesc, &head); if (error) goto fail; if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) { gfs2_consist(sdp); error = -EIO; goto fail; } /* Initialize some head of the log stuff */ sdp->sd_log_sequence = head.lh_sequence + 1; gfs2_log_pointers_init(sdp, head.lh_blkno); error = gfs2_quota_init(sdp); if (error) goto fail; set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); gfs2_glock_dq_uninit(&t_gh); return 0; fail: t_gh.gh_flags |= GL_NOCACHE; gfs2_glock_dq_uninit(&t_gh); return error; } static void gfs2_statfs_change_in(struct gfs2_statfs_change_host 
*sc, const void *buf) { const struct gfs2_statfs_change *str = buf; sc->sc_total = be64_to_cpu(str->sc_total); sc->sc_free = be64_to_cpu(str->sc_free); sc->sc_dinodes = be64_to_cpu(str->sc_dinodes); } static void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf) { struct gfs2_statfs_change *str = buf; str->sc_total = cpu_to_be64(sc->sc_total); str->sc_free = cpu_to_be64(sc->sc_free); str->sc_dinodes = cpu_to_be64(sc->sc_dinodes); } int gfs2_statfs_init(struct gfs2_sbd *sdp) { struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; struct buffer_head *m_bh, *l_bh; struct gfs2_holder gh; int error; error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE, &gh); if (error) return error; error = gfs2_meta_inode_buffer(m_ip, &m_bh); if (error) goto out; if (sdp->sd_args.ar_spectator) { spin_lock(&sdp->sd_statfs_spin); gfs2_statfs_change_in(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode)); spin_unlock(&sdp->sd_statfs_spin); } else { error = gfs2_meta_inode_buffer(l_ip, &l_bh); if (error) goto out_m_bh; spin_lock(&sdp->sd_statfs_spin); gfs2_statfs_change_in(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode)); gfs2_statfs_change_in(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode)); spin_unlock(&sdp->sd_statfs_spin); brelse(l_bh); } out_m_bh: brelse(m_bh); out: gfs2_glock_dq_uninit(&gh); return 0; } void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free, s64 dinodes) { struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; struct buffer_head *l_bh; int error; error = gfs2_meta_inode_buffer(l_ip, &l_bh); if (error) return; gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1); spin_lock(&sdp->sd_statfs_spin); l_sc->sc_total += total; l_sc->sc_free += free; l_sc->sc_dinodes += dinodes; gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode)); spin_unlock(&sdp->sd_statfs_spin); brelse(l_bh); } int gfs2_statfs_sync(struct gfs2_sbd *sdp) { struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; struct gfs2_holder gh; struct buffer_head *m_bh, *l_bh; int error; error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE, &gh); if (error) return error; error = gfs2_meta_inode_buffer(m_ip, &m_bh); if (error) goto out; spin_lock(&sdp->sd_statfs_spin); gfs2_statfs_change_in(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode)); if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) { spin_unlock(&sdp->sd_statfs_spin); goto out_bh; } spin_unlock(&sdp->sd_statfs_spin); error = gfs2_meta_inode_buffer(l_ip, &l_bh); if (error) goto out_bh; error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0); if (error) goto out_bh2; gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1); spin_lock(&sdp->sd_statfs_spin); m_sc->sc_total += l_sc->sc_total; m_sc->sc_free += l_sc->sc_free; m_sc->sc_dinodes += l_sc->sc_dinodes; memset(l_sc, 0, sizeof(struct gfs2_statfs_change)); memset(l_bh->b_data + sizeof(struct gfs2_dinode), 0, sizeof(struct gfs2_statfs_change)); spin_unlock(&sdp->sd_statfs_spin); gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1); gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode)); gfs2_trans_end(sdp); out_bh2: brelse(l_bh); out_bh: brelse(m_bh); out: 
gfs2_glock_dq_uninit(&gh); return error; } struct lfcc { struct list_head list; struct gfs2_holder gh; }; /** * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all * journals are clean * @sdp: the file system * @state: the state to put the transaction lock into * @t_gh: the hold on the transaction lock * * Returns: errno */ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp, struct gfs2_holder *t_gh) { struct gfs2_inode *ip; struct gfs2_jdesc *jd; struct lfcc *lfcc; LIST_HEAD(list); struct gfs2_log_header_host lh; int error; list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL); if (!lfcc) { error = -ENOMEM; goto out; } ip = GFS2_I(jd->jd_inode); error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh); if (error) { kfree(lfcc); goto out; } list_add(&lfcc->list, &list); } error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_DEFERRED, GL_NOCACHE, t_gh); list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { error = gfs2_jdesc_check(jd); if (error) break; error = gfs2_find_jhead(jd, &lh); if (error) break; if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) { error = -EBUSY; break; } } if (error) gfs2_glock_dq_uninit(t_gh); out: while (!list_empty(&list)) { lfcc = list_entry(list.next, struct lfcc, list); list_del(&lfcc->list); gfs2_glock_dq_uninit(&lfcc->gh); kfree(lfcc); } return error; } /** * gfs2_freeze_fs - freezes the file system * @sdp: the file system * * This function flushes data and meta data for all machines by * aquiring the transaction log exclusively. All journals are * ensured to be in a clean state as well. * * Returns: errno */ int gfs2_freeze_fs(struct gfs2_sbd *sdp) { int error = 0; mutex_lock(&sdp->sd_freeze_lock); if (!sdp->sd_freeze_count++) { error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh); if (error) sdp->sd_freeze_count--; } mutex_unlock(&sdp->sd_freeze_lock); return error; } /** * gfs2_unfreeze_fs - unfreezes the file system * @sdp: the file system * * This function allows the file system to proceed by unlocking * the exclusively held transaction lock. Other GFS2 nodes are * now free to acquire the lock shared and go on with their lives. * */ void gfs2_unfreeze_fs(struct gfs2_sbd *sdp) { mutex_lock(&sdp->sd_freeze_lock); if (sdp->sd_freeze_count && !--sdp->sd_freeze_count) gfs2_glock_dq_uninit(&sdp->sd_freeze_gh); mutex_unlock(&sdp->sd_freeze_lock); }
license: gpl-2.0

repo_name: RWTH-OS/linux
path: net/sctp/outqueue.c
copies: 29
size: 56377
content:
/* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001-2003 Intel Corp. * * This file is part of the SCTP kernel implementation * * These functions implement the sctp_outq class. The outqueue handles * bundling and queueing of outgoing SCTP chunks. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * Perry Melange <pmelange@null.cc.uic.edu> * Xingang Guo <xingang.guo@intel.com> * Hui Huang <hui.huang@nokia.com> * Sridhar Samudrala <sri@us.ibm.com> * Jon Grimm <jgrimm@us.ibm.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/list.h> /* For struct list_head */ #include <linux/socket.h> #include <linux/ip.h> #include <linux/slab.h> #include <net/sock.h> /* For skb_set_owner_w */ #include <net/sctp/sctp.h> #include <net/sctp/sm.h> /* Declare internal functions here. */ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn); static void sctp_check_transmitted(struct sctp_outq *q, struct list_head *transmitted_queue, struct sctp_transport *transport, union sctp_addr *saddr, struct sctp_sackhdr *sack, __u32 *highest_new_tsn); static void sctp_mark_missing(struct sctp_outq *q, struct list_head *transmitted_queue, struct sctp_transport *transport, __u32 highest_new_tsn, int count_of_newacks); static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn); static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp); /* Add data to the front of the queue. */ static inline void sctp_outq_head_data(struct sctp_outq *q, struct sctp_chunk *ch) { list_add(&ch->list, &q->out_chunk_list); q->out_qlen += ch->skb->len; } /* Take data from the front of the queue. */ static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q) { struct sctp_chunk *ch = NULL; if (!list_empty(&q->out_chunk_list)) { struct list_head *entry = q->out_chunk_list.next; ch = list_entry(entry, struct sctp_chunk, list); list_del_init(entry); q->out_qlen -= ch->skb->len; } return ch; } /* Add data chunk to the end of the queue. */ static inline void sctp_outq_tail_data(struct sctp_outq *q, struct sctp_chunk *ch) { list_add_tail(&ch->list, &q->out_chunk_list); q->out_qlen += ch->skb->len; } /* * SFR-CACC algorithm: * D) If count_of_newacks is greater than or equal to 2 * and t was not sent to the current primary then the * sender MUST NOT increment missing report count for t. 
*/ static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary, struct sctp_transport *transport, int count_of_newacks) { if (count_of_newacks >= 2 && transport != primary) return 1; return 0; } /* * SFR-CACC algorithm: * F) If count_of_newacks is less than 2, let d be the * destination to which t was sent. If cacc_saw_newack * is 0 for destination d, then the sender MUST NOT * increment missing report count for t. */ static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport, int count_of_newacks) { if (count_of_newacks < 2 && (transport && !transport->cacc.cacc_saw_newack)) return 1; return 0; } /* * SFR-CACC algorithm: * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD * execute steps C, D, F. * * C has been implemented in sctp_outq_sack */ static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary, struct sctp_transport *transport, int count_of_newacks) { if (!primary->cacc.cycling_changeover) { if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks)) return 1; if (sctp_cacc_skip_3_1_f(transport, count_of_newacks)) return 1; return 0; } return 0; } /* * SFR-CACC algorithm: * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less * than next_tsn_at_change of the current primary, then * the sender MUST NOT increment missing report count * for t. */ static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn) { if (primary->cacc.cycling_changeover && TSN_lt(tsn, primary->cacc.next_tsn_at_change)) return 1; return 0; } /* * SFR-CACC algorithm: * 3) If the missing report count for TSN t is to be * incremented according to [RFC2960] and * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set, * then the sender MUST further execute steps 3.1 and * 3.2 to determine if the missing report count for * TSN t SHOULD NOT be incremented. * * 3.3) If 3.1 and 3.2 do not dictate that the missing * report count for t should not be incremented, then * the sender SHOULD increment missing report count for * t (according to [RFC2960] and [SCTP_STEWART_2002]). */ static inline int sctp_cacc_skip(struct sctp_transport *primary, struct sctp_transport *transport, int count_of_newacks, __u32 tsn) { if (primary->cacc.changeover_active && (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) || sctp_cacc_skip_3_2(primary, tsn))) return 1; return 0; } /* Initialize an existing sctp_outq. This does the boring stuff. * You still need to define handlers if you really want to DO * something with this structure... */ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q) { memset(q, 0, sizeof(struct sctp_outq)); q->asoc = asoc; INIT_LIST_HEAD(&q->out_chunk_list); INIT_LIST_HEAD(&q->control_chunk_list); INIT_LIST_HEAD(&q->retransmit); INIT_LIST_HEAD(&q->sacked); INIT_LIST_HEAD(&q->abandoned); } /* Free the outqueue structure and any related pending chunks. */ static void __sctp_outq_teardown(struct sctp_outq *q) { struct sctp_transport *transport; struct list_head *lchunk, *temp; struct sctp_chunk *chunk, *tmp; /* Throw away unacknowledged chunks. */ list_for_each_entry(transport, &q->asoc->peer.transport_addr_list, transports) { while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) { chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list); /* Mark as part of a failed message. */ sctp_chunk_fail(chunk, q->error); sctp_chunk_free(chunk); } } /* Throw away chunks that have been gap ACKed. 
*/ list_for_each_safe(lchunk, temp, &q->sacked) { list_del_init(lchunk); chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list); sctp_chunk_fail(chunk, q->error); sctp_chunk_free(chunk); } /* Throw away any chunks in the retransmit queue. */ list_for_each_safe(lchunk, temp, &q->retransmit) { list_del_init(lchunk); chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list); sctp_chunk_fail(chunk, q->error); sctp_chunk_free(chunk); } /* Throw away any chunks that are in the abandoned queue. */ list_for_each_safe(lchunk, temp, &q->abandoned) { list_del_init(lchunk); chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list); sctp_chunk_fail(chunk, q->error); sctp_chunk_free(chunk); } /* Throw away any leftover data chunks. */ while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { /* Mark as send failure. */ sctp_chunk_fail(chunk, q->error); sctp_chunk_free(chunk); } /* Throw away any leftover control chunks. */ list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { list_del_init(&chunk->list); sctp_chunk_free(chunk); } } void sctp_outq_teardown(struct sctp_outq *q) { __sctp_outq_teardown(q); sctp_outq_init(q->asoc, q); } /* Free the outqueue structure and any related pending chunks. */ void sctp_outq_free(struct sctp_outq *q) { /* Throw away leftover chunks. */ __sctp_outq_teardown(q); } /* Put a new chunk in an sctp_outq. */ void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp) { struct net *net = sock_net(q->asoc->base.sk); pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk, chunk && chunk->chunk_hdr ? sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : "illegal chunk"); /* If it is data, queue it up, otherwise, send it * immediately. */ if (sctp_chunk_is_data(chunk)) { pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s])\n", __func__, q, chunk, chunk && chunk->chunk_hdr ? sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : "illegal chunk"); sctp_outq_tail_data(q, chunk); if (chunk->asoc->peer.prsctp_capable && SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags)) chunk->asoc->sent_cnt_removable++; if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS); else SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS); } else { list_add_tail(&chunk->list, &q->control_chunk_list); SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); } if (!q->cork) sctp_outq_flush(q, 0, gfp); } /* Insert a chunk into the sorted list based on the TSNs. The retransmit list * and the abandoned list are in ascending order. 
*/ static void sctp_insert_list(struct list_head *head, struct list_head *new) { struct list_head *pos; struct sctp_chunk *nchunk, *lchunk; __u32 ntsn, ltsn; int done = 0; nchunk = list_entry(new, struct sctp_chunk, transmitted_list); ntsn = ntohl(nchunk->subh.data_hdr->tsn); list_for_each(pos, head) { lchunk = list_entry(pos, struct sctp_chunk, transmitted_list); ltsn = ntohl(lchunk->subh.data_hdr->tsn); if (TSN_lt(ntsn, ltsn)) { list_add(new, pos->prev); done = 1; break; } } if (!done) list_add_tail(new, head); } static int sctp_prsctp_prune_sent(struct sctp_association *asoc, struct sctp_sndrcvinfo *sinfo, struct list_head *queue, int msg_len) { struct sctp_chunk *chk, *temp; list_for_each_entry_safe(chk, temp, queue, transmitted_list) { struct sctp_stream_out *streamout; if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) || chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive) continue; list_del_init(&chk->transmitted_list); sctp_insert_list(&asoc->outqueue.abandoned, &chk->transmitted_list); streamout = &asoc->stream.out[chk->sinfo.sinfo_stream]; asoc->sent_cnt_removable--; asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++; streamout->abandoned_sent[SCTP_PR_INDEX(PRIO)]++; if (!chk->tsn_gap_acked) { if (chk->transport) chk->transport->flight_size -= sctp_data_size(chk); asoc->outqueue.outstanding_bytes -= sctp_data_size(chk); } msg_len -= SCTP_DATA_SNDSIZE(chk) + sizeof(struct sk_buff) + sizeof(struct sctp_chunk); if (msg_len <= 0) break; } return msg_len; } static int sctp_prsctp_prune_unsent(struct sctp_association *asoc, struct sctp_sndrcvinfo *sinfo, int msg_len) { struct sctp_outq *q = &asoc->outqueue; struct sctp_chunk *chk, *temp; list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) { if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) || chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive) continue; list_del_init(&chk->list); q->out_qlen -= chk->skb->len; asoc->sent_cnt_removable--; asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++; if (chk->sinfo.sinfo_stream < asoc->stream.outcnt) { struct sctp_stream_out *streamout = &asoc->stream.out[chk->sinfo.sinfo_stream]; streamout->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++; } msg_len -= SCTP_DATA_SNDSIZE(chk) + sizeof(struct sk_buff) + sizeof(struct sctp_chunk); sctp_chunk_free(chk); if (msg_len <= 0) break; } return msg_len; } /* Abandon the chunks according their priorities */ void sctp_prsctp_prune(struct sctp_association *asoc, struct sctp_sndrcvinfo *sinfo, int msg_len) { struct sctp_transport *transport; if (!asoc->peer.prsctp_capable || !asoc->sent_cnt_removable) return; msg_len = sctp_prsctp_prune_sent(asoc, sinfo, &asoc->outqueue.retransmit, msg_len); if (msg_len <= 0) return; list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { msg_len = sctp_prsctp_prune_sent(asoc, sinfo, &transport->transmitted, msg_len); if (msg_len <= 0) return; } sctp_prsctp_prune_unsent(asoc, sinfo, msg_len); } /* Mark all the eligible packets on a transport for retransmission. */ void sctp_retransmit_mark(struct sctp_outq *q, struct sctp_transport *transport, __u8 reason) { struct list_head *lchunk, *ltemp; struct sctp_chunk *chunk; /* Walk through the specified transmitted queue. */ list_for_each_safe(lchunk, ltemp, &transport->transmitted) { chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list); /* If the chunk is abandoned, move it to abandoned list. 
*/ if (sctp_chunk_abandoned(chunk)) { list_del_init(lchunk); sctp_insert_list(&q->abandoned, lchunk); /* If this chunk has not been previousely acked, * stop considering it 'outstanding'. Our peer * will most likely never see it since it will * not be retransmitted */ if (!chunk->tsn_gap_acked) { if (chunk->transport) chunk->transport->flight_size -= sctp_data_size(chunk); q->outstanding_bytes -= sctp_data_size(chunk); q->asoc->peer.rwnd += sctp_data_size(chunk); } continue; } /* If we are doing retransmission due to a timeout or pmtu * discovery, only the chunks that are not yet acked should * be added to the retransmit queue. */ if ((reason == SCTP_RTXR_FAST_RTX && (chunk->fast_retransmit == SCTP_NEED_FRTX)) || (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) { /* RFC 2960 6.2.1 Processing a Received SACK * * C) Any time a DATA chunk is marked for * retransmission (via either T3-rtx timer expiration * (Section 6.3.3) or via fast retransmit * (Section 7.2.4)), add the data size of those * chunks to the rwnd. */ q->asoc->peer.rwnd += sctp_data_size(chunk); q->outstanding_bytes -= sctp_data_size(chunk); if (chunk->transport) transport->flight_size -= sctp_data_size(chunk); /* sctpimpguide-05 Section 2.8.2 * M5) If a T3-rtx timer expires, the * 'TSN.Missing.Report' of all affected TSNs is set * to 0. */ chunk->tsn_missing_report = 0; /* If a chunk that is being used for RTT measurement * has to be retransmitted, we cannot use this chunk * anymore for RTT measurements. Reset rto_pending so * that a new RTT measurement is started when a new * data chunk is sent. */ if (chunk->rtt_in_progress) { chunk->rtt_in_progress = 0; transport->rto_pending = 0; } /* Move the chunk to the retransmit queue. The chunks * on the retransmit queue are always kept in order. */ list_del_init(lchunk); sctp_insert_list(&q->retransmit, lchunk); } } pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d, " "flight_size:%d, pba:%d\n", __func__, transport, reason, transport->cwnd, transport->ssthresh, transport->flight_size, transport->partial_bytes_acked); } /* Mark all the eligible packets on a transport for retransmission and force * one packet out. */ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, sctp_retransmit_reason_t reason) { struct net *net = sock_net(q->asoc->base.sk); switch (reason) { case SCTP_RTXR_T3_RTX: SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS); sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX); /* Update the retran path if the T3-rtx timer has expired for * the current retran path. */ if (transport == transport->asoc->peer.retran_path) sctp_assoc_update_retran_path(transport->asoc); transport->asoc->rtx_data_chunks += transport->asoc->unack_data; break; case SCTP_RTXR_FAST_RTX: SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS); sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX); q->fast_rtx = 1; break; case SCTP_RTXR_PMTUD: SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS); break; case SCTP_RTXR_T1_RTX: SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS); transport->asoc->init_retries++; break; default: BUG(); } sctp_retransmit_mark(q, transport, reason); /* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination, * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by * following the procedures outlined in C1 - C5. 
*/ if (reason == SCTP_RTXR_T3_RTX) sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point); /* Flush the queues only on timeout, since fast_rtx is only * triggered during sack processing and the queue * will be flushed at the end. */ if (reason != SCTP_RTXR_FAST_RTX) sctp_outq_flush(q, /* rtx_timeout */ 1, GFP_ATOMIC); } /* * Transmit DATA chunks on the retransmit queue. Upon return from * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which * need to be transmitted by the caller. * We assume that pkt->transport has already been set. * * The return value is a normal kernel error return value. */ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt, int rtx_timeout, int *start_timer) { struct list_head *lqueue; struct sctp_transport *transport = pkt->transport; sctp_xmit_t status; struct sctp_chunk *chunk, *chunk1; int fast_rtx; int error = 0; int timer = 0; int done = 0; lqueue = &q->retransmit; fast_rtx = q->fast_rtx; /* This loop handles time-out retransmissions, fast retransmissions, * and retransmissions due to opening of whindow. * * RFC 2960 6.3.3 Handle T3-rtx Expiration * * E3) Determine how many of the earliest (i.e., lowest TSN) * outstanding DATA chunks for the address for which the * T3-rtx has expired will fit into a single packet, subject * to the MTU constraint for the path corresponding to the * destination transport address to which the retransmission * is being sent (this may be different from the address for * which the timer expires [see Section 6.4]). Call this value * K. Bundle and retransmit those K DATA chunks in a single * packet to the destination endpoint. * * [Just to be painfully clear, if we are retransmitting * because a timeout just happened, we should send only ONE * packet of retransmitted data.] * * For fast retransmissions we also send only ONE packet. However, * if we are just flushing the queue due to open window, we'll * try to send as much as possible. */ list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) { /* If the chunk is abandoned, move it to abandoned list. */ if (sctp_chunk_abandoned(chunk)) { list_del_init(&chunk->transmitted_list); sctp_insert_list(&q->abandoned, &chunk->transmitted_list); continue; } /* Make sure that Gap Acked TSNs are not retransmitted. A * simple approach is just to move such TSNs out of the * way and into a 'transmitted' queue and skip to the * next chunk. */ if (chunk->tsn_gap_acked) { list_move_tail(&chunk->transmitted_list, &transport->transmitted); continue; } /* If we are doing fast retransmit, ignore non-fast_rtransmit * chunks */ if (fast_rtx && !chunk->fast_retransmit) continue; redo: /* Attempt to append this chunk to the packet. */ status = sctp_packet_append_chunk(pkt, chunk); switch (status) { case SCTP_XMIT_PMTU_FULL: if (!pkt->has_data && !pkt->has_cookie_echo) { /* If this packet did not contain DATA then * retransmission did not happen, so do it * again. We'll ignore the error here since * control chunks are already freed so there * is nothing we can do. */ sctp_packet_transmit(pkt, GFP_ATOMIC); goto redo; } /* Send this packet. */ error = sctp_packet_transmit(pkt, GFP_ATOMIC); /* If we are retransmitting, we should only * send a single packet. * Otherwise, try appending this chunk again. */ if (rtx_timeout || fast_rtx) done = 1; else goto redo; /* Bundle next chunk in the next round. */ break; case SCTP_XMIT_RWND_FULL: /* Send this packet. */ error = sctp_packet_transmit(pkt, GFP_ATOMIC); /* Stop sending DATA as there is no more room * at the receiver. 
*/ done = 1; break; case SCTP_XMIT_DELAY: /* Send this packet. */ error = sctp_packet_transmit(pkt, GFP_ATOMIC); /* Stop sending DATA because of nagle delay. */ done = 1; break; default: /* The append was successful, so add this chunk to * the transmitted list. */ list_move_tail(&chunk->transmitted_list, &transport->transmitted); /* Mark the chunk as ineligible for fast retransmit * after it is retransmitted. */ if (chunk->fast_retransmit == SCTP_NEED_FRTX) chunk->fast_retransmit = SCTP_DONT_FRTX; q->asoc->stats.rtxchunks++; break; } /* Set the timer if there were no errors */ if (!error && !timer) timer = 1; if (done) break; } /* If we are here due to a retransmit timeout or a fast * retransmit and if there are any chunks left in the retransmit * queue that could not fit in the PMTU sized packet, they need * to be marked as ineligible for a subsequent fast retransmit. */ if (rtx_timeout || fast_rtx) { list_for_each_entry(chunk1, lqueue, transmitted_list) { if (chunk1->fast_retransmit == SCTP_NEED_FRTX) chunk1->fast_retransmit = SCTP_DONT_FRTX; } } *start_timer = timer; /* Clear fast retransmit hint */ if (fast_rtx) q->fast_rtx = 0; return error; } /* Cork the outqueue so queued chunks are really queued. */ void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp) { if (q->cork) q->cork = 0; sctp_outq_flush(q, 0, gfp); } /* * Try to flush an outqueue. * * Description: Send everything in q which we legally can, subject to * congestion limitations. * * Note: This function can be called from multiple contexts so appropriate * locking concerns must be made. Today we use the sock lock to protect * this function. */ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) { struct sctp_packet *packet; struct sctp_packet singleton; struct sctp_association *asoc = q->asoc; __u16 sport = asoc->base.bind_addr.port; __u16 dport = asoc->peer.port; __u32 vtag = asoc->peer.i.init_tag; struct sctp_transport *transport = NULL; struct sctp_transport *new_transport; struct sctp_chunk *chunk, *tmp; sctp_xmit_t status; int error = 0; int start_timer = 0; int one_packet = 0; /* These transports have chunks to send. */ struct list_head transport_list; struct list_head *ltransport; INIT_LIST_HEAD(&transport_list); packet = NULL; /* * 6.10 Bundling * ... * When bundling control chunks with DATA chunks, an * endpoint MUST place control chunks first in the outbound * SCTP packet. The transmitter MUST transmit DATA chunks * within a SCTP packet in increasing order of TSN. * ... */ list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { /* RFC 5061, 5.3 * F1) This means that until such time as the ASCONF * containing the add is acknowledged, the sender MUST * NOT use the new IP address as a source for ANY SCTP * packet except on carrying an ASCONF Chunk. */ if (asoc->src_out_of_asoc_ok && chunk->chunk_hdr->type != SCTP_CID_ASCONF) continue; list_del_init(&chunk->list); /* Pick the right transport to use. */ new_transport = chunk->transport; if (!new_transport) { /* * If we have a prior transport pointer, see if * the destination address of the chunk * matches the destination address of the * current transport. If not a match, then * try to look up the transport with a given * destination address. We do this because * after processing ASCONFs, we may have new * transports created. 
*/ if (transport && sctp_cmp_addr_exact(&chunk->dest, &transport->ipaddr)) new_transport = transport; else new_transport = sctp_assoc_lookup_paddr(asoc, &chunk->dest); /* if we still don't have a new transport, then * use the current active path. */ if (!new_transport) new_transport = asoc->peer.active_path; } else if ((new_transport->state == SCTP_INACTIVE) || (new_transport->state == SCTP_UNCONFIRMED) || (new_transport->state == SCTP_PF)) { /* If the chunk is Heartbeat or Heartbeat Ack, * send it to chunk->transport, even if it's * inactive. * * 3.3.6 Heartbeat Acknowledgement: * ... * A HEARTBEAT ACK is always sent to the source IP * address of the IP datagram containing the * HEARTBEAT chunk to which this ack is responding. * ... * * ASCONF_ACKs also must be sent to the source. */ if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT && chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK && chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK) new_transport = asoc->peer.active_path; } /* Are we switching transports? * Take care of transport locks. */ if (new_transport != transport) { transport = new_transport; if (list_empty(&transport->send_ready)) { list_add_tail(&transport->send_ready, &transport_list); } packet = &transport->packet; sctp_packet_config(packet, vtag, asoc->peer.ecn_capable); } switch (chunk->chunk_hdr->type) { /* * 6.10 Bundling * ... * An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN * COMPLETE with any other chunks. [Send them immediately.] */ case SCTP_CID_INIT: case SCTP_CID_INIT_ACK: case SCTP_CID_SHUTDOWN_COMPLETE: sctp_packet_init(&singleton, transport, sport, dport); sctp_packet_config(&singleton, vtag, 0); sctp_packet_append_chunk(&singleton, chunk); error = sctp_packet_transmit(&singleton, gfp); if (error < 0) { asoc->base.sk->sk_err = -error; return; } break; case SCTP_CID_ABORT: if (sctp_test_T_bit(chunk)) { packet->vtag = asoc->c.my_vtag; } /* The following chunks are "response" chunks, i.e. * they are generated in response to something we * received. If we are sending these, then we can * send only 1 packet containing these chunks. */ case SCTP_CID_HEARTBEAT_ACK: case SCTP_CID_SHUTDOWN_ACK: case SCTP_CID_COOKIE_ACK: case SCTP_CID_COOKIE_ECHO: case SCTP_CID_ERROR: case SCTP_CID_ECN_CWR: case SCTP_CID_ASCONF_ACK: one_packet = 1; /* Fall through */ case SCTP_CID_SACK: case SCTP_CID_HEARTBEAT: case SCTP_CID_SHUTDOWN: case SCTP_CID_ECN_ECNE: case SCTP_CID_ASCONF: case SCTP_CID_FWD_TSN: case SCTP_CID_RECONF: status = sctp_packet_transmit_chunk(packet, chunk, one_packet, gfp); if (status != SCTP_XMIT_OK) { /* put the chunk back */ list_add(&chunk->list, &q->control_chunk_list); break; } asoc->stats.octrlchunks++; /* PR-SCTP C5) If a FORWARD TSN is sent, the * sender MUST assure that at least one T3-rtx * timer is running. */ if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) { sctp_transport_reset_t3_rtx(transport); transport->last_time_sent = jiffies; } if (chunk == asoc->strreset_chunk) sctp_transport_reset_reconf_timer(transport); break; default: /* We built a chunk with an illegal type! */ BUG(); } } if (q->asoc->src_out_of_asoc_ok) goto sctp_flush_out; /* Is it OK to send data chunks? */ switch (asoc->state) { case SCTP_STATE_COOKIE_ECHOED: /* Only allow bundling when this packet has a COOKIE-ECHO * chunk. 
*/ if (!packet || !packet->has_cookie_echo) break; /* fallthru */ case SCTP_STATE_ESTABLISHED: case SCTP_STATE_SHUTDOWN_PENDING: case SCTP_STATE_SHUTDOWN_RECEIVED: /* * RFC 2960 6.1 Transmission of DATA Chunks * * C) When the time comes for the sender to transmit, * before sending new DATA chunks, the sender MUST * first transmit any outstanding DATA chunks which * are marked for retransmission (limited by the * current cwnd). */ if (!list_empty(&q->retransmit)) { if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED) goto sctp_flush_out; if (transport == asoc->peer.retran_path) goto retran; /* Switch transports & prepare the packet. */ transport = asoc->peer.retran_path; if (list_empty(&transport->send_ready)) { list_add_tail(&transport->send_ready, &transport_list); } packet = &transport->packet; sctp_packet_config(packet, vtag, asoc->peer.ecn_capable); retran: error = sctp_outq_flush_rtx(q, packet, rtx_timeout, &start_timer); if (error < 0) asoc->base.sk->sk_err = -error; if (start_timer) { sctp_transport_reset_t3_rtx(transport); transport->last_time_sent = jiffies; } /* This can happen on COOKIE-ECHO resend. Only * one chunk can get bundled with a COOKIE-ECHO. */ if (packet->has_cookie_echo) goto sctp_flush_out; /* Don't send new data if there is still data * waiting to retransmit. */ if (!list_empty(&q->retransmit)) goto sctp_flush_out; } /* Apply Max.Burst limitation to the current transport in * case it will be used for new data. We are going to * rest it before we return, but we want to apply the limit * to the currently queued data. */ if (transport) sctp_transport_burst_limited(transport); /* Finally, transmit new packets. */ while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { __u32 sid = ntohs(chunk->subh.data_hdr->stream); /* RFC 2960 6.5 Every DATA chunk MUST carry a valid * stream identifier. */ if (chunk->sinfo.sinfo_stream >= asoc->stream.outcnt) { /* Mark as failed send. */ sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM); if (asoc->peer.prsctp_capable && SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags)) asoc->sent_cnt_removable--; sctp_chunk_free(chunk); continue; } /* Has this chunk expired? */ if (sctp_chunk_abandoned(chunk)) { sctp_chunk_fail(chunk, 0); sctp_chunk_free(chunk); continue; } if (asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) { sctp_outq_head_data(q, chunk); goto sctp_flush_out; } /* If there is a specified transport, use it. * Otherwise, we want to use the active path. */ new_transport = chunk->transport; if (!new_transport || ((new_transport->state == SCTP_INACTIVE) || (new_transport->state == SCTP_UNCONFIRMED) || (new_transport->state == SCTP_PF))) new_transport = asoc->peer.active_path; if (new_transport->state == SCTP_UNCONFIRMED) { WARN_ONCE(1, "Attempt to send packet on unconfirmed path."); sctp_chunk_fail(chunk, 0); sctp_chunk_free(chunk); continue; } /* Change packets if necessary. */ if (new_transport != transport) { transport = new_transport; /* Schedule to have this transport's * packet flushed. */ if (list_empty(&transport->send_ready)) { list_add_tail(&transport->send_ready, &transport_list); } packet = &transport->packet; sctp_packet_config(packet, vtag, asoc->peer.ecn_capable); /* We've switched transports, so apply the * Burst limit to the new transport. */ sctp_transport_burst_limited(transport); } pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p " "skb->users:%d\n", __func__, q, chunk, chunk && chunk->chunk_hdr ? 
sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : "illegal chunk", ntohl(chunk->subh.data_hdr->tsn), chunk->skb ? chunk->skb->head : NULL, chunk->skb ? refcount_read(&chunk->skb->users) : -1); /* Add the chunk to the packet. */ status = sctp_packet_transmit_chunk(packet, chunk, 0, gfp); switch (status) { case SCTP_XMIT_PMTU_FULL: case SCTP_XMIT_RWND_FULL: case SCTP_XMIT_DELAY: /* We could not append this chunk, so put * the chunk back on the output queue. */ pr_debug("%s: could not transmit tsn:0x%x, status:%d\n", __func__, ntohl(chunk->subh.data_hdr->tsn), status); sctp_outq_head_data(q, chunk); goto sctp_flush_out; case SCTP_XMIT_OK: /* The sender is in the SHUTDOWN-PENDING state, * The sender MAY set the I-bit in the DATA * chunk header. */ if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM; if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) asoc->stats.ouodchunks++; else asoc->stats.oodchunks++; break; default: BUG(); } /* BUG: We assume that the sctp_packet_transmit() * call below will succeed all the time and add the * chunk to the transmitted list and restart the * timers. * It is possible that the call can fail under OOM * conditions. * * Is this really a problem? Won't this behave * like a lost TSN? */ list_add_tail(&chunk->transmitted_list, &transport->transmitted); sctp_transport_reset_t3_rtx(transport); transport->last_time_sent = jiffies; /* Only let one DATA chunk get bundled with a * COOKIE-ECHO chunk. */ if (packet->has_cookie_echo) goto sctp_flush_out; } break; default: /* Do nothing. */ break; } sctp_flush_out: /* Before returning, examine all the transports touched in * this call. Right now, we bluntly force clear all the * transports. Things might change after we implement Nagle. * But such an examination is still required. * * --xguo */ while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) { struct sctp_transport *t = list_entry(ltransport, struct sctp_transport, send_ready); packet = &t->packet; if (!sctp_packet_empty(packet)) { error = sctp_packet_transmit(packet, gfp); if (error < 0) asoc->base.sk->sk_err = -error; } /* Clear the burst limited state, if any */ sctp_transport_burst_reset(t); } } /* Update unack_data based on the incoming SACK chunk */ static void sctp_sack_update_unack_data(struct sctp_association *assoc, struct sctp_sackhdr *sack) { sctp_sack_variable_t *frags; __u16 unack_data; int i; unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1; frags = sack->variable; for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) { unack_data -= ((ntohs(frags[i].gab.end) - ntohs(frags[i].gab.start) + 1)); } assoc->unack_data = unack_data; } /* This is where we REALLY process a SACK. * * Process the SACK against the outqueue. Mostly, this just frees * things off the transmitted queue. */ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk) { struct sctp_association *asoc = q->asoc; struct sctp_sackhdr *sack = chunk->subh.sack_hdr; struct sctp_transport *transport; struct sctp_chunk *tchunk = NULL; struct list_head *lchunk, *transport_list, *temp; sctp_sack_variable_t *frags = sack->variable; __u32 sack_ctsn, ctsn, tsn; __u32 highest_tsn, highest_new_tsn; __u32 sack_a_rwnd; unsigned int outstanding; struct sctp_transport *primary = asoc->peer.primary_path; int count_of_newacks = 0; int gap_ack_blocks; u8 accum_moved = 0; /* Grab the association's destination address list. 
*/ transport_list = &asoc->peer.transport_addr_list; sack_ctsn = ntohl(sack->cum_tsn_ack); gap_ack_blocks = ntohs(sack->num_gap_ack_blocks); asoc->stats.gapcnt += gap_ack_blocks; /* * SFR-CACC algorithm: * On receipt of a SACK the sender SHOULD execute the * following statements. * * 1) If the cumulative ack in the SACK passes next tsn_at_change * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for * all destinations. * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE * is set the receiver of the SACK MUST take the following actions: * * A) Initialize the cacc_saw_newack to 0 for all destination * addresses. * * Only bother if changeover_active is set. Otherwise, this is * totally suboptimal to do on every SACK. */ if (primary->cacc.changeover_active) { u8 clear_cycling = 0; if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) { primary->cacc.changeover_active = 0; clear_cycling = 1; } if (clear_cycling || gap_ack_blocks) { list_for_each_entry(transport, transport_list, transports) { if (clear_cycling) transport->cacc.cycling_changeover = 0; if (gap_ack_blocks) transport->cacc.cacc_saw_newack = 0; } } } /* Get the highest TSN in the sack. */ highest_tsn = sack_ctsn; if (gap_ack_blocks) highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end); if (TSN_lt(asoc->highest_sacked, highest_tsn)) asoc->highest_sacked = highest_tsn; highest_new_tsn = sack_ctsn; /* Run through the retransmit queue. Credit bytes received * and free those chunks that we can. */ sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack, &highest_new_tsn); /* Run through the transmitted queue. * Credit bytes received and free those chunks which we can. * * This is a MASSIVE candidate for optimization. */ list_for_each_entry(transport, transport_list, transports) { sctp_check_transmitted(q, &transport->transmitted, transport, &chunk->source, sack, &highest_new_tsn); /* * SFR-CACC algorithm: * C) Let count_of_newacks be the number of * destinations for which cacc_saw_newack is set. */ if (transport->cacc.cacc_saw_newack) count_of_newacks++; } /* Move the Cumulative TSN Ack Point if appropriate. */ if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) { asoc->ctsn_ack_point = sack_ctsn; accum_moved = 1; } if (gap_ack_blocks) { if (asoc->fast_recovery && accum_moved) highest_new_tsn = highest_tsn; list_for_each_entry(transport, transport_list, transports) sctp_mark_missing(q, &transport->transmitted, transport, highest_new_tsn, count_of_newacks); } /* Update unack_data field in the assoc. */ sctp_sack_update_unack_data(asoc, sack); ctsn = asoc->ctsn_ack_point; /* Throw away stuff rotting on the sack queue. */ list_for_each_safe(lchunk, temp, &q->sacked) { tchunk = list_entry(lchunk, struct sctp_chunk, transmitted_list); tsn = ntohl(tchunk->subh.data_hdr->tsn); if (TSN_lte(tsn, ctsn)) { list_del_init(&tchunk->transmitted_list); if (asoc->peer.prsctp_capable && SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags)) asoc->sent_cnt_removable--; sctp_chunk_free(tchunk); } } /* ii) Set rwnd equal to the newly received a_rwnd minus the * number of bytes still outstanding after processing the * Cumulative TSN Ack and the Gap Ack Blocks. 
*/ sack_a_rwnd = ntohl(sack->a_rwnd); asoc->peer.zero_window_announced = !sack_a_rwnd; outstanding = q->outstanding_bytes; if (outstanding < sack_a_rwnd) sack_a_rwnd -= outstanding; else sack_a_rwnd = 0; asoc->peer.rwnd = sack_a_rwnd; sctp_generate_fwdtsn(q, sack_ctsn); pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn); pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, " "advertised peer ack point:0x%x\n", __func__, asoc, ctsn, asoc->adv_peer_ack_point); return sctp_outq_is_empty(q); } /* Is the outqueue empty? * The queue is empty when we have not pending data, no in-flight data * and nothing pending retransmissions. */ int sctp_outq_is_empty(const struct sctp_outq *q) { return q->out_qlen == 0 && q->outstanding_bytes == 0 && list_empty(&q->retransmit); } /******************************************************************** * 2nd Level Abstractions ********************************************************************/ /* Go through a transport's transmitted list or the association's retransmit * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked. * The retransmit list will not have an associated transport. * * I added coherent debug information output. --xguo * * Instead of printing 'sacked' or 'kept' for each TSN on the * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5. * KEPT TSN6-TSN7, etc. */ static void sctp_check_transmitted(struct sctp_outq *q, struct list_head *transmitted_queue, struct sctp_transport *transport, union sctp_addr *saddr, struct sctp_sackhdr *sack, __u32 *highest_new_tsn_in_sack) { struct list_head *lchunk; struct sctp_chunk *tchunk; struct list_head tlist; __u32 tsn; __u32 sack_ctsn; __u32 rtt; __u8 restart_timer = 0; int bytes_acked = 0; int migrate_bytes = 0; bool forward_progress = false; sack_ctsn = ntohl(sack->cum_tsn_ack); INIT_LIST_HEAD(&tlist); /* The while loop will skip empty transmitted queues. */ while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) { tchunk = list_entry(lchunk, struct sctp_chunk, transmitted_list); if (sctp_chunk_abandoned(tchunk)) { /* Move the chunk to abandoned list. */ sctp_insert_list(&q->abandoned, lchunk); /* If this chunk has not been acked, stop * considering it as 'outstanding'. */ if (!tchunk->tsn_gap_acked) { if (tchunk->transport) tchunk->transport->flight_size -= sctp_data_size(tchunk); q->outstanding_bytes -= sctp_data_size(tchunk); } continue; } tsn = ntohl(tchunk->subh.data_hdr->tsn); if (sctp_acked(sack, tsn)) { /* If this queue is the retransmit queue, the * retransmit timer has already reclaimed * the outstanding bytes for this chunk, so only * count bytes associated with a transport. */ if (transport) { /* If this chunk is being used for RTT * measurement, calculate the RTT and update * the RTO using this value. * * 6.3.1 C5) Karn's algorithm: RTT measurements * MUST NOT be made using packets that were * retransmitted (and thus for which it is * ambiguous whether the reply was for the * first instance of the packet or a later * instance). */ if (!tchunk->tsn_gap_acked && !sctp_chunk_retransmitted(tchunk) && tchunk->rtt_in_progress) { tchunk->rtt_in_progress = 0; rtt = jiffies - tchunk->sent_at; sctp_transport_update_rto(transport, rtt); } } /* If the chunk hasn't been marked as ACKED, * mark it and account bytes_acked if the * chunk had a valid transport (it will not * have a transport if ASCONF had deleted it * while DATA was outstanding). 
*/ if (!tchunk->tsn_gap_acked) { tchunk->tsn_gap_acked = 1; if (TSN_lt(*highest_new_tsn_in_sack, tsn)) *highest_new_tsn_in_sack = tsn; bytes_acked += sctp_data_size(tchunk); if (!tchunk->transport) migrate_bytes += sctp_data_size(tchunk); forward_progress = true; } if (TSN_lte(tsn, sack_ctsn)) { /* RFC 2960 6.3.2 Retransmission Timer Rules * * R3) Whenever a SACK is received * that acknowledges the DATA chunk * with the earliest outstanding TSN * for that address, restart T3-rtx * timer for that address with its * current RTO. */ restart_timer = 1; forward_progress = true; if (!tchunk->tsn_gap_acked) { /* * SFR-CACC algorithm: * 2) If the SACK contains gap acks * and the flag CHANGEOVER_ACTIVE is * set the receiver of the SACK MUST * take the following action: * * B) For each TSN t being acked that * has not been acked in any SACK so * far, set cacc_saw_newack to 1 for * the destination that the TSN was * sent to. */ if (transport && sack->num_gap_ack_blocks && q->asoc->peer.primary_path->cacc. changeover_active) transport->cacc.cacc_saw_newack = 1; } list_add_tail(&tchunk->transmitted_list, &q->sacked); } else { /* RFC2960 7.2.4, sctpimpguide-05 2.8.2 * M2) Each time a SACK arrives reporting * 'Stray DATA chunk(s)' record the highest TSN * reported as newly acknowledged, call this * value 'HighestTSNinSack'. A newly * acknowledged DATA chunk is one not * previously acknowledged in a SACK. * * When the SCTP sender of data receives a SACK * chunk that acknowledges, for the first time, * the receipt of a DATA chunk, all the still * unacknowledged DATA chunks whose TSN is * older than that newly acknowledged DATA * chunk, are qualified as 'Stray DATA chunks'. */ list_add_tail(lchunk, &tlist); } } else { if (tchunk->tsn_gap_acked) { pr_debug("%s: receiver reneged on data TSN:0x%x\n", __func__, tsn); tchunk->tsn_gap_acked = 0; if (tchunk->transport) bytes_acked -= sctp_data_size(tchunk); /* RFC 2960 6.3.2 Retransmission Timer Rules * * R4) Whenever a SACK is received missing a * TSN that was previously acknowledged via a * Gap Ack Block, start T3-rtx for the * destination address to which the DATA * chunk was originally * transmitted if it is not already running. */ restart_timer = 1; } list_add_tail(lchunk, &tlist); } } if (transport) { if (bytes_acked) { struct sctp_association *asoc = transport->asoc; /* We may have counted DATA that was migrated * to this transport due to DEL-IP operation. * Subtract those bytes, since the were never * send on this transport and shouldn't be * credited to this transport. */ bytes_acked -= migrate_bytes; /* 8.2. When an outstanding TSN is acknowledged, * the endpoint shall clear the error counter of * the destination transport address to which the * DATA chunk was last sent. * The association's overall error counter is * also cleared. */ transport->error_count = 0; transport->asoc->overall_error_count = 0; forward_progress = true; /* * While in SHUTDOWN PENDING, we may have started * the T5 shutdown guard timer after reaching the * retransmission limit. Stop that timer as soon * as the receiver acknowledged any data. */ if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING && del_timer(&asoc->timers [SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD])) sctp_association_put(asoc); /* Mark the destination transport address as * active if it is not so marked. 
*/ if ((transport->state == SCTP_INACTIVE || transport->state == SCTP_UNCONFIRMED) && sctp_cmp_addr_exact(&transport->ipaddr, saddr)) { sctp_assoc_control_transport( transport->asoc, transport, SCTP_TRANSPORT_UP, SCTP_RECEIVED_SACK); } sctp_transport_raise_cwnd(transport, sack_ctsn, bytes_acked); transport->flight_size -= bytes_acked; if (transport->flight_size == 0) transport->partial_bytes_acked = 0; q->outstanding_bytes -= bytes_acked + migrate_bytes; } else { /* RFC 2960 6.1, sctpimpguide-06 2.15.2 * When a sender is doing zero window probing, it * should not timeout the association if it continues * to receive new packets from the receiver. The * reason is that the receiver MAY keep its window * closed for an indefinite time. * A sender is doing zero window probing when the * receiver's advertised window is zero, and there is * only one data chunk in flight to the receiver. * * Allow the association to timeout while in SHUTDOWN * PENDING or SHUTDOWN RECEIVED in case the receiver * stays in zero window mode forever. */ if (!q->asoc->peer.rwnd && !list_empty(&tlist) && (sack_ctsn+2 == q->asoc->next_tsn) && q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) { pr_debug("%s: sack received for zero window " "probe:%u\n", __func__, sack_ctsn); q->asoc->overall_error_count = 0; transport->error_count = 0; } } /* RFC 2960 6.3.2 Retransmission Timer Rules * * R2) Whenever all outstanding data sent to an address have * been acknowledged, turn off the T3-rtx timer of that * address. */ if (!transport->flight_size) { if (del_timer(&transport->T3_rtx_timer)) sctp_transport_put(transport); } else if (restart_timer) { if (!mod_timer(&transport->T3_rtx_timer, jiffies + transport->rto)) sctp_transport_hold(transport); } if (forward_progress) { if (transport->dst) sctp_transport_dst_confirm(transport); } } list_splice(&tlist, transmitted_queue); } /* Mark chunks as missing and consequently may get retransmitted. */ static void sctp_mark_missing(struct sctp_outq *q, struct list_head *transmitted_queue, struct sctp_transport *transport, __u32 highest_new_tsn_in_sack, int count_of_newacks) { struct sctp_chunk *chunk; __u32 tsn; char do_fast_retransmit = 0; struct sctp_association *asoc = q->asoc; struct sctp_transport *primary = asoc->peer.primary_path; list_for_each_entry(chunk, transmitted_queue, transmitted_list) { tsn = ntohl(chunk->subh.data_hdr->tsn); /* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all * 'Unacknowledged TSN's', if the TSN number of an * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack' * value, increment the 'TSN.Missing.Report' count on that * chunk if it has NOT been fast retransmitted or marked for * fast retransmit already. */ if (chunk->fast_retransmit == SCTP_CAN_FRTX && !chunk->tsn_gap_acked && TSN_lt(tsn, highest_new_tsn_in_sack)) { /* SFR-CACC may require us to skip marking * this chunk as missing. */ if (!transport || !sctp_cacc_skip(primary, chunk->transport, count_of_newacks, tsn)) { chunk->tsn_missing_report++; pr_debug("%s: tsn:0x%x missing counter:%d\n", __func__, tsn, chunk->tsn_missing_report); } } /* * M4) If any DATA chunk is found to have a * 'TSN.Missing.Report' * value larger than or equal to 3, mark that chunk for * retransmission and start the fast retransmit procedure. 
*/ if (chunk->tsn_missing_report >= 3) { chunk->fast_retransmit = SCTP_NEED_FRTX; do_fast_retransmit = 1; } } if (transport) { if (do_fast_retransmit) sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX); pr_debug("%s: transport:%p, cwnd:%d, ssthresh:%d, " "flight_size:%d, pba:%d\n", __func__, transport, transport->cwnd, transport->ssthresh, transport->flight_size, transport->partial_bytes_acked); } } /* Is the given TSN acked by this packet? */ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn) { int i; sctp_sack_variable_t *frags; __u16 tsn_offset, blocks; __u32 ctsn = ntohl(sack->cum_tsn_ack); if (TSN_lte(tsn, ctsn)) goto pass; /* 3.3.4 Selective Acknowledgement (SACK) (3): * * Gap Ack Blocks: * These fields contain the Gap Ack Blocks. They are repeated * for each Gap Ack Block up to the number of Gap Ack Blocks * defined in the Number of Gap Ack Blocks field. All DATA * chunks with TSNs greater than or equal to (Cumulative TSN * Ack + Gap Ack Block Start) and less than or equal to * (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack * Block are assumed to have been received correctly. */ frags = sack->variable; blocks = ntohs(sack->num_gap_ack_blocks); tsn_offset = tsn - ctsn; for (i = 0; i < blocks; ++i) { if (tsn_offset >= ntohs(frags[i].gab.start) && tsn_offset <= ntohs(frags[i].gab.end)) goto pass; } return 0; pass: return 1; } static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist, int nskips, __be16 stream) { int i; for (i = 0; i < nskips; i++) { if (skiplist[i].stream == stream) return i; } return i; } /* Create and add a fwdtsn chunk to the outq's control queue if needed. */ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn) { struct sctp_association *asoc = q->asoc; struct sctp_chunk *ftsn_chunk = NULL; struct sctp_fwdtsn_skip ftsn_skip_arr[10]; int nskips = 0; int skip_pos = 0; __u32 tsn; struct sctp_chunk *chunk; struct list_head *lchunk, *temp; if (!asoc->peer.prsctp_capable) return; /* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the * received SACK. * * If (Advanced.Peer.Ack.Point < SackCumAck), then update * Advanced.Peer.Ack.Point to be equal to SackCumAck. */ if (TSN_lt(asoc->adv_peer_ack_point, ctsn)) asoc->adv_peer_ack_point = ctsn; /* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point" * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as * the chunk next in the out-queue space is marked as "abandoned" as * shown in the following example: * * Assuming that a SACK arrived with the Cumulative TSN ACK 102 * and the Advanced.Peer.Ack.Point is updated to this value: * * out-queue at the end of ==> out-queue after Adv.Ack.Point * normal SACK processing local advancement * ... ... * Adv.Ack.Pt-> 102 acked 102 acked * 103 abandoned 103 abandoned * 104 abandoned Adv.Ack.P-> 104 abandoned * 105 105 * 106 acked 106 acked * ... ... * * In this example, the data sender successfully advanced the * "Advanced.Peer.Ack.Point" from 102 to 104 locally. */ list_for_each_safe(lchunk, temp, &q->abandoned) { chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list); tsn = ntohl(chunk->subh.data_hdr->tsn); /* Remove any chunks in the abandoned queue that are acked by * the ctsn. 
*/ if (TSN_lte(tsn, ctsn)) { list_del_init(lchunk); sctp_chunk_free(chunk); } else { if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) { asoc->adv_peer_ack_point = tsn; if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) continue; skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips, chunk->subh.data_hdr->stream); ftsn_skip_arr[skip_pos].stream = chunk->subh.data_hdr->stream; ftsn_skip_arr[skip_pos].ssn = chunk->subh.data_hdr->ssn; if (skip_pos == nskips) nskips++; if (nskips == 10) break; } else break; } } /* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point" * is greater than the Cumulative TSN ACK carried in the received * SACK, the data sender MUST send the data receiver a FORWARD TSN * chunk containing the latest value of the * "Advanced.Peer.Ack.Point". * * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD * list each stream and sequence number in the forwarded TSN. This * information will enable the receiver to easily find any * stranded TSN's waiting on stream reorder queues. Each stream * SHOULD only be reported once; this means that if multiple * abandoned messages occur in the same stream then only the * highest abandoned stream sequence number is reported. If the * total size of the FORWARD TSN does NOT fit in a single MTU then * the sender of the FORWARD TSN SHOULD lower the * Advanced.Peer.Ack.Point to the last TSN that will fit in a * single MTU. */ if (asoc->adv_peer_ack_point > ctsn) ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point, nskips, &ftsn_skip_arr[0]); if (ftsn_chunk) { list_add_tail(&ftsn_chunk->list, &q->control_chunk_list); SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS); } }
gpl-2.0
mydongistiny/GCC_SaberMod
gcc/testsuite/gcc.dg/vect/vect-nop-move.c
29
1534
/* { dg-do run } */ /* { dg-require-effective-target vect_float } */ /* { dg-additional-options "-fdump-rtl-combine-details" } */ #include "tree-vect.h" extern void abort (void); #define NOINLINE __attribute__((noinline)) typedef float float32x4_t __attribute__ ((__vector_size__ (16))); typedef float float32x2_t __attribute__ ((__vector_size__ (8))); NOINLINE float foo32x4_be (float32x4_t x) { return x[3]; } NOINLINE float foo32x4_le (float32x4_t x) { return x[0]; } NOINLINE float bar (float a) { return a; } NOINLINE float foo32x2_be (float32x2_t x) { #ifdef __i386__ /* ix86 passes float32x2 vector arguments in mmx registers. We need to emit emms to empty MMX state and re-enable the x87 stack before a float value can be loaded to and passed in the x87 floating-point return register. */ __builtin_ia32_emms (); #endif return bar (x[1]); } NOINLINE float foo32x2_le (float32x2_t x) { #ifdef __i386__ __builtin_ia32_emms (); #endif return bar (x[0]); } NOINLINE int test (void) { float32x4_t a = { 0.0f, 1.0f, 2.0f, 3.0f }; float32x2_t b = { 0.0f, 1.0f }; if (foo32x2_be (b) != 1.0f) abort (); if (foo32x2_le (b) != 0.0f) abort (); if (foo32x4_be (a) != 3.0f) abort (); if (foo32x4_le (a) != 0.0f) abort (); return 0; } int main () { check_vect (); return test (); } /* { dg-final { scan-rtl-dump "deleting noop move" "combine" { target aarch64*-*-* } } } */ /* { dg-final { cleanup-rtl-dump "combine" } } */ /* { dg-final { cleanup-tree-dump "vect" } } */
gpl-2.0
BHSPitMonkey/linwizard-strtrk
arch/alpha/boot/bootpz.c
797
13415
/* * arch/alpha/boot/bootpz.c * * Copyright (C) 1997 Jay Estabrook * * This file is used for creating a compressed BOOTP file for the * Linux/AXP kernel * * based significantly on the arch/alpha/boot/main.c of Linus Torvalds * and the decompression code from MILO. */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/utsrelease.h> #include <linux/mm.h> #include <asm/system.h> #include <asm/console.h> #include <asm/hwrpb.h> #include <asm/pgtable.h> #include <asm/io.h> #include <stdarg.h> #include "kzsize.h" /* FIXME FIXME FIXME */ #define MALLOC_AREA_SIZE 0x200000 /* 2MB for now */ /* FIXME FIXME FIXME */ /* WARNING NOTE It is very possible that turning on additional messages may cause kernel image corruption due to stack usage to do the printing. */ #undef DEBUG_CHECK_RANGE #undef DEBUG_ADDRESSES #undef DEBUG_LAST_STEPS extern unsigned long switch_to_osf_pal(unsigned long nr, struct pcb_struct * pcb_va, struct pcb_struct * pcb_pa, unsigned long *vptb); extern int decompress_kernel(void* destination, void *source, size_t ksize, size_t kzsize); extern void move_stack(unsigned long new_stack); struct hwrpb_struct *hwrpb = INIT_HWRPB; static struct pcb_struct pcb_va[1]; /* * Find a physical address of a virtual object.. * * This is easy using the virtual page table address. */ #define VPTB ((unsigned long *) 0x200000000) static inline unsigned long find_pa(unsigned long address) { unsigned long result; result = VPTB[address >> 13]; result >>= 32; result <<= 13; result |= address & 0x1fff; return result; } int check_range(unsigned long vstart, unsigned long vend, unsigned long kstart, unsigned long kend) { unsigned long vaddr, kaddr; #ifdef DEBUG_CHECK_RANGE srm_printk("check_range: V[0x%lx:0x%lx] K[0x%lx:0x%lx]\n", vstart, vend, kstart, kend); #endif /* do some range checking for detecting an overlap... */ for (vaddr = vstart; vaddr <= vend; vaddr += PAGE_SIZE) { kaddr = (find_pa(vaddr) | PAGE_OFFSET); if (kaddr >= kstart && kaddr <= kend) { #ifdef DEBUG_CHECK_RANGE srm_printk("OVERLAP: vaddr 0x%lx kaddr 0x%lx" " [0x%lx:0x%lx]\n", vaddr, kaddr, kstart, kend); #endif return 1; } } return 0; } /* * This function moves into OSF/1 pal-code, and has a temporary * PCB for that. The kernel proper should replace this PCB with * the real one as soon as possible. * * The page table muckery in here depends on the fact that the boot * code has the L1 page table identity-map itself in the second PTE * in the L1 page table. Thus the L1-page is virtually addressable * itself (through three levels) at virtual address 0x200802000. */ #define L1 ((unsigned long *) 0x200802000) void pal_init(void) { unsigned long i, rev; struct percpu_struct * percpu; struct pcb_struct * pcb_pa; /* Create the dummy PCB. */ pcb_va->ksp = 0; pcb_va->usp = 0; pcb_va->ptbr = L1[1] >> 32; pcb_va->asn = 0; pcb_va->pcc = 0; pcb_va->unique = 0; pcb_va->flags = 1; pcb_va->res1 = 0; pcb_va->res2 = 0; pcb_pa = (struct pcb_struct *)find_pa((unsigned long)pcb_va); /* * a0 = 2 (OSF) * a1 = return address, but we give the asm the vaddr of the PCB * a2 = physical addr of PCB * a3 = new virtual page table pointer * a4 = KSP (but the asm sets it) */ srm_printk("Switching to OSF PAL-code... 
"); i = switch_to_osf_pal(2, pcb_va, pcb_pa, VPTB); if (i) { srm_printk("failed, code %ld\n", i); __halt(); } percpu = (struct percpu_struct *) (INIT_HWRPB->processor_offset + (unsigned long) INIT_HWRPB); rev = percpu->pal_revision = percpu->palcode_avail[2]; srm_printk("OK (rev %lx)\n", rev); tbia(); /* do it directly in case we are SMP */ } /* * Start the kernel. */ static inline void runkernel(void) { __asm__ __volatile__( "bis %0,%0,$27\n\t" "jmp ($27)" : /* no outputs: it doesn't even return */ : "r" (START_ADDR)); } /* Must record the SP (it is virtual) on entry, so we can make sure not to overwrite it during movement or decompression. */ unsigned long SP_on_entry; /* Calculate the kernel image address based on the end of the BOOTP bootstrapper (ie this program). */ extern char _end; #define KERNEL_ORIGIN \ ((((unsigned long)&_end) + 511) & ~511) /* Round address to next higher page boundary. */ #define NEXT_PAGE(a) (((a) | (PAGE_SIZE - 1)) + 1) #ifdef INITRD_IMAGE_SIZE # define REAL_INITRD_SIZE INITRD_IMAGE_SIZE #else # define REAL_INITRD_SIZE 0 #endif /* Defines from include/asm-alpha/system.h BOOT_ADDR Virtual address at which the consoles loads the BOOTP image. KERNEL_START KSEG address at which the kernel is built to run, which includes some initial data pages before the code. START_ADDR KSEG address of the entry point of kernel code. ZERO_PGE KSEG address of page full of zeroes, but upon entry to kerne cvan be expected to hold the parameter list and possible INTRD information. These are used in the local defines below. */ /* Virtual addresses for the BOOTP image. Note that this includes the bootstrapper code as well as the compressed kernel image, and possibly the INITRD image. Oh, and do NOT forget the STACK, which appears to be placed virtually beyond the end of the loaded image. */ #define V_BOOT_IMAGE_START BOOT_ADDR #define V_BOOT_IMAGE_END SP_on_entry /* Virtual addresses for just the bootstrapper part of the BOOTP image. */ #define V_BOOTSTRAPPER_START BOOT_ADDR #define V_BOOTSTRAPPER_END KERNEL_ORIGIN /* Virtual addresses for just the data part of the BOOTP image. This may also include the INITRD image, but always includes the STACK. */ #define V_DATA_START KERNEL_ORIGIN #define V_INITRD_START (KERNEL_ORIGIN + KERNEL_Z_SIZE) #define V_INTRD_END (V_INITRD_START + REAL_INITRD_SIZE) #define V_DATA_END V_BOOT_IMAGE_END /* KSEG addresses for the uncompressed kernel. Note that the end address includes workspace for the decompression. Note also that the DATA_START address is ZERO_PGE, to which we write just before jumping to the kernel image at START_ADDR. */ #define K_KERNEL_DATA_START ZERO_PGE #define K_KERNEL_IMAGE_START START_ADDR #define K_KERNEL_IMAGE_END (START_ADDR + KERNEL_SIZE) /* Define to where we may have to decompress the kernel image, before we move it to the final position, in case of overlap. This will be above the final position of the kernel. Regardless of overlap, we move the INITRD image to the end of this copy area, because there needs to be a buffer area after the kernel for "bootmem" anyway. */ #define K_COPY_IMAGE_START NEXT_PAGE(K_KERNEL_IMAGE_END) /* Reserve one page below INITRD for the new stack. 
*/ #define K_INITRD_START \ NEXT_PAGE(K_COPY_IMAGE_START + KERNEL_SIZE + PAGE_SIZE) #define K_COPY_IMAGE_END \ (K_INITRD_START + REAL_INITRD_SIZE + MALLOC_AREA_SIZE) #define K_COPY_IMAGE_SIZE \ NEXT_PAGE(K_COPY_IMAGE_END - K_COPY_IMAGE_START) void start_kernel(void) { int must_move = 0; /* Initialize these for the decompression-in-place situation, which is the smallest amount of work and most likely to occur when using the normal START_ADDR of the kernel (currently set to 16MB, to clear all console code). */ unsigned long uncompressed_image_start = K_KERNEL_IMAGE_START; unsigned long uncompressed_image_end = K_KERNEL_IMAGE_END; unsigned long initrd_image_start = K_INITRD_START; /* * Note that this crufty stuff with static and envval * and envbuf is because: * * 1. Frequently, the stack is short, and we don't want to overrun; * 2. Frequently the stack is where we are going to copy the kernel to; * 3. A certain SRM console required the GET_ENV output to the stack. * ??? A comment in the aboot sources indicates that the GET_ENV * destination must be quadword aligned. Might this explain the * behaviour, rather than requiring output to the stack, which * seems rather far-fetched? */ static long nbytes; static char envval[256] __attribute__((aligned(8))); register unsigned long asm_sp asm("30"); SP_on_entry = asm_sp; srm_printk("Linux/Alpha BOOTPZ Loader for Linux " UTS_RELEASE "\n"); /* Validity check the HWRPB. */ if (INIT_HWRPB->pagesize != 8192) { srm_printk("Expected 8kB pages, got %ldkB\n", INIT_HWRPB->pagesize >> 10); return; } if (INIT_HWRPB->vptb != (unsigned long) VPTB) { srm_printk("Expected vptb at %p, got %p\n", VPTB, (void *)INIT_HWRPB->vptb); return; } /* PALcode (re)initialization. */ pal_init(); /* Get the parameter list from the console environment variable. */ nbytes = callback_getenv(ENV_BOOTED_OSFLAGS, envval, sizeof(envval)); if (nbytes < 0 || nbytes >= sizeof(envval)) { nbytes = 0; } envval[nbytes] = '\0'; #ifdef DEBUG_ADDRESSES srm_printk("START_ADDR 0x%lx\n", START_ADDR); srm_printk("KERNEL_ORIGIN 0x%lx\n", KERNEL_ORIGIN); srm_printk("KERNEL_SIZE 0x%x\n", KERNEL_SIZE); srm_printk("KERNEL_Z_SIZE 0x%x\n", KERNEL_Z_SIZE); #endif /* Since all the SRM consoles load the BOOTP image at virtual * 0x20000000, we have to ensure that the physical memory * pages occupied by that image do NOT overlap the physical * address range where the kernel wants to be run. This * causes real problems when attempting to decompress the * former into the latter... :-( * * So, we may have to decompress/move the kernel/INITRD image * virtual-to-physical someplace else first before moving * kernel/INITRD to their final resting places... ;-} * * Sigh... */ /* First, check to see if the range of addresses occupied by the bootstrapper part of the BOOTP image includes any of the physical pages into which the kernel will be placed for execution. We only need to check the final kernel image range, since we will put the INITRD someplace that we can be sure is not in conflict. */ if (check_range(V_BOOTSTRAPPER_START, V_BOOTSTRAPPER_END, K_KERNEL_DATA_START, K_KERNEL_IMAGE_END)) { srm_printk("FATAL ERROR: overlap of bootstrapper code\n"); __halt(); } /* Next, check to see if the range of addresses occupied by the compressed kernel/INITRD/stack portion of the BOOTP image includes any of the physical pages into which the decompressed kernel or the INITRD will be placed for execution.
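Worked example (hypothetical addresses): if the compressed image's physical pages fall anywhere inside [K_KERNEL_IMAGE_START, K_COPY_IMAGE_END], decompressing in place would overwrite the still-compressed source, so we decompress into K_COPY_IMAGE_START and memcpy() the result to its final address afterwards.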
*/ if (check_range(V_DATA_START, V_DATA_END, K_KERNEL_IMAGE_START, K_COPY_IMAGE_END)) { #ifdef DEBUG_ADDRESSES srm_printk("OVERLAP: cannot decompress in place\n"); #endif uncompressed_image_start = K_COPY_IMAGE_START; uncompressed_image_end = K_COPY_IMAGE_END; must_move = 1; /* Finally, check to see if the range of addresses occupied by the compressed kernel/INITRD part of the BOOTP image includes any of the physical pages into which that part is to be copied for decompression. */ while (check_range(V_DATA_START, V_DATA_END, uncompressed_image_start, uncompressed_image_end)) { #if 0 uncompressed_image_start += K_COPY_IMAGE_SIZE; uncompressed_image_end += K_COPY_IMAGE_SIZE; initrd_image_start += K_COPY_IMAGE_SIZE; #else /* Keep as close as possible to end of BOOTP image. */ uncompressed_image_start += PAGE_SIZE; uncompressed_image_end += PAGE_SIZE; initrd_image_start += PAGE_SIZE; #endif } } srm_printk("Starting to load the kernel with args '%s'\n", envval); #ifdef DEBUG_ADDRESSES srm_printk("Decompressing the kernel...\n" "...from 0x%lx to 0x%lx size 0x%x\n", V_DATA_START, uncompressed_image_start, KERNEL_SIZE); #endif decompress_kernel((void *)uncompressed_image_start, (void *)V_DATA_START, KERNEL_SIZE, KERNEL_Z_SIZE); /* * Now, move things to their final positions, if/as required. */ #ifdef INITRD_IMAGE_SIZE /* First, we always move the INITRD image, if present. */ #ifdef DEBUG_ADDRESSES srm_printk("Moving the INITRD image...\n" " from 0x%lx to 0x%lx size 0x%x\n", V_INITRD_START, initrd_image_start, INITRD_IMAGE_SIZE); #endif memcpy((void *)initrd_image_start, (void *)V_INITRD_START, INITRD_IMAGE_SIZE); #endif /* INITRD_IMAGE_SIZE */ /* Next, we may have to move the uncompressed kernel to the final destination. */ if (must_move) { #ifdef DEBUG_ADDRESSES srm_printk("Moving the uncompressed kernel...\n" "...from 0x%lx to 0x%lx size 0x%x\n", uncompressed_image_start, K_KERNEL_IMAGE_START, (unsigned)KERNEL_SIZE); #endif /* * Move the stack to a safe place to ensure it won't be * overwritten by the kernel image. */ move_stack(initrd_image_start - PAGE_SIZE); memcpy((void *)K_KERNEL_IMAGE_START, (void *)uncompressed_image_start, KERNEL_SIZE); } /* Clear the zero page, then move the argument list in. */ #ifdef DEBUG_LAST_STEPS srm_printk("Preparing ZERO_PGE...\n"); #endif memset((char*)ZERO_PGE, 0, PAGE_SIZE); strcpy((char*)ZERO_PGE, envval); #ifdef INITRD_IMAGE_SIZE #ifdef DEBUG_LAST_STEPS srm_printk("Preparing INITRD info...\n"); #endif /* Finally, set the INITRD parameters for the kernel. */ ((long *)(ZERO_PGE+256))[0] = initrd_image_start; ((long *)(ZERO_PGE+256))[1] = INITRD_IMAGE_SIZE; #endif /* INITRD_IMAGE_SIZE */ #ifdef DEBUG_LAST_STEPS srm_printk("Doing 'runkernel()'...\n"); #endif runkernel(); } /* dummy function, should never be called. */ void *__kmalloc(size_t size, gfp_t flags) { return (void *)NULL; }
gpl-2.0
TheNameIsNigel/android_kernel_huawei_msm8916
tools/perf/builtin-diff.c
2077
14335
/* * builtin-diff.c * * Builtin diff command: Analyze two perf.data input files, look up and read * DSOs and symbol information, sort them and produce a diff. */ #include "builtin.h" #include "util/debug.h" #include "util/event.h" #include "util/hist.h" #include "util/evsel.h" #include "util/evlist.h" #include "util/session.h" #include "util/tool.h" #include "util/sort.h" #include "util/symbol.h" #include "util/util.h" #include <stdlib.h> static char const *input_old = "perf.data.old", *input_new = "perf.data"; static char diff__default_sort_order[] = "dso,symbol"; static bool force; static bool show_period; static bool show_formula; static bool show_baseline_only; static bool sort_compute; static s64 compute_wdiff_w1; static s64 compute_wdiff_w2; enum { COMPUTE_DELTA, COMPUTE_RATIO, COMPUTE_WEIGHTED_DIFF, COMPUTE_MAX, }; const char *compute_names[COMPUTE_MAX] = { [COMPUTE_DELTA] = "delta", [COMPUTE_RATIO] = "ratio", [COMPUTE_WEIGHTED_DIFF] = "wdiff", }; static int compute; static int setup_compute_opt_wdiff(char *opt) { char *w1_str = opt; char *w2_str; int ret = -EINVAL; if (!opt) goto out; w2_str = strchr(opt, ','); if (!w2_str) goto out; *w2_str++ = 0x0; if (!*w2_str) goto out; compute_wdiff_w1 = strtol(w1_str, NULL, 10); compute_wdiff_w2 = strtol(w2_str, NULL, 10); if (!compute_wdiff_w1 || !compute_wdiff_w2) goto out; pr_debug("compute wdiff w1(%" PRId64 ") w2(%" PRId64 ")\n", compute_wdiff_w1, compute_wdiff_w2); ret = 0; out: if (ret) pr_err("Failed: wrong weight data, use 'wdiff:w1,w2'\n"); return ret; } static int setup_compute_opt(char *opt) { if (compute == COMPUTE_WEIGHTED_DIFF) return setup_compute_opt_wdiff(opt); if (opt) { pr_err("Failed: extra option specified '%s'", opt); return -EINVAL; } return 0; } static int setup_compute(const struct option *opt, const char *str, int unset __maybe_unused) { int *cp = (int *) opt->value; char *cstr = (char *) str; char buf[50]; unsigned i; char *option; if (!str) { *cp = COMPUTE_DELTA; return 0; } if (*str == '+') { sort_compute = true; cstr = (char *) ++str; if (!*str) return 0; } option = strchr(str, ':'); if (option) { unsigned len = option++ - str; /* * The str data are not writeable, so we need * to use another buffer. */ /* No option value is longer. 
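(For example, a hypothetical '-c wdiff:3,1' copies the method name "wdiff" into buf and hands the option string "3,1" to setup_compute_opt().)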
*/ if (len >= sizeof(buf)) return -EINVAL; strncpy(buf, str, len); buf[len] = 0x0; cstr = buf; } for (i = 0; i < COMPUTE_MAX; i++) if (!strcmp(cstr, compute_names[i])) { *cp = i; return setup_compute_opt(option); } pr_err("Failed: '%s' is not computation method " "(use 'delta','ratio' or 'wdiff')\n", str); return -EINVAL; } double perf_diff__period_percent(struct hist_entry *he, u64 period) { u64 total = he->hists->stats.total_period; return (period * 100.0) / total; } double perf_diff__compute_delta(struct hist_entry *he, struct hist_entry *pair) { double new_percent = perf_diff__period_percent(he, he->stat.period); double old_percent = perf_diff__period_percent(pair, pair->stat.period); he->diff.period_ratio_delta = new_percent - old_percent; he->diff.computed = true; return he->diff.period_ratio_delta; } double perf_diff__compute_ratio(struct hist_entry *he, struct hist_entry *pair) { double new_period = he->stat.period; double old_period = pair->stat.period; he->diff.computed = true; he->diff.period_ratio = new_period / old_period; return he->diff.period_ratio; } s64 perf_diff__compute_wdiff(struct hist_entry *he, struct hist_entry *pair) { u64 new_period = he->stat.period; u64 old_period = pair->stat.period; he->diff.computed = true; he->diff.wdiff = new_period * compute_wdiff_w2 - old_period * compute_wdiff_w1; return he->diff.wdiff; } static int formula_delta(struct hist_entry *he, struct hist_entry *pair, char *buf, size_t size) { return scnprintf(buf, size, "(%" PRIu64 " * 100 / %" PRIu64 ") - " "(%" PRIu64 " * 100 / %" PRIu64 ")", he->stat.period, he->hists->stats.total_period, pair->stat.period, pair->hists->stats.total_period); } static int formula_ratio(struct hist_entry *he, struct hist_entry *pair, char *buf, size_t size) { double new_period = he->stat.period; double old_period = pair->stat.period; return scnprintf(buf, size, "%.0F / %.0F", new_period, old_period); } static int formula_wdiff(struct hist_entry *he, struct hist_entry *pair, char *buf, size_t size) { u64 new_period = he->stat.period; u64 old_period = pair->stat.period; return scnprintf(buf, size, "(%" PRIu64 " * " "%" PRId64 ") - (%" PRIu64 " * " "%" PRId64 ")", new_period, compute_wdiff_w2, old_period, compute_wdiff_w1); } int perf_diff__formula(struct hist_entry *he, struct hist_entry *pair, char *buf, size_t size) { switch (compute) { case COMPUTE_DELTA: return formula_delta(he, pair, buf, size); case COMPUTE_RATIO: return formula_ratio(he, pair, buf, size); case COMPUTE_WEIGHTED_DIFF: return formula_wdiff(he, pair, buf, size); default: BUG_ON(1); } return -1; } static int hists__add_entry(struct hists *self, struct addr_location *al, u64 period, u64 weight) { if (__hists__add_entry(self, al, NULL, period, weight) != NULL) return 0; return -ENOMEM; } static int diff__process_sample_event(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) { struct addr_location al; if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) { pr_warning("problem processing %d event, skipping it.\n", event->header.type); return -1; } if (al.filtered) return 0; if (hists__add_entry(&evsel->hists, &al, sample->period, sample->weight)) { pr_warning("problem incrementing symbol period, skipping event\n"); return -1; } evsel->hists.stats.total_period += sample->period; return 0; } static struct perf_tool tool = { .sample = diff__process_sample_event, .mmap = perf_event__process_mmap, .comm = perf_event__process_comm, .exit = 
perf_event__process_exit, .fork = perf_event__process_fork, .lost = perf_event__process_lost, .ordered_samples = true, .ordering_requires_timestamps = true, }; static struct perf_evsel *evsel_match(struct perf_evsel *evsel, struct perf_evlist *evlist) { struct perf_evsel *e; list_for_each_entry(e, &evlist->entries, node) if (perf_evsel__match2(evsel, e)) return e; return NULL; } static void perf_evlist__collapse_resort(struct perf_evlist *evlist) { struct perf_evsel *evsel; list_for_each_entry(evsel, &evlist->entries, node) { struct hists *hists = &evsel->hists; hists__collapse_resort(hists); } } static void hists__baseline_only(struct hists *hists) { struct rb_root *root; struct rb_node *next; if (sort__need_collapse) root = &hists->entries_collapsed; else root = hists->entries_in; next = rb_first(root); while (next != NULL) { struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node_in); next = rb_next(&he->rb_node_in); if (!hist_entry__next_pair(he)) { rb_erase(&he->rb_node_in, root); hist_entry__free(he); } } } static void hists__precompute(struct hists *hists) { struct rb_node *next = rb_first(&hists->entries); while (next != NULL) { struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node); struct hist_entry *pair = hist_entry__next_pair(he); next = rb_next(&he->rb_node); if (!pair) continue; switch (compute) { case COMPUTE_DELTA: perf_diff__compute_delta(he, pair); break; case COMPUTE_RATIO: perf_diff__compute_ratio(he, pair); break; case COMPUTE_WEIGHTED_DIFF: perf_diff__compute_wdiff(he, pair); break; default: BUG_ON(1); } } } static int64_t cmp_doubles(double l, double r) { if (l > r) return -1; else if (l < r) return 1; else return 0; } static int64_t hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right, int c) { switch (c) { case COMPUTE_DELTA: { double l = left->diff.period_ratio_delta; double r = right->diff.period_ratio_delta; return cmp_doubles(l, r); } case COMPUTE_RATIO: { double l = left->diff.period_ratio; double r = right->diff.period_ratio; return cmp_doubles(l, r); } case COMPUTE_WEIGHTED_DIFF: { s64 l = left->diff.wdiff; s64 r = right->diff.wdiff; return r - l; } default: BUG_ON(1); } return 0; } static void insert_hist_entry_by_compute(struct rb_root *root, struct hist_entry *he, int c) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; struct hist_entry *iter; while (*p != NULL) { parent = *p; iter = rb_entry(parent, struct hist_entry, rb_node); if (hist_entry__cmp_compute(he, iter, c) < 0) p = &(*p)->rb_left; else p = &(*p)->rb_right; } rb_link_node(&he->rb_node, parent, p); rb_insert_color(&he->rb_node, root); } static void hists__compute_resort(struct hists *hists) { struct rb_root *root; struct rb_node *next; if (sort__need_collapse) root = &hists->entries_collapsed; else root = hists->entries_in; hists->entries = RB_ROOT; next = rb_first(root); hists->nr_entries = 0; hists->stats.total_period = 0; hists__reset_col_len(hists); while (next != NULL) { struct hist_entry *he; he = rb_entry(next, struct hist_entry, rb_node_in); next = rb_next(&he->rb_node_in); insert_hist_entry_by_compute(&hists->entries, he, compute); hists__inc_nr_entries(hists, he); } } static void hists__process(struct hists *old, struct hists *new) { hists__match(new, old); if (show_baseline_only) hists__baseline_only(new); else hists__link(new, old); if (sort_compute) { hists__precompute(new); hists__compute_resort(new); } else { hists__output_resort(new); } hists__fprintf(new, true, 0, 0, stdout); } static int __cmd_diff(void) { int 
ret, i; #define older (session[0]) #define newer (session[1]) struct perf_session *session[2]; struct perf_evlist *evlist_new, *evlist_old; struct perf_evsel *evsel; bool first = true; older = perf_session__new(input_old, O_RDONLY, force, false, &tool); newer = perf_session__new(input_new, O_RDONLY, force, false, &tool); if (session[0] == NULL || session[1] == NULL) return -ENOMEM; for (i = 0; i < 2; ++i) { ret = perf_session__process_events(session[i], &tool); if (ret) goto out_delete; } evlist_old = older->evlist; evlist_new = newer->evlist; perf_evlist__collapse_resort(evlist_old); perf_evlist__collapse_resort(evlist_new); list_for_each_entry(evsel, &evlist_new->entries, node) { struct perf_evsel *evsel_old; evsel_old = evsel_match(evsel, evlist_old); if (!evsel_old) continue; fprintf(stdout, "%s# Event '%s'\n#\n", first ? "" : "\n", perf_evsel__name(evsel)); first = false; hists__process(&evsel_old->hists, &evsel->hists); } out_delete: for (i = 0; i < 2; ++i) perf_session__delete(session[i]); return ret; #undef older #undef newer } static const char * const diff_usage[] = { "perf diff [<options>] [old_file] [new_file]", NULL, }; static const struct option options[] = { OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), OPT_BOOLEAN('b', "baseline-only", &show_baseline_only, "Show only items with match in baseline"), OPT_CALLBACK('c', "compute", &compute, "delta,ratio,wdiff:w1,w2 (default delta)", "Entries differential computation selection", setup_compute), OPT_BOOLEAN('p', "period", &show_period, "Show period values."), OPT_BOOLEAN('F', "formula", &show_formula, "Show formula."), OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, "load module symbols - WARNING: use only with -k and LIVE kernel"), OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", "only consider symbols in these dsos"), OPT_STRING('C', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", "only consider symbols in these comms"), OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", "only consider these symbols"), OPT_STRING('s', "sort", &sort_order, "key[,key2...]", "sort by key(s): pid, comm, dso, symbol, parent"), OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator", "separator for columns, no spaces will be added between " "columns '.' is reserved."), OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", "Look for files with symbols relative to this directory"), OPT_END() }; static void ui_init(void) { /* * Display baseline/delta/ratio * formula/periods columns. 
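* * For instance (hypothetical invocation), 'perf diff -c ratio -p' enables * the BASELINE and RATIO columns plus, via show_period, both PERIOD * columns below.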
*/ perf_hpp__column_enable(PERF_HPP__BASELINE); switch (compute) { case COMPUTE_DELTA: perf_hpp__column_enable(PERF_HPP__DELTA); break; case COMPUTE_RATIO: perf_hpp__column_enable(PERF_HPP__RATIO); break; case COMPUTE_WEIGHTED_DIFF: perf_hpp__column_enable(PERF_HPP__WEIGHTED_DIFF); break; default: BUG_ON(1); }; if (show_formula) perf_hpp__column_enable(PERF_HPP__FORMULA); if (show_period) { perf_hpp__column_enable(PERF_HPP__PERIOD); perf_hpp__column_enable(PERF_HPP__PERIOD_BASELINE); } } int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused) { sort_order = diff__default_sort_order; argc = parse_options(argc, argv, options, diff_usage, 0); if (argc) { if (argc > 2) usage_with_options(diff_usage, options); if (argc == 2) { input_old = argv[0]; input_new = argv[1]; } else input_new = argv[0]; } else if (symbol_conf.default_guest_vmlinux_name || symbol_conf.default_guest_kallsyms) { input_old = "perf.data.host"; input_new = "perf.data.guest"; } symbol_conf.exclude_other = false; if (symbol__init() < 0) return -1; ui_init(); if (setup_sorting() < 0) usage_with_options(diff_usage, options); setup_pager(); sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", NULL); sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", NULL); sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", NULL); return __cmd_diff(); }
gpl-2.0
ptmr3/GalaxyNote2_Kernel2
arch/powerpc/kernel/pci_64.c
2333
7665
/* * Port for PPC64 David Engebretsen, IBM Corp. * Contains common pci routines for ppc64 platform, pSeries and iSeries brands. * * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM * Rework, based on alpha PCI code. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #undef DEBUG #include <linux/kernel.h> #include <linux/pci.h> #include <linux/string.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/mm.h> #include <linux/list.h> #include <linux/syscalls.h> #include <linux/irq.h> #include <linux/vmalloc.h> #include <asm/processor.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/byteorder.h> #include <asm/machdep.h> #include <asm/ppc-pci.h> unsigned long pci_probe_only = 1; /* pci_io_base -- the base address from which io bars are offsets. * This is the lowest I/O base address (so bar values are always positive), * and it *must* be the start of ISA space if an ISA bus exists because * ISA drivers use hard coded offsets. If no ISA bus exists, nothing * is mapped on the first 64K of IO space */ unsigned long pci_io_base = ISA_IO_BASE; EXPORT_SYMBOL(pci_io_base); static int __init pcibios_init(void) { struct pci_controller *hose, *tmp; printk(KERN_INFO "PCI: Probing PCI hardware\n"); /* For now, override phys_mem_access_prot. If we need it, * later, we may move that initialization to each ppc_md */ ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot; if (pci_probe_only) ppc_pci_flags |= PPC_PCI_PROBE_ONLY; /* On ppc64, we always enable PCI domains and we keep domain 0 * backward compatible in /proc for video cards */ ppc_pci_flags |= PPC_PCI_ENABLE_PROC_DOMAINS | PPC_PCI_COMPAT_DOMAIN_0; /* Scan all of the recorded PCI controllers. */ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { pcibios_scan_phb(hose); pci_bus_add_devices(hose->bus); } /* Call common code to handle resource allocation */ pcibios_resource_survey(); printk(KERN_DEBUG "PCI: Probing PCI hardware done\n"); return 0; } subsys_initcall(pcibios_init); #ifdef CONFIG_HOTPLUG int pcibios_unmap_io_space(struct pci_bus *bus) { struct pci_controller *hose; WARN_ON(bus == NULL); /* If this is not a PHB, we only flush the hash table over * the area mapped by this bridge.
We don't play with the PTE * mappings since we might have to deal with sub-page alignments * so flushing the hash table is the only sane way to make sure * that no hash entries are covering that removed bridge area * while still allowing other busses to overlap those pages * * Note: If we ever support P2P hotplug on Book3E, we'll have * to do an appropriate TLB flush here too */ if (bus->self) { #ifdef CONFIG_PPC_STD_MMU_64 struct resource *res = bus->resource[0]; #endif pr_debug("IO unmapping for PCI-PCI bridge %s\n", pci_name(bus->self)); #ifdef CONFIG_PPC_STD_MMU_64 __flush_hash_table_range(&init_mm, res->start + _IO_BASE, res->end + _IO_BASE + 1); #endif return 0; } /* Get the host bridge */ hose = pci_bus_to_host(bus); /* Check if we have IOs allocated */ if (hose->io_base_alloc == 0) return 0; pr_debug("IO unmapping for PHB %s\n", hose->dn->full_name); pr_debug(" alloc=0x%p\n", hose->io_base_alloc); /* This is a PHB, we fully unmap the IO area */ vunmap(hose->io_base_alloc); return 0; } EXPORT_SYMBOL_GPL(pcibios_unmap_io_space); #endif /* CONFIG_HOTPLUG */ int __devinit pcibios_map_io_space(struct pci_bus *bus) { struct vm_struct *area; unsigned long phys_page; unsigned long size_page; unsigned long io_virt_offset; struct pci_controller *hose; WARN_ON(bus == NULL); /* If this is not a PHB, nothing to do, page tables still exist and * thus HPTEs will be faulted in when needed */ if (bus->self) { pr_debug("IO mapping for PCI-PCI bridge %s\n", pci_name(bus->self)); pr_debug(" virt=0x%016llx...0x%016llx\n", bus->resource[0]->start + _IO_BASE, bus->resource[0]->end + _IO_BASE); return 0; } /* Get the host bridge */ hose = pci_bus_to_host(bus); phys_page = _ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE); size_page = _ALIGN_UP(hose->pci_io_size, PAGE_SIZE); /* Make sure IO area address is clear */ hose->io_base_alloc = NULL; /* If there's no IO to map on that bus, get away too */ if (hose->pci_io_size == 0 || hose->io_base_phys == 0) return 0; /* Let's allocate some IO space for that guy. We don't pass * VM_IOREMAP because we don't care about alignment tricks that * the core does in that case. Maybe we should, due to stupid cards * with incomplete address decoding, but I'd rather not deal with * those outside of the reserved 64K legacy region.
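* * Illustrative numbers (hypothetical): for a PHB with 64K of IO space, * __get_vm_area() below returns a page-aligned chunk inside * [PHB_IO_BASE, PHB_IO_END], and io_base_virt keeps the same offset * within the page as io_base_phys.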
*/ area = __get_vm_area(size_page, 0, PHB_IO_BASE, PHB_IO_END); if (area == NULL) return -ENOMEM; hose->io_base_alloc = area->addr; hose->io_base_virt = (void __iomem *)(area->addr + hose->io_base_phys - phys_page); pr_debug("IO mapping for PHB %s\n", hose->dn->full_name); pr_debug(" phys=0x%016llx, virt=0x%p (alloc=0x%p)\n", hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc); pr_debug(" size=0x%016llx (alloc=0x%016lx)\n", hose->pci_io_size, size_page); /* Establish the mapping */ if (__ioremap_at(phys_page, area->addr, size_page, _PAGE_NO_CACHE | _PAGE_GUARDED) == NULL) return -ENOMEM; /* Fixup hose IO resource */ io_virt_offset = (unsigned long)hose->io_base_virt - _IO_BASE; hose->io_resource.start += io_virt_offset; hose->io_resource.end += io_virt_offset; pr_debug(" hose->io_resource=%pR\n", &hose->io_resource); return 0; } EXPORT_SYMBOL_GPL(pcibios_map_io_space); void __devinit pcibios_setup_phb_io_space(struct pci_controller *hose) { pcibios_map_io_space(hose->bus); } #define IOBASE_BRIDGE_NUMBER 0 #define IOBASE_MEMORY 1 #define IOBASE_IO 2 #define IOBASE_ISA_IO 3 #define IOBASE_ISA_MEM 4 long sys_pciconfig_iobase(long which, unsigned long in_bus, unsigned long in_devfn) { struct pci_controller* hose; struct list_head *ln; struct pci_bus *bus = NULL; struct device_node *hose_node; /* Argh ! Please forgive me for that hack, but that's the * simplest way to get existing XFree to not lockup on some * G5 machines... So when something asks for bus 0 io base * (bus 0 is HT root), we return the AGP one instead. */ if (in_bus == 0 && of_machine_is_compatible("MacRISC4")) { struct device_node *agp; agp = of_find_compatible_node(NULL, NULL, "u3-agp"); if (agp) in_bus = 0xf0; of_node_put(agp); } /* That syscall isn't quite compatible with PCI domains, but it's * used on pre-domains setup. We return the first match */ for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) { bus = pci_bus_b(ln); if (in_bus >= bus->number && in_bus <= bus->subordinate) break; bus = NULL; } if (bus == NULL || bus->dev.of_node == NULL) return -ENODEV; hose_node = bus->dev.of_node; hose = PCI_DN(hose_node)->phb; switch (which) { case IOBASE_BRIDGE_NUMBER: return (long)hose->first_busno; case IOBASE_MEMORY: return (long)hose->pci_mem_offset; case IOBASE_IO: return (long)hose->io_base_phys; case IOBASE_ISA_IO: return (long)isa_io_base; case IOBASE_ISA_MEM: return -EINVAL; } return -EOPNOTSUPP; } #ifdef CONFIG_NUMA int pcibus_to_node(struct pci_bus *bus) { struct pci_controller *phb = pci_bus_to_host(bus); return phb->node; } EXPORT_SYMBOL(pcibus_to_node); #endif
gpl-2.0
CyanogenMod/android_kernel_bn_acclaim
drivers/net/wireless/ath/ath5k/dma.c
2589
21997
/* * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org> * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * */ /*************************************\ * DMA and interrupt masking functions * \*************************************/ /* * dma.c - DMA and interrupt masking functions * * Here we set up descriptor pointers (rxdp/txdp), start/stop the dma engine and * handle queue setup for the 5210 chipset (the rest are handled in qcu.c). * Also we set up the interrupt mask register (IMR) and read the various interrupt * status registers (ISR). * * TODO: Handle SISR on 5211+ and introduce a function to return the queue * number that resulted in the interrupt. */ #include "ath5k.h" #include "reg.h" #include "debug.h" #include "base.h" /*********\ * Receive * \*********/ /** * ath5k_hw_start_rx_dma - Start DMA receive * * @ah: The &struct ath5k_hw */ void ath5k_hw_start_rx_dma(struct ath5k_hw *ah) { ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR); ath5k_hw_reg_read(ah, AR5K_CR); } /** * ath5k_hw_stop_rx_dma - Stop DMA receive * * @ah: The &struct ath5k_hw */ static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah) { unsigned int i; ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR); /* * It may take some time to disable the DMA receive unit */ for (i = 1000; i > 0 && (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0; i--) udelay(100); if (!i) ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA, "failed to stop RX DMA !\n"); return i ? 0 : -EBUSY; } /** * ath5k_hw_get_rxdp - Get RX Descriptor's address * * @ah: The &struct ath5k_hw */ u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah) { return ath5k_hw_reg_read(ah, AR5K_RXDP); } /** * ath5k_hw_set_rxdp - Set RX Descriptor's address * * @ah: The &struct ath5k_hw * @phys_addr: RX descriptor address * * Returns -EIO if rx is active */ int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr) { if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) { ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA, "tried to set RXDP while rx was active !\n"); return -EIO; } ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP); return 0; } /**********\ * Transmit * \**********/ /** * ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue * * @ah: The &struct ath5k_hw * @queue: The hw queue number * * Start DMA transmit for a specific queue and since 5210 doesn't have * QCU/DCU, set up queue parameters for 5210 here based on queue type (one * queue for normal data and one queue for beacons). For queue setup * on newer chips check out qcu.c. Returns -EINVAL if queue number is out * of range or if queue is already disabled. * * NOTE: Must be called after setting up tx control descriptor for that * queue (see below).
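* * Hypothetical call order: ath5k_hw_set_txdp(ah, queue, phys_addr) first, * then ath5k_hw_start_tx_dma(ah, queue); on 5211+ the TXDP write is * refused with -EIO once the queue has been enabled.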
*/ int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue) { u32 tx_queue; AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); /* Return if queue is declared inactive */ if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) return -EINVAL; if (ah->ah_version == AR5K_AR5210) { tx_queue = ath5k_hw_reg_read(ah, AR5K_CR); /* * Set the queue by type on 5210 */ switch (ah->ah_txq[queue].tqi_type) { case AR5K_TX_QUEUE_DATA: tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0; break; case AR5K_TX_QUEUE_BEACON: tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1; ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE, AR5K_BSR); break; case AR5K_TX_QUEUE_CAB: tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1; ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V | AR5K_BCR_BDMAE, AR5K_BSR); break; default: return -EINVAL; } /* Start queue */ ath5k_hw_reg_write(ah, tx_queue, AR5K_CR); ath5k_hw_reg_read(ah, AR5K_CR); } else { /* Return if queue is disabled */ if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue)) return -EIO; /* Start queue */ AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue); } return 0; } /** * ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue * * @ah: The &struct ath5k_hw * @queue: The hw queue number * * Stop DMA transmit on a specific hw queue and drain queue so we don't * have any pending frames. Returns -EBUSY if we still have pending frames, * -EINVAL if queue number is out of range or inactive. * */ static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue) { unsigned int i = 40; u32 tx_queue, pending; AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); /* Return if queue is declared inactive */ if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) return -EINVAL; if (ah->ah_version == AR5K_AR5210) { tx_queue = ath5k_hw_reg_read(ah, AR5K_CR); /* * Set by queue type */ switch (ah->ah_txq[queue].tqi_type) { case AR5K_TX_QUEUE_DATA: tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0; break; case AR5K_TX_QUEUE_BEACON: case AR5K_TX_QUEUE_CAB: /* XXX Fix me... 
*/ tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXE1; ath5k_hw_reg_write(ah, 0, AR5K_BSR); break; default: return -EINVAL; } /* Stop queue */ ath5k_hw_reg_write(ah, tx_queue, AR5K_CR); ath5k_hw_reg_read(ah, AR5K_CR); } else { /* * Enable DCU early termination to quickly * flush any pending frames from QCU */ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), AR5K_QCU_MISC_DCU_EARLY); /* * Schedule TX disable and wait until queue is empty */ AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue); /* Wait for queue to stop */ for (i = 1000; i > 0 && (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue) != 0); i--) udelay(100); if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue)) ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA, "queue %i didn't stop !\n", queue); /* Check for pending frames */ i = 1000; do { pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue)) & AR5K_QCU_STS_FRMPENDCNT; udelay(100); } while (--i && pending); /* For 2413+, order PCU to drop packets using * the QUIET mechanism */ if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) && pending) { /* Set periodicity and duration */ ath5k_hw_reg_write(ah, AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER)| AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR), AR5K_QUIET_CTL2); /* Enable quiet period for current TSF */ ath5k_hw_reg_write(ah, AR5K_QUIET_CTL1_QT_EN | AR5K_REG_SM(ath5k_hw_reg_read(ah, AR5K_TSF_L32_5211) >> 10, AR5K_QUIET_CTL1_NEXT_QT_TSF), AR5K_QUIET_CTL1); /* Force channel idle high */ AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211, AR5K_DIAG_SW_CHANNEL_IDLE_HIGH); /* Wait a while and disable mechanism */ udelay(400); AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1, AR5K_QUIET_CTL1_QT_EN); /* Re-check for pending frames */ i = 100; do { pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue)) & AR5K_QCU_STS_FRMPENDCNT; udelay(100); } while (--i && pending); AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211, AR5K_DIAG_SW_CHANNEL_IDLE_HIGH); if (pending) ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA, "quiet mechanism didn't work q:%i !\n", queue); } /* * Disable DCU early termination */ AR5K_REG_DISABLE_BITS(ah, AR5K_QUEUE_MISC(queue), AR5K_QCU_MISC_DCU_EARLY); /* Clear register */ ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD); if (pending) { ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA, "tx dma didn't stop (q:%i, frm:%i) !\n", queue, pending); return -EBUSY; } } /* TODO: Check for success on 5210 else return error */ return 0; } /** * ath5k_hw_stop_beacon_queue - Stop beacon queue * * @ah: The &struct ath5k_hw * @queue: The queue number * * Returns -EIO if queue didn't stop */ int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue) { int ret; ret = ath5k_hw_stop_tx_dma(ah, queue); if (ret) { ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA, "beacon queue didn't stop !\n"); return -EIO; } return 0; } /** * ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue * * @ah: The &struct ath5k_hw * @queue: The hw queue number * * Get TX descriptor's address for a specific queue. For 5210 we ignore * the queue number and use tx queue type since we only have 2 queues. * We use TXDP0 for normal data queue and TXDP1 for beacon queue. * For newer chips with QCU/DCU we just read the corresponding TXDP register. * * XXX: Is TXDP read and clear ?
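* * Example mapping (as implemented below): on 5210 a DATA queue reads * AR5K_NOQCU_TXDP0 and BEACON/CAB queues read AR5K_NOQCU_TXDP1, while on * 5211+ the per-queue AR5K_QUEUE_TXDP(queue) register is read instead.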
*/ u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue) { u16 tx_reg; AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); /* * Get the transmit queue descriptor pointer from the selected queue */ /*5210 doesn't have QCU*/ if (ah->ah_version == AR5K_AR5210) { switch (ah->ah_txq[queue].tqi_type) { case AR5K_TX_QUEUE_DATA: tx_reg = AR5K_NOQCU_TXDP0; break; case AR5K_TX_QUEUE_BEACON: case AR5K_TX_QUEUE_CAB: tx_reg = AR5K_NOQCU_TXDP1; break; default: return 0xffffffff; } } else { tx_reg = AR5K_QUEUE_TXDP(queue); } return ath5k_hw_reg_read(ah, tx_reg); } /** * ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue * * @ah: The &struct ath5k_hw * @queue: The hw queue number * * Set TX descriptor's address for a specific queue. For 5210 we ignore * the queue number and we use tx queue type since we only have 2 queues * so as above we use TXDP0 for normal data queue and TXDP1 for beacon queue. * For newer chips with QCU/DCU we just set the corresponding TXDP register. * Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still * active. */ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr) { u16 tx_reg; AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); /* * Set the transmit queue descriptor pointer register by type * on 5210 */ if (ah->ah_version == AR5K_AR5210) { switch (ah->ah_txq[queue].tqi_type) { case AR5K_TX_QUEUE_DATA: tx_reg = AR5K_NOQCU_TXDP0; break; case AR5K_TX_QUEUE_BEACON: case AR5K_TX_QUEUE_CAB: tx_reg = AR5K_NOQCU_TXDP1; break; default: return -EINVAL; } } else { /* * Set the transmit queue descriptor pointer for * the selected queue on QCU for 5211+ * (this won't work if the queue is still active) */ if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue)) return -EIO; tx_reg = AR5K_QUEUE_TXDP(queue); } /* Set descriptor pointer */ ath5k_hw_reg_write(ah, phys_addr, tx_reg); return 0; } /** * ath5k_hw_update_tx_triglevel - Update tx trigger level * * @ah: The &struct ath5k_hw * @increase: Flag to force increase of trigger level * * This function increases/decreases the tx trigger level for the tx fifo * buffer (aka FIFO threshold) that is used to indicate when the PCU flushes * the buffer and transmits its data. Lowering this results in sending small * frames more quickly but can lead to tx underruns, raising it a lot can * result in other problems (I think bmiss is related). Right now we start with * the lowest possible (64 bytes) and if we get tx underrun we increase it using * the increase flag. Returns -EIO if we have reached maximum/minimum. * * XXX: Link this with tx DMA size ? * XXX: Use it to save interrupts ? * TODO: Needs testing, I think it's related to bmiss...
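* * Worked example (hypothetical values): with a current trigger level of 64 * and AR5K_TUNE_MAX_TX_FIFO_THRES of 512, an increase sets the level to * 64 + (512 - 64) / 2 = 288; a decrease would drop it to 63 unless that * underruns AR5K_TUNE_MIN_TX_FIFO_THRES.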
*/ int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase) { u32 trigger_level, imr; int ret = -EIO; /* * Disable interrupts by setting the mask */ imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL); trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG), AR5K_TXCFG_TXFULL); if (!increase) { if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES) goto done; } else trigger_level += ((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2); /* * Update trigger level on success */ if (ah->ah_version == AR5K_AR5210) ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL); else AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG, AR5K_TXCFG_TXFULL, trigger_level); ret = 0; done: /* * Restore interrupt mask */ ath5k_hw_set_imr(ah, imr); return ret; } /*******************\ * Interrupt masking * \*******************/ /** * ath5k_hw_is_intr_pending - Check if we have pending interrupts * * @ah: The &struct ath5k_hw * * Check if we have pending interrupts to process. Returns 1 if we * have pending interrupts and 0 if we haven't. */ bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah) { return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0; } /** * ath5k_hw_get_isr - Get interrupt status * * @ah: The @struct ath5k_hw * @interrupt_mask: Driver's interrupt mask used to filter out * interrupts in sw. * * This function is used inside our interrupt handler to determine the reason * for the interrupt by reading Primary Interrupt Status Register. Returns an * abstract interrupt status mask which is mostly ISR with some uncommon bits * being mapped on some standard non hw-specific positions * (check out &ath5k_int). * * NOTE: We use read-and-clear register, so after this function is called ISR * is zeroed. */ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask) { u32 data; /* * Read interrupt status from the Interrupt Status register * on 5210 */ if (ah->ah_version == AR5K_AR5210) { data = ath5k_hw_reg_read(ah, AR5K_ISR); if (unlikely(data == AR5K_INT_NOCARD)) { *interrupt_mask = data; return -ENODEV; } } else { /* * Read interrupt status from Interrupt * Status Register shadow copy (Read And Clear) * * Note: PISR/SISR Not available on 5210 */ data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR); if (unlikely(data == AR5K_INT_NOCARD)) { *interrupt_mask = data; return -ENODEV; } } /* * Get abstract interrupt mask (driver-compatible) */ *interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr; if (ah->ah_version != AR5K_AR5210) { u32 sisr2 = ath5k_hw_reg_read(ah, AR5K_RAC_SISR2); /*HIU = Host Interface Unit (PCI etc)*/ if (unlikely(data & (AR5K_ISR_HIUERR))) *interrupt_mask |= AR5K_INT_FATAL; /*Beacon Not Ready*/ if (unlikely(data & (AR5K_ISR_BNR))) *interrupt_mask |= AR5K_INT_BNR; if (unlikely(sisr2 & (AR5K_SISR2_SSERR | AR5K_SISR2_DPERR | AR5K_SISR2_MCABT))) *interrupt_mask |= AR5K_INT_FATAL; if (data & AR5K_ISR_TIM) *interrupt_mask |= AR5K_INT_TIM; if (data & AR5K_ISR_BCNMISC) { if (sisr2 & AR5K_SISR2_TIM) *interrupt_mask |= AR5K_INT_TIM; if (sisr2 & AR5K_SISR2_DTIM) *interrupt_mask |= AR5K_INT_DTIM; if (sisr2 & AR5K_SISR2_DTIM_SYNC) *interrupt_mask |= AR5K_INT_DTIM_SYNC; if (sisr2 & AR5K_SISR2_BCN_TIMEOUT) *interrupt_mask |= AR5K_INT_BCN_TIMEOUT; if (sisr2 & AR5K_SISR2_CAB_TIMEOUT) *interrupt_mask |= AR5K_INT_CAB_TIMEOUT; } if (data & AR5K_ISR_RXDOPPLER) *interrupt_mask |= AR5K_INT_RX_DOPPLER; if (data & AR5K_ISR_QCBRORN) { *interrupt_mask |= AR5K_INT_QCBRORN; ah->ah_txq_isr |= AR5K_REG_MS( ath5k_hw_reg_read(ah, AR5K_RAC_SISR3), AR5K_SISR3_QCBRORN); } if (data & AR5K_ISR_QCBRURN) { *interrupt_mask 
|= AR5K_INT_QCBRURN; ah->ah_txq_isr |= AR5K_REG_MS( ath5k_hw_reg_read(ah, AR5K_RAC_SISR3), AR5K_SISR3_QCBRURN); } if (data & AR5K_ISR_QTRIG) { *interrupt_mask |= AR5K_INT_QTRIG; ah->ah_txq_isr |= AR5K_REG_MS( ath5k_hw_reg_read(ah, AR5K_RAC_SISR4), AR5K_SISR4_QTRIG); } if (data & AR5K_ISR_TXOK) ah->ah_txq_isr |= AR5K_REG_MS( ath5k_hw_reg_read(ah, AR5K_RAC_SISR0), AR5K_SISR0_QCU_TXOK); if (data & AR5K_ISR_TXDESC) ah->ah_txq_isr |= AR5K_REG_MS( ath5k_hw_reg_read(ah, AR5K_RAC_SISR0), AR5K_SISR0_QCU_TXDESC); if (data & AR5K_ISR_TXERR) ah->ah_txq_isr |= AR5K_REG_MS( ath5k_hw_reg_read(ah, AR5K_RAC_SISR1), AR5K_SISR1_QCU_TXERR); if (data & AR5K_ISR_TXEOL) ah->ah_txq_isr |= AR5K_REG_MS( ath5k_hw_reg_read(ah, AR5K_RAC_SISR1), AR5K_SISR1_QCU_TXEOL); if (data & AR5K_ISR_TXURN) ah->ah_txq_isr |= AR5K_REG_MS( ath5k_hw_reg_read(ah, AR5K_RAC_SISR2), AR5K_SISR2_QCU_TXURN); } else { if (unlikely(data & (AR5K_ISR_SSERR | AR5K_ISR_MCABT | AR5K_ISR_HIUERR | AR5K_ISR_DPERR))) *interrupt_mask |= AR5K_INT_FATAL; /* * XXX: BMISS interrupts may occur after association. * I found this on 5210 code but it needs testing. If this is * true we should disable them before assoc and re-enable them * after a successful assoc + some jiffies. interrupt_mask &= ~AR5K_INT_BMISS; */ } /* * In case we didn't handle anything, * print the register value. */ if (unlikely(*interrupt_mask == 0 && net_ratelimit())) ATH5K_PRINTF("ISR: 0x%08x IMR: 0x%08x\n", data, ah->ah_imr); return 0; } /** * ath5k_hw_set_imr - Set interrupt mask * * @ah: The &struct ath5k_hw * @new_mask: The new interrupt mask to be set * * Set the interrupt mask in hw to save interrupts. We do that by mapping * ath5k_int bits to hw-specific bits to remove abstraction and writing * Interrupt Mask Register. */ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask) { enum ath5k_int old_mask, int_mask; old_mask = ah->ah_imr; /* * Disable card interrupts to prevent any race conditions * (they will be re-enabled afterwards if AR5K_INT GLOBAL * is set again on the new mask). */ if (old_mask & AR5K_INT_GLOBAL) { ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER); ath5k_hw_reg_read(ah, AR5K_IER); } /* * Add additional, chipset-dependent interrupt mask flags * and write them to the IMR (interrupt mask register). 
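* * For example (hypothetical mask): new_mask = AR5K_INT_GLOBAL | * AR5K_INT_FATAL maps AR5K_INT_FATAL to AR5K_IMR_HIUERR in PIMR (plus the * MCABT/SSERR/DPERR bits in SIMR2 on 5211+) before IER is written to * re-enable interrupts.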
*/ int_mask = new_mask & AR5K_INT_COMMON; if (ah->ah_version != AR5K_AR5210) { /* Preserve per queue TXURN interrupt mask */ u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2) & AR5K_SIMR2_QCU_TXURN; if (new_mask & AR5K_INT_FATAL) { int_mask |= AR5K_IMR_HIUERR; simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR | AR5K_SIMR2_DPERR); } /* Beacon Not Ready */ if (new_mask & AR5K_INT_BNR) int_mask |= AR5K_INT_BNR; if (new_mask & AR5K_INT_TIM) int_mask |= AR5K_IMR_TIM; if (new_mask & AR5K_INT_TIM) simr2 |= AR5K_SISR2_TIM; if (new_mask & AR5K_INT_DTIM) simr2 |= AR5K_SISR2_DTIM; if (new_mask & AR5K_INT_DTIM_SYNC) simr2 |= AR5K_SISR2_DTIM_SYNC; if (new_mask & AR5K_INT_BCN_TIMEOUT) simr2 |= AR5K_SISR2_BCN_TIMEOUT; if (new_mask & AR5K_INT_CAB_TIMEOUT) simr2 |= AR5K_SISR2_CAB_TIMEOUT; if (new_mask & AR5K_INT_RX_DOPPLER) int_mask |= AR5K_IMR_RXDOPPLER; /* Note: Per queue interrupt masks * are set via reset_tx_queue (qcu.c) */ ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR); ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2); } else { if (new_mask & AR5K_INT_FATAL) int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT | AR5K_IMR_HIUERR | AR5K_IMR_DPERR); ath5k_hw_reg_write(ah, int_mask, AR5K_IMR); } /* If RXNOFRM interrupt is masked, disable it * by setting AR5K_RXNOFRM to zero */ if (!(new_mask & AR5K_INT_RXNOFRM)) ath5k_hw_reg_write(ah, 0, AR5K_RXNOFRM); /* Store new interrupt mask */ ah->ah_imr = new_mask; /* Re-enable interrupts if AR5K_INT_GLOBAL is set */ if (new_mask & AR5K_INT_GLOBAL) { ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER); ath5k_hw_reg_read(ah, AR5K_IER); } return old_mask; } /********************\ Init/Stop functions \********************/ /** * ath5k_hw_dma_init - Initialize DMA unit * * @ah: The &struct ath5k_hw * * Set DMA size and pre-enable interrupts * (driver handles tx/rx buffer setup and * dma start/stop) * * XXX: Save/restore RXDP/TXDP registers ? */ void ath5k_hw_dma_init(struct ath5k_hw *ah) { /* * Set Rx/Tx DMA Configuration * * Set standard DMA size (128). Note that * a DMA size of 512 causes rx overruns and tx errors * on pci-e cards (tested on 5424 but since rx overruns * also occur on 5416/5418 with madwifi we set 128 * for all PCI-E cards to be safe). * * XXX: need to check 5210 for this * TODO: Check out tx trigger level, it's always 64 on dumps but I * guess we can tweak it and see how it goes ;-) */ if (ah->ah_version != AR5K_AR5210) { AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG, AR5K_TXCFG_SDMAMR, AR5K_DMASIZE_128B); AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_SDMAMW, AR5K_DMASIZE_128B); } /* Pre-enable interrupts on 5211/5212 */ if (ah->ah_version != AR5K_AR5210) ath5k_hw_set_imr(ah, ah->ah_imr); } /** * ath5k_hw_dma_stop - stop DMA unit * * @ah: The &struct ath5k_hw * * Stop tx/rx DMA and interrupts. Returns * -EBUSY if tx or rx dma failed to stop. * * XXX: Sometimes DMA unit hangs and we have * stuck frames on tx queues, only a reset * can fix that. */ int ath5k_hw_dma_stop(struct ath5k_hw *ah) { int i, qmax, err; err = 0; /* Disable interrupts */ ath5k_hw_set_imr(ah, 0); /* Stop rx dma */ err = ath5k_hw_stop_rx_dma(ah); if (err) return err; /* Clear any pending interrupts * and disable tx dma */ if (ah->ah_version != AR5K_AR5210) { ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR); qmax = AR5K_NUM_TX_QUEUES; } else { /* PISR/SISR Not available on 5210 */ ath5k_hw_reg_read(ah, AR5K_ISR); qmax = AR5K_NUM_TX_QUEUES_NOQCU; } for (i = 0; i < qmax; i++) { err = ath5k_hw_stop_tx_dma(ah, i); /* -EINVAL -> queue inactive */ if (err && err != -EINVAL) return err; } return 0; }
gpl-2.0
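The ath5k_hw_set_imr() routine above follows a common driver pattern: quiesce interrupts globally, translate abstract driver-level mask bits into hardware register bits, write the register, then re-enable and return the previous mask. Below is a minimal, self-contained sketch of that pattern, assuming nothing from the real ath5k headers; the bit values, names, and the stand-in register are illustrative only.

/*
 * Sketch of the mask-translation idiom in ath5k_hw_set_imr() above.
 * All names are hypothetical stand-ins, not the real ath5k API.
 */
#include <stdint.h>
#include <stdio.h>

#define DRV_INT_RX     0x0001u  /* abstract driver-level bits */
#define DRV_INT_TX     0x0002u
#define DRV_INT_FATAL  0x0004u
#define DRV_INT_GLOBAL 0x8000u

#define HW_IMR_RXOK    0x0010u  /* pretend hardware IMR bits */
#define HW_IMR_TXOK    0x0020u
#define HW_IMR_HIUERR  0x0040u

static uint32_t fake_imr;       /* stands in for the device register */

static uint32_t set_imr(uint32_t old_mask, uint32_t new_mask)
{
	uint32_t hw = 0;

	/* As in the driver: disable global interrupts before touching IMR. */
	if (old_mask & DRV_INT_GLOBAL)
		printf("global interrupts disabled\n");

	if (new_mask & DRV_INT_RX)
		hw |= HW_IMR_RXOK;
	if (new_mask & DRV_INT_TX)
		hw |= HW_IMR_TXOK;
	if (new_mask & DRV_INT_FATAL)
		hw |= HW_IMR_HIUERR;

	fake_imr = hw;              /* the reg_write() step */

	if (new_mask & DRV_INT_GLOBAL)
		printf("global interrupts re-enabled\n");
	return old_mask;            /* caller gets the previous mask back */
}

int main(void)
{
	uint32_t old = set_imr(DRV_INT_GLOBAL,
			       DRV_INT_RX | DRV_INT_FATAL | DRV_INT_GLOBAL);
	printf("old=0x%04x imr=0x%04x\n", old, fake_imr);
	return 0;
}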
Split-Screen/android_kernel_lge_gee
block/partition-generic.c
2589
14579
/* * Code extracted from drivers/block/genhd.c * Copyright (C) 1991-1998 Linus Torvalds * Re-organised Feb 1998 Russell King * * We now have independent partition support from the * block drivers, which allows all the partition code to * be grouped in one location, and to be mostly * self-contained. */ #include <linux/init.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/kmod.h> #include <linux/ctype.h> #include <linux/genhd.h> #include <linux/blktrace_api.h> #include "partitions/check.h" #ifdef CONFIG_BLK_DEV_MD extern void md_autodetect_dev(dev_t dev); #endif /* * disk_name() is used by partition check code and the genhd driver. * It formats the device name of the indicated disk into * the supplied buffer (of size at least 32), and returns * a pointer to that same buffer (for convenience). */ char *disk_name(struct gendisk *hd, int partno, char *buf) { if (!partno) snprintf(buf, BDEVNAME_SIZE, "%s", hd->disk_name); else if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) snprintf(buf, BDEVNAME_SIZE, "%sp%d", hd->disk_name, partno); else snprintf(buf, BDEVNAME_SIZE, "%s%d", hd->disk_name, partno); return buf; } const char *bdevname(struct block_device *bdev, char *buf) { return disk_name(bdev->bd_disk, bdev->bd_part->partno, buf); } EXPORT_SYMBOL(bdevname); /* * There's very little reason to use this; you should really * have a struct block_device just about everywhere and use * bdevname() instead. */ const char *__bdevname(dev_t dev, char *buffer) { scnprintf(buffer, BDEVNAME_SIZE, "unknown-block(%u,%u)", MAJOR(dev), MINOR(dev)); return buffer; } EXPORT_SYMBOL(__bdevname); static ssize_t part_partition_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hd_struct *p = dev_to_part(dev); return sprintf(buf, "%d\n", p->partno); } static ssize_t part_start_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hd_struct *p = dev_to_part(dev); return sprintf(buf, "%llu\n",(unsigned long long)p->start_sect); } ssize_t part_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hd_struct *p = dev_to_part(dev); return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects); } static ssize_t part_ro_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hd_struct *p = dev_to_part(dev); return sprintf(buf, "%d\n", p->policy ? 
1 : 0); } static ssize_t part_alignment_offset_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hd_struct *p = dev_to_part(dev); return sprintf(buf, "%llu\n", (unsigned long long)p->alignment_offset); } static ssize_t part_discard_alignment_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hd_struct *p = dev_to_part(dev); return sprintf(buf, "%u\n", p->discard_alignment); } ssize_t part_stat_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hd_struct *p = dev_to_part(dev); int cpu; cpu = part_stat_lock(); part_round_stats(cpu, p); part_stat_unlock(); return sprintf(buf, "%8lu %8lu %8llu %8u " "%8lu %8lu %8llu %8u " "%8u %8u %8u" "\n", part_stat_read(p, ios[READ]), part_stat_read(p, merges[READ]), (unsigned long long)part_stat_read(p, sectors[READ]), jiffies_to_msecs(part_stat_read(p, ticks[READ])), part_stat_read(p, ios[WRITE]), part_stat_read(p, merges[WRITE]), (unsigned long long)part_stat_read(p, sectors[WRITE]), jiffies_to_msecs(part_stat_read(p, ticks[WRITE])), part_in_flight(p), jiffies_to_msecs(part_stat_read(p, io_ticks)), jiffies_to_msecs(part_stat_read(p, time_in_queue))); } ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hd_struct *p = dev_to_part(dev); return sprintf(buf, "%8u %8u\n", atomic_read(&p->in_flight[0]), atomic_read(&p->in_flight[1])); } #ifdef CONFIG_FAIL_MAKE_REQUEST ssize_t part_fail_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hd_struct *p = dev_to_part(dev); return sprintf(buf, "%d\n", p->make_it_fail); } ssize_t part_fail_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hd_struct *p = dev_to_part(dev); int i; if (count > 0 && sscanf(buf, "%d", &i) > 0) p->make_it_fail = (i == 0) ? 
0 : 1; return count; } #endif static DEVICE_ATTR(partition, S_IRUGO, part_partition_show, NULL); static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL); static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); static DEVICE_ATTR(ro, S_IRUGO, part_ro_show, NULL); static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL); static DEVICE_ATTR(discard_alignment, S_IRUGO, part_discard_alignment_show, NULL); static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL); #ifdef CONFIG_FAIL_MAKE_REQUEST static struct device_attribute dev_attr_fail = __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); #endif static struct attribute *part_attrs[] = { &dev_attr_partition.attr, &dev_attr_start.attr, &dev_attr_size.attr, &dev_attr_ro.attr, &dev_attr_alignment_offset.attr, &dev_attr_discard_alignment.attr, &dev_attr_stat.attr, &dev_attr_inflight.attr, #ifdef CONFIG_FAIL_MAKE_REQUEST &dev_attr_fail.attr, #endif NULL }; static struct attribute_group part_attr_group = { .attrs = part_attrs, }; static const struct attribute_group *part_attr_groups[] = { &part_attr_group, #ifdef CONFIG_BLK_DEV_IO_TRACE &blk_trace_attr_group, #endif NULL }; static void part_release(struct device *dev) { struct hd_struct *p = dev_to_part(dev); free_part_stats(p); free_part_info(p); kfree(p); } static int part_uevent(struct device *dev, struct kobj_uevent_env *env) { struct hd_struct *part = dev_to_part(dev); add_uevent_var(env, "PARTN=%u", part->partno); if (part->info && part->info->volname[0]) add_uevent_var(env, "PARTNAME=%s", part->info->volname); return 0; } struct device_type part_type = { .name = "partition", .groups = part_attr_groups, .release = part_release, .uevent = part_uevent, }; static void delete_partition_rcu_cb(struct rcu_head *head) { struct hd_struct *part = container_of(head, struct hd_struct, rcu_head); part->start_sect = 0; part->nr_sects = 0; part_stat_set_all(part, 0); put_device(part_to_dev(part)); } void __delete_partition(struct hd_struct *part) { call_rcu(&part->rcu_head, delete_partition_rcu_cb); } void delete_partition(struct gendisk *disk, int partno) { struct disk_part_tbl *ptbl = disk->part_tbl; struct hd_struct *part; if (partno >= ptbl->len) return; part = ptbl->part[partno]; if (!part) return; blk_free_devt(part_devt(part)); rcu_assign_pointer(ptbl->part[partno], NULL); rcu_assign_pointer(ptbl->last_lookup, NULL); kobject_put(part->holder_dir); device_del(part_to_dev(part)); hd_struct_put(part); } static ssize_t whole_disk_show(struct device *dev, struct device_attribute *attr, char *buf) { return 0; } static DEVICE_ATTR(whole_disk, S_IRUSR | S_IRGRP | S_IROTH, whole_disk_show, NULL); struct hd_struct *add_partition(struct gendisk *disk, int partno, sector_t start, sector_t len, int flags, struct partition_meta_info *info) { struct hd_struct *p; dev_t devt = MKDEV(0, 0); struct device *ddev = disk_to_dev(disk); struct device *pdev; struct disk_part_tbl *ptbl; const char *dname; int err; err = disk_expand_part_tbl(disk, partno); if (err) return ERR_PTR(err); ptbl = disk->part_tbl; if (ptbl->part[partno]) return ERR_PTR(-EBUSY); p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return ERR_PTR(-EBUSY); if (!init_part_stats(p)) { err = -ENOMEM; goto out_free; } pdev = part_to_dev(p); p->start_sect = start; p->alignment_offset = queue_limit_alignment_offset(&disk->queue->limits, start); p->discard_alignment = queue_limit_discard_alignment(&disk->queue->limits, start); p->nr_sects = len; p->partno = 
partno; p->policy = get_disk_ro(disk); if (info) { struct partition_meta_info *pinfo = alloc_part_info(disk); if (!pinfo) goto out_free_stats; memcpy(pinfo, info, sizeof(*info)); p->info = pinfo; } dname = dev_name(ddev); if (isdigit(dname[strlen(dname) - 1])) dev_set_name(pdev, "%sp%d", dname, partno); else dev_set_name(pdev, "%s%d", dname, partno); device_initialize(pdev); pdev->class = &block_class; pdev->type = &part_type; pdev->parent = ddev; err = blk_alloc_devt(p, &devt); if (err) goto out_free_info; pdev->devt = devt; /* delay uevent until 'holders' subdir is created */ dev_set_uevent_suppress(pdev, 1); err = device_add(pdev); if (err) goto out_put; err = -ENOMEM; p->holder_dir = kobject_create_and_add("holders", &pdev->kobj); if (!p->holder_dir) goto out_del; dev_set_uevent_suppress(pdev, 0); if (flags & ADDPART_FLAG_WHOLEDISK) { err = device_create_file(pdev, &dev_attr_whole_disk); if (err) goto out_del; } /* everything is up and running, commence */ rcu_assign_pointer(ptbl->part[partno], p); /* suppress uevent if the disk suppresses it */ if (!dev_get_uevent_suppress(ddev)) kobject_uevent(&pdev->kobj, KOBJ_ADD); hd_ref_init(p); return p; out_free_info: free_part_info(p); out_free_stats: free_part_stats(p); out_free: kfree(p); return ERR_PTR(err); out_del: kobject_put(p->holder_dir); device_del(pdev); out_put: put_device(pdev); blk_free_devt(devt); return ERR_PTR(err); } static bool disk_unlock_native_capacity(struct gendisk *disk) { const struct block_device_operations *bdops = disk->fops; if (bdops->unlock_native_capacity && !(disk->flags & GENHD_FL_NATIVE_CAPACITY)) { printk(KERN_CONT "enabling native capacity\n"); bdops->unlock_native_capacity(disk); disk->flags |= GENHD_FL_NATIVE_CAPACITY; return true; } else { printk(KERN_CONT "truncated\n"); return false; } } static int drop_partitions(struct gendisk *disk, struct block_device *bdev) { struct disk_part_iter piter; struct hd_struct *part; int res; if (bdev->bd_part_count) return -EBUSY; res = invalidate_partition(disk, 0); if (res) return res; disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY); while ((part = disk_part_iter_next(&piter))) delete_partition(disk, part->partno); disk_part_iter_exit(&piter); return 0; } int rescan_partitions(struct gendisk *disk, struct block_device *bdev) { struct parsed_partitions *state = NULL; struct hd_struct *part; int p, highest, res; rescan: if (state && !IS_ERR(state)) { kfree(state); state = NULL; } res = drop_partitions(disk, bdev); if (res) return res; if (disk->fops->revalidate_disk) disk->fops->revalidate_disk(disk); check_disk_size_change(disk, bdev); bdev->bd_invalidated = 0; if (!get_capacity(disk) || !(state = check_partition(disk, bdev))) return 0; if (IS_ERR(state)) { /* * I/O error reading the partition table. If any * partition code tried to read beyond EOD, retry * after unlocking native capacity. */ if (PTR_ERR(state) == -ENOSPC) { printk(KERN_WARNING "%s: partition table beyond EOD, ", disk->disk_name); if (disk_unlock_native_capacity(disk)) goto rescan; } return -EIO; } /* * If any partition code tried to read beyond EOD, try * unlocking native capacity even if partition table is * successfully read as we could be missing some partitions. 
*/ if (state->access_beyond_eod) { printk(KERN_WARNING "%s: partition table partially beyond EOD, ", disk->disk_name); if (disk_unlock_native_capacity(disk)) goto rescan; } /* tell userspace that the media / partition table may have changed */ kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE); /* Detect the highest partition number and preallocate * disk->part_tbl. This is an optimization and not strictly * necessary. */ for (p = 1, highest = 0; p < state->limit; p++) if (state->parts[p].size) highest = p; disk_expand_part_tbl(disk, highest); /* add partitions */ for (p = 1; p < state->limit; p++) { sector_t size, from; struct partition_meta_info *info = NULL; size = state->parts[p].size; if (!size) continue; from = state->parts[p].from; if (from >= get_capacity(disk)) { printk(KERN_WARNING "%s: p%d start %llu is beyond EOD, ", disk->disk_name, p, (unsigned long long) from); if (disk_unlock_native_capacity(disk)) goto rescan; continue; } if (from + size > get_capacity(disk)) { printk(KERN_WARNING "%s: p%d size %llu extends beyond EOD, ", disk->disk_name, p, (unsigned long long) size); if (disk_unlock_native_capacity(disk)) { /* free state and restart */ goto rescan; } else { /* * we can not ignore partitions of broken tables * created by for example camera firmware, but * we limit them to the end of the disk to avoid * creating invalid block devices */ size = get_capacity(disk) - from; } } if (state->parts[p].has_info) info = &state->parts[p].info; part = add_partition(disk, p, from, size, state->parts[p].flags, &state->parts[p].info); if (IS_ERR(part)) { printk(KERN_ERR " %s: p%d could not be added: %ld\n", disk->disk_name, p, -PTR_ERR(part)); continue; } #ifdef CONFIG_BLK_DEV_MD if (state->parts[p].flags & ADDPART_FLAG_RAID) md_autodetect_dev(part_to_dev(part)->devt); #endif } kfree(state); return 0; } int invalidate_partitions(struct gendisk *disk, struct block_device *bdev) { int res; if (!bdev->bd_invalidated) return 0; res = drop_partitions(disk, bdev); if (res) return res; set_capacity(disk, 0); check_disk_size_change(disk, bdev); bdev->bd_invalidated = 0; /* tell userspace that the media / partition table may have changed */ kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE); return 0; } unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p) { struct address_space *mapping = bdev->bd_inode->i_mapping; struct page *page; page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)), NULL); if (!IS_ERR(page)) { if (PageError(page)) goto fail; p->v = page; return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_CACHE_SHIFT - 9)) - 1)) << 9); fail: page_cache_release(page); } p->v = NULL; return NULL; } EXPORT_SYMBOL(read_dev_sector);
gpl-2.0
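disk_name() above encodes the naming rule for partition device nodes: when a disk name ends in a digit, a 'p' separator is inserted before the partition number (mmcblk0 becomes mmcblk0p1), otherwise the number is appended directly (sda becomes sda1). A small userspace sketch of just that rule, assuming nothing beyond the C standard library:

/* Sketch of the partition-naming rule used by disk_name() above. */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static void part_name(const char *disk, int partno, char *buf, size_t len)
{
	if (!partno)
		snprintf(buf, len, "%s", disk);
	else if (isdigit((unsigned char)disk[strlen(disk) - 1]))
		snprintf(buf, len, "%sp%d", disk, partno);  /* mmcblk0 -> mmcblk0p1 */
	else
		snprintf(buf, len, "%s%d", disk, partno);   /* sda -> sda1 */
}

int main(void)
{
	char buf[32];
	part_name("sda", 1, buf, sizeof(buf));     printf("%s\n", buf);
	part_name("mmcblk0", 1, buf, sizeof(buf)); printf("%s\n", buf);
	return 0;
}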
iDroid-Project/iDroid-kernel
arch/mips/alchemy/xxs1500/board_setup.c
2589
3333
/* * Copyright 2000-2003, 2008 MontaVista Software Inc. * Author: MontaVista Software, Inc. <source@mvista.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/gpio.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/pm.h> #include <asm/reboot.h> #include <asm/mach-au1x00/au1000.h> #include <prom.h> static void xxs1500_reset(char *c) { /* Jump to the reset vector */ __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000)); } static void xxs1500_power_off(void) { while (1) asm volatile ( " .set mips32 \n" " wait \n" " .set mips0 \n"); } void __init board_setup(void) { u32 pin_func; pm_power_off = xxs1500_power_off; _machine_halt = xxs1500_power_off; _machine_restart = xxs1500_reset; alchemy_gpio1_input_enable(); alchemy_gpio2_enable(); /* Set multiple use pins (UART3/GPIO) to UART (it's used as UART too) */ pin_func = au_readl(SYS_PINFUNC) & ~SYS_PF_UR3; pin_func |= SYS_PF_UR3; au_writel(pin_func, SYS_PINFUNC); /* Enable UART */ alchemy_uart_enable(AU1000_UART3_PHYS_ADDR); /* Enable DTR (MCR bit 0) = USB power up */ __raw_writel(1, (void __iomem *)KSEG1ADDR(AU1000_UART3_PHYS_ADDR + 0x18)); wmb(); #ifdef CONFIG_PCI #if defined(__MIPSEB__) au_writel(0xf | (2 << 6) | (1 << 4), Au1500_PCI_CFG); #else au_writel(0xf, Au1500_PCI_CFG); #endif #endif } static int __init xxs1500_init_irq(void) { irq_set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH); irq_set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW); irq_set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW); irq_set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW); irq_set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW); irq_set_irq_type(AU1500_GPIO207_INT, IRQF_TRIGGER_LOW); irq_set_irq_type(AU1500_GPIO0_INT, IRQF_TRIGGER_LOW); irq_set_irq_type(AU1500_GPIO1_INT, IRQF_TRIGGER_LOW); irq_set_irq_type(AU1500_GPIO2_INT, IRQF_TRIGGER_LOW); irq_set_irq_type(AU1500_GPIO3_INT, IRQF_TRIGGER_LOW); irq_set_irq_type(AU1500_GPIO4_INT, IRQF_TRIGGER_LOW); /* CF irq */ irq_set_irq_type(AU1500_GPIO5_INT, IRQF_TRIGGER_LOW); return 0; } arch_initcall(xxs1500_init_irq);
gpl-2.0
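board_setup() above configures the multi-use UART3/GPIO pins with a classic read-modify-write on SYS_PINFUNC: read the register, clear the field of interest, set the desired function bits, write back. The sketch below shows the same sequence against a stand-in variable; the field layout and names are hypothetical, not the real Alchemy register definitions.

/* Sketch of the read-modify-write pattern board_setup() applies to SYS_PINFUNC. */
#include <stdint.h>
#include <stdio.h>

#define PINFUNC_UART3_MASK (0x3u << 6)  /* hypothetical 2-bit pin-function field */
#define PINFUNC_UART3_UART (0x1u << 6)  /* hypothetical "UART" function value   */

static uint32_t fake_pinfunc = 0xffffffffu;  /* stands in for the SYS_PINFUNC register */

int main(void)
{
	uint32_t v = fake_pinfunc;        /* the au_readl(SYS_PINFUNC) step   */
	v &= ~PINFUNC_UART3_MASK;         /* clear the whole field first      */
	v |= PINFUNC_UART3_UART;          /* then select the desired function */
	fake_pinfunc = v;                 /* the au_writel(v, SYS_PINFUNC) step */
	printf("pinfunc = 0x%08x\n", fake_pinfunc);
	return 0;
}

Clearing the full field before setting the new value is what makes the sequence safe for multi-bit fields; the original code applies the same idiom even where the field is a single bit.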
fefifofum/android_kernel_bq_maxwell2plus
arch/tile/kernel/stack.c
2845
14142
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/kprobes.h> #include <linux/module.h> #include <linux/pfn.h> #include <linux/kallsyms.h> #include <linux/stacktrace.h> #include <linux/uaccess.h> #include <linux/mmzone.h> #include <asm/backtrace.h> #include <asm/page.h> #include <asm/tlbflush.h> #include <asm/ucontext.h> #include <asm/sigframe.h> #include <asm/stack.h> #include <arch/abi.h> #include <arch/interrupts.h> #define KBT_ONGOING 0 /* Backtrace still ongoing */ #define KBT_DONE 1 /* Backtrace cleanly completed */ #define KBT_RUNNING 2 /* Can't run backtrace on a running task */ #define KBT_LOOP 3 /* Backtrace entered a loop */ /* Is address on the specified kernel stack? */ static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp) { ulong kstack_base = (ulong) kbt->task->stack; if (kstack_base == 0) /* corrupt task pointer; just follow stack... */ return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory; return sp >= kstack_base && sp < kstack_base + THREAD_SIZE; } /* Is address valid for reading? */ static int valid_address(struct KBacktraceIterator *kbt, unsigned long address) { HV_PTE *l1_pgtable = kbt->pgtable; HV_PTE *l2_pgtable; unsigned long pfn; HV_PTE pte; struct page *page; if (l1_pgtable == NULL) return 0; /* can't read user space in other tasks */ #ifdef CONFIG_64BIT /* Find the real l1_pgtable by looking in the l0_pgtable. */ pte = l1_pgtable[HV_L0_INDEX(address)]; if (!hv_pte_get_present(pte)) return 0; pfn = hv_pte_get_pfn(pte); if (pte_huge(pte)) { if (!pfn_valid(pfn)) { pr_err("L0 huge page has bad pfn %#lx\n", pfn); return 0; } return hv_pte_get_present(pte) && hv_pte_get_readable(pte); } page = pfn_to_page(pfn); BUG_ON(PageHighMem(page)); /* No HIGHMEM on 64-bit. */ l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn); #endif pte = l1_pgtable[HV_L1_INDEX(address)]; if (!hv_pte_get_present(pte)) return 0; pfn = hv_pte_get_pfn(pte); if (pte_huge(pte)) { if (!pfn_valid(pfn)) { pr_err("huge page has bad pfn %#lx\n", pfn); return 0; } return hv_pte_get_present(pte) && hv_pte_get_readable(pte); } page = pfn_to_page(pfn); if (PageHighMem(page)) { pr_err("L2 page table not in LOWMEM (%#llx)\n", HV_PFN_TO_CPA(pfn)); return 0; } l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn); pte = l2_pgtable[HV_L2_INDEX(address)]; return hv_pte_get_present(pte) && hv_pte_get_readable(pte); } /* Callback for backtracer; basically a glorified memcpy */ static bool read_memory_func(void *result, unsigned long address, unsigned int size, void *vkbt) { int retval; struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt; if (__kernel_text_address(address)) { /* OK to read kernel code. 
*/ } else if (address >= PAGE_OFFSET) { /* We only tolerate kernel-space reads of this task's stack */ if (!in_kernel_stack(kbt, address)) return 0; } else if (!valid_address(kbt, address)) { return 0; /* invalid user-space address */ } pagefault_disable(); retval = __copy_from_user_inatomic(result, (void __user __force *)address, size); pagefault_enable(); return (retval == 0); } /* Return a pt_regs pointer for a valid fault handler frame */ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt) { const char *fault = NULL; /* happy compiler */ char fault_buf[64]; unsigned long sp = kbt->it.sp; struct pt_regs *p; if (!in_kernel_stack(kbt, sp)) return NULL; if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1)) return NULL; p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE); if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN) fault = "syscall"; else { if (kbt->verbose) { /* else we aren't going to use it */ snprintf(fault_buf, sizeof(fault_buf), "interrupt %ld", p->faultnum); fault = fault_buf; } } if (EX1_PL(p->ex1) == KERNEL_PL && __kernel_text_address(p->pc) && in_kernel_stack(kbt, p->sp) && p->sp >= sp) { if (kbt->verbose) pr_err(" <%s while in kernel mode>\n", fault); } else if (EX1_PL(p->ex1) == USER_PL && p->pc < PAGE_OFFSET && p->sp < PAGE_OFFSET) { if (kbt->verbose) pr_err(" <%s while in user mode>\n", fault); } else if (kbt->verbose) { pr_err(" (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n", p->pc, p->sp, p->ex1); p = NULL; } if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0) return p; return NULL; } /* Is the pc pointing to a sigreturn trampoline? */ static int is_sigreturn(unsigned long pc) { return (pc == VDSO_BASE); } /* Return a pt_regs pointer for a valid signal handler frame */ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt) { BacktraceIterator *b = &kbt->it; if (b->pc == VDSO_BASE) { struct rt_sigframe *frame; unsigned long sigframe_top = b->sp + sizeof(struct rt_sigframe) - 1; if (!valid_address(kbt, b->sp) || !valid_address(kbt, sigframe_top)) { if (kbt->verbose) pr_err(" (odd signal: sp %#lx?)\n", (unsigned long)(b->sp)); return NULL; } frame = (struct rt_sigframe *)b->sp; if (kbt->verbose) { pr_err(" <received signal %d>\n", frame->info.si_signo); } return (struct pt_regs *)&frame->uc.uc_mcontext; } return NULL; } static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt) { return is_sigreturn(kbt->it.pc); } static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt) { struct pt_regs *p; p = valid_fault_handler(kbt); if (p == NULL) p = valid_sigframe(kbt); if (p == NULL) return 0; backtrace_init(&kbt->it, read_memory_func, kbt, p->pc, p->lr, p->sp, p->regs[52]); kbt->new_context = 1; return 1; } /* Find a frame that isn't a sigreturn, if there is one. */ static int KBacktraceIterator_next_item_inclusive( struct KBacktraceIterator *kbt) { for (;;) { do { if (!KBacktraceIterator_is_sigreturn(kbt)) return KBT_ONGOING; } while (backtrace_next(&kbt->it)); if (!KBacktraceIterator_restart(kbt)) return KBT_DONE; } } /* * If the current sp is on a page different than what we recorded * as the top-of-kernel-stack last time we context switched, we have * probably blown the stack, and nothing is going to work out well. * If we can at least get out a warning, that may help the debug, * though we probably won't be able to backtrace into the code that * actually did the recursive damage. 
*/ static void validate_stack(struct pt_regs *regs) { int cpu = smp_processor_id(); unsigned long ksp0 = get_current_ksp0(); unsigned long ksp0_base = ksp0 - THREAD_SIZE; unsigned long sp = stack_pointer; if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) { pr_err("WARNING: cpu %d: kernel stack page %#lx underrun!\n" " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n", cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr); } else if (sp < ksp0_base + sizeof(struct thread_info)) { pr_err("WARNING: cpu %d: kernel stack page %#lx overrun!\n" " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n", cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr); } } void KBacktraceIterator_init(struct KBacktraceIterator *kbt, struct task_struct *t, struct pt_regs *regs) { unsigned long pc, lr, sp, r52; int is_current; /* * Set up callback information. We grab the kernel stack base * so we will allow reads of that address range, and if we're * asking about the current process we grab the page table * so we can check user accesses before trying to read them. * We flush the TLB to avoid any weird skew issues. */ is_current = (t == NULL); kbt->is_current = is_current; if (is_current) t = validate_current(); kbt->task = t; kbt->pgtable = NULL; kbt->verbose = 0; /* override in caller if desired */ kbt->profile = 0; /* override in caller if desired */ kbt->end = KBT_ONGOING; kbt->new_context = 0; if (is_current) { HV_PhysAddr pgdir_pa = hv_inquire_context().page_table; if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) { /* * Not just an optimization: this also allows * this to work at all before va/pa mappings * are set up. */ kbt->pgtable = swapper_pg_dir; } else { struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa)); if (!PageHighMem(page)) kbt->pgtable = __va(pgdir_pa); else pr_err("page table not in LOWMEM" " (%#llx)\n", pgdir_pa); } local_flush_tlb_all(); validate_stack(regs); } if (regs == NULL) { if (is_current || t->state == TASK_RUNNING) { /* Can't do this; we need registers */ kbt->end = KBT_RUNNING; return; } pc = get_switch_to_pc(); lr = t->thread.pc; sp = t->thread.ksp; r52 = 0; } else { pc = regs->pc; lr = regs->lr; sp = regs->sp; r52 = regs->regs[52]; } backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52); kbt->end = KBacktraceIterator_next_item_inclusive(kbt); } EXPORT_SYMBOL(KBacktraceIterator_init); int KBacktraceIterator_end(struct KBacktraceIterator *kbt) { return kbt->end != KBT_ONGOING; } EXPORT_SYMBOL(KBacktraceIterator_end); void KBacktraceIterator_next(struct KBacktraceIterator *kbt) { unsigned long old_pc = kbt->it.pc, old_sp = kbt->it.sp; kbt->new_context = 0; if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) { kbt->end = KBT_DONE; return; } kbt->end = KBacktraceIterator_next_item_inclusive(kbt); if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) { /* Trapped in a loop; give up. */ kbt->end = KBT_LOOP; } } EXPORT_SYMBOL(KBacktraceIterator_next); /* * This method wraps the backtracer's more generic support. * It is only invoked from the architecture-specific code; show_stack() * and dump_stack() (in entry.S) are architecture-independent entry points. */ void tile_show_stack(struct KBacktraceIterator *kbt, int headers) { int i; if (headers) { /* * Add a blank line since if we are called from panic(), * then bust_spinlocks() spit out a space in front of us * and it will mess up our KERN_ERR. 
*/ pr_err("\n"); pr_err("Starting stack dump of tid %d, pid %d (%s)" " on cpu %d at cycle %lld\n", kbt->task->pid, kbt->task->tgid, kbt->task->comm, smp_processor_id(), get_cycles()); } kbt->verbose = 1; i = 0; for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) { char *modname; const char *name; unsigned long address = kbt->it.pc; unsigned long offset, size; char namebuf[KSYM_NAME_LEN+100]; if (address >= PAGE_OFFSET) name = kallsyms_lookup(address, &size, &offset, &modname, namebuf); else name = NULL; if (!name) namebuf[0] = '\0'; else { size_t namelen = strlen(namebuf); size_t remaining = (sizeof(namebuf) - 1) - namelen; char *p = namebuf + namelen; int rc = snprintf(p, remaining, "+%#lx/%#lx ", offset, size); if (modname && rc < remaining) snprintf(p + rc, remaining - rc, "[%s] ", modname); namebuf[sizeof(namebuf)-1] = '\0'; } pr_err(" frame %d: 0x%lx %s(sp 0x%lx)\n", i++, address, namebuf, (unsigned long)(kbt->it.sp)); if (i >= 100) { pr_err("Stack dump truncated" " (%d frames)\n", i); break; } } if (kbt->end == KBT_LOOP) pr_err("Stack dump stopped; next frame identical to this one\n"); if (headers) pr_err("Stack dump complete\n"); } EXPORT_SYMBOL(tile_show_stack); /* This is called from show_regs() and _dump_stack() */ void dump_stack_regs(struct pt_regs *regs) { struct KBacktraceIterator kbt; KBacktraceIterator_init(&kbt, NULL, regs); tile_show_stack(&kbt, 1); } EXPORT_SYMBOL(dump_stack_regs); static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs, ulong pc, ulong lr, ulong sp, ulong r52) { memset(regs, 0, sizeof(struct pt_regs)); regs->pc = pc; regs->lr = lr; regs->sp = sp; regs->regs[52] = r52; return regs; } /* This is called from dump_stack() and just converts to pt_regs */ void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52) { struct pt_regs regs; dump_stack_regs(regs_to_pt_regs(&regs, pc, lr, sp, r52)); } /* This is called from KBacktraceIterator_init_current() */ void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc, ulong lr, ulong sp, ulong r52) { struct pt_regs regs; KBacktraceIterator_init(kbt, NULL, regs_to_pt_regs(&regs, pc, lr, sp, r52)); } /* This is called only from kernel/sched.c, with esp == NULL */ void show_stack(struct task_struct *task, unsigned long *esp) { struct KBacktraceIterator kbt; if (task == NULL || task == current) KBacktraceIterator_init_current(&kbt); else KBacktraceIterator_init(&kbt, task, NULL); tile_show_stack(&kbt, 0); } #ifdef CONFIG_STACKTRACE /* Support generic Linux stack API too */ void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace) { struct KBacktraceIterator kbt; int skip = trace->skip; int i = 0; if (task == NULL || task == current) KBacktraceIterator_init_current(&kbt); else KBacktraceIterator_init(&kbt, task, NULL); for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) { if (skip) { --skip; continue; } if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET) break; trace->entries[i++] = kbt.it.pc; } trace->nr_entries = i; } EXPORT_SYMBOL(save_stack_trace_tsk); void save_stack_trace(struct stack_trace *trace) { save_stack_trace_tsk(NULL, trace); } #endif /* In entry.S */ EXPORT_SYMBOL(KBacktraceIterator_init_current);
gpl-2.0
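The KBacktraceIterator API above is a plain init/end/next iterator: callers initialize it from a task or pt_regs, then loop until end, reading one PC per frame; tile_show_stack() and save_stack_trace_tsk() both walk it exactly this way. A toy iterator over a fixed frame array, assuming none of the tile-specific types, shows the same calling shape:

/* Sketch of the init/end/next iterator idiom used by KBacktraceIterator above. */
#include <stdio.h>

struct bt_iter {
	const unsigned long *frames;    /* pretend stack of saved PCs */
	int n, i;
};

static void bt_init(struct bt_iter *it, const unsigned long *f, int n)
{
	it->frames = f; it->n = n; it->i = 0;
}

static int bt_end(const struct bt_iter *it) { return it->i >= it->n; }
static void bt_next(struct bt_iter *it)     { it->i++; }
static unsigned long bt_pc(const struct bt_iter *it) { return it->frames[it->i]; }

int main(void)
{
	const unsigned long pcs[] = { 0x1000, 0x2040, 0x30f8 };
	struct bt_iter it;
	int frame = 0;

	/* same loop shape as the tile_show_stack() walk above */
	for (bt_init(&it, pcs, 3); !bt_end(&it); bt_next(&it))
		printf(" frame %d: 0x%lx\n", frame++, bt_pc(&it));
	return 0;
}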
hemanthariyani/omap-kernel
lib/debug_locks.c
3357
1108
/* * lib/debug_locks.c * * Generic place for common debugging facilities for various locks: * spinlocks, rwlocks, mutexes and rwsems. * * Started by Ingo Molnar: * * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> */ #include <linux/rwsem.h> #include <linux/mutex.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/debug_locks.h> /* * We want to turn all lock-debugging facilities on/off at once, * via a global flag. The reason is that once a single bug has been * detected and reported, there might be a cascade of follow-up bugs * that would just muddy the log. So we report the first one and * shut up after that. */ int debug_locks = 1; EXPORT_SYMBOL_GPL(debug_locks); /* * The locking-testsuite uses <debug_locks_silent> to get a * 'silent failure': nothing is printed to the console when * a locking bug is detected. */ int debug_locks_silent; /* * Generic 'turn off all lock debugging' function: */ int debug_locks_off(void) { if (__debug_locks_off()) { if (!debug_locks_silent) { console_verbose(); return 1; } } return 0; }
gpl-2.0
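debug_locks_off() above reports only the first lock-debugging failure: __debug_locks_off() is an atomic exchange in the kernel, so exactly one caller wins the race to clear the flag and gets to print. A userspace sketch of the same report-once idiom, using C11 atomics as a stand-in for the kernel primitive:

/* Sketch of the report-once idiom behind debug_locks_off() above. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int debug_locks_flag = 1;

/* Returns nonzero only for the caller that actually flipped the flag. */
static int debug_locks_off_once(void)
{
	return atomic_exchange(&debug_locks_flag, 0);
}

int main(void)
{
	if (debug_locks_off_once())
		printf("first bug: report it\n");
	if (debug_locks_off_once())
		printf("never printed: later bugs stay silent\n");
	return 0;
}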
htc-mirror/shooteru-ics-crc-3.0.16-e733189
net/sunrpc/xprtrdma/svc_rdma.c
3357
8357
/* * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the BSD-type * license below: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Network Appliance, Inc. nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Author: Tom Tucker <tom@opengridcomputing.com> */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/sysctl.h> #include <linux/workqueue.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/sched.h> #include <linux/sunrpc/svc_rdma.h> #define RPCDBG_FACILITY RPCDBG_SVCXPRT /* RPC/RDMA parameters */ unsigned int svcrdma_ord = RPCRDMA_ORD; static unsigned int min_ord = 1; static unsigned int max_ord = 4096; unsigned int svcrdma_max_requests = RPCRDMA_MAX_REQUESTS; static unsigned int min_max_requests = 4; static unsigned int max_max_requests = 16384; unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE; static unsigned int min_max_inline = 4096; static unsigned int max_max_inline = 65536; atomic_t rdma_stat_recv; atomic_t rdma_stat_read; atomic_t rdma_stat_write; atomic_t rdma_stat_sq_starve; atomic_t rdma_stat_rq_starve; atomic_t rdma_stat_rq_poll; atomic_t rdma_stat_rq_prod; atomic_t rdma_stat_sq_poll; atomic_t rdma_stat_sq_prod; /* Temporary NFS request map and context caches */ struct kmem_cache *svc_rdma_map_cachep; struct kmem_cache *svc_rdma_ctxt_cachep; struct workqueue_struct *svc_rdma_wq; /* * This function implements reading and resetting an atomic_t stat * variable through read/write to a proc file. Any write to the file * resets the associated statistic to zero. Any read returns its * current value. 
*/ static int read_reset_stat(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { atomic_t *stat = (atomic_t *)table->data; if (!stat) return -EINVAL; if (write) atomic_set(stat, 0); else { char str_buf[32]; char *data; int len = snprintf(str_buf, 32, "%d\n", atomic_read(stat)); if (len >= 32) return -EFAULT; len = strlen(str_buf); if (*ppos > len) { *lenp = 0; return 0; } data = &str_buf[*ppos]; len -= *ppos; if (len > *lenp) len = *lenp; if (len && copy_to_user(buffer, str_buf, len)) return -EFAULT; *lenp = len; *ppos += len; } return 0; } static struct ctl_table_header *svcrdma_table_header; static ctl_table svcrdma_parm_table[] = { { .procname = "max_requests", .data = &svcrdma_max_requests, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_max_requests, .extra2 = &max_max_requests }, { .procname = "max_req_size", .data = &svcrdma_max_req_size, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_max_inline, .extra2 = &max_max_inline }, { .procname = "max_outbound_read_requests", .data = &svcrdma_ord, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_ord, .extra2 = &max_ord, }, { .procname = "rdma_stat_read", .data = &rdma_stat_read, .maxlen = sizeof(atomic_t), .mode = 0644, .proc_handler = read_reset_stat, }, { .procname = "rdma_stat_recv", .data = &rdma_stat_recv, .maxlen = sizeof(atomic_t), .mode = 0644, .proc_handler = read_reset_stat, }, { .procname = "rdma_stat_write", .data = &rdma_stat_write, .maxlen = sizeof(atomic_t), .mode = 0644, .proc_handler = read_reset_stat, }, { .procname = "rdma_stat_sq_starve", .data = &rdma_stat_sq_starve, .maxlen = sizeof(atomic_t), .mode = 0644, .proc_handler = read_reset_stat, }, { .procname = "rdma_stat_rq_starve", .data = &rdma_stat_rq_starve, .maxlen = sizeof(atomic_t), .mode = 0644, .proc_handler = read_reset_stat, }, { .procname = "rdma_stat_rq_poll", .data = &rdma_stat_rq_poll, .maxlen = sizeof(atomic_t), .mode = 0644, .proc_handler = read_reset_stat, }, { .procname = "rdma_stat_rq_prod", .data = &rdma_stat_rq_prod, .maxlen = sizeof(atomic_t), .mode = 0644, .proc_handler = read_reset_stat, }, { .procname = "rdma_stat_sq_poll", .data = &rdma_stat_sq_poll, .maxlen = sizeof(atomic_t), .mode = 0644, .proc_handler = read_reset_stat, }, { .procname = "rdma_stat_sq_prod", .data = &rdma_stat_sq_prod, .maxlen = sizeof(atomic_t), .mode = 0644, .proc_handler = read_reset_stat, }, { }, }; static ctl_table svcrdma_table[] = { { .procname = "svc_rdma", .mode = 0555, .child = svcrdma_parm_table }, { }, }; static ctl_table svcrdma_root_table[] = { { .procname = "sunrpc", .mode = 0555, .child = svcrdma_table }, { }, }; void svc_rdma_cleanup(void) { dprintk("SVCRDMA Module Removed, deregister RPC RDMA transport\n"); destroy_workqueue(svc_rdma_wq); if (svcrdma_table_header) { unregister_sysctl_table(svcrdma_table_header); svcrdma_table_header = NULL; } svc_unreg_xprt_class(&svc_rdma_class); kmem_cache_destroy(svc_rdma_map_cachep); kmem_cache_destroy(svc_rdma_ctxt_cachep); } int svc_rdma_init(void) { dprintk("SVCRDMA Module Init, register RPC RDMA transport\n"); dprintk("\tsvcrdma_ord : %d\n", svcrdma_ord); dprintk("\tmax_requests : %d\n", svcrdma_max_requests); dprintk("\tsq_depth : %d\n", svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT); dprintk("\tmax_inline : %d\n", svcrdma_max_req_size); svc_rdma_wq = alloc_workqueue("svc_rdma", 0, 0); if (!svc_rdma_wq) return -ENOMEM; if 
(!svcrdma_table_header) svcrdma_table_header = register_sysctl_table(svcrdma_root_table); /* Create the temporary map cache */ svc_rdma_map_cachep = kmem_cache_create("svc_rdma_map_cache", sizeof(struct svc_rdma_req_map), 0, SLAB_HWCACHE_ALIGN, NULL); if (!svc_rdma_map_cachep) { printk(KERN_INFO "Could not allocate map cache.\n"); goto err0; } /* Create the temporary context cache */ svc_rdma_ctxt_cachep = kmem_cache_create("svc_rdma_ctxt_cache", sizeof(struct svc_rdma_op_ctxt), 0, SLAB_HWCACHE_ALIGN, NULL); if (!svc_rdma_ctxt_cachep) { printk(KERN_INFO "Could not allocate WR ctxt cache.\n"); goto err1; } /* Register RDMA with the SVC transport switch */ svc_reg_xprt_class(&svc_rdma_class); return 0; err1: kmem_cache_destroy(svc_rdma_map_cachep); err0: unregister_sysctl_table(svcrdma_table_header); destroy_workqueue(svc_rdma_wq); return -ENOMEM; } MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>"); MODULE_DESCRIPTION("SVC RDMA Transport"); MODULE_LICENSE("Dual BSD/GPL"); module_init(svc_rdma_init); module_exit(svc_rdma_cleanup);
gpl-2.0
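The read_reset_stat() handler above gives each svc_rdma statistic write-to-reset semantics: any write zeroes the counter, and any read formats its current value. A minimal sketch of that contract outside the sysctl machinery, with hypothetical names and C11 atomics standing in for the kernel's atomic_t:

/* Sketch of the read-to-sample / write-to-reset contract of read_reset_stat(). */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int stat_counter;

/* A 'write' of any payload resets; a 'read' formats the current value. */
static int stat_io(int is_write, char *buf, size_t len)
{
	if (is_write) {
		atomic_store(&stat_counter, 0);
		return 0;
	}
	return snprintf(buf, len, "%d\n", atomic_load(&stat_counter));
}

int main(void)
{
	char buf[32];
	atomic_fetch_add(&stat_counter, 42);
	stat_io(0, buf, sizeof(buf)); printf("read: %s", buf);
	stat_io(1, NULL, 0);                      /* any write resets */
	stat_io(0, buf, sizeof(buf)); printf("after reset: %s", buf);
	return 0;
}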
windxixi/android_kernel_htc_msm8660
net/atm/atm_misc.c
3613
2643
/* net/atm/atm_misc.c - Various functions for use by ATM drivers */ /* Written 1995-2000 by Werner Almesberger, EPFL ICA */ #include <linux/module.h> #include <linux/atm.h> #include <linux/atmdev.h> #include <linux/skbuff.h> #include <linux/sonet.h> #include <linux/bitops.h> #include <linux/errno.h> #include <asm/atomic.h> int atm_charge(struct atm_vcc *vcc, int truesize) { atm_force_charge(vcc, truesize); if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf) return 1; atm_return(vcc, truesize); atomic_inc(&vcc->stats->rx_drop); return 0; } EXPORT_SYMBOL(atm_charge); struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size, gfp_t gfp_flags) { struct sock *sk = sk_atm(vcc); int guess = atm_guess_pdu2truesize(pdu_size); atm_force_charge(vcc, guess); if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { struct sk_buff *skb = alloc_skb(pdu_size, gfp_flags); if (skb) { atomic_add(skb->truesize-guess, &sk->sk_rmem_alloc); return skb; } } atm_return(vcc, guess); atomic_inc(&vcc->stats->rx_drop); return NULL; } EXPORT_SYMBOL(atm_alloc_charge); /* * atm_pcr_goal returns the positive PCR if it should be rounded up, the * negative PCR if it should be rounded down, and zero if the maximum available * bandwidth should be used. * * The rules are as follows (* = maximum, - = absent (0), x = value "x", * (x+ = x or next value above x, x- = x or next value below): * * min max pcr result min max pcr result * - - - * (UBR only) x - - x+ * - - * * x - * * * - - z z- x - z z- * - * - * x * - x+ * - * * * x * * * * - * z z- x * z z- * - y - y- x y - x+ * - y * y- x y * y- * - y z z- x y z z- * * All non-error cases can be converted with the following simple set of rules: * * if pcr == z then z- * else if min == x && pcr == - then x+ * else if max == y then y- * else * */ int atm_pcr_goal(const struct atm_trafprm *tp) { if (tp->pcr && tp->pcr != ATM_MAX_PCR) return -tp->pcr; if (tp->min_pcr && !tp->pcr) return tp->min_pcr; if (tp->max_pcr != ATM_MAX_PCR) return -tp->max_pcr; return 0; } EXPORT_SYMBOL(atm_pcr_goal); void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to) { #define __HANDLE_ITEM(i) to->i = atomic_read(&from->i) __SONET_ITEMS #undef __HANDLE_ITEM } EXPORT_SYMBOL(sonet_copy_stats); void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to) { #define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i) __SONET_ITEMS #undef __HANDLE_ITEM } EXPORT_SYMBOL(sonet_subtract_stats);
gpl-2.0
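atm_pcr_goal() above compresses the rule table in its comment into three checks: an explicit PCR wins and is rounded down (negative return), otherwise a bare minimum rounds up (positive return), otherwise a finite maximum rounds down, and zero means "use the link maximum". A standalone sketch exercising those cases, with ATM_MAX_PCR replaced by a stand-in constant:

/* Sketch restating the atm_pcr_goal() selection rules above. */
#include <stdio.h>

#define ATM_MAX_PCR_DEMO 0x7fffffff   /* stand-in for ATM_MAX_PCR */

struct trafprm { int min_pcr, max_pcr, pcr; };

/* >0: round up from this rate; <0: round down from -rate; 0: use link max. */
static int pcr_goal(const struct trafprm *tp)
{
	if (tp->pcr && tp->pcr != ATM_MAX_PCR_DEMO)
		return -tp->pcr;        /* explicit pcr z -> z- (round down) */
	if (tp->min_pcr && !tp->pcr)
		return tp->min_pcr;     /* only min x    -> x+ (round up)   */
	if (tp->max_pcr != ATM_MAX_PCR_DEMO)
		return -tp->max_pcr;    /* ceiling y     -> y- (round down) */
	return 0;                       /* unconstrained -> use maximum     */
}

int main(void)
{
	struct trafprm ubr = { 0, ATM_MAX_PCR_DEMO, 0 };      /* UBR: no limits   */
	struct trafprm min = { 64000, ATM_MAX_PCR_DEMO, 0 };  /* floor only       */
	struct trafprm cap = { 0, 128000, 0 };                /* ceiling only     */
	printf("%d %d %d\n", pcr_goal(&ubr), pcr_goal(&min), pcr_goal(&cap));
	return 0;                       /* prints: 0 64000 -128000 */
}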
JustAkan/Oxygen_united_kernel-gproj
sound/pci/pcxhr/pcxhr.c
4893
45538
/* * Driver for Digigram pcxhr compatible soundcards * * main file with alsa callbacks * * Copyright (c) 2004 by Digigram <alsa@digigram.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/mutex.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/info.h> #include <sound/control.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include "pcxhr.h" #include "pcxhr_mixer.h" #include "pcxhr_hwdep.h" #include "pcxhr_core.h" #include "pcxhr_mix22.h" #define DRIVER_NAME "pcxhr" MODULE_AUTHOR("Markus Bollinger <bollinger@digigram.com>, " "Marc Titinger <titinger@digigram.com>"); MODULE_DESCRIPTION("Digigram " DRIVER_NAME " " PCXHR_DRIVER_VERSION_STRING); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Digigram," DRIVER_NAME "}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;/* Enable this card */ static bool mono[SNDRV_CARDS]; /* capture mono only */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for Digigram " DRIVER_NAME " soundcard"); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for Digigram " DRIVER_NAME " soundcard"); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable Digigram " DRIVER_NAME " soundcard"); module_param_array(mono, bool, NULL, 0444); MODULE_PARM_DESC(mono, "Mono capture mode (default is stereo)"); enum { PCI_ID_VX882HR, PCI_ID_PCX882HR, PCI_ID_VX881HR, PCI_ID_PCX881HR, PCI_ID_VX882E, PCI_ID_PCX882E, PCI_ID_VX881E, PCI_ID_PCX881E, PCI_ID_VX1222HR, PCI_ID_PCX1222HR, PCI_ID_VX1221HR, PCI_ID_PCX1221HR, PCI_ID_VX1222E, PCI_ID_PCX1222E, PCI_ID_VX1221E, PCI_ID_PCX1221E, PCI_ID_VX222HR, PCI_ID_VX222E, PCI_ID_PCX22HR, PCI_ID_PCX22E, PCI_ID_VX222HRMIC, PCI_ID_VX222E_MIC, PCI_ID_PCX924HR, PCI_ID_PCX924E, PCI_ID_PCX924HRMIC, PCI_ID_PCX924E_MIC, PCI_ID_LAST }; static DEFINE_PCI_DEVICE_TABLE(pcxhr_ids) = { { 0x10b5, 0x9656, 0x1369, 0xb001, 0, 0, PCI_ID_VX882HR, }, { 0x10b5, 0x9656, 0x1369, 0xb101, 0, 0, PCI_ID_PCX882HR, }, { 0x10b5, 0x9656, 0x1369, 0xb201, 0, 0, PCI_ID_VX881HR, }, { 0x10b5, 0x9656, 0x1369, 0xb301, 0, 0, PCI_ID_PCX881HR, }, { 0x10b5, 0x9056, 0x1369, 0xb021, 0, 0, PCI_ID_VX882E, }, { 0x10b5, 0x9056, 0x1369, 0xb121, 0, 0, PCI_ID_PCX882E, }, { 0x10b5, 0x9056, 0x1369, 0xb221, 0, 0, PCI_ID_VX881E, }, { 0x10b5, 0x9056, 0x1369, 0xb321, 0, 0, PCI_ID_PCX881E, }, { 0x10b5, 0x9656, 0x1369, 0xb401, 0, 0, PCI_ID_VX1222HR, }, { 0x10b5, 0x9656, 0x1369, 0xb501, 0, 0, PCI_ID_PCX1222HR, }, { 0x10b5, 0x9656, 0x1369, 0xb601, 0, 0, PCI_ID_VX1221HR, }, { 0x10b5, 0x9656, 0x1369, 0xb701, 0, 0, 
PCI_ID_PCX1221HR, }, { 0x10b5, 0x9056, 0x1369, 0xb421, 0, 0, PCI_ID_VX1222E, }, { 0x10b5, 0x9056, 0x1369, 0xb521, 0, 0, PCI_ID_PCX1222E, }, { 0x10b5, 0x9056, 0x1369, 0xb621, 0, 0, PCI_ID_VX1221E, }, { 0x10b5, 0x9056, 0x1369, 0xb721, 0, 0, PCI_ID_PCX1221E, }, { 0x10b5, 0x9056, 0x1369, 0xba01, 0, 0, PCI_ID_VX222HR, }, { 0x10b5, 0x9056, 0x1369, 0xba21, 0, 0, PCI_ID_VX222E, }, { 0x10b5, 0x9056, 0x1369, 0xbd01, 0, 0, PCI_ID_PCX22HR, }, { 0x10b5, 0x9056, 0x1369, 0xbd21, 0, 0, PCI_ID_PCX22E, }, { 0x10b5, 0x9056, 0x1369, 0xbc01, 0, 0, PCI_ID_VX222HRMIC, }, { 0x10b5, 0x9056, 0x1369, 0xbc21, 0, 0, PCI_ID_VX222E_MIC, }, { 0x10b5, 0x9056, 0x1369, 0xbb01, 0, 0, PCI_ID_PCX924HR, }, { 0x10b5, 0x9056, 0x1369, 0xbb21, 0, 0, PCI_ID_PCX924E, }, { 0x10b5, 0x9056, 0x1369, 0xbf01, 0, 0, PCI_ID_PCX924HRMIC, }, { 0x10b5, 0x9056, 0x1369, 0xbf21, 0, 0, PCI_ID_PCX924E_MIC, }, { 0, } }; MODULE_DEVICE_TABLE(pci, pcxhr_ids); struct board_parameters { char* board_name; short playback_chips; short capture_chips; short fw_file_set; short firmware_num; }; static struct board_parameters pcxhr_board_params[] = { [PCI_ID_VX882HR] = { "VX882HR", 4, 4, 0, 41 }, [PCI_ID_PCX882HR] = { "PCX882HR", 4, 4, 0, 41 }, [PCI_ID_VX881HR] = { "VX881HR", 4, 4, 0, 41 }, [PCI_ID_PCX881HR] = { "PCX881HR", 4, 4, 0, 41 }, [PCI_ID_VX882E] = { "VX882e", 4, 4, 1, 41 }, [PCI_ID_PCX882E] = { "PCX882e", 4, 4, 1, 41 }, [PCI_ID_VX881E] = { "VX881e", 4, 4, 1, 41 }, [PCI_ID_PCX881E] = { "PCX881e", 4, 4, 1, 41 }, [PCI_ID_VX1222HR] = { "VX1222HR", 6, 1, 2, 42 }, [PCI_ID_PCX1222HR] = { "PCX1222HR", 6, 1, 2, 42 }, [PCI_ID_VX1221HR] = { "VX1221HR", 6, 1, 2, 42 }, [PCI_ID_PCX1221HR] = { "PCX1221HR", 6, 1, 2, 42 }, [PCI_ID_VX1222E] = { "VX1222e", 6, 1, 3, 42 }, [PCI_ID_PCX1222E] = { "PCX1222e", 6, 1, 3, 42 }, [PCI_ID_VX1221E] = { "VX1221e", 6, 1, 3, 42 }, [PCI_ID_PCX1221E] = { "PCX1221e", 6, 1, 3, 42 }, [PCI_ID_VX222HR] = { "VX222HR", 1, 1, 4, 44 }, [PCI_ID_VX222E] = { "VX222e", 1, 1, 4, 44 }, [PCI_ID_PCX22HR] = { "PCX22HR", 1, 0, 4, 44 }, [PCI_ID_PCX22E] = { "PCX22e", 1, 0, 4, 44 }, [PCI_ID_VX222HRMIC] = { "VX222HR-Mic", 1, 1, 5, 44 }, [PCI_ID_VX222E_MIC] = { "VX222e-Mic", 1, 1, 5, 44 }, [PCI_ID_PCX924HR] = { "PCX924HR", 1, 1, 5, 44 }, [PCI_ID_PCX924E] = { "PCX924e", 1, 1, 5, 44 }, [PCI_ID_PCX924HRMIC] = { "PCX924HR-Mic", 1, 1, 5, 44 }, [PCI_ID_PCX924E_MIC] = { "PCX924e-Mic", 1, 1, 5, 44 }, }; /* boards without hw AES1 and SRC onboard are all using fw_file_set==4 */ /* VX222HR, VX222e, PCX22HR and PCX22e */ #define PCXHR_BOARD_HAS_AES1(x) (x->fw_file_set != 4) /* some boards do not support 192kHz on digital AES input plugs */ #define PCXHR_BOARD_AESIN_NO_192K(x) ((x->capture_chips == 0) || \ (x->fw_file_set == 0) || \ (x->fw_file_set == 2)) static int pcxhr_pll_freq_register(unsigned int freq, unsigned int* pllreg, unsigned int* realfreq) { unsigned int reg; if (freq < 6900 || freq > 110000) return -EINVAL; reg = (28224000 * 2) / freq; reg = (reg - 1) / 2; if (reg < 0x200) *pllreg = reg + 0x800; else if (reg < 0x400) *pllreg = reg & 0x1ff; else if (reg < 0x800) { *pllreg = ((reg >> 1) & 0x1ff) + 0x200; reg &= ~1; } else { *pllreg = ((reg >> 2) & 0x1ff) + 0x400; reg &= ~3; } if (realfreq) *realfreq = (28224000 / (reg + 1)); return 0; } #define PCXHR_FREQ_REG_MASK 0x1f #define PCXHR_FREQ_QUARTZ_48000 0x00 #define PCXHR_FREQ_QUARTZ_24000 0x01 #define PCXHR_FREQ_QUARTZ_12000 0x09 #define PCXHR_FREQ_QUARTZ_32000 0x08 #define PCXHR_FREQ_QUARTZ_16000 0x04 #define PCXHR_FREQ_QUARTZ_8000 0x0c #define PCXHR_FREQ_QUARTZ_44100 0x02 #define PCXHR_FREQ_QUARTZ_22050 0x0a 
#define PCXHR_FREQ_QUARTZ_11025 0x06 #define PCXHR_FREQ_PLL 0x05 #define PCXHR_FREQ_QUARTZ_192000 0x10 #define PCXHR_FREQ_QUARTZ_96000 0x18 #define PCXHR_FREQ_QUARTZ_176400 0x14 #define PCXHR_FREQ_QUARTZ_88200 0x1c #define PCXHR_FREQ_QUARTZ_128000 0x12 #define PCXHR_FREQ_QUARTZ_64000 0x1a #define PCXHR_FREQ_WORD_CLOCK 0x0f #define PCXHR_FREQ_SYNC_AES 0x0e #define PCXHR_FREQ_AES_1 0x07 #define PCXHR_FREQ_AES_2 0x0b #define PCXHR_FREQ_AES_3 0x03 #define PCXHR_FREQ_AES_4 0x0d static int pcxhr_get_clock_reg(struct pcxhr_mgr *mgr, unsigned int rate, unsigned int *reg, unsigned int *freq) { unsigned int val, realfreq, pllreg; struct pcxhr_rmh rmh; int err; realfreq = rate; switch (mgr->use_clock_type) { case PCXHR_CLOCK_TYPE_INTERNAL : /* clock by quartz or pll */ switch (rate) { case 48000 : val = PCXHR_FREQ_QUARTZ_48000; break; case 24000 : val = PCXHR_FREQ_QUARTZ_24000; break; case 12000 : val = PCXHR_FREQ_QUARTZ_12000; break; case 32000 : val = PCXHR_FREQ_QUARTZ_32000; break; case 16000 : val = PCXHR_FREQ_QUARTZ_16000; break; case 8000 : val = PCXHR_FREQ_QUARTZ_8000; break; case 44100 : val = PCXHR_FREQ_QUARTZ_44100; break; case 22050 : val = PCXHR_FREQ_QUARTZ_22050; break; case 11025 : val = PCXHR_FREQ_QUARTZ_11025; break; case 192000 : val = PCXHR_FREQ_QUARTZ_192000; break; case 96000 : val = PCXHR_FREQ_QUARTZ_96000; break; case 176400 : val = PCXHR_FREQ_QUARTZ_176400; break; case 88200 : val = PCXHR_FREQ_QUARTZ_88200; break; case 128000 : val = PCXHR_FREQ_QUARTZ_128000; break; case 64000 : val = PCXHR_FREQ_QUARTZ_64000; break; default : val = PCXHR_FREQ_PLL; /* get the value for the pll register */ err = pcxhr_pll_freq_register(rate, &pllreg, &realfreq); if (err) return err; pcxhr_init_rmh(&rmh, CMD_ACCESS_IO_WRITE); rmh.cmd[0] |= IO_NUM_REG_GENCLK; rmh.cmd[1] = pllreg & MASK_DSP_WORD; rmh.cmd[2] = pllreg >> 24; rmh.cmd_len = 3; err = pcxhr_send_msg(mgr, &rmh); if (err < 0) { snd_printk(KERN_ERR "error CMD_ACCESS_IO_WRITE " "for PLL register : %x!\n", err); return err; } } break; case PCXHR_CLOCK_TYPE_WORD_CLOCK: val = PCXHR_FREQ_WORD_CLOCK; break; case PCXHR_CLOCK_TYPE_AES_SYNC: val = PCXHR_FREQ_SYNC_AES; break; case PCXHR_CLOCK_TYPE_AES_1: val = PCXHR_FREQ_AES_1; break; case PCXHR_CLOCK_TYPE_AES_2: val = PCXHR_FREQ_AES_2; break; case PCXHR_CLOCK_TYPE_AES_3: val = PCXHR_FREQ_AES_3; break; case PCXHR_CLOCK_TYPE_AES_4: val = PCXHR_FREQ_AES_4; break; default: return -EINVAL; } *reg = val; *freq = realfreq; return 0; } static int pcxhr_sub_set_clock(struct pcxhr_mgr *mgr, unsigned int rate, int *changed) { unsigned int val, realfreq, speed; struct pcxhr_rmh rmh; int err; err = pcxhr_get_clock_reg(mgr, rate, &val, &realfreq); if (err) return err; /* codec speed modes */ if (rate < 55000) speed = 0; /* single speed */ else if (rate < 100000) speed = 1; /* dual speed */ else speed = 2; /* quad speed */ if (mgr->codec_speed != speed) { pcxhr_init_rmh(&rmh, CMD_ACCESS_IO_WRITE); /* mute outputs */ rmh.cmd[0] |= IO_NUM_REG_MUTE_OUT; if (DSP_EXT_CMD_SET(mgr)) { rmh.cmd[1] = 1; rmh.cmd_len = 2; } err = pcxhr_send_msg(mgr, &rmh); if (err) return err; pcxhr_init_rmh(&rmh, CMD_ACCESS_IO_WRITE); /* set speed ratio */ rmh.cmd[0] |= IO_NUM_SPEED_RATIO; rmh.cmd[1] = speed; rmh.cmd_len = 2; err = pcxhr_send_msg(mgr, &rmh); if (err) return err; } /* set the new frequency */ snd_printdd("clock register : set %x\n", val); err = pcxhr_write_io_num_reg_cont(mgr, PCXHR_FREQ_REG_MASK, val, changed); if (err) return err; mgr->sample_rate_real = realfreq; mgr->cur_clock_type = mgr->use_clock_type; /* unmute after 
codec speed modes */ if (mgr->codec_speed != speed) { pcxhr_init_rmh(&rmh, CMD_ACCESS_IO_READ); /* unmute outputs */ rmh.cmd[0] |= IO_NUM_REG_MUTE_OUT; if (DSP_EXT_CMD_SET(mgr)) { rmh.cmd[1] = 1; rmh.cmd_len = 2; } err = pcxhr_send_msg(mgr, &rmh); if (err) return err; mgr->codec_speed = speed; /* save new codec speed */ } snd_printdd("pcxhr_sub_set_clock to %dHz (realfreq=%d)\n", rate, realfreq); return 0; } #define PCXHR_MODIFY_CLOCK_S_BIT 0x04 #define PCXHR_IRQ_TIMER_FREQ 92000 #define PCXHR_IRQ_TIMER_PERIOD 48 int pcxhr_set_clock(struct pcxhr_mgr *mgr, unsigned int rate) { struct pcxhr_rmh rmh; int err, changed; if (rate == 0) return 0; /* nothing to do */ if (mgr->is_hr_stereo) err = hr222_sub_set_clock(mgr, rate, &changed); else err = pcxhr_sub_set_clock(mgr, rate, &changed); if (err) return err; if (changed) { pcxhr_init_rmh(&rmh, CMD_MODIFY_CLOCK); rmh.cmd[0] |= PCXHR_MODIFY_CLOCK_S_BIT; /* resync fifos */ if (rate < PCXHR_IRQ_TIMER_FREQ) rmh.cmd[1] = PCXHR_IRQ_TIMER_PERIOD; else rmh.cmd[1] = PCXHR_IRQ_TIMER_PERIOD * 2; rmh.cmd[2] = rate; rmh.cmd_len = 3; err = pcxhr_send_msg(mgr, &rmh); if (err) return err; } return 0; } static int pcxhr_sub_get_external_clock(struct pcxhr_mgr *mgr, enum pcxhr_clock_type clock_type, int *sample_rate) { struct pcxhr_rmh rmh; unsigned char reg; int err, rate; switch (clock_type) { case PCXHR_CLOCK_TYPE_WORD_CLOCK: reg = REG_STATUS_WORD_CLOCK; break; case PCXHR_CLOCK_TYPE_AES_SYNC: reg = REG_STATUS_AES_SYNC; break; case PCXHR_CLOCK_TYPE_AES_1: reg = REG_STATUS_AES_1; break; case PCXHR_CLOCK_TYPE_AES_2: reg = REG_STATUS_AES_2; break; case PCXHR_CLOCK_TYPE_AES_3: reg = REG_STATUS_AES_3; break; case PCXHR_CLOCK_TYPE_AES_4: reg = REG_STATUS_AES_4; break; default: return -EINVAL; } pcxhr_init_rmh(&rmh, CMD_ACCESS_IO_READ); rmh.cmd_len = 2; rmh.cmd[0] |= IO_NUM_REG_STATUS; if (mgr->last_reg_stat != reg) { rmh.cmd[1] = reg; err = pcxhr_send_msg(mgr, &rmh); if (err) return err; udelay(100); /* wait minimum 2 sample_frames at 32kHz ! 
*/ mgr->last_reg_stat = reg; } rmh.cmd[1] = REG_STATUS_CURRENT; err = pcxhr_send_msg(mgr, &rmh); if (err) return err; switch (rmh.stat[1] & 0x0f) { case REG_STATUS_SYNC_32000 : rate = 32000; break; case REG_STATUS_SYNC_44100 : rate = 44100; break; case REG_STATUS_SYNC_48000 : rate = 48000; break; case REG_STATUS_SYNC_64000 : rate = 64000; break; case REG_STATUS_SYNC_88200 : rate = 88200; break; case REG_STATUS_SYNC_96000 : rate = 96000; break; case REG_STATUS_SYNC_128000 : rate = 128000; break; case REG_STATUS_SYNC_176400 : rate = 176400; break; case REG_STATUS_SYNC_192000 : rate = 192000; break; default: rate = 0; } snd_printdd("External clock is at %d Hz\n", rate); *sample_rate = rate; return 0; } int pcxhr_get_external_clock(struct pcxhr_mgr *mgr, enum pcxhr_clock_type clock_type, int *sample_rate) { if (mgr->is_hr_stereo) return hr222_get_external_clock(mgr, clock_type, sample_rate); else return pcxhr_sub_get_external_clock(mgr, clock_type, sample_rate); } /* * start or stop playback/capture substream */ static int pcxhr_set_stream_state(struct pcxhr_stream *stream) { int err; struct snd_pcxhr *chip; struct pcxhr_rmh rmh; int stream_mask, start; if (stream->status == PCXHR_STREAM_STATUS_SCHEDULE_RUN) start = 1; else { if (stream->status != PCXHR_STREAM_STATUS_SCHEDULE_STOP) { snd_printk(KERN_ERR "ERROR pcxhr_set_stream_state " "CANNOT be stopped\n"); return -EINVAL; } start = 0; } if (!stream->substream) return -EINVAL; stream->timer_abs_periods = 0; stream->timer_period_frag = 0; /* reset theoretical stream pos */ stream->timer_buf_periods = 0; stream->timer_is_synced = 0; stream_mask = stream->pipe->is_capture ? 1 : 1<<stream->substream->number; pcxhr_init_rmh(&rmh, start ? CMD_START_STREAM : CMD_STOP_STREAM); pcxhr_set_pipe_cmd_params(&rmh, stream->pipe->is_capture, stream->pipe->first_audio, 0, stream_mask); chip = snd_pcm_substream_chip(stream->substream); err = pcxhr_send_msg(chip->mgr, &rmh); if (err) snd_printk(KERN_ERR "ERROR pcxhr_set_stream_state err=%x;\n", err); stream->status = start ? 
PCXHR_STREAM_STATUS_STARTED : PCXHR_STREAM_STATUS_STOPPED; return err; } #define HEADER_FMT_BASE_LIN 0xfed00000 #define HEADER_FMT_BASE_FLOAT 0xfad00000 #define HEADER_FMT_INTEL 0x00008000 #define HEADER_FMT_24BITS 0x00004000 #define HEADER_FMT_16BITS 0x00002000 #define HEADER_FMT_UPTO11 0x00000200 #define HEADER_FMT_UPTO32 0x00000100 #define HEADER_FMT_MONO 0x00000080 static int pcxhr_set_format(struct pcxhr_stream *stream) { int err, is_capture, sample_rate, stream_num; struct snd_pcxhr *chip; struct pcxhr_rmh rmh; unsigned int header; switch (stream->format) { case SNDRV_PCM_FORMAT_U8: header = HEADER_FMT_BASE_LIN; break; case SNDRV_PCM_FORMAT_S16_LE: header = HEADER_FMT_BASE_LIN | HEADER_FMT_16BITS | HEADER_FMT_INTEL; break; case SNDRV_PCM_FORMAT_S16_BE: header = HEADER_FMT_BASE_LIN | HEADER_FMT_16BITS; break; case SNDRV_PCM_FORMAT_S24_3LE: header = HEADER_FMT_BASE_LIN | HEADER_FMT_24BITS | HEADER_FMT_INTEL; break; case SNDRV_PCM_FORMAT_S24_3BE: header = HEADER_FMT_BASE_LIN | HEADER_FMT_24BITS; break; case SNDRV_PCM_FORMAT_FLOAT_LE: header = HEADER_FMT_BASE_FLOAT | HEADER_FMT_INTEL; break; default: snd_printk(KERN_ERR "error pcxhr_set_format() : unknown format\n"); return -EINVAL; } chip = snd_pcm_substream_chip(stream->substream); sample_rate = chip->mgr->sample_rate; if (sample_rate <= 32000 && sample_rate !=0) { if (sample_rate <= 11025) header |= HEADER_FMT_UPTO11; else header |= HEADER_FMT_UPTO32; } if (stream->channels == 1) header |= HEADER_FMT_MONO; is_capture = stream->pipe->is_capture; stream_num = is_capture ? 0 : stream->substream->number; pcxhr_init_rmh(&rmh, is_capture ? CMD_FORMAT_STREAM_IN : CMD_FORMAT_STREAM_OUT); pcxhr_set_pipe_cmd_params(&rmh, is_capture, stream->pipe->first_audio, stream_num, 0); if (is_capture) { /* bug with old dsp versions: */ /* bit 12 also sets the format of the playback stream */ if (DSP_EXT_CMD_SET(chip->mgr)) rmh.cmd[0] |= 1<<10; else rmh.cmd[0] |= 1<<12; } rmh.cmd[1] = 0; rmh.cmd_len = 2; if (DSP_EXT_CMD_SET(chip->mgr)) { /* add channels and set bit 19 if channels>2 */ rmh.cmd[1] = stream->channels; if (!is_capture) { /* playback : add channel mask to command */ rmh.cmd[2] = (stream->channels == 1) ? 0x01 : 0x03; rmh.cmd_len = 3; } } rmh.cmd[rmh.cmd_len++] = header >> 8; rmh.cmd[rmh.cmd_len++] = (header & 0xff) << 16; err = pcxhr_send_msg(chip->mgr, &rmh); if (err) snd_printk(KERN_ERR "ERROR pcxhr_set_format err=%x;\n", err); return err; } static int pcxhr_update_r_buffer(struct pcxhr_stream *stream) { int err, is_capture, stream_num; struct pcxhr_rmh rmh; struct snd_pcm_substream *subs = stream->substream; struct snd_pcxhr *chip = snd_pcm_substream_chip(subs); is_capture = (subs->stream == SNDRV_PCM_STREAM_CAPTURE); stream_num = is_capture ? 0 : subs->number; snd_printdd("pcxhr_update_r_buffer(pcm%c%d) : " "addr(%p) bytes(%zx) subs(%d)\n", is_capture ? 
'c' : 'p', chip->chip_idx, (void *)(long)subs->runtime->dma_addr, subs->runtime->dma_bytes, subs->number); pcxhr_init_rmh(&rmh, CMD_UPDATE_R_BUFFERS); pcxhr_set_pipe_cmd_params(&rmh, is_capture, stream->pipe->first_audio, stream_num, 0); /* max buffer size is 2 MByte */ snd_BUG_ON(subs->runtime->dma_bytes >= 0x200000); /* size in bits */ rmh.cmd[1] = subs->runtime->dma_bytes * 8; /* most significant byte */ rmh.cmd[2] = subs->runtime->dma_addr >> 24; /* this is a circular buffer */ rmh.cmd[2] |= 1<<19; /* least 3 significant bytes */ rmh.cmd[3] = subs->runtime->dma_addr & MASK_DSP_WORD; rmh.cmd_len = 4; err = pcxhr_send_msg(chip->mgr, &rmh); if (err) snd_printk(KERN_ERR "ERROR CMD_UPDATE_R_BUFFERS err=%x;\n", err); return err; } #if 0 static int pcxhr_pipe_sample_count(struct pcxhr_stream *stream, snd_pcm_uframes_t *sample_count) { struct pcxhr_rmh rmh; int err; pcxhr_t *chip = snd_pcm_substream_chip(stream->substream); pcxhr_init_rmh(&rmh, CMD_PIPE_SAMPLE_COUNT); pcxhr_set_pipe_cmd_params(&rmh, stream->pipe->is_capture, 0, 0, 1<<stream->pipe->first_audio); err = pcxhr_send_msg(chip->mgr, &rmh); if (err == 0) { *sample_count = ((snd_pcm_uframes_t)rmh.stat[0]) << 24; *sample_count += (snd_pcm_uframes_t)rmh.stat[1]; } snd_printdd("PIPE_SAMPLE_COUNT = %lx\n", *sample_count); return err; } #endif static inline int pcxhr_stream_scheduled_get_pipe(struct pcxhr_stream *stream, struct pcxhr_pipe **pipe) { if (stream->status == PCXHR_STREAM_STATUS_SCHEDULE_RUN) { *pipe = stream->pipe; return 1; } return 0; } static void pcxhr_trigger_tasklet(unsigned long arg) { unsigned long flags; int i, j, err; struct pcxhr_pipe *pipe; struct snd_pcxhr *chip; struct pcxhr_mgr *mgr = (struct pcxhr_mgr*)(arg); int capture_mask = 0; int playback_mask = 0; #ifdef CONFIG_SND_DEBUG_VERBOSE struct timeval my_tv1, my_tv2; do_gettimeofday(&my_tv1); #endif mutex_lock(&mgr->setup_mutex); /* check the pipes concerned and build pipe_array */ for (i = 0; i < mgr->num_cards; i++) { chip = mgr->chip[i]; for (j = 0; j < chip->nb_streams_capt; j++) { if (pcxhr_stream_scheduled_get_pipe(&chip->capture_stream[j], &pipe)) capture_mask |= (1 << pipe->first_audio); } for (j = 0; j < chip->nb_streams_play; j++) { if (pcxhr_stream_scheduled_get_pipe(&chip->playback_stream[j], &pipe)) { playback_mask |= (1 << pipe->first_audio); break; /* add only once, as all playback * streams of one chip use the same pipe */ } } } if (capture_mask == 0 && playback_mask == 0) { mutex_unlock(&mgr->setup_mutex); snd_printk(KERN_ERR "pcxhr_trigger_tasklet : no pipes\n"); return; } snd_printdd("pcxhr_trigger_tasklet : " "playback_mask=%x capture_mask=%x\n", playback_mask, capture_mask); /* synchronous stop of all the pipes concerned */ err = pcxhr_set_pipe_state(mgr, playback_mask, capture_mask, 0); if (err) { mutex_unlock(&mgr->setup_mutex); snd_printk(KERN_ERR "pcxhr_trigger_tasklet : " "error stop pipes (P%x C%x)\n", playback_mask, capture_mask); return; } /* the dsp lost format and buffer info with the stop pipe */ for (i = 0; i < mgr->num_cards; i++) { struct pcxhr_stream *stream; chip = mgr->chip[i]; for (j = 0; j < chip->nb_streams_capt; j++) { stream = &chip->capture_stream[j]; if (pcxhr_stream_scheduled_get_pipe(stream, &pipe)) { err = pcxhr_set_format(stream); err = pcxhr_update_r_buffer(stream); } } for (j = 0; j < chip->nb_streams_play; j++) { stream = &chip->playback_stream[j]; if (pcxhr_stream_scheduled_get_pipe(stream, &pipe)) { err = pcxhr_set_format(stream); err = pcxhr_update_r_buffer(stream); } } } /* start all the streams */ for (i = 0; 
i < mgr->num_cards; i++) { struct pcxhr_stream *stream; chip = mgr->chip[i]; for (j = 0; j < chip->nb_streams_capt; j++) { stream = &chip->capture_stream[j]; if (pcxhr_stream_scheduled_get_pipe(stream, &pipe)) err = pcxhr_set_stream_state(stream); } for (j = 0; j < chip->nb_streams_play; j++) { stream = &chip->playback_stream[j]; if (pcxhr_stream_scheduled_get_pipe(stream, &pipe)) err = pcxhr_set_stream_state(stream); } } /* synchronous start of all the pipes concerned */ err = pcxhr_set_pipe_state(mgr, playback_mask, capture_mask, 1); if (err) { mutex_unlock(&mgr->setup_mutex); snd_printk(KERN_ERR "pcxhr_trigger_tasklet : " "error start pipes (P%x C%x)\n", playback_mask, capture_mask); return; } /* put the streams into the running state now * (increment pointer by interrupt) */ spin_lock_irqsave(&mgr->lock, flags); for ( i =0; i < mgr->num_cards; i++) { struct pcxhr_stream *stream; chip = mgr->chip[i]; for(j = 0; j < chip->nb_streams_capt; j++) { stream = &chip->capture_stream[j]; if(stream->status == PCXHR_STREAM_STATUS_STARTED) stream->status = PCXHR_STREAM_STATUS_RUNNING; } for (j = 0; j < chip->nb_streams_play; j++) { stream = &chip->playback_stream[j]; if (stream->status == PCXHR_STREAM_STATUS_STARTED) { /* playback will already have advanced ! */ stream->timer_period_frag += mgr->granularity; stream->status = PCXHR_STREAM_STATUS_RUNNING; } } } spin_unlock_irqrestore(&mgr->lock, flags); mutex_unlock(&mgr->setup_mutex); #ifdef CONFIG_SND_DEBUG_VERBOSE do_gettimeofday(&my_tv2); snd_printdd("***TRIGGER TASKLET*** TIME = %ld (err = %x)\n", (long)(my_tv2.tv_usec - my_tv1.tv_usec), err); #endif } /* * trigger callback */ static int pcxhr_trigger(struct snd_pcm_substream *subs, int cmd) { struct pcxhr_stream *stream; struct snd_pcm_substream *s; switch (cmd) { case SNDRV_PCM_TRIGGER_START: snd_printdd("SNDRV_PCM_TRIGGER_START\n"); if (snd_pcm_stream_linked(subs)) { struct snd_pcxhr *chip = snd_pcm_substream_chip(subs); snd_pcm_group_for_each_entry(s, subs) { if (snd_pcm_substream_chip(s) != chip) continue; stream = s->runtime->private_data; stream->status = PCXHR_STREAM_STATUS_SCHEDULE_RUN; snd_pcm_trigger_done(s, subs); } tasklet_schedule(&chip->mgr->trigger_taskq); } else { stream = subs->runtime->private_data; snd_printdd("Only one Substream %c %d\n", stream->pipe->is_capture ? 
'C' : 'P', stream->pipe->first_audio); if (pcxhr_set_format(stream)) return -EINVAL; if (pcxhr_update_r_buffer(stream)) return -EINVAL; stream->status = PCXHR_STREAM_STATUS_SCHEDULE_RUN; if (pcxhr_set_stream_state(stream)) return -EINVAL; stream->status = PCXHR_STREAM_STATUS_RUNNING; } break; case SNDRV_PCM_TRIGGER_STOP: snd_printdd("SNDRV_PCM_TRIGGER_STOP\n"); snd_pcm_group_for_each_entry(s, subs) { stream = s->runtime->private_data; stream->status = PCXHR_STREAM_STATUS_SCHEDULE_STOP; if (pcxhr_set_stream_state(stream)) return -EINVAL; snd_pcm_trigger_done(s, subs); } break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* TODO */ default: return -EINVAL; } return 0; } static int pcxhr_hardware_timer(struct pcxhr_mgr *mgr, int start) { struct pcxhr_rmh rmh; int err; pcxhr_init_rmh(&rmh, CMD_SET_TIMER_INTERRUPT); if (start) { /* last dsp time invalid */ mgr->dsp_time_last = PCXHR_DSP_TIME_INVALID; rmh.cmd[0] |= mgr->granularity; } err = pcxhr_send_msg(mgr, &rmh); if (err < 0) snd_printk(KERN_ERR "error pcxhr_hardware_timer err(%x)\n", err); return err; } /* * prepare callback for all pcms */ static int pcxhr_prepare(struct snd_pcm_substream *subs) { struct snd_pcxhr *chip = snd_pcm_substream_chip(subs); struct pcxhr_mgr *mgr = chip->mgr; int err = 0; snd_printdd("pcxhr_prepare : period_size(%lx) periods(%x) buffer_size(%lx)\n", subs->runtime->period_size, subs->runtime->periods, subs->runtime->buffer_size); mutex_lock(&mgr->setup_mutex); do { /* only the first stream can choose the sample rate */ /* set the clock only once (first stream) */ if (mgr->sample_rate != subs->runtime->rate) { err = pcxhr_set_clock(mgr, subs->runtime->rate); if (err) break; if (mgr->sample_rate == 0) /* start the DSP-timer */ err = pcxhr_hardware_timer(mgr, 1); mgr->sample_rate = subs->runtime->rate; } } while(0); /* do only once (so we can use break instead of goto) */ mutex_unlock(&mgr->setup_mutex); return err; } /* * HW_PARAMS callback for all pcms */ static int pcxhr_hw_params(struct snd_pcm_substream *subs, struct snd_pcm_hw_params *hw) { struct snd_pcxhr *chip = snd_pcm_substream_chip(subs); struct pcxhr_mgr *mgr = chip->mgr; struct pcxhr_stream *stream = subs->runtime->private_data; snd_pcm_format_t format; int err; int channels; /* set up channels */ channels = params_channels(hw); /* set up format for the stream */ format = params_format(hw); mutex_lock(&mgr->setup_mutex); stream->channels = channels; stream->format = format; /* allocate buffer */ err = snd_pcm_lib_malloc_pages(subs, params_buffer_bytes(hw)); mutex_unlock(&mgr->setup_mutex); return err; } static int pcxhr_hw_free(struct snd_pcm_substream *subs) { snd_pcm_lib_free_pages(subs); return 0; } /* * CONFIGURATION SPACE for all pcms, mono pcm must update channels_max */ static struct snd_pcm_hardware pcxhr_caps = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START), .formats = (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_3BE | SNDRV_PCM_FMTBIT_FLOAT_LE), .rates = (SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_192000), .rate_min = 8000, .rate_max = 192000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (32*1024), /* 1 byte == 1 frame U8 mono (PCXHR_GRANULARITY is frames!) 
*/ .period_bytes_min = (2*PCXHR_GRANULARITY), .period_bytes_max = (16*1024), .periods_min = 2, .periods_max = (32*1024/PCXHR_GRANULARITY), }; static int pcxhr_open(struct snd_pcm_substream *subs) { struct snd_pcxhr *chip = snd_pcm_substream_chip(subs); struct pcxhr_mgr *mgr = chip->mgr; struct snd_pcm_runtime *runtime = subs->runtime; struct pcxhr_stream *stream; int err; mutex_lock(&mgr->setup_mutex); /* copy the struct snd_pcm_hardware struct */ runtime->hw = pcxhr_caps; if( subs->stream == SNDRV_PCM_STREAM_PLAYBACK ) { snd_printdd("pcxhr_open playback chip%d subs%d\n", chip->chip_idx, subs->number); stream = &chip->playback_stream[subs->number]; } else { snd_printdd("pcxhr_open capture chip%d subs%d\n", chip->chip_idx, subs->number); if (mgr->mono_capture) runtime->hw.channels_max = 1; else runtime->hw.channels_min = 2; stream = &chip->capture_stream[subs->number]; } if (stream->status != PCXHR_STREAM_STATUS_FREE){ /* streams in use */ snd_printk(KERN_ERR "pcxhr_open chip%d subs%d in use\n", chip->chip_idx, subs->number); mutex_unlock(&mgr->setup_mutex); return -EBUSY; } /* float format support is in some cases buggy on stereo cards */ if (mgr->is_hr_stereo) runtime->hw.formats &= ~SNDRV_PCM_FMTBIT_FLOAT_LE; /* buffer-size should better be multiple of period-size */ err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (err < 0) { mutex_unlock(&mgr->setup_mutex); return err; } /* if a sample rate is already used or fixed by external clock, * the stream cannot change */ if (mgr->sample_rate) runtime->hw.rate_min = runtime->hw.rate_max = mgr->sample_rate; else { if (mgr->use_clock_type != PCXHR_CLOCK_TYPE_INTERNAL) { int external_rate; if (pcxhr_get_external_clock(mgr, mgr->use_clock_type, &external_rate) || external_rate == 0) { /* cannot detect the external clock rate */ mutex_unlock(&mgr->setup_mutex); return -EBUSY; } runtime->hw.rate_min = external_rate; runtime->hw.rate_max = external_rate; } } stream->status = PCXHR_STREAM_STATUS_OPEN; stream->substream = subs; stream->channels = 0; /* not configured yet */ runtime->private_data = stream; /* better get a divisor of granularity values (96 or 192) */ snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 32); snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 32); snd_pcm_set_sync(subs); mgr->ref_count_rate++; mutex_unlock(&mgr->setup_mutex); return 0; } static int pcxhr_close(struct snd_pcm_substream *subs) { struct snd_pcxhr *chip = snd_pcm_substream_chip(subs); struct pcxhr_mgr *mgr = chip->mgr; struct pcxhr_stream *stream = subs->runtime->private_data; mutex_lock(&mgr->setup_mutex); snd_printdd("pcxhr_close chip%d subs%d\n", chip->chip_idx, subs->number); /* sample rate released */ if (--mgr->ref_count_rate == 0) { mgr->sample_rate = 0; /* the sample rate is no more locked */ pcxhr_hardware_timer(mgr, 0); /* stop the DSP-timer */ } stream->status = PCXHR_STREAM_STATUS_FREE; stream->substream = NULL; mutex_unlock(&mgr->setup_mutex); return 0; } static snd_pcm_uframes_t pcxhr_stream_pointer(struct snd_pcm_substream *subs) { unsigned long flags; u_int32_t timer_period_frag; int timer_buf_periods; struct snd_pcxhr *chip = snd_pcm_substream_chip(subs); struct snd_pcm_runtime *runtime = subs->runtime; struct pcxhr_stream *stream = runtime->private_data; spin_lock_irqsave(&chip->mgr->lock, flags); /* get the period fragment and the nb of periods in the buffer */ timer_period_frag = stream->timer_period_frag; timer_buf_periods = stream->timer_buf_periods; 
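/* Both counters are advanced from the DSP timer interrupt, so they
 * are snapshotted together under mgr->lock; the position in frames
 * returned below is buf_periods * period_size + period_frag. */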
spin_unlock_irqrestore(&chip->mgr->lock, flags); return (snd_pcm_uframes_t)((timer_buf_periods * runtime->period_size) + timer_period_frag); } static struct snd_pcm_ops pcxhr_ops = { .open = pcxhr_open, .close = pcxhr_close, .ioctl = snd_pcm_lib_ioctl, .prepare = pcxhr_prepare, .hw_params = pcxhr_hw_params, .hw_free = pcxhr_hw_free, .trigger = pcxhr_trigger, .pointer = pcxhr_stream_pointer, }; /* */ int pcxhr_create_pcm(struct snd_pcxhr *chip) { int err; struct snd_pcm *pcm; char name[32]; sprintf(name, "pcxhr %d", chip->chip_idx); if ((err = snd_pcm_new(chip->card, name, 0, chip->nb_streams_play, chip->nb_streams_capt, &pcm)) < 0) { snd_printk(KERN_ERR "cannot create pcm %s\n", name); return err; } pcm->private_data = chip; if (chip->nb_streams_play) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &pcxhr_ops); if (chip->nb_streams_capt) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcxhr_ops); pcm->info_flags = 0; strcpy(pcm->name, name); snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->mgr->pci), 32*1024, 32*1024); chip->pcm = pcm; return 0; } static int pcxhr_chip_free(struct snd_pcxhr *chip) { kfree(chip); return 0; } static int pcxhr_chip_dev_free(struct snd_device *device) { struct snd_pcxhr *chip = device->device_data; return pcxhr_chip_free(chip); } /* */ static int __devinit pcxhr_create(struct pcxhr_mgr *mgr, struct snd_card *card, int idx) { int err; struct snd_pcxhr *chip; static struct snd_device_ops ops = { .dev_free = pcxhr_chip_dev_free, }; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (! chip) { snd_printk(KERN_ERR "cannot allocate chip\n"); return -ENOMEM; } chip->card = card; chip->chip_idx = idx; chip->mgr = mgr; if (idx < mgr->playback_chips) /* stereo or mono streams */ chip->nb_streams_play = PCXHR_PLAYBACK_STREAMS; if (idx < mgr->capture_chips) { if (mgr->mono_capture) chip->nb_streams_capt = 2; /* 2 mono streams */ else chip->nb_streams_capt = 1; /* or 1 stereo stream */ } if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { pcxhr_chip_free(chip); return err; } mgr->chip[idx] = chip; snd_card_set_dev(card, &mgr->pci->dev); return 0; } /* proc interface */ static void pcxhr_proc_info(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_pcxhr *chip = entry->private_data; struct pcxhr_mgr *mgr = chip->mgr; snd_iprintf(buffer, "\n%s\n", mgr->longname); /* stats available when embedded DSP is running */ if (mgr->dsp_loaded & (1 << PCXHR_FIRMWARE_DSP_MAIN_INDEX)) { struct pcxhr_rmh rmh; short ver_maj = (mgr->dsp_version >> 16) & 0xff; short ver_min = (mgr->dsp_version >> 8) & 0xff; short ver_build = mgr->dsp_version & 0xff; snd_iprintf(buffer, "module version %s\n", PCXHR_DRIVER_VERSION_STRING); snd_iprintf(buffer, "dsp version %d.%d.%d\n", ver_maj, ver_min, ver_build); if (mgr->board_has_analog) snd_iprintf(buffer, "analog io available\n"); else snd_iprintf(buffer, "digital only board\n"); /* calc cpu load of the dsp */ pcxhr_init_rmh(&rmh, CMD_GET_DSP_RESOURCES); if( ! 
pcxhr_send_msg(mgr, &rmh) ) { int cur = rmh.stat[0]; int ref = rmh.stat[1]; if (ref > 0) { if (mgr->sample_rate_real != 0 && mgr->sample_rate_real != 48000) { ref = (ref * 48000) / mgr->sample_rate_real; if (mgr->sample_rate_real >= PCXHR_IRQ_TIMER_FREQ) ref *= 2; } cur = 100 - (100 * cur) / ref; snd_iprintf(buffer, "cpu load %d%%\n", cur); snd_iprintf(buffer, "buffer pool %d/%d\n", rmh.stat[2], rmh.stat[3]); } } snd_iprintf(buffer, "dma granularity : %d\n", mgr->granularity); snd_iprintf(buffer, "dsp time errors : %d\n", mgr->dsp_time_err); snd_iprintf(buffer, "dsp async pipe xrun errors : %d\n", mgr->async_err_pipe_xrun); snd_iprintf(buffer, "dsp async stream xrun errors : %d\n", mgr->async_err_stream_xrun); snd_iprintf(buffer, "dsp async last other error : %x\n", mgr->async_err_other_last); /* debug zone dsp */ rmh.cmd[0] = 0x4200 + PCXHR_SIZE_MAX_STATUS; rmh.cmd_len = 1; rmh.stat_len = PCXHR_SIZE_MAX_STATUS; rmh.dsp_stat = 0; rmh.cmd_idx = CMD_LAST_INDEX; if( ! pcxhr_send_msg(mgr, &rmh) ) { int i; if (rmh.stat_len > 8) rmh.stat_len = 8; for (i = 0; i < rmh.stat_len; i++) snd_iprintf(buffer, "debug[%02d] = %06x\n", i, rmh.stat[i]); } } else snd_iprintf(buffer, "no firmware loaded\n"); snd_iprintf(buffer, "\n"); } static void pcxhr_proc_sync(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_pcxhr *chip = entry->private_data; struct pcxhr_mgr *mgr = chip->mgr; static const char *textsHR22[3] = { "Internal", "AES Sync", "AES 1" }; static const char *textsPCXHR[7] = { "Internal", "Word", "AES Sync", "AES 1", "AES 2", "AES 3", "AES 4" }; const char **texts; int max_clock; if (mgr->is_hr_stereo) { texts = textsHR22; max_clock = HR22_CLOCK_TYPE_MAX; } else { texts = textsPCXHR; max_clock = PCXHR_CLOCK_TYPE_MAX; } snd_iprintf(buffer, "\n%s\n", mgr->longname); snd_iprintf(buffer, "Current Sample Clock\t: %s\n", texts[mgr->cur_clock_type]); snd_iprintf(buffer, "Current Sample Rate\t= %d\n", mgr->sample_rate_real); /* commands available when embedded DSP is running */ if (mgr->dsp_loaded & (1 << PCXHR_FIRMWARE_DSP_MAIN_INDEX)) { int i, err, sample_rate; for (i = 1; i <= max_clock; i++) { err = pcxhr_get_external_clock(mgr, i, &sample_rate); if (err) break; snd_iprintf(buffer, "%s Clock\t\t= %d\n", texts[i], sample_rate); } } else snd_iprintf(buffer, "no firmware loaded\n"); snd_iprintf(buffer, "\n"); } static void pcxhr_proc_gpio_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_pcxhr *chip = entry->private_data; struct pcxhr_mgr *mgr = chip->mgr; /* commands available when embedded DSP is running */ if (mgr->dsp_loaded & (1 << PCXHR_FIRMWARE_DSP_MAIN_INDEX)) { /* gpio ports on stereo boards only available */ int value = 0; hr222_read_gpio(mgr, 1, &value); /* GPI */ snd_iprintf(buffer, "GPI: 0x%x\n", value); hr222_read_gpio(mgr, 0, &value); /* GP0 */ snd_iprintf(buffer, "GPO: 0x%x\n", value); } else snd_iprintf(buffer, "no firmware loaded\n"); snd_iprintf(buffer, "\n"); } static void pcxhr_proc_gpo_write(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_pcxhr *chip = entry->private_data; struct pcxhr_mgr *mgr = chip->mgr; char line[64]; int value; /* commands available when embedded DSP is running */ if (!(mgr->dsp_loaded & (1 << PCXHR_FIRMWARE_DSP_MAIN_INDEX))) return; while (!snd_info_get_line(buffer, line, sizeof(line))) { if (sscanf(line, "GPO: 0x%x", &value) != 1) continue; hr222_write_gpo(mgr, value); /* GP0 */ } } static void __devinit pcxhr_proc_init(struct snd_pcxhr *chip) { struct snd_info_entry *entry; if (! 
snd_card_proc_new(chip->card, "info", &entry)) snd_info_set_text_ops(entry, chip, pcxhr_proc_info); if (! snd_card_proc_new(chip->card, "sync", &entry)) snd_info_set_text_ops(entry, chip, pcxhr_proc_sync); /* gpio available on stereo sound cards only */ if (chip->mgr->is_hr_stereo && !snd_card_proc_new(chip->card, "gpio", &entry)) { snd_info_set_text_ops(entry, chip, pcxhr_proc_gpio_read); entry->c.text.write = pcxhr_proc_gpo_write; entry->mode |= S_IWUSR; } } /* end of proc interface */ /* * release all the cards assigned to a manager instance */ static int pcxhr_free(struct pcxhr_mgr *mgr) { unsigned int i; for (i = 0; i < mgr->num_cards; i++) { if (mgr->chip[i]) snd_card_free(mgr->chip[i]->card); } /* reset board if some firmware was loaded */ if(mgr->dsp_loaded) { pcxhr_reset_board(mgr); snd_printdd("reset pcxhr !\n"); } /* release irq */ if (mgr->irq >= 0) free_irq(mgr->irq, mgr); pci_release_regions(mgr->pci); /* free hostport purgebuffer */ if (mgr->hostport.area) { snd_dma_free_pages(&mgr->hostport); mgr->hostport.area = NULL; } kfree(mgr->prmh); pci_disable_device(mgr->pci); kfree(mgr); return 0; } /* * probe function - creates the card manager */ static int __devinit pcxhr_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct pcxhr_mgr *mgr; unsigned int i; int err; size_t size; char *card_name; if (dev >= SNDRV_CARDS) return -ENODEV; if (! enable[dev]) { dev++; return -ENOENT; } /* enable PCI device */ if ((err = pci_enable_device(pci)) < 0) return err; pci_set_master(pci); /* check if we can restrict PCI DMA transfers to 32 bits */ if (pci_set_dma_mask(pci, DMA_BIT_MASK(32)) < 0) { snd_printk(KERN_ERR "architecture does not support " "32bit PCI busmaster DMA\n"); pci_disable_device(pci); return -ENXIO; } /* alloc card manager */ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); if (! 
mgr) { pci_disable_device(pci); return -ENOMEM; } if (snd_BUG_ON(pci_id->driver_data >= PCI_ID_LAST)) { kfree(mgr); pci_disable_device(pci); return -ENODEV; } card_name = pcxhr_board_params[pci_id->driver_data].board_name; mgr->playback_chips = pcxhr_board_params[pci_id->driver_data].playback_chips; mgr->capture_chips = pcxhr_board_params[pci_id->driver_data].capture_chips; mgr->fw_file_set = pcxhr_board_params[pci_id->driver_data].fw_file_set; mgr->firmware_num = pcxhr_board_params[pci_id->driver_data].firmware_num; mgr->mono_capture = mono[dev]; mgr->is_hr_stereo = (mgr->playback_chips == 1); mgr->board_has_aes1 = PCXHR_BOARD_HAS_AES1(mgr); mgr->board_aes_in_192k = !PCXHR_BOARD_AESIN_NO_192K(mgr); if (mgr->is_hr_stereo) mgr->granularity = PCXHR_GRANULARITY_HR22; else mgr->granularity = PCXHR_GRANULARITY; /* resource assignment */ if ((err = pci_request_regions(pci, card_name)) < 0) { kfree(mgr); pci_disable_device(pci); return err; } for (i = 0; i < 3; i++) mgr->port[i] = pci_resource_start(pci, i); mgr->pci = pci; mgr->irq = -1; if (request_irq(pci->irq, pcxhr_interrupt, IRQF_SHARED, KBUILD_MODNAME, mgr)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); pcxhr_free(mgr); return -EBUSY; } mgr->irq = pci->irq; sprintf(mgr->shortname, "Digigram %s", card_name); sprintf(mgr->longname, "%s at 0x%lx & 0x%lx, 0x%lx irq %i", mgr->shortname, mgr->port[0], mgr->port[1], mgr->port[2], mgr->irq); /* ISR spinlock */ spin_lock_init(&mgr->lock); spin_lock_init(&mgr->msg_lock); /* init setup mutex*/ mutex_init(&mgr->setup_mutex); /* init taslket */ tasklet_init(&mgr->msg_taskq, pcxhr_msg_tasklet, (unsigned long) mgr); tasklet_init(&mgr->trigger_taskq, pcxhr_trigger_tasklet, (unsigned long) mgr); mgr->prmh = kmalloc(sizeof(*mgr->prmh) + sizeof(u32) * (PCXHR_SIZE_MAX_LONG_STATUS - PCXHR_SIZE_MAX_STATUS), GFP_KERNEL); if (! mgr->prmh) { pcxhr_free(mgr); return -ENOMEM; } for (i=0; i < PCXHR_MAX_CARDS; i++) { struct snd_card *card; char tmpid[16]; int idx; if (i >= max(mgr->playback_chips, mgr->capture_chips)) break; mgr->num_cards++; if (index[dev] < 0) idx = index[dev]; else idx = index[dev] + i; snprintf(tmpid, sizeof(tmpid), "%s-%d", id[dev] ? 
id[dev] : card_name, i); err = snd_card_create(idx, tmpid, THIS_MODULE, 0, &card); if (err < 0) { snd_printk(KERN_ERR "cannot allocate the card %d\n", i); pcxhr_free(mgr); return err; } strcpy(card->driver, DRIVER_NAME); sprintf(card->shortname, "%s [PCM #%d]", mgr->shortname, i); sprintf(card->longname, "%s [PCM #%d]", mgr->longname, i); if ((err = pcxhr_create(mgr, card, i)) < 0) { snd_card_free(card); pcxhr_free(mgr); return err; } if (i == 0) /* init proc interface only for chip0 */ pcxhr_proc_init(mgr->chip[i]); if ((err = snd_card_register(card)) < 0) { pcxhr_free(mgr); return err; } } /* create hostport purgebuffer */ size = PAGE_ALIGN(sizeof(struct pcxhr_hostport)); if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci), size, &mgr->hostport) < 0) { pcxhr_free(mgr); return -ENOMEM; } /* init purgebuffer */ memset(mgr->hostport.area, 0, size); /* create a DSP loader */ err = pcxhr_setup_firmware(mgr); if (err < 0) { pcxhr_free(mgr); return err; } pci_set_drvdata(pci, mgr); dev++; return 0; } static void __devexit pcxhr_remove(struct pci_dev *pci) { pcxhr_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static struct pci_driver driver = { .name = KBUILD_MODNAME, .id_table = pcxhr_ids, .probe = pcxhr_probe, .remove = __devexit_p(pcxhr_remove), }; static int __init pcxhr_module_init(void) { return pci_register_driver(&driver); } static void __exit pcxhr_module_exit(void) { pci_unregister_driver(&driver); } module_init(pcxhr_module_init) module_exit(pcxhr_module_exit)
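/* Note on the probe error handling above: once the PCI regions have
 * been requested, every later failure path funnels through
 * pcxhr_free(), which releases cards, IRQ, regions, the hostport
 * buffer and the manager in one place instead of duplicating partial
 * cleanup at each exit point. */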
gpl-2.0
Split-Screen/android_kernel_samsung_manta
net/caif/cfdbgl.c
5149
1551
/* * Copyright (C) ST-Ericsson AB 2010 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com * License terms: GNU General Public License (GPL) version 2 */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ #include <linux/stddef.h> #include <linux/slab.h> #include <net/caif/caif_layer.h> #include <net/caif/cfsrvl.h> #include <net/caif/cfpkt.h> #define container_obj(layr) ((struct cfsrvl *) layr) static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt); static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt); struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info) { struct cfsrvl *dbg = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC); if (!dbg) return NULL; caif_assert(offsetof(struct cfsrvl, layer) == 0); cfsrvl_init(dbg, channel_id, dev_info, false); dbg->layer.receive = cfdbgl_receive; dbg->layer.transmit = cfdbgl_transmit; snprintf(dbg->layer.name, CAIF_LAYER_NAME_SZ - 1, "dbg%d", channel_id); return &dbg->layer; } static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt) { return layr->up->receive(layr->up, pkt); } static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt) { struct cfsrvl *service = container_obj(layr); struct caif_payload_info *info; int ret; if (!cfsrvl_ready(service, &ret)) { cfpkt_destroy(pkt); return ret; } /* Add info for MUX-layer to route the packet out */ info = cfpkt_info(pkt); info->channel_id = service->layer.id; info->dev_info = &service->dev_info; return layr->dn->transmit(layr->dn, pkt); }
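/* cfdbgl is deliberately a thin service layer: cfdbgl_receive() hands
 * the packet up unchanged, and cfdbgl_transmit() only stamps the
 * payload info (channel id and device info) so that the CAIF MUX
 * layer below can route the packet to the right link. */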
gpl-2.0
motley-git/TF201-Kernel
sound/drivers/opl4/opl4_synth.c
10013
23398
/* * OPL4 MIDI synthesizer functions * * Copyright (c) 2003 by Clemens Ladisch <clemens@ladisch.de> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, this software may be distributed and/or modified under the * terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opl4_local.h" #include <linux/delay.h> #include <asm/io.h> #include <sound/asoundef.h> /* GM2 controllers */ #ifndef MIDI_CTL_RELEASE_TIME #define MIDI_CTL_RELEASE_TIME 0x48 #define MIDI_CTL_ATTACK_TIME 0x49 #define MIDI_CTL_DECAY_TIME 0x4b #define MIDI_CTL_VIBRATO_RATE 0x4c #define MIDI_CTL_VIBRATO_DEPTH 0x4d #define MIDI_CTL_VIBRATO_DELAY 0x4e #endif /* * This table maps 100/128 cents to F_NUMBER. 
*/ static const s16 snd_opl4_pitch_map[0x600] = { 0x000,0x000,0x001,0x001,0x002,0x002,0x003,0x003, 0x004,0x004,0x005,0x005,0x006,0x006,0x006,0x007, 0x007,0x008,0x008,0x009,0x009,0x00a,0x00a,0x00b, 0x00b,0x00c,0x00c,0x00d,0x00d,0x00d,0x00e,0x00e, 0x00f,0x00f,0x010,0x010,0x011,0x011,0x012,0x012, 0x013,0x013,0x014,0x014,0x015,0x015,0x015,0x016, 0x016,0x017,0x017,0x018,0x018,0x019,0x019,0x01a, 0x01a,0x01b,0x01b,0x01c,0x01c,0x01d,0x01d,0x01e, 0x01e,0x01e,0x01f,0x01f,0x020,0x020,0x021,0x021, 0x022,0x022,0x023,0x023,0x024,0x024,0x025,0x025, 0x026,0x026,0x027,0x027,0x028,0x028,0x029,0x029, 0x029,0x02a,0x02a,0x02b,0x02b,0x02c,0x02c,0x02d, 0x02d,0x02e,0x02e,0x02f,0x02f,0x030,0x030,0x031, 0x031,0x032,0x032,0x033,0x033,0x034,0x034,0x035, 0x035,0x036,0x036,0x037,0x037,0x038,0x038,0x038, 0x039,0x039,0x03a,0x03a,0x03b,0x03b,0x03c,0x03c, 0x03d,0x03d,0x03e,0x03e,0x03f,0x03f,0x040,0x040, 0x041,0x041,0x042,0x042,0x043,0x043,0x044,0x044, 0x045,0x045,0x046,0x046,0x047,0x047,0x048,0x048, 0x049,0x049,0x04a,0x04a,0x04b,0x04b,0x04c,0x04c, 0x04d,0x04d,0x04e,0x04e,0x04f,0x04f,0x050,0x050, 0x051,0x051,0x052,0x052,0x053,0x053,0x054,0x054, 0x055,0x055,0x056,0x056,0x057,0x057,0x058,0x058, 0x059,0x059,0x05a,0x05a,0x05b,0x05b,0x05c,0x05c, 0x05d,0x05d,0x05e,0x05e,0x05f,0x05f,0x060,0x060, 0x061,0x061,0x062,0x062,0x063,0x063,0x064,0x064, 0x065,0x065,0x066,0x066,0x067,0x067,0x068,0x068, 0x069,0x069,0x06a,0x06a,0x06b,0x06b,0x06c,0x06c, 0x06d,0x06d,0x06e,0x06e,0x06f,0x06f,0x070,0x071, 0x071,0x072,0x072,0x073,0x073,0x074,0x074,0x075, 0x075,0x076,0x076,0x077,0x077,0x078,0x078,0x079, 0x079,0x07a,0x07a,0x07b,0x07b,0x07c,0x07c,0x07d, 0x07d,0x07e,0x07e,0x07f,0x07f,0x080,0x081,0x081, 0x082,0x082,0x083,0x083,0x084,0x084,0x085,0x085, 0x086,0x086,0x087,0x087,0x088,0x088,0x089,0x089, 0x08a,0x08a,0x08b,0x08b,0x08c,0x08d,0x08d,0x08e, 0x08e,0x08f,0x08f,0x090,0x090,0x091,0x091,0x092, 0x092,0x093,0x093,0x094,0x094,0x095,0x096,0x096, 0x097,0x097,0x098,0x098,0x099,0x099,0x09a,0x09a, 0x09b,0x09b,0x09c,0x09c,0x09d,0x09d,0x09e,0x09f, 0x09f,0x0a0,0x0a0,0x0a1,0x0a1,0x0a2,0x0a2,0x0a3, 0x0a3,0x0a4,0x0a4,0x0a5,0x0a6,0x0a6,0x0a7,0x0a7, 0x0a8,0x0a8,0x0a9,0x0a9,0x0aa,0x0aa,0x0ab,0x0ab, 0x0ac,0x0ad,0x0ad,0x0ae,0x0ae,0x0af,0x0af,0x0b0, 0x0b0,0x0b1,0x0b1,0x0b2,0x0b2,0x0b3,0x0b4,0x0b4, 0x0b5,0x0b5,0x0b6,0x0b6,0x0b7,0x0b7,0x0b8,0x0b8, 0x0b9,0x0ba,0x0ba,0x0bb,0x0bb,0x0bc,0x0bc,0x0bd, 0x0bd,0x0be,0x0be,0x0bf,0x0c0,0x0c0,0x0c1,0x0c1, 0x0c2,0x0c2,0x0c3,0x0c3,0x0c4,0x0c4,0x0c5,0x0c6, 0x0c6,0x0c7,0x0c7,0x0c8,0x0c8,0x0c9,0x0c9,0x0ca, 0x0cb,0x0cb,0x0cc,0x0cc,0x0cd,0x0cd,0x0ce,0x0ce, 0x0cf,0x0d0,0x0d0,0x0d1,0x0d1,0x0d2,0x0d2,0x0d3, 0x0d3,0x0d4,0x0d5,0x0d5,0x0d6,0x0d6,0x0d7,0x0d7, 0x0d8,0x0d8,0x0d9,0x0da,0x0da,0x0db,0x0db,0x0dc, 0x0dc,0x0dd,0x0de,0x0de,0x0df,0x0df,0x0e0,0x0e0, 0x0e1,0x0e1,0x0e2,0x0e3,0x0e3,0x0e4,0x0e4,0x0e5, 0x0e5,0x0e6,0x0e7,0x0e7,0x0e8,0x0e8,0x0e9,0x0e9, 0x0ea,0x0eb,0x0eb,0x0ec,0x0ec,0x0ed,0x0ed,0x0ee, 0x0ef,0x0ef,0x0f0,0x0f0,0x0f1,0x0f1,0x0f2,0x0f3, 0x0f3,0x0f4,0x0f4,0x0f5,0x0f5,0x0f6,0x0f7,0x0f7, 0x0f8,0x0f8,0x0f9,0x0f9,0x0fa,0x0fb,0x0fb,0x0fc, 0x0fc,0x0fd,0x0fd,0x0fe,0x0ff,0x0ff,0x100,0x100, 0x101,0x101,0x102,0x103,0x103,0x104,0x104,0x105, 0x106,0x106,0x107,0x107,0x108,0x108,0x109,0x10a, 0x10a,0x10b,0x10b,0x10c,0x10c,0x10d,0x10e,0x10e, 0x10f,0x10f,0x110,0x111,0x111,0x112,0x112,0x113, 0x114,0x114,0x115,0x115,0x116,0x116,0x117,0x118, 0x118,0x119,0x119,0x11a,0x11b,0x11b,0x11c,0x11c, 0x11d,0x11e,0x11e,0x11f,0x11f,0x120,0x120,0x121, 0x122,0x122,0x123,0x123,0x124,0x125,0x125,0x126, 0x126,0x127,0x128,0x128,0x129,0x129,0x12a,0x12b, 
0x12b,0x12c,0x12c,0x12d,0x12e,0x12e,0x12f,0x12f, 0x130,0x131,0x131,0x132,0x132,0x133,0x134,0x134, 0x135,0x135,0x136,0x137,0x137,0x138,0x138,0x139, 0x13a,0x13a,0x13b,0x13b,0x13c,0x13d,0x13d,0x13e, 0x13e,0x13f,0x140,0x140,0x141,0x141,0x142,0x143, 0x143,0x144,0x144,0x145,0x146,0x146,0x147,0x148, 0x148,0x149,0x149,0x14a,0x14b,0x14b,0x14c,0x14c, 0x14d,0x14e,0x14e,0x14f,0x14f,0x150,0x151,0x151, 0x152,0x153,0x153,0x154,0x154,0x155,0x156,0x156, 0x157,0x157,0x158,0x159,0x159,0x15a,0x15b,0x15b, 0x15c,0x15c,0x15d,0x15e,0x15e,0x15f,0x160,0x160, 0x161,0x161,0x162,0x163,0x163,0x164,0x165,0x165, 0x166,0x166,0x167,0x168,0x168,0x169,0x16a,0x16a, 0x16b,0x16b,0x16c,0x16d,0x16d,0x16e,0x16f,0x16f, 0x170,0x170,0x171,0x172,0x172,0x173,0x174,0x174, 0x175,0x175,0x176,0x177,0x177,0x178,0x179,0x179, 0x17a,0x17a,0x17b,0x17c,0x17c,0x17d,0x17e,0x17e, 0x17f,0x180,0x180,0x181,0x181,0x182,0x183,0x183, 0x184,0x185,0x185,0x186,0x187,0x187,0x188,0x188, 0x189,0x18a,0x18a,0x18b,0x18c,0x18c,0x18d,0x18e, 0x18e,0x18f,0x190,0x190,0x191,0x191,0x192,0x193, 0x193,0x194,0x195,0x195,0x196,0x197,0x197,0x198, 0x199,0x199,0x19a,0x19a,0x19b,0x19c,0x19c,0x19d, 0x19e,0x19e,0x19f,0x1a0,0x1a0,0x1a1,0x1a2,0x1a2, 0x1a3,0x1a4,0x1a4,0x1a5,0x1a6,0x1a6,0x1a7,0x1a8, 0x1a8,0x1a9,0x1a9,0x1aa,0x1ab,0x1ab,0x1ac,0x1ad, 0x1ad,0x1ae,0x1af,0x1af,0x1b0,0x1b1,0x1b1,0x1b2, 0x1b3,0x1b3,0x1b4,0x1b5,0x1b5,0x1b6,0x1b7,0x1b7, 0x1b8,0x1b9,0x1b9,0x1ba,0x1bb,0x1bb,0x1bc,0x1bd, 0x1bd,0x1be,0x1bf,0x1bf,0x1c0,0x1c1,0x1c1,0x1c2, 0x1c3,0x1c3,0x1c4,0x1c5,0x1c5,0x1c6,0x1c7,0x1c7, 0x1c8,0x1c9,0x1c9,0x1ca,0x1cb,0x1cb,0x1cc,0x1cd, 0x1cd,0x1ce,0x1cf,0x1cf,0x1d0,0x1d1,0x1d1,0x1d2, 0x1d3,0x1d3,0x1d4,0x1d5,0x1d5,0x1d6,0x1d7,0x1d7, 0x1d8,0x1d9,0x1d9,0x1da,0x1db,0x1db,0x1dc,0x1dd, 0x1dd,0x1de,0x1df,0x1df,0x1e0,0x1e1,0x1e1,0x1e2, 0x1e3,0x1e4,0x1e4,0x1e5,0x1e6,0x1e6,0x1e7,0x1e8, 0x1e8,0x1e9,0x1ea,0x1ea,0x1eb,0x1ec,0x1ec,0x1ed, 0x1ee,0x1ee,0x1ef,0x1f0,0x1f0,0x1f1,0x1f2,0x1f3, 0x1f3,0x1f4,0x1f5,0x1f5,0x1f6,0x1f7,0x1f7,0x1f8, 0x1f9,0x1f9,0x1fa,0x1fb,0x1fb,0x1fc,0x1fd,0x1fe, 0x1fe,0x1ff,0x200,0x200,0x201,0x202,0x202,0x203, 0x204,0x205,0x205,0x206,0x207,0x207,0x208,0x209, 0x209,0x20a,0x20b,0x20b,0x20c,0x20d,0x20e,0x20e, 0x20f,0x210,0x210,0x211,0x212,0x212,0x213,0x214, 0x215,0x215,0x216,0x217,0x217,0x218,0x219,0x21a, 0x21a,0x21b,0x21c,0x21c,0x21d,0x21e,0x21e,0x21f, 0x220,0x221,0x221,0x222,0x223,0x223,0x224,0x225, 0x226,0x226,0x227,0x228,0x228,0x229,0x22a,0x22b, 0x22b,0x22c,0x22d,0x22d,0x22e,0x22f,0x230,0x230, 0x231,0x232,0x232,0x233,0x234,0x235,0x235,0x236, 0x237,0x237,0x238,0x239,0x23a,0x23a,0x23b,0x23c, 0x23c,0x23d,0x23e,0x23f,0x23f,0x240,0x241,0x241, 0x242,0x243,0x244,0x244,0x245,0x246,0x247,0x247, 0x248,0x249,0x249,0x24a,0x24b,0x24c,0x24c,0x24d, 0x24e,0x24f,0x24f,0x250,0x251,0x251,0x252,0x253, 0x254,0x254,0x255,0x256,0x257,0x257,0x258,0x259, 0x259,0x25a,0x25b,0x25c,0x25c,0x25d,0x25e,0x25f, 0x25f,0x260,0x261,0x262,0x262,0x263,0x264,0x265, 0x265,0x266,0x267,0x267,0x268,0x269,0x26a,0x26a, 0x26b,0x26c,0x26d,0x26d,0x26e,0x26f,0x270,0x270, 0x271,0x272,0x273,0x273,0x274,0x275,0x276,0x276, 0x277,0x278,0x279,0x279,0x27a,0x27b,0x27c,0x27c, 0x27d,0x27e,0x27f,0x27f,0x280,0x281,0x282,0x282, 0x283,0x284,0x285,0x285,0x286,0x287,0x288,0x288, 0x289,0x28a,0x28b,0x28b,0x28c,0x28d,0x28e,0x28e, 0x28f,0x290,0x291,0x291,0x292,0x293,0x294,0x294, 0x295,0x296,0x297,0x298,0x298,0x299,0x29a,0x29b, 0x29b,0x29c,0x29d,0x29e,0x29e,0x29f,0x2a0,0x2a1, 0x2a1,0x2a2,0x2a3,0x2a4,0x2a5,0x2a5,0x2a6,0x2a7, 0x2a8,0x2a8,0x2a9,0x2aa,0x2ab,0x2ab,0x2ac,0x2ad, 0x2ae,0x2af,0x2af,0x2b0,0x2b1,0x2b2,0x2b2,0x2b3, 
0x2b4,0x2b5,0x2b5,0x2b6,0x2b7,0x2b8,0x2b9,0x2b9, 0x2ba,0x2bb,0x2bc,0x2bc,0x2bd,0x2be,0x2bf,0x2c0, 0x2c0,0x2c1,0x2c2,0x2c3,0x2c4,0x2c4,0x2c5,0x2c6, 0x2c7,0x2c7,0x2c8,0x2c9,0x2ca,0x2cb,0x2cb,0x2cc, 0x2cd,0x2ce,0x2ce,0x2cf,0x2d0,0x2d1,0x2d2,0x2d2, 0x2d3,0x2d4,0x2d5,0x2d6,0x2d6,0x2d7,0x2d8,0x2d9, 0x2da,0x2da,0x2db,0x2dc,0x2dd,0x2dd,0x2de,0x2df, 0x2e0,0x2e1,0x2e1,0x2e2,0x2e3,0x2e4,0x2e5,0x2e5, 0x2e6,0x2e7,0x2e8,0x2e9,0x2e9,0x2ea,0x2eb,0x2ec, 0x2ed,0x2ed,0x2ee,0x2ef,0x2f0,0x2f1,0x2f1,0x2f2, 0x2f3,0x2f4,0x2f5,0x2f5,0x2f6,0x2f7,0x2f8,0x2f9, 0x2f9,0x2fa,0x2fb,0x2fc,0x2fd,0x2fd,0x2fe,0x2ff, 0x300,0x301,0x302,0x302,0x303,0x304,0x305,0x306, 0x306,0x307,0x308,0x309,0x30a,0x30a,0x30b,0x30c, 0x30d,0x30e,0x30f,0x30f,0x310,0x311,0x312,0x313, 0x313,0x314,0x315,0x316,0x317,0x318,0x318,0x319, 0x31a,0x31b,0x31c,0x31c,0x31d,0x31e,0x31f,0x320, 0x321,0x321,0x322,0x323,0x324,0x325,0x326,0x326, 0x327,0x328,0x329,0x32a,0x32a,0x32b,0x32c,0x32d, 0x32e,0x32f,0x32f,0x330,0x331,0x332,0x333,0x334, 0x334,0x335,0x336,0x337,0x338,0x339,0x339,0x33a, 0x33b,0x33c,0x33d,0x33e,0x33e,0x33f,0x340,0x341, 0x342,0x343,0x343,0x344,0x345,0x346,0x347,0x348, 0x349,0x349,0x34a,0x34b,0x34c,0x34d,0x34e,0x34e, 0x34f,0x350,0x351,0x352,0x353,0x353,0x354,0x355, 0x356,0x357,0x358,0x359,0x359,0x35a,0x35b,0x35c, 0x35d,0x35e,0x35f,0x35f,0x360,0x361,0x362,0x363, 0x364,0x364,0x365,0x366,0x367,0x368,0x369,0x36a, 0x36a,0x36b,0x36c,0x36d,0x36e,0x36f,0x370,0x370, 0x371,0x372,0x373,0x374,0x375,0x376,0x377,0x377, 0x378,0x379,0x37a,0x37b,0x37c,0x37d,0x37d,0x37e, 0x37f,0x380,0x381,0x382,0x383,0x383,0x384,0x385, 0x386,0x387,0x388,0x389,0x38a,0x38a,0x38b,0x38c, 0x38d,0x38e,0x38f,0x390,0x391,0x391,0x392,0x393, 0x394,0x395,0x396,0x397,0x398,0x398,0x399,0x39a, 0x39b,0x39c,0x39d,0x39e,0x39f,0x39f,0x3a0,0x3a1, 0x3a2,0x3a3,0x3a4,0x3a5,0x3a6,0x3a7,0x3a7,0x3a8, 0x3a9,0x3aa,0x3ab,0x3ac,0x3ad,0x3ae,0x3ae,0x3af, 0x3b0,0x3b1,0x3b2,0x3b3,0x3b4,0x3b5,0x3b6,0x3b6, 0x3b7,0x3b8,0x3b9,0x3ba,0x3bb,0x3bc,0x3bd,0x3be, 0x3bf,0x3bf,0x3c0,0x3c1,0x3c2,0x3c3,0x3c4,0x3c5, 0x3c6,0x3c7,0x3c7,0x3c8,0x3c9,0x3ca,0x3cb,0x3cc, 0x3cd,0x3ce,0x3cf,0x3d0,0x3d1,0x3d1,0x3d2,0x3d3, 0x3d4,0x3d5,0x3d6,0x3d7,0x3d8,0x3d9,0x3da,0x3da, 0x3db,0x3dc,0x3dd,0x3de,0x3df,0x3e0,0x3e1,0x3e2, 0x3e3,0x3e4,0x3e4,0x3e5,0x3e6,0x3e7,0x3e8,0x3e9, 0x3ea,0x3eb,0x3ec,0x3ed,0x3ee,0x3ef,0x3ef,0x3f0, 0x3f1,0x3f2,0x3f3,0x3f4,0x3f5,0x3f6,0x3f7,0x3f8, 0x3f9,0x3fa,0x3fa,0x3fb,0x3fc,0x3fd,0x3fe,0x3ff }; /* * Attenuation according to GM recommendations, in -0.375 dB units. * table[v] = 40 * log(v / 127) / -0.375 */ static unsigned char snd_opl4_volume_table[128] = { 255,224,192,173,160,150,141,134, 128,122,117,113,109,105,102, 99, 96, 93, 90, 88, 85, 83, 81, 79, 77, 75, 73, 71, 70, 68, 67, 65, 64, 62, 61, 59, 58, 57, 56, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 39, 38, 37, 36, 35, 34, 34, 33, 32, 31, 31, 30, 29, 29, 28, 27, 27, 26, 25, 25, 24, 24, 23, 22, 22, 21, 21, 20, 19, 19, 18, 18, 17, 17, 16, 16, 15, 15, 14, 14, 13, 13, 12, 12, 11, 11, 10, 10, 9, 9, 9, 8, 8, 7, 7, 6, 6, 6, 5, 5, 4, 4, 4, 3, 3, 2, 2, 2, 1, 1, 0, 0, 0 }; /* * Initializes all voices. 
*/ void snd_opl4_synth_reset(struct snd_opl4 *opl4) { unsigned long flags; int i; spin_lock_irqsave(&opl4->reg_lock, flags); for (i = 0; i < OPL4_MAX_VOICES; i++) snd_opl4_write(opl4, OPL4_REG_MISC + i, OPL4_DAMP_BIT); spin_unlock_irqrestore(&opl4->reg_lock, flags); INIT_LIST_HEAD(&opl4->off_voices); INIT_LIST_HEAD(&opl4->on_voices); memset(opl4->voices, 0, sizeof(opl4->voices)); for (i = 0; i < OPL4_MAX_VOICES; i++) { opl4->voices[i].number = i; list_add_tail(&opl4->voices[i].list, &opl4->off_voices); } snd_midi_channel_set_clear(opl4->chset); } /* * Shuts down all voices. */ void snd_opl4_synth_shutdown(struct snd_opl4 *opl4) { unsigned long flags; int i; spin_lock_irqsave(&opl4->reg_lock, flags); for (i = 0; i < OPL4_MAX_VOICES; i++) snd_opl4_write(opl4, OPL4_REG_MISC + i, opl4->voices[i].reg_misc & ~OPL4_KEY_ON_BIT); spin_unlock_irqrestore(&opl4->reg_lock, flags); } /* * Executes the callback for all voices playing the specified note. */ static void snd_opl4_do_for_note(struct snd_opl4 *opl4, int note, struct snd_midi_channel *chan, void (*func)(struct snd_opl4 *opl4, struct opl4_voice *voice)) { int i; unsigned long flags; struct opl4_voice *voice; spin_lock_irqsave(&opl4->reg_lock, flags); for (i = 0; i < OPL4_MAX_VOICES; i++) { voice = &opl4->voices[i]; if (voice->chan == chan && voice->note == note) { func(opl4, voice); } } spin_unlock_irqrestore(&opl4->reg_lock, flags); } /* * Executes the callback for all voices of to the specified channel. */ static void snd_opl4_do_for_channel(struct snd_opl4 *opl4, struct snd_midi_channel *chan, void (*func)(struct snd_opl4 *opl4, struct opl4_voice *voice)) { int i; unsigned long flags; struct opl4_voice *voice; spin_lock_irqsave(&opl4->reg_lock, flags); for (i = 0; i < OPL4_MAX_VOICES; i++) { voice = &opl4->voices[i]; if (voice->chan == chan) { func(opl4, voice); } } spin_unlock_irqrestore(&opl4->reg_lock, flags); } /* * Executes the callback for all active voices. 
*/ static void snd_opl4_do_for_all(struct snd_opl4 *opl4, void (*func)(struct snd_opl4 *opl4, struct opl4_voice *voice)) { int i; unsigned long flags; struct opl4_voice *voice; spin_lock_irqsave(&opl4->reg_lock, flags); for (i = 0; i < OPL4_MAX_VOICES; i++) { voice = &opl4->voices[i]; if (voice->chan) func(opl4, voice); } spin_unlock_irqrestore(&opl4->reg_lock, flags); } static void snd_opl4_update_volume(struct snd_opl4 *opl4, struct opl4_voice *voice) { int att; att = voice->sound->tone_attenuate; att += snd_opl4_volume_table[opl4->chset->gs_master_volume & 0x7f]; att += snd_opl4_volume_table[voice->chan->gm_volume & 0x7f]; att += snd_opl4_volume_table[voice->chan->gm_expression & 0x7f]; att += snd_opl4_volume_table[voice->velocity]; att = 0x7f - (0x7f - att) * (voice->sound->volume_factor) / 0xfe - volume_boost; if (att < 0) att = 0; else if (att > 0x7e) att = 0x7e; snd_opl4_write(opl4, OPL4_REG_LEVEL + voice->number, (att << 1) | voice->level_direct); voice->level_direct = 0; } static void snd_opl4_update_pan(struct snd_opl4 *opl4, struct opl4_voice *voice) { int pan = voice->sound->panpot; if (!voice->chan->drum_channel) pan += (voice->chan->control[MIDI_CTL_MSB_PAN] - 0x40) >> 3; if (pan < -7) pan = -7; else if (pan > 7) pan = 7; voice->reg_misc = (voice->reg_misc & ~OPL4_PAN_POT_MASK) | (pan & OPL4_PAN_POT_MASK); snd_opl4_write(opl4, OPL4_REG_MISC + voice->number, voice->reg_misc); } static void snd_opl4_update_vibrato_depth(struct snd_opl4 *opl4, struct opl4_voice *voice) { int depth; if (voice->chan->drum_channel) return; depth = (7 - voice->sound->vibrato) * (voice->chan->control[MIDI_CTL_VIBRATO_DEPTH] & 0x7f); depth = (depth >> 7) + voice->sound->vibrato; voice->reg_lfo_vibrato &= ~OPL4_VIBRATO_DEPTH_MASK; voice->reg_lfo_vibrato |= depth & OPL4_VIBRATO_DEPTH_MASK; snd_opl4_write(opl4, OPL4_REG_LFO_VIBRATO + voice->number, voice->reg_lfo_vibrato); } static void snd_opl4_update_pitch(struct snd_opl4 *opl4, struct opl4_voice *voice) { struct snd_midi_channel *chan = voice->chan; int note, pitch, octave; note = chan->drum_channel ? 60 : voice->note; /* * pitch is in 100/128 cents, so 0x80 is one semitone and * 0x600 is one octave. 
*/ pitch = ((note - 60) << 7) * voice->sound->key_scaling / 100 + (60 << 7); pitch += voice->sound->pitch_offset; if (!chan->drum_channel) pitch += chan->gm_rpn_coarse_tuning; pitch += chan->gm_rpn_fine_tuning >> 7; pitch += chan->midi_pitchbend * chan->gm_rpn_pitch_bend_range / 0x2000; if (pitch < 0) pitch = 0; else if (pitch >= 0x6000) pitch = 0x5fff; octave = pitch / 0x600 - 8; pitch = snd_opl4_pitch_map[pitch % 0x600]; snd_opl4_write(opl4, OPL4_REG_OCTAVE + voice->number, (octave << 4) | ((pitch >> 7) & OPL4_F_NUMBER_HIGH_MASK)); voice->reg_f_number = (voice->reg_f_number & OPL4_TONE_NUMBER_BIT8) | ((pitch << 1) & OPL4_F_NUMBER_LOW_MASK); snd_opl4_write(opl4, OPL4_REG_F_NUMBER + voice->number, voice->reg_f_number); } static void snd_opl4_update_tone_parameters(struct snd_opl4 *opl4, struct opl4_voice *voice) { snd_opl4_write(opl4, OPL4_REG_ATTACK_DECAY1 + voice->number, voice->sound->reg_attack_decay1); snd_opl4_write(opl4, OPL4_REG_LEVEL_DECAY2 + voice->number, voice->sound->reg_level_decay2); snd_opl4_write(opl4, OPL4_REG_RELEASE_CORRECTION + voice->number, voice->sound->reg_release_correction); snd_opl4_write(opl4, OPL4_REG_TREMOLO + voice->number, voice->sound->reg_tremolo); } /* allocate one voice */ static struct opl4_voice *snd_opl4_get_voice(struct snd_opl4 *opl4) { /* first, try to get the oldest key-off voice */ if (!list_empty(&opl4->off_voices)) return list_entry(opl4->off_voices.next, struct opl4_voice, list); /* then get the oldest key-on voice */ snd_BUG_ON(list_empty(&opl4->on_voices)); return list_entry(opl4->on_voices.next, struct opl4_voice, list); } static void snd_opl4_wait_for_wave_headers(struct snd_opl4 *opl4) { int timeout = 200; while ((inb(opl4->fm_port) & OPL4_STATUS_LOAD) && --timeout > 0) udelay(10); } void snd_opl4_note_on(void *private_data, int note, int vel, struct snd_midi_channel *chan) { struct snd_opl4 *opl4 = private_data; const struct opl4_region_ptr *regions; struct opl4_voice *voice[2]; const struct opl4_sound *sound[2]; int voices = 0, i; unsigned long flags; /* determine the number of voices and voice parameters */ i = chan->drum_channel ? 
0x80 : (chan->midi_program & 0x7f); regions = &snd_yrw801_regions[i]; for (i = 0; i < regions->count; i++) { if (note >= regions->regions[i].key_min && note <= regions->regions[i].key_max) { sound[voices] = &regions->regions[i].sound; if (++voices >= 2) break; } } /* allocate and initialize the needed voices */ spin_lock_irqsave(&opl4->reg_lock, flags); for (i = 0; i < voices; i++) { voice[i] = snd_opl4_get_voice(opl4); list_del(&voice[i]->list); list_add_tail(&voice[i]->list, &opl4->on_voices); voice[i]->chan = chan; voice[i]->note = note; voice[i]->velocity = vel & 0x7f; voice[i]->sound = sound[i]; } /* set tone number (triggers header loading) */ for (i = 0; i < voices; i++) { voice[i]->reg_f_number = (sound[i]->tone >> 8) & OPL4_TONE_NUMBER_BIT8; snd_opl4_write(opl4, OPL4_REG_F_NUMBER + voice[i]->number, voice[i]->reg_f_number); snd_opl4_write(opl4, OPL4_REG_TONE_NUMBER + voice[i]->number, sound[i]->tone & 0xff); } /* set parameters which can be set while loading */ for (i = 0; i < voices; i++) { voice[i]->reg_misc = OPL4_LFO_RESET_BIT; snd_opl4_update_pan(opl4, voice[i]); snd_opl4_update_pitch(opl4, voice[i]); voice[i]->level_direct = OPL4_LEVEL_DIRECT_BIT; snd_opl4_update_volume(opl4, voice[i]); } spin_unlock_irqrestore(&opl4->reg_lock, flags); /* wait for completion of loading */ snd_opl4_wait_for_wave_headers(opl4); /* set remaining parameters */ spin_lock_irqsave(&opl4->reg_lock, flags); for (i = 0; i < voices; i++) { snd_opl4_update_tone_parameters(opl4, voice[i]); voice[i]->reg_lfo_vibrato = voice[i]->sound->reg_lfo_vibrato; snd_opl4_update_vibrato_depth(opl4, voice[i]); } /* finally, switch on all voices */ for (i = 0; i < voices; i++) { voice[i]->reg_misc = (voice[i]->reg_misc & 0x1f) | OPL4_KEY_ON_BIT; snd_opl4_write(opl4, OPL4_REG_MISC + voice[i]->number, voice[i]->reg_misc); } spin_unlock_irqrestore(&opl4->reg_lock, flags); } static void snd_opl4_voice_off(struct snd_opl4 *opl4, struct opl4_voice *voice) { list_del(&voice->list); list_add_tail(&voice->list, &opl4->off_voices); voice->reg_misc &= ~OPL4_KEY_ON_BIT; snd_opl4_write(opl4, OPL4_REG_MISC + voice->number, voice->reg_misc); } void snd_opl4_note_off(void *private_data, int note, int vel, struct snd_midi_channel *chan) { struct snd_opl4 *opl4 = private_data; snd_opl4_do_for_note(opl4, note, chan, snd_opl4_voice_off); } static void snd_opl4_terminate_voice(struct snd_opl4 *opl4, struct opl4_voice *voice) { list_del(&voice->list); list_add_tail(&voice->list, &opl4->off_voices); voice->reg_misc = (voice->reg_misc & ~OPL4_KEY_ON_BIT) | OPL4_DAMP_BIT; snd_opl4_write(opl4, OPL4_REG_MISC + voice->number, voice->reg_misc); } void snd_opl4_terminate_note(void *private_data, int note, struct snd_midi_channel *chan) { struct snd_opl4 *opl4 = private_data; snd_opl4_do_for_note(opl4, note, chan, snd_opl4_terminate_voice); } void snd_opl4_control(void *private_data, int type, struct snd_midi_channel *chan) { struct snd_opl4 *opl4 = private_data; switch (type) { case MIDI_CTL_MSB_MODWHEEL: chan->control[MIDI_CTL_VIBRATO_DEPTH] = chan->control[MIDI_CTL_MSB_MODWHEEL]; snd_opl4_do_for_channel(opl4, chan, snd_opl4_update_vibrato_depth); break; case MIDI_CTL_MSB_MAIN_VOLUME: snd_opl4_do_for_channel(opl4, chan, snd_opl4_update_volume); break; case MIDI_CTL_MSB_PAN: snd_opl4_do_for_channel(opl4, chan, snd_opl4_update_pan); break; case MIDI_CTL_MSB_EXPRESSION: snd_opl4_do_for_channel(opl4, chan, snd_opl4_update_volume); break; case MIDI_CTL_VIBRATO_RATE: /* not yet supported */ break; case MIDI_CTL_VIBRATO_DEPTH: 
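/* GM2 vibrato depth (CC 0x4d): same per-channel update as the
		 * mod wheel case above, which writes through this controller. */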
snd_opl4_do_for_channel(opl4, chan, snd_opl4_update_vibrato_depth); break; case MIDI_CTL_VIBRATO_DELAY: /* not yet supported */ break; case MIDI_CTL_E1_REVERB_DEPTH: /* * Each OPL4 voice has a bit called "Pseudo-Reverb", but * IMHO _not_ using it enhances the listening experience. */ break; case MIDI_CTL_PITCHBEND: snd_opl4_do_for_channel(opl4, chan, snd_opl4_update_pitch); break; } } void snd_opl4_sysex(void *private_data, unsigned char *buf, int len, int parsed, struct snd_midi_channel_set *chset) { struct snd_opl4 *opl4 = private_data; if (parsed == SNDRV_MIDI_SYSEX_GS_MASTER_VOLUME) snd_opl4_do_for_all(opl4, snd_opl4_update_volume); }
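/*
 * Worked examples for the fixed-point math above (illustration only):
 *
 * Volume: table[v] = 40 * log(v / 127) / -0.375, so for v = 2:
 * 40 * log10(2 / 127) / -0.375 = 192.3, matching
 * snd_opl4_volume_table[2] == 192, i.e. 72 dB of attenuation in
 * -0.375 dB units.
 *
 * Pitch: units are 100/128 cents (0x80 per semitone, 0x600 per
 * octave). For note 72 with key_scaling == 100 and all offsets,
 * tuning and pitch bend at zero:
 *   pitch  = ((72 - 60) << 7) + (60 << 7) = 0x600 + 0x1e00 = 0x2400
 *   octave = 0x2400 / 0x600 - 8 = -2
 *   f_num  = snd_opl4_pitch_map[0x2400 % 0x600] = 0x000
 * so each octave step changes only the octave register while the
 * F_NUMBER lookup repeats within the 0x600-entry table.
 */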
gpl-2.0
perillamint/android_kernel_casio_gzone
fs/ocfs2/dlm/dlmconvert.c
10269
15658
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * dlmconvert.c * * underlying calls for lock conversion * * Copyright (C) 2004 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. * */ #include <linux/module.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/highmem.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/random.h> #include <linux/blkdev.h> #include <linux/socket.h> #include <linux/inet.h> #include <linux/spinlock.h> #include "cluster/heartbeat.h" #include "cluster/nodemanager.h" #include "cluster/tcp.h" #include "dlmapi.h" #include "dlmcommon.h" #include "dlmconvert.h" #define MLOG_MASK_PREFIX ML_DLM #include "cluster/masklog.h" /* NOTE: __dlmconvert_master is the only function in here that * needs a spinlock held on entry (res->spinlock) and it is the * only one that holds a lock on exit (res->spinlock). * All other functions in here need no locks and drop all of * the locks that they acquire. */ static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type, int *call_ast, int *kick_thread); static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type); /* * this is only called directly by dlmlock(), and only when the * local node is the owner of the lockres * locking: * caller needs: none * taken: takes and drops res->spinlock * held on exit: none * returns: see __dlmconvert_master */ enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type) { int call_ast = 0, kick_thread = 0; enum dlm_status status; spin_lock(&res->spinlock); /* we are not in a network handler, this is fine */ __dlm_wait_on_lockres(res); __dlm_lockres_reserve_ast(res); res->state |= DLM_LOCK_RES_IN_PROGRESS; status = __dlmconvert_master(dlm, res, lock, flags, type, &call_ast, &kick_thread); res->state &= ~DLM_LOCK_RES_IN_PROGRESS; spin_unlock(&res->spinlock); wake_up(&res->wq); if (status != DLM_NORMAL && status != DLM_NOTQUEUED) dlm_error(status); /* either queue the ast or release it */ if (call_ast) dlm_queue_ast(dlm, lock); else dlm_lockres_release_ast(dlm, res); if (kick_thread) dlm_kick_thread(dlm, res); return status; } /* performs lock conversion at the lockres master site * locking: * caller needs: res->spinlock * taken: takes and drops lock->spinlock * held on exit: res->spinlock * returns: DLM_NORMAL, DLM_NOTQUEUED, DLM_DENIED * call_ast: whether ast should be called for this lock * kick_thread: whether dlm_kick_thread should be called */ static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type, int *call_ast, int *kick_thread) { enum dlm_status status 
= DLM_NORMAL; struct list_head *iter; struct dlm_lock *tmplock=NULL; assert_spin_locked(&res->spinlock); mlog(0, "type=%d, convert_type=%d, new convert_type=%d\n", lock->ml.type, lock->ml.convert_type, type); spin_lock(&lock->spinlock); /* already converting? */ if (lock->ml.convert_type != LKM_IVMODE) { mlog(ML_ERROR, "attempted to convert a lock with a lock " "conversion pending\n"); status = DLM_DENIED; goto unlock_exit; } /* must be on grant queue to convert */ if (!dlm_lock_on_list(&res->granted, lock)) { mlog(ML_ERROR, "attempted to convert a lock not on grant " "queue\n"); status = DLM_DENIED; goto unlock_exit; } if (flags & LKM_VALBLK) { switch (lock->ml.type) { case LKM_EXMODE: /* EX + LKM_VALBLK + convert == set lvb */ mlog(0, "will set lvb: converting %s->%s\n", dlm_lock_mode_name(lock->ml.type), dlm_lock_mode_name(type)); lock->lksb->flags |= DLM_LKSB_PUT_LVB; break; case LKM_PRMODE: case LKM_NLMODE: /* refetch if new level is not NL */ if (type > LKM_NLMODE) { mlog(0, "will fetch new value into " "lvb: converting %s->%s\n", dlm_lock_mode_name(lock->ml.type), dlm_lock_mode_name(type)); lock->lksb->flags |= DLM_LKSB_GET_LVB; } else { mlog(0, "will NOT fetch new value " "into lvb: converting %s->%s\n", dlm_lock_mode_name(lock->ml.type), dlm_lock_mode_name(type)); flags &= ~(LKM_VALBLK); } break; } } /* in-place downconvert? */ if (type <= lock->ml.type) goto grant; /* upconvert from here on */ status = DLM_NORMAL; list_for_each(iter, &res->granted) { tmplock = list_entry(iter, struct dlm_lock, list); if (tmplock == lock) continue; if (!dlm_lock_compatible(tmplock->ml.type, type)) goto switch_queues; } list_for_each(iter, &res->converting) { tmplock = list_entry(iter, struct dlm_lock, list); if (!dlm_lock_compatible(tmplock->ml.type, type)) goto switch_queues; /* existing conversion requests take precedence */ if (!dlm_lock_compatible(tmplock->ml.convert_type, type)) goto switch_queues; } /* fall thru to grant */ grant: mlog(0, "res %.*s, granting %s lock\n", res->lockname.len, res->lockname.name, dlm_lock_mode_name(type)); /* immediately grant the new lock type */ lock->lksb->status = DLM_NORMAL; if (lock->ml.node == dlm->node_num) mlog(0, "doing in-place convert for nonlocal lock\n"); lock->ml.type = type; if (lock->lksb->flags & DLM_LKSB_PUT_LVB) memcpy(res->lvb, lock->lksb->lvb, DLM_LVB_LEN); status = DLM_NORMAL; *call_ast = 1; goto unlock_exit; switch_queues: if (flags & LKM_NOQUEUE) { mlog(0, "failed to convert NOQUEUE lock %.*s from " "%d to %d...\n", res->lockname.len, res->lockname.name, lock->ml.type, type); status = DLM_NOTQUEUED; goto unlock_exit; } mlog(0, "res %.*s, queueing...\n", res->lockname.len, res->lockname.name); lock->ml.convert_type = type; /* do not alter lock refcount. switching lists. */ list_move_tail(&lock->list, &res->converting); unlock_exit: spin_unlock(&lock->spinlock); if (status == DLM_DENIED) { __dlm_print_one_lock_resource(res); } if (status == DLM_NORMAL) *kick_thread = 1; return status; } void dlm_revert_pending_convert(struct dlm_lock_resource *res, struct dlm_lock *lock) { /* do not alter lock refcount. switching lists. 
*/ list_move_tail(&lock->list, &res->granted); lock->ml.convert_type = LKM_IVMODE; lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB); } /* messages the master site to do lock conversion * locking: * caller needs: none * taken: takes and drops res->spinlock, uses DLM_LOCK_RES_IN_PROGRESS * held on exit: none * returns: DLM_NORMAL, DLM_RECOVERING, status from remote node */ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type) { enum dlm_status status; mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type, lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS); spin_lock(&res->spinlock); if (res->state & DLM_LOCK_RES_RECOVERING) { mlog(0, "bailing out early since res is RECOVERING " "on secondary queue\n"); /* __dlm_print_one_lock_resource(res); */ status = DLM_RECOVERING; goto bail; } /* will exit this call with spinlock held */ __dlm_wait_on_lockres(res); if (lock->ml.convert_type != LKM_IVMODE) { __dlm_print_one_lock_resource(res); mlog(ML_ERROR, "converting a remote lock that is already " "converting! (cookie=%u:%llu, conv=%d)\n", dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), lock->ml.convert_type); status = DLM_DENIED; goto bail; } res->state |= DLM_LOCK_RES_IN_PROGRESS; /* move lock to local convert queue */ /* do not alter lock refcount. switching lists. */ list_move_tail(&lock->list, &res->converting); lock->convert_pending = 1; lock->ml.convert_type = type; if (flags & LKM_VALBLK) { if (lock->ml.type == LKM_EXMODE) { flags |= LKM_PUT_LVB; lock->lksb->flags |= DLM_LKSB_PUT_LVB; } else { if (lock->ml.convert_type == LKM_NLMODE) flags &= ~LKM_VALBLK; else { flags |= LKM_GET_LVB; lock->lksb->flags |= DLM_LKSB_GET_LVB; } } } spin_unlock(&res->spinlock); /* no locks held here. * need to wait for a reply as to whether it got queued or not. */ status = dlm_send_remote_convert_request(dlm, res, lock, flags, type); spin_lock(&res->spinlock); res->state &= ~DLM_LOCK_RES_IN_PROGRESS; lock->convert_pending = 0; /* if it failed, move it back to granted queue */ if (status != DLM_NORMAL) { if (status != DLM_NOTQUEUED) dlm_error(status); dlm_revert_pending_convert(res, lock); } bail: spin_unlock(&res->spinlock); /* TODO: should this be a wake_one? 
*/ /* wake up any IN_PROGRESS waiters */ wake_up(&res->wq); return status; } /* sends DLM_CONVERT_LOCK_MSG to master site * locking: * caller needs: none * taken: none * held on exit: none * returns: DLM_NOLOCKMGR, status from remote node */ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type) { struct dlm_convert_lock convert; int tmpret; enum dlm_status ret; int status = 0; struct kvec vec[2]; size_t veclen = 1; mlog(0, "%.*s\n", res->lockname.len, res->lockname.name); memset(&convert, 0, sizeof(struct dlm_convert_lock)); convert.node_idx = dlm->node_num; convert.requested_type = type; convert.cookie = lock->ml.cookie; convert.namelen = res->lockname.len; convert.flags = cpu_to_be32(flags); memcpy(convert.name, res->lockname.name, convert.namelen); vec[0].iov_len = sizeof(struct dlm_convert_lock); vec[0].iov_base = &convert; if (flags & LKM_PUT_LVB) { /* extra data to send if we are updating lvb */ vec[1].iov_len = DLM_LVB_LEN; vec[1].iov_base = lock->lksb->lvb; veclen++; } tmpret = o2net_send_message_vec(DLM_CONVERT_LOCK_MSG, dlm->key, vec, veclen, res->owner, &status); if (tmpret >= 0) { // successfully sent and received ret = status; // this is already a dlm_status if (ret == DLM_RECOVERING) { mlog(0, "node %u returned DLM_RECOVERING from convert " "message!\n", res->owner); } else if (ret == DLM_MIGRATING) { mlog(0, "node %u returned DLM_MIGRATING from convert " "message!\n", res->owner); } else if (ret == DLM_FORWARD) { mlog(0, "node %u returned DLM_FORWARD from convert " "message!\n", res->owner); } else if (ret != DLM_NORMAL && ret != DLM_NOTQUEUED) dlm_error(ret); } else { mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " "node %u\n", tmpret, DLM_CONVERT_LOCK_MSG, dlm->key, res->owner); if (dlm_is_host_down(tmpret)) { /* instead of logging the same network error over * and over, sleep here and wait for the heartbeat * to notice the node is dead. times out after 5s. */ dlm_wait_for_node_death(dlm, res->owner, DLM_NODE_DEATH_WAIT_MAX); ret = DLM_RECOVERING; mlog(0, "node %u died so returning DLM_RECOVERING " "from convert message!\n", res->owner); } else { ret = dlm_err_to_dlm_status(tmpret); } } return ret; } /* handler for DLM_CONVERT_LOCK_MSG on master site * locking: * caller needs: none * taken: takes and drop res->spinlock * held on exit: none * returns: DLM_NORMAL, DLM_IVLOCKID, DLM_BADARGS, * status from __dlmconvert_master */ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { struct dlm_ctxt *dlm = data; struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf; struct dlm_lock_resource *res = NULL; struct list_head *iter; struct dlm_lock *lock = NULL; struct dlm_lockstatus *lksb; enum dlm_status status = DLM_NORMAL; u32 flags; int call_ast = 0, kick_thread = 0, ast_reserved = 0, wake = 0; if (!dlm_grab(dlm)) { dlm_error(DLM_REJECTED); return DLM_REJECTED; } mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), "Domain %s not fully joined!\n", dlm->name); if (cnv->namelen > DLM_LOCKID_NAME_MAX) { status = DLM_IVBUFLEN; dlm_error(status); goto leave; } flags = be32_to_cpu(cnv->flags); if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) == (LKM_PUT_LVB|LKM_GET_LVB)) { mlog(ML_ERROR, "both PUT and GET lvb specified\n"); status = DLM_BADARGS; goto leave; } mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : (flags & LKM_GET_LVB ? 
"get lvb" : "none")); status = DLM_IVLOCKID; res = dlm_lookup_lockres(dlm, cnv->name, cnv->namelen); if (!res) { dlm_error(status); goto leave; } spin_lock(&res->spinlock); status = __dlm_lockres_state_to_status(res); if (status != DLM_NORMAL) { spin_unlock(&res->spinlock); dlm_error(status); goto leave; } list_for_each(iter, &res->granted) { lock = list_entry(iter, struct dlm_lock, list); if (lock->ml.cookie == cnv->cookie && lock->ml.node == cnv->node_idx) { dlm_lock_get(lock); break; } lock = NULL; } spin_unlock(&res->spinlock); if (!lock) { status = DLM_IVLOCKID; mlog(ML_ERROR, "did not find lock to convert on grant queue! " "cookie=%u:%llu\n", dlm_get_lock_cookie_node(be64_to_cpu(cnv->cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(cnv->cookie))); dlm_print_one_lock_resource(res); goto leave; } /* found the lock */ lksb = lock->lksb; /* see if caller needed to get/put lvb */ if (flags & LKM_PUT_LVB) { BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); lksb->flags |= DLM_LKSB_PUT_LVB; memcpy(&lksb->lvb[0], &cnv->lvb[0], DLM_LVB_LEN); } else if (flags & LKM_GET_LVB) { BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); lksb->flags |= DLM_LKSB_GET_LVB; } spin_lock(&res->spinlock); status = __dlm_lockres_state_to_status(res); if (status == DLM_NORMAL) { __dlm_lockres_reserve_ast(res); ast_reserved = 1; res->state |= DLM_LOCK_RES_IN_PROGRESS; status = __dlmconvert_master(dlm, res, lock, flags, cnv->requested_type, &call_ast, &kick_thread); res->state &= ~DLM_LOCK_RES_IN_PROGRESS; wake = 1; } spin_unlock(&res->spinlock); if (wake) wake_up(&res->wq); if (status != DLM_NORMAL) { if (status != DLM_NOTQUEUED) dlm_error(status); lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB); } leave: if (lock) dlm_lock_put(lock); /* either queue the ast or release it, if reserved */ if (call_ast) dlm_queue_ast(dlm, lock); else if (ast_reserved) dlm_lockres_release_ast(dlm, res); if (kick_thread) dlm_kick_thread(dlm, res); if (res) dlm_lockres_put(res); dlm_put(dlm); return status; }
gpl-2.0
miuihu/android_kernel_xiaomi_armor
arch/arm/mach-iop13xx/irq.c
11805
5193
/* * iop13xx IRQ handling / support functions * Copyright (c) 2005-2006, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/sysctl.h> #include <asm/uaccess.h> #include <asm/mach/irq.h> #include <asm/irq.h> #include <mach/hardware.h> #include <mach/irqs.h> #include <mach/msi.h> /* INTCTL0 CP6 R0 Page 4 */ static u32 read_intctl_0(void) { u32 val; asm volatile("mrc p6, 0, %0, c0, c4, 0":"=r" (val)); return val; } static void write_intctl_0(u32 val) { asm volatile("mcr p6, 0, %0, c0, c4, 0"::"r" (val)); } /* INTCTL1 CP6 R1 Page 4 */ static u32 read_intctl_1(void) { u32 val; asm volatile("mrc p6, 0, %0, c1, c4, 0":"=r" (val)); return val; } static void write_intctl_1(u32 val) { asm volatile("mcr p6, 0, %0, c1, c4, 0"::"r" (val)); } /* INTCTL2 CP6 R2 Page 4 */ static u32 read_intctl_2(void) { u32 val; asm volatile("mrc p6, 0, %0, c2, c4, 0":"=r" (val)); return val; } static void write_intctl_2(u32 val) { asm volatile("mcr p6, 0, %0, c2, c4, 0"::"r" (val)); } /* INTCTL3 CP6 R3 Page 4 */ static u32 read_intctl_3(void) { u32 val; asm volatile("mrc p6, 0, %0, c3, c4, 0":"=r" (val)); return val; } static void write_intctl_3(u32 val) { asm volatile("mcr p6, 0, %0, c3, c4, 0"::"r" (val)); } /* INTSTR0 CP6 R0 Page 5 */ static void write_intstr_0(u32 val) { asm volatile("mcr p6, 0, %0, c0, c5, 0"::"r" (val)); } /* INTSTR1 CP6 R1 Page 5 */ static void write_intstr_1(u32 val) { asm volatile("mcr p6, 0, %0, c1, c5, 0"::"r" (val)); } /* INTSTR2 CP6 R2 Page 5 */ static void write_intstr_2(u32 val) { asm volatile("mcr p6, 0, %0, c2, c5, 0"::"r" (val)); } /* INTSTR3 CP6 R3 Page 5 */ static void write_intstr_3(u32 val) { asm volatile("mcr p6, 0, %0, c3, c5, 0"::"r" (val)); } /* INTBASE CP6 R0 Page 2 */ static void write_intbase(u32 val) { asm volatile("mcr p6, 0, %0, c0, c2, 0"::"r" (val)); } /* INTSIZE CP6 R2 Page 2 */ static void write_intsize(u32 val) { asm volatile("mcr p6, 0, %0, c2, c2, 0"::"r" (val)); } /* 0 = Interrupt Masked and 1 = Interrupt not masked */ static void iop13xx_irq_mask0 (struct irq_data *d) { write_intctl_0(read_intctl_0() & ~(1 << (d->irq - 0))); } static void iop13xx_irq_mask1 (struct irq_data *d) { write_intctl_1(read_intctl_1() & ~(1 << (d->irq - 32))); } static void iop13xx_irq_mask2 (struct irq_data *d) { write_intctl_2(read_intctl_2() & ~(1 << (d->irq - 64))); } static void iop13xx_irq_mask3 (struct irq_data *d) { write_intctl_3(read_intctl_3() & ~(1 << (d->irq - 96))); } static void iop13xx_irq_unmask0(struct irq_data *d) { write_intctl_0(read_intctl_0() | (1 << (d->irq - 0))); } static void iop13xx_irq_unmask1(struct irq_data *d) { write_intctl_1(read_intctl_1() | (1 << (d->irq - 32))); } static void iop13xx_irq_unmask2(struct irq_data *d) { write_intctl_2(read_intctl_2() | (1 << (d->irq - 64))); } static void iop13xx_irq_unmask3(struct irq_data *d) { 
write_intctl_3(read_intctl_3() | (1 << (d->irq - 96))); } static struct irq_chip iop13xx_irqchip1 = { .name = "IOP13xx-1", .irq_ack = iop13xx_irq_mask0, .irq_mask = iop13xx_irq_mask0, .irq_unmask = iop13xx_irq_unmask0, }; static struct irq_chip iop13xx_irqchip2 = { .name = "IOP13xx-2", .irq_ack = iop13xx_irq_mask1, .irq_mask = iop13xx_irq_mask1, .irq_unmask = iop13xx_irq_unmask1, }; static struct irq_chip iop13xx_irqchip3 = { .name = "IOP13xx-3", .irq_ack = iop13xx_irq_mask2, .irq_mask = iop13xx_irq_mask2, .irq_unmask = iop13xx_irq_unmask2, }; static struct irq_chip iop13xx_irqchip4 = { .name = "IOP13xx-4", .irq_ack = iop13xx_irq_mask3, .irq_mask = iop13xx_irq_mask3, .irq_unmask = iop13xx_irq_unmask3, }; extern void iop_init_cp6_handler(void); void __init iop13xx_init_irq(void) { unsigned int i; iop_init_cp6_handler(); /* disable all interrupts */ write_intctl_0(0); write_intctl_1(0); write_intctl_2(0); write_intctl_3(0); /* treat all as IRQ */ write_intstr_0(0); write_intstr_1(0); write_intstr_2(0); write_intstr_3(0); /* initialize the interrupt vector generator */ write_intbase(INTBASE); write_intsize(INTSIZE_4); for(i = 0; i <= IRQ_IOP13XX_HPI; i++) { if (i < 32) irq_set_chip(i, &iop13xx_irqchip1); else if (i < 64) irq_set_chip(i, &iop13xx_irqchip2); else if (i < 96) irq_set_chip(i, &iop13xx_irqchip3); else irq_set_chip(i, &iop13xx_irqchip4); irq_set_handler(i, handle_level_irq); set_irq_flags(i, IRQF_VALID | IRQF_PROBE); } iop13xx_msi_init(); }
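/*
 * Illustrative sketch, not part of the original file: every mask/unmask
 * above is a read-modify-write of one 32-interrupt INTCTL bank, with the
 * IRQ number reduced to a bit position inside its bank (d->irq - 0, -32,
 * -64, -96).  A generic helper expressing that pattern might look like
 * this; the name and function-pointer parameters are hypothetical
 * stand-ins for the read_intctl_N()/write_intctl_N() accessors.
 */
static void demo_set_irq_enable(unsigned int irq, int enable,
				u32 (*read_bank)(void),
				void (*write_bank)(u32))
{
	u32 bit = 1 << (irq % 32);	/* bit within the 32-bit bank */
	u32 val = read_bank();

	if (enable)
		val |= bit;		/* 1 = interrupt not masked */
	else
		val &= ~bit;		/* 0 = interrupt masked */
	write_bank(val);
}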
gpl-2.0
LinTeX9527/linux
sound/drivers/opl3/opl3_drums.c
14877
7192
/* * Copyright (c) by Uros Bizjak <uros@kss-loka.si> * * OPL2/OPL3/OPL4 FM routines for internal percussion channels * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include "opl3_voice.h" extern char snd_opl3_regmap[MAX_OPL2_VOICES][4]; static char snd_opl3_drum_table[47] = { OPL3_BASSDRUM_ON, OPL3_BASSDRUM_ON, OPL3_HIHAT_ON, /* 35 - 37 */ OPL3_SNAREDRUM_ON, OPL3_HIHAT_ON, OPL3_SNAREDRUM_ON, /* 38 - 40 */ OPL3_BASSDRUM_ON, OPL3_HIHAT_ON, OPL3_BASSDRUM_ON, /* 41 - 43 */ OPL3_HIHAT_ON, OPL3_TOMTOM_ON, OPL3_HIHAT_ON, /* 44 - 46 */ OPL3_TOMTOM_ON, OPL3_TOMTOM_ON, OPL3_CYMBAL_ON, /* 47 - 49 */ OPL3_TOMTOM_ON, OPL3_CYMBAL_ON, OPL3_CYMBAL_ON, /* 50 - 52 */ OPL3_CYMBAL_ON, OPL3_CYMBAL_ON, OPL3_CYMBAL_ON, /* 53 - 55 */ OPL3_HIHAT_ON, OPL3_CYMBAL_ON, OPL3_TOMTOM_ON, /* 56 - 58 */ OPL3_CYMBAL_ON, OPL3_TOMTOM_ON, OPL3_TOMTOM_ON, /* 59 - 61 */ OPL3_HIHAT_ON, OPL3_TOMTOM_ON, OPL3_TOMTOM_ON, /* 62 - 64 */ OPL3_TOMTOM_ON, OPL3_TOMTOM_ON, OPL3_TOMTOM_ON, /* 65 - 67 */ OPL3_TOMTOM_ON, OPL3_HIHAT_ON, OPL3_HIHAT_ON, /* 68 - 70 */ OPL3_HIHAT_ON, OPL3_HIHAT_ON, OPL3_TOMTOM_ON, /* 71 - 73 */ OPL3_TOMTOM_ON, OPL3_TOMTOM_ON, OPL3_TOMTOM_ON, /* 74 - 76 */ OPL3_TOMTOM_ON, OPL3_TOMTOM_ON, OPL3_TOMTOM_ON, /* 77 - 79 */ OPL3_CYMBAL_ON, OPL3_CYMBAL_ON /* 80 - 81 */ }; struct snd_opl3_drum_voice { int voice; int op; unsigned char am_vib; unsigned char ksl_level; unsigned char attack_decay; unsigned char sustain_release; unsigned char feedback_connection; unsigned char wave_select; }; struct snd_opl3_drum_note { int voice; unsigned char fnum; unsigned char octave_f; unsigned char feedback_connection; }; static struct snd_opl3_drum_voice bass_op0 = {6, 0, 0x00, 0x32, 0xf8, 0x66, 0x30, 0x00}; static struct snd_opl3_drum_voice bass_op1 = {6, 1, 0x00, 0x03, 0xf6, 0x57, 0x30, 0x00}; static struct snd_opl3_drum_note bass_note = {6, 0x90, 0x09}; static struct snd_opl3_drum_voice hihat = {7, 0, 0x00, 0x03, 0xf0, 0x06, 0x20, 0x00}; static struct snd_opl3_drum_voice snare = {7, 1, 0x00, 0x03, 0xf0, 0x07, 0x20, 0x02}; static struct snd_opl3_drum_note snare_note = {7, 0xf4, 0x0d}; static struct snd_opl3_drum_voice tomtom = {8, 0, 0x02, 0x03, 0xf0, 0x06, 0x10, 0x00}; static struct snd_opl3_drum_note tomtom_note = {8, 0xf4, 0x09}; static struct snd_opl3_drum_voice cymbal = {8, 1, 0x04, 0x03, 0xf0, 0x06, 0x10, 0x00}; /* * set drum voice characteristics */ static void snd_opl3_drum_voice_set(struct snd_opl3 *opl3, struct snd_opl3_drum_voice *data) { unsigned char op_offset = snd_opl3_regmap[data->voice][data->op]; unsigned char voice_offset = data->voice; unsigned short opl3_reg; /* Set OPL3 AM_VIB register */ opl3_reg = OPL3_LEFT | (OPL3_REG_AM_VIB + op_offset); opl3->command(opl3, opl3_reg, data->am_vib); /* Set OPL3 KSL_LEVEL register */ opl3_reg = OPL3_LEFT | (OPL3_REG_KSL_LEVEL + op_offset); opl3->command(opl3, opl3_reg, data->ksl_level); /* Set OPL3 ATTACK_DECAY register */ opl3_reg = 
OPL3_LEFT | (OPL3_REG_ATTACK_DECAY + op_offset); opl3->command(opl3, opl3_reg, data->attack_decay); /* Set OPL3 SUSTAIN_RELEASE register */ opl3_reg = OPL3_LEFT | (OPL3_REG_SUSTAIN_RELEASE + op_offset); opl3->command(opl3, opl3_reg, data->sustain_release); /* Set OPL3 FEEDBACK_CONNECTION register */ opl3_reg = OPL3_LEFT | (OPL3_REG_FEEDBACK_CONNECTION + voice_offset); opl3->command(opl3, opl3_reg, data->feedback_connection); /* Select waveform */ opl3_reg = OPL3_LEFT | (OPL3_REG_WAVE_SELECT + op_offset); opl3->command(opl3, opl3_reg, data->wave_select); } /* * Set drum voice pitch */ static void snd_opl3_drum_note_set(struct snd_opl3 *opl3, struct snd_opl3_drum_note *data) { unsigned char voice_offset = data->voice; unsigned short opl3_reg; /* Set OPL3 FNUM_LOW register */ opl3_reg = OPL3_LEFT | (OPL3_REG_FNUM_LOW + voice_offset); opl3->command(opl3, opl3_reg, data->fnum); /* Set OPL3 KEYON_BLOCK register */ opl3_reg = OPL3_LEFT | (OPL3_REG_KEYON_BLOCK + voice_offset); opl3->command(opl3, opl3_reg, data->octave_f); } /* * Set drum voice volume and position */ static void snd_opl3_drum_vol_set(struct snd_opl3 *opl3, struct snd_opl3_drum_voice *data, int vel, struct snd_midi_channel *chan) { unsigned char op_offset = snd_opl3_regmap[data->voice][data->op]; unsigned char voice_offset = data->voice; unsigned char reg_val; unsigned short opl3_reg; /* Set OPL3 KSL_LEVEL register */ reg_val = data->ksl_level; snd_opl3_calc_volume(&reg_val, vel, chan); opl3_reg = OPL3_LEFT | (OPL3_REG_KSL_LEVEL + op_offset); opl3->command(opl3, opl3_reg, reg_val); /* Set OPL3 FEEDBACK_CONNECTION register */ /* Set output voice connection */ reg_val = data->feedback_connection | OPL3_STEREO_BITS; if (chan->gm_pan < 43) reg_val &= ~OPL3_VOICE_TO_RIGHT; if (chan->gm_pan > 85) reg_val &= ~OPL3_VOICE_TO_LEFT; opl3_reg = OPL3_LEFT | (OPL3_REG_FEEDBACK_CONNECTION + voice_offset); opl3->command(opl3, opl3_reg, reg_val); } /* * Loads drum voices at init time */ void snd_opl3_load_drums(struct snd_opl3 *opl3) { snd_opl3_drum_voice_set(opl3, &bass_op0); snd_opl3_drum_voice_set(opl3, &bass_op1); snd_opl3_drum_note_set(opl3, &bass_note); snd_opl3_drum_voice_set(opl3, &hihat); snd_opl3_drum_voice_set(opl3, &snare); snd_opl3_drum_note_set(opl3, &snare_note); snd_opl3_drum_voice_set(opl3, &tomtom); snd_opl3_drum_note_set(opl3, &tomtom_note); snd_opl3_drum_voice_set(opl3, &cymbal); } /* * Switch drum voice on or off */ void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int vel, int on_off, struct snd_midi_channel *chan) { unsigned char drum_mask; struct snd_opl3_drum_voice *drum_voice; if (!(opl3->drum_reg & OPL3_PERCUSSION_ENABLE)) return; if ((note < 35) || (note > 81)) return; drum_mask = snd_opl3_drum_table[note - 35]; if (on_off) { switch (drum_mask) { case OPL3_BASSDRUM_ON: drum_voice = &bass_op1; break; case OPL3_HIHAT_ON: drum_voice = &hihat; break; case OPL3_SNAREDRUM_ON: drum_voice = &snare; break; case OPL3_TOMTOM_ON: drum_voice = &tomtom; break; case OPL3_CYMBAL_ON: drum_voice = &cymbal; break; default: drum_voice = &tomtom; } snd_opl3_drum_vol_set(opl3, drum_voice, vel, chan); opl3->drum_reg |= drum_mask; } else { opl3->drum_reg &= ~drum_mask; } opl3->command(opl3, OPL3_LEFT | OPL3_REG_PERCUSSION, opl3->drum_reg); }
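/*
 * Illustrative sketch, not part of the original file: the note handling
 * in snd_opl3_drum_switch() above is a bounded table lookup followed by
 * a bitmask update of the cached percussion register.  In isolation the
 * lookup arithmetic looks like this; the helper name is hypothetical.
 */
static unsigned char demo_drum_mask(int note, const char table[47])
{
	if (note < 35 || note > 81)	/* GM percussion range used above */
		return 0;
	return table[note - 35];	/* e.g. note 38 -> OPL3_SNAREDRUM_ON */
}
/*
 * On note-on the mask is OR-ed into opl3->drum_reg, on note-off it is
 * cleared, and the combined state is written once via
 * opl3->command(opl3, OPL3_LEFT | OPL3_REG_PERCUSSION, opl3->drum_reg).
 */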
gpl-2.0
tgraf/net-next
mm/workingset.c
30
17223
/* * Workingset detection * * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner */ #include <linux/memcontrol.h> #include <linux/writeback.h> #include <linux/pagemap.h> #include <linux/atomic.h> #include <linux/module.h> #include <linux/swap.h> #include <linux/fs.h> #include <linux/mm.h> /* * Double CLOCK lists * * Per node, two clock lists are maintained for file pages: the * inactive and the active list. Freshly faulted pages start out at * the head of the inactive list and page reclaim scans pages from the * tail. Pages that are accessed multiple times on the inactive list * are promoted to the active list, to protect them from reclaim, * whereas active pages are demoted to the inactive list when the * active list grows too big. * * fault ------------------------+ * | * +--------------+ | +-------------+ * reclaim <- | inactive | <-+-- demotion | active | <--+ * +--------------+ +-------------+ | * | | * +-------------- promotion ------------------+ * * * Access frequency and refault distance * * A workload is thrashing when its pages are frequently used but they * are evicted from the inactive list every time before another access * would have promoted them to the active list. * * In cases where the average access distance between thrashing pages * is bigger than the size of memory there is nothing that can be * done - the thrashing set could never fit into memory under any * circumstance. * * However, the average access distance could be bigger than the * inactive list, yet smaller than the size of memory. In this case, * the set could fit into memory if it weren't for the currently * active pages - which may be used more, hopefully less frequently: * * +-memory available to cache-+ * | | * +-inactive------+-active----+ * a b | c d e f g h i | J K L M N | * +---------------+-----------+ * * It is prohibitively expensive to accurately track access frequency * of pages. But a reasonable approximation can be made to measure * thrashing on the inactive list, after which refaulting pages can be * activated optimistically to compete with the existing active pages. * * Approximating inactive page access frequency - Observations: * * 1. When a page is accessed for the first time, it is added to the * head of the inactive list, slides every existing inactive page * towards the tail by one slot, and pushes the current tail page * out of memory. * * 2. When a page is accessed for the second time, it is promoted to * the active list, shrinking the inactive list by one slot. This * also slides all inactive pages that were faulted into the cache * more recently than the activated page towards the tail of the * inactive list. * * Thus: * * 1. The sum of evictions and activations between any two points in * time indicate the minimum number of inactive pages accessed in * between. * * 2. Moving one inactive page N page slots towards the tail of the * list requires at least N inactive page accesses. * * Combining these: * * 1. When a page is finally evicted from memory, the number of * inactive pages accessed while the page was in cache is at least * the number of page slots on the inactive list. * * 2. In addition, measuring the sum of evictions and activations (E) * at the time of a page's eviction, and comparing it to another * reading (R) at the time the page faults back into memory tells * the minimum number of accesses while the page was not cached. * This is called the refault distance. 
* * Because the first access of the page was the fault and the second * access the refault, we combine the in-cache distance with the * out-of-cache distance to get the complete minimum access distance * of this page: * * NR_inactive + (R - E) * * And knowing the minimum access distance of a page, we can easily * tell if the page would be able to stay in cache assuming all page * slots in the cache were available: * * NR_inactive + (R - E) <= NR_inactive + NR_active * * which can be further simplified to * * (R - E) <= NR_active * * Put into words, the refault distance (out-of-cache) can be seen as * a deficit in inactive list space (in-cache). If the inactive list * had (R - E) more page slots, the page would not have been evicted * in between accesses, but activated instead. And on a full system, * the only thing eating into inactive list space is active pages. * * * Activating refaulting pages * * All that is known about the active list is that the pages have been * accessed more than once in the past. This means that at any given * time there is actually a good chance that pages on the active list * are no longer in active use. * * So when a refault distance of (R - E) is observed and there are at * least (R - E) active pages, the refaulting page is activated * optimistically in the hope that (R - E) active pages are actually * used less frequently than the refaulting page - or even not used at * all anymore. * * If this is wrong and demotion kicks in, the pages which are truly * used more frequently will be reactivated while the less frequently * used ones will be evicted from memory. * * But if this is right, the stale pages will be pushed out of memory * and the used pages get to stay in cache. * * * Implementation * * For each node's file LRU lists, a counter for inactive evictions * and activations is maintained (node->inactive_age). * * On eviction, a snapshot of this counter (along with some bits to * identify the node) is stored in the now empty page cache radix tree * slot of the evicted page. This is called a shadow entry. * * On cache misses for which there are shadow entries, an eligible * refault distance will immediately activate the refaulting page. */ #define EVICTION_SHIFT (RADIX_TREE_EXCEPTIONAL_ENTRY + \ NODES_SHIFT + \ MEM_CGROUP_ID_SHIFT) #define EVICTION_MASK (~0UL >> EVICTION_SHIFT) /* * Eviction timestamps need to be able to cover the full range of * actionable refaults. However, bits are tight in the radix tree * entry, and after storing the identifier for the lruvec there might * not be enough left to represent every single actionable refault. In * that case, we have to sacrifice granularity for distance, and group * evictions into coarser buckets by shaving off lower timestamp bits. 
*/ static unsigned int bucket_order __read_mostly; static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction) { eviction >>= bucket_order; eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid; eviction = (eviction << NODES_SHIFT) | pgdat->node_id; eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT); return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY); } static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat, unsigned long *evictionp) { unsigned long entry = (unsigned long)shadow; int memcgid, nid; entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT; nid = entry & ((1UL << NODES_SHIFT) - 1); entry >>= NODES_SHIFT; memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1); entry >>= MEM_CGROUP_ID_SHIFT; *memcgidp = memcgid; *pgdat = NODE_DATA(nid); *evictionp = entry << bucket_order; } /** * workingset_eviction - note the eviction of a page from memory * @mapping: address space the page was backing * @page: the page being evicted * * Returns a shadow entry to be stored in @mapping->page_tree in place * of the evicted @page so that a later refault can be detected. */ void *workingset_eviction(struct address_space *mapping, struct page *page) { struct mem_cgroup *memcg = page_memcg(page); struct pglist_data *pgdat = page_pgdat(page); int memcgid = mem_cgroup_id(memcg); unsigned long eviction; struct lruvec *lruvec; /* Page is fully exclusive and pins page->mem_cgroup */ VM_BUG_ON_PAGE(PageLRU(page), page); VM_BUG_ON_PAGE(page_count(page), page); VM_BUG_ON_PAGE(!PageLocked(page), page); lruvec = mem_cgroup_lruvec(pgdat, memcg); eviction = atomic_long_inc_return(&lruvec->inactive_age); return pack_shadow(memcgid, pgdat, eviction); } /** * workingset_refault - evaluate the refault of a previously evicted page * @shadow: shadow entry of the evicted page * * Calculates and evaluates the refault distance of the previously * evicted page in the context of the node it was allocated in. * * Returns %true if the page should be activated, %false otherwise. */ bool workingset_refault(void *shadow) { unsigned long refault_distance; unsigned long active_file; struct mem_cgroup *memcg; unsigned long eviction; struct lruvec *lruvec; unsigned long refault; struct pglist_data *pgdat; int memcgid; unpack_shadow(shadow, &memcgid, &pgdat, &eviction); rcu_read_lock(); /* * Look up the memcg associated with the stored ID. It might * have been deleted since the page's eviction. * * Note that in rare events the ID could have been recycled * for a new cgroup that refaults a shared page. This is * impossible to tell from the available data. However, this * should be a rare and limited disturbance, and activations * are always speculative anyway. Ultimately, it's the aging * algorithm's job to shake out the minimum access frequency * for the active cache. * * XXX: On !CONFIG_MEMCG, this will always return NULL; it * would be better if the root_mem_cgroup existed in all * configurations instead. */ memcg = mem_cgroup_from_id(memcgid); if (!mem_cgroup_disabled() && !memcg) { rcu_read_unlock(); return false; } lruvec = mem_cgroup_lruvec(pgdat, memcg); refault = atomic_long_read(&lruvec->inactive_age); active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE); rcu_read_unlock(); /* * The unsigned subtraction here gives an accurate distance * across inactive_age overflows in most cases. * * There is a special case: usually, shadow entries have a * short lifetime and are either refaulted or reclaimed along * with the inode before they get too old. 
But it is not * impossible for the inactive_age to lap a shadow entry in * the field, which can then result in a false small * refault distance, leading to a false activation should this * old entry actually refault again. However, earlier kernels * used to deactivate unconditionally with *every* reclaim * invocation for the longest time, so the occasional * inappropriate activation leading to pressure on the active * list is not a problem. */ refault_distance = (refault - eviction) & EVICTION_MASK; inc_node_state(pgdat, WORKINGSET_REFAULT); if (refault_distance <= active_file) { inc_node_state(pgdat, WORKINGSET_ACTIVATE); return true; } return false; } /** * workingset_activation - note a page activation * @page: page that is being activated */ void workingset_activation(struct page *page) { struct mem_cgroup *memcg; struct lruvec *lruvec; rcu_read_lock(); /* * Filter non-memcg pages here, e.g. unmap can call * mark_page_accessed() on VDSO pages. * * XXX: See workingset_refault() - this should return * root_mem_cgroup even for !CONFIG_MEMCG. */ memcg = page_memcg_rcu(page); if (!mem_cgroup_disabled() && !memcg) goto out; lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg); atomic_long_inc(&lruvec->inactive_age); out: rcu_read_unlock(); } /* * Shadow entries reflect the share of the working set that does not * fit into memory, so their number depends on the access pattern of * the workload. In most cases, they will refault or get reclaimed * along with the inode, but a (malicious) workload that streams * through files with a total size several times that of available * memory, while preventing the inodes from being reclaimed, can * create excessive amounts of shadow nodes. To keep a lid on this, * track shadow nodes and reclaim them when they grow way past the * point where they would still be useful. */ struct list_lru workingset_shadow_nodes; static unsigned long count_shadow_nodes(struct shrinker *shrinker, struct shrink_control *sc) { unsigned long shadow_nodes; unsigned long max_nodes; unsigned long pages; /* list_lru lock nests inside IRQ-safe mapping->tree_lock */ local_irq_disable(); shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc); local_irq_enable(); if (sc->memcg) { pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid, LRU_ALL_FILE); } else { pages = node_page_state(NODE_DATA(sc->nid), NR_ACTIVE_FILE) + node_page_state(NODE_DATA(sc->nid), NR_INACTIVE_FILE); } /* * Active cache pages are limited to 50% of memory, and shadow * entries that represent a refault distance bigger than that * do not have any effect. Limit the number of shadow nodes * such that shadow entries do not exceed the number of active * cache pages, assuming a worst-case node population density * of 1/8th on average. * * On 64-bit with 7 radix_tree_nodes per page and 64 slots * each, this will reclaim shadow entries when they consume * ~2% of available memory: * * PAGE_SIZE / radix_tree_nodes / node_entries / PAGE_SIZE */ max_nodes = pages >> (1 + RADIX_TREE_MAP_SHIFT - 3); if (shadow_nodes <= max_nodes) return 0; return shadow_nodes - max_nodes; } static enum lru_status shadow_lru_isolate(struct list_head *item, struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) { struct address_space *mapping; struct radix_tree_node *node; unsigned int i; int ret; /* * Page cache insertions and deletions synchronously maintain * the shadow node LRU under the mapping->tree_lock and the * lru_lock. 
Because the page cache tree is emptied before * the inode can be destroyed, holding the lru_lock pins any * address_space that has radix tree nodes on the LRU. * * We can then safely transition to the mapping->tree_lock to * pin only the address_space of the particular node we want * to reclaim, take the node off-LRU, and drop the lru_lock. */ node = container_of(item, struct radix_tree_node, private_list); mapping = node->private_data; /* Coming from the list, invert the lock order */ if (!spin_trylock(&mapping->tree_lock)) { spin_unlock(lru_lock); ret = LRU_RETRY; goto out; } list_lru_isolate(lru, item); spin_unlock(lru_lock); /* * The nodes should only contain one or more shadow entries, * no pages, so we expect to be able to remove them all and * delete and free the empty node afterwards. */ BUG_ON(!workingset_node_shadows(node)); BUG_ON(workingset_node_pages(node)); for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) { if (node->slots[i]) { BUG_ON(!radix_tree_exceptional_entry(node->slots[i])); node->slots[i] = NULL; workingset_node_shadows_dec(node); BUG_ON(!mapping->nrexceptional); mapping->nrexceptional--; } } BUG_ON(workingset_node_shadows(node)); inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM); if (!__radix_tree_delete_node(&mapping->page_tree, node)) BUG(); spin_unlock(&mapping->tree_lock); ret = LRU_REMOVED_RETRY; out: local_irq_enable(); cond_resched(); local_irq_disable(); spin_lock(lru_lock); return ret; } static unsigned long scan_shadow_nodes(struct shrinker *shrinker, struct shrink_control *sc) { unsigned long ret; /* list_lru lock nests inside IRQ-safe mapping->tree_lock */ local_irq_disable(); ret = list_lru_shrink_walk(&workingset_shadow_nodes, sc, shadow_lru_isolate, NULL); local_irq_enable(); return ret; } static struct shrinker workingset_shadow_shrinker = { .count_objects = count_shadow_nodes, .scan_objects = scan_shadow_nodes, .seeks = DEFAULT_SEEKS, .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, }; /* * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe * mapping->tree_lock. */ static struct lock_class_key shadow_nodes_key; static int __init workingset_init(void) { unsigned int timestamp_bits; unsigned int max_order; int ret; BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT); /* * Calculate the eviction bucket size to cover the longest * actionable refault distance, which is currently half of * memory (totalram_pages/2). However, memory hotplug may add * some more pages at runtime, so keep working with up to * double the initial memory by using totalram_pages as-is. */ timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT; max_order = fls_long(totalram_pages - 1); if (max_order > timestamp_bits) bucket_order = max_order - timestamp_bits; pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n", timestamp_bits, max_order, bucket_order); ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key); if (ret) goto err; ret = register_shrinker(&workingset_shadow_shrinker); if (ret) goto err_list_lru; return 0; err_list_lru: list_lru_destroy(&workingset_shadow_nodes); err: return ret; } module_init(workingset_init);
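/*
 * Illustrative sketch, not part of the original file: the activation
 * test in workingset_refault() reduces to a wrap-safe unsigned
 * subtraction plus one comparison, i.e. the (R - E) <= NR_active rule
 * derived in the header comment.  Modeled standalone, with the counters
 * passed in explicitly (the function name is hypothetical):
 */
static int demo_should_activate(unsigned long refault,	  /* R: read at refault */
				unsigned long eviction,	  /* E: shadow snapshot */
				unsigned long active_file,
				unsigned long eviction_mask)
{
	/* unsigned subtraction stays correct across counter overflow */
	unsigned long distance = (refault - eviction) & eviction_mask;

	return distance <= active_file;	/* (R - E) <= NR_active */
}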
gpl-2.0
Fusion-Devices/android_kernel_moto_shamu
fs/f2fs/crypto.c
30
13840
/* * linux/fs/f2fs/crypto.c * * Copied from linux/fs/ext4/crypto.c * * Copyright (C) 2015, Google, Inc. * Copyright (C) 2015, Motorola Mobility * * This contains encryption functions for f2fs * * Written by Michael Halcrow, 2014. * * Filename encryption additions * Uday Savagaonkar, 2014 * Encryption policy handling additions * Ildar Muslukhov, 2014 * Remove ext4_encrypted_zeroout(), * add f2fs_restore_and_release_control_page() * Jaegeuk Kim, 2015. * * This has not yet undergone a rigorous security audit. * * The usage of AES-XTS should conform to recommendations in NIST * Special Publication 800-38E and IEEE P1619/D16. */ #include <crypto/hash.h> #include <crypto/sha.h> #include <keys/user-type.h> #include <keys/encrypted-type.h> #include <linux/crypto.h> #include <linux/ecryptfs.h> #include <linux/gfp.h> #include <linux/kernel.h> #include <linux/key.h> #include <linux/list.h> #include <linux/mempool.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/random.h> #include <linux/scatterlist.h> #include <linux/spinlock_types.h> #include <linux/f2fs_fs.h> #include <linux/ratelimit.h> #include <linux/bio.h> #include "f2fs.h" #include "xattr.h" /* Encryption added and removed here! (L: */ static unsigned int num_prealloc_crypto_ctxs = 128; static unsigned int num_prealloc_crypto_pages = BIO_MAX_PAGES; static unsigned int num_prealloc_emergent_pages = 32; module_param(num_prealloc_crypto_pages, uint, 0444); MODULE_PARM_DESC(num_prealloc_crypto_pages, "Number of crypto pages to preallocate"); module_param(num_prealloc_crypto_ctxs, uint, 0444); MODULE_PARM_DESC(num_prealloc_crypto_ctxs, "Number of crypto contexts to preallocate"); static mempool_t *f2fs_bounce_page_pool, *f2fs_emergent_page_pool; static LIST_HEAD(f2fs_free_crypto_ctxs); static DEFINE_SPINLOCK(f2fs_crypto_ctx_lock); static struct workqueue_struct *f2fs_read_workqueue; static DEFINE_MUTEX(crypto_init); static struct kmem_cache *f2fs_crypto_ctx_cachep; struct kmem_cache *f2fs_crypt_info_cachep; /** * f2fs_release_crypto_ctx() - Releases an encryption context * @ctx: The encryption context to release. * * If the encryption context was allocated from the pre-allocated pool, returns * it to that pool. Else, frees it. * * If there's a bounce page in the context, this frees that. */ void f2fs_release_crypto_ctx(struct f2fs_crypto_ctx *ctx) { unsigned long flags; if (ctx->flags & F2FS_WRITE_PATH_FL && ctx->w.bounce_page) { if (ctx->flags & F2FS_BOUNCE_PAGE_POOL_FREE_ENCRYPT_FL) mempool_free(ctx->w.bounce_page, f2fs_bounce_page_pool); else if (ctx->flags & F2FS_EMERGENT_PAGE_POOL_FREE_ENCRYPT_FL) mempool_free(ctx->w.bounce_page, f2fs_emergent_page_pool); else __free_page(ctx->w.bounce_page); ctx->w.bounce_page = NULL; } ctx->w.control_page = NULL; if (ctx->flags & F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL) { kmem_cache_free(f2fs_crypto_ctx_cachep, ctx); } else { spin_lock_irqsave(&f2fs_crypto_ctx_lock, flags); list_add(&ctx->free_list, &f2fs_free_crypto_ctxs); spin_unlock_irqrestore(&f2fs_crypto_ctx_lock, flags); } } /** * f2fs_get_crypto_ctx() - Gets an encryption context * @inode: The inode for which we are doing the crypto * * Allocates and initializes an encryption context. * * Return: An allocated and initialized encryption context on success; error * value or NULL otherwise. 
*/ struct f2fs_crypto_ctx *f2fs_get_crypto_ctx(struct inode *inode) { struct f2fs_crypto_ctx *ctx = NULL; unsigned long flags; struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info; if (ci == NULL) return ERR_PTR(-EACCES); /* * We first try getting the ctx from a free list because in * the common case the ctx will have an allocated and * initialized crypto tfm, so it's probably a worthwhile * optimization. For the bounce page, we first try getting it * from the kernel allocator because that's just about as fast * as getting it from a list and because a cache of free pages * should generally be a "last resort" option for a filesystem * to be able to do its job. */ spin_lock_irqsave(&f2fs_crypto_ctx_lock, flags); ctx = list_first_entry_or_null(&f2fs_free_crypto_ctxs, struct f2fs_crypto_ctx, free_list); if (ctx) list_del(&ctx->free_list); spin_unlock_irqrestore(&f2fs_crypto_ctx_lock, flags); if (!ctx) { ctx = kmem_cache_zalloc(f2fs_crypto_ctx_cachep, GFP_NOFS); if (!ctx) return ERR_PTR(-ENOMEM); ctx->flags |= F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL; } else { ctx->flags &= ~F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL; } ctx->flags &= ~F2FS_WRITE_PATH_FL; return ctx; } /* * Call f2fs_decrypt on every single page, reusing the encryption * context. */ static void completion_pages(struct work_struct *work) { struct f2fs_crypto_ctx *ctx = container_of(work, struct f2fs_crypto_ctx, r.work); struct bio *bio = ctx->r.bio; struct bio_vec *bv; int i; bio_for_each_segment_all(bv, bio, i) { struct page *page = bv->bv_page; int ret = f2fs_decrypt(ctx, page); if (ret) { WARN_ON_ONCE(1); SetPageError(page); } else SetPageUptodate(page); unlock_page(page); } f2fs_release_crypto_ctx(ctx); bio_put(bio); } void f2fs_end_io_crypto_work(struct f2fs_crypto_ctx *ctx, struct bio *bio) { INIT_WORK(&ctx->r.work, completion_pages); ctx->r.bio = bio; queue_work(f2fs_read_workqueue, &ctx->r.work); } static void f2fs_crypto_destroy(void) { struct f2fs_crypto_ctx *pos, *n; list_for_each_entry_safe(pos, n, &f2fs_free_crypto_ctxs, free_list) kmem_cache_free(f2fs_crypto_ctx_cachep, pos); INIT_LIST_HEAD(&f2fs_free_crypto_ctxs); if (f2fs_bounce_page_pool) mempool_destroy(f2fs_bounce_page_pool); f2fs_bounce_page_pool = NULL; if (f2fs_emergent_page_pool) mempool_destroy(f2fs_emergent_page_pool); f2fs_emergent_page_pool = NULL; } /** * f2fs_crypto_initialize() - Set up for f2fs encryption. * * We only call this when we start accessing encrypted files, since it * results in memory getting allocated that wouldn't otherwise be used. * * Return: Zero on success, non-zero otherwise. 
*/ int f2fs_crypto_initialize(void) { int i, res = -ENOMEM; if (f2fs_bounce_page_pool) return 0; mutex_lock(&crypto_init); if (f2fs_bounce_page_pool) goto already_initialized; for (i = 0; i < num_prealloc_crypto_ctxs; i++) { struct f2fs_crypto_ctx *ctx; ctx = kmem_cache_zalloc(f2fs_crypto_ctx_cachep, GFP_KERNEL); if (!ctx) goto fail; list_add(&ctx->free_list, &f2fs_free_crypto_ctxs); } /* must be allocated at the last step to avoid race condition above */ f2fs_bounce_page_pool = mempool_create_page_pool(num_prealloc_crypto_pages, 0); if (!f2fs_bounce_page_pool) goto fail; f2fs_emergent_page_pool = mempool_create_page_pool(num_prealloc_emergent_pages, 0); if (!f2fs_emergent_page_pool) goto fail; already_initialized: mutex_unlock(&crypto_init); return 0; fail: f2fs_crypto_destroy(); mutex_unlock(&crypto_init); return res; } /** * f2fs_exit_crypto() - Shutdown the f2fs encryption system */ void f2fs_exit_crypto(void) { f2fs_crypto_destroy(); if (f2fs_read_workqueue) destroy_workqueue(f2fs_read_workqueue); if (f2fs_crypto_ctx_cachep) kmem_cache_destroy(f2fs_crypto_ctx_cachep); if (f2fs_crypt_info_cachep) kmem_cache_destroy(f2fs_crypt_info_cachep); } int __init f2fs_init_crypto(void) { int res = -ENOMEM; f2fs_read_workqueue = alloc_workqueue("f2fs_crypto", WQ_HIGHPRI, 0); if (!f2fs_read_workqueue) goto fail; f2fs_crypto_ctx_cachep = KMEM_CACHE(f2fs_crypto_ctx, SLAB_RECLAIM_ACCOUNT); if (!f2fs_crypto_ctx_cachep) goto fail; f2fs_crypt_info_cachep = KMEM_CACHE(f2fs_crypt_info, SLAB_RECLAIM_ACCOUNT); if (!f2fs_crypt_info_cachep) goto fail; return 0; fail: f2fs_exit_crypto(); return res; } void f2fs_restore_and_release_control_page(struct page **page) { struct f2fs_crypto_ctx *ctx; struct page *bounce_page; /* The bounce data pages are unmapped. */ if ((*page)->mapping) return; /* The bounce data page is unmapped. 
*/ bounce_page = *page; ctx = (struct f2fs_crypto_ctx *)page_private(bounce_page); /* restore control page */ *page = ctx->w.control_page; f2fs_restore_control_page(bounce_page); } void f2fs_restore_control_page(struct page *data_page) { struct f2fs_crypto_ctx *ctx = (struct f2fs_crypto_ctx *)page_private(data_page); set_page_private(data_page, (unsigned long)NULL); ClearPagePrivate(data_page); unlock_page(data_page); f2fs_release_crypto_ctx(ctx); } /** * f2fs_crypt_complete() - The completion callback for page encryption * @req: The asynchronous encryption request context * @res: The result of the encryption operation */ static void f2fs_crypt_complete(struct crypto_async_request *req, int res) { struct f2fs_completion_result *ecr = req->data; if (res == -EINPROGRESS) return; ecr->res = res; complete(&ecr->completion); } typedef enum { F2FS_DECRYPT = 0, F2FS_ENCRYPT, } f2fs_direction_t; static int f2fs_page_crypto(struct f2fs_crypto_ctx *ctx, struct inode *inode, f2fs_direction_t rw, pgoff_t index, struct page *src_page, struct page *dest_page) { u8 xts_tweak[F2FS_XTS_TWEAK_SIZE]; struct ablkcipher_request *req = NULL; DECLARE_F2FS_COMPLETION_RESULT(ecr); struct scatterlist dst, src; struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info; struct crypto_ablkcipher *tfm = ci->ci_ctfm; int res = 0; req = ablkcipher_request_alloc(tfm, GFP_NOFS); if (!req) { printk_ratelimited(KERN_ERR "%s: crypto_request_alloc() failed\n", __func__); return -ENOMEM; } ablkcipher_request_set_callback( req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, f2fs_crypt_complete, &ecr); BUILD_BUG_ON(F2FS_XTS_TWEAK_SIZE < sizeof(index)); memcpy(xts_tweak, &index, sizeof(index)); memset(&xts_tweak[sizeof(index)], 0, F2FS_XTS_TWEAK_SIZE - sizeof(index)); sg_init_table(&dst, 1); sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0); sg_init_table(&src, 1); sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0); ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE, xts_tweak); if (rw == F2FS_DECRYPT) res = crypto_ablkcipher_decrypt(req); else res = crypto_ablkcipher_encrypt(req); if (res == -EINPROGRESS || res == -EBUSY) { BUG_ON(req->base.data != &ecr); wait_for_completion(&ecr.completion); res = ecr.res; } ablkcipher_request_free(req); if (res) { printk_ratelimited(KERN_ERR "%s: crypto_ablkcipher_encrypt() returned %d\n", __func__, res); return res; } return 0; } /** * f2fs_encrypt() - Encrypts a page * @inode: The inode for which the encryption should take place * @plaintext_page: The page to encrypt. Must be locked. * * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx * encryption context. * * Called on the page write path. The caller must call * f2fs_restore_control_page() on the returned ciphertext page to * release the bounce buffer and the encryption context. * * Return: An allocated page with the encrypted content on success. Else, an * error value or NULL. */ struct page *f2fs_encrypt(struct inode *inode, struct page *plaintext_page) { struct f2fs_crypto_ctx *ctx; struct page *ciphertext_page = NULL; int err; BUG_ON(!PageLocked(plaintext_page)); ctx = f2fs_get_crypto_ctx(inode); if (IS_ERR(ctx)) return (struct page *)ctx; /* The encryption operation will require a bounce page. 
*/ ctx->flags &= ~F2FS_MASK_PAGE_POOL_FREE_ENCRYPT_FL; ciphertext_page = mempool_alloc(f2fs_bounce_page_pool, GFP_NOFS); if (ciphertext_page) { ctx->flags |= F2FS_BOUNCE_PAGE_POOL_FREE_ENCRYPT_FL; goto got_it; } ciphertext_page = alloc_page(GFP_NOFS); if (!ciphertext_page) { /* * This is a potential bottleneck, but at least we'll have * forward progress. */ ciphertext_page = mempool_alloc(f2fs_emergent_page_pool, GFP_NOFS); if (WARN_ON_ONCE(!ciphertext_page)) ciphertext_page = mempool_alloc(f2fs_emergent_page_pool, GFP_NOFS | __GFP_NOFAIL); ctx->flags |= F2FS_EMERGENT_PAGE_POOL_FREE_ENCRYPT_FL; } got_it: ctx->flags |= F2FS_WRITE_PATH_FL; ctx->w.bounce_page = ciphertext_page; ctx->w.control_page = plaintext_page; err = f2fs_page_crypto(ctx, inode, F2FS_ENCRYPT, plaintext_page->index, plaintext_page, ciphertext_page); if (err) { f2fs_release_crypto_ctx(ctx); return ERR_PTR(err); } SetPagePrivate(ciphertext_page); set_page_private(ciphertext_page, (unsigned long)ctx); lock_page(ciphertext_page); return ciphertext_page; } /** * f2fs_decrypt() - Decrypts a page in-place * @ctx: The encryption context. * @page: The page to decrypt. Must be locked. * * Decrypts page in-place using the ctx encryption context. * * Called from the read completion callback. * * Return: Zero on success, non-zero otherwise. */ int f2fs_decrypt(struct f2fs_crypto_ctx *ctx, struct page *page) { BUG_ON(!PageLocked(page)); return f2fs_page_crypto(ctx, page->mapping->host, F2FS_DECRYPT, page->index, page, page); } /* * Convenience function which takes care of allocating and * deallocating the encryption context */ int f2fs_decrypt_one(struct inode *inode, struct page *page) { struct f2fs_crypto_ctx *ctx = f2fs_get_crypto_ctx(inode); int ret; if (!ctx) return -ENOMEM; ret = f2fs_decrypt(ctx, page); f2fs_release_crypto_ctx(ctx); return ret; } bool f2fs_valid_contents_enc_mode(uint32_t mode) { return (mode == F2FS_ENCRYPTION_MODE_AES_256_XTS); } /** * f2fs_validate_encryption_key_size() - Validate the encryption key size * @mode: The key mode. * @size: The key size to validate. * * Return: The validated key size for @mode. Zero if invalid. */ uint32_t f2fs_validate_encryption_key_size(uint32_t mode, uint32_t size) { if (size == f2fs_encryption_key_size(mode)) return size; return 0; }
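/*
 * Illustrative sketch, not part of the original file: the per-page XTS
 * tweak built in f2fs_page_crypto() above is just the page index placed
 * in the low bytes with the remainder zeroed, so every page index yields
 * a distinct tweak.  Modeled standalone; the tweak size macro and helper
 * name are hypothetical stand-ins for F2FS_XTS_TWEAK_SIZE and the inline
 * construction above.
 */
#define DEMO_XTS_TWEAK_SIZE 16

static void demo_build_tweak(unsigned char tweak[DEMO_XTS_TWEAK_SIZE],
			     unsigned long index)
{
	/* low bytes: the page index (guarded by a BUILD_BUG_ON above) */
	memcpy(tweak, &index, sizeof(index));
	/* remaining bytes zeroed, as in f2fs_page_crypto() */
	memset(tweak + sizeof(index), 0,
	       DEMO_XTS_TWEAK_SIZE - sizeof(index));
}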
gpl-2.0
kennethlyn/enclustra_zynq_linux
drivers/gpu/drm/i915/i915_irq.c
30
73480
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- */ /* * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/sysrq.h> #include <linux/slab.h> #include "drmP.h" #include "drm.h" #include "i915_drm.h" #include "i915_drv.h" #include "i915_trace.h" #include "intel_drv.h" /* For display hotplug interrupt */ static void ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) { if ((dev_priv->irq_mask & mask) != 0) { dev_priv->irq_mask &= ~mask; I915_WRITE(DEIMR, dev_priv->irq_mask); POSTING_READ(DEIMR); } } static inline void ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) { if ((dev_priv->irq_mask & mask) != mask) { dev_priv->irq_mask |= mask; I915_WRITE(DEIMR, dev_priv->irq_mask); POSTING_READ(DEIMR); } } void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) { if ((dev_priv->pipestat[pipe] & mask) != mask) { u32 reg = PIPESTAT(pipe); dev_priv->pipestat[pipe] |= mask; /* Enable the interrupt, clear any pending status */ I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); POSTING_READ(reg); } } void i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) { if ((dev_priv->pipestat[pipe] & mask) != 0) { u32 reg = PIPESTAT(pipe); dev_priv->pipestat[pipe] &= ~mask; I915_WRITE(reg, dev_priv->pipestat[pipe]); POSTING_READ(reg); } } /** * intel_enable_asle - enable ASLE interrupt for OpRegion */ void intel_enable_asle(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; unsigned long irqflags; /* FIXME: opregion/asle for VLV */ if (IS_VALLEYVIEW(dev)) return; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (HAS_PCH_SPLIT(dev)) ironlake_enable_display_irq(dev_priv, DE_GSE); else { i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE); if (INTEL_INFO(dev)->gen >= 4) i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE); } spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } /** * i915_pipe_enabled - check if a pipe is enabled * @dev: DRM device * @pipe: pipe to check * * Reading certain registers when the pipe is disabled can hang the chip. * Use this routine to make sure the PLL is running and the pipe is active * before reading such registers if unsure. 
*/ static int i915_pipe_enabled(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; } /* Called from drm generic code, passed a 'crtc', which * we use as a pipe index */ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long high_frame; unsigned long low_frame; u32 high1, high2, low; if (!i915_pipe_enabled(dev, pipe)) { DRM_DEBUG_DRIVER("trying to get vblank count for disabled " "pipe %c\n", pipe_name(pipe)); return 0; } high_frame = PIPEFRAME(pipe); low_frame = PIPEFRAMEPIXEL(pipe); /* * High & low register fields aren't synchronized, so make sure * we get a low value that's stable across two reads of the high * register. */ do { high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; } while (high1 != high2); high1 >>= PIPE_FRAME_HIGH_SHIFT; low >>= PIPE_FRAME_LOW_SHIFT; return (high1 << 8) | low; } static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int reg = PIPE_FRMCOUNT_GM45(pipe); if (!i915_pipe_enabled(dev, pipe)) { DRM_DEBUG_DRIVER("trying to get vblank count for disabled " "pipe %c\n", pipe_name(pipe)); return 0; } return I915_READ(reg); } static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, int *vpos, int *hpos) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 vbl = 0, position = 0; int vbl_start, vbl_end, htotal, vtotal; bool in_vbl = true; int ret = 0; if (!i915_pipe_enabled(dev, pipe)) { DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " "pipe %c\n", pipe_name(pipe)); return 0; } /* Get vtotal. */ vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff); if (INTEL_INFO(dev)->gen >= 4) { /* No obvious pixelcount register. Only query vertical * scanout position from Display scan line register. */ position = I915_READ(PIPEDSL(pipe)); /* Decode into vertical scanout position. Don't have * horizontal scanout position. */ *vpos = position & 0x1fff; *hpos = 0; } else { /* Have access to pixelcount since start of frame. * We can split this into vertical and horizontal * scanout position. */ position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff); *vpos = position / htotal; *hpos = position - (*vpos * htotal); } /* Query vblank area. */ vbl = I915_READ(VBLANK(pipe)); /* Test position against vblank region. */ vbl_start = vbl & 0x1fff; vbl_end = (vbl >> 16) & 0x1fff; if ((*vpos < vbl_start) || (*vpos > vbl_end)) in_vbl = false; /* Inside "upper part" of vblank area? Apply corrective offset: */ if (in_vbl && (*vpos >= vbl_start)) *vpos = *vpos - vtotal; /* Readouts valid? */ if (vbl > 0) ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; /* In vblank? 
*/ if (in_vbl) ret |= DRM_SCANOUTPOS_INVBL; return ret; } static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, int *max_error, struct timeval *vblank_time, unsigned flags) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; if (pipe < 0 || pipe >= dev_priv->num_pipe) { DRM_ERROR("Invalid crtc %d\n", pipe); return -EINVAL; } /* Get drm_crtc to timestamp: */ crtc = intel_get_crtc_for_pipe(dev, pipe); if (crtc == NULL) { DRM_ERROR("Invalid crtc %d\n", pipe); return -EINVAL; } if (!crtc->enabled) { DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); return -EBUSY; } /* Helper routine in DRM core does all the work: */ return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, vblank_time, flags, crtc); } /* * Handle hotplug events outside the interrupt handler proper. */ static void i915_hotplug_work_func(struct work_struct *work) { drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, hotplug_work); struct drm_device *dev = dev_priv->dev; struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *encoder; mutex_lock(&mode_config->mutex); DRM_DEBUG_KMS("running encoder hotplug functions\n"); list_for_each_entry(encoder, &mode_config->encoder_list, base.head) if (encoder->hot_plug) encoder->hot_plug(encoder); mutex_unlock(&mode_config->mutex); /* Just fire off a uevent and let userspace tell us what to do */ drm_helper_hpd_irq_event(dev); } static void i915_handle_rps_change(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; u32 busy_up, busy_down, max_avg, min_avg; u8 new_delay = dev_priv->cur_delay; I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); busy_up = I915_READ(RCPREVBSYTUPAVG); busy_down = I915_READ(RCPREVBSYTDNAVG); max_avg = I915_READ(RCBMAXAVG); min_avg = I915_READ(RCBMINAVG); /* Handle RCS change request from hw */ if (busy_up > max_avg) { if (dev_priv->cur_delay != dev_priv->max_delay) new_delay = dev_priv->cur_delay - 1; if (new_delay < dev_priv->max_delay) new_delay = dev_priv->max_delay; } else if (busy_down < min_avg) { if (dev_priv->cur_delay != dev_priv->min_delay) new_delay = dev_priv->cur_delay + 1; if (new_delay > dev_priv->min_delay) new_delay = dev_priv->min_delay; } if (ironlake_set_drps(dev, new_delay)) dev_priv->cur_delay = new_delay; return; } static void notify_ring(struct drm_device *dev, struct intel_ring_buffer *ring) { struct drm_i915_private *dev_priv = dev->dev_private; if (ring->obj == NULL) return; trace_i915_gem_request_complete(ring, ring->get_seqno(ring)); wake_up_all(&ring->irq_queue); if (i915_enable_hangcheck) { dev_priv->hangcheck_count = 0; mod_timer(&dev_priv->hangcheck_timer, jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); } } static void gen6_pm_rps_work(struct work_struct *work) { drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, rps_work); u32 pm_iir, pm_imr; u8 new_delay; spin_lock_irq(&dev_priv->rps_lock); pm_iir = dev_priv->pm_iir; dev_priv->pm_iir = 0; pm_imr = I915_READ(GEN6_PMIMR); I915_WRITE(GEN6_PMIMR, 0); spin_unlock_irq(&dev_priv->rps_lock); if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) return; mutex_lock(&dev_priv->dev->struct_mutex); if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) new_delay = dev_priv->cur_delay + 1; else new_delay = dev_priv->cur_delay - 1; gen6_set_rps(dev_priv->dev, new_delay); mutex_unlock(&dev_priv->dev->struct_mutex); } static void snb_gt_irq_handler(struct drm_device *dev, struct drm_i915_private *dev_priv, u32 gt_iir) { if (gt_iir & (GEN6_RENDER_USER_INTERRUPT | 
GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT)) notify_ring(dev, &dev_priv->ring[RCS]); if (gt_iir & GEN6_BSD_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[VCS]); if (gt_iir & GEN6_BLITTER_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[BCS]); if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT | GT_GEN6_BSD_CS_ERROR_INTERRUPT | GT_RENDER_CS_ERROR_INTERRUPT)) { DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); i915_handle_error(dev, false); } } static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, u32 pm_iir) { unsigned long flags; /* * IIR bits should never already be set because IMR should * prevent an interrupt from being shown in IIR. The warning * displays a case where we've unsafely cleared * dev_priv->pm_iir. Although missing an interrupt of the same * type is not a problem, it displays a problem in the logic. * * The mask bit in IMR is cleared by rps_work. */ spin_lock_irqsave(&dev_priv->rps_lock, flags); dev_priv->pm_iir |= pm_iir; I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); POSTING_READ(GEN6_PMIMR); spin_unlock_irqrestore(&dev_priv->rps_lock, flags); queue_work(dev_priv->wq, &dev_priv->rps_work); } static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 iir, gt_iir, pm_iir; irqreturn_t ret = IRQ_NONE; unsigned long irqflags; int pipe; u32 pipe_stats[I915_MAX_PIPES]; u32 vblank_status; int vblank = 0; bool blc_event; atomic_inc(&dev_priv->irq_received); vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS | PIPE_VBLANK_INTERRUPT_STATUS; while (true) { iir = I915_READ(VLV_IIR); gt_iir = I915_READ(GTIIR); pm_iir = I915_READ(GEN6_PMIIR); if (gt_iir == 0 && pm_iir == 0 && iir == 0) goto out; ret = IRQ_HANDLED; snb_gt_irq_handler(dev, dev_priv, gt_iir); spin_lock_irqsave(&dev_priv->irq_lock, irqflags); for_each_pipe(pipe) { int reg = PIPESTAT(pipe); pipe_stats[pipe] = I915_READ(reg); /* * Clear the PIPE*STAT regs before the IIR */ if (pipe_stats[pipe] & 0x8000ffff) { if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) DRM_DEBUG_DRIVER("pipe %c underrun\n", pipe_name(pipe)); I915_WRITE(reg, pipe_stats[pipe]); } } spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); /* Consume port. 
Then clear IIR or we'll miss events */ if (iir & I915_DISPLAY_PORT_INTERRUPT) { u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", hotplug_status); if (hotplug_status & dev_priv->hotplug_supported_mask) queue_work(dev_priv->wq, &dev_priv->hotplug_work); I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); I915_READ(PORT_HOTPLUG_STAT); } if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) { drm_handle_vblank(dev, 0); vblank++; intel_finish_page_flip(dev, 0); } if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) { drm_handle_vblank(dev, 1); vblank++; intel_finish_page_flip(dev, 1); } for_each_pipe(pipe) if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; if (pm_iir & GEN6_PM_DEFERRED_EVENTS) gen6_queue_rps_work(dev_priv, pm_iir); I915_WRITE(GTIIR, gt_iir); I915_WRITE(GEN6_PMIIR, pm_iir); I915_WRITE(VLV_IIR, iir); } out: return ret; } static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int pipe; if (pch_iir & SDE_AUDIO_POWER_MASK) DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", (pch_iir & SDE_AUDIO_POWER_MASK) >> SDE_AUDIO_POWER_SHIFT); if (pch_iir & SDE_GMBUS) DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); if (pch_iir & SDE_AUDIO_HDCP_MASK) DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); if (pch_iir & SDE_AUDIO_TRANS_MASK) DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); if (pch_iir & SDE_POISON) DRM_ERROR("PCH poison interrupt\n"); if (pch_iir & SDE_FDI_MASK) for_each_pipe(pipe) DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", pipe_name(pipe), I915_READ(FDI_RX_IIR(pipe))); if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); if (pch_iir & SDE_TRANSB_FIFO_UNDER) DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n"); if (pch_iir & SDE_TRANSA_FIFO_UNDER) DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); } static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int pipe; if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> SDE_AUDIO_POWER_SHIFT_CPT); if (pch_iir & SDE_AUX_MASK_CPT) DRM_DEBUG_DRIVER("AUX channel interrupt\n"); if (pch_iir & SDE_GMBUS_CPT) DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); if (pch_iir & SDE_AUDIO_CP_REQ_CPT) DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); if (pch_iir & SDE_AUDIO_CP_CHG_CPT) DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); if (pch_iir & SDE_FDI_MASK_CPT) for_each_pipe(pipe) DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", pipe_name(pipe), I915_READ(FDI_RX_IIR(pipe))); } static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 de_iir, gt_iir, de_ier, pm_iir; irqreturn_t ret = IRQ_NONE; int i; atomic_inc(&dev_priv->irq_received); /* disable master interrupt before clearing iir */ de_ier = I915_READ(DEIER); I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); gt_iir = I915_READ(GTIIR); if (gt_iir) { snb_gt_irq_handler(dev, dev_priv, gt_iir); I915_WRITE(GTIIR, gt_iir); ret = IRQ_HANDLED; } de_iir = I915_READ(DEIIR); if (de_iir) { if (de_iir & DE_GSE_IVB) intel_opregion_gse_intr(dev); for (i = 0; i < 3; i++) { if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << 
(5 * i))) { intel_prepare_page_flip(dev, i); intel_finish_page_flip_plane(dev, i); } if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) drm_handle_vblank(dev, i); } /* check event from PCH */ if (de_iir & DE_PCH_EVENT_IVB) { u32 pch_iir = I915_READ(SDEIIR); if (pch_iir & SDE_HOTPLUG_MASK_CPT) queue_work(dev_priv->wq, &dev_priv->hotplug_work); cpt_irq_handler(dev, pch_iir); /* clear PCH hotplug event before clear CPU irq */ I915_WRITE(SDEIIR, pch_iir); } I915_WRITE(DEIIR, de_iir); ret = IRQ_HANDLED; } pm_iir = I915_READ(GEN6_PMIIR); if (pm_iir) { if (pm_iir & GEN6_PM_DEFERRED_EVENTS) gen6_queue_rps_work(dev_priv, pm_iir); I915_WRITE(GEN6_PMIIR, pm_iir); ret = IRQ_HANDLED; } I915_WRITE(DEIER, de_ier); POSTING_READ(DEIER); return ret; } static void ilk_gt_irq_handler(struct drm_device *dev, struct drm_i915_private *dev_priv, u32 gt_iir) { if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) notify_ring(dev, &dev_priv->ring[RCS]); if (gt_iir & GT_BSD_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[VCS]); } static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int ret = IRQ_NONE; u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; u32 hotplug_mask; atomic_inc(&dev_priv->irq_received); /* disable master interrupt before clearing iir */ de_ier = I915_READ(DEIER); I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); POSTING_READ(DEIER); de_iir = I915_READ(DEIIR); gt_iir = I915_READ(GTIIR); pch_iir = I915_READ(SDEIIR); pm_iir = I915_READ(GEN6_PMIIR); if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0)) goto done; if (HAS_PCH_CPT(dev)) hotplug_mask = SDE_HOTPLUG_MASK_CPT; else hotplug_mask = SDE_HOTPLUG_MASK; ret = IRQ_HANDLED; if (IS_GEN5(dev)) ilk_gt_irq_handler(dev, dev_priv, gt_iir); else snb_gt_irq_handler(dev, dev_priv, gt_iir); if (de_iir & DE_GSE) intel_opregion_gse_intr(dev); if (de_iir & DE_PLANEA_FLIP_DONE) { intel_prepare_page_flip(dev, 0); intel_finish_page_flip_plane(dev, 0); } if (de_iir & DE_PLANEB_FLIP_DONE) { intel_prepare_page_flip(dev, 1); intel_finish_page_flip_plane(dev, 1); } if (de_iir & DE_PIPEA_VBLANK) drm_handle_vblank(dev, 0); if (de_iir & DE_PIPEB_VBLANK) drm_handle_vblank(dev, 1); /* check event from PCH */ if (de_iir & DE_PCH_EVENT) { if (pch_iir & hotplug_mask) queue_work(dev_priv->wq, &dev_priv->hotplug_work); if (HAS_PCH_CPT(dev)) cpt_irq_handler(dev, pch_iir); else ibx_irq_handler(dev, pch_iir); } if (de_iir & DE_PCU_EVENT) { I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); i915_handle_rps_change(dev); } if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) gen6_queue_rps_work(dev_priv, pm_iir); /* should clear PCH hotplug event before clear CPU irq */ I915_WRITE(SDEIIR, pch_iir); I915_WRITE(GTIIR, gt_iir); I915_WRITE(DEIIR, de_iir); I915_WRITE(GEN6_PMIIR, pm_iir); done: I915_WRITE(DEIER, de_ier); POSTING_READ(DEIER); return ret; } /** * i915_error_work_func - do process context error handling work * @work: work struct * * Fire an error uevent so userspace can see that a hang or error * was detected. 
*/ static void i915_error_work_func(struct work_struct *work) { drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, error_work); struct drm_device *dev = dev_priv->dev; char *error_event[] = { "ERROR=1", NULL }; char *reset_event[] = { "RESET=1", NULL }; char *reset_done_event[] = { "ERROR=0", NULL }; kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); if (atomic_read(&dev_priv->mm.wedged)) { DRM_DEBUG_DRIVER("resetting chip\n"); kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); if (!i915_reset(dev)) { atomic_set(&dev_priv->mm.wedged, 0); kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); } complete_all(&dev_priv->error_completion); } } #ifdef CONFIG_DEBUG_FS static struct drm_i915_error_object * i915_error_object_create(struct drm_i915_private *dev_priv, struct drm_i915_gem_object *src) { struct drm_i915_error_object *dst; int page, page_count; u32 reloc_offset; if (src == NULL || src->pages == NULL) return NULL; page_count = src->base.size / PAGE_SIZE; dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC); if (dst == NULL) return NULL; reloc_offset = src->gtt_offset; for (page = 0; page < page_count; page++) { unsigned long flags; void *d; d = kmalloc(PAGE_SIZE, GFP_ATOMIC); if (d == NULL) goto unwind; local_irq_save(flags); if (reloc_offset < dev_priv->mm.gtt_mappable_end && src->has_global_gtt_mapping) { void __iomem *s; /* Simply ignore tiling or any overlapping fence. * It's part of the error state, and this hopefully * captures what the GPU read. */ s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, reloc_offset); memcpy_fromio(d, s, PAGE_SIZE); io_mapping_unmap_atomic(s); } else { void *s; drm_clflush_pages(&src->pages[page], 1); s = kmap_atomic(src->pages[page]); memcpy(d, s, PAGE_SIZE); kunmap_atomic(s); drm_clflush_pages(&src->pages[page], 1); } local_irq_restore(flags); dst->pages[page] = d; reloc_offset += PAGE_SIZE; } dst->page_count = page_count; dst->gtt_offset = src->gtt_offset; return dst; unwind: while (page--) kfree(dst->pages[page]); kfree(dst); return NULL; } static void i915_error_object_free(struct drm_i915_error_object *obj) { int page; if (obj == NULL) return; for (page = 0; page < obj->page_count; page++) kfree(obj->pages[page]); kfree(obj); } void i915_error_state_free(struct kref *error_ref) { struct drm_i915_error_state *error = container_of(error_ref, typeof(*error), ref); int i; for (i = 0; i < ARRAY_SIZE(error->ring); i++) { i915_error_object_free(error->ring[i].batchbuffer); i915_error_object_free(error->ring[i].ringbuffer); kfree(error->ring[i].requests); } kfree(error->active_bo); kfree(error->overlay); kfree(error); } static void capture_bo(struct drm_i915_error_buffer *err, struct drm_i915_gem_object *obj) { err->size = obj->base.size; err->name = obj->base.name; err->seqno = obj->last_rendering_seqno; err->gtt_offset = obj->gtt_offset; err->read_domains = obj->base.read_domains; err->write_domain = obj->base.write_domain; err->fence_reg = obj->fence_reg; err->pinned = 0; if (obj->pin_count > 0) err->pinned = 1; if (obj->user_pin_count > 0) err->pinned = -1; err->tiling = obj->tiling_mode; err->dirty = obj->dirty; err->purgeable = obj->madv != I915_MADV_WILLNEED; err->ring = obj->ring ? 
obj->ring->id : -1; err->cache_level = obj->cache_level; } static u32 capture_active_bo(struct drm_i915_error_buffer *err, int count, struct list_head *head) { struct drm_i915_gem_object *obj; int i = 0; list_for_each_entry(obj, head, mm_list) { capture_bo(err++, obj); if (++i == count) break; } return i; } static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, int count, struct list_head *head) { struct drm_i915_gem_object *obj; int i = 0; list_for_each_entry(obj, head, gtt_list) { if (obj->pin_count == 0) continue; capture_bo(err++, obj); if (++i == count) break; } return i; } static void i915_gem_record_fences(struct drm_device *dev, struct drm_i915_error_state *error) { struct drm_i915_private *dev_priv = dev->dev_private; int i; /* Fences */ switch (INTEL_INFO(dev)->gen) { case 7: case 6: for (i = 0; i < 16; i++) error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); break; case 5: case 4: for (i = 0; i < 16; i++) error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); break; case 3: if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) for (i = 0; i < 8; i++) error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); case 2: for (i = 0; i < 8; i++) error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); break; } } static struct drm_i915_error_object * i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, struct intel_ring_buffer *ring) { struct drm_i915_gem_object *obj; u32 seqno; if (!ring->get_seqno) return NULL; seqno = ring->get_seqno(ring); list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { if (obj->ring != ring) continue; if (i915_seqno_passed(seqno, obj->last_rendering_seqno)) continue; if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) continue; /* We need to copy these to an anonymous buffer as the simplest * method to avoid being overwritten by userspace. 
*/ return i915_error_object_create(dev_priv, obj); } return NULL; } static void i915_record_ring_state(struct drm_device *dev, struct drm_i915_error_state *error, struct intel_ring_buffer *ring) { struct drm_i915_private *dev_priv = dev->dev_private; if (INTEL_INFO(dev)->gen >= 6) { error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); error->semaphore_mboxes[ring->id][0] = I915_READ(RING_SYNC_0(ring->mmio_base)); error->semaphore_mboxes[ring->id][1] = I915_READ(RING_SYNC_1(ring->mmio_base)); } if (INTEL_INFO(dev)->gen >= 4) { error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); if (ring->id == RCS) { error->instdone1 = I915_READ(INSTDONE1); error->bbaddr = I915_READ64(BB_ADDR); } } else { error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); error->ipeir[ring->id] = I915_READ(IPEIR); error->ipehr[ring->id] = I915_READ(IPEHR); error->instdone[ring->id] = I915_READ(INSTDONE); } error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); error->seqno[ring->id] = ring->get_seqno(ring); error->acthd[ring->id] = intel_ring_get_active_head(ring); error->head[ring->id] = I915_READ_HEAD(ring); error->tail[ring->id] = I915_READ_TAIL(ring); error->cpu_ring_head[ring->id] = ring->head; error->cpu_ring_tail[ring->id] = ring->tail; } static void i915_gem_record_rings(struct drm_device *dev, struct drm_i915_error_state *error) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; struct drm_i915_gem_request *request; int i, count; for_each_ring(ring, dev_priv, i) { i915_record_ring_state(dev, error, ring); error->ring[i].batchbuffer = i915_error_first_batchbuffer(dev_priv, ring); error->ring[i].ringbuffer = i915_error_object_create(dev_priv, ring->obj); count = 0; list_for_each_entry(request, &ring->request_list, list) count++; error->ring[i].num_requests = count; error->ring[i].requests = kmalloc(count*sizeof(struct drm_i915_error_request), GFP_ATOMIC); if (error->ring[i].requests == NULL) { error->ring[i].num_requests = 0; continue; } count = 0; list_for_each_entry(request, &ring->request_list, list) { struct drm_i915_error_request *erq; erq = &error->ring[i].requests[count++]; erq->seqno = request->seqno; erq->jiffies = request->emitted_jiffies; erq->tail = request->tail; } } } /** * i915_capture_error_state - capture an error record for later analysis * @dev: drm device * * Should be called when an error is detected (either a hang or an error * interrupt) to capture error state from the time of the error. Fills * out a structure which becomes available in debugfs for user level tools * to pick up. 
*/ static void i915_capture_error_state(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; struct drm_i915_error_state *error; unsigned long flags; int i, pipe; spin_lock_irqsave(&dev_priv->error_lock, flags); error = dev_priv->first_error; spin_unlock_irqrestore(&dev_priv->error_lock, flags); if (error) return; /* Account for pipe specific data like PIPE*STAT */ error = kzalloc(sizeof(*error), GFP_ATOMIC); if (!error) { DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); return; } DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", dev->primary->index); kref_init(&error->ref); error->eir = I915_READ(EIR); error->pgtbl_er = I915_READ(PGTBL_ER); if (HAS_PCH_SPLIT(dev)) error->ier = I915_READ(DEIER) | I915_READ(GTIER); else if (IS_VALLEYVIEW(dev)) error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); else if (IS_GEN2(dev)) error->ier = I915_READ16(IER); else error->ier = I915_READ(IER); for_each_pipe(pipe) error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); if (INTEL_INFO(dev)->gen >= 6) { error->error = I915_READ(ERROR_GEN6); error->done_reg = I915_READ(DONE_REG); } i915_gem_record_fences(dev, error); i915_gem_record_rings(dev, error); /* Record buffers on the active and pinned lists. */ error->active_bo = NULL; error->pinned_bo = NULL; i = 0; list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) i++; error->active_bo_count = i; list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) if (obj->pin_count) i++; error->pinned_bo_count = i - error->active_bo_count; error->active_bo = NULL; error->pinned_bo = NULL; if (i) { error->active_bo = kmalloc(sizeof(*error->active_bo)*i, GFP_ATOMIC); if (error->active_bo) error->pinned_bo = error->active_bo + error->active_bo_count; } if (error->active_bo) error->active_bo_count = capture_active_bo(error->active_bo, error->active_bo_count, &dev_priv->mm.active_list); if (error->pinned_bo) error->pinned_bo_count = capture_pinned_bo(error->pinned_bo, error->pinned_bo_count, &dev_priv->mm.gtt_list); do_gettimeofday(&error->time); error->overlay = intel_overlay_capture_error_state(dev); error->display = intel_display_capture_error_state(dev); spin_lock_irqsave(&dev_priv->error_lock, flags); if (dev_priv->first_error == NULL) { dev_priv->first_error = error; error = NULL; } spin_unlock_irqrestore(&dev_priv->error_lock, flags); if (error) i915_error_state_free(&error->ref); } void i915_destroy_error_state(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_error_state *error; unsigned long flags; spin_lock_irqsave(&dev_priv->error_lock, flags); error = dev_priv->first_error; dev_priv->first_error = NULL; spin_unlock_irqrestore(&dev_priv->error_lock, flags); if (error) kref_put(&error->ref, i915_error_state_free); } #else #define i915_capture_error_state(x) #endif static void i915_report_and_clear_eir(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 eir = I915_READ(EIR); int pipe; if (!eir) return; pr_err("render error detected, EIR: 0x%08x\n", eir); if (IS_G4X(dev)) { if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { u32 ipeir = I915_READ(IPEIR_I965); pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE_I965)); pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1)); pr_err(" ACTHD: 0x%08x\n", 
I915_READ(ACTHD_I965)); I915_WRITE(IPEIR_I965, ipeir); POSTING_READ(IPEIR_I965); } if (eir & GM45_ERROR_PAGE_TABLE) { u32 pgtbl_err = I915_READ(PGTBL_ER); pr_err("page table error\n"); pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); I915_WRITE(PGTBL_ER, pgtbl_err); POSTING_READ(PGTBL_ER); } } if (!IS_GEN2(dev)) { if (eir & I915_ERROR_PAGE_TABLE) { u32 pgtbl_err = I915_READ(PGTBL_ER); pr_err("page table error\n"); pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); I915_WRITE(PGTBL_ER, pgtbl_err); POSTING_READ(PGTBL_ER); } } if (eir & I915_ERROR_MEMORY_REFRESH) { pr_err("memory refresh error:\n"); for_each_pipe(pipe) pr_err("pipe %c stat: 0x%08x\n", pipe_name(pipe), I915_READ(PIPESTAT(pipe))); /* pipestat has already been acked */ } if (eir & I915_ERROR_INSTRUCTION) { pr_err("instruction error\n"); pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); if (INTEL_INFO(dev)->gen < 4) { u32 ipeir = I915_READ(IPEIR); pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE)); pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); I915_WRITE(IPEIR, ipeir); POSTING_READ(IPEIR); } else { u32 ipeir = I915_READ(IPEIR_I965); pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE_I965)); pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1)); pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); I915_WRITE(IPEIR_I965, ipeir); POSTING_READ(IPEIR_I965); } } I915_WRITE(EIR, eir); POSTING_READ(EIR); eir = I915_READ(EIR); if (eir) { /* * some errors might have become stuck, * mask them. */ DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); I915_WRITE(EMR, I915_READ(EMR) | eir); I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); } } /** * i915_handle_error - handle an error interrupt * @dev: drm device * * Do some basic checking of register state at error interrupt time and * dump it to the syslog. Also call i915_capture_error_state() to make * sure we get a record and make it available in debugfs. Fire a uevent * so userspace knows something bad happened (should trigger collection * of a ring dump etc.). */ void i915_handle_error(struct drm_device *dev, bool wedged) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; int i; i915_capture_error_state(dev); i915_report_and_clear_eir(dev); if (wedged) { INIT_COMPLETION(dev_priv->error_completion); atomic_set(&dev_priv->mm.wedged, 1); /* * Wake up waiting processes so they don't hang */ for_each_ring(ring, dev_priv, i) wake_up_all(&ring->irq_queue); } queue_work(dev_priv->wq, &dev_priv->error_work); } static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_i915_gem_object *obj; struct intel_unpin_work *work; unsigned long flags; bool stall_detected; /* Ignore early vblank irqs */ if (intel_crtc == NULL) return; spin_lock_irqsave(&dev->event_lock, flags); work = intel_crtc->unpin_work; if (work == NULL || work->pending || !work->enable_stall_check) { /* Either the pending flip IRQ arrived, or we're too early. 
Don't check */ spin_unlock_irqrestore(&dev->event_lock, flags); return; } /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ obj = work->pending_flip_obj; if (INTEL_INFO(dev)->gen >= 4) { int dspsurf = DSPSURF(intel_crtc->plane); stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == obj->gtt_offset; } else { int dspaddr = DSPADDR(intel_crtc->plane); stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + crtc->y * crtc->fb->pitches[0] + crtc->x * crtc->fb->bits_per_pixel/8); } spin_unlock_irqrestore(&dev->event_lock, flags); if (stall_detected) { DRM_DEBUG_DRIVER("Pageflip stall detected\n"); intel_prepare_page_flip(dev, intel_crtc->plane); } } /* Called from drm generic code, passed 'crtc' which * we use as a pipe index */ static int i915_enable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; if (!i915_pipe_enabled(dev, pipe)) return -EINVAL; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (INTEL_INFO(dev)->gen >= 4) i915_enable_pipestat(dev_priv, pipe, PIPE_START_VBLANK_INTERRUPT_ENABLE); else i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE); /* maintain vblank delivery even in deep C-states */ if (dev_priv->info->gen == 3) I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); return 0; } static int ironlake_enable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; if (!i915_pipe_enabled(dev, pipe)) return -EINVAL; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); ironlake_enable_display_irq(dev_priv, (pipe == 0) ? DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); return 0; } static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; if (!i915_pipe_enabled(dev, pipe)) return -EINVAL; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); ironlake_enable_display_irq(dev_priv, DE_PIPEA_VBLANK_IVB << (5 * pipe)); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); return 0; } static int valleyview_enable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; u32 dpfl, imr; if (!i915_pipe_enabled(dev, pipe)) return -EINVAL; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); dpfl = I915_READ(VLV_DPFLIPSTAT); imr = I915_READ(VLV_IMR); if (pipe == 0) { dpfl |= PIPEA_VBLANK_INT_EN; imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; } else { dpfl |= PIPEB_VBLANK_INT_EN; imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; } I915_WRITE(VLV_DPFLIPSTAT, dpfl); I915_WRITE(VLV_IMR, imr); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); return 0; } /* Called from drm generic code, passed 'crtc' which * we use as a pipe index */ static void i915_disable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (dev_priv->info->gen == 3) I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE | PIPE_START_VBLANK_INTERRUPT_ENABLE); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } static void ironlake_disable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t 
*dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); ironlake_disable_display_irq(dev_priv, (pipe == 0) ? DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); ironlake_disable_display_irq(dev_priv, DE_PIPEA_VBLANK_IVB << (pipe * 5)); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } static void valleyview_disable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; u32 dpfl, imr; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); dpfl = I915_READ(VLV_DPFLIPSTAT); imr = I915_READ(VLV_IMR); if (pipe == 0) { dpfl &= ~PIPEA_VBLANK_INT_EN; imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; } else { dpfl &= ~PIPEB_VBLANK_INT_EN; imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; } I915_WRITE(VLV_IMR, imr); I915_WRITE(VLV_DPFLIPSTAT, dpfl); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } static u32 ring_last_seqno(struct intel_ring_buffer *ring) { return list_entry(ring->request_list.prev, struct drm_i915_gem_request, list)->seqno; } static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) { if (list_empty(&ring->request_list) || i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) { /* Issue a wake-up to catch stuck h/w. */ if (waitqueue_active(&ring->irq_queue)) { DRM_ERROR("Hangcheck timer elapsed... %s idle\n", ring->name); wake_up_all(&ring->irq_queue); *err = true; } return true; } return false; } static bool kick_ring(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 tmp = I915_READ_CTL(ring); if (tmp & RING_WAIT) { DRM_ERROR("Kicking stuck wait on %s\n", ring->name); I915_WRITE_CTL(ring, tmp); return true; } return false; } static bool i915_hangcheck_hung(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; if (dev_priv->hangcheck_count++ > 1) { bool hung = true; DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); i915_handle_error(dev, true); if (!IS_GEN2(dev)) { struct intel_ring_buffer *ring; int i; /* Is the chip hanging on a WAIT_FOR_EVENT? * If so we can simply poke the RB_WAIT bit * and break the hang. This should work on * all but the second generation chipsets. */ for_each_ring(ring, dev_priv, i) hung &= !kick_ring(ring); } return hung; } return false; } /** * This is called when the chip hasn't reported back with completed * batchbuffers in a long time. The first time this is called we simply record * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses * again, we assume the chip is wedged and try to fix it. */ void i915_hangcheck_elapsed(unsigned long data) { struct drm_device *dev = (struct drm_device *)data; drm_i915_private_t *dev_priv = dev->dev_private; uint32_t acthd[I915_NUM_RINGS], instdone, instdone1; struct intel_ring_buffer *ring; bool err = false, idle; int i; if (!i915_enable_hangcheck) return; memset(acthd, 0, sizeof(acthd)); idle = true; for_each_ring(ring, dev_priv, i) { idle &= i915_hangcheck_ring_idle(ring, &err); acthd[i] = intel_ring_get_active_head(ring); } /* If all work is done then ACTHD clearly hasn't advanced. 
*/ if (idle) { if (err) { if (i915_hangcheck_hung(dev)) return; goto repeat; } dev_priv->hangcheck_count = 0; return; } if (INTEL_INFO(dev)->gen < 4) { instdone = I915_READ(INSTDONE); instdone1 = 0; } else { instdone = I915_READ(INSTDONE_I965); instdone1 = I915_READ(INSTDONE1); } if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 && dev_priv->last_instdone == instdone && dev_priv->last_instdone1 == instdone1) { if (i915_hangcheck_hung(dev)) return; } else { dev_priv->hangcheck_count = 0; memcpy(dev_priv->last_acthd, acthd, sizeof(acthd)); dev_priv->last_instdone = instdone; dev_priv->last_instdone1 = instdone1; } repeat: /* Reset timer in case chip hangs without another request being added */ mod_timer(&dev_priv->hangcheck_timer, jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); } /* drm_dma.h hooks */ static void ironlake_irq_preinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; atomic_set(&dev_priv->irq_received, 0); I915_WRITE(HWSTAM, 0xeffe); /* XXX hotplug from PCH */ I915_WRITE(DEIMR, 0xffffffff); I915_WRITE(DEIER, 0x0); POSTING_READ(DEIER); /* and GT */ I915_WRITE(GTIMR, 0xffffffff); I915_WRITE(GTIER, 0x0); POSTING_READ(GTIER); /* south display irq */ I915_WRITE(SDEIMR, 0xffffffff); I915_WRITE(SDEIER, 0x0); POSTING_READ(SDEIER); } static void valleyview_irq_preinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int pipe; atomic_set(&dev_priv->irq_received, 0); /* VLV magic */ I915_WRITE(VLV_IMR, 0); I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); I915_WRITE(RING_IMR(BLT_RING_BASE), 0); /* and GT */ I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(GTIMR, 0xffffffff); I915_WRITE(GTIER, 0x0); POSTING_READ(GTIER); I915_WRITE(DPINVGTT, 0xff); I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); for_each_pipe(pipe) I915_WRITE(PIPESTAT(pipe), 0xffff); I915_WRITE(VLV_IIR, 0xffffffff); I915_WRITE(VLV_IMR, 0xffffffff); I915_WRITE(VLV_IER, 0x0); POSTING_READ(VLV_IER); } /* * Enable digital hotplug on the PCH, and configure the DP short pulse * duration to 2ms (which is the minimum in the Display Port spec) * * This register is the same on all known PCH chips. 
*/ static void ironlake_enable_pch_hotplug(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 hotplug; hotplug = I915_READ(PCH_PORT_HOTPLUG); hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; I915_WRITE(PCH_PORT_HOTPLUG, hotplug); } static int ironlake_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; /* enable the kinds of interrupts that are always enabled */ u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; u32 render_irqs; u32 hotplug_mask; dev_priv->irq_mask = ~display_mask; /* should always be able to generate an irq */ I915_WRITE(DEIIR, I915_READ(DEIIR)); I915_WRITE(DEIMR, dev_priv->irq_mask); I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); POSTING_READ(DEIER); dev_priv->gt_irq_mask = ~0; I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(GTIMR, dev_priv->gt_irq_mask); if (IS_GEN6(dev)) render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | GEN6_BLITTER_USER_INTERRUPT; else render_irqs = GT_USER_INTERRUPT | GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; I915_WRITE(GTIER, render_irqs); POSTING_READ(GTIER); if (HAS_PCH_CPT(dev)) { hotplug_mask = (SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT); } else { hotplug_mask = (SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG | SDE_AUX_MASK); } dev_priv->pch_irq_mask = ~hotplug_mask; I915_WRITE(SDEIIR, I915_READ(SDEIIR)); I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); I915_WRITE(SDEIER, hotplug_mask); POSTING_READ(SDEIER); ironlake_enable_pch_hotplug(dev); if (IS_IRONLAKE_M(dev)) { /* Clear & enable PCU event interrupts */ I915_WRITE(DEIIR, DE_PCU_EVENT); I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); } return 0; } static int ivybridge_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; /* enable the kinds of interrupts that are always enabled */ u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | DE_PLANEB_FLIP_DONE_IVB | DE_PLANEA_FLIP_DONE_IVB; u32 render_irqs; u32 hotplug_mask; dev_priv->irq_mask = ~display_mask; /* should always be able to generate an irq */ I915_WRITE(DEIIR, I915_READ(DEIIR)); I915_WRITE(DEIMR, dev_priv->irq_mask); I915_WRITE(DEIER, display_mask | DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | DE_PIPEA_VBLANK_IVB); POSTING_READ(DEIER); dev_priv->gt_irq_mask = ~0; I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(GTIMR, dev_priv->gt_irq_mask); render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | GEN6_BLITTER_USER_INTERRUPT; I915_WRITE(GTIER, render_irqs); POSTING_READ(GTIER); hotplug_mask = (SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT); dev_priv->pch_irq_mask = ~hotplug_mask; I915_WRITE(SDEIIR, I915_READ(SDEIIR)); I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); I915_WRITE(SDEIER, hotplug_mask); POSTING_READ(SDEIER); ironlake_enable_pch_hotplug(dev); return 0; } static int valleyview_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 render_irqs; u32 enable_mask; u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); u16 msid; 
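/* The block below follows the usual i915 postinstall pattern: build enable_mask from the events we want delivered, keep its complement in dev_priv->irq_mask for later mask/unmask updates, program IMR before IER, and clear stale IIR/PIPESTAT bits so nothing from before the enable is reported as new. */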
enable_mask = I915_DISPLAY_PORT_INTERRUPT; enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; dev_priv->irq_mask = ~enable_mask; dev_priv->pipestat[0] = 0; dev_priv->pipestat[1] = 0; /* Hack for broken MSIs on VLV */ pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000); pci_read_config_word(dev->pdev, 0x98, &msid); msid &= 0xff; /* mask out delivery bits */ msid |= (1<<14); pci_write_config_word(dev_priv->dev->pdev, 0x98, msid); I915_WRITE(VLV_IMR, dev_priv->irq_mask); I915_WRITE(VLV_IER, enable_mask); I915_WRITE(VLV_IIR, 0xffffffff); I915_WRITE(PIPESTAT(0), 0xffff); I915_WRITE(PIPESTAT(1), 0xffff); POSTING_READ(VLV_IER); I915_WRITE(VLV_IIR, 0xffffffff); I915_WRITE(VLV_IIR, 0xffffffff); render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT | GT_GEN6_BLT_CS_ERROR_INTERRUPT | GT_GEN6_BLT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT | GT_GEN6_BSD_CS_ERROR_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT | GT_PIPE_NOTIFY | GT_RENDER_CS_ERROR_INTERRUPT | GT_SYNC_STATUS | GT_USER_INTERRUPT; dev_priv->gt_irq_mask = ~render_irqs; I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(GTIMR, 0); I915_WRITE(GTIER, render_irqs); POSTING_READ(GTIER); /* ack & enable invalid PTE error interrupts */ #if 0 /* FIXME: add support to irq handler for checking these bits */ I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); #endif I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); #if 0 /* FIXME: check register definitions; some have moved */ /* Note HDMI and DP share bits */ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) hotplug_en |= HDMIB_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) hotplug_en |= HDMIC_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) hotplug_en |= HDMID_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) hotplug_en |= SDVOC_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) hotplug_en |= SDVOB_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { hotplug_en |= CRT_HOTPLUG_INT_EN; hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; } #endif I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); return 0; } static void valleyview_irq_uninstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int pipe; if (!dev_priv) return; for_each_pipe(pipe) I915_WRITE(PIPESTAT(pipe), 0xffff); I915_WRITE(HWSTAM, 0xffffffff); I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); for_each_pipe(pipe) I915_WRITE(PIPESTAT(pipe), 0xffff); I915_WRITE(VLV_IIR, 0xffffffff); I915_WRITE(VLV_IMR, 0xffffffff); I915_WRITE(VLV_IER, 0x0); POSTING_READ(VLV_IER); } static void ironlake_irq_uninstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; if (!dev_priv) return; I915_WRITE(HWSTAM, 0xffffffff); I915_WRITE(DEIMR, 0xffffffff); I915_WRITE(DEIER, 0x0); I915_WRITE(DEIIR, I915_READ(DEIIR)); I915_WRITE(GTIMR, 0xffffffff); I915_WRITE(GTIER, 0x0); I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(SDEIMR, 0xffffffff); I915_WRITE(SDEIER, 0x0); I915_WRITE(SDEIIR, I915_READ(SDEIIR)); } static void i8xx_irq_preinstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int pipe; atomic_set(&dev_priv->irq_received, 0); for_each_pipe(pipe) I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE16(IMR, 0xffff); 
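/* With IMR fully masked, clearing IER below stops interrupt delivery entirely; the postinstall hook then programs the masks we actually want. */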
I915_WRITE16(IER, 0x0); POSTING_READ16(IER); } static int i8xx_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; dev_priv->pipestat[0] = 0; dev_priv->pipestat[1] = 0; I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); /* Unmask the interrupts that we always want on. */ dev_priv->irq_mask = ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); I915_WRITE16(IMR, dev_priv->irq_mask); I915_WRITE16(IER, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | I915_USER_INTERRUPT); POSTING_READ16(IER); return 0; } static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u16 iir, new_iir; u32 pipe_stats[2]; unsigned long irqflags; int irq_received; int pipe; u16 flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; atomic_inc(&dev_priv->irq_received); iir = I915_READ16(IIR); if (iir == 0) return IRQ_NONE; while (iir & ~flip_mask) { /* Can't rely on pipestat interrupt bit in iir as it might * have been cleared after the pipestat interrupt was received. * It doesn't set the bit in iir again, but it still produces * interrupts (for non-MSI). */ spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) i915_handle_error(dev, false); for_each_pipe(pipe) { int reg = PIPESTAT(pipe); pipe_stats[pipe] = I915_READ(reg); /* * Clear the PIPE*STAT regs before the IIR */ if (pipe_stats[pipe] & 0x8000ffff) { if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) DRM_DEBUG_DRIVER("pipe %c underrun\n", pipe_name(pipe)); I915_WRITE(reg, pipe_stats[pipe]); irq_received = 1; } } spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); I915_WRITE16(IIR, iir & ~flip_mask); new_iir = I915_READ16(IIR); /* Flush posted writes */ i915_update_dri1_breadcrumb(dev); if (iir & I915_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[RCS]); if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && drm_handle_vblank(dev, 0)) { if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { intel_prepare_page_flip(dev, 0); intel_finish_page_flip(dev, 0); flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT; } } if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && drm_handle_vblank(dev, 1)) { if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) { intel_prepare_page_flip(dev, 1); intel_finish_page_flip(dev, 1); flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; } } iir = new_iir; } return IRQ_HANDLED; } static void i8xx_irq_uninstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int pipe; for_each_pipe(pipe) { /* Clear enable bits; then clear status bits */ I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); } I915_WRITE16(IMR, 0xffff); I915_WRITE16(IER, 0x0); I915_WRITE16(IIR, I915_READ16(IIR)); } static void i915_irq_preinstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int pipe; atomic_set(&dev_priv->irq_received, 0); if (I915_HAS_HOTPLUG(dev)) { I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); } I915_WRITE16(HWSTAM, 0xeffe); 
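/* HWSTAM selects which interrupt status bits the hardware mirrors into the status page; mask almost everything here and leave the fine-grained choices to the postinstall hook. */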
for_each_pipe(pipe) I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(IMR, 0xffffffff); I915_WRITE(IER, 0x0); POSTING_READ(IER); } static int i915_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 enable_mask; dev_priv->pipestat[0] = 0; dev_priv->pipestat[1] = 0; I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); /* Unmask the interrupts that we always want on. */ dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); enable_mask = I915_ASLE_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | I915_USER_INTERRUPT; if (I915_HAS_HOTPLUG(dev)) { /* Enable in IER... */ enable_mask |= I915_DISPLAY_PORT_INTERRUPT; /* and unmask in IMR */ dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; } I915_WRITE(IMR, dev_priv->irq_mask); I915_WRITE(IER, enable_mask); POSTING_READ(IER); if (I915_HAS_HOTPLUG(dev)) { u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) hotplug_en |= HDMIB_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) hotplug_en |= HDMIC_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) hotplug_en |= HDMID_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) hotplug_en |= SDVOC_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) hotplug_en |= SDVOB_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { hotplug_en |= CRT_HOTPLUG_INT_EN; hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; } /* Ignore TV since it's buggy */ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); } intel_opregion_enable_asle(dev); return 0; } static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; unsigned long irqflags; u32 flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; u32 flip[2] = { I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT, I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT }; int pipe, ret = IRQ_NONE; atomic_inc(&dev_priv->irq_received); iir = I915_READ(IIR); do { bool irq_received = (iir & ~flip_mask) != 0; bool blc_event = false; /* Can't rely on pipestat interrupt bit in iir as it might * have been cleared after the pipestat interrupt was received. * It doesn't set the bit in iir again, but it still produces * interrupts (for non-MSI). */ spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) i915_handle_error(dev, false); for_each_pipe(pipe) { int reg = PIPESTAT(pipe); pipe_stats[pipe] = I915_READ(reg); /* Clear the PIPE*STAT regs before the IIR */ if (pipe_stats[pipe] & 0x8000ffff) { if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) DRM_DEBUG_DRIVER("pipe %c underrun\n", pipe_name(pipe)); I915_WRITE(reg, pipe_stats[pipe]); irq_received = true; } } spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); if (!irq_received) break; /* Consume port. 
Then clear IIR or we'll miss events */ if ((I915_HAS_HOTPLUG(dev)) && (iir & I915_DISPLAY_PORT_INTERRUPT)) { u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", hotplug_status); if (hotplug_status & dev_priv->hotplug_supported_mask) queue_work(dev_priv->wq, &dev_priv->hotplug_work); I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); POSTING_READ(PORT_HOTPLUG_STAT); } I915_WRITE(IIR, iir & ~flip_mask); new_iir = I915_READ(IIR); /* Flush posted writes */ if (iir & I915_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[RCS]); for_each_pipe(pipe) { int plane = pipe; if (IS_MOBILE(dev)) plane = !plane; if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && drm_handle_vblank(dev, pipe)) { if (iir & flip[plane]) { intel_prepare_page_flip(dev, plane); intel_finish_page_flip(dev, pipe); flip_mask &= ~flip[plane]; } } if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; } if (blc_event || (iir & I915_ASLE_INTERRUPT)) intel_opregion_asle_intr(dev); /* With MSI, interrupts are only generated when iir * transitions from zero to nonzero. If another bit got * set while we were handling the existing iir bits, then * we would never get another interrupt. * * This is fine on non-MSI as well, as if we hit this path * we avoid exiting the interrupt handler only to generate * another one. * * Note that for MSI this could cause a stray interrupt report * if an interrupt landed in the time between writing IIR and * the posting read. This should be rare enough to never * trigger the 99% of 100,000 interrupts test for disabling * stray interrupts. */ ret = IRQ_HANDLED; iir = new_iir; } while (iir & ~flip_mask); i915_update_dri1_breadcrumb(dev); return ret; } static void i915_irq_uninstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int pipe; if (I915_HAS_HOTPLUG(dev)) { I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); } I915_WRITE16(HWSTAM, 0xffff); for_each_pipe(pipe) { /* Clear enable bits; then clear status bits */ I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); } I915_WRITE(IMR, 0xffffffff); I915_WRITE(IER, 0x0); I915_WRITE(IIR, I915_READ(IIR)); } static void i965_irq_preinstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int pipe; atomic_set(&dev_priv->irq_received, 0); if (I915_HAS_HOTPLUG(dev)) { I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); } I915_WRITE(HWSTAM, 0xeffe); for_each_pipe(pipe) I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(IMR, 0xffffffff); I915_WRITE(IER, 0x0); POSTING_READ(IER); } static int i965_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 enable_mask; u32 error_mask; /* Unmask the interrupts that we always want on. */ dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); enable_mask = ~dev_priv->irq_mask; enable_mask |= I915_USER_INTERRUPT; if (IS_G4X(dev)) enable_mask |= I915_BSD_USER_INTERRUPT; dev_priv->pipestat[0] = 0; dev_priv->pipestat[1] = 0; if (I915_HAS_HOTPLUG(dev)) { /* Enable in IER... 
*/ enable_mask |= I915_DISPLAY_PORT_INTERRUPT; /* and unmask in IMR */ dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; } /* * Enable some error detection, note the instruction error mask * bit is reserved, so we leave it masked. */ if (IS_G4X(dev)) { error_mask = ~(GM45_ERROR_PAGE_TABLE | GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV | I915_ERROR_MEMORY_REFRESH); } else { error_mask = ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH); } I915_WRITE(EMR, error_mask); I915_WRITE(IMR, dev_priv->irq_mask); I915_WRITE(IER, enable_mask); POSTING_READ(IER); if (I915_HAS_HOTPLUG(dev)) { u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); /* Note HDMI and DP share bits */ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) hotplug_en |= HDMIB_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) hotplug_en |= HDMIC_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) hotplug_en |= HDMID_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) hotplug_en |= SDVOC_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) hotplug_en |= SDVOB_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { hotplug_en |= CRT_HOTPLUG_INT_EN; /* Programming the CRT detection parameters tends to generate a spurious hotplug event about three seconds later. So just do it once. */ if (IS_G4X(dev)) hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; } /* Ignore TV since it's buggy */ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); } intel_opregion_enable_asle(dev); return 0; } static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 iir, new_iir; u32 pipe_stats[I915_MAX_PIPES]; unsigned long irqflags; int irq_received; int ret = IRQ_NONE, pipe; atomic_inc(&dev_priv->irq_received); iir = I915_READ(IIR); for (;;) { bool blc_event = false; irq_received = iir != 0; /* Can't rely on pipestat interrupt bit in iir as it might * have been cleared after the pipestat interrupt was received. * It doesn't set the bit in iir again, but it still produces * interrupts (for non-MSI). */ spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) i915_handle_error(dev, false); for_each_pipe(pipe) { int reg = PIPESTAT(pipe); pipe_stats[pipe] = I915_READ(reg); /* * Clear the PIPE*STAT regs before the IIR */ if (pipe_stats[pipe] & 0x8000ffff) { if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) DRM_DEBUG_DRIVER("pipe %c underrun\n", pipe_name(pipe)); I915_WRITE(reg, pipe_stats[pipe]); irq_received = 1; } } spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); if (!irq_received) break; ret = IRQ_HANDLED; /* Consume port. 
Then clear IIR or we'll miss events */ if ((I915_HAS_HOTPLUG(dev)) && (iir & I915_DISPLAY_PORT_INTERRUPT)) { u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", hotplug_status); if (hotplug_status & dev_priv->hotplug_supported_mask) queue_work(dev_priv->wq, &dev_priv->hotplug_work); I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); I915_READ(PORT_HOTPLUG_STAT); } I915_WRITE(IIR, iir); new_iir = I915_READ(IIR); /* Flush posted writes */ if (iir & I915_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[RCS]); if (iir & I915_BSD_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[VCS]); if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) intel_prepare_page_flip(dev, 0); if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) intel_prepare_page_flip(dev, 1); for_each_pipe(pipe) { if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && drm_handle_vblank(dev, pipe)) { i915_pageflip_stall_check(dev, pipe); intel_finish_page_flip(dev, pipe); } if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; } if (blc_event || (iir & I915_ASLE_INTERRUPT)) intel_opregion_asle_intr(dev); /* With MSI, interrupts are only generated when iir * transitions from zero to nonzero. If another bit got * set while we were handling the existing iir bits, then * we would never get another interrupt. * * This is fine on non-MSI as well, as if we hit this path * we avoid exiting the interrupt handler only to generate * another one. * * Note that for MSI this could cause a stray interrupt report * if an interrupt landed in the time between writing IIR and * the posting read. This should be rare enough to never * trigger the 99% of 100,000 interrupts test for disabling * stray interrupts. */ iir = new_iir; } i915_update_dri1_breadcrumb(dev); return ret; } static void i965_irq_uninstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int pipe; if (!dev_priv) return; if (I915_HAS_HOTPLUG(dev)) { I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); } I915_WRITE(HWSTAM, 0xffffffff); for_each_pipe(pipe) I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(IMR, 0xffffffff); I915_WRITE(IER, 0x0); for_each_pipe(pipe) I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)) & 0x8000ffff); I915_WRITE(IIR, I915_READ(IIR)); } void intel_irq_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); INIT_WORK(&dev_priv->error_work, i915_error_work_func); INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work); dev->driver->get_vblank_counter = i915_get_vblank_counter; dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ dev->driver->get_vblank_counter = gm45_get_vblank_counter; } if (drm_core_check_feature(dev, DRIVER_MODESET)) dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; else dev->driver->get_vblank_timestamp = NULL; dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; if (IS_VALLEYVIEW(dev)) { dev->driver->irq_handler = valleyview_irq_handler; dev->driver->irq_preinstall = valleyview_irq_preinstall; dev->driver->irq_postinstall = valleyview_irq_postinstall; dev->driver->irq_uninstall = valleyview_irq_uninstall; dev->driver->enable_vblank = valleyview_enable_vblank; dev->driver->disable_vblank = valleyview_disable_vblank; } else if (IS_IVYBRIDGE(dev)) { /* Share pre 
& uninstall handlers with ILK/SNB */ dev->driver->irq_handler = ivybridge_irq_handler; dev->driver->irq_preinstall = ironlake_irq_preinstall; dev->driver->irq_postinstall = ivybridge_irq_postinstall; dev->driver->irq_uninstall = ironlake_irq_uninstall; dev->driver->enable_vblank = ivybridge_enable_vblank; dev->driver->disable_vblank = ivybridge_disable_vblank; } else if (IS_HASWELL(dev)) { /* Share interrupts handling with IVB */ dev->driver->irq_handler = ivybridge_irq_handler; dev->driver->irq_preinstall = ironlake_irq_preinstall; dev->driver->irq_postinstall = ivybridge_irq_postinstall; dev->driver->irq_uninstall = ironlake_irq_uninstall; dev->driver->enable_vblank = ivybridge_enable_vblank; dev->driver->disable_vblank = ivybridge_disable_vblank; } else if (HAS_PCH_SPLIT(dev)) { dev->driver->irq_handler = ironlake_irq_handler; dev->driver->irq_preinstall = ironlake_irq_preinstall; dev->driver->irq_postinstall = ironlake_irq_postinstall; dev->driver->irq_uninstall = ironlake_irq_uninstall; dev->driver->enable_vblank = ironlake_enable_vblank; dev->driver->disable_vblank = ironlake_disable_vblank; } else { if (INTEL_INFO(dev)->gen == 2) { dev->driver->irq_preinstall = i8xx_irq_preinstall; dev->driver->irq_postinstall = i8xx_irq_postinstall; dev->driver->irq_handler = i8xx_irq_handler; dev->driver->irq_uninstall = i8xx_irq_uninstall; } else if (INTEL_INFO(dev)->gen == 3) { /* IIR "flip pending" means done if this bit is set */ I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE)); dev->driver->irq_preinstall = i915_irq_preinstall; dev->driver->irq_postinstall = i915_irq_postinstall; dev->driver->irq_uninstall = i915_irq_uninstall; dev->driver->irq_handler = i915_irq_handler; } else { dev->driver->irq_preinstall = i965_irq_preinstall; dev->driver->irq_postinstall = i965_irq_postinstall; dev->driver->irq_uninstall = i965_irq_uninstall; dev->driver->irq_handler = i965_irq_handler; } dev->driver->enable_vblank = i915_enable_vblank; dev->driver->disable_vblank = i915_disable_vblank; } }
gpl-2.0
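A minimal standalone sketch of the MSI-safe IIR loop described in the long comment inside the i915_irq.c record above: acknowledge the bits that were read, re-read IIR to flush the posted write, then loop on the fresh value so a bit that latched mid-handler is never lost. The read_iir()/write_iir()/handle_events() helpers are hypothetical stand-ins for I915_READ(IIR)/I915_WRITE(IIR, ...) and the event dispatch, not driver API.

typedef unsigned int u32;

u32 read_iir(void);           /* hypothetical: stands in for I915_READ(IIR) */
void write_iir(u32 val);      /* hypothetical: stands in for I915_WRITE(IIR, val) */
void handle_events(u32 bits); /* hypothetical: ring/vblank/hotplug dispatch */

static void iir_loop_sketch(void)
{
	u32 iir = read_iir();

	while (iir) {
		u32 new_iir;

		write_iir(iir);       /* ack the bits we are about to handle */
		new_iir = read_iir(); /* flush the posted write, pick up new bits */

		handle_events(iir);

		/*
		 * With MSI an interrupt only fires on a 0 -> nonzero IIR
		 * transition, so bits that latched while we were handling
		 * the old ones must be consumed here rather than waiting
		 * for an edge that will never come.
		 */
		iir = new_iir;
	}
}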
virt2x/domuKernel
drivers/pci/setup-res.c
286
9025
/* * drivers/pci/setup-res.c * * Extruded from code written by * Dave Rusling (david.rusling@reo.mts.dec.com) * David Mosberger (davidm@cs.arizona.edu) * David Miller (davem@redhat.com) * * Support routines for initializing a PCI subsystem. */ /* fixed for multiple pci buses, 1999 Andrea Arcangeli <andrea@suse.de> */ /* * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru> * Resource sorting */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/pci.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/cache.h> #include <linux/slab.h> #include "pci.h" void pci_update_resource(struct pci_dev *dev, int resno) { struct pci_bus_region region; bool disable; u16 cmd; u32 new, check, mask; int reg; enum pci_bar_type type; struct resource *res = dev->resource + resno; /* * Ignore resources for unimplemented BARs and unused resource slots * for 64 bit BARs. */ if (!res->flags) return; /* * Ignore non-moveable resources. This might be legacy resources for * which no functional BAR register exists or another important * system resource we shouldn't move around. */ if (res->flags & IORESOURCE_PCI_FIXED) return; pcibios_resource_to_bus(dev, &region, res); new = region.start | (res->flags & PCI_REGION_FLAG_MASK); if (res->flags & IORESOURCE_IO) mask = (u32)PCI_BASE_ADDRESS_IO_MASK; else mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; reg = pci_resource_bar(dev, resno, &type); if (!reg) return; if (type != pci_bar_unknown) { if (!(res->flags & IORESOURCE_ROM_ENABLE)) return; new |= PCI_ROM_ADDRESS_ENABLE; } /* * We can't update a 64-bit BAR atomically, so when possible, * disable decoding so that a half-updated BAR won't conflict * with another device. */ disable = (res->flags & IORESOURCE_MEM_64) && !dev->mmio_always_on; if (disable) { pci_read_config_word(dev, PCI_COMMAND, &cmd); pci_write_config_word(dev, PCI_COMMAND, cmd & ~PCI_COMMAND_MEMORY); } pci_write_config_dword(dev, reg, new); pci_read_config_dword(dev, reg, &check); if ((new ^ check) & mask) { dev_err(&dev->dev, "BAR %d: error updating (%#08x != %#08x)\n", resno, new, check); } if (res->flags & IORESOURCE_MEM_64) { new = region.start >> 16 >> 16; pci_write_config_dword(dev, reg + 4, new); pci_read_config_dword(dev, reg + 4, &check); if (check != new) { dev_err(&dev->dev, "BAR %d: error updating " "(high %#08x != %#08x)\n", resno, new, check); } } if (disable) pci_write_config_word(dev, PCI_COMMAND, cmd); res->flags &= ~IORESOURCE_UNSET; dev_dbg(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx])\n", resno, res, (unsigned long long)region.start, (unsigned long long)region.end); } int pci_claim_resource(struct pci_dev *dev, int resource) { struct resource *res = &dev->resource[resource]; struct resource *root, *conflict; root = pci_find_parent_resource(dev, res); if (!root) { dev_info(&dev->dev, "no compatible bridge window for %pR\n", res); return -EINVAL; } conflict = request_resource_conflict(root, res); if (conflict) { dev_info(&dev->dev, "address space collision: %pR conflicts with %s %pR\n", res, conflict->name, conflict); return -EBUSY; } return 0; } EXPORT_SYMBOL(pci_claim_resource); void pci_disable_bridge_window(struct pci_dev *dev) { dev_info(&dev->dev, "disabling bridge mem windows\n"); /* MMIO Base/Limit */ pci_write_config_dword(dev, PCI_MEMORY_BASE, 0x0000fff0); /* Prefetchable MMIO Base/Limit */ pci_write_config_dword(dev, PCI_PREF_LIMIT_UPPER32, 0); pci_write_config_dword(dev, PCI_PREF_MEMORY_BASE, 0x0000fff0); pci_write_config_dword(dev, PCI_PREF_BASE_UPPER32, 
0xffffffff); } /* * Generic function that returns a value indicating that the device's * original BIOS BAR address was not saved and so is not available for * reinstatement. * * Can be over-ridden by architecture specific code that implements * reinstatement functionality rather than leaving it disabled when * normal allocation attempts fail. */ resource_size_t __weak pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx) { return 0; } static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev, int resno, resource_size_t size) { struct resource *root, *conflict; resource_size_t fw_addr, start, end; int ret = 0; fw_addr = pcibios_retrieve_fw_addr(dev, resno); if (!fw_addr) return 1; start = res->start; end = res->end; res->start = fw_addr; res->end = res->start + size - 1; root = pci_find_parent_resource(dev, res); if (!root) { if (res->flags & IORESOURCE_IO) root = &ioport_resource; else root = &iomem_resource; } dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n", resno, res); conflict = request_resource_conflict(root, res); if (conflict) { dev_info(&dev->dev, "BAR %d: %pR conflicts with %s %pR\n", resno, res, conflict->name, conflict); res->start = start; res->end = end; ret = 1; } return ret; } static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, int resno, resource_size_t size, resource_size_t align) { struct resource *res = dev->resource + resno; resource_size_t min; int ret; min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; /* First, try exact prefetching match.. */ ret = pci_bus_alloc_resource(bus, res, size, align, min, IORESOURCE_PREFETCH, pcibios_align_resource, dev); if (ret < 0 && (res->flags & IORESOURCE_PREFETCH)) { /* * That failed. * * But a prefetching area can handle a non-prefetching * window (it will just not perform as well). */ ret = pci_bus_alloc_resource(bus, res, size, align, min, 0, pcibios_align_resource, dev); } return ret; } static int _pci_assign_resource(struct pci_dev *dev, int resno, resource_size_t size, resource_size_t min_align) { struct resource *res = dev->resource + resno; struct pci_bus *bus; int ret; char *type; bus = dev->bus; while ((ret = __pci_assign_resource(bus, dev, resno, size, min_align))) { if (!bus->parent || !bus->self->transparent) break; bus = bus->parent; } if (ret) { if (res->flags & IORESOURCE_MEM) if (res->flags & IORESOURCE_PREFETCH) type = "mem pref"; else type = "mem"; else if (res->flags & IORESOURCE_IO) type = "io"; else type = "unknown"; dev_info(&dev->dev, "BAR %d: can't assign %s (size %#llx)\n", resno, type, (unsigned long long) resource_size(res)); } return ret; } int pci_assign_resource(struct pci_dev *dev, int resno) { struct resource *res = dev->resource + resno; resource_size_t align, size; struct pci_bus *bus; int ret; align = pci_resource_alignment(dev, res); if (!align) { dev_info(&dev->dev, "BAR %d: can't assign %pR " "(bogus alignment)\n", resno, res); return -EINVAL; } bus = dev->bus; size = resource_size(res); ret = _pci_assign_resource(dev, resno, size, align); /* * If we failed to assign anything, let's try the address * where firmware left it. That at least has a chance of * working, which is better than just leaving it disabled. 
*/ if (ret < 0) ret = pci_revert_fw_address(res, dev, resno, size); if (!ret) { res->flags &= ~IORESOURCE_STARTALIGN; dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); if (resno < PCI_BRIDGE_RESOURCES) pci_update_resource(dev, resno); } return ret; } int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize, resource_size_t min_align) { struct resource *res = dev->resource + resno; resource_size_t new_size; int ret; if (!res->parent) { dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resource %pR " "\n", resno, res); return -EINVAL; } /* already aligned with min_align */ new_size = resource_size(res) + addsize; ret = _pci_assign_resource(dev, resno, new_size, min_align); if (!ret) { res->flags &= ~IORESOURCE_STARTALIGN; dev_info(&dev->dev, "BAR %d: reassigned %pR\n", resno, res); if (resno < PCI_BRIDGE_RESOURCES) pci_update_resource(dev, resno); } return ret; } int pci_enable_resources(struct pci_dev *dev, int mask) { u16 cmd, old_cmd; int i; struct resource *r; pci_read_config_word(dev, PCI_COMMAND, &cmd); old_cmd = cmd; for (i = 0; i < PCI_NUM_RESOURCES; i++) { if (!(mask & (1 << i))) continue; r = &dev->resource[i]; if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM))) continue; if ((i == PCI_ROM_RESOURCE) && (!(r->flags & IORESOURCE_ROM_ENABLE))) continue; if (!r->parent) { dev_err(&dev->dev, "device not available " "(can't reserve %pR)\n", r); return -EINVAL; } if (r->flags & IORESOURCE_IO) cmd |= PCI_COMMAND_IO; if (r->flags & IORESOURCE_MEM) cmd |= PCI_COMMAND_MEMORY; } if (cmd != old_cmd) { dev_info(&dev->dev, "enabling device (%04x -> %04x)\n", old_cmd, cmd); pci_write_config_word(dev, PCI_COMMAND, cmd); } return 0; }
gpl-2.0
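A worked, user-space illustration of the 64-bit BAR update in pci_update_resource() above: the low dword goes to the BAR register, the high dword to the register four bytes above it, and the high half is derived with two 16-bit shifts so the expression stays well defined even when the address type is compiled as 32 bits (where a single >> 32 would be undefined behaviour). The address value is made up for the example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* example 64-bit BAR address above 4 GiB; not from real hardware */
	uint64_t bar_addr = 0x00000002f0000000ULL;

	uint32_t lo = (uint32_t)bar_addr;               /* written to reg */
	uint32_t hi = (uint32_t)(bar_addr >> 16 >> 16); /* written to reg + 4 */

	printf("low dword  %#010x\n", lo);  /* 0xf0000000 */
	printf("high dword %#010x\n", hi);  /* 0x00000002 */
	return 0;
}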
ulrikdb/linux
drivers/spi/spi-pxa2xx-pci.c
286
1952
/*
 * CE4100's SPI device is more or less the same one as found on PXA
 *
 */
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/module.h>
#include <linux/spi/pxa2xx_spi.h>

static int ce4100_spi_probe(struct pci_dev *dev,
		const struct pci_device_id *ent)
{
	struct platform_device_info pi;
	int ret;
	struct platform_device *pdev;
	struct pxa2xx_spi_master spi_pdata;
	struct ssp_device *ssp;

	ret = pcim_enable_device(dev);
	if (ret)
		return ret;

	ret = pcim_iomap_regions(dev, 1 << 0, "PXA2xx SPI");
	if (ret)
		return ret;

	memset(&spi_pdata, 0, sizeof(spi_pdata));
	spi_pdata.num_chipselect = dev->devfn;

	ssp = &spi_pdata.ssp;
	ssp->phys_base = pci_resource_start(dev, 0);
	ssp->mmio_base = pcim_iomap_table(dev)[0];
	if (!ssp->mmio_base) {
		dev_err(&dev->dev, "failed to ioremap() registers\n");
		return -EIO;
	}
	ssp->irq = dev->irq;
	ssp->port_id = dev->devfn;
	ssp->type = PXA25x_SSP;

	memset(&pi, 0, sizeof(pi));
	pi.parent = &dev->dev;
	pi.name = "pxa2xx-spi";
	pi.id = ssp->port_id;
	pi.data = &spi_pdata;
	pi.size_data = sizeof(spi_pdata);

	pdev = platform_device_register_full(&pi);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	pci_set_drvdata(dev, pdev);

	return 0;
}

static void ce4100_spi_remove(struct pci_dev *dev)
{
	struct platform_device *pdev = pci_get_drvdata(dev);

	platform_device_unregister(pdev);
}

static const struct pci_device_id ce4100_spi_devices[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) },
	{ },
};
MODULE_DEVICE_TABLE(pci, ce4100_spi_devices);

static struct pci_driver ce4100_spi_driver = {
	.name		= "ce4100_spi",
	.id_table	= ce4100_spi_devices,
	.probe		= ce4100_spi_probe,
	.remove		= ce4100_spi_remove,
};
module_pci_driver(ce4100_spi_driver);

MODULE_DESCRIPTION("CE4100 PCI-SPI glue code for PXA's driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
gpl-2.0
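A minimal sketch of the registration pattern ce4100_spi_probe() above relies on: because platform_device_info carries data together with size_data, platform_device_register_full() copies the platform data, so passing a stack-local structure (as the driver does with spi_pdata) is safe. The child name and payload type below are illustrative, not part of the driver.

#include <linux/platform_device.h>

struct child_pdata {		/* illustrative payload type */
	int port;
};

static struct platform_device *register_child_sketch(struct device *parent)
{
	struct child_pdata pdata = { .port = 0 };  /* stack copy is fine... */
	struct platform_device_info pi = {
		.parent    = parent,
		.name      = "example-child",      /* hypothetical name */
		.id        = pdata.port,
		.data      = &pdata,
		.size_data = sizeof(pdata), /* ...because the core copies it */
	};

	return platform_device_register_full(&pi);
}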
Twisted-Kernel/Sick-Twisted-Tmo
kernel/power/process.c
286
5882
/*
 * drivers/power/process.c - Functions for starting/stopping processes on
 *                           suspend transitions.
 *
 * Originally from swsusp.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/oom.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/kmod.h>
#include <linux/wakeup_reason.h>

/*
 * Timeout for stopping processes
 */
unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;

static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_msecs64;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;
#ifdef CONFIG_PM_SLEEP
	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
#endif

	do_gettimeofday(&start);

	end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			if (!freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
#ifdef CONFIG_PM_SLEEP
			pm_get_active_wakeup_sources(suspend_abort,
				MAX_SUSPEND_ABORT_LEN);
			log_suspend_abort_reason(suspend_abort);
#endif
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator. Start with an initial
		 * 1 ms sleep followed by exponential backoff until 8 ms.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}

	do_gettimeofday(&end);
	elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_msecs64, NSEC_PER_MSEC);
	elapsed_msecs = elapsed_msecs64;

	if (wakeup) {
		printk("\n");
		printk(KERN_ERR "Freezing of tasks aborted after %d.%03d seconds",
		       elapsed_msecs / 1000, elapsed_msecs % 1000);
	} else if (todo) {
		printk("\n");
		printk(KERN_ERR "Freezing of tasks failed after %d.%03d seconds"
		       " (%d tasks refusing to freeze, wq_busy=%d):\n",
		       elapsed_msecs / 1000, elapsed_msecs % 1000,
		       todo - wq_busy, wq_busy);

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p != current && !freezer_should_skip(p)
			    && freezing(p) && !frozen(p))
				sched_show_task(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	} else {
		printk("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
			elapsed_msecs % 1000);
	}

	return todo ? -EBUSY : 0;
}

/*
 * Returns true if all freezable tasks (except for current) are frozen already
 */
static bool check_frozen_processes(void)
{
	struct task_struct *g, *p;
	bool ret = true;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (p != current && !freezer_should_skip(p) &&
		    !frozen(p)) {
			ret = false;
			goto done;
		}
	}
done:
	read_unlock(&tasklist_lock);

	return ret;
}

/**
 * freeze_processes - Signal user space processes to enter the refrigerator.
 *
 * On success, returns 0. On failure, -errno and system is fully thawed.
 */
int freeze_processes(void)
{
	int error;
	int oom_kills_saved;

	error = __usermodehelper_disable(UMH_FREEZING);
	if (error)
		return error;

	if (!pm_freezing)
		atomic_inc(&system_freezing_cnt);

	printk("Freezing user space processes ... ");
	pm_freezing = true;
	oom_kills_saved = oom_kills_count();
	error = try_to_freeze_tasks(true);
	if (!error) {
		__usermodehelper_set_disable_depth(UMH_DISABLED);
		oom_killer_disable();

		/*
		 * There might have been an OOM kill while we were
		 * freezing tasks and the killed task might be still
		 * on the way out so we have to double check for race.
		 */
		if (oom_kills_count() != oom_kills_saved &&
		    !check_frozen_processes()) {
			__usermodehelper_set_disable_depth(UMH_ENABLED);
			printk("OOM in progress.");
			error = -EBUSY;
			goto done;
		}
		printk("done.");
	}
done:
	printk("\n");
	BUG_ON(in_atomic());

	if (error)
		thaw_processes();
	return error;
}

/**
 * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
 *
 * On success, returns 0. On failure, -errno and only the kernel threads are
 * thawed, so as to give a chance to the caller to do additional cleanups
 * (if any) before thawing the userspace tasks. So, it is the responsibility
 * of the caller to thaw the userspace tasks, when the time is right.
 */
int freeze_kernel_threads(void)
{
	int error;

	printk("Freezing remaining freezable tasks ... ");
	pm_nosig_freezing = true;
	error = try_to_freeze_tasks(false);
	if (!error)
		printk("done.");

	printk("\n");
	BUG_ON(in_atomic());

	if (error)
		thaw_kernel_threads();
	return error;
}

void thaw_processes(void)
{
	struct task_struct *g, *p;

	if (pm_freezing)
		atomic_dec(&system_freezing_cnt);
	pm_freezing = false;
	pm_nosig_freezing = false;

	oom_killer_enable();

	printk("Restarting tasks ... ");

	__usermodehelper_set_disable_depth(UMH_FREEZING);
	thaw_workqueues();

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		__thaw_task(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);

	usermodehelper_enable();

	schedule();
	printk("done.\n");
}

void thaw_kernel_threads(void)
{
	struct task_struct *g, *p;

	pm_nosig_freezing = false;
	printk("Restarting kernel threads ... ");

	thaw_workqueues();

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
			__thaw_task(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);

	schedule();
	printk("done.\n");
}
gpl-2.0
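A compact sketch of the retry backoff try_to_freeze_tasks() above uses: sleep roughly 0.5-1 ms at first so quick freezes stay quick, then double the interval up to a cap of 8 ms so a slow freeze does not burn CPU polling. tasks_left() and timed_out() are hypothetical predicates standing in for the todo count and the jiffies deadline in the real loop.

#include <linux/types.h>
#include <linux/time.h>
#include <linux/delay.h>

bool tasks_left(void);  /* hypothetical: "todo != 0" in the real loop */
bool timed_out(void);   /* hypothetical: time_after(jiffies, end_time) */

static void freeze_backoff_sketch(void)
{
	int sleep_usecs = USEC_PER_MSEC;   /* start around 1 ms */

	while (tasks_left() && !timed_out()) {
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;  /* exponential backoff, capped */
	}
}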
pikachukaki/android_kernel_huawei_honor
drivers/gpu/drm/i915/intel_dp.c
542
54271
/* * Copyright © 2008 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Authors: * Keith Packard <keithp@keithp.com> * */ #include <linux/i2c.h> #include <linux/slab.h> #include "drmP.h" #include "drm.h" #include "drm_crtc.h" #include "drm_crtc_helper.h" #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" #include "drm_dp_helper.h" #define DP_LINK_STATUS_SIZE 6 #define DP_LINK_CHECK_TIMEOUT (10 * 1000) #define DP_LINK_CONFIGURATION_SIZE 9 struct intel_dp { struct intel_encoder base; uint32_t output_reg; uint32_t DP; uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; bool has_audio; int force_audio; uint32_t color_range; uint8_t link_bw; uint8_t lane_count; uint8_t dpcd[4]; struct i2c_adapter adapter; struct i2c_algo_dp_aux_data algo; bool is_pch_edp; uint8_t train_set[4]; uint8_t link_status[DP_LINK_STATUS_SIZE]; }; /** * is_edp - is the given port attached to an eDP panel (either CPU or PCH) * @intel_dp: DP struct * * If a CPU or PCH DP output is attached to an eDP panel, this function * will return true, and false otherwise. */ static bool is_edp(struct intel_dp *intel_dp) { return intel_dp->base.type == INTEL_OUTPUT_EDP; } /** * is_pch_edp - is the port on the PCH and attached to an eDP panel? * @intel_dp: DP struct * * Returns true if the given DP struct corresponds to a PCH DP port attached * to an eDP panel, false otherwise. Helpful for determining whether we * may need FDI resources for a given DP output or not. */ static bool is_pch_edp(struct intel_dp *intel_dp) { return intel_dp->is_pch_edp; } static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) { return container_of(encoder, struct intel_dp, base.base); } static struct intel_dp *intel_attached_dp(struct drm_connector *connector) { return container_of(intel_attached_encoder(connector), struct intel_dp, base); } /** * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP? * @encoder: DRM encoder * * Return true if @encoder corresponds to a PCH attached eDP panel. Needed * by intel_display.c. 
*/ bool intel_encoder_is_pch_edp(struct drm_encoder *encoder) { struct intel_dp *intel_dp; if (!encoder) return false; intel_dp = enc_to_intel_dp(encoder); return is_pch_edp(intel_dp); } static void intel_dp_start_link_train(struct intel_dp *intel_dp); static void intel_dp_complete_link_train(struct intel_dp *intel_dp); static void intel_dp_link_down(struct intel_dp *intel_dp); void intel_edp_link_config (struct intel_encoder *intel_encoder, int *lane_num, int *link_bw) { struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); *lane_num = intel_dp->lane_count; if (intel_dp->link_bw == DP_LINK_BW_1_62) *link_bw = 162000; else if (intel_dp->link_bw == DP_LINK_BW_2_7) *link_bw = 270000; } static int intel_dp_max_lane_count(struct intel_dp *intel_dp) { int max_lane_count = 4; if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; switch (max_lane_count) { case 1: case 2: case 4: break; default: max_lane_count = 4; } } return max_lane_count; } static int intel_dp_max_link_bw(struct intel_dp *intel_dp) { int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; switch (max_link_bw) { case DP_LINK_BW_1_62: case DP_LINK_BW_2_7: break; default: max_link_bw = DP_LINK_BW_1_62; break; } return max_link_bw; } static int intel_dp_link_clock(uint8_t link_bw) { if (link_bw == DP_LINK_BW_2_7) return 270000; else return 162000; } /* I think this is a fiction */ static int intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock) { struct drm_i915_private *dev_priv = dev->dev_private; if (is_edp(intel_dp)) return (pixel_clock * dev_priv->edp.bpp + 7) / 8; else return pixel_clock * 3; } static int intel_dp_max_data_rate(int max_link_clock, int max_lanes) { return (max_link_clock * max_lanes * 8) / 10; } static int intel_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); int max_lanes = intel_dp_max_lane_count(intel_dp); if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) { if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay) return MODE_PANEL; if (mode->vdisplay > dev_priv->panel_fixed_mode->vdisplay) return MODE_PANEL; } /* only refuse the mode on non eDP since we have seen some weird eDP panels which are outside spec tolerances but somehow work by magic */ if (!is_edp(intel_dp) && (intel_dp_link_required(connector->dev, intel_dp, mode->clock) > intel_dp_max_data_rate(max_link_clock, max_lanes))) return MODE_CLOCK_HIGH; if (mode->clock < 10000) return MODE_CLOCK_LOW; return MODE_OK; } static uint32_t pack_aux(uint8_t *src, int src_bytes) { int i; uint32_t v = 0; if (src_bytes > 4) src_bytes = 4; for (i = 0; i < src_bytes; i++) v |= ((uint32_t) src[i]) << ((3-i) * 8); return v; } static void unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) { int i; if (dst_bytes > 4) dst_bytes = 4; for (i = 0; i < dst_bytes; i++) dst[i] = src >> ((3-i) * 8); } /* hrawclock is 1/4 the FSB frequency */ static int intel_hrawclk(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; uint32_t clkcfg; clkcfg = I915_READ(CLKCFG); switch (clkcfg & CLKCFG_FSB_MASK) { case CLKCFG_FSB_400: return 100; case CLKCFG_FSB_533: return 133; case CLKCFG_FSB_667: return 166; case CLKCFG_FSB_800: return 200; case CLKCFG_FSB_1067: return 266; case CLKCFG_FSB_1333: 
return 333; /* these two are just a guess; one of them might be right */ case CLKCFG_FSB_1600: case CLKCFG_FSB_1600_ALT: return 400; default: return 133; } } static int intel_dp_aux_ch(struct intel_dp *intel_dp, uint8_t *send, int send_bytes, uint8_t *recv, int recv_size) { uint32_t output_reg = intel_dp->output_reg; struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t ch_ctl = output_reg + 0x10; uint32_t ch_data = ch_ctl + 4; int i; int recv_bytes; uint32_t status; uint32_t aux_clock_divider; int try, precharge; /* The clock divider is based off the hrawclk, * and would like to run at 2MHz. So, take the * hrawclk value and divide by 2 and use that * * Note that PCH attached eDP panels should use a 125MHz input * clock divider. */ if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) { if (IS_GEN6(dev)) aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ else aux_clock_divider = 225; /* eDP input clock at 450Mhz */ } else if (HAS_PCH_SPLIT(dev)) aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */ else aux_clock_divider = intel_hrawclk(dev) / 2; if (IS_GEN6(dev)) precharge = 3; else precharge = 5; if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) { DRM_ERROR("dp_aux_ch not started status 0x%08x\n", I915_READ(ch_ctl)); return -EBUSY; } /* Must try at least 3 times according to DP spec */ for (try = 0; try < 5; try++) { /* Load the send data into the aux channel data registers */ for (i = 0; i < send_bytes; i += 4) I915_WRITE(ch_data + i, pack_aux(send + i, send_bytes - i)); /* Send the command and wait for it to complete */ I915_WRITE(ch_ctl, DP_AUX_CH_CTL_SEND_BUSY | DP_AUX_CH_CTL_TIME_OUT_400us | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | DP_AUX_CH_CTL_DONE | DP_AUX_CH_CTL_TIME_OUT_ERROR | DP_AUX_CH_CTL_RECEIVE_ERROR); for (;;) { status = I915_READ(ch_ctl); if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) break; udelay(100); } /* Clear done status and any errors */ I915_WRITE(ch_ctl, status | DP_AUX_CH_CTL_DONE | DP_AUX_CH_CTL_TIME_OUT_ERROR | DP_AUX_CH_CTL_RECEIVE_ERROR); if (status & DP_AUX_CH_CTL_DONE) break; } if ((status & DP_AUX_CH_CTL_DONE) == 0) { DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); return -EBUSY; } /* Check for timeout or receive error. 
* Timeouts occur when the sink is not connected */ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); return -EIO; } /* Timeouts occur when the device isn't connected, so they're * "normal" -- don't fill the kernel log with these */ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); return -ETIMEDOUT; } /* Unload any bytes sent back from the other side */ recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); if (recv_bytes > recv_size) recv_bytes = recv_size; for (i = 0; i < recv_bytes; i += 4) unpack_aux(I915_READ(ch_data + i), recv + i, recv_bytes - i); return recv_bytes; } /* Write data to the aux channel in native mode */ static int intel_dp_aux_native_write(struct intel_dp *intel_dp, uint16_t address, uint8_t *send, int send_bytes) { int ret; uint8_t msg[20]; int msg_bytes; uint8_t ack; if (send_bytes > 16) return -1; msg[0] = AUX_NATIVE_WRITE << 4; msg[1] = address >> 8; msg[2] = address & 0xff; msg[3] = send_bytes - 1; memcpy(&msg[4], send, send_bytes); msg_bytes = send_bytes + 4; for (;;) { ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); if (ret < 0) return ret; if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) break; else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) udelay(100); else return -EIO; } return send_bytes; } /* Write a single byte to the aux channel in native mode */ static int intel_dp_aux_native_write_1(struct intel_dp *intel_dp, uint16_t address, uint8_t byte) { return intel_dp_aux_native_write(intel_dp, address, &byte, 1); } /* read bytes from a native aux channel */ static int intel_dp_aux_native_read(struct intel_dp *intel_dp, uint16_t address, uint8_t *recv, int recv_bytes) { uint8_t msg[4]; int msg_bytes; uint8_t reply[20]; int reply_bytes; uint8_t ack; int ret; msg[0] = AUX_NATIVE_READ << 4; msg[1] = address >> 8; msg[2] = address & 0xff; msg[3] = recv_bytes - 1; msg_bytes = 4; reply_bytes = recv_bytes + 1; for (;;) { ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, reply, reply_bytes); if (ret == 0) return -EPROTO; if (ret < 0) return ret; ack = reply[0]; if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) { memcpy(recv, reply + 1, ret - 1); return ret - 1; } else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) udelay(100); else return -EIO; } } static int intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, uint8_t write_byte, uint8_t *read_byte) { struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; struct intel_dp *intel_dp = container_of(adapter, struct intel_dp, adapter); uint16_t address = algo_data->address; uint8_t msg[5]; uint8_t reply[2]; unsigned retry; int msg_bytes; int reply_bytes; int ret; /* Set up the command byte */ if (mode & MODE_I2C_READ) msg[0] = AUX_I2C_READ << 4; else msg[0] = AUX_I2C_WRITE << 4; if (!(mode & MODE_I2C_STOP)) msg[0] |= AUX_I2C_MOT << 4; msg[1] = address >> 8; msg[2] = address; switch (mode) { case MODE_I2C_WRITE: msg[3] = 0; msg[4] = write_byte; msg_bytes = 5; reply_bytes = 1; break; case MODE_I2C_READ: msg[3] = 0; msg_bytes = 4; reply_bytes = 2; break; default: msg_bytes = 3; reply_bytes = 1; break; } for (retry = 0; retry < 5; retry++) { ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, reply, reply_bytes); if (ret < 0) { DRM_DEBUG_KMS("aux_ch failed %d\n", ret); return ret; } switch (reply[0] & AUX_NATIVE_REPLY_MASK) { case AUX_NATIVE_REPLY_ACK: /* I2C-over-AUX Reply field is only valid * when paired with 
AUX ACK. */ break; case AUX_NATIVE_REPLY_NACK: DRM_DEBUG_KMS("aux_ch native nack\n"); return -EREMOTEIO; case AUX_NATIVE_REPLY_DEFER: udelay(100); continue; default: DRM_ERROR("aux_ch invalid native reply 0x%02x\n", reply[0]); return -EREMOTEIO; } switch (reply[0] & AUX_I2C_REPLY_MASK) { case AUX_I2C_REPLY_ACK: if (mode == MODE_I2C_READ) { *read_byte = reply[1]; } return reply_bytes - 1; case AUX_I2C_REPLY_NACK: DRM_DEBUG_KMS("aux_i2c nack\n"); return -EREMOTEIO; case AUX_I2C_REPLY_DEFER: DRM_DEBUG_KMS("aux_i2c defer\n"); udelay(100); break; default: DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); return -EREMOTEIO; } } DRM_ERROR("too many retries, giving up\n"); return -EREMOTEIO; } static int intel_dp_i2c_init(struct intel_dp *intel_dp, struct intel_connector *intel_connector, const char *name) { DRM_DEBUG_KMS("i2c_init %s\n", name); intel_dp->algo.running = false; intel_dp->algo.address = 0; intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch; memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter)); intel_dp->adapter.owner = THIS_MODULE; intel_dp->adapter.class = I2C_CLASS_DDC; strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; intel_dp->adapter.algo_data = &intel_dp->algo; intel_dp->adapter.dev.parent = &intel_connector->base.kdev; return i2c_dp_aux_add_bus(&intel_dp->adapter); } static bool intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); int lane_count, clock; int max_lane_count = intel_dp_max_lane_count(intel_dp); int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) { intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, mode, adjusted_mode); /* * the mode->clock is used to calculate the Data&Link M/N * of the pipe. For the eDP the fixed clock should be used. 
*/ mode->clock = dev_priv->panel_fixed_mode->clock; } for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { for (clock = 0; clock <= max_clock; clock++) { int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); if (intel_dp_link_required(encoder->dev, intel_dp, mode->clock) <= link_avail) { intel_dp->link_bw = bws[clock]; intel_dp->lane_count = lane_count; adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); DRM_DEBUG_KMS("Display port link bw %02x lane " "count %d clock %d\n", intel_dp->link_bw, intel_dp->lane_count, adjusted_mode->clock); return true; } } } if (is_edp(intel_dp)) { /* okay we failed just pick the highest */ intel_dp->lane_count = max_lane_count; intel_dp->link_bw = bws[max_clock]; adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); DRM_DEBUG_KMS("Force picking display port link bw %02x lane " "count %d clock %d\n", intel_dp->link_bw, intel_dp->lane_count, adjusted_mode->clock); return true; } return false; } struct intel_dp_m_n { uint32_t tu; uint32_t gmch_m; uint32_t gmch_n; uint32_t link_m; uint32_t link_n; }; static void intel_reduce_ratio(uint32_t *num, uint32_t *den) { while (*num > 0xffffff || *den > 0xffffff) { *num >>= 1; *den >>= 1; } } static void intel_dp_compute_m_n(int bpp, int nlanes, int pixel_clock, int link_clock, struct intel_dp_m_n *m_n) { m_n->tu = 64; m_n->gmch_m = (pixel_clock * bpp) >> 3; m_n->gmch_n = link_clock * nlanes; intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); m_n->link_m = pixel_clock; m_n->link_n = link_clock; intel_reduce_ratio(&m_n->link_m, &m_n->link_n); } void intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = crtc->dev; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_encoder *encoder; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int lane_count = 4, bpp = 24; struct intel_dp_m_n m_n; int pipe = intel_crtc->pipe; /* * Find the lane count in the intel_encoder private */ list_for_each_entry(encoder, &mode_config->encoder_list, head) { struct intel_dp *intel_dp; if (encoder->crtc != crtc) continue; intel_dp = enc_to_intel_dp(encoder); if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { lane_count = intel_dp->lane_count; break; } else if (is_edp(intel_dp)) { lane_count = dev_priv->edp.lanes; bpp = dev_priv->edp.bpp; break; } } /* * Compute the GMCH and Link ratios. The '3' here is * the number of bytes_per_pixel post-LUT, which we always * set up for 8-bits of R/G/B, or 3 bytes total. 
*/ intel_dp_compute_m_n(bpp, lane_count, mode->clock, adjusted_mode->clock, &m_n); if (HAS_PCH_SPLIT(dev)) { I915_WRITE(TRANSDATA_M1(pipe), ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | m_n.gmch_m); I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n); I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m); I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n); } else { I915_WRITE(PIPE_GMCH_DATA_M(pipe), ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | m_n.gmch_m); I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n); I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m); I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n); } } static void intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_crtc *crtc = intel_dp->base.base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; intel_dp->DP |= intel_dp->color_range; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) intel_dp->DP |= DP_SYNC_HS_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) intel_dp->DP |= DP_SYNC_VS_HIGH; if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; else intel_dp->DP |= DP_LINK_TRAIN_OFF; switch (intel_dp->lane_count) { case 1: intel_dp->DP |= DP_PORT_WIDTH_1; break; case 2: intel_dp->DP |= DP_PORT_WIDTH_2; break; case 4: intel_dp->DP |= DP_PORT_WIDTH_4; break; } if (intel_dp->has_audio) intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); intel_dp->link_configuration[0] = intel_dp->link_bw; intel_dp->link_configuration[1] = intel_dp->lane_count; /* * Check for DPCD version > 1.1 and enhanced framing support */ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; intel_dp->DP |= DP_ENHANCED_FRAMING; } /* CPT DP's pipe select is decided in TRANS_DP_CTL */ if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) intel_dp->DP |= DP_PIPEB_SELECT; if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) { /* don't miss out required setting for eDP */ intel_dp->DP |= DP_PLL_ENABLE; if (adjusted_mode->clock < 200000) intel_dp->DP |= DP_PLL_FREQ_160MHZ; else intel_dp->DP |= DP_PLL_FREQ_270MHZ; } } static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; /* * If the panel wasn't on, make sure there's not a currently * active PP sequence before enabling AUX VDD. 
*/ if (!(I915_READ(PCH_PP_STATUS) & PP_ON)) msleep(dev_priv->panel_t3); pp = I915_READ(PCH_PP_CONTROL); pp |= EDP_FORCE_VDD; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); } static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; pp = I915_READ(PCH_PP_CONTROL); pp &= ~EDP_FORCE_VDD; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); /* Make sure sequencer is idle before allowing subsequent activity */ msleep(dev_priv->panel_t12); } /* Returns true if the panel was already on when called */ static bool ironlake_edp_panel_on (struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE; if (I915_READ(PCH_PP_STATUS) & PP_ON) return true; pp = I915_READ(PCH_PP_CONTROL); /* ILK workaround: disable reset around power sequence */ pp &= ~PANEL_POWER_RESET; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask, 5000)) DRM_ERROR("panel on wait timed out: 0x%08x\n", I915_READ(PCH_PP_STATUS)); pp |= PANEL_POWER_RESET; /* restore panel reset bit */ I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); return false; } static void ironlake_edp_panel_off (struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK; pp = I915_READ(PCH_PP_CONTROL); /* ILK workaround: disable reset around power sequence */ pp &= ~PANEL_POWER_RESET; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); pp &= ~POWER_TARGET_ON; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000)) DRM_ERROR("panel off wait timed out: 0x%08x\n", I915_READ(PCH_PP_STATUS)); pp |= PANEL_POWER_RESET; /* restore panel reset bit */ I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); } static void ironlake_edp_backlight_on (struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; DRM_DEBUG_KMS("\n"); /* * If we enable the backlight right away following a panel power * on, we may see slight flicker as the panel syncs with the eDP * link. So delay a bit to make sure the image is solid before * allowing it to appear. 
*/ msleep(300); pp = I915_READ(PCH_PP_CONTROL); pp |= EDP_BLC_ENABLE; I915_WRITE(PCH_PP_CONTROL, pp); } static void ironlake_edp_backlight_off (struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; DRM_DEBUG_KMS("\n"); pp = I915_READ(PCH_PP_CONTROL); pp &= ~EDP_BLC_ENABLE; I915_WRITE(PCH_PP_CONTROL, pp); } static void ironlake_edp_pll_on(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 dpa_ctl; DRM_DEBUG_KMS("\n"); dpa_ctl = I915_READ(DP_A); dpa_ctl |= DP_PLL_ENABLE; I915_WRITE(DP_A, dpa_ctl); POSTING_READ(DP_A); udelay(200); } static void ironlake_edp_pll_off(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 dpa_ctl; dpa_ctl = I915_READ(DP_A); dpa_ctl &= ~DP_PLL_ENABLE; I915_WRITE(DP_A, dpa_ctl); POSTING_READ(DP_A); udelay(200); } /* If the sink supports it, try to set the power state appropriately */ static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) { int ret, i; /* Should have a valid DPCD by this point */ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) return; if (mode != DRM_MODE_DPMS_ON) { ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER, DP_SET_POWER_D3); if (ret != 1) DRM_DEBUG_DRIVER("failed to write sink power state\n"); } else { /* * When turning on, we need to retry for 1ms to give the sink * time to wake up. */ for (i = 0; i < 3; i++) { ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER, DP_SET_POWER_D0); if (ret == 1) break; msleep(1); } } } static void intel_dp_prepare(struct drm_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_device *dev = encoder->dev; /* Wake up the sink first */ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); if (is_edp(intel_dp)) { ironlake_edp_backlight_off(dev); ironlake_edp_panel_off(dev); if (!is_pch_edp(intel_dp)) ironlake_edp_pll_on(encoder); else ironlake_edp_pll_off(encoder); } intel_dp_link_down(intel_dp); } static void intel_dp_commit(struct drm_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_device *dev = encoder->dev; if (is_edp(intel_dp)) ironlake_edp_panel_vdd_on(intel_dp); intel_dp_start_link_train(intel_dp); if (is_edp(intel_dp)) { ironlake_edp_panel_on(intel_dp); ironlake_edp_panel_vdd_off(intel_dp); } intel_dp_complete_link_train(intel_dp); if (is_edp(intel_dp)) ironlake_edp_backlight_on(dev); } static void intel_dp_dpms(struct drm_encoder *encoder, int mode) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t dp_reg = I915_READ(intel_dp->output_reg); if (mode != DRM_MODE_DPMS_ON) { if (is_edp(intel_dp)) ironlake_edp_backlight_off(dev); intel_dp_sink_dpms(intel_dp, mode); intel_dp_link_down(intel_dp); if (is_edp(intel_dp)) ironlake_edp_panel_off(dev); if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) ironlake_edp_pll_off(encoder); } else { if (is_edp(intel_dp)) ironlake_edp_panel_vdd_on(intel_dp); intel_dp_sink_dpms(intel_dp, mode); if (!(dp_reg & DP_PORT_EN)) { intel_dp_start_link_train(intel_dp); if (is_edp(intel_dp)) { ironlake_edp_panel_on(intel_dp); ironlake_edp_panel_vdd_off(intel_dp); } intel_dp_complete_link_train(intel_dp); } if (is_edp(intel_dp)) ironlake_edp_backlight_on(dev); } } /* * Native read with retry for link status and receiver capability reads for * cases where the sink may still be asleep. 
*/ static bool intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, uint8_t *recv, int recv_bytes) { int ret, i; /* * Sinks are *supposed* to come up within 1ms from an off state, * but we're also supposed to retry 3 times per the spec. */ for (i = 0; i < 3; i++) { ret = intel_dp_aux_native_read(intel_dp, address, recv, recv_bytes); if (ret == recv_bytes) return true; msleep(1); } return false; } /* * Fetch AUX CH registers 0x202 - 0x207 which contain * link status information */ static bool intel_dp_get_link_status(struct intel_dp *intel_dp) { return intel_dp_aux_native_read_retry(intel_dp, DP_LANE0_1_STATUS, intel_dp->link_status, DP_LINK_STATUS_SIZE); } static uint8_t intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], int r) { return link_status[r - DP_LANE0_1_STATUS]; } static uint8_t intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane) { int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); int s = ((lane & 1) ? DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); uint8_t l = intel_dp_link_status(link_status, i); return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; } static uint8_t intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane) { int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); int s = ((lane & 1) ? DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); uint8_t l = intel_dp_link_status(link_status, i); return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; } #if 0 static char *voltage_names[] = { "0.4V", "0.6V", "0.8V", "1.2V" }; static char *pre_emph_names[] = { "0dB", "3.5dB", "6dB", "9.5dB" }; static char *link_train_names[] = { "pattern 1", "pattern 2", "idle", "off" }; #endif /* * These are source-specific values; current Intel hardware supports * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB */ #define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800 static uint8_t intel_dp_pre_emphasis_max(uint8_t voltage_swing) { switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_400: return DP_TRAIN_PRE_EMPHASIS_6; case DP_TRAIN_VOLTAGE_SWING_600: return DP_TRAIN_PRE_EMPHASIS_6; case DP_TRAIN_VOLTAGE_SWING_800: return DP_TRAIN_PRE_EMPHASIS_3_5; case DP_TRAIN_VOLTAGE_SWING_1200: default: return DP_TRAIN_PRE_EMPHASIS_0; } } static void intel_get_adjust_train(struct intel_dp *intel_dp) { uint8_t v = 0; uint8_t p = 0; int lane; for (lane = 0; lane < intel_dp->lane_count; lane++) { uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane); uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane); if (this_v > v) v = this_v; if (this_p > p) p = this_p; } if (v >= I830_DP_VOLTAGE_MAX) v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED; if (p >= intel_dp_pre_emphasis_max(v)) p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; for (lane = 0; lane < 4; lane++) intel_dp->train_set[lane] = v | p; } static uint32_t intel_dp_signal_levels(uint8_t train_set, int lane_count) { uint32_t signal_levels = 0; switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_400: default: signal_levels |= DP_VOLTAGE_0_4; break; case DP_TRAIN_VOLTAGE_SWING_600: signal_levels |= DP_VOLTAGE_0_6; break; case DP_TRAIN_VOLTAGE_SWING_800: signal_levels |= DP_VOLTAGE_0_8; break; case DP_TRAIN_VOLTAGE_SWING_1200: signal_levels |= DP_VOLTAGE_1_2; break; } switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { case DP_TRAIN_PRE_EMPHASIS_0: default: 
signal_levels |= DP_PRE_EMPHASIS_0; break; case DP_TRAIN_PRE_EMPHASIS_3_5: signal_levels |= DP_PRE_EMPHASIS_3_5; break; case DP_TRAIN_PRE_EMPHASIS_6: signal_levels |= DP_PRE_EMPHASIS_6; break; case DP_TRAIN_PRE_EMPHASIS_9_5: signal_levels |= DP_PRE_EMPHASIS_9_5; break; } return signal_levels; } /* Gen6's DP voltage swing and pre-emphasis control */ static uint32_t intel_gen6_edp_signal_levels(uint8_t train_set) { int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | DP_TRAIN_PRE_EMPHASIS_MASK); switch (signal_levels) { case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; default: DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" "0x%x\n", signal_levels); return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; } } static uint8_t intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane) { int i = DP_LANE0_1_STATUS + (lane >> 1); int s = (lane & 1) * 4; uint8_t l = intel_dp_link_status(link_status, i); return (l >> s) & 0xf; } /* Check for clock recovery is done on all channels */ static bool intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) { int lane; uint8_t lane_status; for (lane = 0; lane < lane_count; lane++) { lane_status = intel_get_lane_status(link_status, lane); if ((lane_status & DP_LANE_CR_DONE) == 0) return false; } return true; } /* Check to see if channel eq is done on all channels */ #define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\ DP_LANE_CHANNEL_EQ_DONE|\ DP_LANE_SYMBOL_LOCKED) static bool intel_channel_eq_ok(struct intel_dp *intel_dp) { uint8_t lane_align; uint8_t lane_status; int lane; lane_align = intel_dp_link_status(intel_dp->link_status, DP_LANE_ALIGN_STATUS_UPDATED); if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) return false; for (lane = 0; lane < intel_dp->lane_count; lane++) { lane_status = intel_get_lane_status(intel_dp->link_status, lane); if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) return false; } return true; } static bool intel_dp_set_link_train(struct intel_dp *intel_dp, uint32_t dp_reg_value, uint8_t dp_train_pat) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; int ret; I915_WRITE(intel_dp->output_reg, dp_reg_value); POSTING_READ(intel_dp->output_reg); intel_dp_aux_native_write_1(intel_dp, DP_TRAINING_PATTERN_SET, dp_train_pat); ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET, intel_dp->train_set, 4); if (ret != 4) return false; return true; } /* Enable corresponding port and start training pattern 1 */ static void intel_dp_start_link_train(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); int i; uint8_t voltage; bool clock_recovery = false; int tries; u32 reg; uint32_t DP = 
intel_dp->DP; /* Enable output, wait for it to become active */ I915_WRITE(intel_dp->output_reg, intel_dp->DP); POSTING_READ(intel_dp->output_reg); intel_wait_for_vblank(dev, intel_crtc->pipe); /* Write the link configuration data */ intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, intel_dp->link_configuration, DP_LINK_CONFIGURATION_SIZE); DP |= DP_PORT_EN; if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) DP &= ~DP_LINK_TRAIN_MASK_CPT; else DP &= ~DP_LINK_TRAIN_MASK; memset(intel_dp->train_set, 0, 4); voltage = 0xff; tries = 0; clock_recovery = false; for (;;) { /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ uint32_t signal_levels; if (IS_GEN6(dev) && is_edp(intel_dp)) { signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) reg = DP | DP_LINK_TRAIN_PAT_1_CPT; else reg = DP | DP_LINK_TRAIN_PAT_1; if (!intel_dp_set_link_train(intel_dp, reg, DP_TRAINING_PATTERN_1)) break; /* Set training pattern 1 */ udelay(100); if (!intel_dp_get_link_status(intel_dp)) break; if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { clock_recovery = true; break; } /* Check to see if we've tried the max voltage */ for (i = 0; i < intel_dp->lane_count; i++) if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) break; if (i == intel_dp->lane_count) break; /* Check to see if we've tried the same voltage 5 times */ if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { ++tries; if (tries == 5) break; } else tries = 0; voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; /* Compute new intel_dp->train_set as requested by target */ intel_get_adjust_train(intel_dp); } intel_dp->DP = DP; } static void intel_dp_complete_link_train(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; bool channel_eq = false; int tries, cr_tries; u32 reg; uint32_t DP = intel_dp->DP; /* channel equalization */ tries = 0; cr_tries = 0; channel_eq = false; for (;;) { /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ uint32_t signal_levels; if (cr_tries > 5) { DRM_ERROR("failed to train DP, aborting\n"); intel_dp_link_down(intel_dp); break; } if (IS_GEN6(dev) && is_edp(intel_dp)) { signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) reg = DP | DP_LINK_TRAIN_PAT_2_CPT; else reg = DP | DP_LINK_TRAIN_PAT_2; /* channel eq pattern */ if (!intel_dp_set_link_train(intel_dp, reg, DP_TRAINING_PATTERN_2)) break; udelay(400); if (!intel_dp_get_link_status(intel_dp)) break; /* Make sure clock is still ok */ if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { intel_dp_start_link_train(intel_dp); cr_tries++; continue; } if (intel_channel_eq_ok(intel_dp)) { channel_eq = true; break; } /* Try 5 times, then try clock recovery if that fails */ if (tries > 5) { intel_dp_link_down(intel_dp); intel_dp_start_link_train(intel_dp); tries = 0; cr_tries++; continue; } /* Compute new 
intel_dp->train_set as requested by target */ intel_get_adjust_train(intel_dp); ++tries; } if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) reg = DP | DP_LINK_TRAIN_OFF_CPT; else reg = DP | DP_LINK_TRAIN_OFF; I915_WRITE(intel_dp->output_reg, reg); POSTING_READ(intel_dp->output_reg); intel_dp_aux_native_write_1(intel_dp, DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); } static void intel_dp_link_down(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t DP = intel_dp->DP; if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0) return; DRM_DEBUG_KMS("\n"); if (is_edp(intel_dp)) { DP &= ~DP_PLL_ENABLE; I915_WRITE(intel_dp->output_reg, DP); POSTING_READ(intel_dp->output_reg); udelay(100); } if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) { DP &= ~DP_LINK_TRAIN_MASK_CPT; I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); } else { DP &= ~DP_LINK_TRAIN_MASK; I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); } POSTING_READ(intel_dp->output_reg); msleep(17); if (is_edp(intel_dp)) DP |= DP_LINK_TRAIN_OFF; if (!HAS_PCH_CPT(dev) && I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { struct drm_crtc *crtc = intel_dp->base.base.crtc; /* Hardware workaround: leaving our transcoder select * set to transcoder B while it's off will prevent the * corresponding HDMI output on transcoder A. * * Combine this with another hardware workaround: * transcoder select bit can only be cleared while the * port is enabled. */ DP &= ~DP_PIPEB_SELECT; I915_WRITE(intel_dp->output_reg, DP); /* Changes to enable or select take place the vblank * after being written. */ if (crtc == NULL) { /* We can arrive here never having been attached * to a CRTC, for instance, due to inheriting * random state from the BIOS. * * If the pipe is not running, play safe and * wait for the clocks to stabilise before * continuing. */ POSTING_READ(intel_dp->output_reg); msleep(50); } else intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); } I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); POSTING_READ(intel_dp->output_reg); } /* * According to DP spec * 5.1.2: * 1. Read DPCD * 2. Configure link according to Receiver Capabilities * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 * 4. Check link status on receipt of hot-plug interrupt */ static void intel_dp_check_link_status(struct intel_dp *intel_dp) { int ret; if (!intel_dp->base.base.crtc) return; if (!intel_dp_get_link_status(intel_dp)) { intel_dp_link_down(intel_dp); return; } /* Try to read receiver status if the link appears to be up */ ret = intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd, sizeof (intel_dp->dpcd)); if (ret != sizeof(intel_dp->dpcd)) { intel_dp_link_down(intel_dp); return; } if (!intel_channel_eq_ok(intel_dp)) { intel_dp_start_link_train(intel_dp); intel_dp_complete_link_train(intel_dp); } } static enum drm_connector_status ironlake_dp_detect(struct intel_dp *intel_dp) { enum drm_connector_status status; bool ret; /* Can't disconnect eDP, but you can close the lid... 
*/ if (is_edp(intel_dp)) { status = intel_panel_detect(intel_dp->base.base.dev); if (status == connector_status_unknown) status = connector_status_connected; return status; } status = connector_status_disconnected; ret = intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, sizeof (intel_dp->dpcd)); if (ret && intel_dp->dpcd[DP_DPCD_REV] != 0) status = connector_status_connected; DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]); return status; } static enum drm_connector_status g4x_dp_detect(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; enum drm_connector_status status; uint32_t temp, bit; switch (intel_dp->output_reg) { case DP_B: bit = DPB_HOTPLUG_INT_STATUS; break; case DP_C: bit = DPC_HOTPLUG_INT_STATUS; break; case DP_D: bit = DPD_HOTPLUG_INT_STATUS; break; default: return connector_status_unknown; } temp = I915_READ(PORT_HOTPLUG_STAT); if ((temp & bit) == 0) return connector_status_disconnected; status = connector_status_disconnected; if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd, sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) { if (intel_dp->dpcd[DP_DPCD_REV] != 0) status = connector_status_connected; } return status; } /** * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. * * \return true if DP port is connected. * \return false if DP port is disconnected. */ static enum drm_connector_status intel_dp_detect(struct drm_connector *connector, bool force) { struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_device *dev = intel_dp->base.base.dev; enum drm_connector_status status; struct edid *edid = NULL; intel_dp->has_audio = false; if (HAS_PCH_SPLIT(dev)) status = ironlake_dp_detect(intel_dp); else status = g4x_dp_detect(intel_dp); if (status != connector_status_connected) return status; if (intel_dp->force_audio) { intel_dp->has_audio = intel_dp->force_audio > 0; } else { edid = drm_get_edid(connector, &intel_dp->adapter); if (edid) { intel_dp->has_audio = drm_detect_monitor_audio(edid); connector->display_info.raw_edid = NULL; kfree(edid); } } return connector_status_connected; } static int intel_dp_get_modes(struct drm_connector *connector) { struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; int ret; /* We should parse the EDID data and find out if it has an audio sink */ ret = intel_ddc_get_modes(connector, &intel_dp->adapter); if (ret) { if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) { struct drm_display_mode *newmode; list_for_each_entry(newmode, &connector->probed_modes, head) { if (newmode->type & DRM_MODE_TYPE_PREFERRED) { dev_priv->panel_fixed_mode = drm_mode_duplicate(dev, newmode); break; } } } return ret; } /* if eDP has no EDID, try to use fixed panel mode from VBT */ if (is_edp(intel_dp)) { if (dev_priv->panel_fixed_mode != NULL) { struct drm_display_mode *mode; mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); drm_mode_probed_add(connector, mode); return 1; } } return 0; } static bool intel_dp_detect_audio(struct drm_connector *connector) { struct intel_dp *intel_dp = intel_attached_dp(connector); struct edid *edid; bool has_audio = false; edid = drm_get_edid(connector, &intel_dp->adapter); if (edid) { has_audio = drm_detect_monitor_audio(edid); connector->display_info.raw_edid = NULL; kfree(edid); } return has_audio; } static 
int intel_dp_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val) { struct drm_i915_private *dev_priv = connector->dev->dev_private; struct intel_dp *intel_dp = intel_attached_dp(connector); int ret; ret = drm_connector_property_set_value(connector, property, val); if (ret) return ret; if (property == dev_priv->force_audio_property) { int i = val; bool has_audio; if (i == intel_dp->force_audio) return 0; intel_dp->force_audio = i; if (i == 0) has_audio = intel_dp_detect_audio(connector); else has_audio = i > 0; if (has_audio == intel_dp->has_audio) return 0; intel_dp->has_audio = has_audio; goto done; } if (property == dev_priv->broadcast_rgb_property) { if (val == !!intel_dp->color_range) return 0; intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0; goto done; } return -EINVAL; done: if (intel_dp->base.base.crtc) { struct drm_crtc *crtc = intel_dp->base.base.crtc; drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); } return 0; } static void intel_dp_destroy (struct drm_connector *connector) { drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(connector); } static void intel_dp_encoder_destroy(struct drm_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); i2c_del_adapter(&intel_dp->adapter); drm_encoder_cleanup(encoder); kfree(intel_dp); } static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { .dpms = intel_dp_dpms, .mode_fixup = intel_dp_mode_fixup, .prepare = intel_dp_prepare, .mode_set = intel_dp_mode_set, .commit = intel_dp_commit, }; static const struct drm_connector_funcs intel_dp_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = intel_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_dp_set_property, .destroy = intel_dp_destroy, }; static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { .get_modes = intel_dp_get_modes, .mode_valid = intel_dp_mode_valid, .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_dp_enc_funcs = { .destroy = intel_dp_encoder_destroy, }; static void intel_dp_hot_plug(struct intel_encoder *intel_encoder) { struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); intel_dp_check_link_status(intel_dp); } /* Return which DP Port should be selected for Transcoder DP control */ int intel_trans_dp_port_sel (struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_encoder *encoder; list_for_each_entry(encoder, &mode_config->encoder_list, head) { struct intel_dp *intel_dp; if (encoder->crtc != crtc) continue; intel_dp = enc_to_intel_dp(encoder); if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) return intel_dp->output_reg; } return -1; } /* check the VBT to see whether the eDP is on DP-D port */ bool intel_dpd_is_edp(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct child_device_config *p_child; int i; if (!dev_priv->child_dev_num) return false; for (i = 0; i < dev_priv->child_dev_num; i++) { p_child = dev_priv->child_dev + i; if (p_child->dvo_port == PORT_IDPD && p_child->device_type == DEVICE_TYPE_eDP) return true; } return false; } static void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) { intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); } void intel_dp_init(struct drm_device *dev, int output_reg) { struct 
drm_i915_private *dev_priv = dev->dev_private; struct drm_connector *connector; struct intel_dp *intel_dp; struct intel_encoder *intel_encoder; struct intel_connector *intel_connector; const char *name = NULL; int type; intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL); if (!intel_dp) return; intel_dp->output_reg = output_reg; intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); if (!intel_connector) { kfree(intel_dp); return; } intel_encoder = &intel_dp->base; if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D) if (intel_dpd_is_edp(dev)) intel_dp->is_pch_edp = true; if (output_reg == DP_A || is_pch_edp(intel_dp)) { type = DRM_MODE_CONNECTOR_eDP; intel_encoder->type = INTEL_OUTPUT_EDP; } else { type = DRM_MODE_CONNECTOR_DisplayPort; intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; } connector = &intel_connector->base; drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); connector->polled = DRM_CONNECTOR_POLL_HPD; if (output_reg == DP_B || output_reg == PCH_DP_B) intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); else if (output_reg == DP_C || output_reg == PCH_DP_C) intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); else if (output_reg == DP_D || output_reg == PCH_DP_D) intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); if (is_edp(intel_dp)) intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); intel_encoder->crtc_mask = (1 << 0) | (1 << 1); connector->interlace_allowed = true; connector->doublescan_allowed = 0; drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); intel_connector_attach_encoder(intel_connector, intel_encoder); drm_sysfs_connector_add(connector); /* Set up the DDC bus. 
*/ switch (output_reg) { case DP_A: name = "DPDDC-A"; break; case DP_B: case PCH_DP_B: dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; name = "DPDDC-B"; break; case DP_C: case PCH_DP_C: dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; name = "DPDDC-C"; break; case DP_D: case PCH_DP_D: dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; name = "DPDDC-D"; break; } intel_dp_i2c_init(intel_dp, intel_connector, name); /* Cache some DPCD data in the eDP case */ if (is_edp(intel_dp)) { int ret; u32 pp_on, pp_div; pp_on = I915_READ(PCH_PP_ON_DELAYS); pp_div = I915_READ(PCH_PP_DIVISOR); /* Get T3 & T12 values (note: VESA not bspec terminology) */ dev_priv->panel_t3 = (pp_on & 0x1fff0000) >> 16; dev_priv->panel_t3 /= 10; /* t3 in 100us units */ dev_priv->panel_t12 = pp_div & 0xf; dev_priv->panel_t12 *= 100; /* t12 in 100ms units */ ironlake_edp_panel_vdd_on(intel_dp); ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV, intel_dp->dpcd, sizeof(intel_dp->dpcd)); ironlake_edp_panel_vdd_off(intel_dp); if (ret == sizeof(intel_dp->dpcd)) { if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING; } else { /* if this fails, presume the device is a ghost */ DRM_INFO("failed to retrieve link info, disabling eDP\n"); intel_dp_encoder_destroy(&intel_dp->base.base); intel_dp_destroy(&intel_connector->base); return; } } intel_encoder->hot_plug = intel_dp_hot_plug; if (is_edp(intel_dp)) { /* initialize panel mode from VBT if available for eDP */ if (dev_priv->lfp_lvds_vbt_mode) { dev_priv->panel_fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); if (dev_priv->panel_fixed_mode) { dev_priv->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; } } } intel_dp_add_properties(intel_dp, connector); /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written * 0xd. Failure to do so will result in spurious interrupts being * generated on the port when a cable is not attached. */ if (IS_G4X(dev) && !IS_GM45(dev)) { u32 temp = I915_READ(PEG_BAND_GAP_DATA); I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); } }
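/*
 * A minimal stand-alone sketch of the clock-recovery retry policy that
 * intel_dp_start_link_train() implements above: keep retraining while the
 * sink requests a different voltage swing, and give up after five attempts
 * at the same swing or once maximum swing is reached.  This models a single
 * lane only; fake_sink_request() is a hypothetical stand-in for reading the
 * sink's DPCD adjust-request fields, and the two mask values merely mirror
 * the driver's.  It compiles with any hosted C compiler and is a model of
 * the loop's exit conditions, not driver code.
 */
#include <stdint.h>
#include <stdio.h>

#define SWING_MASK         0x3		/* mirrors DP_TRAIN_VOLTAGE_SWING_MASK */
#define MAX_SWING_REACHED  (1 << 2)	/* mirrors DP_TRAIN_MAX_SWING_REACHED */

/* Hypothetical sink: asks for one level more swing until it hits max. */
static uint8_t fake_sink_request(uint8_t cur)
{
	uint8_t swing = cur & SWING_MASK;

	return swing < 3 ? (uint8_t)(swing + 1)
			 : (uint8_t)(3 | MAX_SWING_REACHED);
}

int main(void)
{
	uint8_t train = 0, voltage = 0xff;
	int tries = 0;

	for (;;) {
		/* Stop once the sink reports maximum swing ... */
		if (train & MAX_SWING_REACHED)
			break;
		/* ... or after five tries at the same voltage. */
		if ((train & SWING_MASK) == voltage) {
			if (++tries == 5)
				break;
		} else {
			tries = 0;
		}
		voltage = train & SWING_MASK;
		train = fake_sink_request(train);
		printf("retrain at swing level %u\n", train & SWING_MASK);
	}
	return 0;
}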
gpl-2.0
DashBlacK/samsung-kernel-dart.old
drivers/ata/sata_sx4.c
542
40636
/* * sata_sx4.c - Promise SATA * * Maintained by: Jeff Garzik <jgarzik@pobox.com> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * * Copyright 2003-2004 Red Hat, Inc. * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * * * libata documentation is available via 'make {ps|pdf}docs', * as Documentation/DocBook/libata.* * * Hardware documentation available under NDA. * */ /* Theory of operation ------------------- The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy engine, DIMM memory, and four ATA engines (one per SATA port). Data is copied to/from DIMM memory by the HDMA engine, before handing off to one (or more) of the ATA engines. The ATA engines operate solely on DIMM memory. The SX4 behaves like a PATA chip, with no SATA controls or knowledge whatsoever, leading to the presumption that PATA<->SATA bridges exist on SX4 boards, external to the PDC20621 chip itself. The chip is quite capable, supporting an XOR engine and linked hardware commands (permits a string to transactions to be submitted and waited-on as a single unit), and an optional microprocessor. The limiting factor is largely software. This Linux driver was written to multiplex the single HDMA engine to copy disk transactions into a fixed DIMM memory space, from where an ATA engine takes over. As a result, each WRITE looks like this: submit HDMA packet to hardware hardware copies data from system memory to DIMM hardware raises interrupt submit ATA packet to hardware hardware executes ATA WRITE command, w/ data in DIMM hardware raises interrupt and each READ looks like this: submit ATA packet to hardware hardware executes ATA READ command, w/ data in DIMM hardware raises interrupt submit HDMA packet to hardware hardware copies data from DIMM to system memory hardware raises interrupt This is a very slow, lock-step way of doing things that can certainly be improved by motivated kernel hackers. 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <linux/libata.h> #include "sata_promise.h" #define DRV_NAME "sata_sx4" #define DRV_VERSION "0.12" enum { PDC_MMIO_BAR = 3, PDC_DIMM_BAR = 4, PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */ PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */ PDC_HDMA_PKT_SUBMIT = 0x100, /* Host DMA packet pointer addr */ PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */ PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */ PDC_CTLSTAT = 0x60, /* IDEn control / status */ PDC_20621_SEQCTL = 0x400, PDC_20621_SEQMASK = 0x480, PDC_20621_GENERAL_CTL = 0x484, PDC_20621_PAGE_SIZE = (32 * 1024), /* chosen, not constant, values; we design our own DIMM mem map */ PDC_20621_DIMM_WINDOW = 0x0C, /* page# for 32K DIMM window */ PDC_20621_DIMM_BASE = 0x00200000, PDC_20621_DIMM_DATA = (64 * 1024), PDC_DIMM_DATA_STEP = (256 * 1024), PDC_DIMM_WINDOW_STEP = (8 * 1024), PDC_DIMM_HOST_PRD = (6 * 1024), PDC_DIMM_HOST_PKT = (128 * 0), PDC_DIMM_HPKT_PRD = (128 * 1), PDC_DIMM_ATA_PKT = (128 * 2), PDC_DIMM_APKT_PRD = (128 * 3), PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128, PDC_PAGE_WINDOW = 0x40, PDC_PAGE_DATA = PDC_PAGE_WINDOW + (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE), PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE, PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */ PDC_20621_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) | (1<<23), board_20621 = 0, /* FastTrak S150 SX4 */ PDC_MASK_INT = (1 << 10), /* HDMA/ATA mask int */ PDC_RESET = (1 << 11), /* HDMA/ATA reset */ PDC_DMA_ENABLE = (1 << 7), /* DMA start/stop */ PDC_MAX_HDMA = 32, PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1), PDC_DIMM0_SPD_DEV_ADDRESS = 0x50, PDC_DIMM1_SPD_DEV_ADDRESS = 0x51, PDC_I2C_CONTROL = 0x48, PDC_I2C_ADDR_DATA = 0x4C, PDC_DIMM0_CONTROL = 0x80, PDC_DIMM1_CONTROL = 0x84, PDC_SDRAM_CONTROL = 0x88, PDC_I2C_WRITE = 0, /* master -> slave */ PDC_I2C_READ = (1 << 6), /* master <- slave */ PDC_I2C_START = (1 << 7), /* start I2C proto */ PDC_I2C_MASK_INT = (1 << 5), /* mask I2C interrupt */ PDC_I2C_COMPLETE = (1 << 16), /* I2C normal compl. 
*/ PDC_I2C_NO_ACK = (1 << 20), /* slave no-ack addr */ PDC_DIMM_SPD_SUBADDRESS_START = 0x00, PDC_DIMM_SPD_SUBADDRESS_END = 0x7F, PDC_DIMM_SPD_ROW_NUM = 3, PDC_DIMM_SPD_COLUMN_NUM = 4, PDC_DIMM_SPD_MODULE_ROW = 5, PDC_DIMM_SPD_TYPE = 11, PDC_DIMM_SPD_FRESH_RATE = 12, PDC_DIMM_SPD_BANK_NUM = 17, PDC_DIMM_SPD_CAS_LATENCY = 18, PDC_DIMM_SPD_ATTRIBUTE = 21, PDC_DIMM_SPD_ROW_PRE_CHARGE = 27, PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28, PDC_DIMM_SPD_RAS_CAS_DELAY = 29, PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30, PDC_DIMM_SPD_SYSTEM_FREQ = 126, PDC_CTL_STATUS = 0x08, PDC_DIMM_WINDOW_CTLR = 0x0C, PDC_TIME_CONTROL = 0x3C, PDC_TIME_PERIOD = 0x40, PDC_TIME_COUNTER = 0x44, PDC_GENERAL_CTLR = 0x484, PCI_PLL_INIT = 0x8A531824, PCI_X_TCOUNT = 0xEE1E5CFF, /* PDC_TIME_CONTROL bits */ PDC_TIMER_BUZZER = (1 << 10), PDC_TIMER_MODE_PERIODIC = 0, /* bits 9:8 == 00 */ PDC_TIMER_MODE_ONCE = (1 << 8), /* bits 9:8 == 01 */ PDC_TIMER_ENABLE = (1 << 7), PDC_TIMER_MASK_INT = (1 << 5), PDC_TIMER_SEQ_MASK = 0x1f, /* SEQ ID for timer */ PDC_TIMER_DEFAULT = PDC_TIMER_MODE_ONCE | PDC_TIMER_ENABLE | PDC_TIMER_MASK_INT, }; #define ECC_ERASE_BUF_SZ (128 * 1024) struct pdc_port_priv { u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512]; u8 *pkt; dma_addr_t pkt_dma; }; struct pdc_host_priv { unsigned int doing_hdma; unsigned int hdma_prod; unsigned int hdma_cons; struct { struct ata_queued_cmd *qc; unsigned int seq; unsigned long pkt_ofs; } hdma[32]; }; static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static void pdc_error_handler(struct ata_port *ap); static void pdc_freeze(struct ata_port *ap); static void pdc_thaw(struct ata_port *ap); static int pdc_port_start(struct ata_port *ap); static void pdc20621_qc_prep(struct ata_queued_cmd *qc); static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); static unsigned int pdc20621_dimm_init(struct ata_host *host); static int pdc20621_detect_dimm(struct ata_host *host); static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device, u32 subaddr, u32 *pdata); static int pdc20621_prog_dimm0(struct ata_host *host); static unsigned int pdc20621_prog_dimm_global(struct ata_host *host); #ifdef ATA_VERBOSE_DEBUG static void pdc20621_get_from_dimm(struct ata_host *host, void *psource, u32 offset, u32 size); #endif static void pdc20621_put_to_dimm(struct ata_host *host, void *psource, u32 offset, u32 size); static void pdc20621_irq_clear(struct ata_port *ap); static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc); static int pdc_softreset(struct ata_link *link, unsigned int *class, unsigned long deadline); static void pdc_post_internal_cmd(struct ata_queued_cmd *qc); static int pdc_check_atapi_dma(struct ata_queued_cmd *qc); static struct scsi_host_template pdc_sata_sht = { ATA_BASE_SHT(DRV_NAME), .sg_tablesize = LIBATA_MAX_PRD, .dma_boundary = ATA_DMA_BOUNDARY, }; /* TODO: inherit from base port_ops after converting to new EH */ static struct ata_port_operations pdc_20621_ops = { .inherits = &ata_sff_port_ops, .check_atapi_dma = pdc_check_atapi_dma, .qc_prep = pdc20621_qc_prep, .qc_issue = pdc20621_qc_issue, .freeze = pdc_freeze, .thaw = pdc_thaw, .softreset = pdc_softreset, .error_handler = pdc_error_handler, .lost_interrupt = ATA_OP_NULL, .post_internal_cmd = pdc_post_internal_cmd, .port_start = pdc_port_start, .sff_tf_load = pdc_tf_load_mmio, .sff_exec_command = pdc_exec_command_mmio, .sff_irq_clear = pdc20621_irq_clear, }; static const struct 
ata_port_info pdc_port_info[] = { /* board_20621 */ { .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST | ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &pdc_20621_ops, }, }; static const struct pci_device_id pdc_sata_pci_tbl[] = { { PCI_VDEVICE(PROMISE, 0x6622), board_20621 }, { } /* terminate list */ }; static struct pci_driver pdc_sata_pci_driver = { .name = DRV_NAME, .id_table = pdc_sata_pci_tbl, .probe = pdc_sata_init_one, .remove = ata_pci_remove_one, }; static int pdc_port_start(struct ata_port *ap) { struct device *dev = ap->host->dev; struct pdc_port_priv *pp; int rc; rc = ata_port_start(ap); if (rc) return rc; pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); if (!pp) return -ENOMEM; pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL); if (!pp->pkt) return -ENOMEM; ap->private_data = pp; return 0; } static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf, unsigned int portno, unsigned int total_len) { u32 addr; unsigned int dw = PDC_DIMM_APKT_PRD >> 2; __le32 *buf32 = (__le32 *) buf; /* output ATA packet S/G table */ addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA + (PDC_DIMM_DATA_STEP * portno); VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr); buf32[dw] = cpu_to_le32(addr); buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT); VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n", PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) + PDC_DIMM_APKT_PRD, buf32[dw], buf32[dw + 1]); } static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf, unsigned int portno, unsigned int total_len) { u32 addr; unsigned int dw = PDC_DIMM_HPKT_PRD >> 2; __le32 *buf32 = (__le32 *) buf; /* output Host DMA packet S/G table */ addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA + (PDC_DIMM_DATA_STEP * portno); buf32[dw] = cpu_to_le32(addr); buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT); VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n", PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) + PDC_DIMM_HPKT_PRD, buf32[dw], buf32[dw + 1]); } static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf, unsigned int devno, u8 *buf, unsigned int portno) { unsigned int i, dw; __le32 *buf32 = (__le32 *) buf; u8 dev_reg; unsigned int dimm_sg = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) + PDC_DIMM_APKT_PRD; VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg); i = PDC_DIMM_ATA_PKT; /* * Set up ATA packet */ if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE))) buf[i++] = PDC_PKT_READ; else if (tf->protocol == ATA_PROT_NODATA) buf[i++] = PDC_PKT_NODATA; else buf[i++] = 0; buf[i++] = 0; /* reserved */ buf[i++] = portno + 1; /* seq. id */ buf[i++] = 0xff; /* delay seq. 
id */ /* dimm dma S/G, and next-pkt */ dw = i >> 2; if (tf->protocol == ATA_PROT_NODATA) buf32[dw] = 0; else buf32[dw] = cpu_to_le32(dimm_sg); buf32[dw + 1] = 0; i += 8; if (devno == 0) dev_reg = ATA_DEVICE_OBS; else dev_reg = ATA_DEVICE_OBS | ATA_DEV1; /* select device */ buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE; buf[i++] = dev_reg; /* device control register */ buf[i++] = (1 << 5) | PDC_REG_DEVCTL; buf[i++] = tf->ctl; return i; } static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf, unsigned int portno) { unsigned int dw; u32 tmp; __le32 *buf32 = (__le32 *) buf; unsigned int host_sg = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) + PDC_DIMM_HOST_PRD; unsigned int dimm_sg = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) + PDC_DIMM_HPKT_PRD; VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg); VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg); dw = PDC_DIMM_HOST_PKT >> 2; /* * Set up Host DMA packet */ if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE))) tmp = PDC_PKT_READ; else tmp = 0; tmp |= ((portno + 1 + 4) << 16); /* seq. id */ tmp |= (0xff << 24); /* delay seq. id */ buf32[dw + 0] = cpu_to_le32(tmp); buf32[dw + 1] = cpu_to_le32(host_sg); buf32[dw + 2] = cpu_to_le32(dimm_sg); buf32[dw + 3] = 0; VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n", PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) + PDC_DIMM_HOST_PKT, buf32[dw + 0], buf32[dw + 1], buf32[dw + 2], buf32[dw + 3]); } static void pdc20621_dma_prep(struct ata_queued_cmd *qc) { struct scatterlist *sg; struct ata_port *ap = qc->ap; struct pdc_port_priv *pp = ap->private_data; void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR]; void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR]; unsigned int portno = ap->port_no; unsigned int i, si, idx, total_len = 0, sgt_len; __le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP)); VPRINTK("ata%u: ENTER\n", ap->print_id); /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; /* * Build S/G table */ idx = 0; for_each_sg(qc->sg, sg, qc->n_elem, si) { buf[idx++] = cpu_to_le32(sg_dma_address(sg)); buf[idx++] = cpu_to_le32(sg_dma_len(sg)); total_len += sg_dma_len(sg); } buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT); sgt_len = idx * 4; /* * Build ATA, host DMA packets */ pdc20621_host_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len); pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno); pdc20621_ata_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len); i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno); if (qc->tf.flags & ATA_TFLAG_LBA48) i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i); else i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i); pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i); /* copy three S/G tables and two packets to DIMM MMIO window */ memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP), &pp->dimm_buf, PDC_DIMM_HEADER_SZ); memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) + PDC_DIMM_HOST_PRD, &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len); /* force host FIFO dump */ writel(0x00000001, mmio + PDC_20621_GENERAL_CTL); readl(dimm_mmio); /* MMIO PCI posting flush */ VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len); } static void pdc20621_nodata_prep(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct pdc_port_priv *pp = ap->private_data; void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR]; void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR]; unsigned int portno = ap->port_no; unsigned int i; VPRINTK("ata%u: 
ENTER\n", ap->print_id); /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno); if (qc->tf.flags & ATA_TFLAG_LBA48) i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i); else i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i); pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i); /* copy three S/G tables and two packets to DIMM MMIO window */ memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP), &pp->dimm_buf, PDC_DIMM_HEADER_SZ); /* force host FIFO dump */ writel(0x00000001, mmio + PDC_20621_GENERAL_CTL); readl(dimm_mmio); /* MMIO PCI posting flush */ VPRINTK("ata pkt buf ofs %u, mmio copied\n", i); } static void pdc20621_qc_prep(struct ata_queued_cmd *qc) { switch (qc->tf.protocol) { case ATA_PROT_DMA: pdc20621_dma_prep(qc); break; case ATA_PROT_NODATA: pdc20621_nodata_prep(qc); break; default: break; } } static void __pdc20621_push_hdma(struct ata_queued_cmd *qc, unsigned int seq, u32 pkt_ofs) { struct ata_port *ap = qc->ap; struct ata_host *host = ap->host; void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4)); readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */ writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT); readl(mmio + PDC_HDMA_PKT_SUBMIT); /* flush */ } static void pdc20621_push_hdma(struct ata_queued_cmd *qc, unsigned int seq, u32 pkt_ofs) { struct ata_port *ap = qc->ap; struct pdc_host_priv *pp = ap->host->private_data; unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK; if (!pp->doing_hdma) { __pdc20621_push_hdma(qc, seq, pkt_ofs); pp->doing_hdma = 1; return; } pp->hdma[idx].qc = qc; pp->hdma[idx].seq = seq; pp->hdma[idx].pkt_ofs = pkt_ofs; pp->hdma_prod++; } static void pdc20621_pop_hdma(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct pdc_host_priv *pp = ap->host->private_data; unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK; /* if nothing on queue, we're done */ if (pp->hdma_prod == pp->hdma_cons) { pp->doing_hdma = 0; return; } __pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq, pp->hdma[idx].pkt_ofs); pp->hdma_cons++; } #ifdef ATA_VERBOSE_DEBUG static void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; unsigned int port_no = ap->port_no; void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR]; dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP); dimm_mmio += PDC_DIMM_HOST_PKT; printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio)); printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4)); printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8)); printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12)); } #else static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { } #endif /* ATA_VERBOSE_DEBUG */ static void pdc20621_packet_start(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ata_host *host = ap->host; unsigned int port_no = ap->port_no; void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); u8 seq = (u8) (port_no + 1); unsigned int port_ofs; /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; VPRINTK("ata%u: ENTER\n", ap->print_id); wmb(); /* flush PRD, pkt writes */ port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no); /* if writing, we (1) DMA to DIMM, then (2) do ATA command */ if (rw && qc->tf.protocol == ATA_PROT_DMA) { seq += 4; pdc20621_dump_hdma(qc); pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT); VPRINTK("queued ofs 0x%x (%u), seq %u\n", port_ofs + 
PDC_DIMM_HOST_PKT, port_ofs + PDC_DIMM_HOST_PKT, seq); } else { writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4)); readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */ writel(port_ofs + PDC_DIMM_ATA_PKT, ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); VPRINTK("submitted ofs 0x%x (%u), seq %u\n", port_ofs + PDC_DIMM_ATA_PKT, port_ofs + PDC_DIMM_ATA_PKT, seq); } } static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc) { switch (qc->tf.protocol) { case ATA_PROT_NODATA: if (qc->tf.flags & ATA_TFLAG_POLLING) break; /*FALLTHROUGH*/ case ATA_PROT_DMA: pdc20621_packet_start(qc); return 0; case ATAPI_PROT_DMA: BUG(); break; default: break; } return ata_sff_qc_issue(qc); } static inline unsigned int pdc20621_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc, unsigned int doing_hdma, void __iomem *mmio) { unsigned int port_no = ap->port_no; unsigned int port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no); u8 status; unsigned int handled = 0; VPRINTK("ENTER\n"); if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */ (!(qc->tf.flags & ATA_TFLAG_WRITE))) { /* step two - DMA from DIMM to host */ if (doing_hdma) { VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id, readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); /* get drive status; clear intr; complete txn */ qc->err_mask |= ac_err_mask(ata_wait_idle(ap)); ata_qc_complete(qc); pdc20621_pop_hdma(qc); } /* step one - exec ATA command */ else { u8 seq = (u8) (port_no + 1 + 4); VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id, readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); /* submit hdma pkt */ pdc20621_dump_hdma(qc); pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT); } handled = 1; } else if (qc->tf.protocol == ATA_PROT_DMA) { /* write */ /* step one - DMA from host to DIMM */ if (doing_hdma) { u8 seq = (u8) (port_no + 1); VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id, readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); /* submit ata pkt */ writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4)); readl(mmio + PDC_20621_SEQCTL + (seq * 4)); writel(port_ofs + PDC_DIMM_ATA_PKT, ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); } /* step two - execute ATA command */ else { VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id, readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); /* get drive status; clear intr; complete txn */ qc->err_mask |= ac_err_mask(ata_wait_idle(ap)); ata_qc_complete(qc); pdc20621_pop_hdma(qc); } handled = 1; /* command completion, but no data xfer */ } else if (qc->tf.protocol == ATA_PROT_NODATA) { status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status); qc->err_mask |= ac_err_mask(status); ata_qc_complete(qc); handled = 1; } else { ap->stats.idle_irq++; } return handled; } static void pdc20621_irq_clear(struct ata_port *ap) { ioread8(ap->ioaddr.status_addr); } static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance) { struct ata_host *host = dev_instance; struct ata_port *ap; u32 mask = 0; unsigned int i, tmp, port_no; unsigned int handled = 0; void __iomem *mmio_base; VPRINTK("ENTER\n"); if (!host || !host->iomap[PDC_MMIO_BAR]) { VPRINTK("QUICK EXIT\n"); return IRQ_NONE; } mmio_base = host->iomap[PDC_MMIO_BAR]; /* reading should also clear interrupts */ mmio_base += PDC_CHIP0_OFS; mask = readl(mmio_base + PDC_20621_SEQMASK); VPRINTK("mask == 0x%x\n", mask); if (mask == 0xffffffff) { VPRINTK("QUICK EXIT 2\n"); return IRQ_NONE; } mask 
&= 0xffff; /* only 16 tags possible */ if (!mask) { VPRINTK("QUICK EXIT 3\n"); return IRQ_NONE; } spin_lock(&host->lock); for (i = 1; i < 9; i++) { port_no = i - 1; if (port_no > 3) port_no -= 4; if (port_no >= host->n_ports) ap = NULL; else ap = host->ports[port_no]; tmp = mask & (1 << i); VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp); if (tmp && ap && !(ap->flags & ATA_FLAG_DISABLED)) { struct ata_queued_cmd *qc; qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) handled += pdc20621_host_intr(ap, qc, (i > 4), mmio_base); } } spin_unlock(&host->lock); VPRINTK("mask == 0x%x\n", mask); VPRINTK("EXIT\n"); return IRQ_RETVAL(handled); } static void pdc_freeze(struct ata_port *ap) { void __iomem *mmio = ap->ioaddr.cmd_addr; u32 tmp; /* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */ tmp = readl(mmio + PDC_CTLSTAT); tmp |= PDC_MASK_INT; tmp &= ~PDC_DMA_ENABLE; writel(tmp, mmio + PDC_CTLSTAT); readl(mmio + PDC_CTLSTAT); /* flush */ } static void pdc_thaw(struct ata_port *ap) { void __iomem *mmio = ap->ioaddr.cmd_addr; u32 tmp; /* FIXME: start HDMA engine, if zero ATA engines running */ /* clear IRQ */ ioread8(ap->ioaddr.status_addr); /* turn IRQ back on */ tmp = readl(mmio + PDC_CTLSTAT); tmp &= ~PDC_MASK_INT; writel(tmp, mmio + PDC_CTLSTAT); readl(mmio + PDC_CTLSTAT); /* flush */ } static void pdc_reset_port(struct ata_port *ap) { void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT; unsigned int i; u32 tmp; /* FIXME: handle HDMA copy engine */ for (i = 11; i > 0; i--) { tmp = readl(mmio); if (tmp & PDC_RESET) break; udelay(100); tmp |= PDC_RESET; writel(tmp, mmio); } tmp &= ~PDC_RESET; writel(tmp, mmio); readl(mmio); /* flush */ } static int pdc_softreset(struct ata_link *link, unsigned int *class, unsigned long deadline) { pdc_reset_port(link->ap); return ata_sff_softreset(link, class, deadline); } static void pdc_error_handler(struct ata_port *ap) { if (!(ap->pflags & ATA_PFLAG_FROZEN)) pdc_reset_port(ap); ata_std_error_handler(ap); } static void pdc_post_internal_cmd(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; /* make DMA engine forget about the failed command */ if (qc->flags & ATA_QCFLAG_FAILED) pdc_reset_port(ap); } static int pdc_check_atapi_dma(struct ata_queued_cmd *qc) { u8 *scsicmd = qc->scsicmd->cmnd; int pio = 1; /* atapi dma off by default */ /* Whitelist commands that may use DMA. 
*/ switch (scsicmd[0]) { case WRITE_12: case WRITE_10: case WRITE_6: case READ_12: case READ_10: case READ_6: case 0xad: /* READ_DVD_STRUCTURE */ case 0xbe: /* READ_CD */ pio = 0; } /* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */ if (scsicmd[0] == WRITE_10) { unsigned int lba = (scsicmd[2] << 24) | (scsicmd[3] << 16) | (scsicmd[4] << 8) | scsicmd[5]; if (lba >= 0xFFFF4FA2) pio = 1; } return pio; } static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) { WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA); ata_sff_tf_load(ap, tf); } static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf) { WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA); ata_sff_exec_command(ap, tf); } static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base) { port->cmd_addr = base; port->data_addr = base; port->feature_addr = port->error_addr = base + 0x4; port->nsect_addr = base + 0x8; port->lbal_addr = base + 0xc; port->lbam_addr = base + 0x10; port->lbah_addr = base + 0x14; port->device_addr = base + 0x18; port->command_addr = port->status_addr = base + 0x1c; port->altstatus_addr = port->ctl_addr = base + 0x38; } #ifdef ATA_VERBOSE_DEBUG static void pdc20621_get_from_dimm(struct ata_host *host, void *psource, u32 offset, u32 size) { u32 window_size; u16 idx; u8 page_mask; long dist; void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR]; /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; page_mask = 0x00; window_size = 0x2000 * 4; /* 32K byte uchar size */ idx = (u16) (offset / window_size); writel(0x01, mmio + PDC_GENERAL_CTLR); readl(mmio + PDC_GENERAL_CTLR); writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); readl(mmio + PDC_DIMM_WINDOW_CTLR); offset -= (idx * window_size); idx++; dist = ((long) (window_size - (offset + size))) >= 0 ? size : (long) (window_size - offset); memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4), dist); psource += dist; size -= dist; for (; (long) size >= (long) window_size ;) { writel(0x01, mmio + PDC_GENERAL_CTLR); readl(mmio + PDC_GENERAL_CTLR); writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); readl(mmio + PDC_DIMM_WINDOW_CTLR); memcpy_fromio((char *) psource, (char *) (dimm_mmio), window_size / 4); psource += window_size; size -= window_size; idx++; } if (size) { writel(0x01, mmio + PDC_GENERAL_CTLR); readl(mmio + PDC_GENERAL_CTLR); writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); readl(mmio + PDC_DIMM_WINDOW_CTLR); memcpy_fromio((char *) psource, (char *) (dimm_mmio), size / 4); } } #endif static void pdc20621_put_to_dimm(struct ata_host *host, void *psource, u32 offset, u32 size) { u32 window_size; u16 idx; u8 page_mask; long dist; void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR]; /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; page_mask = 0x00; window_size = 0x2000 * 4; /* 32K byte uchar size */ idx = (u16) (offset / window_size); writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); readl(mmio + PDC_DIMM_WINDOW_CTLR); offset -= (idx * window_size); idx++; dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? 
size : (long) (window_size - offset); memcpy_toio(dimm_mmio + offset / 4, psource, dist); writel(0x01, mmio + PDC_GENERAL_CTLR); readl(mmio + PDC_GENERAL_CTLR); psource += dist; size -= dist; for (; (long) size >= (long) window_size ;) { writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); readl(mmio + PDC_DIMM_WINDOW_CTLR); memcpy_toio(dimm_mmio, psource, window_size / 4); writel(0x01, mmio + PDC_GENERAL_CTLR); readl(mmio + PDC_GENERAL_CTLR); psource += window_size; size -= window_size; idx++; } if (size) { writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); readl(mmio + PDC_DIMM_WINDOW_CTLR); memcpy_toio(dimm_mmio, psource, size / 4); writel(0x01, mmio + PDC_GENERAL_CTLR); readl(mmio + PDC_GENERAL_CTLR); } } static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device, u32 subaddr, u32 *pdata) { void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; u32 i2creg = 0; u32 status; u32 count = 0; /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; i2creg |= device << 24; i2creg |= subaddr << 16; /* Set the device and subaddress */ writel(i2creg, mmio + PDC_I2C_ADDR_DATA); readl(mmio + PDC_I2C_ADDR_DATA); /* Write Control to perform read operation, mask int */ writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT, mmio + PDC_I2C_CONTROL); for (count = 0; count <= 1000; count ++) { status = readl(mmio + PDC_I2C_CONTROL); if (status & PDC_I2C_COMPLETE) { status = readl(mmio + PDC_I2C_ADDR_DATA); break; } else if (count == 1000) return 0; } *pdata = (status >> 8) & 0x000000ff; return 1; } static int pdc20621_detect_dimm(struct ata_host *host) { u32 data = 0; if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_SYSTEM_FREQ, &data)) { if (data == 100) return 100; } else return 0; if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) { if (data <= 0x75) return 133; } else return 0; return 0; } static int pdc20621_prog_dimm0(struct ata_host *host) { u32 spd0[50]; u32 data = 0; int size, i; u8 bdimmsize; void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; static const struct { unsigned int reg; unsigned int ofs; } pdc_i2c_read_data [] = { { PDC_DIMM_SPD_TYPE, 11 }, { PDC_DIMM_SPD_FRESH_RATE, 12 }, { PDC_DIMM_SPD_COLUMN_NUM, 4 }, { PDC_DIMM_SPD_ATTRIBUTE, 21 }, { PDC_DIMM_SPD_ROW_NUM, 3 }, { PDC_DIMM_SPD_BANK_NUM, 17 }, { PDC_DIMM_SPD_MODULE_ROW, 5 }, { PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 }, { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 }, { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 }, { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 }, { PDC_DIMM_SPD_CAS_LATENCY, 18 }, }; /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++) pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, pdc_i2c_read_data[i].reg, &spd0[pdc_i2c_read_data[i].ofs]); data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4); data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) | ((((spd0[27] + 9) / 10) - 1) << 8) ; data |= (((((spd0[29] > spd0[28]) ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10; data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12; if (spd0[18] & 0x08) data |= ((0x03) << 14); else if (spd0[18] & 0x04) data |= ((0x02) << 14); else if (spd0[18] & 0x01) data |= ((0x01) << 14); else data |= (0 << 14); /* Calculate the size of bDIMMSize (power of 2) and merge the DIMM size by program start/end address. 
*/ bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3; size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */ data |= (((size / 16) - 1) << 16); data |= (0 << 23); data |= 8; writel(data, mmio + PDC_DIMM0_CONTROL); readl(mmio + PDC_DIMM0_CONTROL); return size; } static unsigned int pdc20621_prog_dimm_global(struct ata_host *host) { u32 data, spd0; int error, i; void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; /* Set To Default : DIMM Module Global Control Register (0x022259F1) DIMM Arbitration Disable (bit 20) DIMM Data/Control Output Driving Selection (bit12 - bit15) Refresh Enable (bit 17) */ data = 0x022259F1; writel(data, mmio + PDC_SDRAM_CONTROL); readl(mmio + PDC_SDRAM_CONTROL); /* Turn on for ECC */ pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE, &spd0); if (spd0 == 0x02) { data |= (0x01 << 16); writel(data, mmio + PDC_SDRAM_CONTROL); readl(mmio + PDC_SDRAM_CONTROL); printk(KERN_ERR "Local DIMM ECC Enabled\n"); } /* DIMM Initialization Select/Enable (bit 18/19) */ data &= (~(1<<18)); data |= (1<<19); writel(data, mmio + PDC_SDRAM_CONTROL); error = 1; for (i = 1; i <= 10; i++) { /* polling ~5 secs */ data = readl(mmio + PDC_SDRAM_CONTROL); if (!(data & (1<<19))) { error = 0; break; } msleep(i*100); } return error; } static unsigned int pdc20621_dimm_init(struct ata_host *host) { int speed, size, length; u32 addr, spd0, pci_status; u32 time_period = 0; u32 tcount = 0; u32 ticks = 0; u32 clock = 0; u32 fparam = 0; void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; /* Initialize PLL based upon PCI Bus Frequency */ /* Initialize Time Period Register */ writel(0xffffffff, mmio + PDC_TIME_PERIOD); time_period = readl(mmio + PDC_TIME_PERIOD); VPRINTK("Time Period Register (0x40): 0x%x\n", time_period); /* Enable timer */ writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL); readl(mmio + PDC_TIME_CONTROL); /* Wait 3 seconds */ msleep(3000); /* When timer is enabled, counter is decreased every internal clock cycle. */ tcount = readl(mmio + PDC_TIME_COUNTER); VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount); /* If SX4 is on PCI-X bus, after 3 seconds, the timer counter register should be >= (0xffffffff - 3x10^8). */ if (tcount >= PCI_X_TCOUNT) { ticks = (time_period - tcount); VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks); clock = (ticks / 300000); VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock); clock = (clock * 33); VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock); /* PLL F Param (bit 22:16) */ fparam = (1400000 / clock) - 2; VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam); /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */ pci_status = (0x8a001824 | (fparam << 16)); } else pci_status = PCI_PLL_INIT; /* Initialize PLL. */ VPRINTK("pci_status: 0x%x\n", pci_status); writel(pci_status, mmio + PDC_CTL_STATUS); readl(mmio + PDC_CTL_STATUS); /* Read SPD of DIMM by I2C interface, and program the DIMM Module Controller. 
*/ if (!(speed = pdc20621_detect_dimm(host))) { printk(KERN_ERR "Detect Local DIMM Fail\n"); return 1; /* DIMM error */ } VPRINTK("Local DIMM Speed = %d\n", speed); /* Programming DIMM0 Module Control Register (index_CID0:80h) */ size = pdc20621_prog_dimm0(host); VPRINTK("Local DIMM Size = %dMB\n", size); /* Programming DIMM Module Global Control Register (index_CID0:88h) */ if (pdc20621_prog_dimm_global(host)) { printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n"); return 1; } #ifdef ATA_VERBOSE_DEBUG { u8 test_parttern1[40] = {0x55,0xAA,'P','r','o','m','i','s','e',' ', 'N','o','t',' ','Y','e','t',' ', 'D','e','f','i','n','e','d',' ', '1','.','1','0', '9','8','0','3','1','6','1','2',0,0}; u8 test_parttern2[40] = {0}; pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40); pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40); pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40); pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40); printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], test_parttern2[1], &(test_parttern2[2])); pdc20621_get_from_dimm(host, test_parttern2, 0x10040, 40); printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], test_parttern2[1], &(test_parttern2[2])); pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40); pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40); printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], test_parttern2[1], &(test_parttern2[2])); } #endif /* ECC initiliazation. */ pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE, &spd0); if (spd0 == 0x02) { void *buf; VPRINTK("Start ECC initialization\n"); addr = 0; length = size * 1024 * 1024; buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL); while (addr < length) { pdc20621_put_to_dimm(host, buf, addr, ECC_ERASE_BUF_SZ); addr += ECC_ERASE_BUF_SZ; } kfree(buf); VPRINTK("Finish ECC initialization\n"); } return 0; } static void pdc_20621_init(struct ata_host *host) { u32 tmp; void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; /* * Select page 0x40 for our 32k DIMM window */ tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000; tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */ writel(tmp, mmio + PDC_20621_DIMM_WINDOW); /* * Reset Host DMA */ tmp = readl(mmio + PDC_HDMA_CTLSTAT); tmp |= PDC_RESET; writel(tmp, mmio + PDC_HDMA_CTLSTAT); readl(mmio + PDC_HDMA_CTLSTAT); /* flush */ udelay(10); tmp = readl(mmio + PDC_HDMA_CTLSTAT); tmp &= ~PDC_RESET; writel(tmp, mmio + PDC_HDMA_CTLSTAT); readl(mmio + PDC_HDMA_CTLSTAT); /* flush */ } static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int printed_version; const struct ata_port_info *ppi[] = { &pdc_port_info[ent->driver_data], NULL }; struct ata_host *host; struct pdc_host_priv *hpriv; int i, rc; if (!printed_version++) dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); /* allocate host */ host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4); hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); if (!host || !hpriv) return -ENOMEM; host->private_data = hpriv; /* acquire resources and fill host */ rc = pcim_enable_device(pdev); if (rc) return rc; rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR), DRV_NAME); if (rc == -EBUSY) pcim_pin_device(pdev); if (rc) return rc; host->iomap = pcim_iomap_table(pdev); for (i = 0; i < 4; i++) { struct ata_port *ap = host->ports[i]; void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS; unsigned int offset = 0x200 + i * 0x80; pdc_sata_setup_port(&ap->ioaddr, 
base + offset); ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio"); ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm"); ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port"); } /* configure and activate */ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); if (rc) return rc; rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); if (rc) return rc; if (pdc20621_dimm_init(host)) return -ENOMEM; pdc_20621_init(host); pci_set_master(pdev); return ata_host_activate(host, pdev->irq, pdc20621_interrupt, IRQF_SHARED, &pdc_sata_sht); } static int __init pdc_sata_init(void) { return pci_register_driver(&pdc_sata_pci_driver); } static void __exit pdc_sata_exit(void) { pci_unregister_driver(&pdc_sata_pci_driver); } MODULE_AUTHOR("Jeff Garzik"); MODULE_DESCRIPTION("Promise SATA low-level driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl); MODULE_VERSION(DRV_VERSION); module_init(pdc_sata_init); module_exit(pdc_sata_exit);
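/*
 * The hdma_prod/hdma_cons counters in struct pdc_host_priv above form a
 * classic power-of-two ring buffer: the counters grow without bound, only
 * their low five bits (PDC_HDMA_Q_MASK) ever index the array, and
 * "prod == cons" means empty.  A minimal stand-alone model of that idiom
 * is sketched below; ring_push()/ring_pop() and the explicit fullness
 * check are assumptions made for the sketch -- the driver itself relies on
 * the hardware never having more than PDC_MAX_HDMA packets outstanding.
 */
#include <stdio.h>

#define Q_DEPTH 32			/* must be a power of two */
#define Q_MASK  (Q_DEPTH - 1)		/* mirrors PDC_HDMA_Q_MASK */

struct ring {
	unsigned int prod, cons;	/* free-running counters */
	int slot[Q_DEPTH];
};

static int ring_push(struct ring *r, int v)
{
	if (r->prod - r->cons == Q_DEPTH)
		return -1;		/* full */
	r->slot[r->prod++ & Q_MASK] = v;
	return 0;
}

static int ring_pop(struct ring *r, int *v)
{
	if (r->prod == r->cons)
		return -1;		/* empty */
	*v = r->slot[r->cons++ & Q_MASK];
	return 0;
}

int main(void)
{
	struct ring r = { 0, 0, { 0 } };
	int v;

	for (v = 0; v < 5; v++)
		ring_push(&r, v);
	while (ring_pop(&r, &v) == 0)
		printf("popped %d\n", v);
	return 0;
}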
gpl-2.0
Windeal/Linux-2.6.32.64
drivers/input/keyboard/jornada720_kbd.c
798
5651
/* * drivers/input/keyboard/jornada720_kbd.c * * HP Jornada 720 keyboard platform driver * * Copyright (C) 2006/2007 Kristoffer Ericson <Kristoffer.Ericson@Gmail.com> * * Copyright (C) 2006 jornada 720 kbd driver by Filip Zyzniewsk <Filip.Zyzniewski@tefnet.plX * based on (C) 2004 jornada 720 kbd driver by Alex Lange <chicken@handhelds.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/device.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/input.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <mach/jornada720.h> #include <mach/hardware.h> MODULE_AUTHOR("Kristoffer Ericson <Kristoffer.Ericson@gmail.com>"); MODULE_DESCRIPTION("HP Jornada 710/720/728 keyboard driver"); MODULE_LICENSE("GPL v2"); static unsigned short jornada_std_keymap[128] = { /* ROW */ 0, KEY_ESC, KEY_F1, KEY_F2, KEY_F3, KEY_F4, KEY_F5, KEY_F6, KEY_F7, /* #1 */ KEY_F8, KEY_F9, KEY_F10, KEY_F11, KEY_VOLUMEUP, KEY_VOLUMEDOWN, KEY_MUTE, /* -> */ 0, KEY_1, KEY_2, KEY_3, KEY_4, KEY_5, KEY_6, KEY_7, KEY_8, KEY_9, /* #2 */ KEY_0, KEY_MINUS, KEY_EQUAL,0, 0, 0, /* -> */ 0, KEY_Q, KEY_W, KEY_E, KEY_R, KEY_T, KEY_Y, KEY_U, KEY_I, KEY_O, /* #3 */ KEY_P, KEY_BACKSLASH, KEY_BACKSPACE, 0, 0, 0, /* -> */ 0, KEY_A, KEY_S, KEY_D, KEY_F, KEY_G, KEY_H, KEY_J, KEY_K, KEY_L, /* #4 */ KEY_SEMICOLON, KEY_LEFTBRACE, KEY_RIGHTBRACE, 0, 0, 0, /* -> */ 0, KEY_Z, KEY_X, KEY_C, KEY_V, KEY_B, KEY_N, KEY_M, KEY_COMMA, /* #5 */ KEY_DOT, KEY_KPMINUS, KEY_APOSTROPHE, KEY_ENTER, 0, 0,0, /* -> */ 0, KEY_TAB, 0, KEY_LEFTSHIFT, 0, KEY_APOSTROPHE, 0, 0, 0, 0, /* #6 */ KEY_UP, 0, KEY_RIGHTSHIFT, 0, 0, 0,0, 0, 0, 0, 0, KEY_LEFTALT, KEY_GRAVE, /* -> */ 0, 0, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0,0, KEY_KPASTERISK, /* -> */ KEY_LEFTCTRL, 0, KEY_SPACE, 0, 0, 0, KEY_SLASH, KEY_DELETE, 0, 0, /* -> */ 0, 0, 0, KEY_POWER, /* -> */ }; struct jornadakbd { unsigned short keymap[ARRAY_SIZE(jornada_std_keymap)]; struct input_dev *input; }; static irqreturn_t jornada720_kbd_interrupt(int irq, void *dev_id) { struct platform_device *pdev = dev_id; struct jornadakbd *jornadakbd = platform_get_drvdata(pdev); struct input_dev *input = jornadakbd->input; u8 count, kbd_data, scan_code; /* startup ssp with spinlock */ jornada_ssp_start(); if (jornada_ssp_inout(GETSCANKEYCODE) != TXDUMMY) { printk(KERN_DEBUG "jornada720_kbd: " "GetKeycode command failed with ETIMEDOUT, " "flushed bus\n"); } else { /* How many keycodes are waiting for us? 
*/ count = jornada_ssp_byte(TXDUMMY); /* Lets drag them out one at a time */ while (count--) { /* Exchange TxDummy for location (keymap[kbddata]) */ kbd_data = jornada_ssp_byte(TXDUMMY); scan_code = kbd_data & 0x7f; input_event(input, EV_MSC, MSC_SCAN, scan_code); input_report_key(input, jornadakbd->keymap[scan_code], !(kbd_data & 0x80)); input_sync(input); } } /* release spinlock and turn off ssp */ jornada_ssp_end(); return IRQ_HANDLED; }; static int __devinit jornada720_kbd_probe(struct platform_device *pdev) { struct jornadakbd *jornadakbd; struct input_dev *input_dev; int i, err; jornadakbd = kzalloc(sizeof(struct jornadakbd), GFP_KERNEL); input_dev = input_allocate_device(); if (!jornadakbd || !input_dev) { err = -ENOMEM; goto fail1; } platform_set_drvdata(pdev, jornadakbd); memcpy(jornadakbd->keymap, jornada_std_keymap, sizeof(jornada_std_keymap)); jornadakbd->input = input_dev; input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP); input_dev->name = "HP Jornada 720 keyboard"; input_dev->phys = "jornadakbd/input0"; input_dev->keycode = jornadakbd->keymap; input_dev->keycodesize = sizeof(unsigned short); input_dev->keycodemax = ARRAY_SIZE(jornada_std_keymap); input_dev->id.bustype = BUS_HOST; input_dev->dev.parent = &pdev->dev; for (i = 0; i < ARRAY_SIZE(jornadakbd->keymap); i++) __set_bit(jornadakbd->keymap[i], input_dev->keybit); __clear_bit(KEY_RESERVED, input_dev->keybit); input_set_capability(input_dev, EV_MSC, MSC_SCAN); err = request_irq(IRQ_GPIO0, jornada720_kbd_interrupt, IRQF_DISABLED | IRQF_TRIGGER_FALLING, "jornadakbd", pdev); if (err) { printk(KERN_INFO "jornadakbd720_kbd: Unable to grab IRQ\n"); goto fail1; } err = input_register_device(jornadakbd->input); if (err) goto fail2; return 0; fail2: /* IRQ, DEVICE, MEMORY */ free_irq(IRQ_GPIO0, pdev); fail1: /* DEVICE, MEMORY */ platform_set_drvdata(pdev, NULL); input_free_device(input_dev); kfree(jornadakbd); return err; }; static int __devexit jornada720_kbd_remove(struct platform_device *pdev) { struct jornadakbd *jornadakbd = platform_get_drvdata(pdev); free_irq(IRQ_GPIO0, pdev); platform_set_drvdata(pdev, NULL); input_unregister_device(jornadakbd->input); kfree(jornadakbd); return 0; } /* work with hotplug and coldplug */ MODULE_ALIAS("platform:jornada720_kbd"); static struct platform_driver jornada720_kbd_driver = { .driver = { .name = "jornada720_kbd", .owner = THIS_MODULE, }, .probe = jornada720_kbd_probe, .remove = __devexit_p(jornada720_kbd_remove), }; static int __init jornada720_kbd_init(void) { return platform_driver_register(&jornada720_kbd_driver); } static void __exit jornada720_kbd_exit(void) { platform_driver_unregister(&jornada720_kbd_driver); } module_init(jornada720_kbd_init); module_exit(jornada720_kbd_exit);
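/*
 * jornada720_kbd_interrupt() above decodes each byte returned by the
 * keyboard MCU as a 7-bit scan code with the press/release state in bit 7
 * (bit set means release, hence the !(kbd_data & 0x80)).  A minimal
 * stand-alone model of that decoding is sketched below; the sample bytes
 * are made up for illustration and are not real Jornada 720 traffic.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	static const uint8_t sample[] = { 0x02, 0x82, 0x10, 0x90 };
	unsigned int i;

	for (i = 0; i < sizeof(sample); i++) {
		uint8_t scan_code = sample[i] & 0x7f;	/* low seven bits */
		int pressed = !(sample[i] & 0x80);	/* bit 7 clear = press */

		printf("scan code 0x%02x %s\n", scan_code,
		       pressed ? "pressed" : "released");
	}
	return 0;
}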
gpl-2.0
GaloisInc/linux-deadline
drivers/ps3/ps3-sys-manager.c
1054
20937
/* * PS3 System Manager. * * Copyright (C) 2007 Sony Computer Entertainment Inc. * Copyright 2007 Sony Corp. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/workqueue.h> #include <linux/reboot.h> #include <asm/firmware.h> #include <asm/lv1call.h> #include <asm/ps3.h> #include "vuart.h" /** * ps3_sys_manager - PS3 system manager driver. * * The system manager provides an asynchronous system event notification * mechanism for reporting events like thermal alert and button presses to * guests. It also provides support to control system shutdown and startup. * * The actual system manager is implemented as an application running in the * system policy module in lpar_1. Guests communicate with the system manager * through port 2 of the vuart using a simple packet message protocol. * Messages are comprised of a fixed field header followed by a message * specific payload. */ /** * struct ps3_sys_manager_header - System manager message header. * @version: Header version, currently 1. * @size: Header size in bytes, currently 16. * @payload_size: Message payload size in bytes. * @service_id: Message type, one of enum ps3_sys_manager_service_id. * @request_tag: Unique number to identify reply. */ struct ps3_sys_manager_header { /* version 1 */ u8 version; u8 size; u16 reserved_1; u32 payload_size; u16 service_id; u16 reserved_2; u32 request_tag; }; #define dump_sm_header(_h) _dump_sm_header(_h, __func__, __LINE__) static void __maybe_unused _dump_sm_header( const struct ps3_sys_manager_header *h, const char *func, int line) { pr_debug("%s:%d: version: %xh\n", func, line, h->version); pr_debug("%s:%d: size: %xh\n", func, line, h->size); pr_debug("%s:%d: payload_size: %xh\n", func, line, h->payload_size); pr_debug("%s:%d: service_id: %xh\n", func, line, h->service_id); pr_debug("%s:%d: request_tag: %xh\n", func, line, h->request_tag); } /** * @PS3_SM_RX_MSG_LEN_MIN - Shortest received message length. * @PS3_SM_RX_MSG_LEN_MAX - Longest received message length. * * Currently all messages received from the system manager are either * (16 bytes header + 8 bytes payload = 24 bytes) or (16 bytes header * + 16 bytes payload = 32 bytes). This knowlege is used to simplify * the logic. */ enum { PS3_SM_RX_MSG_LEN_MIN = 24, PS3_SM_RX_MSG_LEN_MAX = 32, }; /** * enum ps3_sys_manager_service_id - Message header service_id. * @PS3_SM_SERVICE_ID_REQUEST: guest --> sys_manager. * @PS3_SM_SERVICE_ID_REQUEST_ERROR: guest <-- sys_manager. * @PS3_SM_SERVICE_ID_COMMAND: guest <-- sys_manager. * @PS3_SM_SERVICE_ID_RESPONSE: guest --> sys_manager. * @PS3_SM_SERVICE_ID_SET_ATTR: guest --> sys_manager. * @PS3_SM_SERVICE_ID_EXTERN_EVENT: guest <-- sys_manager. * @PS3_SM_SERVICE_ID_SET_NEXT_OP: guest --> sys_manager. * * PS3_SM_SERVICE_ID_REQUEST_ERROR is returned for invalid data values in a * a PS3_SM_SERVICE_ID_REQUEST message. 
It also seems to be returned when * a REQUEST message is sent at the wrong time. */ enum ps3_sys_manager_service_id { /* version 1 */ PS3_SM_SERVICE_ID_REQUEST = 1, PS3_SM_SERVICE_ID_RESPONSE = 2, PS3_SM_SERVICE_ID_COMMAND = 3, PS3_SM_SERVICE_ID_EXTERN_EVENT = 4, PS3_SM_SERVICE_ID_SET_NEXT_OP = 5, PS3_SM_SERVICE_ID_REQUEST_ERROR = 6, PS3_SM_SERVICE_ID_SET_ATTR = 8, }; /** * enum ps3_sys_manager_attr - Notification attribute (bit position mask). * @PS3_SM_ATTR_POWER: Power button. * @PS3_SM_ATTR_RESET: Reset button, not available on retail console. * @PS3_SM_ATTR_THERMAL: System thermal alert. * @PS3_SM_ATTR_CONTROLLER: Remote controller event. * @PS3_SM_ATTR_ALL: Logical OR of all. * * The guest tells the system manager which events it is interested in receiving * notice of by sending the system manager a logical OR of notification * attributes via the ps3_sys_manager_send_attr() routine. */ enum ps3_sys_manager_attr { /* version 1 */ PS3_SM_ATTR_POWER = 1, PS3_SM_ATTR_RESET = 2, PS3_SM_ATTR_THERMAL = 4, PS3_SM_ATTR_CONTROLLER = 8, /* bogus? */ PS3_SM_ATTR_ALL = 0x0f, }; /** * enum ps3_sys_manager_event - External event type, reported by system manager. * @PS3_SM_EVENT_POWER_PRESSED: payload.value = * enum ps3_sys_manager_button_event. * @PS3_SM_EVENT_POWER_RELEASED: payload.value = time pressed in millisec. * @PS3_SM_EVENT_RESET_PRESSED: payload.value = * enum ps3_sys_manager_button_event. * @PS3_SM_EVENT_RESET_RELEASED: payload.value = time pressed in millisec. * @PS3_SM_EVENT_THERMAL_ALERT: payload.value = thermal zone id. * @PS3_SM_EVENT_THERMAL_CLEARED: payload.value = thermal zone id. */ enum ps3_sys_manager_event { /* version 1 */ PS3_SM_EVENT_POWER_PRESSED = 3, PS3_SM_EVENT_POWER_RELEASED = 4, PS3_SM_EVENT_RESET_PRESSED = 5, PS3_SM_EVENT_RESET_RELEASED = 6, PS3_SM_EVENT_THERMAL_ALERT = 7, PS3_SM_EVENT_THERMAL_CLEARED = 8, /* no info on controller events */ }; /** * enum ps3_sys_manager_button_event - Button event payload values. * @PS3_SM_BUTTON_EVENT_HARD: Hardware generated event. * @PS3_SM_BUTTON_EVENT_SOFT: Software generated event. */ enum ps3_sys_manager_button_event { PS3_SM_BUTTON_EVENT_HARD = 0, PS3_SM_BUTTON_EVENT_SOFT = 1, }; /** * enum ps3_sys_manager_next_op - Operation to perform after lpar is destroyed. */ enum ps3_sys_manager_next_op { /* version 3 */ PS3_SM_NEXT_OP_SYS_SHUTDOWN = 1, PS3_SM_NEXT_OP_SYS_REBOOT = 2, PS3_SM_NEXT_OP_LPAR_REBOOT = 0x82, }; /** * enum ps3_sys_manager_wake_source - Next-op wakeup source (bit position mask). * @PS3_SM_WAKE_DEFAULT: Disk insert, power button, eject button. * @PS3_SM_WAKE_W_O_L: Ether or wireless LAN. * @PS3_SM_WAKE_P_O_R: Power on reset. * * Additional wakeup sources when specifying PS3_SM_NEXT_OP_SYS_SHUTDOWN. * The system will always wake from the PS3_SM_WAKE_DEFAULT sources. * Sources listed here are the only ones available to guests in the * other-os lpar. */ enum ps3_sys_manager_wake_source { /* version 3 */ PS3_SM_WAKE_DEFAULT = 0, PS3_SM_WAKE_W_O_L = 0x00000400, PS3_SM_WAKE_P_O_R = 0x80000000, }; /** * user_wake_sources - User specified wakeup sources. * * Logical OR of enum ps3_sys_manager_wake_source types. */ static u32 user_wake_sources = PS3_SM_WAKE_DEFAULT; /** * enum ps3_sys_manager_cmd - Command from system manager to guest. * * The guest completes the actions needed, then acks or naks the command via * ps3_sys_manager_send_response(). In the case of @PS3_SM_CMD_SHUTDOWN, * the guest must be fully prepared for a system poweroff prior to acking the * command. 
*/ enum ps3_sys_manager_cmd { /* version 1 */ PS3_SM_CMD_SHUTDOWN = 1, /* shutdown guest OS */ }; /** * ps3_sm_force_power_off - Poweroff helper. * * A global variable used to force a poweroff when the power button has * been pressed irrespective of how init handles the ctrl_alt_del signal. * */ static unsigned int ps3_sm_force_power_off; /** * ps3_sys_manager_write - Helper to write a two part message to the vuart. * */ static int ps3_sys_manager_write(struct ps3_system_bus_device *dev, const struct ps3_sys_manager_header *header, const void *payload) { int result; BUG_ON(header->version != 1); BUG_ON(header->size != 16); BUG_ON(header->payload_size != 8 && header->payload_size != 16); BUG_ON(header->service_id > 8); result = ps3_vuart_write(dev, header, sizeof(struct ps3_sys_manager_header)); if (!result) result = ps3_vuart_write(dev, payload, header->payload_size); return result; } /** * ps3_sys_manager_send_attr - Send a 'set attribute' to the system manager. * */ static int ps3_sys_manager_send_attr(struct ps3_system_bus_device *dev, enum ps3_sys_manager_attr attr) { struct ps3_sys_manager_header header; struct { u8 version; u8 reserved_1[3]; u32 attribute; } payload; BUILD_BUG_ON(sizeof(payload) != 8); dev_dbg(&dev->core, "%s:%d: %xh\n", __func__, __LINE__, attr); memset(&header, 0, sizeof(header)); header.version = 1; header.size = 16; header.payload_size = 16; header.service_id = PS3_SM_SERVICE_ID_SET_ATTR; memset(&payload, 0, sizeof(payload)); payload.version = 1; payload.attribute = attr; return ps3_sys_manager_write(dev, &header, &payload); } /** * ps3_sys_manager_send_next_op - Send a 'set next op' to the system manager. * * Tell the system manager what to do after this lpar is destroyed. */ static int ps3_sys_manager_send_next_op(struct ps3_system_bus_device *dev, enum ps3_sys_manager_next_op op, enum ps3_sys_manager_wake_source wake_source) { struct ps3_sys_manager_header header; struct { u8 version; u8 type; u8 gos_id; u8 reserved_1; u32 wake_source; u8 reserved_2[8]; } payload; BUILD_BUG_ON(sizeof(payload) != 16); dev_dbg(&dev->core, "%s:%d: (%xh)\n", __func__, __LINE__, op); memset(&header, 0, sizeof(header)); header.version = 1; header.size = 16; header.payload_size = 16; header.service_id = PS3_SM_SERVICE_ID_SET_NEXT_OP; memset(&payload, 0, sizeof(payload)); payload.version = 3; payload.type = op; payload.gos_id = 3; /* other os */ payload.wake_source = wake_source; return ps3_sys_manager_write(dev, &header, &payload); } /** * ps3_sys_manager_send_request_shutdown - Send 'request' to the system manager. * * The guest sends this message to request an operation or action of the system * manager. The reply is a command message from the system manager. In the * command handler the guest performs the requested operation. The result of * the command is then communicated back to the system manager with a response * message. * * Currently, the only supported request is the 'shutdown self' request. 
*/ static int ps3_sys_manager_send_request_shutdown( struct ps3_system_bus_device *dev) { struct ps3_sys_manager_header header; struct { u8 version; u8 type; u8 gos_id; u8 reserved_1[13]; } payload; BUILD_BUG_ON(sizeof(payload) != 16); dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__); memset(&header, 0, sizeof(header)); header.version = 1; header.size = 16; header.payload_size = 16; header.service_id = PS3_SM_SERVICE_ID_REQUEST; memset(&payload, 0, sizeof(payload)); payload.version = 1; payload.type = 1; /* shutdown */ payload.gos_id = 0; /* self */ return ps3_sys_manager_write(dev, &header, &payload); } /** * ps3_sys_manager_send_response - Send a 'response' to the system manager. * @status: zero = success, others fail. * * The guest sends this message to the system manager to acknowledge success or * failure of a command sent by the system manager. */ static int ps3_sys_manager_send_response(struct ps3_system_bus_device *dev, u64 status) { struct ps3_sys_manager_header header; struct { u8 version; u8 reserved_1[3]; u8 status; u8 reserved_2[11]; } payload; BUILD_BUG_ON(sizeof(payload) != 16); dev_dbg(&dev->core, "%s:%d: (%s)\n", __func__, __LINE__, (status ? "nak" : "ack")); memset(&header, 0, sizeof(header)); header.version = 1; header.size = 16; header.payload_size = 16; header.service_id = PS3_SM_SERVICE_ID_RESPONSE; memset(&payload, 0, sizeof(payload)); payload.version = 1; payload.status = status; return ps3_sys_manager_write(dev, &header, &payload); } /** * ps3_sys_manager_handle_event - Second stage event msg handler. * */ static int ps3_sys_manager_handle_event(struct ps3_system_bus_device *dev) { int result; struct { u8 version; u8 type; u8 reserved_1[2]; u32 value; u8 reserved_2[8]; } event; BUILD_BUG_ON(sizeof(event) != 16); result = ps3_vuart_read(dev, &event, sizeof(event)); BUG_ON(result && "need to retry here"); if (event.version != 1) { dev_dbg(&dev->core, "%s:%d: unsupported event version (%u)\n", __func__, __LINE__, event.version); return -EIO; } switch (event.type) { case PS3_SM_EVENT_POWER_PRESSED: dev_dbg(&dev->core, "%s:%d: POWER_PRESSED (%s)\n", __func__, __LINE__, (event.value == PS3_SM_BUTTON_EVENT_SOFT ? "soft" : "hard")); ps3_sm_force_power_off = 1; /* * A memory barrier is used here to sync memory since * ps3_sys_manager_final_restart() could be called on * another cpu. */ wmb(); kill_cad_pid(SIGINT, 1); /* ctrl_alt_del */ break; case PS3_SM_EVENT_POWER_RELEASED: dev_dbg(&dev->core, "%s:%d: POWER_RELEASED (%u ms)\n", __func__, __LINE__, event.value); break; case PS3_SM_EVENT_RESET_PRESSED: dev_dbg(&dev->core, "%s:%d: RESET_PRESSED (%s)\n", __func__, __LINE__, (event.value == PS3_SM_BUTTON_EVENT_SOFT ? "soft" : "hard")); ps3_sm_force_power_off = 0; /* * A memory barrier is used here to sync memory since * ps3_sys_manager_final_restart() could be called on * another cpu. */ wmb(); kill_cad_pid(SIGINT, 1); /* ctrl_alt_del */ break; case PS3_SM_EVENT_RESET_RELEASED: dev_dbg(&dev->core, "%s:%d: RESET_RELEASED (%u ms)\n", __func__, __LINE__, event.value); break; case PS3_SM_EVENT_THERMAL_ALERT: dev_dbg(&dev->core, "%s:%d: THERMAL_ALERT (zone %u)\n", __func__, __LINE__, event.value); pr_info("PS3 Thermal Alert Zone %u\n", event.value); break; case PS3_SM_EVENT_THERMAL_CLEARED: dev_dbg(&dev->core, "%s:%d: THERMAL_CLEARED (zone %u)\n", __func__, __LINE__, event.value); break; default: dev_dbg(&dev->core, "%s:%d: unknown event (%u)\n", __func__, __LINE__, event.type); return -EIO; } return 0; } /** * ps3_sys_manager_handle_cmd - Second stage command msg handler. 
* * The system manager sends this in reply to a 'request' message from the guest. */ static int ps3_sys_manager_handle_cmd(struct ps3_system_bus_device *dev) { int result; struct { u8 version; u8 type; u8 reserved_1[14]; } cmd; BUILD_BUG_ON(sizeof(cmd) != 16); dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__); result = ps3_vuart_read(dev, &cmd, sizeof(cmd)); BUG_ON(result && "need to retry here"); if (result) return result; if (cmd.version != 1) { dev_dbg(&dev->core, "%s:%d: unsupported cmd version (%u)\n", __func__, __LINE__, cmd.version); return -EIO; } if (cmd.type != PS3_SM_CMD_SHUTDOWN) { dev_dbg(&dev->core, "%s:%d: unknown cmd (%u)\n", __func__, __LINE__, cmd.type); return -EIO; } ps3_sys_manager_send_response(dev, 0); return 0; } /** * ps3_sys_manager_handle_msg - First stage msg handler. * * Can be called directly to manually poll vuart and pump message handler. */ static int ps3_sys_manager_handle_msg(struct ps3_system_bus_device *dev) { int result; struct ps3_sys_manager_header header; result = ps3_vuart_read(dev, &header, sizeof(struct ps3_sys_manager_header)); if (result) return result; if (header.version != 1) { dev_dbg(&dev->core, "%s:%d: unsupported header version (%u)\n", __func__, __LINE__, header.version); dump_sm_header(&header); goto fail_header; } BUILD_BUG_ON(sizeof(header) != 16); if (header.size != 16 || (header.payload_size != 8 && header.payload_size != 16)) { dump_sm_header(&header); BUG(); } switch (header.service_id) { case PS3_SM_SERVICE_ID_EXTERN_EVENT: dev_dbg(&dev->core, "%s:%d: EVENT\n", __func__, __LINE__); return ps3_sys_manager_handle_event(dev); case PS3_SM_SERVICE_ID_COMMAND: dev_dbg(&dev->core, "%s:%d: COMMAND\n", __func__, __LINE__); return ps3_sys_manager_handle_cmd(dev); case PS3_SM_SERVICE_ID_REQUEST_ERROR: dev_dbg(&dev->core, "%s:%d: REQUEST_ERROR\n", __func__, __LINE__); dump_sm_header(&header); break; default: dev_dbg(&dev->core, "%s:%d: unknown service_id (%u)\n", __func__, __LINE__, header.service_id); break; } goto fail_id; fail_header: ps3_vuart_clear_rx_bytes(dev, 0); return -EIO; fail_id: ps3_vuart_clear_rx_bytes(dev, header.payload_size); return -EIO; } static void ps3_sys_manager_fin(struct ps3_system_bus_device *dev) { ps3_sys_manager_send_request_shutdown(dev); pr_emerg("System Halted, OK to turn off power\n"); while (ps3_sys_manager_handle_msg(dev)) { /* pause until next DEC interrupt */ lv1_pause(0); } while (1) { /* pause, ignoring DEC interrupt */ lv1_pause(1); } } /** * ps3_sys_manager_final_power_off - The final platform machine_power_off routine. * * This routine never returns. The routine disables asynchronous vuart reads * then spins calling ps3_sys_manager_handle_msg() to receive and acknowledge * the shutdown command sent from the system manager. Soon after the * acknowledgement is sent the lpar is destroyed by the HV. This routine * should only be called from ps3_power_off() through * ps3_sys_manager_ops.power_off. */ static void ps3_sys_manager_final_power_off(struct ps3_system_bus_device *dev) { BUG_ON(!dev); dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__); ps3_vuart_cancel_async(dev); ps3_sys_manager_send_next_op(dev, PS3_SM_NEXT_OP_SYS_SHUTDOWN, user_wake_sources); ps3_sys_manager_fin(dev); } /** * ps3_sys_manager_final_restart - The final platform machine_restart routine. * * This routine never returns. The routine disables asynchronous vuart reads * then spins calling ps3_sys_manager_handle_msg() to receive and acknowledge * the shutdown command sent from the system manager. 
Soon after the * acknowledgement is sent the lpar is destroyed by the HV. This routine * should only be called from ps3_restart() through ps3_sys_manager_ops.restart. */ static void ps3_sys_manager_final_restart(struct ps3_system_bus_device *dev) { BUG_ON(!dev); dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__); /* Check if we got here via a power button event. */ if (ps3_sm_force_power_off) { dev_dbg(&dev->core, "%s:%d: forcing poweroff\n", __func__, __LINE__); ps3_sys_manager_final_power_off(dev); } ps3_vuart_cancel_async(dev); ps3_sys_manager_send_attr(dev, 0); ps3_sys_manager_send_next_op(dev, PS3_SM_NEXT_OP_SYS_REBOOT, user_wake_sources); ps3_sys_manager_fin(dev); } /** * ps3_sys_manager_get_wol - Get wake-on-lan setting. */ int ps3_sys_manager_get_wol(void) { pr_debug("%s:%d\n", __func__, __LINE__); return (user_wake_sources & PS3_SM_WAKE_W_O_L) != 0; } EXPORT_SYMBOL_GPL(ps3_sys_manager_get_wol); /** * ps3_sys_manager_set_wol - Set wake-on-lan setting. */ void ps3_sys_manager_set_wol(int state) { static DEFINE_MUTEX(mutex); mutex_lock(&mutex); pr_debug("%s:%d: %d\n", __func__, __LINE__, state); if (state) user_wake_sources |= PS3_SM_WAKE_W_O_L; else user_wake_sources &= ~PS3_SM_WAKE_W_O_L; mutex_unlock(&mutex); } EXPORT_SYMBOL_GPL(ps3_sys_manager_set_wol); /** * ps3_sys_manager_work - Asynchronous read handler. * * Signaled when PS3_SM_RX_MSG_LEN_MIN bytes arrive at the vuart port. */ static void ps3_sys_manager_work(struct ps3_system_bus_device *dev) { ps3_sys_manager_handle_msg(dev); ps3_vuart_read_async(dev, PS3_SM_RX_MSG_LEN_MIN); } static int __devinit ps3_sys_manager_probe(struct ps3_system_bus_device *dev) { int result; struct ps3_sys_manager_ops ops; dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__); ops.power_off = ps3_sys_manager_final_power_off; ops.restart = ps3_sys_manager_final_restart; ops.dev = dev; /* ps3_sys_manager_register_ops copies ops. */ ps3_sys_manager_register_ops(&ops); result = ps3_sys_manager_send_attr(dev, PS3_SM_ATTR_ALL); BUG_ON(result); result = ps3_vuart_read_async(dev, PS3_SM_RX_MSG_LEN_MIN); BUG_ON(result); return result; } static int ps3_sys_manager_remove(struct ps3_system_bus_device *dev) { dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__); return 0; } static void ps3_sys_manager_shutdown(struct ps3_system_bus_device *dev) { dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__); } static struct ps3_vuart_port_driver ps3_sys_manager = { .core.match_id = PS3_MATCH_ID_SYSTEM_MANAGER, .core.core.name = "ps3_sys_manager", .probe = ps3_sys_manager_probe, .remove = ps3_sys_manager_remove, .shutdown = ps3_sys_manager_shutdown, .work = ps3_sys_manager_work, }; static int __init ps3_sys_manager_init(void) { if (!firmware_has_feature(FW_FEATURE_PS3_LV1)) return -ENODEV; return ps3_vuart_port_driver_register(&ps3_sys_manager); } module_init(ps3_sys_manager_init); /* Module remove not supported. */ MODULE_AUTHOR("Sony Corporation"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("PS3 System Manager"); MODULE_ALIAS(PS3_MODULE_ALIAS_SYSTEM_MANAGER);
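/*
 * Illustrative sketch, not part of the original driver: builds the 32-byte
 * 'set next op' message documented above -- a 16-byte header followed by a
 * 16-byte version-3 payload -- and hex-dumps it. This is a standalone host
 * program for studying the wire format only; the struct names here are local
 * to the sketch, the real driver sends the two parts through
 * ps3_vuart_write(), and the PS3 is big-endian, so a dump taken on a
 * little-endian host shows multi-byte fields byte-swapped.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sm_header {           /* mirrors struct ps3_sys_manager_header */
	uint8_t version;     /* always 1 */
	uint8_t size;        /* header size, always 16 */
	uint16_t reserved_1;
	uint32_t payload_size;
	uint16_t service_id; /* PS3_SM_SERVICE_ID_SET_NEXT_OP = 5 */
	uint16_t reserved_2;
	uint32_t request_tag;
};

struct sm_next_op_payload {  /* mirrors the payload in ps3_sys_manager_send_next_op */
	uint8_t version;     /* payload version 3 */
	uint8_t type;        /* enum ps3_sys_manager_next_op */
	uint8_t gos_id;      /* 3 = other os */
	uint8_t reserved_1;
	uint32_t wake_source;
	uint8_t reserved_2[8];
};

int main(void)
{
	unsigned char msg[32];
	struct sm_header h = { .version = 1, .size = 16, .payload_size = 16,
			       .service_id = 5 /* SET_NEXT_OP */ };
	struct sm_next_op_payload p = { .version = 3,
					.type = 1,            /* SYS_SHUTDOWN */
					.gos_id = 3,          /* other os */
					.wake_source = 0x400  /* PS3_SM_WAKE_W_O_L */ };

	memcpy(msg, &h, sizeof(h));
	memcpy(msg + sizeof(h), &p, sizeof(p));
	for (size_t i = 0; i < sizeof(msg); i++)
		printf("%02x%c", msg[i], (i % 16 == 15) ? '\n' : ' ');
	return 0;
}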
gpl-2.0
chneukirchen/linux-jetson-tk1
drivers/media/tuners/fc0013.c
1310
14679
/* * Fitipower FC0013 tuner driver * * Copyright (C) 2012 Hans-Frieder Vogt <hfvogt@gmx.net> * partially based on driver code from Fitipower * Copyright (C) 2010 Fitipower Integrated Technology Inc * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include "fc0013.h" #include "fc0013-priv.h" static int fc0013_writereg(struct fc0013_priv *priv, u8 reg, u8 val) { u8 buf[2] = {reg, val}; struct i2c_msg msg = { .addr = priv->addr, .flags = 0, .buf = buf, .len = 2 }; if (i2c_transfer(priv->i2c, &msg, 1) != 1) { err("I2C write reg failed, reg: %02x, val: %02x", reg, val); return -EREMOTEIO; } return 0; } static int fc0013_readreg(struct fc0013_priv *priv, u8 reg, u8 *val) { struct i2c_msg msg[2] = { { .addr = priv->addr, .flags = 0, .buf = &reg, .len = 1 }, { .addr = priv->addr, .flags = I2C_M_RD, .buf = val, .len = 1 }, }; if (i2c_transfer(priv->i2c, msg, 2) != 2) { err("I2C read reg failed, reg: %02x", reg); return -EREMOTEIO; } return 0; } static int fc0013_release(struct dvb_frontend *fe) { kfree(fe->tuner_priv); fe->tuner_priv = NULL; return 0; } static int fc0013_init(struct dvb_frontend *fe) { struct fc0013_priv *priv = fe->tuner_priv; int i, ret = 0; unsigned char reg[] = { 0x00, /* reg. 0x00: dummy */ 0x09, /* reg. 0x01 */ 0x16, /* reg. 0x02 */ 0x00, /* reg. 0x03 */ 0x00, /* reg. 0x04 */ 0x17, /* reg. 0x05 */ 0x02, /* reg. 0x06 */ 0x0a, /* reg. 0x07: CHECK */ 0xff, /* reg. 0x08: AGC Clock divide by 256, AGC gain 1/256, Loop Bw 1/8 */ 0x6f, /* reg. 0x09: enable LoopThrough */ 0xb8, /* reg. 0x0a: Disable LO Test Buffer */ 0x82, /* reg. 0x0b: CHECK */ 0xfc, /* reg. 0x0c: depending on AGC Up-Down mode, may need 0xf8 */ 0x01, /* reg. 0x0d: AGC Not Forcing & LNA Forcing, may need 0x02 */ 0x00, /* reg. 0x0e */ 0x00, /* reg. 0x0f */ 0x00, /* reg. 0x10 */ 0x00, /* reg. 0x11 */ 0x00, /* reg. 0x12 */ 0x00, /* reg. 0x13 */ 0x50, /* reg. 0x14: DVB-t High Gain, UHF. Middle Gain: 0x48, Low Gain: 0x40 */ 0x01, /* reg. 
0x15 */ }; switch (priv->xtal_freq) { case FC_XTAL_27_MHZ: case FC_XTAL_28_8_MHZ: reg[0x07] |= 0x20; break; case FC_XTAL_36_MHZ: default: break; } if (priv->dual_master) reg[0x0c] |= 0x02; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */ for (i = 1; i < sizeof(reg); i++) { ret = fc0013_writereg(priv, i, reg[i]); if (ret) break; } if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */ if (ret) err("fc0013_writereg failed: %d", ret); return ret; } static int fc0013_sleep(struct dvb_frontend *fe) { /* nothing to do here */ return 0; } int fc0013_rc_cal_add(struct dvb_frontend *fe, int rc_val) { struct fc0013_priv *priv = fe->tuner_priv; int ret; u8 rc_cal; int val; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */ /* push rc_cal value, get rc_cal value */ ret = fc0013_writereg(priv, 0x10, 0x00); if (ret) goto error_out; /* get rc_cal value */ ret = fc0013_readreg(priv, 0x10, &rc_cal); if (ret) goto error_out; rc_cal &= 0x0f; val = (int)rc_cal + rc_val; /* forcing rc_cal */ ret = fc0013_writereg(priv, 0x0d, 0x11); if (ret) goto error_out; /* modify rc_cal value */ if (val > 15) ret = fc0013_writereg(priv, 0x10, 0x0f); else if (val < 0) ret = fc0013_writereg(priv, 0x10, 0x00); else ret = fc0013_writereg(priv, 0x10, (u8)val); error_out: if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */ return ret; } EXPORT_SYMBOL(fc0013_rc_cal_add); int fc0013_rc_cal_reset(struct dvb_frontend *fe) { struct fc0013_priv *priv = fe->tuner_priv; int ret; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */ ret = fc0013_writereg(priv, 0x0d, 0x01); if (!ret) ret = fc0013_writereg(priv, 0x10, 0x00); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */ return ret; } EXPORT_SYMBOL(fc0013_rc_cal_reset); static int fc0013_set_vhf_track(struct fc0013_priv *priv, u32 freq) { int ret; u8 tmp; ret = fc0013_readreg(priv, 0x1d, &tmp); if (ret) goto error_out; tmp &= 0xe3; if (freq <= 177500) { /* VHF Track: 7 */ ret = fc0013_writereg(priv, 0x1d, tmp | 0x1c); } else if (freq <= 184500) { /* VHF Track: 6 */ ret = fc0013_writereg(priv, 0x1d, tmp | 0x18); } else if (freq <= 191500) { /* VHF Track: 5 */ ret = fc0013_writereg(priv, 0x1d, tmp | 0x14); } else if (freq <= 198500) { /* VHF Track: 4 */ ret = fc0013_writereg(priv, 0x1d, tmp | 0x10); } else if (freq <= 205500) { /* VHF Track: 3 */ ret = fc0013_writereg(priv, 0x1d, tmp | 0x0c); } else if (freq <= 219500) { /* VHF Track: 2 */ ret = fc0013_writereg(priv, 0x1d, tmp | 0x08); } else if (freq < 300000) { /* VHF Track: 1 */ ret = fc0013_writereg(priv, 0x1d, tmp | 0x04); } else { /* UHF and GPS */ ret = fc0013_writereg(priv, 0x1d, tmp | 0x1c); } if (ret) goto error_out; error_out: return ret; } static int fc0013_set_params(struct dvb_frontend *fe) { struct fc0013_priv *priv = fe->tuner_priv; int i, ret = 0; struct dtv_frontend_properties *p = &fe->dtv_property_cache; u32 freq = p->frequency / 1000; u32 delsys = p->delivery_system; unsigned char reg[7], am, pm, multi, tmp; unsigned long f_vco; unsigned short xtal_freq_khz_2, xin, xdiv; bool vco_select = false; if (fe->callback) { ret = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER, FC_FE_CALLBACK_VHF_ENABLE, (freq > 300000 ? 
0 : 1)); if (ret) goto exit; } switch (priv->xtal_freq) { case FC_XTAL_27_MHZ: xtal_freq_khz_2 = 27000 / 2; break; case FC_XTAL_36_MHZ: xtal_freq_khz_2 = 36000 / 2; break; case FC_XTAL_28_8_MHZ: default: xtal_freq_khz_2 = 28800 / 2; break; } if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */ /* set VHF track */ ret = fc0013_set_vhf_track(priv, freq); if (ret) goto exit; if (freq < 300000) { /* enable VHF filter */ ret = fc0013_readreg(priv, 0x07, &tmp); if (ret) goto exit; ret = fc0013_writereg(priv, 0x07, tmp | 0x10); if (ret) goto exit; /* disable UHF & disable GPS */ ret = fc0013_readreg(priv, 0x14, &tmp); if (ret) goto exit; ret = fc0013_writereg(priv, 0x14, tmp & 0x1f); if (ret) goto exit; } else if (freq <= 862000) { /* disable VHF filter */ ret = fc0013_readreg(priv, 0x07, &tmp); if (ret) goto exit; ret = fc0013_writereg(priv, 0x07, tmp & 0xef); if (ret) goto exit; /* enable UHF & disable GPS */ ret = fc0013_readreg(priv, 0x14, &tmp); if (ret) goto exit; ret = fc0013_writereg(priv, 0x14, (tmp & 0x1f) | 0x40); if (ret) goto exit; } else { /* disable VHF filter */ ret = fc0013_readreg(priv, 0x07, &tmp); if (ret) goto exit; ret = fc0013_writereg(priv, 0x07, tmp & 0xef); if (ret) goto exit; /* disable UHF & enable GPS */ ret = fc0013_readreg(priv, 0x14, &tmp); if (ret) goto exit; ret = fc0013_writereg(priv, 0x14, (tmp & 0x1f) | 0x20); if (ret) goto exit; } /* select frequency divider and the frequency of VCO */ if (freq < 37084) { /* freq * 96 < 3560000 */ multi = 96; reg[5] = 0x82; reg[6] = 0x00; } else if (freq < 55625) { /* freq * 64 < 3560000 */ multi = 64; reg[5] = 0x02; reg[6] = 0x02; } else if (freq < 74167) { /* freq * 48 < 3560000 */ multi = 48; reg[5] = 0x42; reg[6] = 0x00; } else if (freq < 111250) { /* freq * 32 < 3560000 */ multi = 32; reg[5] = 0x82; reg[6] = 0x02; } else if (freq < 148334) { /* freq * 24 < 3560000 */ multi = 24; reg[5] = 0x22; reg[6] = 0x00; } else if (freq < 222500) { /* freq * 16 < 3560000 */ multi = 16; reg[5] = 0x42; reg[6] = 0x02; } else if (freq < 296667) { /* freq * 12 < 3560000 */ multi = 12; reg[5] = 0x12; reg[6] = 0x00; } else if (freq < 445000) { /* freq * 8 < 3560000 */ multi = 8; reg[5] = 0x22; reg[6] = 0x02; } else if (freq < 593334) { /* freq * 6 < 3560000 */ multi = 6; reg[5] = 0x0a; reg[6] = 0x00; } else if (freq < 950000) { /* freq * 4 < 3800000 */ multi = 4; reg[5] = 0x12; reg[6] = 0x02; } else { multi = 2; reg[5] = 0x0a; reg[6] = 0x02; } f_vco = freq * multi; if (f_vco >= 3060000) { reg[6] |= 0x08; vco_select = true; } if (freq >= 45000) { /* From the divided value (XDIV), determine the FA and FP values */ xdiv = (unsigned short)(f_vco / xtal_freq_khz_2); if ((f_vco - xdiv * xtal_freq_khz_2) >= (xtal_freq_khz_2 / 2)) xdiv++; pm = (unsigned char)(xdiv / 8); am = (unsigned char)(xdiv - (8 * pm)); if (am < 2) { reg[1] = am + 8; reg[2] = pm - 1; } else { reg[1] = am; reg[2] = pm; } } else { /* fix for frequency less than 45 MHz */ reg[1] = 0x06; reg[2] = 0x11; } /* fix clock out */ reg[6] |= 0x20; /* From the VCO frequency, determine the XIN (fractional part of the Delta Sigma PLL) and the divided value (XDIV) */ xin = (unsigned short)(f_vco - (f_vco / xtal_freq_khz_2) * xtal_freq_khz_2); xin = (xin << 15) / xtal_freq_khz_2; if (xin >= 16384) xin += 32768; reg[3] = xin >> 8; reg[4] = xin & 0xff; if (delsys == SYS_DVBT) { reg[6] &= 0x3f; /* bits 6 and 7 describe the bandwidth */ switch (p->bandwidth_hz) { case 6000000: reg[6] |= 0x80; break; case 7000000: reg[6] |= 0x40; break; case 8000000: default: break; } } else { err("%s: 
modulation type not supported!", __func__); return -EINVAL; } /* modified for Realtek demod */ reg[5] |= 0x07; for (i = 1; i <= 6; i++) { ret = fc0013_writereg(priv, i, reg[i]); if (ret) goto exit; } ret = fc0013_readreg(priv, 0x11, &tmp); if (ret) goto exit; if (multi == 64) ret = fc0013_writereg(priv, 0x11, tmp | 0x04); else ret = fc0013_writereg(priv, 0x11, tmp & 0xfb); if (ret) goto exit; /* VCO Calibration */ ret = fc0013_writereg(priv, 0x0e, 0x80); if (!ret) ret = fc0013_writereg(priv, 0x0e, 0x00); /* VCO Re-Calibration if needed */ if (!ret) ret = fc0013_writereg(priv, 0x0e, 0x00); if (!ret) { msleep(10); ret = fc0013_readreg(priv, 0x0e, &tmp); } if (ret) goto exit; /* vco selection */ tmp &= 0x3f; if (vco_select) { if (tmp > 0x3c) { reg[6] &= ~0x08; ret = fc0013_writereg(priv, 0x06, reg[6]); if (!ret) ret = fc0013_writereg(priv, 0x0e, 0x80); if (!ret) ret = fc0013_writereg(priv, 0x0e, 0x00); } } else { if (tmp < 0x02) { reg[6] |= 0x08; ret = fc0013_writereg(priv, 0x06, reg[6]); if (!ret) ret = fc0013_writereg(priv, 0x0e, 0x80); if (!ret) ret = fc0013_writereg(priv, 0x0e, 0x00); } } priv->frequency = p->frequency; priv->bandwidth = p->bandwidth_hz; exit: if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */ if (ret) warn("%s: failed: %d", __func__, ret); return ret; } static int fc0013_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct fc0013_priv *priv = fe->tuner_priv; *frequency = priv->frequency; return 0; } static int fc0013_get_if_frequency(struct dvb_frontend *fe, u32 *frequency) { /* always ? */ *frequency = 0; return 0; } static int fc0013_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth) { struct fc0013_priv *priv = fe->tuner_priv; *bandwidth = priv->bandwidth; return 0; } #define INPUT_ADC_LEVEL -8 static int fc0013_get_rf_strength(struct dvb_frontend *fe, u16 *strength) { struct fc0013_priv *priv = fe->tuner_priv; int ret; unsigned char tmp; int int_temp, lna_gain, int_lna, tot_agc_gain, power; const int fc0013_lna_gain_table[] = { /* low gain */ -63, -58, -99, -73, -63, -65, -54, -60, /* middle gain */ 71, 70, 68, 67, 65, 63, 61, 58, /* high gain */ 197, 191, 188, 186, 184, 182, 181, 179, }; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */ ret = fc0013_writereg(priv, 0x13, 0x00); if (ret) goto err; ret = fc0013_readreg(priv, 0x13, &tmp); if (ret) goto err; int_temp = tmp; ret = fc0013_readreg(priv, 0x14, &tmp); if (ret) goto err; lna_gain = tmp & 0x1f; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */ if (lna_gain < ARRAY_SIZE(fc0013_lna_gain_table)) { int_lna = fc0013_lna_gain_table[lna_gain]; tot_agc_gain = (abs((int_temp >> 5) - 7) - 2 + (int_temp & 0x1f)) * 2; power = INPUT_ADC_LEVEL - tot_agc_gain - int_lna / 10; if (power >= 45) *strength = 255; /* 100% */ else if (power < -95) *strength = 0; else *strength = (power + 95) * 255 / 140; *strength |= *strength << 8; } else { ret = -1; } goto exit; err: if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */ exit: if (ret) warn("%s: failed: %d", __func__, ret); return ret; } static const struct dvb_tuner_ops fc0013_tuner_ops = { .info = { .name = "Fitipower FC0013", .frequency_min = 37000000, /* estimate */ .frequency_max = 1680000000, /* CHECK */ .frequency_step = 0, }, .release = fc0013_release, .init = fc0013_init, .sleep = fc0013_sleep, .set_params = fc0013_set_params, .get_frequency = fc0013_get_frequency, .get_if_frequency = fc0013_get_if_frequency, .get_bandwidth = fc0013_get_bandwidth, 
.get_rf_strength = fc0013_get_rf_strength, }; struct dvb_frontend *fc0013_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, u8 i2c_address, int dual_master, enum fc001x_xtal_freq xtal_freq) { struct fc0013_priv *priv = NULL; priv = kzalloc(sizeof(struct fc0013_priv), GFP_KERNEL); if (priv == NULL) return NULL; priv->i2c = i2c; priv->dual_master = dual_master; priv->addr = i2c_address; priv->xtal_freq = xtal_freq; info("Fitipower FC0013 successfully attached."); fe->tuner_priv = priv; memcpy(&fe->ops.tuner_ops, &fc0013_tuner_ops, sizeof(struct dvb_tuner_ops)); return fe; } EXPORT_SYMBOL(fc0013_attach); MODULE_DESCRIPTION("Fitipower FC0013 silicon tuner driver"); MODULE_AUTHOR("Hans-Frieder Vogt <hfvogt@gmx.net>"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.2");
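/*
 * Illustrative sketch, not part of the original driver: replays the PLL
 * arithmetic from fc0013_set_params() for one concrete tuning request as a
 * standalone host program. Assumes a 28.8 MHz crystal and a 666 MHz (UHF)
 * channel, which the divider table above maps to multi = 4; the values
 * printed are the ones the driver would program into reg[1]..reg[4].
 */
#include <stdio.h>

int main(void)
{
	unsigned long freq = 666000;                 /* kHz */
	unsigned short xtal_freq_khz_2 = 28800 / 2;  /* FC_XTAL_28_8_MHZ */
	unsigned char multi = 4;                     /* 593334 <= freq < 950000 */
	unsigned long f_vco = freq * multi;          /* 2664000, below the 3060000 vco_select bound */

	/* integer divider, rounded to the nearest multiple of xtal/2 */
	unsigned short xdiv = (unsigned short)(f_vco / xtal_freq_khz_2); /* 185 */
	if ((f_vco - xdiv * xtal_freq_khz_2) >= (xtal_freq_khz_2 / 2))
		xdiv++;

	unsigned char pm = (unsigned char)(xdiv / 8);      /* 23 */
	unsigned char am = (unsigned char)(xdiv - 8 * pm); /* 1: am < 2, so reg[1] = am + 8, reg[2] = pm - 1 */

	/* fractional part for the delta-sigma PLL; zero here because 2664000 divides evenly */
	unsigned short xin = (unsigned short)(f_vco - (f_vco / xtal_freq_khz_2) * xtal_freq_khz_2);
	xin = (unsigned short)(((unsigned long)xin << 15) / xtal_freq_khz_2);
	if (xin >= 16384)
		xin += 32768;

	printf("xdiv=%u pm=%u am=%u xin=%u\n", xdiv, pm, am, xin);
	return 0;
}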
gpl-2.0
beealone/linux
arch/arm/mach-s3c24xx/mach-vstms.c
1566
3604
/* linux/arch/arm/mach-s3c2412/mach-vstms.c * * (C) 2006 Thomas Gleixner <tglx@linutronix.de> * * Derived from mach-smdk2413.c - (C) 2006 Simtec Electronics * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/serial_core.h> #include <linux/serial_s3c.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <linux/memblock.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <asm/setup.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <mach/regs-gpio.h> #include <mach/regs-lcd.h> #include <mach/fb.h> #include <linux/platform_data/i2c-s3c2410.h> #include <linux/platform_data/mtd-nand-s3c2410.h> #include <plat/devs.h> #include <plat/cpu.h> #include <plat/samsung-time.h> #include "common.h" static struct map_desc vstms_iodesc[] __initdata = { }; static struct s3c2410_uartcfg vstms_uartcfgs[] __initdata = { [0] = { .hwport = 0, .flags = 0, .ucon = 0x3c5, .ulcon = 0x03, .ufcon = 0x51, }, [1] = { .hwport = 1, .flags = 0, .ucon = 0x3c5, .ulcon = 0x03, .ufcon = 0x51, }, [2] = { .hwport = 2, .flags = 0, .ucon = 0x3c5, .ulcon = 0x03, .ufcon = 0x51, } }; static struct mtd_partition __initdata vstms_nand_part[] = { [0] = { .name = "Boot Agent", .size = 0x7C000, .offset = 0, }, [1] = { .name = "UBoot Config", .offset = 0x7C000, .size = 0x4000, }, [2] = { .name = "Kernel", .offset = 0x80000, .size = 0x200000, }, [3] = { .name = "RFS", .offset = 0x280000, .size = 0x3d80000, }, }; static struct s3c2410_nand_set __initdata vstms_nand_sets[] = { [0] = { .name = "NAND", .nr_chips = 1, .nr_partitions = ARRAY_SIZE(vstms_nand_part), .partitions = vstms_nand_part, }, }; /* choose a set of timings which should suit most 512Mbit * chips and beyond. */ static struct s3c2410_platform_nand __initdata vstms_nand_info = { .tacls = 20, .twrph0 = 60, .twrph1 = 20, .nr_sets = ARRAY_SIZE(vstms_nand_sets), .sets = vstms_nand_sets, }; static struct platform_device *vstms_devices[] __initdata = { &s3c_device_ohci, &s3c_device_wdt, &s3c_device_i2c0, &s3c_device_iis, &s3c_device_rtc, &s3c_device_nand, &s3c2412_device_dma, }; static void __init vstms_fixup(struct tag *tags, char **cmdline) { if (tags != phys_to_virt(S3C2410_SDRAM_PA + 0x100)) { memblock_add(0x30000000, SZ_64M); } } static void __init vstms_map_io(void) { s3c24xx_init_io(vstms_iodesc, ARRAY_SIZE(vstms_iodesc)); s3c24xx_init_uarts(vstms_uartcfgs, ARRAY_SIZE(vstms_uartcfgs)); samsung_set_timer_source(SAMSUNG_PWM3, SAMSUNG_PWM4); } static void __init vstms_init_time(void) { s3c2412_init_clocks(12000000); samsung_timer_init(); } static void __init vstms_init(void) { s3c_i2c0_set_platdata(NULL); s3c_nand_set_platdata(&vstms_nand_info); platform_add_devices(vstms_devices, ARRAY_SIZE(vstms_devices)); } MACHINE_START(VSTMS, "VSTMS") .atag_offset = 0x100, .fixup = vstms_fixup, .init_irq = s3c2412_init_irq, .init_machine = vstms_init, .map_io = vstms_map_io, .init_time = vstms_init_time, MACHINE_END
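/*
 * Illustrative sketch, not part of the original board file: checks that the
 * vstms_nand_part[] layout above tiles the NAND with no gaps or overlaps.
 * Standalone host program; it prints an end address of 0x4000000, i.e. the
 * four partitions cover exactly 64 MiB of flash.
 */
#include <stdio.h>

struct part { const char *name; unsigned long offset, size; };

int main(void)
{
	const struct part parts[] = {        /* same numbers as vstms_nand_part[] */
		{ "Boot Agent",   0x0,      0x7C000   },
		{ "UBoot Config", 0x7C000,  0x4000    },
		{ "Kernel",       0x80000,  0x200000  },
		{ "RFS",          0x280000, 0x3d80000 },
	};
	unsigned long expect = 0;

	for (unsigned int i = 0; i < sizeof(parts) / sizeof(parts[0]); i++) {
		if (parts[i].offset != expect)
			printf("gap or overlap before \"%s\"\n", parts[i].name);
		expect = parts[i].offset + parts[i].size;
	}
	printf("end of last partition: 0x%lx (%lu MiB)\n", expect, expect >> 20);
	return 0;
}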
gpl-2.0
OneEducation/kernel-rk310-lollipop-firefly
drivers/iommu/irq_remapping.c
2078
8664
#include <linux/seq_file.h> #include <linux/cpumask.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/msi.h> #include <linux/irq.h> #include <linux/pci.h> #include <asm/hw_irq.h> #include <asm/irq_remapping.h> #include <asm/processor.h> #include <asm/x86_init.h> #include <asm/apic.h> #include "irq_remapping.h" int irq_remapping_enabled; int disable_irq_remap; int irq_remap_broken; int disable_sourceid_checking; int no_x2apic_optout; static struct irq_remap_ops *remap_ops; static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec); static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq, int index, int sub_handle); static int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask, bool force); static bool irq_remapped(struct irq_cfg *cfg) { return (cfg->remapped == 1); } static void irq_remapping_disable_io_apic(void) { /* * With interrupt-remapping, for now we will use virtual wire A * mode, as virtual wire B is a little complex (we need to configure * both the IOAPIC RTE as well as the interrupt-remapping table entry). * As this gets called during crash dump, keep this simple for * now. */ if (cpu_has_apic || apic_from_smp_config()) disconnect_bsp_APIC(0); } static int do_setup_msi_irqs(struct pci_dev *dev, int nvec) { int node, ret, sub_handle, index = 0; unsigned int irq; struct msi_desc *msidesc; nvec = __roundup_pow_of_two(nvec); WARN_ON(!list_is_singular(&dev->msi_list)); msidesc = list_entry(dev->msi_list.next, struct msi_desc, list); WARN_ON(msidesc->irq); WARN_ON(msidesc->msi_attrib.multiple); node = dev_to_node(&dev->dev); irq = __create_irqs(get_nr_irqs_gsi(), nvec, node); if (irq == 0) return -ENOSPC; msidesc->msi_attrib.multiple = ilog2(nvec); for (sub_handle = 0; sub_handle < nvec; sub_handle++) { if (!sub_handle) { index = msi_alloc_remapped_irq(dev, irq, nvec); if (index < 0) { ret = index; goto error; } } else { ret = msi_setup_remapped_irq(dev, irq + sub_handle, index, sub_handle); if (ret < 0) goto error; } ret = setup_msi_irq(dev, msidesc, irq, sub_handle); if (ret < 0) goto error; } return 0; error: destroy_irqs(irq, nvec); /* * Restore altered MSI descriptor fields and prevent just-destroyed * IRQs from tearing down again in default_teardown_msi_irqs() */ msidesc->irq = 0; msidesc->msi_attrib.multiple = 0; return ret; } static int do_setup_msix_irqs(struct pci_dev *dev, int nvec) { int node, ret, sub_handle, index = 0; struct msi_desc *msidesc; unsigned int irq; node = dev_to_node(&dev->dev); irq = get_nr_irqs_gsi(); sub_handle = 0; list_for_each_entry(msidesc, &dev->msi_list, list) { irq = create_irq_nr(irq, node); if (irq == 0) return -1; if (sub_handle == 0) ret = index = msi_alloc_remapped_irq(dev, irq, nvec); else ret = msi_setup_remapped_irq(dev, irq, index, sub_handle); if (ret < 0) goto error; ret = setup_msi_irq(dev, msidesc, irq, 0); if (ret < 0) goto error; sub_handle += 1; irq += 1; } return 0; error: destroy_irq(irq); return ret; } static int irq_remapping_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { if (type == PCI_CAP_ID_MSI) return do_setup_msi_irqs(dev, nvec); else return do_setup_msix_irqs(dev, nvec); } void eoi_ioapic_pin_remapped(int apic, int pin, int vector) { /* * Intr-remapping uses pin number as the virtual vector * in the RTE. Actual vector is programmed in * intr-remapping table entry. Hence for the io-apic * EOI we use the pin number. 
*/ io_apic_eoi(apic, pin); } static void __init irq_remapping_modify_x86_ops(void) { x86_io_apic_ops.disable = irq_remapping_disable_io_apic; x86_io_apic_ops.set_affinity = set_remapped_irq_affinity; x86_io_apic_ops.setup_entry = setup_ioapic_remapped_entry; x86_io_apic_ops.eoi_ioapic_pin = eoi_ioapic_pin_remapped; x86_msi.setup_msi_irqs = irq_remapping_setup_msi_irqs; x86_msi.setup_hpet_msi = setup_hpet_msi_remapped; x86_msi.compose_msi_msg = compose_remapped_msi_msg; } static __init int setup_nointremap(char *str) { disable_irq_remap = 1; return 0; } early_param("nointremap", setup_nointremap); static __init int setup_irqremap(char *str) { if (!str) return -EINVAL; while (*str) { if (!strncmp(str, "on", 2)) disable_irq_remap = 0; else if (!strncmp(str, "off", 3)) disable_irq_remap = 1; else if (!strncmp(str, "nosid", 5)) disable_sourceid_checking = 1; else if (!strncmp(str, "no_x2apic_optout", 16)) no_x2apic_optout = 1; str += strcspn(str, ","); while (*str == ',') str++; } return 0; } early_param("intremap", setup_irqremap); void __init setup_irq_remapping_ops(void) { remap_ops = &intel_irq_remap_ops; #ifdef CONFIG_AMD_IOMMU if (amd_iommu_irq_ops.prepare() == 0) remap_ops = &amd_iommu_irq_ops; #endif } void set_irq_remapping_broken(void) { irq_remap_broken = 1; } int irq_remapping_supported(void) { if (disable_irq_remap) return 0; if (!remap_ops || !remap_ops->supported) return 0; return remap_ops->supported(); } int __init irq_remapping_prepare(void) { if (!remap_ops || !remap_ops->prepare) return -ENODEV; return remap_ops->prepare(); } int __init irq_remapping_enable(void) { int ret; if (!remap_ops || !remap_ops->enable) return -ENODEV; ret = remap_ops->enable(); if (irq_remapping_enabled) irq_remapping_modify_x86_ops(); return ret; } void irq_remapping_disable(void) { if (!irq_remapping_enabled || !remap_ops || !remap_ops->disable) return; remap_ops->disable(); } int irq_remapping_reenable(int mode) { if (!irq_remapping_enabled || !remap_ops || !remap_ops->reenable) return 0; return remap_ops->reenable(mode); } int __init irq_remap_enable_fault_handling(void) { if (!irq_remapping_enabled) return 0; if (!remap_ops || !remap_ops->enable_faulting) return -ENODEV; return remap_ops->enable_faulting(); } int setup_ioapic_remapped_entry(int irq, struct IO_APIC_route_entry *entry, unsigned int destination, int vector, struct io_apic_irq_attr *attr) { if (!remap_ops || !remap_ops->setup_ioapic_entry) return -ENODEV; return remap_ops->setup_ioapic_entry(irq, entry, destination, vector, attr); } int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { if (!config_enabled(CONFIG_SMP) || !remap_ops || !remap_ops->set_affinity) return 0; return remap_ops->set_affinity(data, mask, force); } void free_remapped_irq(int irq) { struct irq_cfg *cfg = irq_get_chip_data(irq); if (!remap_ops || !remap_ops->free_irq) return; if (irq_remapped(cfg)) remap_ops->free_irq(irq); } void compose_remapped_msi_msg(struct pci_dev *pdev, unsigned int irq, unsigned int dest, struct msi_msg *msg, u8 hpet_id) { struct irq_cfg *cfg = irq_get_chip_data(irq); if (!irq_remapped(cfg)) native_compose_msi_msg(pdev, irq, dest, msg, hpet_id); else if (remap_ops && remap_ops->compose_msi_msg) remap_ops->compose_msi_msg(pdev, irq, dest, msg, hpet_id); } static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec) { if (!remap_ops || !remap_ops->msi_alloc_irq) return -ENODEV; return remap_ops->msi_alloc_irq(pdev, irq, nvec); } static int msi_setup_remapped_irq(struct pci_dev *pdev, 
unsigned int irq, int index, int sub_handle) { if (!remap_ops || !remap_ops->msi_setup_irq) return -ENODEV; return remap_ops->msi_setup_irq(pdev, irq, index, sub_handle); } int setup_hpet_msi_remapped(unsigned int irq, unsigned int id) { if (!remap_ops || !remap_ops->setup_hpet_msi) return -ENODEV; return remap_ops->setup_hpet_msi(irq, id); } void panic_if_irq_remap(const char *msg) { if (irq_remapping_enabled) panic(msg); } static void ir_ack_apic_edge(struct irq_data *data) { ack_APIC_irq(); } static void ir_ack_apic_level(struct irq_data *data) { ack_APIC_irq(); eoi_ioapic_irq(data->irq, data->chip_data); } static void ir_print_prefix(struct irq_data *data, struct seq_file *p) { seq_printf(p, " IR-%s", data->chip->name); } void irq_remap_modify_chip_defaults(struct irq_chip *chip) { chip->irq_print_chip = ir_print_prefix; chip->irq_ack = ir_ack_apic_edge; chip->irq_eoi = ir_ack_apic_level; chip->irq_set_affinity = x86_io_apic_ops.set_affinity; } bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip) { if (!irq_remapped(cfg)) return false; irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); irq_remap_modify_chip_defaults(chip); return true; }
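/*
 * Illustrative sketch, not part of the original file: the dispatch pattern
 * used throughout irq_remapping.c, reduced to a standalone host program. A
 * backend publishes a table of optional hooks, and every wrapper guards both
 * the table pointer and the individual hook before calling through, returning
 * -ENODEV (or a safe default) when a hook is absent -- the same shape as
 * irq_remapping_prepare() and friends when no remap_ops is wired up. The
 * names here are local to the sketch.
 */
#include <stdio.h>

#define ENODEV 19

struct sketch_remap_ops {
	int (*prepare)(void); /* optional: may be left NULL */
	int (*enable)(void);  /* optional: may be left NULL */
};

static int demo_prepare(void)
{
	puts("backend prepared");
	return 0;
}

/* This backend only implements prepare(), like a partial ops table. */
static struct sketch_remap_ops demo_backend = { .prepare = demo_prepare };
static struct sketch_remap_ops *ops = &demo_backend;

static int sketch_prepare(void)
{
	if (!ops || !ops->prepare)
		return -ENODEV;
	return ops->prepare();
}

static int sketch_enable(void)
{
	if (!ops || !ops->enable)
		return -ENODEV;
	return ops->enable();
}

int main(void)
{
	printf("prepare -> %d\n", sketch_prepare()); /* 0 */
	printf("enable  -> %d\n", sketch_enable());  /* -19, hook not provided */
	return 0;
}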
gpl-2.0
mericon/Xp_Kernel_LGH850
arch/arm/mach-imx/hotplug.c
2334
1536
/* * Copyright 2011 Freescale Semiconductor, Inc. * Copyright 2011 Linaro Ltd. * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License * Version 2 or later at the following locations: * * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ #include <linux/errno.h> #include <linux/jiffies.h> #include <asm/cp15.h> #include <asm/proc-fns.h> #include "common.h" static inline void cpu_enter_lowpower(void) { unsigned int v; asm volatile( "mcr p15, 0, %1, c7, c5, 0\n" " mcr p15, 0, %1, c7, c10, 4\n" /* * Turn off coherency */ " mrc p15, 0, %0, c1, c0, 1\n" " bic %0, %0, %3\n" " mcr p15, 0, %0, c1, c0, 1\n" " mrc p15, 0, %0, c1, c0, 0\n" " bic %0, %0, %2\n" " mcr p15, 0, %0, c1, c0, 0\n" : "=&r" (v) : "r" (0), "Ir" (CR_C), "Ir" (0x40) : "cc"); } /* * platform-specific code to shut down a CPU * * Called with IRQs disabled */ void imx_cpu_die(unsigned int cpu) { cpu_enter_lowpower(); /* * We use the cpu jumping argument register to sync with * imx_cpu_kill() which is running on cpu0 and waiting for * the register to be set before it kills the cpu. */ imx_set_cpu_arg(cpu, ~0); while (1) cpu_do_idle(); } int imx_cpu_kill(unsigned int cpu) { unsigned long timeout = jiffies + msecs_to_jiffies(50); while (imx_get_cpu_arg(cpu) == 0) if (time_after(jiffies, timeout)) return 0; imx_enable_cpu(cpu, false); imx_set_cpu_arg(cpu, 0); return 1; }
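/*
 * Illustrative sketch, not part of the original file: models the
 * imx_cpu_die()/imx_cpu_kill() handshake above with two host threads and an
 * atomic in place of the cpu-arg register. The dying side stores a non-zero
 * token and idles; the killing side polls with a bounded wait (the driver
 * uses a 50 ms jiffies timeout), cuts power only after seeing the token, and
 * clears it afterwards. Standalone program; build with -pthread.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static atomic_uint cpu_arg; /* stands in for the cpu jumping argument register */

static void *dying_cpu(void *unused)
{
	(void)unused;
	atomic_store(&cpu_arg, ~0u); /* imx_cpu_die(): signal "safe to kill" */
	return NULL;                 /* the real CPU would loop in cpu_do_idle() */
}

int main(void)
{
	pthread_t t;
	struct timespec tick = { .tv_sec = 0, .tv_nsec = 1000000 }; /* 1 ms poll */
	int polls = 0;

	pthread_create(&t, NULL, dying_cpu, NULL);

	/* imx_cpu_kill(): wait, with a timeout, for the dying CPU's store */
	while (atomic_load(&cpu_arg) == 0 && polls++ < 50)
		nanosleep(&tick, NULL);

	if (atomic_load(&cpu_arg) == 0) {
		puts("timeout: leaving power on"); /* imx_cpu_kill() returns 0 */
	} else {
		puts("cutting power");             /* imx_enable_cpu(cpu, false) */
		atomic_store(&cpu_arg, 0);         /* imx_set_cpu_arg(cpu, 0) */
	}
	pthread_join(t, NULL);
	return 0;
}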
gpl-2.0
kbehren/android_kernel_lenovo_msm8226
drivers/spi/spi-pl022.c
2846
65406
/* * A driver for the ARM PL022 PrimeCell SSP/SPI bus master. * * Copyright (C) 2008-2009 ST-Ericsson AB * Copyright (C) 2006 STMicroelectronics Pvt. Ltd. * * Author: Linus Walleij <linus.walleij@stericsson.com> * * Initial version inspired by: * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c * Initial adoption to PL022 by: * Sachin Verma <sachin.verma@st.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/ioport.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/spi/spi.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/amba/bus.h> #include <linux/amba/pl022.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/scatterlist.h> #include <linux/pm_runtime.h> /* * This macro is used to define some register default values. * reg is masked with mask, then OR:ed with an (again masked) * val shifted sb steps to the left. */ #define SSP_WRITE_BITS(reg, val, mask, sb) \ ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask)))) /* * This macro is also used to define some default values. * It will just shift val by sb steps to the left and mask * the result with mask. */ #define GEN_MASK_BITS(val, mask, sb) \ (((val)<<(sb)) & (mask)) #define DRIVE_TX 0 #define DO_NOT_DRIVE_TX 1 #define DO_NOT_QUEUE_DMA 0 #define QUEUE_DMA 1 #define RX_TRANSFER 1 #define TX_TRANSFER 2 /* * Macros to access SSP Registers with their offsets */ #define SSP_CR0(r) (r + 0x000) #define SSP_CR1(r) (r + 0x004) #define SSP_DR(r) (r + 0x008) #define SSP_SR(r) (r + 0x00C) #define SSP_CPSR(r) (r + 0x010) #define SSP_IMSC(r) (r + 0x014) #define SSP_RIS(r) (r + 0x018) #define SSP_MIS(r) (r + 0x01C) #define SSP_ICR(r) (r + 0x020) #define SSP_DMACR(r) (r + 0x024) #define SSP_ITCR(r) (r + 0x080) #define SSP_ITIP(r) (r + 0x084) #define SSP_ITOP(r) (r + 0x088) #define SSP_TDR(r) (r + 0x08C) #define SSP_PID0(r) (r + 0xFE0) #define SSP_PID1(r) (r + 0xFE4) #define SSP_PID2(r) (r + 0xFE8) #define SSP_PID3(r) (r + 0xFEC) #define SSP_CID0(r) (r + 0xFF0) #define SSP_CID1(r) (r + 0xFF4) #define SSP_CID2(r) (r + 0xFF8) #define SSP_CID3(r) (r + 0xFFC) /* * SSP Control Register 0 - SSP_CR0 */ #define SSP_CR0_MASK_DSS (0x0FUL << 0) #define SSP_CR0_MASK_FRF (0x3UL << 4) #define SSP_CR0_MASK_SPO (0x1UL << 6) #define SSP_CR0_MASK_SPH (0x1UL << 7) #define SSP_CR0_MASK_SCR (0xFFUL << 8) /* * The ST version of this block moves some bits * in SSP_CR0 and extends it to 32 bits */ #define SSP_CR0_MASK_DSS_ST (0x1FUL << 0) #define SSP_CR0_MASK_HALFDUP_ST (0x1UL << 5) #define SSP_CR0_MASK_CSS_ST (0x1FUL << 16) #define SSP_CR0_MASK_FRF_ST (0x3UL << 21) /* * SSP Control Register 1 - SSP_CR1 */ #define SSP_CR1_MASK_LBM (0x1UL << 0) #define SSP_CR1_MASK_SSE (0x1UL << 1) #define SSP_CR1_MASK_MS (0x1UL << 2) #define SSP_CR1_MASK_SOD (0x1UL << 3) /* * The ST version of this block adds some bits * in SSP_CR1 */ #define SSP_CR1_MASK_RENDN_ST (0x1UL << 4) #define SSP_CR1_MASK_TENDN_ST (0x1UL << 5) #define 
SSP_CR1_MASK_MWAIT_ST (0x1UL << 6) #define SSP_CR1_MASK_RXIFLSEL_ST (0x7UL << 7) #define SSP_CR1_MASK_TXIFLSEL_ST (0x7UL << 10) /* This one is only in the PL023 variant */ #define SSP_CR1_MASK_FBCLKDEL_ST (0x7UL << 13) /* * SSP Status Register - SSP_SR */ #define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */ #define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */ #define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */ #define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */ #define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */ /* * SSP Clock Prescale Register - SSP_CPSR */ #define SSP_CPSR_MASK_CPSDVSR (0xFFUL << 0) /* * SSP Interrupt Mask Set/Clear Register - SSP_IMSC */ #define SSP_IMSC_MASK_RORIM (0x1UL << 0) /* Receive Overrun Interrupt mask */ #define SSP_IMSC_MASK_RTIM (0x1UL << 1) /* Receive timeout Interrupt mask */ #define SSP_IMSC_MASK_RXIM (0x1UL << 2) /* Receive FIFO Interrupt mask */ #define SSP_IMSC_MASK_TXIM (0x1UL << 3) /* Transmit FIFO Interrupt mask */ /* * SSP Raw Interrupt Status Register - SSP_RIS */ /* Receive Overrun Raw Interrupt status */ #define SSP_RIS_MASK_RORRIS (0x1UL << 0) /* Receive Timeout Raw Interrupt status */ #define SSP_RIS_MASK_RTRIS (0x1UL << 1) /* Receive FIFO Raw Interrupt status */ #define SSP_RIS_MASK_RXRIS (0x1UL << 2) /* Transmit FIFO Raw Interrupt status */ #define SSP_RIS_MASK_TXRIS (0x1UL << 3) /* * SSP Masked Interrupt Status Register - SSP_MIS */ /* Receive Overrun Masked Interrupt status */ #define SSP_MIS_MASK_RORMIS (0x1UL << 0) /* Receive Timeout Masked Interrupt status */ #define SSP_MIS_MASK_RTMIS (0x1UL << 1) /* Receive FIFO Masked Interrupt status */ #define SSP_MIS_MASK_RXMIS (0x1UL << 2) /* Transmit FIFO Masked Interrupt status */ #define SSP_MIS_MASK_TXMIS (0x1UL << 3) /* * SSP Interrupt Clear Register - SSP_ICR */ /* Receive Overrun Raw Clear Interrupt bit */ #define SSP_ICR_MASK_RORIC (0x1UL << 0) /* Receive Timeout Clear Interrupt bit */ #define SSP_ICR_MASK_RTIC (0x1UL << 1) /* * SSP DMA Control Register - SSP_DMACR */ /* Receive DMA Enable bit */ #define SSP_DMACR_MASK_RXDMAE (0x1UL << 0) /* Transmit DMA Enable bit */ #define SSP_DMACR_MASK_TXDMAE (0x1UL << 1) /* * SSP Integration Test control Register - SSP_ITCR */ #define SSP_ITCR_MASK_ITEN (0x1UL << 0) #define SSP_ITCR_MASK_TESTFIFO (0x1UL << 1) /* * SSP Integration Test Input Register - SSP_ITIP */ #define ITIP_MASK_SSPRXD (0x1UL << 0) #define ITIP_MASK_SSPFSSIN (0x1UL << 1) #define ITIP_MASK_SSPCLKIN (0x1UL << 2) #define ITIP_MASK_RXDMAC (0x1UL << 3) #define ITIP_MASK_TXDMAC (0x1UL << 4) #define ITIP_MASK_SSPTXDIN (0x1UL << 5) /* * SSP Integration Test output Register - SSP_ITOP */ #define ITOP_MASK_SSPTXD (0x1UL << 0) #define ITOP_MASK_SSPFSSOUT (0x1UL << 1) #define ITOP_MASK_SSPCLKOUT (0x1UL << 2) #define ITOP_MASK_SSPOEn (0x1UL << 3) #define ITOP_MASK_SSPCTLOEn (0x1UL << 4) #define ITOP_MASK_RORINTR (0x1UL << 5) #define ITOP_MASK_RTINTR (0x1UL << 6) #define ITOP_MASK_RXINTR (0x1UL << 7) #define ITOP_MASK_TXINTR (0x1UL << 8) #define ITOP_MASK_INTR (0x1UL << 9) #define ITOP_MASK_RXDMABREQ (0x1UL << 10) #define ITOP_MASK_RXDMASREQ (0x1UL << 11) #define ITOP_MASK_TXDMABREQ (0x1UL << 12) #define ITOP_MASK_TXDMASREQ (0x1UL << 13) /* * SSP Test Data Register - SSP_TDR */ #define TDR_MASK_TESTDATA (0xFFFFFFFF) /* * Message State * we use the spi_message.state (void *) pointer to * hold a single state value, that's why all this * (void *) casting is done here. 
*/ #define STATE_START ((void *) 0) #define STATE_RUNNING ((void *) 1) #define STATE_DONE ((void *) 2) #define STATE_ERROR ((void *) -1) /* * SSP State - Whether Enabled or Disabled */ #define SSP_DISABLED (0) #define SSP_ENABLED (1) /* * SSP DMA State - Whether DMA Enabled or Disabled */ #define SSP_DMA_DISABLED (0) #define SSP_DMA_ENABLED (1) /* * SSP Clock Defaults */ #define SSP_DEFAULT_CLKRATE 0x2 #define SSP_DEFAULT_PRESCALE 0x40 /* * SSP Clock Parameter ranges */ #define CPSDVR_MIN 0x02 #define CPSDVR_MAX 0xFE #define SCR_MIN 0x00 #define SCR_MAX 0xFF /* * SSP Interrupt related Macros */ #define DEFAULT_SSP_REG_IMSC 0x0UL #define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC #define ENABLE_ALL_INTERRUPTS (~DEFAULT_SSP_REG_IMSC) #define CLEAR_ALL_INTERRUPTS 0x3 #define SPI_POLLING_TIMEOUT 1000 /* * The type of reading going on on this chip */ enum ssp_reading { READING_NULL, READING_U8, READING_U16, READING_U32 }; /** * The type of writing going on on this chip */ enum ssp_writing { WRITING_NULL, WRITING_U8, WRITING_U16, WRITING_U32 }; /** * struct vendor_data - vendor-specific config parameters * for PL022 derivatives * @fifodepth: depth of FIFOs (both) * @max_bpw: maximum number of bits per word * @unidir: supports unidirectional transfers * @extended_cr: 32 bit wide control register 0 with extra * features and extra features in CR1 as found in the ST variants * @pl023: supports a subset of the ST extensions called "PL023" */ struct vendor_data { int fifodepth; int max_bpw; bool unidir; bool extended_cr; bool pl023; bool loopback; }; /** * struct pl022 - This is the private SSP driver data structure * @adev: AMBA device model hookup * @vendor: vendor data for the IP block * @phybase: the physical memory where the SSP device resides * @virtbase: the virtual memory where the SSP is mapped * @clk: outgoing clock "SPICLK" for the SPI bus * @master: SPI framework hookup * @master_info: controller-specific data from machine setup * @kworker: thread struct for message pump * @kworker_task: pointer to task for message pump kworker thread * @pump_messages: work struct for scheduling work to the message pump * @queue_lock: spinlock to synchronise access to message queue * @queue: message queue * @busy: message pump is busy * @running: message pump is running * @pump_transfers: Tasklet used in Interrupt Transfer mode * @cur_msg: Pointer to current spi_message being processed * @cur_transfer: Pointer to current spi_transfer * @cur_chip: pointer to current client's chip (assigned from controller_state) * @next_msg_cs_active: the next message in the queue has been examined * and it was found that it uses the same chip select as the previous * message, so we left it active after the previous transfer, and it's * active already. 
* @tx: current position in TX buffer to be read * @tx_end: end position in TX buffer to be read * @rx: current position in RX buffer to be written * @rx_end: end position in RX buffer to be written * @read: the type of read currently going on * @write: the type of write currently going on * @exp_fifo_level: expected FIFO level * @dma_rx_channel: optional channel for RX DMA * @dma_tx_channel: optional channel for TX DMA * @sgt_rx: scattertable for the RX transfer * @sgt_tx: scattertable for the TX transfer * @dummypage: a dummy page used for driving data on the bus with DMA */ struct pl022 { struct amba_device *adev; struct vendor_data *vendor; resource_size_t phybase; void __iomem *virtbase; struct clk *clk; struct spi_master *master; struct pl022_ssp_controller *master_info; /* Message per-transfer pump */ struct tasklet_struct pump_transfers; struct spi_message *cur_msg; struct spi_transfer *cur_transfer; struct chip_data *cur_chip; bool next_msg_cs_active; void *tx; void *tx_end; void *rx; void *rx_end; enum ssp_reading read; enum ssp_writing write; u32 exp_fifo_level; enum ssp_rx_level_trig rx_lev_trig; enum ssp_tx_level_trig tx_lev_trig; /* DMA settings */ #ifdef CONFIG_DMA_ENGINE struct dma_chan *dma_rx_channel; struct dma_chan *dma_tx_channel; struct sg_table sgt_rx; struct sg_table sgt_tx; char *dummypage; bool dma_running; #endif }; /** * struct chip_data - To maintain runtime state of SSP for each client chip * @cr0: Value of control register CR0 of SSP - on later ST variants this * register is 32 bits wide rather than just 16 * @cr1: Value of control register CR1 of SSP * @dmacr: Value of DMA control Register of SSP * @cpsr: Value of Clock prescale register * @n_bytes: how many bytes (power of 2) required for a given data width of client * @enable_dma: Whether to enable DMA or not * @read: function ptr to be used to read when doing xfer for this chip * @write: function ptr to be used to write when doing xfer for this chip * @cs_control: chip select callback provided by chip * @xfer_type: polling/interrupt/DMA * * Runtime state of the SSP controller, maintained per chip. * This would be set according to the current message that would be served */ struct chip_data { u32 cr0; u16 cr1; u16 dmacr; u16 cpsr; u8 n_bytes; bool enable_dma; enum ssp_reading read; enum ssp_writing write; void (*cs_control) (u32 command); int xfer_type; }; /** * null_cs_control - Dummy chip select function * @command: select/deselect the chip * * If no chip select function is provided by the client, this is used as a * dummy chip select */ static void null_cs_control(u32 command) { pr_debug("pl022: dummy chip select control, CS=0x%x\n", command); } /** * giveback - current spi_message is over, schedule next message and call * callback of this message. Assumes that caller already * set message->status; dma and pio irqs are blocked * @pl022: SSP driver private data structure */ static void giveback(struct pl022 *pl022) { struct spi_transfer *last_transfer; pl022->next_msg_cs_active = false; last_transfer = list_entry(pl022->cur_msg->transfers.prev, struct spi_transfer, transfer_list); /* Delay if requested before any change in chip select */ if (last_transfer->delay_usecs) /* * FIXME: This runs in interrupt context. * Is this really smart? */ udelay(last_transfer->delay_usecs); if (!last_transfer->cs_change) { struct spi_message *next_msg; /* * cs_change was not set. We can keep the chip select * enabled if there is a message in the queue and it is * for the same spi device. 
* * We cannot postpone this until pump_messages, because * after calling msg->complete (below) the driver that * sent the current message could be unloaded, which * could invalidate the cs_control() callback... */ /* get a pointer to the next message, if any */ next_msg = spi_get_next_queued_message(pl022->master); /* * see if the next and current messages point * to the same spi device. */ if (next_msg && next_msg->spi != pl022->cur_msg->spi) next_msg = NULL; if (!next_msg || pl022->cur_msg->state == STATE_ERROR) pl022->cur_chip->cs_control(SSP_CHIP_DESELECT); else pl022->next_msg_cs_active = true; } pl022->cur_msg = NULL; pl022->cur_transfer = NULL; pl022->cur_chip = NULL; spi_finalize_current_message(pl022->master); } /** * flush - flush the FIFO to reach a clean state * @pl022: SSP driver private data structure */ static int flush(struct pl022 *pl022) { unsigned long limit = loops_per_jiffy << 1; dev_dbg(&pl022->adev->dev, "flush\n"); do { while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) readw(SSP_DR(pl022->virtbase)); } while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--); pl022->exp_fifo_level = 0; return limit; } /** * restore_state - Load configuration of current chip * @pl022: SSP driver private data structure */ static void restore_state(struct pl022 *pl022) { struct chip_data *chip = pl022->cur_chip; if (pl022->vendor->extended_cr) writel(chip->cr0, SSP_CR0(pl022->virtbase)); else writew(chip->cr0, SSP_CR0(pl022->virtbase)); writew(chip->cr1, SSP_CR1(pl022->virtbase)); writew(chip->dmacr, SSP_DMACR(pl022->virtbase)); writew(chip->cpsr, SSP_CPSR(pl022->virtbase)); writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); } /* * Default SSP Register Values */ #define DEFAULT_SSP_REG_CR0 ( \ GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \ GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \ GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \ ) /* ST versions have slightly different bit layout */ #define DEFAULT_SSP_REG_CR0_ST ( \ GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \ GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP_ST, 5) | \ GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \ GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS_ST, 16) | \ GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF_ST, 21) \ ) /* The PL023 version is slightly different again */ #define DEFAULT_SSP_REG_CR0_ST_PL023 ( \ GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \ GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \ ) #define DEFAULT_SSP_REG_CR1 ( \ GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \ GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \ GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \ GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \ ) /* ST versions extend this register to use all 16 bits */ #define DEFAULT_SSP_REG_CR1_ST ( \ DEFAULT_SSP_REG_CR1 | \ GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \ GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \ GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT_ST, 6) |\ 
GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \ GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) \ ) /* * The PL023 variant has further differences: no loopback mode, no microwire * support, and a new clock feedback delay setting. */ #define DEFAULT_SSP_REG_CR1_ST_PL023 ( \ GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \ GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \ GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \ GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \ GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \ GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \ GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) | \ GEN_MASK_BITS(SSP_FEEDBACK_CLK_DELAY_NONE, SSP_CR1_MASK_FBCLKDEL_ST, 13) \ ) #define DEFAULT_SSP_REG_CPSR ( \ GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \ ) #define DEFAULT_SSP_REG_DMACR (\ GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \ GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \ ) /** * load_ssp_default_config - Load default configuration for SSP * @pl022: SSP driver private data structure */ static void load_ssp_default_config(struct pl022 *pl022) { if (pl022->vendor->pl023) { writel(DEFAULT_SSP_REG_CR0_ST_PL023, SSP_CR0(pl022->virtbase)); writew(DEFAULT_SSP_REG_CR1_ST_PL023, SSP_CR1(pl022->virtbase)); } else if (pl022->vendor->extended_cr) { writel(DEFAULT_SSP_REG_CR0_ST, SSP_CR0(pl022->virtbase)); writew(DEFAULT_SSP_REG_CR1_ST, SSP_CR1(pl022->virtbase)); } else { writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase)); writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase)); } writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase)); writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase)); writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); } /** * readwriter - write to the TX FIFO and read from the RX FIFO according to * the parameters set in pl022 * @pl022: SSP driver private data structure */ static void readwriter(struct pl022 *pl022) { /* * The FIFO depth is different between primecell variants. * Filling the FIFO too far may cause errors in 8 bit wide * transfers on ARM variants (just 8 words FIFO, meaning only * 8x8 = 64 bits in FIFO) at least. * * To prevent this issue, the TX FIFO is only filled to the * unused RX FIFO fill length, regardless of what the TX * FIFO status flag indicates.
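 *
 * As a host-side illustration (not driver code), the invariant can
 * be modelled with a counter of words in flight; it never exceeds
 * fifodepth, so the RX FIFO cannot overrun no matter what the two
 * FIFO status flags report:
 *
 *	int exp = 0, tx = 0, rx = 0;
 *	while (rx < len) {
 *		while (exp > 0 && rx < len) {
 *			rx++; exp--;		(drain RX FIFO)
 *		}
 *		while (exp < fifodepth && tx < len) {
 *			tx++; exp++;		(fill TX FIFO)
 *		}
 *	}
 *
 * exp corresponds to exp_fifo_level in the code below.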
*/ dev_dbg(&pl022->adev->dev, "%s, rx: %p, rxend: %p, tx: %p, txend: %p\n", __func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end); /* Read as much as you can */ while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) && (pl022->rx < pl022->rx_end)) { switch (pl022->read) { case READING_NULL: readw(SSP_DR(pl022->virtbase)); break; case READING_U8: *(u8 *) (pl022->rx) = readw(SSP_DR(pl022->virtbase)) & 0xFFU; break; case READING_U16: *(u16 *) (pl022->rx) = (u16) readw(SSP_DR(pl022->virtbase)); break; case READING_U32: *(u32 *) (pl022->rx) = readl(SSP_DR(pl022->virtbase)); break; } pl022->rx += (pl022->cur_chip->n_bytes); pl022->exp_fifo_level--; } /* * Write as much as possible up to the RX FIFO size */ while ((pl022->exp_fifo_level < pl022->vendor->fifodepth) && (pl022->tx < pl022->tx_end)) { switch (pl022->write) { case WRITING_NULL: writew(0x0, SSP_DR(pl022->virtbase)); break; case WRITING_U8: writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase)); break; case WRITING_U16: writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase)); break; case WRITING_U32: writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase)); break; } pl022->tx += (pl022->cur_chip->n_bytes); pl022->exp_fifo_level++; /* * This inner reader takes care of things appearing in the RX * FIFO as we're transmitting. This will happen a lot since the * clock starts running when you put things into the TX FIFO, * and then things are continuously clocked into the RX FIFO. */ while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) && (pl022->rx < pl022->rx_end)) { switch (pl022->read) { case READING_NULL: readw(SSP_DR(pl022->virtbase)); break; case READING_U8: *(u8 *) (pl022->rx) = readw(SSP_DR(pl022->virtbase)) & 0xFFU; break; case READING_U16: *(u16 *) (pl022->rx) = (u16) readw(SSP_DR(pl022->virtbase)); break; case READING_U32: *(u32 *) (pl022->rx) = readl(SSP_DR(pl022->virtbase)); break; } pl022->rx += (pl022->cur_chip->n_bytes); pl022->exp_fifo_level--; } } /* * When we exit here the TX FIFO should be full and the RX FIFO * should be empty */ } /** * next_transfer - Move to the Next transfer in the current spi message * @pl022: SSP driver private data structure * * This function moves though the linked list of spi transfers in the * current spi message and returns with the state of current spi * message i.e whether its last transfer is done(STATE_DONE) or * Next transfer is ready(STATE_RUNNING) */ static void *next_transfer(struct pl022 *pl022) { struct spi_message *msg = pl022->cur_msg; struct spi_transfer *trans = pl022->cur_transfer; /* Move to next transfer */ if (trans->transfer_list.next != &msg->transfers) { pl022->cur_transfer = list_entry(trans->transfer_list.next, struct spi_transfer, transfer_list); return STATE_RUNNING; } return STATE_DONE; } /* * This DMA functionality is only compiled in if we have * access to the generic DMA devices/DMA engine. 
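 *
 * When CONFIG_DMA_ENGINE is not set, the stubs at the end of this
 * block keep the callers honest: configure_dma() returns -ENODEV so
 * every DMA attempt falls back to interrupt mode, and
 * pl022_dma_probe()/pl022_dma_remove() become no-ops.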
*/ #ifdef CONFIG_DMA_ENGINE static void unmap_free_dma_scatter(struct pl022 *pl022) { /* Unmap and free the SG tables */ dma_unmap_sg(pl022->dma_tx_channel->device->dev, pl022->sgt_tx.sgl, pl022->sgt_tx.nents, DMA_TO_DEVICE); dma_unmap_sg(pl022->dma_rx_channel->device->dev, pl022->sgt_rx.sgl, pl022->sgt_rx.nents, DMA_FROM_DEVICE); sg_free_table(&pl022->sgt_rx); sg_free_table(&pl022->sgt_tx); } static void dma_callback(void *data) { struct pl022 *pl022 = data; struct spi_message *msg = pl022->cur_msg; BUG_ON(!pl022->sgt_rx.sgl); #ifdef VERBOSE_DEBUG /* * Optionally dump out buffers to inspect contents, this is * good if you want to convince yourself that the loopback * read/write contents are the same, when adopting to a new * DMA engine. */ { struct scatterlist *sg; unsigned int i; dma_sync_sg_for_cpu(&pl022->adev->dev, pl022->sgt_rx.sgl, pl022->sgt_rx.nents, DMA_FROM_DEVICE); for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) { dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i); print_hex_dump(KERN_ERR, "SPI RX: ", DUMP_PREFIX_OFFSET, 16, 1, sg_virt(sg), sg_dma_len(sg), 1); } for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) { dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i); print_hex_dump(KERN_ERR, "SPI TX: ", DUMP_PREFIX_OFFSET, 16, 1, sg_virt(sg), sg_dma_len(sg), 1); } } #endif unmap_free_dma_scatter(pl022); /* Update total bytes transferred */ msg->actual_length += pl022->cur_transfer->len; if (pl022->cur_transfer->cs_change) pl022->cur_chip-> cs_control(SSP_CHIP_DESELECT); /* Move to next transfer */ msg->state = next_transfer(pl022); tasklet_schedule(&pl022->pump_transfers); } static void setup_dma_scatter(struct pl022 *pl022, void *buffer, unsigned int length, struct sg_table *sgtab) { struct scatterlist *sg; int bytesleft = length; void *bufp = buffer; int mapbytes; int i; if (buffer) { for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { /* * If there are less bytes left than what fits * in the current page (plus page alignment offset) * we just feed in this, else we stuff in as much * as we can. 
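 *
 * Concretely (an illustration assuming a 4 KiB page size): a 10000
 * byte buffer starting 0xF00 bytes into a page is split as
 *
 *	mapbytes = min(bytesleft, PAGE_SIZE - offset_in_page(bufp))
 *	=> 256, 4096, 4096, 1552 bytes
 *
 * so only the first and last scatterlist entries can be short.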
*/ if (bytesleft < (PAGE_SIZE - offset_in_page(bufp))) mapbytes = bytesleft; else mapbytes = PAGE_SIZE - offset_in_page(bufp); sg_set_page(sg, virt_to_page(bufp), mapbytes, offset_in_page(bufp)); bufp += mapbytes; bytesleft -= mapbytes; dev_dbg(&pl022->adev->dev, "set RX/TX target page @ %p, %d bytes, %d left\n", bufp, mapbytes, bytesleft); } } else { /* Map the dummy buffer on every page */ for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { if (bytesleft < PAGE_SIZE) mapbytes = bytesleft; else mapbytes = PAGE_SIZE; sg_set_page(sg, virt_to_page(pl022->dummypage), mapbytes, 0); bytesleft -= mapbytes; dev_dbg(&pl022->adev->dev, "set RX/TX to dummy page %d bytes, %d left\n", mapbytes, bytesleft); } } BUG_ON(bytesleft); } /** * configure_dma - configures the channels for the next transfer * @pl022: SSP driver's private data structure */ static int configure_dma(struct pl022 *pl022) { struct dma_slave_config rx_conf = { .src_addr = SSP_DR(pl022->phybase), .direction = DMA_DEV_TO_MEM, .device_fc = false, }; struct dma_slave_config tx_conf = { .dst_addr = SSP_DR(pl022->phybase), .direction = DMA_MEM_TO_DEV, .device_fc = false, }; unsigned int pages; int ret; int rx_sglen, tx_sglen; struct dma_chan *rxchan = pl022->dma_rx_channel; struct dma_chan *txchan = pl022->dma_tx_channel; struct dma_async_tx_descriptor *rxdesc; struct dma_async_tx_descriptor *txdesc; /* Check that the channels are available */ if (!rxchan || !txchan) return -ENODEV; /* * If supplied, the DMA burstsize should equal the FIFO trigger level. * Notice that the DMA engine uses one-to-one mapping. Since we can * not trigger on 2 elements this needs explicit mapping rather than * calculation. */ switch (pl022->rx_lev_trig) { case SSP_RX_1_OR_MORE_ELEM: rx_conf.src_maxburst = 1; break; case SSP_RX_4_OR_MORE_ELEM: rx_conf.src_maxburst = 4; break; case SSP_RX_8_OR_MORE_ELEM: rx_conf.src_maxburst = 8; break; case SSP_RX_16_OR_MORE_ELEM: rx_conf.src_maxburst = 16; break; case SSP_RX_32_OR_MORE_ELEM: rx_conf.src_maxburst = 32; break; default: rx_conf.src_maxburst = pl022->vendor->fifodepth >> 1; break; } switch (pl022->tx_lev_trig) { case SSP_TX_1_OR_MORE_EMPTY_LOC: tx_conf.dst_maxburst = 1; break; case SSP_TX_4_OR_MORE_EMPTY_LOC: tx_conf.dst_maxburst = 4; break; case SSP_TX_8_OR_MORE_EMPTY_LOC: tx_conf.dst_maxburst = 8; break; case SSP_TX_16_OR_MORE_EMPTY_LOC: tx_conf.dst_maxburst = 16; break; case SSP_TX_32_OR_MORE_EMPTY_LOC: tx_conf.dst_maxburst = 32; break; default: tx_conf.dst_maxburst = pl022->vendor->fifodepth >> 1; break; } switch (pl022->read) { case READING_NULL: /* Use the same as for writing */ rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; break; case READING_U8: rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; break; case READING_U16: rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; break; case READING_U32: rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; break; } switch (pl022->write) { case WRITING_NULL: /* Use the same as for reading */ tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; break; case WRITING_U8: tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; break; case WRITING_U16: tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; break; case WRITING_U32: tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; break; } /* SPI pecularity: we need to read and write the same width */ if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) rx_conf.src_addr_width = tx_conf.dst_addr_width; if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) tx_conf.dst_addr_width = rx_conf.src_addr_width; 
BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width); dmaengine_slave_config(rxchan, &rx_conf); dmaengine_slave_config(txchan, &tx_conf); /* Create sglists for the transfers */ pages = DIV_ROUND_UP(pl022->cur_transfer->len, PAGE_SIZE); dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages); ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_ATOMIC); if (ret) goto err_alloc_rx_sg; ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_ATOMIC); if (ret) goto err_alloc_tx_sg; /* Fill in the scatterlists for the RX+TX buffers */ setup_dma_scatter(pl022, pl022->rx, pl022->cur_transfer->len, &pl022->sgt_rx); setup_dma_scatter(pl022, pl022->tx, pl022->cur_transfer->len, &pl022->sgt_tx); /* Map DMA buffers */ rx_sglen = dma_map_sg(rxchan->device->dev, pl022->sgt_rx.sgl, pl022->sgt_rx.nents, DMA_FROM_DEVICE); if (!rx_sglen) goto err_rx_sgmap; tx_sglen = dma_map_sg(txchan->device->dev, pl022->sgt_tx.sgl, pl022->sgt_tx.nents, DMA_TO_DEVICE); if (!tx_sglen) goto err_tx_sgmap; /* Send both scatterlists */ rxdesc = dmaengine_prep_slave_sg(rxchan, pl022->sgt_rx.sgl, rx_sglen, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!rxdesc) goto err_rxdesc; txdesc = dmaengine_prep_slave_sg(txchan, pl022->sgt_tx.sgl, tx_sglen, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!txdesc) goto err_txdesc; /* Put the callback on the RX transfer only, that should finish last */ rxdesc->callback = dma_callback; rxdesc->callback_param = pl022; /* Submit and fire RX and TX with TX last so we're ready to read! */ dmaengine_submit(rxdesc); dmaengine_submit(txdesc); dma_async_issue_pending(rxchan); dma_async_issue_pending(txchan); pl022->dma_running = true; return 0; err_txdesc: dmaengine_terminate_all(txchan); err_rxdesc: dmaengine_terminate_all(rxchan); dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl, pl022->sgt_tx.nents, DMA_TO_DEVICE); err_tx_sgmap: dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl, pl022->sgt_tx.nents, DMA_FROM_DEVICE); err_rx_sgmap: sg_free_table(&pl022->sgt_tx); err_alloc_tx_sg: sg_free_table(&pl022->sgt_rx); err_alloc_rx_sg: return -ENOMEM; } static int __devinit pl022_dma_probe(struct pl022 *pl022) { dma_cap_mask_t mask; /* Try to acquire a generic DMA engine slave channel */ dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); /* * We need both RX and TX channels to do DMA, else do none * of them. 
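 *
 * The filter callback and its parameters come from the platform's
 * struct pl022_ssp_controller. A board-supplied filter has the
 * standard dmaengine shape; a sketch with invented names, claiming
 * one specific channel, might look like:
 *
 *	static bool board_dma_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->chan_id == (uintptr_t)param;
 *	}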
*/ pl022->dma_rx_channel = dma_request_channel(mask, pl022->master_info->dma_filter, pl022->master_info->dma_rx_param); if (!pl022->dma_rx_channel) { dev_dbg(&pl022->adev->dev, "no RX DMA channel!\n"); goto err_no_rxchan; } pl022->dma_tx_channel = dma_request_channel(mask, pl022->master_info->dma_filter, pl022->master_info->dma_tx_param); if (!pl022->dma_tx_channel) { dev_dbg(&pl022->adev->dev, "no TX DMA channel!\n"); goto err_no_txchan; } pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!pl022->dummypage) { dev_dbg(&pl022->adev->dev, "no DMA dummypage!\n"); goto err_no_dummypage; } dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n", dma_chan_name(pl022->dma_rx_channel), dma_chan_name(pl022->dma_tx_channel)); return 0; err_no_dummypage: dma_release_channel(pl022->dma_tx_channel); err_no_txchan: dma_release_channel(pl022->dma_rx_channel); pl022->dma_rx_channel = NULL; err_no_rxchan: dev_err(&pl022->adev->dev, "Failed to work in dma mode, work without dma!\n"); return -ENODEV; } static void terminate_dma(struct pl022 *pl022) { struct dma_chan *rxchan = pl022->dma_rx_channel; struct dma_chan *txchan = pl022->dma_tx_channel; dmaengine_terminate_all(rxchan); dmaengine_terminate_all(txchan); unmap_free_dma_scatter(pl022); pl022->dma_running = false; } static void pl022_dma_remove(struct pl022 *pl022) { if (pl022->dma_running) terminate_dma(pl022); if (pl022->dma_tx_channel) dma_release_channel(pl022->dma_tx_channel); if (pl022->dma_rx_channel) dma_release_channel(pl022->dma_rx_channel); kfree(pl022->dummypage); } #else static inline int configure_dma(struct pl022 *pl022) { return -ENODEV; } static inline int pl022_dma_probe(struct pl022 *pl022) { return 0; } static inline void pl022_dma_remove(struct pl022 *pl022) { } #endif /** * pl022_interrupt_handler - Interrupt handler for SSP controller * * This function handles interrupts generated for an interrupt based transfer. * If a receive overrun (ROR) interrupt is there then we disable SSP, flag the * current message's state as STATE_ERROR and schedule the tasklet * pump_transfers which will do the postprocessing of the current message by * calling giveback(). Otherwise it reads data from RX FIFO till there is no * more data, and writes data in TX FIFO till it is not full. If we complete * the transfer we move to the next transfer and schedule the tasklet. */ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) { struct pl022 *pl022 = dev_id; struct spi_message *msg = pl022->cur_msg; u16 irq_status = 0; u16 flag = 0; if (unlikely(!msg)) { dev_err(&pl022->adev->dev, "bad message state in interrupt handler"); /* Never fail */ return IRQ_HANDLED; } /* Read the Interrupt Status Register */ irq_status = readw(SSP_MIS(pl022->virtbase)); if (unlikely(!irq_status)) return IRQ_NONE; /* * This handles the FIFO interrupts, the timeout * interrupts are flatly ignored, they cannot be * trusted. */ if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) { /* * Overrun interrupt - bail out since our Data has been * corrupted */ dev_err(&pl022->adev->dev, "FIFO overrun\n"); if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF) dev_err(&pl022->adev->dev, "RXFIFO is full\n"); if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF) dev_err(&pl022->adev->dev, "TXFIFO is full\n"); /* * Disable and clear interrupts, disable SSP, * mark message with bad status so it can be * retried. 
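 *
 * Clearing SSP_CR1_MASK_SSE below disables the whole block, not
 * just the interrupt sources; pump_transfers() then sees
 * STATE_ERROR and completes the message with -EIO.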
*/ writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); msg->state = STATE_ERROR; /* Schedule message queue handler */ tasklet_schedule(&pl022->pump_transfers); return IRQ_HANDLED; } readwriter(pl022); if ((pl022->tx == pl022->tx_end) && (flag == 0)) { flag = 1; /* Disable Transmit interrupt, enable receive interrupt */ writew((readw(SSP_IMSC(pl022->virtbase)) & ~SSP_IMSC_MASK_TXIM) | SSP_IMSC_MASK_RXIM, SSP_IMSC(pl022->virtbase)); } /* * Since all transactions must write as much as shall be read, * we can conclude the entire transaction once RX is complete. * At this point, all TX will always be finished. */ if (pl022->rx >= pl022->rx_end) { writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); if (unlikely(pl022->rx > pl022->rx_end)) { dev_warn(&pl022->adev->dev, "read %u surplus " "bytes (did you request an odd " "number of bytes on a 16bit bus?)\n", (u32) (pl022->rx - pl022->rx_end)); } /* Update total bytes transferred */ msg->actual_length += pl022->cur_transfer->len; if (pl022->cur_transfer->cs_change) pl022->cur_chip-> cs_control(SSP_CHIP_DESELECT); /* Move to next transfer */ msg->state = next_transfer(pl022); tasklet_schedule(&pl022->pump_transfers); return IRQ_HANDLED; } return IRQ_HANDLED; } /** * This sets up the pointers to memory for the next message to * send out on the SPI bus. */ static int set_up_next_transfer(struct pl022 *pl022, struct spi_transfer *transfer) { int residue; /* Sanity check the message for this bus width */ residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes; if (unlikely(residue != 0)) { dev_err(&pl022->adev->dev, "message of %u bytes to transmit but the current " "chip bus has a data width of %u bytes!\n", pl022->cur_transfer->len, pl022->cur_chip->n_bytes); dev_err(&pl022->adev->dev, "skipping this message\n"); return -EIO; } pl022->tx = (void *)transfer->tx_buf; pl022->tx_end = pl022->tx + pl022->cur_transfer->len; pl022->rx = (void *)transfer->rx_buf; pl022->rx_end = pl022->rx + pl022->cur_transfer->len; pl022->write = pl022->tx ? pl022->cur_chip->write : WRITING_NULL; pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL; return 0; } /** * pump_transfers - Tasklet function which schedules next transfer * when running in interrupt or DMA transfer mode. * @data: SSP driver private data structure * */ static void pump_transfers(unsigned long data) { struct pl022 *pl022 = (struct pl022 *) data; struct spi_message *message = NULL; struct spi_transfer *transfer = NULL; struct spi_transfer *previous = NULL; /* Get current state information */ message = pl022->cur_msg; transfer = pl022->cur_transfer; /* Handle for abort */ if (message->state == STATE_ERROR) { message->status = -EIO; giveback(pl022); return; } /* Handle end of message */ if (message->state == STATE_DONE) { message->status = 0; giveback(pl022); return; } /* Delay if requested at end of transfer before CS change */ if (message->state == STATE_RUNNING) { previous = list_entry(transfer->transfer_list.prev, struct spi_transfer, transfer_list); if (previous->delay_usecs) /* * FIXME: This runs in interrupt context. * Is this really smart? 
*/ udelay(previous->delay_usecs); /* Reselect chip select only if cs_change was requested */ if (previous->cs_change) pl022->cur_chip->cs_control(SSP_CHIP_SELECT); } else { /* STATE_START */ message->state = STATE_RUNNING; } if (set_up_next_transfer(pl022, transfer)) { message->state = STATE_ERROR; message->status = -EIO; giveback(pl022); return; } /* Flush the FIFOs and let's go! */ flush(pl022); if (pl022->cur_chip->enable_dma) { if (configure_dma(pl022)) { dev_dbg(&pl022->adev->dev, "configuration of DMA failed, fall back to interrupt mode\n"); goto err_config_dma; } return; } err_config_dma: /* enable all interrupts except RX */ writew(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM, SSP_IMSC(pl022->virtbase)); } static void do_interrupt_dma_transfer(struct pl022 *pl022) { /* * Default is to enable all interrupts except RX - * this will be enabled once TX is complete */ u32 irqflags = ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM; /* Enable target chip, if not already active */ if (!pl022->next_msg_cs_active) pl022->cur_chip->cs_control(SSP_CHIP_SELECT); if (set_up_next_transfer(pl022, pl022->cur_transfer)) { /* Error path */ pl022->cur_msg->state = STATE_ERROR; pl022->cur_msg->status = -EIO; giveback(pl022); return; } /* If we're using DMA, set up DMA here */ if (pl022->cur_chip->enable_dma) { /* Configure DMA transfer */ if (configure_dma(pl022)) { dev_dbg(&pl022->adev->dev, "configuration of DMA failed, fall back to interrupt mode\n"); goto err_config_dma; } /* Disable interrupts in DMA mode, IRQ from DMA controller */ irqflags = DISABLE_ALL_INTERRUPTS; } err_config_dma: /* Enable SSP, turn on interrupts */ writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), SSP_CR1(pl022->virtbase)); writew(irqflags, SSP_IMSC(pl022->virtbase)); } static void do_polling_transfer(struct pl022 *pl022) { struct spi_message *message = NULL; struct spi_transfer *transfer = NULL; struct spi_transfer *previous = NULL; struct chip_data *chip; unsigned long time, timeout; chip = pl022->cur_chip; message = pl022->cur_msg; while (message->state != STATE_DONE) { /* Handle for abort */ if (message->state == STATE_ERROR) break; transfer = pl022->cur_transfer; /* Delay if requested at end of transfer */ if (message->state == STATE_RUNNING) { previous = list_entry(transfer->transfer_list.prev, struct spi_transfer, transfer_list); if (previous->delay_usecs) udelay(previous->delay_usecs); if (previous->cs_change) pl022->cur_chip->cs_control(SSP_CHIP_SELECT); } else { /* STATE_START */ message->state = STATE_RUNNING; if (!pl022->next_msg_cs_active) pl022->cur_chip->cs_control(SSP_CHIP_SELECT); } /* Configuration Changing Per Transfer */ if (set_up_next_transfer(pl022, transfer)) { /* Error path */ message->state = STATE_ERROR; break; } /* Flush FIFOs and enable SSP */ flush(pl022); writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), SSP_CR1(pl022->virtbase)); dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n"); timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT); while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) { time = jiffies; readwriter(pl022); if (time_after(time, timeout)) { dev_warn(&pl022->adev->dev, "%s: timeout!\n", __func__); message->state = STATE_ERROR; goto out; } cpu_relax(); } /* Update total byte transferred */ message->actual_length += pl022->cur_transfer->len; if (pl022->cur_transfer->cs_change) pl022->cur_chip->cs_control(SSP_CHIP_DESELECT); /* Move to next transfer */ message->state = next_transfer(pl022); } out: /* Handle end of message */ if 
(message->state == STATE_DONE) message->status = 0; else message->status = -EIO; giveback(pl022); return; } static int pl022_transfer_one_message(struct spi_master *master, struct spi_message *msg) { struct pl022 *pl022 = spi_master_get_devdata(master); /* Initial message state */ pl022->cur_msg = msg; msg->state = STATE_START; pl022->cur_transfer = list_entry(msg->transfers.next, struct spi_transfer, transfer_list); /* Setup the SPI using the per chip configuration */ pl022->cur_chip = spi_get_ctldata(msg->spi); restore_state(pl022); flush(pl022); if (pl022->cur_chip->xfer_type == POLLING_TRANSFER) do_polling_transfer(pl022); else do_interrupt_dma_transfer(pl022); return 0; } static int pl022_prepare_transfer_hardware(struct spi_master *master) { struct pl022 *pl022 = spi_master_get_devdata(master); /* * Just make sure we have all we need to run the transfer by syncing * with the runtime PM framework. */ pm_runtime_get_sync(&pl022->adev->dev); return 0; } static int pl022_unprepare_transfer_hardware(struct spi_master *master) { struct pl022 *pl022 = spi_master_get_devdata(master); /* nothing more to do - disable spi/ssp and power off */ writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); if (pl022->master_info->autosuspend_delay > 0) { pm_runtime_mark_last_busy(&pl022->adev->dev); pm_runtime_put_autosuspend(&pl022->adev->dev); } else { pm_runtime_put(&pl022->adev->dev); } return 0; } static int verify_controller_parameters(struct pl022 *pl022, struct pl022_config_chip const *chip_info) { if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI) || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) { dev_err(&pl022->adev->dev, "interface is configured incorrectly\n"); return -EINVAL; } if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) && (!pl022->vendor->unidir)) { dev_err(&pl022->adev->dev, "unidirectional mode not supported in this " "hardware version\n"); return -EINVAL; } if ((chip_info->hierarchy != SSP_MASTER) && (chip_info->hierarchy != SSP_SLAVE)) { dev_err(&pl022->adev->dev, "hierarchy is configured incorrectly\n"); return -EINVAL; } if ((chip_info->com_mode != INTERRUPT_TRANSFER) && (chip_info->com_mode != DMA_TRANSFER) && (chip_info->com_mode != POLLING_TRANSFER)) { dev_err(&pl022->adev->dev, "Communication mode is configured incorrectly\n"); return -EINVAL; } switch (chip_info->rx_lev_trig) { case SSP_RX_1_OR_MORE_ELEM: case SSP_RX_4_OR_MORE_ELEM: case SSP_RX_8_OR_MORE_ELEM: /* These are always OK, all variants can handle this */ break; case SSP_RX_16_OR_MORE_ELEM: if (pl022->vendor->fifodepth < 16) { dev_err(&pl022->adev->dev, "RX FIFO Trigger Level is configured incorrectly\n"); return -EINVAL; } break; case SSP_RX_32_OR_MORE_ELEM: if (pl022->vendor->fifodepth < 32) { dev_err(&pl022->adev->dev, "RX FIFO Trigger Level is configured incorrectly\n"); return -EINVAL; } break; default: dev_err(&pl022->adev->dev, "RX FIFO Trigger Level is configured incorrectly\n"); return -EINVAL; break; } switch (chip_info->tx_lev_trig) { case SSP_TX_1_OR_MORE_EMPTY_LOC: case SSP_TX_4_OR_MORE_EMPTY_LOC: case SSP_TX_8_OR_MORE_EMPTY_LOC: /* These are always OK, all variants can handle this */ break; case SSP_TX_16_OR_MORE_EMPTY_LOC: if (pl022->vendor->fifodepth < 16) { dev_err(&pl022->adev->dev, "TX FIFO Trigger Level is configured incorrectly\n"); return -EINVAL; } break; case SSP_TX_32_OR_MORE_EMPTY_LOC: if (pl022->vendor->fifodepth < 32) { dev_err(&pl022->adev->dev, "TX FIFO Trigger Level is configured incorrectly\n"); return -EINVAL; } break; default: 
dev_err(&pl022->adev->dev, "TX FIFO Trigger Level is configured incorrectly\n"); return -EINVAL; break; } if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) { if ((chip_info->ctrl_len < SSP_BITS_4) || (chip_info->ctrl_len > SSP_BITS_32)) { dev_err(&pl022->adev->dev, "CTRL LEN is configured incorrectly\n"); return -EINVAL; } if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO) && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) { dev_err(&pl022->adev->dev, "Wait State is configured incorrectly\n"); return -EINVAL; } /* Half duplex is only available in the ST Micro version */ if (pl022->vendor->extended_cr) { if ((chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) && (chip_info->duplex != SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) { dev_err(&pl022->adev->dev, "Microwire duplex mode is configured incorrectly\n"); return -EINVAL; } } else { if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) dev_err(&pl022->adev->dev, "Microwire half duplex mode requested," " but this is only available in the" " ST version of PL022\n"); return -EINVAL; } } return 0; } static inline u32 spi_rate(u32 rate, u16 cpsdvsr, u16 scr) { return rate / (cpsdvsr * (1 + scr)); } static int calculate_effective_freq(struct pl022 *pl022, int freq, struct ssp_clock_params * clk_freq) { /* Lets calculate the frequency parameters */ u16 cpsdvsr = CPSDVR_MIN, scr = SCR_MIN; u32 rate, max_tclk, min_tclk, best_freq = 0, best_cpsdvsr = 0, best_scr = 0, tmp, found = 0; rate = clk_get_rate(pl022->clk); /* cpsdvscr = 2 & scr 0 */ max_tclk = spi_rate(rate, CPSDVR_MIN, SCR_MIN); /* cpsdvsr = 254 & scr = 255 */ min_tclk = spi_rate(rate, CPSDVR_MAX, SCR_MAX); if (freq > max_tclk) dev_warn(&pl022->adev->dev, "Max speed that can be programmed is %d Hz, you requested %d\n", max_tclk, freq); if (freq < min_tclk) { dev_err(&pl022->adev->dev, "Requested frequency: %d Hz is less than minimum possible %d Hz\n", freq, min_tclk); return -EINVAL; } /* * best_freq will give closest possible available rate (<= requested * freq) for all values of scr & cpsdvsr. */ while ((cpsdvsr <= CPSDVR_MAX) && !found) { while (scr <= SCR_MAX) { tmp = spi_rate(rate, cpsdvsr, scr); if (tmp > freq) { /* we need lower freq */ scr++; continue; } /* * If found exact value, mark found and break. * If found more closer value, update and break. */ if (tmp > best_freq) { best_freq = tmp; best_cpsdvsr = cpsdvsr; best_scr = scr; if (tmp == freq) found = 1; } /* * increased scr will give lower rates, which are not * required */ break; } cpsdvsr += 2; scr = SCR_MIN; } WARN(!best_freq, "pl022: Matching cpsdvsr and scr not found for %d Hz rate \n", freq); clk_freq->cpsdvsr = (u8) (best_cpsdvsr & 0xFF); clk_freq->scr = (u8) (best_scr & 0xFF); dev_dbg(&pl022->adev->dev, "SSP Target Frequency is: %u, Effective Frequency is %u\n", freq, best_freq); dev_dbg(&pl022->adev->dev, "SSP cpsdvsr = %d, scr = %d\n", clk_freq->cpsdvsr, clk_freq->scr); return 0; } /* * A piece of default chip info unless the platform * supplies it. 
*/ static const struct pl022_config_chip pl022_default_chip_info = { .com_mode = POLLING_TRANSFER, .iface = SSP_INTERFACE_MOTOROLA_SPI, .hierarchy = SSP_SLAVE, .slave_tx_disable = DO_NOT_DRIVE_TX, .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM, .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC, .ctrl_len = SSP_BITS_8, .wait_state = SSP_MWIRE_WAIT_ZERO, .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, .cs_control = null_cs_control, }; /** * pl022_setup - setup function registered to SPI master framework * @spi: spi device which is requesting setup * * This function is registered to the SPI framework for this SPI master * controller. If it is the first time when setup is called by this device, * this function will initialize the runtime state for this chip and save * the same in the device structure. Else it will update the runtime info * with the updated chip info. Nothing is really being written to the * controller hardware here, that is not done until the actual transfer * commence. */ static int pl022_setup(struct spi_device *spi) { struct pl022_config_chip const *chip_info; struct chip_data *chip; struct ssp_clock_params clk_freq = { .cpsdvsr = 0, .scr = 0}; int status = 0; struct pl022 *pl022 = spi_master_get_devdata(spi->master); unsigned int bits = spi->bits_per_word; u32 tmp; if (!spi->max_speed_hz) return -EINVAL; /* Get controller_state if one is supplied */ chip = spi_get_ctldata(spi); if (chip == NULL) { chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); if (!chip) { dev_err(&spi->dev, "cannot allocate controller state\n"); return -ENOMEM; } dev_dbg(&spi->dev, "allocated memory for controller's runtime state\n"); } /* Get controller data if one is supplied */ chip_info = spi->controller_data; if (chip_info == NULL) { chip_info = &pl022_default_chip_info; /* spi_board_info.controller_data not is supplied */ dev_dbg(&spi->dev, "using default controller_data settings\n"); } else dev_dbg(&spi->dev, "using user supplied controller_data settings\n"); /* * We can override with custom divisors, else we use the board * frequency setting */ if ((0 == chip_info->clk_freq.cpsdvsr) && (0 == chip_info->clk_freq.scr)) { status = calculate_effective_freq(pl022, spi->max_speed_hz, &clk_freq); if (status < 0) goto err_config_params; } else { memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq)); if ((clk_freq.cpsdvsr % 2) != 0) clk_freq.cpsdvsr = clk_freq.cpsdvsr - 1; } if ((clk_freq.cpsdvsr < CPSDVR_MIN) || (clk_freq.cpsdvsr > CPSDVR_MAX)) { status = -EINVAL; dev_err(&spi->dev, "cpsdvsr is configured incorrectly\n"); goto err_config_params; } status = verify_controller_parameters(pl022, chip_info); if (status) { dev_err(&spi->dev, "controller data is incorrect"); goto err_config_params; } pl022->rx_lev_trig = chip_info->rx_lev_trig; pl022->tx_lev_trig = chip_info->tx_lev_trig; /* Now set controller state based on controller data */ chip->xfer_type = chip_info->com_mode; if (!chip_info->cs_control) { chip->cs_control = null_cs_control; dev_warn(&spi->dev, "chip select function is NULL for this chip\n"); } else chip->cs_control = chip_info->cs_control; /* Check bits per word with vendor specific range */ if ((bits <= 3) || (bits > pl022->vendor->max_bpw)) { status = -ENOTSUPP; dev_err(&spi->dev, "illegal data size for this controller!\n"); dev_err(&spi->dev, "This controller can only handle 4 <= n <= %d bit words\n", pl022->vendor->max_bpw); goto err_config_params; } else if (bits <= 8) { dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n"); chip->n_bytes = 1; chip->read = READING_U8; chip->write = WRITING_U8; } 
else if (bits <= 16) { dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n"); chip->n_bytes = 2; chip->read = READING_U16; chip->write = WRITING_U16; } else { dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n"); chip->n_bytes = 4; chip->read = READING_U32; chip->write = WRITING_U32; } /* Now Initialize all register settings required for this chip */ chip->cr0 = 0; chip->cr1 = 0; chip->dmacr = 0; chip->cpsr = 0; if ((chip_info->com_mode == DMA_TRANSFER) && ((pl022->master_info)->enable_dma)) { chip->enable_dma = true; dev_dbg(&spi->dev, "DMA mode set in controller state\n"); SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, SSP_DMACR_MASK_RXDMAE, 0); SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, SSP_DMACR_MASK_TXDMAE, 1); } else { chip->enable_dma = false; dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n"); SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0); SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1); } chip->cpsr = clk_freq.cpsdvsr; /* Special setup for the ST micro extended control registers */ if (pl022->vendor->extended_cr) { u32 etx; if (pl022->vendor->pl023) { /* These bits are only in the PL023 */ SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay, SSP_CR1_MASK_FBCLKDEL_ST, 13); } else { /* These bits are in the PL022 but not PL023 */ SSP_WRITE_BITS(chip->cr0, chip_info->duplex, SSP_CR0_MASK_HALFDUP_ST, 5); SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, SSP_CR0_MASK_CSS_ST, 16); SSP_WRITE_BITS(chip->cr0, chip_info->iface, SSP_CR0_MASK_FRF_ST, 21); SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, SSP_CR1_MASK_MWAIT_ST, 6); } SSP_WRITE_BITS(chip->cr0, bits - 1, SSP_CR0_MASK_DSS_ST, 0); if (spi->mode & SPI_LSB_FIRST) { tmp = SSP_RX_LSB; etx = SSP_TX_LSB; } else { tmp = SSP_RX_MSB; etx = SSP_TX_MSB; } SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4); SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5); SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, SSP_CR1_MASK_RXIFLSEL_ST, 7); SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, SSP_CR1_MASK_TXIFLSEL_ST, 10); } else { SSP_WRITE_BITS(chip->cr0, bits - 1, SSP_CR0_MASK_DSS, 0); SSP_WRITE_BITS(chip->cr0, chip_info->iface, SSP_CR0_MASK_FRF, 4); } /* Stuff that is common for all versions */ if (spi->mode & SPI_CPOL) tmp = SSP_CLK_POL_IDLE_HIGH; else tmp = SSP_CLK_POL_IDLE_LOW; SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6); if (spi->mode & SPI_CPHA) tmp = SSP_CLK_SECOND_EDGE; else tmp = SSP_CLK_FIRST_EDGE; SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7); SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8); /* Loopback is available on all versions except PL023 */ if (pl022->vendor->loopback) { if (spi->mode & SPI_LOOP) tmp = LOOPBACK_ENABLED; else tmp = LOOPBACK_DISABLED; SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0); } SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1); SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2); SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3); /* Save controller_state */ spi_set_ctldata(spi, chip); return status; err_config_params: spi_set_ctldata(spi, NULL); kfree(chip); return status; } /** * pl022_cleanup - cleanup function registered to SPI master framework * @spi: spi device which is requesting cleanup * * This function is registered to the SPI framework for this SPI master * controller. It will free the runtime state of chip. 
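 *
 * The state was allocated in pl022_setup(); since kfree(NULL) is a
 * no-op, no NULL check is needed before freeing here.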
*/ static void pl022_cleanup(struct spi_device *spi) { struct chip_data *chip = spi_get_ctldata(spi); spi_set_ctldata(spi, NULL); kfree(chip); } static int __devinit pl022_probe(struct amba_device *adev, const struct amba_id *id) { struct device *dev = &adev->dev; struct pl022_ssp_controller *platform_info = adev->dev.platform_data; struct spi_master *master; struct pl022 *pl022 = NULL; /*Data for this driver */ int status = 0; dev_info(&adev->dev, "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid); if (platform_info == NULL) { dev_err(&adev->dev, "probe - no platform data supplied\n"); status = -ENODEV; goto err_no_pdata; } /* Allocate master with space for data */ master = spi_alloc_master(dev, sizeof(struct pl022)); if (master == NULL) { dev_err(&adev->dev, "probe - cannot alloc SPI master\n"); status = -ENOMEM; goto err_no_master; } pl022 = spi_master_get_devdata(master); pl022->master = master; pl022->master_info = platform_info; pl022->adev = adev; pl022->vendor = id->data; /* * Bus Number Which has been Assigned to this SSP controller * on this board */ master->bus_num = platform_info->bus_id; master->num_chipselect = platform_info->num_chipselect; master->cleanup = pl022_cleanup; master->setup = pl022_setup; master->prepare_transfer_hardware = pl022_prepare_transfer_hardware; master->transfer_one_message = pl022_transfer_one_message; master->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware; master->rt = platform_info->rt; /* * Supports mode 0-3, loopback, and active low CS. Transfers are * always MS bit first on the original pl022. */ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; if (pl022->vendor->extended_cr) master->mode_bits |= SPI_LSB_FIRST; dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num); status = amba_request_regions(adev, NULL); if (status) goto err_no_ioregion; pl022->phybase = adev->res.start; pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res)); if (pl022->virtbase == NULL) { status = -ENOMEM; goto err_no_ioremap; } printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n", adev->res.start, pl022->virtbase); pl022->clk = clk_get(&adev->dev, NULL); if (IS_ERR(pl022->clk)) { status = PTR_ERR(pl022->clk); dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n"); goto err_no_clk; } status = clk_prepare(pl022->clk); if (status) { dev_err(&adev->dev, "could not prepare SSP/SPI bus clock\n"); goto err_clk_prep; } status = clk_enable(pl022->clk); if (status) { dev_err(&adev->dev, "could not enable SSP/SPI bus clock\n"); goto err_no_clk_en; } /* Initialize transfer pump */ tasklet_init(&pl022->pump_transfers, pump_transfers, (unsigned long)pl022); /* Disable SSP */ writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); load_ssp_default_config(pl022); status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022", pl022); if (status < 0) { dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status); goto err_no_irq; } /* Get DMA channels */ if (platform_info->enable_dma) { status = pl022_dma_probe(pl022); if (status != 0) platform_info->enable_dma = 0; } /* Register with the SPI framework */ amba_set_drvdata(adev, pl022); status = spi_register_master(master); if (status != 0) { dev_err(&adev->dev, "probe - problem registering spi master\n"); goto err_spi_register; } dev_dbg(dev, "probe succeeded\n"); /* let runtime pm put suspend */ if (platform_info->autosuspend_delay > 0) { dev_info(&adev->dev, "will use autosuspend for runtime pm, delay %dms\n", 
platform_info->autosuspend_delay); pm_runtime_set_autosuspend_delay(dev, platform_info->autosuspend_delay); pm_runtime_use_autosuspend(dev); pm_runtime_put_autosuspend(dev); } else { pm_runtime_put(dev); } return 0; err_spi_register: if (platform_info->enable_dma) pl022_dma_remove(pl022); free_irq(adev->irq[0], pl022); err_no_irq: clk_disable(pl022->clk); err_no_clk_en: clk_unprepare(pl022->clk); err_clk_prep: clk_put(pl022->clk); err_no_clk: iounmap(pl022->virtbase); err_no_ioremap: amba_release_regions(adev); err_no_ioregion: spi_master_put(master); err_no_master: err_no_pdata: return status; } static int __devexit pl022_remove(struct amba_device *adev) { struct pl022 *pl022 = amba_get_drvdata(adev); if (!pl022) return 0; /* * undo pm_runtime_put() in probe. I assume that we're not * accessing the primecell here. */ pm_runtime_get_noresume(&adev->dev); load_ssp_default_config(pl022); if (pl022->master_info->enable_dma) pl022_dma_remove(pl022); free_irq(adev->irq[0], pl022); clk_disable(pl022->clk); clk_unprepare(pl022->clk); clk_put(pl022->clk); iounmap(pl022->virtbase); amba_release_regions(adev); tasklet_disable(&pl022->pump_transfers); spi_unregister_master(pl022->master); spi_master_put(pl022->master); amba_set_drvdata(adev, NULL); return 0; } #ifdef CONFIG_SUSPEND static int pl022_suspend(struct device *dev) { struct pl022 *pl022 = dev_get_drvdata(dev); int ret; ret = spi_master_suspend(pl022->master); if (ret) { dev_warn(dev, "cannot suspend master\n"); return ret; } dev_dbg(dev, "suspended\n"); return 0; } static int pl022_resume(struct device *dev) { struct pl022 *pl022 = dev_get_drvdata(dev); int ret; /* Start the queue running */ ret = spi_master_resume(pl022->master); if (ret) dev_err(dev, "problem starting queue (%d)\n", ret); else dev_dbg(dev, "resumed\n"); return ret; } #endif /* CONFIG_PM */ #ifdef CONFIG_PM_RUNTIME static int pl022_runtime_suspend(struct device *dev) { struct pl022 *pl022 = dev_get_drvdata(dev); clk_disable(pl022->clk); return 0; } static int pl022_runtime_resume(struct device *dev) { struct pl022 *pl022 = dev_get_drvdata(dev); clk_enable(pl022->clk); return 0; } #endif static const struct dev_pm_ops pl022_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(pl022_suspend, pl022_resume) SET_RUNTIME_PM_OPS(pl022_runtime_suspend, pl022_runtime_resume, NULL) }; static struct vendor_data vendor_arm = { .fifodepth = 8, .max_bpw = 16, .unidir = false, .extended_cr = false, .pl023 = false, .loopback = true, }; static struct vendor_data vendor_st = { .fifodepth = 32, .max_bpw = 32, .unidir = false, .extended_cr = true, .pl023 = false, .loopback = true, }; static struct vendor_data vendor_st_pl023 = { .fifodepth = 32, .max_bpw = 32, .unidir = false, .extended_cr = true, .pl023 = true, .loopback = false, }; static struct vendor_data vendor_db5500_pl023 = { .fifodepth = 32, .max_bpw = 32, .unidir = false, .extended_cr = true, .pl023 = true, .loopback = true, }; static struct amba_id pl022_ids[] = { { /* * ARM PL022 variant, this has a 16bit wide * and 8 locations deep TX/RX FIFO */ .id = 0x00041022, .mask = 0x000fffff, .data = &vendor_arm, }, { /* * ST Micro derivative, this has 32bit wide * and 32 locations deep TX/RX FIFO */ .id = 0x01080022, .mask = 0xffffffff, .data = &vendor_st, }, { /* * ST-Ericsson derivative "PL023" (this is not * an official ARM number), this is a PL022 SSP block * stripped to SPI mode only, it has 32bit wide * and 32 locations deep TX/RX FIFO but no extended * CR0/CR1 register */ .id = 0x00080023, .mask = 0xffffffff, .data = &vendor_st_pl023, }, { .id = 
0x10080023, .mask = 0xffffffff, .data = &vendor_db5500_pl023, }, { 0, 0 }, }; MODULE_DEVICE_TABLE(amba, pl022_ids); static struct amba_driver pl022_driver = { .drv = { .name = "ssp-pl022", .pm = &pl022_dev_pm_ops, }, .id_table = pl022_ids, .probe = pl022_probe, .remove = __devexit_p(pl022_remove), }; static int __init pl022_init(void) { return amba_driver_register(&pl022_driver); } subsys_initcall(pl022_init); static void __exit pl022_exit(void) { amba_driver_unregister(&pl022_driver); } module_exit(pl022_exit); MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>"); MODULE_DESCRIPTION("PL022 SSP Controller Driver"); MODULE_LICENSE("GPL");
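/*
 * Illustrative appendix, not part of the driver: the cpsdvsr/scr search in
 * calculate_effective_freq() above can be reproduced as a stand-alone
 * user-space program. The 48 MHz SSPCLK is just an assumed example figure;
 * rate = clk / (cpsdvsr * (1 + scr)) with even cpsdvsr in [2, 254] and scr
 * in [0, 255], picking the highest rate that does not exceed the request.
 * This exhaustive version yields the same answer as the driver's
 * early-exit loop.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int clk = 48000000;	/* assumed SSPCLK in Hz */
	const unsigned int want = 1000000;	/* requested SPI rate */
	unsigned int cpsdvsr, scr, rate;
	unsigned int best = 0, best_cpsdvsr = 0, best_scr = 0;

	for (cpsdvsr = 2; cpsdvsr <= 254; cpsdvsr += 2)
		for (scr = 0; scr <= 255; scr++) {
			rate = clk / (cpsdvsr * (1 + scr));
			if (rate <= want && rate > best) {
				best = rate;
				best_cpsdvsr = cpsdvsr;
				best_scr = scr;
			}
		}
	/* prints: best 1000000 Hz with cpsdvsr=2 scr=23 */
	printf("best %u Hz with cpsdvsr=%u scr=%u\n",
	       best, best_cpsdvsr, best_scr);
	return 0;
}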
gpl-2.0
Abhinav1997/kernel_sony_msm8930
arch/ia64/kvm/kvm-ia64.c
4382
45694
/* * kvm_ia64.c: Basic KVM suppport On Itanium series processors * * * Copyright (C) 2007, Intel Corporation. * Xiantao Zhang (xiantao.zhang@intel.com) * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include <linux/module.h> #include <linux/errno.h> #include <linux/percpu.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/smp.h> #include <linux/kvm_host.h> #include <linux/kvm.h> #include <linux/bitops.h> #include <linux/hrtimer.h> #include <linux/uaccess.h> #include <linux/iommu.h> #include <linux/intel-iommu.h> #include <linux/pci.h> #include <asm/pgtable.h> #include <asm/gcc_intrin.h> #include <asm/pal.h> #include <asm/cacheflush.h> #include <asm/div64.h> #include <asm/tlb.h> #include <asm/elf.h> #include <asm/sn/addrs.h> #include <asm/sn/clksupport.h> #include <asm/sn/shub_mmr.h> #include "misc.h" #include "vti.h" #include "iodev.h" #include "ioapic.h" #include "lapic.h" #include "irq.h" static unsigned long kvm_vmm_base; static unsigned long kvm_vsa_base; static unsigned long kvm_vm_buffer; static unsigned long kvm_vm_buffer_size; unsigned long kvm_vmm_gp; static long vp_env_info; static struct kvm_vmm_info *kvm_vmm_info; static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu); struct kvm_stats_debugfs_item debugfs_entries[] = { { NULL } }; static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu) { #if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) if (vcpu->kvm->arch.is_sn2) return rtc_time(); else #endif return ia64_getreg(_IA64_REG_AR_ITC); } static void kvm_flush_icache(unsigned long start, unsigned long len) { int l; for (l = 0; l < (len + 32); l += 32) ia64_fc((void *)(start + l)); ia64_sync_i(); ia64_srlz_i(); } static void kvm_flush_tlb_all(void) { unsigned long i, j, count0, count1, stride0, stride1, addr; long flags; addr = local_cpu_data->ptce_base; count0 = local_cpu_data->ptce_count[0]; count1 = local_cpu_data->ptce_count[1]; stride0 = local_cpu_data->ptce_stride[0]; stride1 = local_cpu_data->ptce_stride[1]; local_irq_save(flags); for (i = 0; i < count0; ++i) { for (j = 0; j < count1; ++j) { ia64_ptce(addr); addr += stride1; } addr += stride0; } local_irq_restore(flags); ia64_srlz_i(); /* srlz.i implies srlz.d */ } long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler) { struct ia64_pal_retval iprv; PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva, (u64)opt_handler); return iprv.status; } static DEFINE_SPINLOCK(vp_lock); int kvm_arch_hardware_enable(void *garbage) { long status; long tmp_base; unsigned long pte; unsigned long saved_psr; int slot; pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL)); local_irq_save(saved_psr); slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); local_irq_restore(saved_psr); if (slot < 0) return -EINVAL; spin_lock(&vp_lock); status = ia64_pal_vp_init_env(kvm_vsa_base ? 
VP_INIT_ENV : VP_INIT_ENV_INITALIZE, __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); if (status != 0) { spin_unlock(&vp_lock); printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n"); return -EINVAL; } if (!kvm_vsa_base) { kvm_vsa_base = tmp_base; printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base); } spin_unlock(&vp_lock); ia64_ptr_entry(0x3, slot); return 0; } void kvm_arch_hardware_disable(void *garbage) { long status; int slot; unsigned long pte; unsigned long saved_psr; unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA); pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL)); local_irq_save(saved_psr); slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); local_irq_restore(saved_psr); if (slot < 0) return; status = ia64_pal_vp_exit_env(host_iva); if (status) printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n", status); ia64_ptr_entry(0x3, slot); } void kvm_arch_check_processor_compat(void *rtn) { *(int *)rtn = 0; } int kvm_dev_ioctl_check_extension(long ext) { int r; switch (ext) { case KVM_CAP_IRQCHIP: case KVM_CAP_MP_STATE: case KVM_CAP_IRQ_INJECT_STATUS: r = 1; break; case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; case KVM_CAP_IOMMU: r = iommu_present(&pci_bus_type); break; default: r = 0; } return r; } static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { kvm_run->exit_reason = KVM_EXIT_UNKNOWN; kvm_run->hw.hardware_exit_reason = 1; return 0; } static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct kvm_mmio_req *p; struct kvm_io_device *mmio_dev; int r; p = kvm_get_vcpu_ioreq(vcpu); if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS) goto mmio; vcpu->mmio_needed = 1; vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr; vcpu->mmio_size = kvm_run->mmio.len = p->size; vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir; if (vcpu->mmio_is_write) memcpy(vcpu->mmio_data, &p->data, p->size); memcpy(kvm_run->mmio.data, &p->data, p->size); kvm_run->exit_reason = KVM_EXIT_MMIO; return 0; mmio: if (p->dir) r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, p->addr, p->size, &p->data); else r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, p->addr, p->size, &p->data); if (r) printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr); p->state = STATE_IORESP_READY; return 1; } static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct exit_ctl_data *p; p = kvm_get_exit_data(vcpu); if (p->exit_reason == EXIT_REASON_PAL_CALL) return kvm_pal_emul(vcpu, kvm_run); else { kvm_run->exit_reason = KVM_EXIT_UNKNOWN; kvm_run->hw.hardware_exit_reason = 2; return 0; } } static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct exit_ctl_data *p; p = kvm_get_exit_data(vcpu); if (p->exit_reason == EXIT_REASON_SAL_CALL) { kvm_sal_emul(vcpu); return 1; } else { kvm_run->exit_reason = KVM_EXIT_UNKNOWN; kvm_run->hw.hardware_exit_reason = 3; return 0; } } static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector) { struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); if (!test_and_set_bit(vector, &vpd->irr[0])) { vcpu->arch.irq_new_pending = 1; kvm_vcpu_kick(vcpu); return 1; } return 0; } /* * offset: address offset to IPI space. * value: deliver value. 
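 *
 * The delivery mode (dm) decoded below maps onto fixed vectors for
 * the special cases: SAPIC_FIXED keeps the caller's vector,
 * SAPIC_NMI is forced to vector 2 and SAPIC_EXTINT to vector 0,
 * while SAPIC_INIT and SAPIC_PMI are rejected as unimplemented.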
*/ static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm, uint64_t vector) { switch (dm) { case SAPIC_FIXED: break; case SAPIC_NMI: vector = 2; break; case SAPIC_EXTINT: vector = 0; break; case SAPIC_INIT: case SAPIC_PMI: default: printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n"); return; } __apic_accept_irq(vcpu, vector); } static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id, unsigned long eid) { union ia64_lid lid; int i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) { lid.val = VCPU_LID(vcpu); if (lid.id == id && lid.eid == eid) return vcpu; } return NULL; } static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct exit_ctl_data *p = kvm_get_exit_data(vcpu); struct kvm_vcpu *target_vcpu; struct kvm_pt_regs *regs; union ia64_ipi_a addr = p->u.ipi_data.addr; union ia64_ipi_d data = p->u.ipi_data.data; target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid); if (!target_vcpu) return handle_vm_error(vcpu, kvm_run); if (!target_vcpu->arch.launched) { regs = vcpu_regs(target_vcpu); regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip; regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp; target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; if (waitqueue_active(&target_vcpu->wq)) wake_up_interruptible(&target_vcpu->wq); } else { vcpu_deliver_ipi(target_vcpu, data.dm, data.vector); if (target_vcpu != vcpu) kvm_vcpu_kick(target_vcpu); } return 1; } struct call_data { struct kvm_ptc_g ptc_g_data; struct kvm_vcpu *vcpu; }; static void vcpu_global_purge(void *info) { struct call_data *p = (struct call_data *)info; struct kvm_vcpu *vcpu = p->vcpu; if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) return; set_bit(KVM_REQ_PTC_G, &vcpu->requests); if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) { vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] = p->ptc_g_data; } else { clear_bit(KVM_REQ_PTC_G, &vcpu->requests); vcpu->arch.ptc_g_count = 0; set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests); } } static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct exit_ctl_data *p = kvm_get_exit_data(vcpu); struct kvm *kvm = vcpu->kvm; struct call_data call_data; int i; struct kvm_vcpu *vcpui; call_data.ptc_g_data = p->u.ptc_g_data; kvm_for_each_vcpu(i, vcpui, kvm) { if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED || vcpu == vcpui) continue; if (waitqueue_active(&vcpui->wq)) wake_up_interruptible(&vcpui->wq); if (vcpui->cpu != -1) { call_data.vcpu = vcpui; smp_call_function_single(vcpui->cpu, vcpu_global_purge, &call_data, 1); } else printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n"); } return 1; } static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { return 1; } static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu) { unsigned long pte, rtc_phys_addr, map_addr; int slot; map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT); rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC; pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC)); slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT); vcpu->arch.sn_rtc_tr_slot = slot; if (slot < 0) { printk(KERN_ERR "Mayday mayday! 
RTC mapping failed!\n"); slot = 0; } return slot; } int kvm_emulate_halt(struct kvm_vcpu *vcpu) { ktime_t kt; long itc_diff; unsigned long vcpu_now_itc; unsigned long expires; struct hrtimer *p_ht = &vcpu->arch.hlt_timer; unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec; struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); if (irqchip_in_kernel(vcpu->kvm)) { vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset; if (time_after(vcpu_now_itc, vpd->itm)) { vcpu->arch.timer_check = 1; return 1; } itc_diff = vpd->itm - vcpu_now_itc; if (itc_diff < 0) itc_diff = -itc_diff; expires = div64_u64(itc_diff, cyc_per_usec); kt = ktime_set(0, 1000 * expires); vcpu->arch.ht_active = 1; hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS); vcpu->arch.mp_state = KVM_MP_STATE_HALTED; kvm_vcpu_block(vcpu); hrtimer_cancel(p_ht); vcpu->arch.ht_active = 0; if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) || kvm_cpu_has_pending_timer(vcpu)) if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE) return -EINTR; return 1; } else { printk(KERN_ERR"kvm: Unsupported userspace halt!"); return 0; } } static int handle_vm_shutdown(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; return 0; } static int handle_external_interrupt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { return 1; } static int handle_vcpu_debug(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { printk("VMM: %s", vcpu->arch.log_buf); return 1; } static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) = { [EXIT_REASON_VM_PANIC] = handle_vm_error, [EXIT_REASON_MMIO_INSTRUCTION] = handle_mmio, [EXIT_REASON_PAL_CALL] = handle_pal_call, [EXIT_REASON_SAL_CALL] = handle_sal_call, [EXIT_REASON_SWITCH_RR6] = handle_switch_rr6, [EXIT_REASON_VM_DESTROY] = handle_vm_shutdown, [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, [EXIT_REASON_IPI] = handle_ipi, [EXIT_REASON_PTC_G] = handle_global_purge, [EXIT_REASON_DEBUG] = handle_vcpu_debug, }; static const int kvm_vti_max_exit_handlers = sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers); static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu) { struct exit_ctl_data *p_exit_data; p_exit_data = kvm_get_exit_data(vcpu); return p_exit_data->exit_reason; } /* * The guest has exited. See if we can fix it or if we need userspace * assistance. 
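 *
 * The exit reason indexes straight into kvm_vti_exit_handlers[]; a
 * handler returning > 0 lets __vcpu_run() re-enter the guest, while a
 * return of 0 punts the exit to userspace through kvm_run.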
*/ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) { u32 exit_reason = kvm_get_exit_reason(vcpu); vcpu->arch.last_exit = exit_reason; if (exit_reason < kvm_vti_max_exit_handlers && kvm_vti_exit_handlers[exit_reason]) return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run); else { kvm_run->exit_reason = KVM_EXIT_UNKNOWN; kvm_run->hw.hardware_exit_reason = exit_reason; } return 0; } static inline void vti_set_rr6(unsigned long rr6) { ia64_set_rr(RR6, rr6); ia64_srlz_i(); } static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu) { unsigned long pte; struct kvm *kvm = vcpu->kvm; int r; /*Insert a pair of tr to map vmm*/ pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL)); r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); if (r < 0) goto out; vcpu->arch.vmm_tr_slot = r; /*Insert a pairt of tr to map data of vm*/ pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL)); r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE, pte, KVM_VM_DATA_SHIFT); if (r < 0) goto out; vcpu->arch.vm_tr_slot = r; #if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) if (kvm->arch.is_sn2) { r = kvm_sn2_setup_mappings(vcpu); if (r < 0) goto out; } #endif r = 0; out: return r; } static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot); ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot); #if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) if (kvm->arch.is_sn2) ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot); #endif } static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu) { unsigned long psr; int r; int cpu = smp_processor_id(); if (vcpu->arch.last_run_cpu != cpu || per_cpu(last_vcpu, cpu) != vcpu) { per_cpu(last_vcpu, cpu) = vcpu; vcpu->arch.last_run_cpu = cpu; kvm_flush_tlb_all(); } vcpu->arch.host_rr6 = ia64_get_rr(RR6); vti_set_rr6(vcpu->arch.vmm_rr); local_irq_save(psr); r = kvm_insert_vmm_mapping(vcpu); local_irq_restore(psr); return r; } static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu) { kvm_purge_vmm_mapping(vcpu); vti_set_rr6(vcpu->arch.host_rr6); } static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { union context *host_ctx, *guest_ctx; int r, idx; idx = srcu_read_lock(&vcpu->kvm->srcu); again: if (signal_pending(current)) { r = -EINTR; kvm_run->exit_reason = KVM_EXIT_INTR; goto out; } preempt_disable(); local_irq_disable(); /*Get host and guest context with guest address space.*/ host_ctx = kvm_get_host_context(vcpu); guest_ctx = kvm_get_guest_context(vcpu); clear_bit(KVM_REQ_KICK, &vcpu->requests); r = kvm_vcpu_pre_transition(vcpu); if (r < 0) goto vcpu_run_fail; srcu_read_unlock(&vcpu->kvm->srcu, idx); vcpu->mode = IN_GUEST_MODE; kvm_guest_enter(); /* * Transition to the guest */ kvm_vmm_info->tramp_entry(host_ctx, guest_ctx); kvm_vcpu_post_transition(vcpu); vcpu->arch.launched = 1; set_bit(KVM_REQ_KICK, &vcpu->requests); local_irq_enable(); /* * We must have an instruction between local_irq_enable() and * kvm_guest_exit(), so the timer interrupt isn't delayed by * the interrupt shadow. The stat.exits increment will do nicely. 
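	 * (Without such an instruction, a timer interrupt pending in the
	 * shadow would only be taken after kvm_guest_exit() had run.)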
* But we need to prevent reordering, hence this barrier(): */ barrier(); kvm_guest_exit(); vcpu->mode = OUTSIDE_GUEST_MODE; preempt_enable(); idx = srcu_read_lock(&vcpu->kvm->srcu); r = kvm_handle_exit(kvm_run, vcpu); if (r > 0) { if (!need_resched()) goto again; } out: srcu_read_unlock(&vcpu->kvm->srcu, idx); if (r > 0) { kvm_resched(vcpu); idx = srcu_read_lock(&vcpu->kvm->srcu); goto again; } return r; vcpu_run_fail: local_irq_enable(); preempt_enable(); kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; goto out; } static void kvm_set_mmio_data(struct kvm_vcpu *vcpu) { struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu); if (!vcpu->mmio_is_write) memcpy(&p->data, vcpu->mmio_data, 8); p->state = STATE_IORESP_READY; } int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int r; sigset_t sigsaved; if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { kvm_vcpu_block(vcpu); clear_bit(KVM_REQ_UNHALT, &vcpu->requests); r = -EAGAIN; goto out; } if (vcpu->mmio_needed) { memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); kvm_set_mmio_data(vcpu); vcpu->mmio_read_completed = 1; vcpu->mmio_needed = 0; } r = __vcpu_run(vcpu, kvm_run); out: if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); return r; } struct kvm *kvm_arch_alloc_vm(void) { struct kvm *kvm; uint64_t vm_base; BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE); vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE)); if (!vm_base) return NULL; memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); kvm = (struct kvm *)(vm_base + offsetof(struct kvm_vm_data, kvm_vm_struct)); kvm->arch.vm_base = vm_base; printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base); return kvm; } struct kvm_ia64_io_range { unsigned long start; unsigned long size; unsigned long type; }; static const struct kvm_ia64_io_range io_ranges[] = { {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER}, {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO}, {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO}, {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC}, {PIB_START, PIB_SIZE, GPFN_PIB}, }; static void kvm_build_io_pmt(struct kvm *kvm) { unsigned long i, j; /* Mark I/O ranges */ for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range)); i++) { for (j = io_ranges[i].start; j < io_ranges[i].start + io_ranges[i].size; j += PAGE_SIZE) kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT, io_ranges[i].type, 0); } } /*Use unused rids to virtualize guest rid.*/ #define GUEST_PHYSICAL_RR0 0x1739 #define GUEST_PHYSICAL_RR4 0x2739 #define VMM_INIT_RR 0x1660 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { BUG_ON(!kvm); if (type) return -EINVAL; kvm->arch.is_sn2 = ia64_platform_is("sn2"); kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0; kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4; kvm->arch.vmm_init_rr = VMM_INIT_RR; /* *Fill P2M entries for MMIO/IO ranges */ kvm_build_io_pmt(kvm); INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); return 0; } static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { int r; r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_IOAPIC: r = kvm_get_ioapic(kvm, &chip->chip.ioapic); break; default: r = -EINVAL; break; } return r; } static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { int r; r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_IOAPIC: r = kvm_set_ioapic(kvm, 
&chip->chip.ioapic); break; default: r = -EINVAL; break; } return r; } #define RESTORE_REGS(_x) vcpu->arch._x = regs->_x int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); int i; for (i = 0; i < 16; i++) { vpd->vgr[i] = regs->vpd.vgr[i]; vpd->vbgr[i] = regs->vpd.vbgr[i]; } for (i = 0; i < 128; i++) vpd->vcr[i] = regs->vpd.vcr[i]; vpd->vhpi = regs->vpd.vhpi; vpd->vnat = regs->vpd.vnat; vpd->vbnat = regs->vpd.vbnat; vpd->vpsr = regs->vpd.vpsr; vpd->vpr = regs->vpd.vpr; memcpy(&vcpu->arch.guest, &regs->saved_guest, sizeof(union context)); RESTORE_REGS(mp_state); RESTORE_REGS(vmm_rr); memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS); memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS); RESTORE_REGS(itr_regions); RESTORE_REGS(dtr_regions); RESTORE_REGS(tc_regions); RESTORE_REGS(irq_check); RESTORE_REGS(itc_check); RESTORE_REGS(timer_check); RESTORE_REGS(timer_pending); RESTORE_REGS(last_itc); for (i = 0; i < 8; i++) { vcpu->arch.vrr[i] = regs->vrr[i]; vcpu->arch.ibr[i] = regs->ibr[i]; vcpu->arch.dbr[i] = regs->dbr[i]; } for (i = 0; i < 4; i++) vcpu->arch.insvc[i] = regs->insvc[i]; RESTORE_REGS(xtp); RESTORE_REGS(metaphysical_rr0); RESTORE_REGS(metaphysical_rr4); RESTORE_REGS(metaphysical_saved_rr0); RESTORE_REGS(metaphysical_saved_rr4); RESTORE_REGS(fp_psr); RESTORE_REGS(saved_gp); vcpu->arch.irq_new_pending = 1; vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu); set_bit(KVM_REQ_RESUME, &vcpu->requests); return 0; } long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm = filp->private_data; void __user *argp = (void __user *)arg; int r = -ENOTTY; switch (ioctl) { case KVM_SET_MEMORY_REGION: { struct kvm_memory_region kvm_mem; struct kvm_userspace_memory_region kvm_userspace_mem; r = -EFAULT; if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem)) goto out; kvm_userspace_mem.slot = kvm_mem.slot; kvm_userspace_mem.flags = kvm_mem.flags; kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr; kvm_userspace_mem.memory_size = kvm_mem.memory_size; r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0); if (r) goto out; break; } case KVM_CREATE_IRQCHIP: r = -EFAULT; r = kvm_ioapic_init(kvm); if (r) goto out; r = kvm_setup_default_irq_routing(kvm); if (r) { mutex_lock(&kvm->slots_lock); kvm_ioapic_destroy(kvm); mutex_unlock(&kvm->slots_lock); goto out; } break; case KVM_IRQ_LINE_STATUS: case KVM_IRQ_LINE: { struct kvm_irq_level irq_event; r = -EFAULT; if (copy_from_user(&irq_event, argp, sizeof irq_event)) goto out; r = -ENXIO; if (irqchip_in_kernel(kvm)) { __s32 status; status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irq_event.irq, irq_event.level); if (ioctl == KVM_IRQ_LINE_STATUS) { r = -EFAULT; irq_event.status = status; if (copy_to_user(argp, &irq_event, sizeof irq_event)) goto out; } r = 0; } break; } case KVM_GET_IRQCHIP: { /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ struct kvm_irqchip chip; r = -EFAULT; if (copy_from_user(&chip, argp, sizeof chip)) goto out; r = -ENXIO; if (!irqchip_in_kernel(kvm)) goto out; r = kvm_vm_ioctl_get_irqchip(kvm, &chip); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &chip, sizeof chip)) goto out; r = 0; break; } case KVM_SET_IRQCHIP: { /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ struct kvm_irqchip chip; r = -EFAULT; if (copy_from_user(&chip, argp, sizeof chip)) goto out; r = -ENXIO; if (!irqchip_in_kernel(kvm)) goto out; r = kvm_vm_ioctl_set_irqchip(kvm, &chip); if 
(r) goto out; r = 0; break; } default: ; } out: return r; } int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { return -EINVAL; } int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { return -EINVAL; } int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { return -EINVAL; } static int kvm_alloc_vmm_area(void) { if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) { kvm_vmm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VMM_SIZE)); if (!kvm_vmm_base) return -ENOMEM; memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE); kvm_vm_buffer = kvm_vmm_base + VMM_SIZE; printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n", kvm_vmm_base, kvm_vm_buffer); } return 0; } static void kvm_free_vmm_area(void) { if (kvm_vmm_base) { /*Zero this area before free to avoid bits leak!!*/ memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE); free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE)); kvm_vmm_base = 0; kvm_vm_buffer = 0; kvm_vsa_base = 0; } } static int vti_init_vpd(struct kvm_vcpu *vcpu) { int i; union cpuid3_t cpuid3; struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); if (IS_ERR(vpd)) return PTR_ERR(vpd); /* CPUID init */ for (i = 0; i < 5; i++) vpd->vcpuid[i] = ia64_get_cpuid(i); /* Limit the CPUID number to 5 */ cpuid3.value = vpd->vcpuid[3]; cpuid3.number = 4; /* 5 - 1 */ vpd->vcpuid[3] = cpuid3.value; /*Set vac and vdc fields*/ vpd->vac.a_from_int_cr = 1; vpd->vac.a_to_int_cr = 1; vpd->vac.a_from_psr = 1; vpd->vac.a_from_cpuid = 1; vpd->vac.a_cover = 1; vpd->vac.a_bsw = 1; vpd->vac.a_int = 1; vpd->vdc.d_vmsw = 1; /*Set virtual buffer*/ vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE; return 0; } static int vti_create_vp(struct kvm_vcpu *vcpu) { long ret; struct vpd *vpd = vcpu->arch.vpd; unsigned long vmm_ivt; vmm_ivt = kvm_vmm_info->vmm_ivt; printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt); ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0); if (ret) { printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n"); return -EINVAL; } return 0; } static void init_ptce_info(struct kvm_vcpu *vcpu) { ia64_ptce_info_t ptce = {0}; ia64_get_ptce(&ptce); vcpu->arch.ptce_base = ptce.base; vcpu->arch.ptce_count[0] = ptce.count[0]; vcpu->arch.ptce_count[1] = ptce.count[1]; vcpu->arch.ptce_stride[0] = ptce.stride[0]; vcpu->arch.ptce_stride[1] = ptce.stride[1]; } static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu) { struct hrtimer *p_ht = &vcpu->arch.hlt_timer; if (hrtimer_cancel(p_ht)) hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS); } static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data) { struct kvm_vcpu *vcpu; wait_queue_head_t *q; vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer); q = &vcpu->wq; if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED) goto out; if (waitqueue_active(q)) wake_up_interruptible(q); out: vcpu->arch.timer_fired = 1; vcpu->arch.timer_check = 1; return HRTIMER_NORESTART; } #define PALE_RESET_ENTRY 0x80000000ffffffb0UL bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); } int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { struct kvm_vcpu *v; int r; int i; long itc_offset; struct kvm *kvm = vcpu->kvm; struct kvm_pt_regs *regs = vcpu_regs(vcpu); union context *p_ctx = &vcpu->arch.guest; struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu); /*Init vcpu context for first run.*/ if (IS_ERR(vmm_vcpu)) return PTR_ERR(vmm_vcpu); if (kvm_vcpu_is_bsp(vcpu)) { vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; /*Set entry 
address for first run.*/ regs->cr_iip = PALE_RESET_ENTRY; /*Initialize itc offset for vcpus*/ itc_offset = 0UL - kvm_get_itc(vcpu); for (i = 0; i < KVM_MAX_VCPUS; i++) { v = (struct kvm_vcpu *)((char *)vcpu + sizeof(struct kvm_vcpu_data) * i); v->arch.itc_offset = itc_offset; v->arch.last_itc = 0; } } else vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; r = -ENOMEM; vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL); if (!vcpu->arch.apic) goto out; vcpu->arch.apic->vcpu = vcpu; p_ctx->gr[1] = 0; p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET); p_ctx->gr[13] = (unsigned long)vmm_vcpu; p_ctx->psr = 0x1008522000UL; p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/ p_ctx->caller_unat = 0; p_ctx->pr = 0x0; p_ctx->ar[36] = 0x0; /*unat*/ p_ctx->ar[19] = 0x0; /*rnat*/ p_ctx->ar[18] = (unsigned long)vmm_vcpu + ((sizeof(struct kvm_vcpu)+15) & ~15); p_ctx->ar[64] = 0x0; /*pfs*/ p_ctx->cr[0] = 0x7e04UL; p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt; p_ctx->cr[8] = 0x3c; /*Initialize region register*/ p_ctx->rr[0] = 0x30; p_ctx->rr[1] = 0x30; p_ctx->rr[2] = 0x30; p_ctx->rr[3] = 0x30; p_ctx->rr[4] = 0x30; p_ctx->rr[5] = 0x30; p_ctx->rr[7] = 0x30; /*Initialize branch register 0*/ p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry; vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr; vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0; vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4; hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); vcpu->arch.hlt_timer.function = hlt_timer_fn; vcpu->arch.last_run_cpu = -1; vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id); vcpu->arch.vsa_base = kvm_vsa_base; vcpu->arch.__gp = kvm_vmm_gp; vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock); vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id); vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id); init_ptce_info(vcpu); r = 0; out: return r; } static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id) { unsigned long psr; int r; local_irq_save(psr); r = kvm_insert_vmm_mapping(vcpu); local_irq_restore(psr); if (r) goto fail; r = kvm_vcpu_init(vcpu, vcpu->kvm, id); if (r) goto fail; r = vti_init_vpd(vcpu); if (r) { printk(KERN_DEBUG"kvm: vpd init error!!\n"); goto uninit; } r = vti_create_vp(vcpu); if (r) goto uninit; kvm_purge_vmm_mapping(vcpu); return 0; uninit: kvm_vcpu_uninit(vcpu); fail: return r; } struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { struct kvm_vcpu *vcpu; unsigned long vm_base = kvm->arch.vm_base; int r; int cpu; BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2); r = -EINVAL; if (id >= KVM_MAX_VCPUS) { printk(KERN_ERR"kvm: Can't configure vcpus > %ld", KVM_MAX_VCPUS); goto fail; } r = -ENOMEM; if (!vm_base) { printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id); goto fail; } vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data, vcpu_data[id].vcpu_struct)); vcpu->kvm = kvm; cpu = get_cpu(); r = vti_vcpu_setup(vcpu, id); put_cpu(); if (r) { printk(KERN_DEBUG"kvm: vcpu_setup error!!\n"); goto fail; } return vcpu; fail: return ERR_PTR(r); } int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { return 0; } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { return -EINVAL; } int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { return -EINVAL; } int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { return -EINVAL; } void kvm_arch_free_vm(struct kvm *kvm) { unsigned long vm_base = 
kvm->arch.vm_base; if (vm_base) { memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); free_pages(vm_base, get_order(KVM_VM_DATA_SIZE)); } } static void kvm_release_vm_pages(struct kvm *kvm) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; int j; unsigned long base_gfn; slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, slots) { base_gfn = memslot->base_gfn; for (j = 0; j < memslot->npages; j++) { if (memslot->rmap[j]) put_page((struct page *)memslot->rmap[j]); } } } void kvm_arch_sync_events(struct kvm *kvm) { } void kvm_arch_destroy_vm(struct kvm *kvm) { kvm_iommu_unmap_guest(kvm); #ifdef KVM_CAP_DEVICE_ASSIGNMENT kvm_free_all_assigned_devices(kvm); #endif kfree(kvm->arch.vioapic); kvm_release_vm_pages(kvm); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { } void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { if (cpu != vcpu->cpu) { vcpu->cpu = cpu; if (vcpu->arch.ht_active) kvm_migrate_hlt_timer(vcpu); } } #define SAVE_REGS(_x) regs->_x = vcpu->arch._x int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); int i; vcpu_load(vcpu); for (i = 0; i < 16; i++) { regs->vpd.vgr[i] = vpd->vgr[i]; regs->vpd.vbgr[i] = vpd->vbgr[i]; } for (i = 0; i < 128; i++) regs->vpd.vcr[i] = vpd->vcr[i]; regs->vpd.vhpi = vpd->vhpi; regs->vpd.vnat = vpd->vnat; regs->vpd.vbnat = vpd->vbnat; regs->vpd.vpsr = vpd->vpsr; regs->vpd.vpr = vpd->vpr; memcpy(&regs->saved_guest, &vcpu->arch.guest, sizeof(union context)); SAVE_REGS(mp_state); SAVE_REGS(vmm_rr); memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS); memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS); SAVE_REGS(itr_regions); SAVE_REGS(dtr_regions); SAVE_REGS(tc_regions); SAVE_REGS(irq_check); SAVE_REGS(itc_check); SAVE_REGS(timer_check); SAVE_REGS(timer_pending); SAVE_REGS(last_itc); for (i = 0; i < 8; i++) { regs->vrr[i] = vcpu->arch.vrr[i]; regs->ibr[i] = vcpu->arch.ibr[i]; regs->dbr[i] = vcpu->arch.dbr[i]; } for (i = 0; i < 4; i++) regs->insvc[i] = vcpu->arch.insvc[i]; regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu); SAVE_REGS(xtp); SAVE_REGS(metaphysical_rr0); SAVE_REGS(metaphysical_rr4); SAVE_REGS(metaphysical_saved_rr0); SAVE_REGS(metaphysical_saved_rr4); SAVE_REGS(fp_psr); SAVE_REGS(saved_gp); vcpu_put(vcpu); return 0; } int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu, struct kvm_ia64_vcpu_stack *stack) { memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack)); return 0; } int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu, struct kvm_ia64_vcpu_stack *stack) { memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu), sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu)); vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data; return 0; } void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) { hrtimer_cancel(&vcpu->arch.hlt_timer); kfree(vcpu->arch.apic); } long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; struct kvm_ia64_vcpu_stack *stack = NULL; long r; switch (ioctl) { case KVM_IA64_VCPU_GET_STACK: { struct kvm_ia64_vcpu_stack __user *user_stack; void __user *first_p = argp; r = -EFAULT; if (copy_from_user(&user_stack, first_p, sizeof(void *))) goto out; if (!access_ok(VERIFY_WRITE, user_stack, sizeof(struct kvm_ia64_vcpu_stack))) { printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: " "Illegal user destination address for stack\n"); goto out; } stack = 
kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL); if (!stack) { r = -ENOMEM; goto out; } r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack); if (r) goto out; if (copy_to_user(user_stack, stack, sizeof(struct kvm_ia64_vcpu_stack))) { r = -EFAULT; goto out; } break; } case KVM_IA64_VCPU_SET_STACK: { struct kvm_ia64_vcpu_stack __user *user_stack; void __user *first_p = argp; r = -EFAULT; if (copy_from_user(&user_stack, first_p, sizeof(void *))) goto out; if (!access_ok(VERIFY_READ, user_stack, sizeof(struct kvm_ia64_vcpu_stack))) { printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: " "Illegal user address for stack\n"); goto out; } stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL); if (!stack) { r = -ENOMEM; goto out; } if (copy_from_user(stack, user_stack, sizeof(struct kvm_ia64_vcpu_stack))) goto out; r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack); break; } default: r = -EINVAL; } out: kfree(stack); return r; } int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) { return VM_FAULT_SIGBUS; } void kvm_arch_free_memslot(struct kvm_memory_slot *free, struct kvm_memory_slot *dont) { } int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) { return 0; } int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_memory_slot old, struct kvm_userspace_memory_region *mem, int user_alloc) { unsigned long i; unsigned long pfn; int npages = memslot->npages; unsigned long base_gfn = memslot->base_gfn; if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT)) return -ENOMEM; for (i = 0; i < npages; i++) { pfn = gfn_to_pfn(kvm, base_gfn + i); if (!kvm_is_mmio_pfn(pfn)) { kvm_set_pmt_entry(kvm, base_gfn + i, pfn << PAGE_SHIFT, _PAGE_AR_RWX | _PAGE_MA_WB); memslot->rmap[i] = (unsigned long)pfn_to_page(pfn); } else { kvm_set_pmt_entry(kvm, base_gfn + i, GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT), _PAGE_MA_UC); memslot->rmap[i] = 0; } } return 0; } void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, struct kvm_memory_slot old, int user_alloc) { return; } void kvm_arch_flush_shadow(struct kvm *kvm) { kvm_flush_remote_tlbs(kvm); } long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { return -EINVAL; } void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { kvm_vcpu_uninit(vcpu); } static int vti_cpu_has_kvm_support(void) { long avail = 1, status = 1, control = 1; long ret; ret = ia64_pal_proc_get_features(&avail, &status, &control, 0); if (ret) goto out; if (!(avail & PAL_PROC_VM_BIT)) goto out; printk(KERN_DEBUG"kvm: Hardware Supports VT\n"); ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info); if (ret) goto out; printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size); if (!(vp_env_info & VP_OPCODE)) { printk(KERN_WARNING"kvm: No opcode ability on hardware, " "vm_env_info:0x%lx\n", vp_env_info); } return 1; out: return 0; } /* * On SN2, the ITC isn't stable, so copy in fast path code to use the * SN2 RTC, replacing the ITC based default verion. */ static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info, struct module *module) { unsigned long new_ar, new_ar_sn2; unsigned long module_base; if (!ia64_platform_is("sn2")) return; module_base = (unsigned long)module->module_core; new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base; new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base; printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC " "as source\n"); /* * Copy the SN2 version of mov_ar into place. 
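	 * (new_ar and new_ar_sn2 are the two routines' entry addresses,
	 * rebased from the module's load address into the relocated VMM
	 * area at kvm_vmm_base.)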
They are both * the same size, so 6 bundles is sufficient (6 * 0x10). */ memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60); } static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info, struct module *module) { unsigned long module_base; unsigned long vmm_size; unsigned long vmm_offset, func_offset, fdesc_offset; struct fdesc *p_fdesc; BUG_ON(!module); if (!kvm_vmm_base) { printk("kvm: kvm area hasn't been initialized yet!!\n"); return -EFAULT; } /*Calculate new position of relocated vmm module.*/ module_base = (unsigned long)module->module_core; vmm_size = module->core_size; if (unlikely(vmm_size > KVM_VMM_SIZE)) return -EFAULT; memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size); kvm_patch_vmm(vmm_info, module); kvm_flush_icache(kvm_vmm_base, vmm_size); /*Recalculate kvm_vmm_info based on new VMM*/ vmm_offset = vmm_info->vmm_ivt - module_base; kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset; printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n", kvm_vmm_info->vmm_ivt); fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base; kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE + fdesc_offset); func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base; p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset); p_fdesc->ip = KVM_VMM_BASE + func_offset; p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base); printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n", KVM_VMM_BASE+func_offset); fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base; kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE + fdesc_offset); func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base; p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset); p_fdesc->ip = KVM_VMM_BASE + func_offset; p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base); kvm_vmm_gp = p_fdesc->gp; printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n", kvm_vmm_info->vmm_entry); printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n", KVM_VMM_BASE + func_offset); return 0; } int kvm_arch_init(void *opaque) { int r; struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque; if (!vti_cpu_has_kvm_support()) { printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n"); r = -EOPNOTSUPP; goto out; } if (kvm_vmm_info) { printk(KERN_ERR "kvm: Already loaded VMM module!\n"); r = -EEXIST; goto out; } r = -ENOMEM; kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL); if (!kvm_vmm_info) goto out; if (kvm_alloc_vmm_area()) goto out_free0; r = kvm_relocate_vmm(vmm_info, vmm_info->module); if (r) goto out_free1; return 0; out_free1: kvm_free_vmm_area(); out_free0: kfree(kvm_vmm_info); out: return r; } void kvm_arch_exit(void) { kvm_free_vmm_area(); kfree(kvm_vmm_info); kvm_vmm_info = NULL; } static void kvm_ia64_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) { int i; long base; unsigned long n; unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base + offsetof(struct kvm_vm_data, kvm_mem_dirty_log)); n = kvm_dirty_bitmap_bytes(memslot); base = memslot->base_gfn / BITS_PER_LONG; spin_lock(&kvm->arch.dirty_log_lock); for (i = 0; i < n/sizeof(long); ++i) { memslot->dirty_bitmap[i] = dirty_bitmap[base + i]; dirty_bitmap[base + i] = 0; } spin_unlock(&kvm->arch.dirty_log_lock); } int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { int r; unsigned long n; struct kvm_memory_slot *memslot; int is_dirty = 0; mutex_lock(&kvm->slots_lock); r = -EINVAL; if (log->slot >= KVM_MEMORY_SLOTS) goto out; memslot = 
id_to_memslot(kvm->memslots, log->slot); r = -ENOENT; if (!memslot->dirty_bitmap) goto out; kvm_ia64_sync_dirty_log(kvm, memslot); r = kvm_get_dirty_log(kvm, log, &is_dirty); if (r) goto out; /* If nothing is dirty, don't bother messing with page tables. */ if (is_dirty) { kvm_flush_remote_tlbs(kvm); n = kvm_dirty_bitmap_bytes(memslot); memset(memslot->dirty_bitmap, 0, n); } r = 0; out: mutex_unlock(&kvm->slots_lock); return r; } int kvm_arch_hardware_setup(void) { return 0; } void kvm_arch_hardware_unsetup(void) { } void kvm_vcpu_kick(struct kvm_vcpu *vcpu) { int me; int cpu = vcpu->cpu; if (waitqueue_active(&vcpu->wq)) wake_up_interruptible(&vcpu->wq); me = get_cpu(); if (cpu != me && (unsigned) cpu < nr_cpu_ids && cpu_online(cpu)) if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests)) smp_send_reschedule(cpu); put_cpu(); } int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq) { return __apic_accept_irq(vcpu, irq->vector); } int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest) { return apic->vcpu->vcpu_id == dest; } int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda) { return 0; } int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2) { return vcpu1->arch.xtp - vcpu2->arch.xtp; } int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, int short_hand, int dest, int dest_mode) { struct kvm_lapic *target = vcpu->arch.apic; return (dest_mode == 0) ? kvm_apic_match_physical_addr(target, dest) : kvm_apic_match_logical_addr(target, dest); } static int find_highest_bits(int *dat) { u32 bits, bitnum; int i; /* loop for all 256 bits */ for (i = 7; i >= 0 ; i--) { bits = dat[i]; if (bits) { bitnum = fls(bits); return i * 32 + bitnum - 1; } } return -1; } int kvm_highest_pending_irq(struct kvm_vcpu *vcpu) { struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); if (vpd->irr[0] & (1UL << NMI_VECTOR)) return NMI_VECTOR; if (vpd->irr[0] & (1UL << ExtINT_VECTOR)) return ExtINT_VECTOR; return find_highest_bits((int *)&vpd->irr[0]); } int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) { return vcpu->arch.timer_fired; } int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) || (kvm_highest_pending_irq(vcpu) != -1); } int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { mp_state->mp_state = vcpu->arch.mp_state; return 0; } static int vcpu_reset(struct kvm_vcpu *vcpu) { int r; long psr; local_irq_save(psr); r = kvm_insert_vmm_mapping(vcpu); local_irq_restore(psr); if (r) goto fail; vcpu->arch.launched = 0; kvm_arch_vcpu_uninit(vcpu); r = kvm_arch_vcpu_init(vcpu); if (r) goto fail; kvm_purge_vmm_mapping(vcpu); r = 0; fail: return r; } int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { int r = 0; vcpu->arch.mp_state = mp_state->mp_state; if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED) r = vcpu_reset(vcpu); return r; }
gpl-2.0
armani-dev/android_kernel_xiaomi_armani
sound/pci/ad1889.c
4894
27080
/* Analog Devices 1889 audio driver * * This is a driver for the AD1889 PCI audio chipset found * on the HP PA-RISC [BCJ]-xxx0 workstations. * * Copyright (C) 2004-2005, Kyle McMartin <kyle@parisc-linux.org> * Copyright (C) 2005, Thibaut Varene <varenet@parisc-linux.org> * Based on the OSS AD1889 driver by Randolph Chung <tausq@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * TODO: * Do we need to take care of CCS register? * Maybe we could use finer grained locking (separate locks for pb/cap)? * Wishlist: * Control Interface (mixer) support * Better AC97 support (VSR...)? * PM support * MIDI support * Game Port support * SG DMA support (this will need *a lot* of work) */ #include <linux/init.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/compiler.h> #include <linux/delay.h> #include <linux/module.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/initval.h> #include <sound/ac97_codec.h> #include <asm/io.h> #include "ad1889.h" #include "ac97/ac97_id.h" #define AD1889_DRVVER "Version: 1.7" MODULE_AUTHOR("Kyle McMartin <kyle@parisc-linux.org>, Thibaut Varene <t-bone@parisc-linux.org>"); MODULE_DESCRIPTION("Analog Devices AD1889 ALSA sound driver"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Analog Devices,AD1889}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for the AD1889 soundcard."); static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for the AD1889 soundcard."); static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable AD1889 soundcard."); static char *ac97_quirk[SNDRV_CARDS]; module_param_array(ac97_quirk, charp, NULL, 0444); MODULE_PARM_DESC(ac97_quirk, "AC'97 workaround for strange hardware."); #define DEVNAME "ad1889" #define PFX DEVNAME ": " /* let's use the global sound debug interfaces */ #define ad1889_debug(fmt, arg...) 
snd_printd(KERN_DEBUG fmt, ## arg) /* keep track of some hw registers */ struct ad1889_register_state { u16 reg; /* reg setup */ u32 addr; /* dma base address */ unsigned long size; /* DMA buffer size */ }; struct snd_ad1889 { struct snd_card *card; struct pci_dev *pci; int irq; unsigned long bar; void __iomem *iobase; struct snd_ac97 *ac97; struct snd_ac97_bus *ac97_bus; struct snd_pcm *pcm; struct snd_info_entry *proc; struct snd_pcm_substream *psubs; struct snd_pcm_substream *csubs; /* playback register state */ struct ad1889_register_state wave; struct ad1889_register_state ramc; spinlock_t lock; }; static inline u16 ad1889_readw(struct snd_ad1889 *chip, unsigned reg) { return readw(chip->iobase + reg); } static inline void ad1889_writew(struct snd_ad1889 *chip, unsigned reg, u16 val) { writew(val, chip->iobase + reg); } static inline u32 ad1889_readl(struct snd_ad1889 *chip, unsigned reg) { return readl(chip->iobase + reg); } static inline void ad1889_writel(struct snd_ad1889 *chip, unsigned reg, u32 val) { writel(val, chip->iobase + reg); } static inline void ad1889_unmute(struct snd_ad1889 *chip) { u16 st; st = ad1889_readw(chip, AD_DS_WADA) & ~(AD_DS_WADA_RWAM | AD_DS_WADA_LWAM); ad1889_writew(chip, AD_DS_WADA, st); ad1889_readw(chip, AD_DS_WADA); } static inline void ad1889_mute(struct snd_ad1889 *chip) { u16 st; st = ad1889_readw(chip, AD_DS_WADA) | AD_DS_WADA_RWAM | AD_DS_WADA_LWAM; ad1889_writew(chip, AD_DS_WADA, st); ad1889_readw(chip, AD_DS_WADA); } static inline void ad1889_load_adc_buffer_address(struct snd_ad1889 *chip, u32 address) { ad1889_writel(chip, AD_DMA_ADCBA, address); ad1889_writel(chip, AD_DMA_ADCCA, address); } static inline void ad1889_load_adc_buffer_count(struct snd_ad1889 *chip, u32 count) { ad1889_writel(chip, AD_DMA_ADCBC, count); ad1889_writel(chip, AD_DMA_ADCCC, count); } static inline void ad1889_load_adc_interrupt_count(struct snd_ad1889 *chip, u32 count) { ad1889_writel(chip, AD_DMA_ADCIB, count); ad1889_writel(chip, AD_DMA_ADCIC, count); } static inline void ad1889_load_wave_buffer_address(struct snd_ad1889 *chip, u32 address) { ad1889_writel(chip, AD_DMA_WAVBA, address); ad1889_writel(chip, AD_DMA_WAVCA, address); } static inline void ad1889_load_wave_buffer_count(struct snd_ad1889 *chip, u32 count) { ad1889_writel(chip, AD_DMA_WAVBC, count); ad1889_writel(chip, AD_DMA_WAVCC, count); } static inline void ad1889_load_wave_interrupt_count(struct snd_ad1889 *chip, u32 count) { ad1889_writel(chip, AD_DMA_WAVIB, count); ad1889_writel(chip, AD_DMA_WAVIC, count); } static void ad1889_channel_reset(struct snd_ad1889 *chip, unsigned int channel) { u16 reg; if (channel & AD_CHAN_WAV) { /* Disable wave channel */ reg = ad1889_readw(chip, AD_DS_WSMC) & ~AD_DS_WSMC_WAEN; ad1889_writew(chip, AD_DS_WSMC, reg); chip->wave.reg = reg; /* disable IRQs */ reg = ad1889_readw(chip, AD_DMA_WAV); reg &= AD_DMA_IM_DIS; reg &= ~AD_DMA_LOOP; ad1889_writew(chip, AD_DMA_WAV, reg); /* clear IRQ and address counters and pointers */ ad1889_load_wave_buffer_address(chip, 0x0); ad1889_load_wave_buffer_count(chip, 0x0); ad1889_load_wave_interrupt_count(chip, 0x0); /* flush */ ad1889_readw(chip, AD_DMA_WAV); } if (channel & AD_CHAN_ADC) { /* Disable ADC channel */ reg = ad1889_readw(chip, AD_DS_RAMC) & ~AD_DS_RAMC_ADEN; ad1889_writew(chip, AD_DS_RAMC, reg); chip->ramc.reg = reg; reg = ad1889_readw(chip, AD_DMA_ADC); reg &= AD_DMA_IM_DIS; reg &= ~AD_DMA_LOOP; ad1889_writew(chip, AD_DMA_ADC, reg); ad1889_load_adc_buffer_address(chip, 0x0); ad1889_load_adc_buffer_count(chip, 0x0); 
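		/* clear the IRQ counter as well, mirroring the wave-channel reset */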
ad1889_load_adc_interrupt_count(chip, 0x0); /* flush */ ad1889_readw(chip, AD_DMA_ADC); } } static u16 snd_ad1889_ac97_read(struct snd_ac97 *ac97, unsigned short reg) { struct snd_ad1889 *chip = ac97->private_data; return ad1889_readw(chip, AD_AC97_BASE + reg); } static void snd_ad1889_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { struct snd_ad1889 *chip = ac97->private_data; ad1889_writew(chip, AD_AC97_BASE + reg, val); } static int snd_ad1889_ac97_ready(struct snd_ad1889 *chip) { int retry = 400; /* average needs 352 msec */ while (!(ad1889_readw(chip, AD_AC97_ACIC) & AD_AC97_ACIC_ACRDY) && --retry) mdelay(1); if (!retry) { snd_printk(KERN_ERR PFX "[%s] Link is not ready.\n", __func__); return -EIO; } ad1889_debug("[%s] ready after %d ms\n", __func__, 400 - retry); return 0; } static int snd_ad1889_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); } static int snd_ad1889_hw_free(struct snd_pcm_substream *substream) { return snd_pcm_lib_free_pages(substream); } static struct snd_pcm_hardware snd_ad1889_playback_hw = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BLOCK_TRANSFER, .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 8000, /* docs say 7000, but we're lazy */ .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = BUFFER_BYTES_MAX, .period_bytes_min = PERIOD_BYTES_MIN, .period_bytes_max = PERIOD_BYTES_MAX, .periods_min = PERIODS_MIN, .periods_max = PERIODS_MAX, /*.fifo_size = 0,*/ }; static struct snd_pcm_hardware snd_ad1889_capture_hw = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BLOCK_TRANSFER, .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, /* docs say we could to VSR, but we're lazy */ .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = BUFFER_BYTES_MAX, .period_bytes_min = PERIOD_BYTES_MIN, .period_bytes_max = PERIOD_BYTES_MAX, .periods_min = PERIODS_MIN, .periods_max = PERIODS_MAX, /*.fifo_size = 0,*/ }; static int snd_ad1889_playback_open(struct snd_pcm_substream *ss) { struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); struct snd_pcm_runtime *rt = ss->runtime; chip->psubs = ss; rt->hw = snd_ad1889_playback_hw; return 0; } static int snd_ad1889_capture_open(struct snd_pcm_substream *ss) { struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); struct snd_pcm_runtime *rt = ss->runtime; chip->csubs = ss; rt->hw = snd_ad1889_capture_hw; return 0; } static int snd_ad1889_playback_close(struct snd_pcm_substream *ss) { struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); chip->psubs = NULL; return 0; } static int snd_ad1889_capture_close(struct snd_pcm_substream *ss) { struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); chip->csubs = NULL; return 0; } static int snd_ad1889_playback_prepare(struct snd_pcm_substream *ss) { struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); struct snd_pcm_runtime *rt = ss->runtime; unsigned int size = snd_pcm_lib_buffer_bytes(ss); unsigned int count = snd_pcm_lib_period_bytes(ss); u16 reg; ad1889_channel_reset(chip, AD_CHAN_WAV); reg = ad1889_readw(chip, AD_DS_WSMC); /* Mask out 16-bit / Stereo */ reg &= ~(AD_DS_WSMC_WA16 | AD_DS_WSMC_WAST); if (snd_pcm_format_width(rt->format) == 16) reg |= AD_DS_WSMC_WA16; if (rt->channels > 1) reg |= 
AD_DS_WSMC_WAST; /* let's make sure we don't clobber ourselves */ spin_lock_irq(&chip->lock); chip->wave.size = size; chip->wave.reg = reg; chip->wave.addr = rt->dma_addr; ad1889_writew(chip, AD_DS_WSMC, chip->wave.reg); /* Set sample rates on the codec */ ad1889_writew(chip, AD_DS_WAS, rt->rate); /* Set up DMA */ ad1889_load_wave_buffer_address(chip, chip->wave.addr); ad1889_load_wave_buffer_count(chip, size); ad1889_load_wave_interrupt_count(chip, count); /* writes flush */ ad1889_readw(chip, AD_DS_WSMC); spin_unlock_irq(&chip->lock); ad1889_debug("prepare playback: addr = 0x%x, count = %u, " "size = %u, reg = 0x%x, rate = %u\n", chip->wave.addr, count, size, reg, rt->rate); return 0; } static int snd_ad1889_capture_prepare(struct snd_pcm_substream *ss) { struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); struct snd_pcm_runtime *rt = ss->runtime; unsigned int size = snd_pcm_lib_buffer_bytes(ss); unsigned int count = snd_pcm_lib_period_bytes(ss); u16 reg; ad1889_channel_reset(chip, AD_CHAN_ADC); reg = ad1889_readw(chip, AD_DS_RAMC); /* Mask out 16-bit / Stereo */ reg &= ~(AD_DS_RAMC_AD16 | AD_DS_RAMC_ADST); if (snd_pcm_format_width(rt->format) == 16) reg |= AD_DS_RAMC_AD16; if (rt->channels > 1) reg |= AD_DS_RAMC_ADST; /* let's make sure we don't clobber ourselves */ spin_lock_irq(&chip->lock); chip->ramc.size = size; chip->ramc.reg = reg; chip->ramc.addr = rt->dma_addr; ad1889_writew(chip, AD_DS_RAMC, chip->ramc.reg); /* Set up DMA */ ad1889_load_adc_buffer_address(chip, chip->ramc.addr); ad1889_load_adc_buffer_count(chip, size); ad1889_load_adc_interrupt_count(chip, count); /* writes flush */ ad1889_readw(chip, AD_DS_RAMC); spin_unlock_irq(&chip->lock); ad1889_debug("prepare capture: addr = 0x%x, count = %u, " "size = %u, reg = 0x%x, rate = %u\n", chip->ramc.addr, count, size, reg, rt->rate); return 0; } /* this is called in atomic context with IRQ disabled. Must be as fast as possible and not sleep. DMA should be *triggered* by this call. The WSMC "WAEN" bit triggers DMA Wave On/Off */ static int snd_ad1889_playback_trigger(struct snd_pcm_substream *ss, int cmd) { u16 wsmc; struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); wsmc = ad1889_readw(chip, AD_DS_WSMC); switch (cmd) { case SNDRV_PCM_TRIGGER_START: /* enable DMA loop & interrupts */ ad1889_writew(chip, AD_DMA_WAV, AD_DMA_LOOP | AD_DMA_IM_CNT); wsmc |= AD_DS_WSMC_WAEN; /* 1 to clear CHSS bit */ ad1889_writel(chip, AD_DMA_CHSS, AD_DMA_CHSS_WAVS); ad1889_unmute(chip); break; case SNDRV_PCM_TRIGGER_STOP: ad1889_mute(chip); wsmc &= ~AD_DS_WSMC_WAEN; break; default: snd_BUG(); return -EINVAL; } chip->wave.reg = wsmc; ad1889_writew(chip, AD_DS_WSMC, wsmc); ad1889_readw(chip, AD_DS_WSMC); /* flush */ /* reset the chip when STOP - will disable IRQs */ if (cmd == SNDRV_PCM_TRIGGER_STOP) ad1889_channel_reset(chip, AD_CHAN_WAV); return 0; } /* this is called in atomic context with IRQ disabled. Must be as fast as possible and not sleep. DMA should be *triggered* by this call. 
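   Unlike the playback trigger there is no codec mute/unmute to handle
   on the capture path.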
The RAMC "ADEN" bit triggers DMA ADC On/Off */ static int snd_ad1889_capture_trigger(struct snd_pcm_substream *ss, int cmd) { u16 ramc; struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); ramc = ad1889_readw(chip, AD_DS_RAMC); switch (cmd) { case SNDRV_PCM_TRIGGER_START: /* enable DMA loop & interrupts */ ad1889_writew(chip, AD_DMA_ADC, AD_DMA_LOOP | AD_DMA_IM_CNT); ramc |= AD_DS_RAMC_ADEN; /* 1 to clear CHSS bit */ ad1889_writel(chip, AD_DMA_CHSS, AD_DMA_CHSS_ADCS); break; case SNDRV_PCM_TRIGGER_STOP: ramc &= ~AD_DS_RAMC_ADEN; break; default: return -EINVAL; } chip->ramc.reg = ramc; ad1889_writew(chip, AD_DS_RAMC, ramc); ad1889_readw(chip, AD_DS_RAMC); /* flush */ /* reset the chip when STOP - will disable IRQs */ if (cmd == SNDRV_PCM_TRIGGER_STOP) ad1889_channel_reset(chip, AD_CHAN_ADC); return 0; } /* Called in atomic context with IRQ disabled */ static snd_pcm_uframes_t snd_ad1889_playback_pointer(struct snd_pcm_substream *ss) { size_t ptr = 0; struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); if (unlikely(!(chip->wave.reg & AD_DS_WSMC_WAEN))) return 0; ptr = ad1889_readl(chip, AD_DMA_WAVCA); ptr -= chip->wave.addr; if (snd_BUG_ON(ptr >= chip->wave.size)) return 0; return bytes_to_frames(ss->runtime, ptr); } /* Called in atomic context with IRQ disabled */ static snd_pcm_uframes_t snd_ad1889_capture_pointer(struct snd_pcm_substream *ss) { size_t ptr = 0; struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); if (unlikely(!(chip->ramc.reg & AD_DS_RAMC_ADEN))) return 0; ptr = ad1889_readl(chip, AD_DMA_ADCCA); ptr -= chip->ramc.addr; if (snd_BUG_ON(ptr >= chip->ramc.size)) return 0; return bytes_to_frames(ss->runtime, ptr); } static struct snd_pcm_ops snd_ad1889_playback_ops = { .open = snd_ad1889_playback_open, .close = snd_ad1889_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_ad1889_hw_params, .hw_free = snd_ad1889_hw_free, .prepare = snd_ad1889_playback_prepare, .trigger = snd_ad1889_playback_trigger, .pointer = snd_ad1889_playback_pointer, }; static struct snd_pcm_ops snd_ad1889_capture_ops = { .open = snd_ad1889_capture_open, .close = snd_ad1889_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_ad1889_hw_params, .hw_free = snd_ad1889_hw_free, .prepare = snd_ad1889_capture_prepare, .trigger = snd_ad1889_capture_trigger, .pointer = snd_ad1889_capture_pointer, }; static irqreturn_t snd_ad1889_interrupt(int irq, void *dev_id) { unsigned long st; struct snd_ad1889 *chip = dev_id; st = ad1889_readl(chip, AD_DMA_DISR); /* clear ISR */ ad1889_writel(chip, AD_DMA_DISR, st); st &= AD_INTR_MASK; if (unlikely(!st)) return IRQ_NONE; if (st & (AD_DMA_DISR_PMAI|AD_DMA_DISR_PTAI)) ad1889_debug("Unexpected master or target abort interrupt!\n"); if ((st & AD_DMA_DISR_WAVI) && chip->psubs) snd_pcm_period_elapsed(chip->psubs); if ((st & AD_DMA_DISR_ADCI) && chip->csubs) snd_pcm_period_elapsed(chip->csubs); return IRQ_HANDLED; } static int __devinit snd_ad1889_pcm_init(struct snd_ad1889 *chip, int device, struct snd_pcm **rpcm) { int err; struct snd_pcm *pcm; if (rpcm) *rpcm = NULL; err = snd_pcm_new(chip->card, chip->card->driver, device, 1, 1, &pcm); if (err < 0) return err; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ad1889_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_ad1889_capture_ops); pcm->private_data = chip; pcm->info_flags = 0; strcpy(pcm->name, chip->card->shortname); chip->pcm = pcm; chip->psubs = NULL; chip->csubs = NULL; err = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), 
BUFFER_BYTES_MAX / 2, BUFFER_BYTES_MAX); if (err < 0) { snd_printk(KERN_ERR PFX "buffer allocation error: %d\n", err); return err; } if (rpcm) *rpcm = pcm; return 0; } static void snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ad1889 *chip = entry->private_data; u16 reg; int tmp; reg = ad1889_readw(chip, AD_DS_WSMC); snd_iprintf(buffer, "Wave output: %s\n", (reg & AD_DS_WSMC_WAEN) ? "enabled" : "disabled"); snd_iprintf(buffer, "Wave Channels: %s\n", (reg & AD_DS_WSMC_WAST) ? "stereo" : "mono"); snd_iprintf(buffer, "Wave Quality: %d-bit linear\n", (reg & AD_DS_WSMC_WA16) ? 16 : 8); /* WARQ is at offset 12 */ tmp = (reg & AD_DS_WSMC_WARQ) ? (((reg & AD_DS_WSMC_WARQ >> 12) & 0x01) ? 12 : 18) : 4; tmp /= (reg & AD_DS_WSMC_WAST) ? 2 : 1; snd_iprintf(buffer, "Wave FIFO: %d %s words\n\n", tmp, (reg & AD_DS_WSMC_WAST) ? "stereo" : "mono"); snd_iprintf(buffer, "Synthesis output: %s\n", reg & AD_DS_WSMC_SYEN ? "enabled" : "disabled"); /* SYRQ is at offset 4 */ tmp = (reg & AD_DS_WSMC_SYRQ) ? (((reg & AD_DS_WSMC_SYRQ >> 4) & 0x01) ? 12 : 18) : 4; tmp /= (reg & AD_DS_WSMC_WAST) ? 2 : 1; snd_iprintf(buffer, "Synthesis FIFO: %d %s words\n\n", tmp, (reg & AD_DS_WSMC_WAST) ? "stereo" : "mono"); reg = ad1889_readw(chip, AD_DS_RAMC); snd_iprintf(buffer, "ADC input: %s\n", (reg & AD_DS_RAMC_ADEN) ? "enabled" : "disabled"); snd_iprintf(buffer, "ADC Channels: %s\n", (reg & AD_DS_RAMC_ADST) ? "stereo" : "mono"); snd_iprintf(buffer, "ADC Quality: %d-bit linear\n", (reg & AD_DS_RAMC_AD16) ? 16 : 8); /* ACRQ is at offset 4 */ tmp = (reg & AD_DS_RAMC_ACRQ) ? (((reg & AD_DS_RAMC_ACRQ >> 4) & 0x01) ? 12 : 18) : 4; tmp /= (reg & AD_DS_RAMC_ADST) ? 2 : 1; snd_iprintf(buffer, "ADC FIFO: %d %s words\n\n", tmp, (reg & AD_DS_RAMC_ADST) ? "stereo" : "mono"); snd_iprintf(buffer, "Resampler input: %s\n", reg & AD_DS_RAMC_REEN ? "enabled" : "disabled"); /* RERQ is at offset 12 */ tmp = (reg & AD_DS_RAMC_RERQ) ? (((reg & AD_DS_RAMC_RERQ >> 12) & 0x01) ? 12 : 18) : 4; tmp /= (reg & AD_DS_RAMC_ADST) ? 2 : 1; snd_iprintf(buffer, "Resampler FIFO: %d %s words\n\n", tmp, (reg & AD_DS_WSMC_WAST) ? "stereo" : "mono"); /* doc says LSB represents -1.5dB, but the max value (-94.5dB) suggests that LSB is -3dB, which is more coherent with the logarithmic nature of the dB scale */ reg = ad1889_readw(chip, AD_DS_WADA); snd_iprintf(buffer, "Left: %s, -%d dB\n", (reg & AD_DS_WADA_LWAM) ? "mute" : "unmute", ((reg & AD_DS_WADA_LWAA) >> 8) * 3); reg = ad1889_readw(chip, AD_DS_WADA); snd_iprintf(buffer, "Right: %s, -%d dB\n", (reg & AD_DS_WADA_RWAM) ? 
"mute" : "unmute", ((reg & AD_DS_WADA_RWAA) >> 8) * 3); reg = ad1889_readw(chip, AD_DS_WAS); snd_iprintf(buffer, "Wave samplerate: %u Hz\n", reg); reg = ad1889_readw(chip, AD_DS_RES); snd_iprintf(buffer, "Resampler samplerate: %u Hz\n", reg); } static void __devinit snd_ad1889_proc_init(struct snd_ad1889 *chip) { struct snd_info_entry *entry; if (!snd_card_proc_new(chip->card, chip->card->driver, &entry)) snd_info_set_text_ops(entry, chip, snd_ad1889_proc_read); } static struct ac97_quirk ac97_quirks[] = { { .subvendor = 0x11d4, /* AD */ .subdevice = 0x1889, /* AD1889 */ .codec_id = AC97_ID_AD1819, .name = "AD1889", .type = AC97_TUNE_HP_ONLY }, { } /* terminator */ }; static void __devinit snd_ad1889_ac97_xinit(struct snd_ad1889 *chip) { u16 reg; reg = ad1889_readw(chip, AD_AC97_ACIC); reg |= AD_AC97_ACIC_ACRD; /* Reset Disable */ ad1889_writew(chip, AD_AC97_ACIC, reg); ad1889_readw(chip, AD_AC97_ACIC); /* flush posted write */ udelay(10); /* Interface Enable */ reg |= AD_AC97_ACIC_ACIE; ad1889_writew(chip, AD_AC97_ACIC, reg); snd_ad1889_ac97_ready(chip); /* Audio Stream Output | Variable Sample Rate Mode */ reg = ad1889_readw(chip, AD_AC97_ACIC); reg |= AD_AC97_ACIC_ASOE | AD_AC97_ACIC_VSRM; ad1889_writew(chip, AD_AC97_ACIC, reg); ad1889_readw(chip, AD_AC97_ACIC); /* flush posted write */ } static void snd_ad1889_ac97_bus_free(struct snd_ac97_bus *bus) { struct snd_ad1889 *chip = bus->private_data; chip->ac97_bus = NULL; } static void snd_ad1889_ac97_free(struct snd_ac97 *ac97) { struct snd_ad1889 *chip = ac97->private_data; chip->ac97 = NULL; } static int __devinit snd_ad1889_ac97_init(struct snd_ad1889 *chip, const char *quirk_override) { int err; struct snd_ac97_template ac97; static struct snd_ac97_bus_ops ops = { .write = snd_ad1889_ac97_write, .read = snd_ad1889_ac97_read, }; /* doing that here, it works. */ snd_ad1889_ac97_xinit(chip); err = snd_ac97_bus(chip->card, 0, &ops, chip, &chip->ac97_bus); if (err < 0) return err; chip->ac97_bus->private_free = snd_ad1889_ac97_bus_free; memset(&ac97, 0, sizeof(ac97)); ac97.private_data = chip; ac97.private_free = snd_ad1889_ac97_free; ac97.pci = chip->pci; err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97); if (err < 0) return err; snd_ac97_tune_hardware(chip->ac97, ac97_quirks, quirk_override); return 0; } static int snd_ad1889_free(struct snd_ad1889 *chip) { if (chip->irq < 0) goto skip_hw; spin_lock_irq(&chip->lock); ad1889_mute(chip); /* Turn off interrupt on count and zero DMA registers */ ad1889_channel_reset(chip, AD_CHAN_WAV | AD_CHAN_ADC); /* clear DISR. If we don't, we'd better jump off the Eiffel Tower */ ad1889_writel(chip, AD_DMA_DISR, AD_DMA_DISR_PTAI | AD_DMA_DISR_PMAI); ad1889_readl(chip, AD_DMA_DISR); /* flush, dammit! 
*/ spin_unlock_irq(&chip->lock); if (chip->irq >= 0) free_irq(chip->irq, chip); skip_hw: if (chip->iobase) iounmap(chip->iobase); pci_release_regions(chip->pci); pci_disable_device(chip->pci); kfree(chip); return 0; } static int snd_ad1889_dev_free(struct snd_device *device) { struct snd_ad1889 *chip = device->device_data; return snd_ad1889_free(chip); } static int __devinit snd_ad1889_init(struct snd_ad1889 *chip) { ad1889_writew(chip, AD_DS_CCS, AD_DS_CCS_CLKEN); /* turn on clock */ ad1889_readw(chip, AD_DS_CCS); /* flush posted write */ mdelay(10); /* enable Master and Target abort interrupts */ ad1889_writel(chip, AD_DMA_DISR, AD_DMA_DISR_PMAE | AD_DMA_DISR_PTAE); return 0; } static int __devinit snd_ad1889_create(struct snd_card *card, struct pci_dev *pci, struct snd_ad1889 **rchip) { int err; struct snd_ad1889 *chip; static struct snd_device_ops ops = { .dev_free = snd_ad1889_dev_free, }; *rchip = NULL; if ((err = pci_enable_device(pci)) < 0) return err; /* check PCI availability (32bit DMA) */ if (pci_set_dma_mask(pci, DMA_BIT_MASK(32)) < 0 || pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)) < 0) { printk(KERN_ERR PFX "error setting 32-bit DMA mask.\n"); pci_disable_device(pci); return -ENXIO; } /* allocate chip specific data with zero-filled memory */ if ((chip = kzalloc(sizeof(*chip), GFP_KERNEL)) == NULL) { pci_disable_device(pci); return -ENOMEM; } chip->card = card; card->private_data = chip; chip->pci = pci; chip->irq = -1; /* (1) PCI resource allocation */ if ((err = pci_request_regions(pci, card->driver)) < 0) goto free_and_ret; chip->bar = pci_resource_start(pci, 0); chip->iobase = pci_ioremap_bar(pci, 0); if (chip->iobase == NULL) { printk(KERN_ERR PFX "unable to reserve region.\n"); err = -EBUSY; goto free_and_ret; } pci_set_master(pci); spin_lock_init(&chip->lock); /* only now can we call ad1889_free */ if (request_irq(pci->irq, snd_ad1889_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip)) { printk(KERN_ERR PFX "cannot obtain IRQ %d\n", pci->irq); snd_ad1889_free(chip); return -EBUSY; } chip->irq = pci->irq; synchronize_irq(chip->irq); /* (2) initialization of the chip hardware */ if ((err = snd_ad1889_init(chip)) < 0) { snd_ad1889_free(chip); return err; } if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_ad1889_free(chip); return err; } snd_card_set_dev(card, &pci->dev); *rchip = chip; return 0; free_and_ret: kfree(chip); pci_disable_device(pci); return err; } static int __devinit snd_ad1889_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { int err; static int devno; struct snd_card *card; struct snd_ad1889 *chip; /* (1) */ if (devno >= SNDRV_CARDS) return -ENODEV; if (!enable[devno]) { devno++; return -ENOENT; } /* (2) */ err = snd_card_create(index[devno], id[devno], THIS_MODULE, 0, &card); /* XXX REVISIT: we can probably allocate chip in this call */ if (err < 0) return err; strcpy(card->driver, "AD1889"); strcpy(card->shortname, "Analog Devices AD1889"); /* (3) */ err = snd_ad1889_create(card, pci, &chip); if (err < 0) goto free_and_ret; /* (4) */ sprintf(card->longname, "%s at 0x%lx irq %i", card->shortname, chip->bar, chip->irq); /* (5) */ /* register AC97 mixer */ err = snd_ad1889_ac97_init(chip, ac97_quirk[devno]); if (err < 0) goto free_and_ret; err = snd_ad1889_pcm_init(chip, 0, NULL); if (err < 0) goto free_and_ret; /* register proc interface */ snd_ad1889_proc_init(chip); /* (6) */ err = snd_card_register(card); if (err < 0) goto free_and_ret; /* (7) */ pci_set_drvdata(pci, card); devno++; return 0; free_and_ret: 
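	/* snd_card_free() also tears the chip down via its dev_free op */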
snd_card_free(card); return err; } static void __devexit snd_ad1889_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static DEFINE_PCI_DEVICE_TABLE(snd_ad1889_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_ANALOG_DEVICES, PCI_DEVICE_ID_AD1889JS) }, { 0, }, }; MODULE_DEVICE_TABLE(pci, snd_ad1889_ids); static struct pci_driver ad1889_pci_driver = { .name = KBUILD_MODNAME, .id_table = snd_ad1889_ids, .probe = snd_ad1889_probe, .remove = __devexit_p(snd_ad1889_remove), }; static int __init alsa_ad1889_init(void) { return pci_register_driver(&ad1889_pci_driver); } static void __exit alsa_ad1889_fini(void) { pci_unregister_driver(&ad1889_pci_driver); } module_init(alsa_ad1889_init); module_exit(alsa_ad1889_fini);
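/*
 * A recurring idiom in the driver above is "write, then read back":
 * ad1889_writew() followed by ad1889_readw() on the same register (see
 * snd_ad1889_ac97_xinit() and snd_ad1889_free()). The read-back flushes
 * posted PCI writes, so a following udelay() times the hardware rather
 * than a write still sitting in a bus buffer. A minimal sketch of the
 * idiom, assuming an ioremap()ed window such as chip->iobase; the helper
 * name ad1889_writew_flushed is ours, not the driver's:
 */
static inline void ad1889_writew_flushed(void __iomem *iobase, int reg, u16 val)
{
	writew(val, iobase + reg);	/* PCI write may be posted */
	(void)readw(iobase + reg);	/* read-back forces it to complete */
}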
gpl-2.0
shao2610/3.4-kernel
drivers/media/rc/ir-rc5-sz-decoder.c
4894
3949
/* ir-rc5-sz-decoder.c - handle RC5 Streamzap IR Pulse/Space protocol * * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com> * Copyright (C) 2010 by Jarod Wilson <jarod@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * This code handles the 15 bit RC5-ish protocol used by the Streamzap * PC Remote. * It assumes a carrier of 36 kHz, with a total of 15 bits, where * the first two bits are start bits, and a third one is a filler bit */ #include "rc-core-priv.h" #include <linux/module.h> #define RC5_SZ_NBITS 15 #define RC5_UNIT 888888 /* ns */ #define RC5_BIT_START (1 * RC5_UNIT) #define RC5_BIT_END (1 * RC5_UNIT) enum rc5_sz_state { STATE_INACTIVE, STATE_BIT_START, STATE_BIT_END, STATE_FINISHED, }; /** * ir_rc5_sz_decode() - Decode one RC-5 Streamzap pulse or space * @dev: the struct rc_dev descriptor of the device * @ev: the struct ir_raw_event descriptor of the pulse/space * * This function returns -EINVAL if the pulse violates the state machine */ static int ir_rc5_sz_decode(struct rc_dev *dev, struct ir_raw_event ev) { struct rc5_sz_dec *data = &dev->raw->rc5_sz; u8 toggle, command, system; u32 scancode; if (!(dev->raw->enabled_protocols & RC_TYPE_RC5_SZ)) return 0; if (!is_timing_event(ev)) { if (ev.reset) data->state = STATE_INACTIVE; return 0; } if (!geq_margin(ev.duration, RC5_UNIT, RC5_UNIT / 2)) goto out; again: IR_dprintk(2, "RC5-sz decode started at state %i (%uus %s)\n", data->state, TO_US(ev.duration), TO_STR(ev.pulse)); if (!geq_margin(ev.duration, RC5_UNIT, RC5_UNIT / 2)) return 0; switch (data->state) { case STATE_INACTIVE: if (!ev.pulse) break; data->state = STATE_BIT_START; data->count = 1; data->wanted_bits = RC5_SZ_NBITS; decrease_duration(&ev, RC5_BIT_START); goto again; case STATE_BIT_START: if (!eq_margin(ev.duration, RC5_BIT_START, RC5_UNIT / 2)) break; data->bits <<= 1; if (!ev.pulse) data->bits |= 1; data->count++; data->state = STATE_BIT_END; return 0; case STATE_BIT_END: if (!is_transition(&ev, &dev->raw->prev_ev)) break; if (data->count == data->wanted_bits) data->state = STATE_FINISHED; else data->state = STATE_BIT_START; decrease_duration(&ev, RC5_BIT_END); goto again; case STATE_FINISHED: if (ev.pulse) break; /* RC5-sz */ command = (data->bits & 0x0003F) >> 0; system = (data->bits & 0x02FC0) >> 6; toggle = (data->bits & 0x01000) ?
1 : 0; scancode = system << 6 | command; IR_dprintk(1, "RC5-sz scancode 0x%04x (toggle: %u)\n", scancode, toggle); rc_keydown(dev, scancode, toggle); data->state = STATE_INACTIVE; return 0; } out: IR_dprintk(1, "RC5-sz decode failed at state %i (%uus %s)\n", data->state, TO_US(ev.duration), TO_STR(ev.pulse)); data->state = STATE_INACTIVE; return -EINVAL; } static struct ir_raw_handler rc5_sz_handler = { .protocols = RC_TYPE_RC5_SZ, .decode = ir_rc5_sz_decode, }; static int __init ir_rc5_sz_decode_init(void) { ir_raw_handler_register(&rc5_sz_handler); printk(KERN_INFO "IR RC5 (streamzap) protocol handler initialized\n"); return 0; } static void __exit ir_rc5_sz_decode_exit(void) { ir_raw_handler_unregister(&rc5_sz_handler); } module_init(ir_rc5_sz_decode_init); module_exit(ir_rc5_sz_decode_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>"); MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)"); MODULE_DESCRIPTION("RC5 (streamzap) IR protocol decoder");
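/*
 * The STATE_FINISHED arm above unpacks the 15 accumulated bits into
 * command, system and toggle fields before forming the scancode as
 * (system << 6) | command. A freestanding sketch of that unpacking,
 * handy for sanity-checking captured bit patterns offline; the type and
 * helper names are ours, not part of rc-core:
 */
struct rc5_sz_fields {
	u8 command;	/* bits 0-5 */
	u8 system;	/* bits 6-11 and 13 (mask 0x02FC0, shifted down by 6) */
	u8 toggle;	/* bit 12 */
};

static inline struct rc5_sz_fields rc5_sz_unpack(u32 bits)
{
	struct rc5_sz_fields f;

	f.command = (bits & 0x0003F) >> 0;
	f.system = (bits & 0x02FC0) >> 6;
	f.toggle = (bits & 0x01000) ? 1 : 0;
	return f;
}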
gpl-2.0
fus1on/3.4.xx_LG_kernel
drivers/media/video/saa717x.c
5150
33585
/* * saa717x - Philips SAA717xHL video decoder driver * * Based on the saa7115 driver * * Changes by Ohta Kyuma <alpha292@bremen.or.jp> * - Apply to SAA717x,NEC uPD64031,uPD64083. (1/31/2004) * * Changes by T.Adachi (tadachi@tadachi-net.com) * - support audio, video scaler etc, and checked the initialize sequence. * * Cleaned up by Hans Verkuil <hverkuil@xs4all.nl> * * Note: this is a reverse-engineered driver based on captures from * the I2C bus under Windows. This chip is very similar to the saa7134, * though. Unfortunately, this driver currently works only for NTSC. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/videodev2.h> #include <linux/i2c.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> MODULE_DESCRIPTION("Philips SAA717x audio/video decoder driver"); MODULE_AUTHOR("K. Ohta, T. Adachi, Hans Verkuil"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug level (0-1)"); /* * Generic i2c probe * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1' */ struct saa717x_state { struct v4l2_subdev sd; struct v4l2_ctrl_handler hdl; v4l2_std_id std; int input; int enable; int radio; int playback; int audio; int tuner_audio_mode; int audio_main_mute; int audio_main_vol_r; int audio_main_vol_l; u16 audio_main_bass; u16 audio_main_treble; u16 audio_main_volume; u16 audio_main_balance; int audio_input; }; static inline struct saa717x_state *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct saa717x_state, sd); } static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl) { return &container_of(ctrl->handler, struct saa717x_state, hdl)->sd; } /* ----------------------------------------------------------------------- */ /* for audio mode */ #define TUNER_AUDIO_MONO 0 /* LL */ #define TUNER_AUDIO_STEREO 1 /* LR */ #define TUNER_AUDIO_LANG1 2 /* LL */ #define TUNER_AUDIO_LANG2 3 /* RR */ #define SAA717X_NTSC_WIDTH (704) #define SAA717X_NTSC_HEIGHT (480) /* ----------------------------------------------------------------------- */ static int saa717x_write(struct v4l2_subdev *sd, u32 reg, u32 value) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct i2c_adapter *adap = client->adapter; int fw_addr = reg == 0x454 || (reg >= 0x464 && reg <= 0x478) || reg == 0x480 || reg == 0x488; unsigned char mm1[6]; struct i2c_msg msg; msg.flags = 0; msg.addr = client->addr; mm1[0] = (reg >> 8) & 0xff; mm1[1] = reg & 0xff; if (fw_addr) { mm1[4] = (value >> 16) & 0xff; mm1[3] = (value >> 8) & 0xff; mm1[2] = value & 0xff; } else { mm1[2] = value & 0xff; } msg.len = fw_addr ? 5 : 3; /* Long Registers have *only* three bytes!
*/ msg.buf = mm1; v4l2_dbg(2, debug, sd, "wrote: reg 0x%03x=%08x\n", reg, value); return i2c_transfer(adap, &msg, 1) == 1; } static void saa717x_write_regs(struct v4l2_subdev *sd, u32 *data) { while (data[0] || data[1]) { saa717x_write(sd, data[0], data[1]); data += 2; } } static u32 saa717x_read(struct v4l2_subdev *sd, u32 reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct i2c_adapter *adap = client->adapter; int fw_addr = (reg >= 0x404 && reg <= 0x4b8) || reg == 0x528; unsigned char mm1[2]; unsigned char mm2[4] = { 0, 0, 0, 0 }; struct i2c_msg msgs[2]; u32 value; msgs[0].flags = 0; msgs[1].flags = I2C_M_RD; msgs[0].addr = msgs[1].addr = client->addr; mm1[0] = (reg >> 8) & 0xff; mm1[1] = reg & 0xff; msgs[0].len = 2; msgs[0].buf = mm1; msgs[1].len = fw_addr ? 3 : 1; /* Multibyte Registers contains *only* 3 bytes */ msgs[1].buf = mm2; i2c_transfer(adap, msgs, 2); if (fw_addr) value = (mm2[2] & 0xff) | ((mm2[1] & 0xff) >> 8) | ((mm2[0] & 0xff) >> 16); else value = mm2[0] & 0xff; v4l2_dbg(2, debug, sd, "read: reg 0x%03x=0x%08x\n", reg, value); return value; } /* ----------------------------------------------------------------------- */ static u32 reg_init_initialize[] = { /* from linux driver */ 0x101, 0x008, /* Increment delay */ 0x103, 0x000, /* Analog input control 2 */ 0x104, 0x090, /* Analog input control 3 */ 0x105, 0x090, /* Analog input control 4 */ 0x106, 0x0eb, /* Horizontal sync start */ 0x107, 0x0e0, /* Horizontal sync stop */ 0x109, 0x055, /* Luminance control */ 0x10f, 0x02a, /* Chroma gain control */ 0x110, 0x000, /* Chroma control 2 */ 0x114, 0x045, /* analog/ADC */ 0x118, 0x040, /* RAW data gain */ 0x119, 0x080, /* RAW data offset */ 0x044, 0x000, /* VBI horizontal input window start (L) TASK A */ 0x045, 0x000, /* VBI horizontal input window start (H) TASK A */ 0x046, 0x0cf, /* VBI horizontal input window stop (L) TASK A */ 0x047, 0x002, /* VBI horizontal input window stop (H) TASK A */ 0x049, 0x000, /* VBI vertical input window start (H) TASK A */ 0x04c, 0x0d0, /* VBI horizontal output length (L) TASK A */ 0x04d, 0x002, /* VBI horizontal output length (H) TASK A */ 0x064, 0x080, /* Lumina brightness TASK A */ 0x065, 0x040, /* Luminance contrast TASK A */ 0x066, 0x040, /* Chroma saturation TASK A */ /* 067H: Reserved */ 0x068, 0x000, /* VBI horizontal scaling increment (L) TASK A */ 0x069, 0x004, /* VBI horizontal scaling increment (H) TASK A */ 0x06a, 0x000, /* VBI phase offset TASK A */ 0x06e, 0x000, /* Horizontal phase offset Luma TASK A */ 0x06f, 0x000, /* Horizontal phase offset Chroma TASK A */ 0x072, 0x000, /* Vertical filter mode TASK A */ 0x084, 0x000, /* VBI horizontal input window start (L) TAKS B */ 0x085, 0x000, /* VBI horizontal input window start (H) TAKS B */ 0x086, 0x0cf, /* VBI horizontal input window stop (L) TAKS B */ 0x087, 0x002, /* VBI horizontal input window stop (H) TAKS B */ 0x089, 0x000, /* VBI vertical input window start (H) TAKS B */ 0x08c, 0x0d0, /* VBI horizontal output length (L) TASK B */ 0x08d, 0x002, /* VBI horizontal output length (H) TASK B */ 0x0a4, 0x080, /* Lumina brightness TASK B */ 0x0a5, 0x040, /* Luminance contrast TASK B */ 0x0a6, 0x040, /* Chroma saturation TASK B */ /* 0A7H reserved */ 0x0a8, 0x000, /* VBI horizontal scaling increment (L) TASK B */ 0x0a9, 0x004, /* VBI horizontal scaling increment (H) TASK B */ 0x0aa, 0x000, /* VBI phase offset TASK B */ 0x0ae, 0x000, /* Horizontal phase offset Luma TASK B */ 0x0af, 0x000, /*Horizontal phase offset Chroma TASK B */ 0x0b2, 0x000, /* Vertical filter mode TASK B */ 
0x00c, 0x000, /* Start point GREEN path */ 0x00d, 0x000, /* Start point BLUE path */ 0x00e, 0x000, /* Start point RED path */ 0x010, 0x010, /* GREEN path gamma curve --- */ 0x011, 0x020, 0x012, 0x030, 0x013, 0x040, 0x014, 0x050, 0x015, 0x060, 0x016, 0x070, 0x017, 0x080, 0x018, 0x090, 0x019, 0x0a0, 0x01a, 0x0b0, 0x01b, 0x0c0, 0x01c, 0x0d0, 0x01d, 0x0e0, 0x01e, 0x0f0, 0x01f, 0x0ff, /* --- GREEN path gamma curve */ 0x020, 0x010, /* BLUE path gamma curve --- */ 0x021, 0x020, 0x022, 0x030, 0x023, 0x040, 0x024, 0x050, 0x025, 0x060, 0x026, 0x070, 0x027, 0x080, 0x028, 0x090, 0x029, 0x0a0, 0x02a, 0x0b0, 0x02b, 0x0c0, 0x02c, 0x0d0, 0x02d, 0x0e0, 0x02e, 0x0f0, 0x02f, 0x0ff, /* --- BLUE path gamma curve */ 0x030, 0x010, /* RED path gamma curve --- */ 0x031, 0x020, 0x032, 0x030, 0x033, 0x040, 0x034, 0x050, 0x035, 0x060, 0x036, 0x070, 0x037, 0x080, 0x038, 0x090, 0x039, 0x0a0, 0x03a, 0x0b0, 0x03b, 0x0c0, 0x03c, 0x0d0, 0x03d, 0x0e0, 0x03e, 0x0f0, 0x03f, 0x0ff, /* --- RED path gamma curve */ 0x109, 0x085, /* Luminance control */ /**** from app start ****/ 0x584, 0x000, /* AGC gain control */ 0x585, 0x000, /* Program count */ 0x586, 0x003, /* Status reset */ 0x588, 0x0ff, /* Number of audio samples (L) */ 0x589, 0x00f, /* Number of audio samples (M) */ 0x58a, 0x000, /* Number of audio samples (H) */ 0x58b, 0x000, /* Audio select */ 0x58c, 0x010, /* Audio channel assign1 */ 0x58d, 0x032, /* Audio channel assign2 */ 0x58e, 0x054, /* Audio channel assign3 */ 0x58f, 0x023, /* Audio format */ 0x590, 0x000, /* SIF control */ 0x595, 0x000, /* ?? */ 0x596, 0x000, /* ?? */ 0x597, 0x000, /* ?? */ 0x464, 0x00, /* Digital input crossbar1 */ 0x46c, 0xbbbb10, /* Digital output selection1-3 */ 0x470, 0x101010, /* Digital output selection4-6 */ 0x478, 0x00, /* Sound feature control */ 0x474, 0x18, /* Softmute control */ 0x454, 0x0425b9, /* Sound Easy programming(reset) */ 0x454, 0x042539, /* Sound Easy programming(reset) */ /**** common setting( of DVD play, including scaler commands) ****/ 0x042, 0x003, /* Data path configuration for VBI (TASK A) */ 0x082, 0x003, /* Data path configuration for VBI (TASK B) */ 0x108, 0x0f8, /* Sync control */ 0x2a9, 0x0fd, /* ??? 
*/ 0x102, 0x089, /* select video input "mode 9" */ 0x111, 0x000, /* Mode/delay control */ 0x10e, 0x00a, /* Chroma control 1 */ 0x594, 0x002, /* SIF, analog I/O select */ 0x454, 0x0425b9, /* Sound */ 0x454, 0x042539, 0x111, 0x000, 0x10e, 0x00a, 0x464, 0x000, 0x300, 0x000, 0x301, 0x006, 0x302, 0x000, 0x303, 0x006, 0x308, 0x040, 0x309, 0x000, 0x30a, 0x000, 0x30b, 0x000, 0x000, 0x002, 0x001, 0x000, 0x002, 0x000, 0x003, 0x000, 0x004, 0x033, 0x040, 0x01d, 0x041, 0x001, 0x042, 0x004, 0x043, 0x000, 0x080, 0x01e, 0x081, 0x001, 0x082, 0x004, 0x083, 0x000, 0x190, 0x018, 0x115, 0x000, 0x116, 0x012, 0x117, 0x018, 0x04a, 0x011, 0x08a, 0x011, 0x04b, 0x000, 0x08b, 0x000, 0x048, 0x000, 0x088, 0x000, 0x04e, 0x012, 0x08e, 0x012, 0x058, 0x012, 0x098, 0x012, 0x059, 0x000, 0x099, 0x000, 0x05a, 0x003, 0x09a, 0x003, 0x05b, 0x001, 0x09b, 0x001, 0x054, 0x008, 0x094, 0x008, 0x055, 0x000, 0x095, 0x000, 0x056, 0x0c7, 0x096, 0x0c7, 0x057, 0x002, 0x097, 0x002, 0x0ff, 0x0ff, 0x060, 0x001, 0x0a0, 0x001, 0x061, 0x000, 0x0a1, 0x000, 0x062, 0x000, 0x0a2, 0x000, 0x063, 0x000, 0x0a3, 0x000, 0x070, 0x000, 0x0b0, 0x000, 0x071, 0x004, 0x0b1, 0x004, 0x06c, 0x0e9, 0x0ac, 0x0e9, 0x06d, 0x003, 0x0ad, 0x003, 0x05c, 0x0d0, 0x09c, 0x0d0, 0x05d, 0x002, 0x09d, 0x002, 0x05e, 0x0f2, 0x09e, 0x0f2, 0x05f, 0x000, 0x09f, 0x000, 0x074, 0x000, 0x0b4, 0x000, 0x075, 0x000, 0x0b5, 0x000, 0x076, 0x000, 0x0b6, 0x000, 0x077, 0x000, 0x0b7, 0x000, 0x195, 0x008, 0x0ff, 0x0ff, 0x108, 0x0f8, 0x111, 0x000, 0x10e, 0x00a, 0x2a9, 0x0fd, 0x464, 0x001, 0x454, 0x042135, 0x598, 0x0e7, 0x599, 0x07d, 0x59a, 0x018, 0x59c, 0x066, 0x59d, 0x090, 0x59e, 0x001, 0x584, 0x000, 0x585, 0x000, 0x586, 0x003, 0x588, 0x0ff, 0x589, 0x00f, 0x58a, 0x000, 0x58b, 0x000, 0x58c, 0x010, 0x58d, 0x032, 0x58e, 0x054, 0x58f, 0x023, 0x590, 0x000, 0x595, 0x000, 0x596, 0x000, 0x597, 0x000, 0x464, 0x000, 0x46c, 0xbbbb10, 0x470, 0x101010, 0x478, 0x000, 0x474, 0x018, 0x454, 0x042135, 0x598, 0x0e7, 0x599, 0x07d, 0x59a, 0x018, 0x59c, 0x066, 0x59d, 0x090, 0x59e, 0x001, 0x584, 0x000, 0x585, 0x000, 0x586, 0x003, 0x588, 0x0ff, 0x589, 0x00f, 0x58a, 0x000, 0x58b, 0x000, 0x58c, 0x010, 0x58d, 0x032, 0x58e, 0x054, 0x58f, 0x023, 0x590, 0x000, 0x595, 0x000, 0x596, 0x000, 0x597, 0x000, 0x464, 0x000, 0x46c, 0xbbbb10, 0x470, 0x101010, 0x478, 0x000, 0x474, 0x018, 0x454, 0x042135, 0x598, 0x0e7, 0x599, 0x07d, 0x59a, 0x018, 0x59c, 0x066, 0x59d, 0x090, 0x59e, 0x001, 0x584, 0x000, 0x585, 0x000, 0x586, 0x003, 0x588, 0x0ff, 0x589, 0x00f, 0x58a, 0x000, 0x58b, 0x000, 0x58c, 0x010, 0x58d, 0x032, 0x58e, 0x054, 0x58f, 0x023, 0x590, 0x000, 0x595, 0x000, 0x596, 0x000, 0x597, 0x000, 0x464, 0x000, 0x46c, 0xbbbb10, 0x470, 0x101010, 0x478, 0x000, 0x474, 0x018, 0x454, 0x042135, 0x193, 0x000, 0x300, 0x000, 0x301, 0x006, 0x302, 0x000, 0x303, 0x006, 0x308, 0x040, 0x309, 0x000, 0x30a, 0x000, 0x30b, 0x000, 0x000, 0x002, 0x001, 0x000, 0x002, 0x000, 0x003, 0x000, 0x004, 0x033, 0x040, 0x01d, 0x041, 0x001, 0x042, 0x004, 0x043, 0x000, 0x080, 0x01e, 0x081, 0x001, 0x082, 0x004, 0x083, 0x000, 0x190, 0x018, 0x115, 0x000, 0x116, 0x012, 0x117, 0x018, 0x04a, 0x011, 0x08a, 0x011, 0x04b, 0x000, 0x08b, 0x000, 0x048, 0x000, 0x088, 0x000, 0x04e, 0x012, 0x08e, 0x012, 0x058, 0x012, 0x098, 0x012, 0x059, 0x000, 0x099, 0x000, 0x05a, 0x003, 0x09a, 0x003, 0x05b, 0x001, 0x09b, 0x001, 0x054, 0x008, 0x094, 0x008, 0x055, 0x000, 0x095, 0x000, 0x056, 0x0c7, 0x096, 0x0c7, 0x057, 0x002, 0x097, 0x002, 0x060, 0x001, 0x0a0, 0x001, 0x061, 0x000, 0x0a1, 0x000, 0x062, 0x000, 0x0a2, 0x000, 0x063, 0x000, 0x0a3, 0x000, 0x070, 0x000, 0x0b0, 0x000, 0x071, 0x004, 0x0b1, 0x004, 
0x06c, 0x0e9, 0x0ac, 0x0e9, 0x06d, 0x003, 0x0ad, 0x003, 0x05c, 0x0d0, 0x09c, 0x0d0, 0x05d, 0x002, 0x09d, 0x002, 0x05e, 0x0f2, 0x09e, 0x0f2, 0x05f, 0x000, 0x09f, 0x000, 0x074, 0x000, 0x0b4, 0x000, 0x075, 0x000, 0x0b5, 0x000, 0x076, 0x000, 0x0b6, 0x000, 0x077, 0x000, 0x0b7, 0x000, 0x195, 0x008, 0x598, 0x0e7, 0x599, 0x07d, 0x59a, 0x018, 0x59c, 0x066, 0x59d, 0x090, 0x59e, 0x001, 0x584, 0x000, 0x585, 0x000, 0x586, 0x003, 0x588, 0x0ff, 0x589, 0x00f, 0x58a, 0x000, 0x58b, 0x000, 0x58c, 0x010, 0x58d, 0x032, 0x58e, 0x054, 0x58f, 0x023, 0x590, 0x000, 0x595, 0x000, 0x596, 0x000, 0x597, 0x000, 0x464, 0x000, 0x46c, 0xbbbb10, 0x470, 0x101010, 0x478, 0x000, 0x474, 0x018, 0x454, 0x042135, 0x193, 0x0a6, 0x108, 0x0f8, 0x042, 0x003, 0x082, 0x003, 0x454, 0x0425b9, 0x454, 0x042539, 0x193, 0x000, 0x193, 0x0a6, 0x464, 0x000, 0, 0 }; /* Tuner */ static u32 reg_init_tuner_input[] = { 0x108, 0x0f8, /* Sync control */ 0x111, 0x000, /* Mode/delay control */ 0x10e, 0x00a, /* Chroma control 1 */ 0, 0 }; /* Composite */ static u32 reg_init_composite_input[] = { 0x108, 0x0e8, /* Sync control */ 0x111, 0x000, /* Mode/delay control */ 0x10e, 0x04a, /* Chroma control 1 */ 0, 0 }; /* S-Video */ static u32 reg_init_svideo_input[] = { 0x108, 0x0e8, /* Sync control */ 0x111, 0x000, /* Mode/delay control */ 0x10e, 0x04a, /* Chroma control 1 */ 0, 0 }; static u32 reg_set_audio_template[4][2] = { { /* for MONO tadachi 6/29 DMA audio output select? Register 0x46c 7-4: DMA2, 3-0: DMA1 ch. DMA4, DMA3 DMA2, DMA1 0: MAIN left, 1: MAIN right 2: AUX1 left, 3: AUX1 right 4: AUX2 left, 5: AUX2 right 6: DPL left, 7: DPL right 8: DPL center, 9: DPL surround A: monitor output, B: digital sense */ 0xbbbb00, /* tadachi 6/29 DAC and I2S output select? Register 0x470 7-4:DAC right ch. 3-0:DAC left ch. I2S1 right,left I2S2 right,left */ 0x00, }, { /* for STEREO */ 0xbbbb10, 0x101010, }, { /* for LANG1 */ 0xbbbb00, 0x00, }, { /* for LANG2/SAP */ 0xbbbb11, 0x111111, } }; /* Get detected audio flags (from saa7134 driver) */ static void get_inf_dev_status(struct v4l2_subdev *sd, int *dual_flag, int *stereo_flag) { u32 reg_data3; static char *stdres[0x20] = { [0x00] = "no standard detected", [0x01] = "B/G (in progress)", [0x02] = "D/K (in progress)", [0x03] = "M (in progress)", [0x04] = "B/G A2", [0x05] = "B/G NICAM", [0x06] = "D/K A2 (1)", [0x07] = "D/K A2 (2)", [0x08] = "D/K A2 (3)", [0x09] = "D/K NICAM", [0x0a] = "L NICAM", [0x0b] = "I NICAM", [0x0c] = "M Korea", [0x0d] = "M BTSC ", [0x0e] = "M EIAJ", [0x0f] = "FM radio / IF 10.7 / 50 deemp", [0x10] = "FM radio / IF 10.7 / 75 deemp", [0x11] = "FM radio / IF sel / 50 deemp", [0x12] = "FM radio / IF sel / 75 deemp", [0x13 ... 0x1e] = "unknown", [0x1f] = "??? [in progress]", }; *dual_flag = *stereo_flag = 0; /* (demdec status: 0x528) */ /* read current status */ reg_data3 = saa717x_read(sd, 0x0528); v4l2_dbg(1, debug, sd, "tvaudio thread status: 0x%x [%s%s%s]\n", reg_data3, stdres[reg_data3 & 0x1f], (reg_data3 & 0x000020) ? ",stereo" : "", (reg_data3 & 0x000040) ? ",dual" : ""); v4l2_dbg(1, debug, sd, "detailed status: " "%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s\n", (reg_data3 & 0x000080) ? " A2/EIAJ pilot tone " : "", (reg_data3 & 0x000100) ? " A2/EIAJ dual " : "", (reg_data3 & 0x000200) ? " A2/EIAJ stereo " : "", (reg_data3 & 0x000400) ? " A2/EIAJ noise mute " : "", (reg_data3 & 0x000800) ? " BTSC/FM radio pilot " : "", (reg_data3 & 0x001000) ? " SAP carrier " : "", (reg_data3 & 0x002000) ? " BTSC stereo noise mute " : "", (reg_data3 & 0x004000) ? " SAP noise mute " : "", (reg_data3 & 0x008000) ? 
" VDSP " : "", (reg_data3 & 0x010000) ? " NICST " : "", (reg_data3 & 0x020000) ? " NICDU " : "", (reg_data3 & 0x040000) ? " NICAM muted " : "", (reg_data3 & 0x080000) ? " NICAM reserve sound " : "", (reg_data3 & 0x100000) ? " init done " : ""); if (reg_data3 & 0x000220) { v4l2_dbg(1, debug, sd, "ST!!!\n"); *stereo_flag = 1; } if (reg_data3 & 0x000140) { v4l2_dbg(1, debug, sd, "DUAL!!!\n"); *dual_flag = 1; } } /* regs write to set audio mode */ static void set_audio_mode(struct v4l2_subdev *sd, int audio_mode) { v4l2_dbg(1, debug, sd, "writing registers to set audio mode by set %d\n", audio_mode); saa717x_write(sd, 0x46c, reg_set_audio_template[audio_mode][0]); saa717x_write(sd, 0x470, reg_set_audio_template[audio_mode][1]); } /* write regs to set audio volume, bass and treble */ static int set_audio_regs(struct v4l2_subdev *sd, struct saa717x_state *decoder) { u8 mute = 0xac; /* -84 dB */ u32 val; unsigned int work_l, work_r; /* set SIF analog I/O select */ saa717x_write(sd, 0x0594, decoder->audio_input); v4l2_dbg(1, debug, sd, "set audio input %d\n", decoder->audio_input); /* normalize ( 65535 to 0 -> 24 to -40 (not -84)) */ work_l = (min(65536 - decoder->audio_main_balance, 32768) * decoder->audio_main_volume) / 32768; work_r = (min(decoder->audio_main_balance, (u16)32768) * decoder->audio_main_volume) / 32768; decoder->audio_main_vol_l = (long)work_l * (24 - (-40)) / 65535 - 40; decoder->audio_main_vol_r = (long)work_r * (24 - (-40)) / 65535 - 40; /* set main volume */ /* main volume L[7-0],R[7-0],0x00 24=24dB,-83dB, -84(mute) */ /* def:0dB->6dB(MPG600GR) */ /* if mute is on, set mute */ if (decoder->audio_main_mute) { val = mute | (mute << 8); } else { val = (u8)decoder->audio_main_vol_l | ((u8)decoder->audio_main_vol_r << 8); } saa717x_write(sd, 0x480, val); /* set bass and treble */ val = decoder->audio_main_bass & 0x1f; val |= (decoder->audio_main_treble & 0x1f) << 5; saa717x_write(sd, 0x488, val); return 0; } /********** scaling staff ***********/ static void set_h_prescale(struct v4l2_subdev *sd, int task, int prescale) { static const struct { int xpsc; int xacl; int xc2_1; int xdcg; int vpfy; } vals[] = { /* XPSC XACL XC2_1 XDCG VPFY */ { 1, 0, 0, 0, 0 }, { 2, 2, 1, 2, 2 }, { 3, 4, 1, 3, 2 }, { 4, 8, 1, 4, 2 }, { 5, 8, 1, 4, 2 }, { 6, 8, 1, 4, 3 }, { 7, 8, 1, 4, 3 }, { 8, 15, 0, 4, 3 }, { 9, 15, 0, 4, 3 }, { 10, 16, 1, 5, 3 }, }; static const int count = ARRAY_SIZE(vals); int i, task_shift; task_shift = task * 0x40; for (i = 0; i < count; i++) if (vals[i].xpsc == prescale) break; if (i == count) return; /* horizonal prescaling */ saa717x_write(sd, 0x60 + task_shift, vals[i].xpsc); /* accumulation length */ saa717x_write(sd, 0x61 + task_shift, vals[i].xacl); /* level control */ saa717x_write(sd, 0x62 + task_shift, (vals[i].xc2_1 << 3) | vals[i].xdcg); /*FIR prefilter control */ saa717x_write(sd, 0x63 + task_shift, (vals[i].vpfy << 2) | vals[i].vpfy); } /********** scaling staff ***********/ static void set_v_scale(struct v4l2_subdev *sd, int task, int yscale) { int task_shift; task_shift = task * 0x40; /* Vertical scaling ratio (LOW) */ saa717x_write(sd, 0x70 + task_shift, yscale & 0xff); /* Vertical scaling ratio (HI) */ saa717x_write(sd, 0x71 + task_shift, yscale >> 8); } static int saa717x_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = to_sd(ctrl); struct saa717x_state *state = to_state(sd); switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: saa717x_write(sd, 0x10a, ctrl->val); return 0; case V4L2_CID_CONTRAST: saa717x_write(sd, 0x10b, ctrl->val); return 0; case 
V4L2_CID_SATURATION: saa717x_write(sd, 0x10c, ctrl->val); return 0; case V4L2_CID_HUE: saa717x_write(sd, 0x10d, ctrl->val); return 0; case V4L2_CID_AUDIO_MUTE: state->audio_main_mute = ctrl->val; break; case V4L2_CID_AUDIO_VOLUME: state->audio_main_volume = ctrl->val; break; case V4L2_CID_AUDIO_BALANCE: state->audio_main_balance = ctrl->val; break; case V4L2_CID_AUDIO_TREBLE: state->audio_main_treble = ctrl->val; break; case V4L2_CID_AUDIO_BASS: state->audio_main_bass = ctrl->val; break; default: return 0; } set_audio_regs(sd, state); return 0; } static int saa717x_s_video_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct saa717x_state *decoder = to_state(sd); int is_tuner = input & 0x80; /* tuner input flag */ input &= 0x7f; v4l2_dbg(1, debug, sd, "decoder set input (%d)\n", input); /* inputs from 0-9 are available*/ /* saa717x have mode0-mode9 but mode5 is reserved. */ if (input > 9 || input == 5) return -EINVAL; if (decoder->input != input) { int input_line = input; decoder->input = input_line; v4l2_dbg(1, debug, sd, "now setting %s input %d\n", input_line >= 6 ? "S-Video" : "Composite", input_line); /* select mode */ saa717x_write(sd, 0x102, (saa717x_read(sd, 0x102) & 0xf0) | input_line); /* bypass chrominance trap for modes 6..9 */ saa717x_write(sd, 0x109, (saa717x_read(sd, 0x109) & 0x7f) | (input_line < 6 ? 0x0 : 0x80)); /* change audio_mode */ if (is_tuner) { /* tuner */ set_audio_mode(sd, decoder->tuner_audio_mode); } else { /* Force to STEREO mode if Composite or * S-Video were chosen */ set_audio_mode(sd, TUNER_AUDIO_STEREO); } /* change initialize procedure (Composite/S-Video) */ if (is_tuner) saa717x_write_regs(sd, reg_init_tuner_input); else if (input_line >= 6) saa717x_write_regs(sd, reg_init_svideo_input); else saa717x_write_regs(sd, reg_init_composite_input); } return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int saa717x_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (!v4l2_chip_match_i2c_client(client, &reg->match)) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; reg->val = saa717x_read(sd, reg->reg); reg->size = 1; return 0; } static int saa717x_s_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); u16 addr = reg->reg & 0xffff; u8 val = reg->val & 0xff; if (!v4l2_chip_match_i2c_client(client, &reg->match)) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; saa717x_write(sd, addr, val); return 0; } #endif static int saa717x_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt) { int prescale, h_scale, v_scale; v4l2_dbg(1, debug, sd, "decoder set size\n"); if (fmt->code != V4L2_MBUS_FMT_FIXED) return -EINVAL; /* FIXME need better bounds checking here */ if (fmt->width < 1 || fmt->width > 1440) return -EINVAL; if (fmt->height < 1 || fmt->height > 960) return -EINVAL; fmt->field = V4L2_FIELD_INTERLACED; fmt->colorspace = V4L2_COLORSPACE_SMPTE170M; /* scaling setting */ /* NTSC and interlace only */ prescale = SAA717X_NTSC_WIDTH / fmt->width; if (prescale == 0) prescale = 1; h_scale = 1024 * SAA717X_NTSC_WIDTH / prescale / fmt->width; /* interlace */ v_scale = 512 * 2 * SAA717X_NTSC_HEIGHT / fmt->height; /* Horizontal prescaling etc */ set_h_prescale(sd, 0, prescale); set_h_prescale(sd, 1, prescale); /* Horizontal scaling increment */ /* TASK A */ saa717x_write(sd, 0x6C, (u8)(h_scale & 0xFF)); saa717x_write(sd, 0x6D, (u8)((h_scale >> 8) & 0xFF)); /* TASK B */ saa717x_write(sd, 
0xAC, (u8)(h_scale & 0xFF)); saa717x_write(sd, 0xAD, (u8)((h_scale >> 8) & 0xFF)); /* Vertical prescaling etc */ set_v_scale(sd, 0, v_scale); set_v_scale(sd, 1, v_scale); /* set video output size */ /* video number of pixels at output */ /* TASK A */ saa717x_write(sd, 0x5C, (u8)(fmt->width & 0xFF)); saa717x_write(sd, 0x5D, (u8)((fmt->width >> 8) & 0xFF)); /* TASK B */ saa717x_write(sd, 0x9C, (u8)(fmt->width & 0xFF)); saa717x_write(sd, 0x9D, (u8)((fmt->width >> 8) & 0xFF)); /* video number of lines at output */ /* TASK A */ saa717x_write(sd, 0x5E, (u8)(fmt->height & 0xFF)); saa717x_write(sd, 0x5F, (u8)((fmt->height >> 8) & 0xFF)); /* TASK B */ saa717x_write(sd, 0x9E, (u8)(fmt->height & 0xFF)); saa717x_write(sd, 0x9F, (u8)((fmt->height >> 8) & 0xFF)); return 0; } static int saa717x_s_radio(struct v4l2_subdev *sd) { struct saa717x_state *decoder = to_state(sd); decoder->radio = 1; return 0; } static int saa717x_s_std(struct v4l2_subdev *sd, v4l2_std_id std) { struct saa717x_state *decoder = to_state(sd); v4l2_dbg(1, debug, sd, "decoder set norm "); v4l2_dbg(1, debug, sd, "(not yet implementd)\n"); decoder->radio = 0; decoder->std = std; return 0; } static int saa717x_s_audio_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct saa717x_state *decoder = to_state(sd); if (input < 3) { /* FIXME! --tadachi */ decoder->audio_input = input; v4l2_dbg(1, debug, sd, "set decoder audio input to %d\n", decoder->audio_input); set_audio_regs(sd, decoder); return 0; } return -ERANGE; } static int saa717x_s_stream(struct v4l2_subdev *sd, int enable) { struct saa717x_state *decoder = to_state(sd); v4l2_dbg(1, debug, sd, "decoder %s output\n", enable ? "enable" : "disable"); decoder->enable = enable; saa717x_write(sd, 0x193, enable ? 0xa6 : 0x26); return 0; } /* change audio mode */ static int saa717x_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct saa717x_state *decoder = to_state(sd); int audio_mode; char *mes[4] = { "MONO", "STEREO", "LANG1", "LANG2/SAP" }; audio_mode = TUNER_AUDIO_STEREO; switch (vt->audmode) { case V4L2_TUNER_MODE_MONO: audio_mode = TUNER_AUDIO_MONO; break; case V4L2_TUNER_MODE_STEREO: audio_mode = TUNER_AUDIO_STEREO; break; case V4L2_TUNER_MODE_LANG2: audio_mode = TUNER_AUDIO_LANG2; break; case V4L2_TUNER_MODE_LANG1: audio_mode = TUNER_AUDIO_LANG1; break; } v4l2_dbg(1, debug, sd, "change audio mode to %s\n", mes[audio_mode]); decoder->tuner_audio_mode = audio_mode; /* The registers are not changed here. */ /* See DECODER_ENABLE_OUTPUT section. 
*/ set_audio_mode(sd, decoder->tuner_audio_mode); return 0; } static int saa717x_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct saa717x_state *decoder = to_state(sd); int dual_f, stereo_f; if (decoder->radio) return 0; get_inf_dev_status(sd, &dual_f, &stereo_f); v4l2_dbg(1, debug, sd, "DETECT==st:%d dual:%d\n", stereo_f, dual_f); /* mono */ if ((dual_f == 0) && (stereo_f == 0)) { vt->rxsubchans = V4L2_TUNER_SUB_MONO; v4l2_dbg(1, debug, sd, "DETECT==MONO\n"); } /* stereo */ if (stereo_f == 1) { if (vt->audmode == V4L2_TUNER_MODE_STEREO || vt->audmode == V4L2_TUNER_MODE_LANG1) { vt->rxsubchans = V4L2_TUNER_SUB_STEREO; v4l2_dbg(1, debug, sd, "DETECT==ST(ST)\n"); } else { vt->rxsubchans = V4L2_TUNER_SUB_MONO; v4l2_dbg(1, debug, sd, "DETECT==ST(MONO)\n"); } } /* dual */ if (dual_f == 1) { if (vt->audmode == V4L2_TUNER_MODE_LANG2) { vt->rxsubchans = V4L2_TUNER_SUB_LANG2 | V4L2_TUNER_SUB_MONO; v4l2_dbg(1, debug, sd, "DETECT==DUAL1\n"); } else { vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_MONO; v4l2_dbg(1, debug, sd, "DETECT==DUAL2\n"); } } return 0; } static int saa717x_log_status(struct v4l2_subdev *sd) { struct saa717x_state *state = to_state(sd); v4l2_ctrl_handler_log_status(&state->hdl, sd->name); return 0; } /* ----------------------------------------------------------------------- */ static const struct v4l2_ctrl_ops saa717x_ctrl_ops = { .s_ctrl = saa717x_s_ctrl, }; static const struct v4l2_subdev_core_ops saa717x_core_ops = { #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = saa717x_g_register, .s_register = saa717x_s_register, #endif .s_std = saa717x_s_std, .g_ext_ctrls = v4l2_subdev_g_ext_ctrls, .try_ext_ctrls = v4l2_subdev_try_ext_ctrls, .s_ext_ctrls = v4l2_subdev_s_ext_ctrls, .g_ctrl = v4l2_subdev_g_ctrl, .s_ctrl = v4l2_subdev_s_ctrl, .queryctrl = v4l2_subdev_queryctrl, .querymenu = v4l2_subdev_querymenu, .log_status = saa717x_log_status, }; static const struct v4l2_subdev_tuner_ops saa717x_tuner_ops = { .g_tuner = saa717x_g_tuner, .s_tuner = saa717x_s_tuner, .s_radio = saa717x_s_radio, }; static const struct v4l2_subdev_video_ops saa717x_video_ops = { .s_routing = saa717x_s_video_routing, .s_mbus_fmt = saa717x_s_mbus_fmt, .s_stream = saa717x_s_stream, }; static const struct v4l2_subdev_audio_ops saa717x_audio_ops = { .s_routing = saa717x_s_audio_routing, }; static const struct v4l2_subdev_ops saa717x_ops = { .core = &saa717x_core_ops, .tuner = &saa717x_tuner_ops, .audio = &saa717x_audio_ops, .video = &saa717x_video_ops, }; /* ----------------------------------------------------------------------- */ /* i2c implementation */ /* ----------------------------------------------------------------------- */ static int saa717x_probe(struct i2c_client *client, const struct i2c_device_id *did) { struct saa717x_state *decoder; struct v4l2_ctrl_handler *hdl; struct v4l2_subdev *sd; u8 id = 0; char *p = ""; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; decoder = kzalloc(sizeof(struct saa717x_state), GFP_KERNEL); if (decoder == NULL) return -ENOMEM; sd = &decoder->sd; v4l2_i2c_subdev_init(sd, client, &saa717x_ops); if (saa717x_write(sd, 0x5a4, 0xfe) && saa717x_write(sd, 0x5a5, 0x0f) && saa717x_write(sd, 0x5a6, 0x00) && saa717x_write(sd, 0x5a7, 0x01)) id = saa717x_read(sd, 0x5a0); if (id != 0xc2 && id != 0x32 && id != 0xf2 && id != 0x6c) { v4l2_dbg(1, debug, sd, "saa717x not found (id=%02x)\n", id); kfree(decoder); return -ENODEV; } if (id == 0xc2) p = "saa7173"; else if (id == 0x32) 
p = "saa7174A"; else if (id == 0x6c) p = "saa7174HL"; else p = "saa7171"; v4l2_info(sd, "%s found @ 0x%x (%s)\n", p, client->addr << 1, client->adapter->name); hdl = &decoder->hdl; v4l2_ctrl_handler_init(hdl, 9); /* add in ascending ID order */ v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_CONTRAST, 0, 255, 1, 68); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_SATURATION, 0, 255, 1, 64); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_HUE, -128, 127, 1, 0); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_AUDIO_VOLUME, 0, 65535, 65535 / 100, 42000); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_AUDIO_BALANCE, 0, 65535, 65535 / 100, 32768); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_AUDIO_BASS, -16, 15, 1, 0); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_AUDIO_TREBLE, -16, 15, 1, 0); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0); sd->ctrl_handler = hdl; if (hdl->error) { int err = hdl->error; v4l2_ctrl_handler_free(hdl); kfree(decoder); return err; } decoder->std = V4L2_STD_NTSC; decoder->input = -1; decoder->enable = 1; /* FIXME!! */ decoder->playback = 0; /* initially capture mode used */ decoder->audio = 1; /* DECODER_AUDIO_48_KHZ */ decoder->audio_input = 2; /* FIXME!! */ decoder->tuner_audio_mode = TUNER_AUDIO_STEREO; /* set volume, bass and treble */ decoder->audio_main_vol_l = 6; decoder->audio_main_vol_r = 6; v4l2_dbg(1, debug, sd, "writing init values\n"); /* FIXME!! */ saa717x_write_regs(sd, reg_init_initialize); v4l2_ctrl_handler_setup(hdl); set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(2*HZ); return 0; } static int saa717x_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); v4l2_ctrl_handler_free(sd->ctrl_handler); kfree(to_state(sd)); return 0; } /* ----------------------------------------------------------------------- */ static const struct i2c_device_id saa717x_id[] = { { "saa717x", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, saa717x_id); static struct i2c_driver saa717x_driver = { .driver = { .owner = THIS_MODULE, .name = "saa717x", }, .probe = saa717x_probe, .remove = saa717x_remove, .id_table = saa717x_id, }; module_i2c_driver(saa717x_driver);
gpl-2.0
XXMrHyde/android_kernel_lge_hammerhead
drivers/media/video/saa717x.c
5150
33585
/* * saa717x - Philips SAA717xHL video decoder driver * * Based on the saa7115 driver * * Changes by Ohta Kyuma <alpha292@bremen.or.jp> * - Apply to SAA717x,NEC uPD64031,uPD64083. (1/31/2004) * * Changes by T.Adachi (tadachi@tadachi-net.com) * - support audio, video scaler etc, and checked the initialize sequence. * * Cleaned up by Hans Verkuil <hverkuil@xs4all.nl> * * Note: this is a reversed engineered driver based on captures from * the I2C bus under Windows. This chip is very similar to the saa7134, * though. Unfortunately, this driver is currently only working for NTSC. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/videodev2.h> #include <linux/i2c.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> MODULE_DESCRIPTION("Philips SAA717x audio/video decoder driver"); MODULE_AUTHOR("K. Ohta, T. Adachi, Hans Verkuil"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug level (0-1)"); /* * Generic i2c probe * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1' */ struct saa717x_state { struct v4l2_subdev sd; struct v4l2_ctrl_handler hdl; v4l2_std_id std; int input; int enable; int radio; int playback; int audio; int tuner_audio_mode; int audio_main_mute; int audio_main_vol_r; int audio_main_vol_l; u16 audio_main_bass; u16 audio_main_treble; u16 audio_main_volume; u16 audio_main_balance; int audio_input; }; static inline struct saa717x_state *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct saa717x_state, sd); } static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl) { return &container_of(ctrl->handler, struct saa717x_state, hdl)->sd; } /* ----------------------------------------------------------------------- */ /* for audio mode */ #define TUNER_AUDIO_MONO 0 /* LL */ #define TUNER_AUDIO_STEREO 1 /* LR */ #define TUNER_AUDIO_LANG1 2 /* LL */ #define TUNER_AUDIO_LANG2 3 /* RR */ #define SAA717X_NTSC_WIDTH (704) #define SAA717X_NTSC_HEIGHT (480) /* ----------------------------------------------------------------------- */ static int saa717x_write(struct v4l2_subdev *sd, u32 reg, u32 value) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct i2c_adapter *adap = client->adapter; int fw_addr = reg == 0x454 || (reg >= 0x464 && reg <= 0x478) || reg == 0x480 || reg == 0x488; unsigned char mm1[6]; struct i2c_msg msg; msg.flags = 0; msg.addr = client->addr; mm1[0] = (reg >> 8) & 0xff; mm1[1] = reg & 0xff; if (fw_addr) { mm1[4] = (value >> 16) & 0xff; mm1[3] = (value >> 8) & 0xff; mm1[2] = value & 0xff; } else { mm1[2] = value & 0xff; } msg.len = fw_addr ? 5 : 3; /* Long Registers have *only* three bytes! 
*/ msg.buf = mm1; v4l2_dbg(2, debug, sd, "wrote: reg 0x%03x=%08x\n", reg, value); return i2c_transfer(adap, &msg, 1) == 1; } static void saa717x_write_regs(struct v4l2_subdev *sd, u32 *data) { while (data[0] || data[1]) { saa717x_write(sd, data[0], data[1]); data += 2; } } static u32 saa717x_read(struct v4l2_subdev *sd, u32 reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct i2c_adapter *adap = client->adapter; int fw_addr = (reg >= 0x404 && reg <= 0x4b8) || reg == 0x528; unsigned char mm1[2]; unsigned char mm2[4] = { 0, 0, 0, 0 }; struct i2c_msg msgs[2]; u32 value; msgs[0].flags = 0; msgs[1].flags = I2C_M_RD; msgs[0].addr = msgs[1].addr = client->addr; mm1[0] = (reg >> 8) & 0xff; mm1[1] = reg & 0xff; msgs[0].len = 2; msgs[0].buf = mm1; msgs[1].len = fw_addr ? 3 : 1; /* Multibyte Registers contains *only* 3 bytes */ msgs[1].buf = mm2; i2c_transfer(adap, msgs, 2); if (fw_addr) value = (mm2[2] & 0xff) | ((mm2[1] & 0xff) >> 8) | ((mm2[0] & 0xff) >> 16); else value = mm2[0] & 0xff; v4l2_dbg(2, debug, sd, "read: reg 0x%03x=0x%08x\n", reg, value); return value; } /* ----------------------------------------------------------------------- */ static u32 reg_init_initialize[] = { /* from linux driver */ 0x101, 0x008, /* Increment delay */ 0x103, 0x000, /* Analog input control 2 */ 0x104, 0x090, /* Analog input control 3 */ 0x105, 0x090, /* Analog input control 4 */ 0x106, 0x0eb, /* Horizontal sync start */ 0x107, 0x0e0, /* Horizontal sync stop */ 0x109, 0x055, /* Luminance control */ 0x10f, 0x02a, /* Chroma gain control */ 0x110, 0x000, /* Chroma control 2 */ 0x114, 0x045, /* analog/ADC */ 0x118, 0x040, /* RAW data gain */ 0x119, 0x080, /* RAW data offset */ 0x044, 0x000, /* VBI horizontal input window start (L) TASK A */ 0x045, 0x000, /* VBI horizontal input window start (H) TASK A */ 0x046, 0x0cf, /* VBI horizontal input window stop (L) TASK A */ 0x047, 0x002, /* VBI horizontal input window stop (H) TASK A */ 0x049, 0x000, /* VBI vertical input window start (H) TASK A */ 0x04c, 0x0d0, /* VBI horizontal output length (L) TASK A */ 0x04d, 0x002, /* VBI horizontal output length (H) TASK A */ 0x064, 0x080, /* Lumina brightness TASK A */ 0x065, 0x040, /* Luminance contrast TASK A */ 0x066, 0x040, /* Chroma saturation TASK A */ /* 067H: Reserved */ 0x068, 0x000, /* VBI horizontal scaling increment (L) TASK A */ 0x069, 0x004, /* VBI horizontal scaling increment (H) TASK A */ 0x06a, 0x000, /* VBI phase offset TASK A */ 0x06e, 0x000, /* Horizontal phase offset Luma TASK A */ 0x06f, 0x000, /* Horizontal phase offset Chroma TASK A */ 0x072, 0x000, /* Vertical filter mode TASK A */ 0x084, 0x000, /* VBI horizontal input window start (L) TAKS B */ 0x085, 0x000, /* VBI horizontal input window start (H) TAKS B */ 0x086, 0x0cf, /* VBI horizontal input window stop (L) TAKS B */ 0x087, 0x002, /* VBI horizontal input window stop (H) TAKS B */ 0x089, 0x000, /* VBI vertical input window start (H) TAKS B */ 0x08c, 0x0d0, /* VBI horizontal output length (L) TASK B */ 0x08d, 0x002, /* VBI horizontal output length (H) TASK B */ 0x0a4, 0x080, /* Lumina brightness TASK B */ 0x0a5, 0x040, /* Luminance contrast TASK B */ 0x0a6, 0x040, /* Chroma saturation TASK B */ /* 0A7H reserved */ 0x0a8, 0x000, /* VBI horizontal scaling increment (L) TASK B */ 0x0a9, 0x004, /* VBI horizontal scaling increment (H) TASK B */ 0x0aa, 0x000, /* VBI phase offset TASK B */ 0x0ae, 0x000, /* Horizontal phase offset Luma TASK B */ 0x0af, 0x000, /*Horizontal phase offset Chroma TASK B */ 0x0b2, 0x000, /* Vertical filter mode TASK B */ 
0x00c, 0x000, /* Start point GREEN path */ 0x00d, 0x000, /* Start point BLUE path */ 0x00e, 0x000, /* Start point RED path */ 0x010, 0x010, /* GREEN path gamma curve --- */ 0x011, 0x020, 0x012, 0x030, 0x013, 0x040, 0x014, 0x050, 0x015, 0x060, 0x016, 0x070, 0x017, 0x080, 0x018, 0x090, 0x019, 0x0a0, 0x01a, 0x0b0, 0x01b, 0x0c0, 0x01c, 0x0d0, 0x01d, 0x0e0, 0x01e, 0x0f0, 0x01f, 0x0ff, /* --- GREEN path gamma curve */ 0x020, 0x010, /* BLUE path gamma curve --- */ 0x021, 0x020, 0x022, 0x030, 0x023, 0x040, 0x024, 0x050, 0x025, 0x060, 0x026, 0x070, 0x027, 0x080, 0x028, 0x090, 0x029, 0x0a0, 0x02a, 0x0b0, 0x02b, 0x0c0, 0x02c, 0x0d0, 0x02d, 0x0e0, 0x02e, 0x0f0, 0x02f, 0x0ff, /* --- BLUE path gamma curve */ 0x030, 0x010, /* RED path gamma curve --- */ 0x031, 0x020, 0x032, 0x030, 0x033, 0x040, 0x034, 0x050, 0x035, 0x060, 0x036, 0x070, 0x037, 0x080, 0x038, 0x090, 0x039, 0x0a0, 0x03a, 0x0b0, 0x03b, 0x0c0, 0x03c, 0x0d0, 0x03d, 0x0e0, 0x03e, 0x0f0, 0x03f, 0x0ff, /* --- RED path gamma curve */ 0x109, 0x085, /* Luminance control */ /**** from app start ****/ 0x584, 0x000, /* AGC gain control */ 0x585, 0x000, /* Program count */ 0x586, 0x003, /* Status reset */ 0x588, 0x0ff, /* Number of audio samples (L) */ 0x589, 0x00f, /* Number of audio samples (M) */ 0x58a, 0x000, /* Number of audio samples (H) */ 0x58b, 0x000, /* Audio select */ 0x58c, 0x010, /* Audio channel assign1 */ 0x58d, 0x032, /* Audio channel assign2 */ 0x58e, 0x054, /* Audio channel assign3 */ 0x58f, 0x023, /* Audio format */ 0x590, 0x000, /* SIF control */ 0x595, 0x000, /* ?? */ 0x596, 0x000, /* ?? */ 0x597, 0x000, /* ?? */ 0x464, 0x00, /* Digital input crossbar1 */ 0x46c, 0xbbbb10, /* Digital output selection1-3 */ 0x470, 0x101010, /* Digital output selection4-6 */ 0x478, 0x00, /* Sound feature control */ 0x474, 0x18, /* Softmute control */ 0x454, 0x0425b9, /* Sound Easy programming(reset) */ 0x454, 0x042539, /* Sound Easy programming(reset) */ /**** common setting( of DVD play, including scaler commands) ****/ 0x042, 0x003, /* Data path configuration for VBI (TASK A) */ 0x082, 0x003, /* Data path configuration for VBI (TASK B) */ 0x108, 0x0f8, /* Sync control */ 0x2a9, 0x0fd, /* ??? 
*/ 0x102, 0x089, /* select video input "mode 9" */ 0x111, 0x000, /* Mode/delay control */ 0x10e, 0x00a, /* Chroma control 1 */ 0x594, 0x002, /* SIF, analog I/O select */ 0x454, 0x0425b9, /* Sound */ 0x454, 0x042539, 0x111, 0x000, 0x10e, 0x00a, 0x464, 0x000, 0x300, 0x000, 0x301, 0x006, 0x302, 0x000, 0x303, 0x006, 0x308, 0x040, 0x309, 0x000, 0x30a, 0x000, 0x30b, 0x000, 0x000, 0x002, 0x001, 0x000, 0x002, 0x000, 0x003, 0x000, 0x004, 0x033, 0x040, 0x01d, 0x041, 0x001, 0x042, 0x004, 0x043, 0x000, 0x080, 0x01e, 0x081, 0x001, 0x082, 0x004, 0x083, 0x000, 0x190, 0x018, 0x115, 0x000, 0x116, 0x012, 0x117, 0x018, 0x04a, 0x011, 0x08a, 0x011, 0x04b, 0x000, 0x08b, 0x000, 0x048, 0x000, 0x088, 0x000, 0x04e, 0x012, 0x08e, 0x012, 0x058, 0x012, 0x098, 0x012, 0x059, 0x000, 0x099, 0x000, 0x05a, 0x003, 0x09a, 0x003, 0x05b, 0x001, 0x09b, 0x001, 0x054, 0x008, 0x094, 0x008, 0x055, 0x000, 0x095, 0x000, 0x056, 0x0c7, 0x096, 0x0c7, 0x057, 0x002, 0x097, 0x002, 0x0ff, 0x0ff, 0x060, 0x001, 0x0a0, 0x001, 0x061, 0x000, 0x0a1, 0x000, 0x062, 0x000, 0x0a2, 0x000, 0x063, 0x000, 0x0a3, 0x000, 0x070, 0x000, 0x0b0, 0x000, 0x071, 0x004, 0x0b1, 0x004, 0x06c, 0x0e9, 0x0ac, 0x0e9, 0x06d, 0x003, 0x0ad, 0x003, 0x05c, 0x0d0, 0x09c, 0x0d0, 0x05d, 0x002, 0x09d, 0x002, 0x05e, 0x0f2, 0x09e, 0x0f2, 0x05f, 0x000, 0x09f, 0x000, 0x074, 0x000, 0x0b4, 0x000, 0x075, 0x000, 0x0b5, 0x000, 0x076, 0x000, 0x0b6, 0x000, 0x077, 0x000, 0x0b7, 0x000, 0x195, 0x008, 0x0ff, 0x0ff, 0x108, 0x0f8, 0x111, 0x000, 0x10e, 0x00a, 0x2a9, 0x0fd, 0x464, 0x001, 0x454, 0x042135, 0x598, 0x0e7, 0x599, 0x07d, 0x59a, 0x018, 0x59c, 0x066, 0x59d, 0x090, 0x59e, 0x001, 0x584, 0x000, 0x585, 0x000, 0x586, 0x003, 0x588, 0x0ff, 0x589, 0x00f, 0x58a, 0x000, 0x58b, 0x000, 0x58c, 0x010, 0x58d, 0x032, 0x58e, 0x054, 0x58f, 0x023, 0x590, 0x000, 0x595, 0x000, 0x596, 0x000, 0x597, 0x000, 0x464, 0x000, 0x46c, 0xbbbb10, 0x470, 0x101010, 0x478, 0x000, 0x474, 0x018, 0x454, 0x042135, 0x598, 0x0e7, 0x599, 0x07d, 0x59a, 0x018, 0x59c, 0x066, 0x59d, 0x090, 0x59e, 0x001, 0x584, 0x000, 0x585, 0x000, 0x586, 0x003, 0x588, 0x0ff, 0x589, 0x00f, 0x58a, 0x000, 0x58b, 0x000, 0x58c, 0x010, 0x58d, 0x032, 0x58e, 0x054, 0x58f, 0x023, 0x590, 0x000, 0x595, 0x000, 0x596, 0x000, 0x597, 0x000, 0x464, 0x000, 0x46c, 0xbbbb10, 0x470, 0x101010, 0x478, 0x000, 0x474, 0x018, 0x454, 0x042135, 0x598, 0x0e7, 0x599, 0x07d, 0x59a, 0x018, 0x59c, 0x066, 0x59d, 0x090, 0x59e, 0x001, 0x584, 0x000, 0x585, 0x000, 0x586, 0x003, 0x588, 0x0ff, 0x589, 0x00f, 0x58a, 0x000, 0x58b, 0x000, 0x58c, 0x010, 0x58d, 0x032, 0x58e, 0x054, 0x58f, 0x023, 0x590, 0x000, 0x595, 0x000, 0x596, 0x000, 0x597, 0x000, 0x464, 0x000, 0x46c, 0xbbbb10, 0x470, 0x101010, 0x478, 0x000, 0x474, 0x018, 0x454, 0x042135, 0x193, 0x000, 0x300, 0x000, 0x301, 0x006, 0x302, 0x000, 0x303, 0x006, 0x308, 0x040, 0x309, 0x000, 0x30a, 0x000, 0x30b, 0x000, 0x000, 0x002, 0x001, 0x000, 0x002, 0x000, 0x003, 0x000, 0x004, 0x033, 0x040, 0x01d, 0x041, 0x001, 0x042, 0x004, 0x043, 0x000, 0x080, 0x01e, 0x081, 0x001, 0x082, 0x004, 0x083, 0x000, 0x190, 0x018, 0x115, 0x000, 0x116, 0x012, 0x117, 0x018, 0x04a, 0x011, 0x08a, 0x011, 0x04b, 0x000, 0x08b, 0x000, 0x048, 0x000, 0x088, 0x000, 0x04e, 0x012, 0x08e, 0x012, 0x058, 0x012, 0x098, 0x012, 0x059, 0x000, 0x099, 0x000, 0x05a, 0x003, 0x09a, 0x003, 0x05b, 0x001, 0x09b, 0x001, 0x054, 0x008, 0x094, 0x008, 0x055, 0x000, 0x095, 0x000, 0x056, 0x0c7, 0x096, 0x0c7, 0x057, 0x002, 0x097, 0x002, 0x060, 0x001, 0x0a0, 0x001, 0x061, 0x000, 0x0a1, 0x000, 0x062, 0x000, 0x0a2, 0x000, 0x063, 0x000, 0x0a3, 0x000, 0x070, 0x000, 0x0b0, 0x000, 0x071, 0x004, 0x0b1, 0x004, 
0x06c, 0x0e9, 0x0ac, 0x0e9, 0x06d, 0x003, 0x0ad, 0x003, 0x05c, 0x0d0, 0x09c, 0x0d0, 0x05d, 0x002, 0x09d, 0x002, 0x05e, 0x0f2, 0x09e, 0x0f2, 0x05f, 0x000, 0x09f, 0x000, 0x074, 0x000, 0x0b4, 0x000, 0x075, 0x000, 0x0b5, 0x000, 0x076, 0x000, 0x0b6, 0x000, 0x077, 0x000, 0x0b7, 0x000, 0x195, 0x008, 0x598, 0x0e7, 0x599, 0x07d, 0x59a, 0x018, 0x59c, 0x066, 0x59d, 0x090, 0x59e, 0x001, 0x584, 0x000, 0x585, 0x000, 0x586, 0x003, 0x588, 0x0ff, 0x589, 0x00f, 0x58a, 0x000, 0x58b, 0x000, 0x58c, 0x010, 0x58d, 0x032, 0x58e, 0x054, 0x58f, 0x023, 0x590, 0x000, 0x595, 0x000, 0x596, 0x000, 0x597, 0x000, 0x464, 0x000, 0x46c, 0xbbbb10, 0x470, 0x101010, 0x478, 0x000, 0x474, 0x018, 0x454, 0x042135, 0x193, 0x0a6, 0x108, 0x0f8, 0x042, 0x003, 0x082, 0x003, 0x454, 0x0425b9, 0x454, 0x042539, 0x193, 0x000, 0x193, 0x0a6, 0x464, 0x000, 0, 0 }; /* Tuner */ static u32 reg_init_tuner_input[] = { 0x108, 0x0f8, /* Sync control */ 0x111, 0x000, /* Mode/delay control */ 0x10e, 0x00a, /* Chroma control 1 */ 0, 0 }; /* Composite */ static u32 reg_init_composite_input[] = { 0x108, 0x0e8, /* Sync control */ 0x111, 0x000, /* Mode/delay control */ 0x10e, 0x04a, /* Chroma control 1 */ 0, 0 }; /* S-Video */ static u32 reg_init_svideo_input[] = { 0x108, 0x0e8, /* Sync control */ 0x111, 0x000, /* Mode/delay control */ 0x10e, 0x04a, /* Chroma control 1 */ 0, 0 }; static u32 reg_set_audio_template[4][2] = { { /* for MONO tadachi 6/29 DMA audio output select? Register 0x46c 7-4: DMA2, 3-0: DMA1 ch. DMA4, DMA3 DMA2, DMA1 0: MAIN left, 1: MAIN right 2: AUX1 left, 3: AUX1 right 4: AUX2 left, 5: AUX2 right 6: DPL left, 7: DPL right 8: DPL center, 9: DPL surround A: monitor output, B: digital sense */ 0xbbbb00, /* tadachi 6/29 DAC and I2S output select? Register 0x470 7-4:DAC right ch. 3-0:DAC left ch. I2S1 right,left I2S2 right,left */ 0x00, }, { /* for STEREO */ 0xbbbb10, 0x101010, }, { /* for LANG1 */ 0xbbbb00, 0x00, }, { /* for LANG2/SAP */ 0xbbbb11, 0x111111, } }; /* Get detected audio flags (from saa7134 driver) */ static void get_inf_dev_status(struct v4l2_subdev *sd, int *dual_flag, int *stereo_flag) { u32 reg_data3; static char *stdres[0x20] = { [0x00] = "no standard detected", [0x01] = "B/G (in progress)", [0x02] = "D/K (in progress)", [0x03] = "M (in progress)", [0x04] = "B/G A2", [0x05] = "B/G NICAM", [0x06] = "D/K A2 (1)", [0x07] = "D/K A2 (2)", [0x08] = "D/K A2 (3)", [0x09] = "D/K NICAM", [0x0a] = "L NICAM", [0x0b] = "I NICAM", [0x0c] = "M Korea", [0x0d] = "M BTSC ", [0x0e] = "M EIAJ", [0x0f] = "FM radio / IF 10.7 / 50 deemp", [0x10] = "FM radio / IF 10.7 / 75 deemp", [0x11] = "FM radio / IF sel / 50 deemp", [0x12] = "FM radio / IF sel / 75 deemp", [0x13 ... 0x1e] = "unknown", [0x1f] = "??? [in progress]", }; *dual_flag = *stereo_flag = 0; /* (demdec status: 0x528) */ /* read current status */ reg_data3 = saa717x_read(sd, 0x0528); v4l2_dbg(1, debug, sd, "tvaudio thread status: 0x%x [%s%s%s]\n", reg_data3, stdres[reg_data3 & 0x1f], (reg_data3 & 0x000020) ? ",stereo" : "", (reg_data3 & 0x000040) ? ",dual" : ""); v4l2_dbg(1, debug, sd, "detailed status: " "%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s\n", (reg_data3 & 0x000080) ? " A2/EIAJ pilot tone " : "", (reg_data3 & 0x000100) ? " A2/EIAJ dual " : "", (reg_data3 & 0x000200) ? " A2/EIAJ stereo " : "", (reg_data3 & 0x000400) ? " A2/EIAJ noise mute " : "", (reg_data3 & 0x000800) ? " BTSC/FM radio pilot " : "", (reg_data3 & 0x001000) ? " SAP carrier " : "", (reg_data3 & 0x002000) ? " BTSC stereo noise mute " : "", (reg_data3 & 0x004000) ? " SAP noise mute " : "", (reg_data3 & 0x008000) ? 
" VDSP " : "", (reg_data3 & 0x010000) ? " NICST " : "", (reg_data3 & 0x020000) ? " NICDU " : "", (reg_data3 & 0x040000) ? " NICAM muted " : "", (reg_data3 & 0x080000) ? " NICAM reserve sound " : "", (reg_data3 & 0x100000) ? " init done " : ""); if (reg_data3 & 0x000220) { v4l2_dbg(1, debug, sd, "ST!!!\n"); *stereo_flag = 1; } if (reg_data3 & 0x000140) { v4l2_dbg(1, debug, sd, "DUAL!!!\n"); *dual_flag = 1; } } /* regs write to set audio mode */ static void set_audio_mode(struct v4l2_subdev *sd, int audio_mode) { v4l2_dbg(1, debug, sd, "writing registers to set audio mode by set %d\n", audio_mode); saa717x_write(sd, 0x46c, reg_set_audio_template[audio_mode][0]); saa717x_write(sd, 0x470, reg_set_audio_template[audio_mode][1]); } /* write regs to set audio volume, bass and treble */ static int set_audio_regs(struct v4l2_subdev *sd, struct saa717x_state *decoder) { u8 mute = 0xac; /* -84 dB */ u32 val; unsigned int work_l, work_r; /* set SIF analog I/O select */ saa717x_write(sd, 0x0594, decoder->audio_input); v4l2_dbg(1, debug, sd, "set audio input %d\n", decoder->audio_input); /* normalize ( 65535 to 0 -> 24 to -40 (not -84)) */ work_l = (min(65536 - decoder->audio_main_balance, 32768) * decoder->audio_main_volume) / 32768; work_r = (min(decoder->audio_main_balance, (u16)32768) * decoder->audio_main_volume) / 32768; decoder->audio_main_vol_l = (long)work_l * (24 - (-40)) / 65535 - 40; decoder->audio_main_vol_r = (long)work_r * (24 - (-40)) / 65535 - 40; /* set main volume */ /* main volume L[7-0],R[7-0],0x00 24=24dB,-83dB, -84(mute) */ /* def:0dB->6dB(MPG600GR) */ /* if mute is on, set mute */ if (decoder->audio_main_mute) { val = mute | (mute << 8); } else { val = (u8)decoder->audio_main_vol_l | ((u8)decoder->audio_main_vol_r << 8); } saa717x_write(sd, 0x480, val); /* set bass and treble */ val = decoder->audio_main_bass & 0x1f; val |= (decoder->audio_main_treble & 0x1f) << 5; saa717x_write(sd, 0x488, val); return 0; } /********** scaling staff ***********/ static void set_h_prescale(struct v4l2_subdev *sd, int task, int prescale) { static const struct { int xpsc; int xacl; int xc2_1; int xdcg; int vpfy; } vals[] = { /* XPSC XACL XC2_1 XDCG VPFY */ { 1, 0, 0, 0, 0 }, { 2, 2, 1, 2, 2 }, { 3, 4, 1, 3, 2 }, { 4, 8, 1, 4, 2 }, { 5, 8, 1, 4, 2 }, { 6, 8, 1, 4, 3 }, { 7, 8, 1, 4, 3 }, { 8, 15, 0, 4, 3 }, { 9, 15, 0, 4, 3 }, { 10, 16, 1, 5, 3 }, }; static const int count = ARRAY_SIZE(vals); int i, task_shift; task_shift = task * 0x40; for (i = 0; i < count; i++) if (vals[i].xpsc == prescale) break; if (i == count) return; /* horizonal prescaling */ saa717x_write(sd, 0x60 + task_shift, vals[i].xpsc); /* accumulation length */ saa717x_write(sd, 0x61 + task_shift, vals[i].xacl); /* level control */ saa717x_write(sd, 0x62 + task_shift, (vals[i].xc2_1 << 3) | vals[i].xdcg); /*FIR prefilter control */ saa717x_write(sd, 0x63 + task_shift, (vals[i].vpfy << 2) | vals[i].vpfy); } /********** scaling staff ***********/ static void set_v_scale(struct v4l2_subdev *sd, int task, int yscale) { int task_shift; task_shift = task * 0x40; /* Vertical scaling ratio (LOW) */ saa717x_write(sd, 0x70 + task_shift, yscale & 0xff); /* Vertical scaling ratio (HI) */ saa717x_write(sd, 0x71 + task_shift, yscale >> 8); } static int saa717x_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = to_sd(ctrl); struct saa717x_state *state = to_state(sd); switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: saa717x_write(sd, 0x10a, ctrl->val); return 0; case V4L2_CID_CONTRAST: saa717x_write(sd, 0x10b, ctrl->val); return 0; case 
V4L2_CID_SATURATION: saa717x_write(sd, 0x10c, ctrl->val); return 0; case V4L2_CID_HUE: saa717x_write(sd, 0x10d, ctrl->val); return 0; case V4L2_CID_AUDIO_MUTE: state->audio_main_mute = ctrl->val; break; case V4L2_CID_AUDIO_VOLUME: state->audio_main_volume = ctrl->val; break; case V4L2_CID_AUDIO_BALANCE: state->audio_main_balance = ctrl->val; break; case V4L2_CID_AUDIO_TREBLE: state->audio_main_treble = ctrl->val; break; case V4L2_CID_AUDIO_BASS: state->audio_main_bass = ctrl->val; break; default: return 0; } set_audio_regs(sd, state); return 0; } static int saa717x_s_video_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct saa717x_state *decoder = to_state(sd); int is_tuner = input & 0x80; /* tuner input flag */ input &= 0x7f; v4l2_dbg(1, debug, sd, "decoder set input (%d)\n", input); /* inputs from 0-9 are available*/ /* saa717x have mode0-mode9 but mode5 is reserved. */ if (input > 9 || input == 5) return -EINVAL; if (decoder->input != input) { int input_line = input; decoder->input = input_line; v4l2_dbg(1, debug, sd, "now setting %s input %d\n", input_line >= 6 ? "S-Video" : "Composite", input_line); /* select mode */ saa717x_write(sd, 0x102, (saa717x_read(sd, 0x102) & 0xf0) | input_line); /* bypass chrominance trap for modes 6..9 */ saa717x_write(sd, 0x109, (saa717x_read(sd, 0x109) & 0x7f) | (input_line < 6 ? 0x0 : 0x80)); /* change audio_mode */ if (is_tuner) { /* tuner */ set_audio_mode(sd, decoder->tuner_audio_mode); } else { /* Force to STEREO mode if Composite or * S-Video were chosen */ set_audio_mode(sd, TUNER_AUDIO_STEREO); } /* change initialize procedure (Composite/S-Video) */ if (is_tuner) saa717x_write_regs(sd, reg_init_tuner_input); else if (input_line >= 6) saa717x_write_regs(sd, reg_init_svideo_input); else saa717x_write_regs(sd, reg_init_composite_input); } return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int saa717x_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (!v4l2_chip_match_i2c_client(client, &reg->match)) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; reg->val = saa717x_read(sd, reg->reg); reg->size = 1; return 0; } static int saa717x_s_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); u16 addr = reg->reg & 0xffff; u8 val = reg->val & 0xff; if (!v4l2_chip_match_i2c_client(client, &reg->match)) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; saa717x_write(sd, addr, val); return 0; } #endif static int saa717x_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt) { int prescale, h_scale, v_scale; v4l2_dbg(1, debug, sd, "decoder set size\n"); if (fmt->code != V4L2_MBUS_FMT_FIXED) return -EINVAL; /* FIXME need better bounds checking here */ if (fmt->width < 1 || fmt->width > 1440) return -EINVAL; if (fmt->height < 1 || fmt->height > 960) return -EINVAL; fmt->field = V4L2_FIELD_INTERLACED; fmt->colorspace = V4L2_COLORSPACE_SMPTE170M; /* scaling setting */ /* NTSC and interlace only */ prescale = SAA717X_NTSC_WIDTH / fmt->width; if (prescale == 0) prescale = 1; h_scale = 1024 * SAA717X_NTSC_WIDTH / prescale / fmt->width; /* interlace */ v_scale = 512 * 2 * SAA717X_NTSC_HEIGHT / fmt->height; /* Horizontal prescaling etc */ set_h_prescale(sd, 0, prescale); set_h_prescale(sd, 1, prescale); /* Horizontal scaling increment */ /* TASK A */ saa717x_write(sd, 0x6C, (u8)(h_scale & 0xFF)); saa717x_write(sd, 0x6D, (u8)((h_scale >> 8) & 0xFF)); /* TASK B */ saa717x_write(sd, 
0xAC, (u8)(h_scale & 0xFF)); saa717x_write(sd, 0xAD, (u8)((h_scale >> 8) & 0xFF)); /* Vertical prescaling etc */ set_v_scale(sd, 0, v_scale); set_v_scale(sd, 1, v_scale); /* set video output size */ /* video number of pixels at output */ /* TASK A */ saa717x_write(sd, 0x5C, (u8)(fmt->width & 0xFF)); saa717x_write(sd, 0x5D, (u8)((fmt->width >> 8) & 0xFF)); /* TASK B */ saa717x_write(sd, 0x9C, (u8)(fmt->width & 0xFF)); saa717x_write(sd, 0x9D, (u8)((fmt->width >> 8) & 0xFF)); /* video number of lines at output */ /* TASK A */ saa717x_write(sd, 0x5E, (u8)(fmt->height & 0xFF)); saa717x_write(sd, 0x5F, (u8)((fmt->height >> 8) & 0xFF)); /* TASK B */ saa717x_write(sd, 0x9E, (u8)(fmt->height & 0xFF)); saa717x_write(sd, 0x9F, (u8)((fmt->height >> 8) & 0xFF)); return 0; } static int saa717x_s_radio(struct v4l2_subdev *sd) { struct saa717x_state *decoder = to_state(sd); decoder->radio = 1; return 0; } static int saa717x_s_std(struct v4l2_subdev *sd, v4l2_std_id std) { struct saa717x_state *decoder = to_state(sd); v4l2_dbg(1, debug, sd, "decoder set norm "); v4l2_dbg(1, debug, sd, "(not yet implemented)\n"); decoder->radio = 0; decoder->std = std; return 0; } static int saa717x_s_audio_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct saa717x_state *decoder = to_state(sd); if (input < 3) { /* FIXME! --tadachi */ decoder->audio_input = input; v4l2_dbg(1, debug, sd, "set decoder audio input to %d\n", decoder->audio_input); set_audio_regs(sd, decoder); return 0; } return -ERANGE; } static int saa717x_s_stream(struct v4l2_subdev *sd, int enable) { struct saa717x_state *decoder = to_state(sd); v4l2_dbg(1, debug, sd, "decoder %s output\n", enable ? "enable" : "disable"); decoder->enable = enable; saa717x_write(sd, 0x193, enable ? 0xa6 : 0x26); return 0; } /* change audio mode */ static int saa717x_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct saa717x_state *decoder = to_state(sd); int audio_mode; char *mes[4] = { "MONO", "STEREO", "LANG1", "LANG2/SAP" }; audio_mode = TUNER_AUDIO_STEREO; switch (vt->audmode) { case V4L2_TUNER_MODE_MONO: audio_mode = TUNER_AUDIO_MONO; break; case V4L2_TUNER_MODE_STEREO: audio_mode = TUNER_AUDIO_STEREO; break; case V4L2_TUNER_MODE_LANG2: audio_mode = TUNER_AUDIO_LANG2; break; case V4L2_TUNER_MODE_LANG1: audio_mode = TUNER_AUDIO_LANG1; break; } v4l2_dbg(1, debug, sd, "change audio mode to %s\n", mes[audio_mode]); decoder->tuner_audio_mode = audio_mode; /* The registers are not changed here. */ /* See DECODER_ENABLE_OUTPUT section. 
*/ set_audio_mode(sd, decoder->tuner_audio_mode); return 0; } static int saa717x_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct saa717x_state *decoder = to_state(sd); int dual_f, stereo_f; if (decoder->radio) return 0; get_inf_dev_status(sd, &dual_f, &stereo_f); v4l2_dbg(1, debug, sd, "DETECT==st:%d dual:%d\n", stereo_f, dual_f); /* mono */ if ((dual_f == 0) && (stereo_f == 0)) { vt->rxsubchans = V4L2_TUNER_SUB_MONO; v4l2_dbg(1, debug, sd, "DETECT==MONO\n"); } /* stereo */ if (stereo_f == 1) { if (vt->audmode == V4L2_TUNER_MODE_STEREO || vt->audmode == V4L2_TUNER_MODE_LANG1) { vt->rxsubchans = V4L2_TUNER_SUB_STEREO; v4l2_dbg(1, debug, sd, "DETECT==ST(ST)\n"); } else { vt->rxsubchans = V4L2_TUNER_SUB_MONO; v4l2_dbg(1, debug, sd, "DETECT==ST(MONO)\n"); } } /* dual */ if (dual_f == 1) { if (vt->audmode == V4L2_TUNER_MODE_LANG2) { vt->rxsubchans = V4L2_TUNER_SUB_LANG2 | V4L2_TUNER_SUB_MONO; v4l2_dbg(1, debug, sd, "DETECT==DUAL1\n"); } else { vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_MONO; v4l2_dbg(1, debug, sd, "DETECT==DUAL2\n"); } } return 0; } static int saa717x_log_status(struct v4l2_subdev *sd) { struct saa717x_state *state = to_state(sd); v4l2_ctrl_handler_log_status(&state->hdl, sd->name); return 0; } /* ----------------------------------------------------------------------- */ static const struct v4l2_ctrl_ops saa717x_ctrl_ops = { .s_ctrl = saa717x_s_ctrl, }; static const struct v4l2_subdev_core_ops saa717x_core_ops = { #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = saa717x_g_register, .s_register = saa717x_s_register, #endif .s_std = saa717x_s_std, .g_ext_ctrls = v4l2_subdev_g_ext_ctrls, .try_ext_ctrls = v4l2_subdev_try_ext_ctrls, .s_ext_ctrls = v4l2_subdev_s_ext_ctrls, .g_ctrl = v4l2_subdev_g_ctrl, .s_ctrl = v4l2_subdev_s_ctrl, .queryctrl = v4l2_subdev_queryctrl, .querymenu = v4l2_subdev_querymenu, .log_status = saa717x_log_status, }; static const struct v4l2_subdev_tuner_ops saa717x_tuner_ops = { .g_tuner = saa717x_g_tuner, .s_tuner = saa717x_s_tuner, .s_radio = saa717x_s_radio, }; static const struct v4l2_subdev_video_ops saa717x_video_ops = { .s_routing = saa717x_s_video_routing, .s_mbus_fmt = saa717x_s_mbus_fmt, .s_stream = saa717x_s_stream, }; static const struct v4l2_subdev_audio_ops saa717x_audio_ops = { .s_routing = saa717x_s_audio_routing, }; static const struct v4l2_subdev_ops saa717x_ops = { .core = &saa717x_core_ops, .tuner = &saa717x_tuner_ops, .audio = &saa717x_audio_ops, .video = &saa717x_video_ops, }; /* ----------------------------------------------------------------------- */ /* i2c implementation */ /* ----------------------------------------------------------------------- */ static int saa717x_probe(struct i2c_client *client, const struct i2c_device_id *did) { struct saa717x_state *decoder; struct v4l2_ctrl_handler *hdl; struct v4l2_subdev *sd; u8 id = 0; char *p = ""; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; decoder = kzalloc(sizeof(struct saa717x_state), GFP_KERNEL); if (decoder == NULL) return -ENOMEM; sd = &decoder->sd; v4l2_i2c_subdev_init(sd, client, &saa717x_ops); if (saa717x_write(sd, 0x5a4, 0xfe) && saa717x_write(sd, 0x5a5, 0x0f) && saa717x_write(sd, 0x5a6, 0x00) && saa717x_write(sd, 0x5a7, 0x01)) id = saa717x_read(sd, 0x5a0); if (id != 0xc2 && id != 0x32 && id != 0xf2 && id != 0x6c) { v4l2_dbg(1, debug, sd, "saa717x not found (id=%02x)\n", id); kfree(decoder); return -ENODEV; } if (id == 0xc2) p = "saa7173"; else if (id == 0x32) 
p = "saa7174A"; else if (id == 0x6c) p = "saa7174HL"; else p = "saa7171"; v4l2_info(sd, "%s found @ 0x%x (%s)\n", p, client->addr << 1, client->adapter->name); hdl = &decoder->hdl; v4l2_ctrl_handler_init(hdl, 9); /* add in ascending ID order */ v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_CONTRAST, 0, 255, 1, 68); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_SATURATION, 0, 255, 1, 64); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_HUE, -128, 127, 1, 0); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_AUDIO_VOLUME, 0, 65535, 65535 / 100, 42000); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_AUDIO_BALANCE, 0, 65535, 65535 / 100, 32768); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_AUDIO_BASS, -16, 15, 1, 0); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_AUDIO_TREBLE, -16, 15, 1, 0); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0); sd->ctrl_handler = hdl; if (hdl->error) { int err = hdl->error; v4l2_ctrl_handler_free(hdl); kfree(decoder); return err; } decoder->std = V4L2_STD_NTSC; decoder->input = -1; decoder->enable = 1; /* FIXME!! */ decoder->playback = 0; /* initially capture mode used */ decoder->audio = 1; /* DECODER_AUDIO_48_KHZ */ decoder->audio_input = 2; /* FIXME!! */ decoder->tuner_audio_mode = TUNER_AUDIO_STEREO; /* set volume, bass and treble */ decoder->audio_main_vol_l = 6; decoder->audio_main_vol_r = 6; v4l2_dbg(1, debug, sd, "writing init values\n"); /* FIXME!! */ saa717x_write_regs(sd, reg_init_initialize); v4l2_ctrl_handler_setup(hdl); set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(2*HZ); return 0; } static int saa717x_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); v4l2_ctrl_handler_free(sd->ctrl_handler); kfree(to_state(sd)); return 0; } /* ----------------------------------------------------------------------- */ static const struct i2c_device_id saa717x_id[] = { { "saa717x", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, saa717x_id); static struct i2c_driver saa717x_driver = { .driver = { .owner = THIS_MODULE, .name = "saa717x", }, .probe = saa717x_probe, .remove = saa717x_remove, .id_table = saa717x_id, }; module_i2c_driver(saa717x_driver);
gpl-2.0
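A note on the scaler arithmetic in saa717x_s_mbus_fmt() above: the chip takes an integer horizontal prescale plus fixed-point scale increments where 1024 represents 1:1, and each increment is split into low/high register bytes for both TASK A and TASK B. A minimal userspace sketch of that math follows; the 720x480 NTSC constants are assumptions for illustration (the real SAA717X_NTSC_WIDTH/HEIGHT are defined elsewhere in the driver, not in this excerpt).

#include <stdio.h>

#define SAA717X_NTSC_WIDTH  720   /* assumed for illustration */
#define SAA717X_NTSC_HEIGHT 480   /* assumed for illustration */

static void compute_scaling(int width, int height)
{
        int prescale, h_scale, v_scale;

        /* integer prescaler, clamped to 1, exactly as in the driver */
        prescale = SAA717X_NTSC_WIDTH / width;
        if (prescale == 0)
                prescale = 1;
        /* 1024 is a 1:1 horizontal scale after prescaling */
        h_scale = 1024 * SAA717X_NTSC_WIDTH / prescale / width;
        /* 512 per field, doubled because the output is interlaced */
        v_scale = 512 * 2 * SAA717X_NTSC_HEIGHT / height;

        /* the driver splits each increment into low/high register bytes */
        printf("%4dx%-4d prescale=%d h_scale=%d (lo=0x%02x hi=0x%02x) v_scale=%d\n",
               width, height, prescale, h_scale,
               h_scale & 0xFF, (h_scale >> 8) & 0xFF, v_scale);
}

int main(void)
{
        compute_scaling(720, 480);
        compute_scaling(360, 240);
        compute_scaling(320, 240);
        return 0;
}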
Kali-/android_kernel_sony_apq8064
drivers/media/video/upd64031a.c
5150
7643
/* * upd64031A - NEC Electronics Ghost Reduction for NTSC in Japan * * 2003 by T.Adachi <tadachi@tadachi-net.com> * 2003 by Takeru KOMORIYA <komoriya@paken.org> * 2006 by Hans Verkuil <hverkuil@xs4all.nl> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <linux/slab.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> #include <media/upd64031a.h> /* --------------------- read registers functions define -------------------- */ /* bit masks */ #define GR_MODE_MASK 0xc0 #define DIRECT_3DYCS_CONNECT_MASK 0xc0 #define SYNC_CIRCUIT_MASK 0xa0 /* -------------------------------------------------------------------------- */ MODULE_DESCRIPTION("uPD64031A driver"); MODULE_AUTHOR("T. Adachi, Takeru KOMORIYA, Hans Verkuil"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug level (0-1)"); enum { R00 = 0, R01, R02, R03, R04, R05, R06, R07, R08, R09, R0A, R0B, R0C, R0D, R0E, R0F, /* unused registers R10, R11, R12, R13, R14, R15, R16, R17, */ TOT_REGS }; struct upd64031a_state { struct v4l2_subdev sd; u8 regs[TOT_REGS]; u8 gr_mode; u8 direct_3dycs_connect; u8 ext_comp_sync; u8 ext_vert_sync; }; static inline struct upd64031a_state *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct upd64031a_state, sd); } static u8 upd64031a_init[] = { 0x00, 0xb8, 0x48, 0xd2, 0xe6, 0x03, 0x10, 0x0b, 0xaf, 0x7f, 0x00, 0x00, 0x1d, 0x5e, 0x00, 0xd0 }; /* ------------------------------------------------------------------------ */ static u8 upd64031a_read(struct v4l2_subdev *sd, u8 reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); u8 buf[2]; if (reg >= sizeof(buf)) return 0xff; i2c_master_recv(client, buf, 2); return buf[reg]; } /* ------------------------------------------------------------------------ */ static void upd64031a_write(struct v4l2_subdev *sd, u8 reg, u8 val) { struct i2c_client *client = v4l2_get_subdevdata(sd); u8 buf[2]; buf[0] = reg; buf[1] = val; v4l2_dbg(1, debug, sd, "write reg: %02X val: %02X\n", reg, val); if (i2c_master_send(client, buf, 2) != 2) v4l2_err(sd, "I/O error write 0x%02x/0x%02x\n", reg, val); } /* ------------------------------------------------------------------------ */ /* The input changed due to new input or channel changed */ static int upd64031a_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *freq) { struct upd64031a_state *state = to_state(sd); u8 reg = state->regs[R00]; v4l2_dbg(1, debug, sd, "changed input or channel\n"); upd64031a_write(sd, R00, reg | 0x10); upd64031a_write(sd, R00, reg & ~0x10); return 0; } /* ------------------------------------------------------------------------ */ static int upd64031a_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct upd64031a_state *state = to_state(sd); u8 r00, r05, r08; 
state->gr_mode = (input & 3) << 6; state->direct_3dycs_connect = (input & 0xc) << 4; state->ext_comp_sync = (input & UPD64031A_COMPOSITE_EXTERNAL) << 1; state->ext_vert_sync = (input & UPD64031A_VERTICAL_EXTERNAL) << 2; r00 = (state->regs[R00] & ~GR_MODE_MASK) | state->gr_mode; r05 = (state->regs[R00] & ~SYNC_CIRCUIT_MASK) | state->ext_comp_sync | state->ext_vert_sync; r08 = (state->regs[R08] & ~DIRECT_3DYCS_CONNECT_MASK) | state->direct_3dycs_connect; upd64031a_write(sd, R00, r00); upd64031a_write(sd, R05, r05); upd64031a_write(sd, R08, r08); return upd64031a_s_frequency(sd, NULL); } static int upd64031a_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_UPD64031A, 0); } static int upd64031a_log_status(struct v4l2_subdev *sd) { v4l2_info(sd, "Status: SA00=0x%02x SA01=0x%02x\n", upd64031a_read(sd, 0), upd64031a_read(sd, 1)); return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int upd64031a_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (!v4l2_chip_match_i2c_client(client, &reg->match)) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; reg->val = upd64031a_read(sd, reg->reg & 0xff); reg->size = 1; return 0; } static int upd64031a_s_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (!v4l2_chip_match_i2c_client(client, &reg->match)) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; upd64031a_write(sd, reg->reg & 0xff, reg->val & 0xff); return 0; } #endif /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops upd64031a_core_ops = { .log_status = upd64031a_log_status, .g_chip_ident = upd64031a_g_chip_ident, #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = upd64031a_g_register, .s_register = upd64031a_s_register, #endif }; static const struct v4l2_subdev_tuner_ops upd64031a_tuner_ops = { .s_frequency = upd64031a_s_frequency, }; static const struct v4l2_subdev_video_ops upd64031a_video_ops = { .s_routing = upd64031a_s_routing, }; static const struct v4l2_subdev_ops upd64031a_ops = { .core = &upd64031a_core_ops, .tuner = &upd64031a_tuner_ops, .video = &upd64031a_video_ops, }; /* ------------------------------------------------------------------------ */ /* i2c implementation */ static int upd64031a_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct upd64031a_state *state; struct v4l2_subdev *sd; int i; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); state = kzalloc(sizeof(struct upd64031a_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; v4l2_i2c_subdev_init(sd, client, &upd64031a_ops); memcpy(state->regs, upd64031a_init, sizeof(state->regs)); state->gr_mode = UPD64031A_GR_ON << 6; state->direct_3dycs_connect = UPD64031A_3DYCS_COMPOSITE << 4; state->ext_comp_sync = state->ext_vert_sync = 0; for (i = 0; i < TOT_REGS; i++) upd64031a_write(sd, i, state->regs[i]); return 0; } static int upd64031a_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); kfree(to_state(sd)); return 0; } /* ----------------------------------------------------------------------- */ static const struct i2c_device_id upd64031a_id[] = { { 
"upd64031a", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, upd64031a_id); static struct i2c_driver upd64031a_driver = { .driver = { .owner = THIS_MODULE, .name = "upd64031a", }, .probe = upd64031a_probe, .remove = upd64031a_remove, .id_table = upd64031a_id, }; module_i2c_driver(upd64031a_driver);
gpl-2.0
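The routing logic in upd64031a_s_routing() above is a read-modify-write pattern over a shadow register cache: clear the field with its mask, then OR in the freshly packed bits. One oddity worth flagging: the r05 line masks state->regs[R00] where state->regs[R05] would be expected, which reads like a copy/paste slip; the code is reproduced verbatim above rather than silently altered. A standalone model of the pattern, reusing the mask values defined in the driver:

#include <stdio.h>
#include <stdint.h>

#define GR_MODE_MASK              0xc0
#define DIRECT_3DYCS_CONNECT_MASK 0xc0

/* generic read-modify-write: keep everything outside the mask */
static uint8_t update_field(uint8_t cached, uint8_t mask, uint8_t bits)
{
        return (cached & ~mask) | (bits & mask);
}

int main(void)
{
        uint8_t r00 = 0x00, r08 = 0xaf;    /* defaults from upd64031a_init[] */
        uint32_t input = 0x5;              /* hypothetical routing word */

        uint8_t gr_mode = (input & 3) << 6;        /* bits 7:6 of R00 */
        uint8_t dycs_connect = (input & 0xc) << 4; /* bits 7:6 of R08 */

        printf("R00: 0x%02x -> 0x%02x\n", r00,
               update_field(r00, GR_MODE_MASK, gr_mode));
        printf("R08: 0x%02x -> 0x%02x\n", r08,
               update_field(r08, DIRECT_3DYCS_CONNECT_MASK, dycs_connect));
        return 0;
}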
talexop/talexop_kernel_i9505_4_4_2
drivers/gpu/drm/nouveau/nv17_tv.c
5406
24157
/* * Copyright (C) 2009 Francisco Jerez. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * */ #include "drmP.h" #include "drm_crtc_helper.h" #include "nouveau_drv.h" #include "nouveau_encoder.h" #include "nouveau_connector.h" #include "nouveau_crtc.h" #include "nouveau_gpio.h" #include "nouveau_hw.h" #include "nv17_tv.h" static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t testval, regoffset = nv04_dac_output_offset(encoder); uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end, fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c; uint32_t sample = 0; int head; #define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20) testval = RGB_TEST_DATA(0x82, 0xeb, 0x82); if (dev_priv->vbios.tvdactestval) testval = dev_priv->vbios.tvdactestval; dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset); head = (dacclk & 0x100) >> 8; /* Save the previous state. */ gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1); gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0); fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL); fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START); fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END); fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); test_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); ctv_1c = NVReadRAMDAC(dev, head, 0x680c1c); ctv_14 = NVReadRAMDAC(dev, head, 0x680c14); ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c); /* Prepare the DAC for load detection. 
*/ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, true); nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, true); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, 1183); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12 | NV_PRAMDAC_FP_TG_CONTROL_READ_PROG | NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS | NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS); NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, 0); NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, (dacclk & ~0xff) | 0x22); msleep(1); NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, (dacclk & ~0xff) | 0x21); NVWriteRAMDAC(dev, head, 0x680c1c, 1 << 20); NVWriteRAMDAC(dev, head, 0x680c14, 4 << 16); /* Sample pin 0x4 (usually S-video luma). */ NVWriteRAMDAC(dev, head, 0x680c6c, testval >> 10 & 0x3ff); msleep(20); sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset) & 0x4 << 28; /* Sample the remaining pins. */ NVWriteRAMDAC(dev, head, 0x680c6c, testval & 0x3ff); msleep(20); sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset) & 0xa << 28; /* Restore the previous state. */ NVWriteRAMDAC(dev, head, 0x680c1c, ctv_1c); NVWriteRAMDAC(dev, head, 0x680c14, ctv_14); NVWriteRAMDAC(dev, head, 0x680c6c, ctv_6c); NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, dacclk); NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, test_ctrl); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, fp_control); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal); nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, gpio1); nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, gpio0); return sample; } static bool get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask) { /* Zotac FX5200 */ if (nv_match_device(dev, 0x0322, 0x19da, 0x1035) || nv_match_device(dev, 0x0322, 0x19da, 0x2035)) { *pin_mask = 0xc; return false; } /* MSI nForce2 IGP */ if (nv_match_device(dev, 0x01f0, 0x1462, 0x5710)) { *pin_mask = 0xc; return false; } return true; } static enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_mode_config *conf = &dev->mode_config; struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); struct dcb_entry *dcb = tv_enc->base.dcb; bool reliable = get_tv_detect_quirks(dev, &tv_enc->pin_mask); if (nv04_dac_in_use(encoder)) return connector_status_disconnected; if (reliable) { if (dev_priv->chipset == 0x42 || dev_priv->chipset == 0x43) tv_enc->pin_mask = nv42_tv_sample_load(encoder) >> 28 & 0xe; else tv_enc->pin_mask = nv17_dac_sample_load(encoder) >> 28 & 0xe; } switch (tv_enc->pin_mask) { case 0x2: case 0x4: tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Composite; break; case 0xc: tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO; break; case 0xe: if (dcb->tvconf.has_component_output) tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component; else tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART; break; default: tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown; break; } drm_connector_property_set_value(connector, conf->tv_subconnector_property, tv_enc->subconnector); if (!reliable) { return connector_status_unknown; } else if (tv_enc->subconnector) { NV_INFO(dev, "Load 
detected on output %c\n", '@' + ffs(dcb->or)); return connector_status_connected; } else { return connector_status_disconnected; } } static int nv17_tv_get_ld_modes(struct drm_encoder *encoder, struct drm_connector *connector) { struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); const struct drm_display_mode *tv_mode; int n = 0; for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) { struct drm_display_mode *mode; mode = drm_mode_duplicate(encoder->dev, tv_mode); mode->clock = tv_norm->tv_enc_mode.vrefresh * mode->htotal / 1000 * mode->vtotal / 1000; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) mode->clock *= 2; if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay && mode->vdisplay == tv_norm->tv_enc_mode.vdisplay) mode->type |= DRM_MODE_TYPE_PREFERRED; drm_mode_probed_add(connector, mode); n++; } return n; } static int nv17_tv_get_hd_modes(struct drm_encoder *encoder, struct drm_connector *connector) { struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); struct drm_display_mode *output_mode = &tv_norm->ctv_enc_mode.mode; struct drm_display_mode *mode; const struct { int hdisplay; int vdisplay; } modes[] = { { 640, 400 }, { 640, 480 }, { 720, 480 }, { 720, 576 }, { 800, 600 }, { 1024, 768 }, { 1280, 720 }, { 1280, 1024 }, { 1920, 1080 } }; int i, n = 0; for (i = 0; i < ARRAY_SIZE(modes); i++) { if (modes[i].hdisplay > output_mode->hdisplay || modes[i].vdisplay > output_mode->vdisplay) continue; if (modes[i].hdisplay == output_mode->hdisplay && modes[i].vdisplay == output_mode->vdisplay) { mode = drm_mode_duplicate(encoder->dev, output_mode); mode->type |= DRM_MODE_TYPE_PREFERRED; } else { mode = drm_cvt_mode(encoder->dev, modes[i].hdisplay, modes[i].vdisplay, 60, false, (output_mode->flags & DRM_MODE_FLAG_INTERLACE), false); } /* CVT modes are sometimes unsuitable... 
*/ if (output_mode->hdisplay <= 720 || output_mode->hdisplay >= 1920) { mode->htotal = output_mode->htotal; mode->hsync_start = (mode->hdisplay + (mode->htotal - mode->hdisplay) * 9 / 10) & ~7; mode->hsync_end = mode->hsync_start + 8; } if (output_mode->vdisplay >= 1024) { mode->vtotal = output_mode->vtotal; mode->vsync_start = output_mode->vsync_start; mode->vsync_end = output_mode->vsync_end; } mode->type |= DRM_MODE_TYPE_DRIVER; drm_mode_probed_add(connector, mode); n++; } return n; } static int nv17_tv_get_modes(struct drm_encoder *encoder, struct drm_connector *connector) { struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); if (tv_norm->kind == CTV_ENC_MODE) return nv17_tv_get_hd_modes(encoder, connector); else return nv17_tv_get_ld_modes(encoder, connector); } static int nv17_tv_mode_valid(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); if (tv_norm->kind == CTV_ENC_MODE) { struct drm_display_mode *output_mode = &tv_norm->ctv_enc_mode.mode; if (mode->clock > 400000) return MODE_CLOCK_HIGH; if (mode->hdisplay > output_mode->hdisplay || mode->vdisplay > output_mode->vdisplay) return MODE_BAD; if ((mode->flags & DRM_MODE_FLAG_INTERLACE) != (output_mode->flags & DRM_MODE_FLAG_INTERLACE)) return MODE_NO_INTERLACE; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; } else { const int vsync_tolerance = 600; if (mode->clock > 70000) return MODE_CLOCK_HIGH; if (abs(drm_mode_vrefresh(mode) * 1000 - tv_norm->tv_enc_mode.vrefresh) > vsync_tolerance) return MODE_VSYNC; /* The encoder takes care of the actual interlacing */ if (mode->flags & DRM_MODE_FLAG_INTERLACE) return MODE_NO_INTERLACE; } return MODE_OK; } static bool nv17_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); if (nv04_dac_in_use(encoder)) return false; if (tv_norm->kind == CTV_ENC_MODE) adjusted_mode->clock = tv_norm->ctv_enc_mode.mode.clock; else adjusted_mode->clock = 90000; return true; } static void nv17_tv_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct nv17_tv_state *regs = &to_tv_enc(encoder)->state; struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); if (nouveau_encoder(encoder)->last_dpms == mode) return; nouveau_encoder(encoder)->last_dpms = mode; NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n", mode, nouveau_encoder(encoder)->dcb->index); regs->ptv_200 &= ~1; if (tv_norm->kind == CTV_ENC_MODE) { nv04_dfp_update_fp_control(encoder, mode); } else { nv04_dfp_update_fp_control(encoder, DRM_MODE_DPMS_OFF); if (mode == DRM_MODE_DPMS_ON) regs->ptv_200 |= 1; } nv_load_ptv(dev, regs, 200); nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON); nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON); nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON); } static void nv17_tv_prepare(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_encoder_helper_funcs *helper = encoder->helper_private; struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); int head = nouveau_crtc(encoder->crtc)->index; uint8_t *cr_lcd = &dev_priv->mode_reg.crtc_reg[head].CRTC[ NV_CIO_CRE_LCD__INDEX]; uint32_t dacclk_off = NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder); uint32_t dacclk; helper->dpms(encoder, DRM_MODE_DPMS_OFF); 
nv04_dfp_disable(dev, head); /* Unbind any FP encoders from this head if we need the FP * stuff enabled. */ if (tv_norm->kind == CTV_ENC_MODE) { struct drm_encoder *enc; list_for_each_entry(enc, &dev->mode_config.encoder_list, head) { struct dcb_entry *dcb = nouveau_encoder(enc)->dcb; if ((dcb->type == OUTPUT_TMDS || dcb->type == OUTPUT_LVDS) && !enc->crtc && nv04_dfp_get_bound_head(dev, dcb) == head) { nv04_dfp_bind_head(dev, dcb, head ^ 1, dev_priv->vbios.fp.dual_link); } } } if (tv_norm->kind == CTV_ENC_MODE) *cr_lcd |= 0x1 | (head ? 0x0 : 0x8); /* Set the DACCLK register */ dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1; if (dev_priv->card_type == NV_40) dacclk |= 0x1a << 16; if (tv_norm->kind == CTV_ENC_MODE) { dacclk |= 0x20; if (head) dacclk |= 0x100; else dacclk &= ~0x100; } else { dacclk |= 0x10; } NVWriteRAMDAC(dev, 0, dacclk_off, dacclk); } static void nv17_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *drm_mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; int head = nouveau_crtc(encoder->crtc)->index; struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head]; struct nv17_tv_state *tv_regs = &to_tv_enc(encoder)->state; struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); int i; regs->CRTC[NV_CIO_CRE_53] = 0x40; /* FP_HTIMING */ regs->CRTC[NV_CIO_CRE_54] = 0; /* FP_VTIMING */ regs->ramdac_630 = 0x2; /* turn off green mode (tv test pattern?) */ regs->tv_setup = 1; regs->ramdac_8c0 = 0x0; if (tv_norm->kind == TV_ENC_MODE) { tv_regs->ptv_200 = 0x13111100; if (head) tv_regs->ptv_200 |= 0x10; tv_regs->ptv_20c = 0x808010; tv_regs->ptv_304 = 0x2d00000; tv_regs->ptv_600 = 0x0; tv_regs->ptv_60c = 0x0; tv_regs->ptv_610 = 0x1e00000; if (tv_norm->tv_enc_mode.vdisplay == 576) { tv_regs->ptv_508 = 0x1200000; tv_regs->ptv_614 = 0x33; } else if (tv_norm->tv_enc_mode.vdisplay == 480) { tv_regs->ptv_508 = 0xf00000; tv_regs->ptv_614 = 0x13; } if (dev_priv->card_type >= NV_30) { tv_regs->ptv_500 = 0xe8e0; tv_regs->ptv_504 = 0x1710; tv_regs->ptv_604 = 0x0; tv_regs->ptv_608 = 0x0; } else { if (tv_norm->tv_enc_mode.vdisplay == 576) { tv_regs->ptv_604 = 0x20; tv_regs->ptv_608 = 0x10; tv_regs->ptv_500 = 0x19710; tv_regs->ptv_504 = 0x68f0; } else if (tv_norm->tv_enc_mode.vdisplay == 480) { tv_regs->ptv_604 = 0x10; tv_regs->ptv_608 = 0x20; tv_regs->ptv_500 = 0x4b90; tv_regs->ptv_504 = 0x1b480; } } for (i = 0; i < 0x40; i++) tv_regs->tv_enc[i] = tv_norm->tv_enc_mode.tv_enc[i]; } else { struct drm_display_mode *output_mode = &tv_norm->ctv_enc_mode.mode; /* The registers in PRAMDAC+0xc00 control some timings and CSC * parameters for the CTV encoder (It's only used for "HD" TV * modes, I don't think I have enough working to guess what * they exactly mean...), it's probably connected at the * output of the FP encoder, but it also needs the analog * encoder in its OR enabled and routed to the head it's * using. It's enabled with the DACCLK register, bits [5:4]. 
*/ for (i = 0; i < 38; i++) regs->ctv_regs[i] = tv_norm->ctv_enc_mode.ctv_regs[i]; regs->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1; regs->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1; regs->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1; regs->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1; regs->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay + max((output_mode->hdisplay-600)/40 - 1, 1); regs->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1; regs->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1; regs->fp_vert_regs[FP_SYNC_START] = output_mode->vsync_start - 1; regs->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1; regs->fp_vert_regs[FP_CRTC] = output_mode->vdisplay - 1; regs->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | NV_PRAMDAC_FP_TG_CONTROL_READ_PROG | NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12; if (output_mode->flags & DRM_MODE_FLAG_PVSYNC) regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS; if (output_mode->flags & DRM_MODE_FLAG_PHSYNC) regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS; regs->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND | NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND | NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR | NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR | NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED | NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE | NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE; regs->fp_debug_2 = 0; regs->fp_margin_color = 0x801080; } } static void nv17_tv_commit(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct drm_encoder_helper_funcs *helper = encoder->helper_private; if (get_tv_norm(encoder)->kind == TV_ENC_MODE) { nv17_tv_update_rescaler(encoder); nv17_tv_update_properties(encoder); } else { nv17_ctv_update_rescaler(encoder); } nv17_tv_state_load(dev, &to_tv_enc(encoder)->state); /* This could use refinement for flatpanels, but it should work */ if (dev_priv->chipset < 0x44) NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); else NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); helper->dpms(encoder, DRM_MODE_DPMS_ON); NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n", drm_get_connector_name( &nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); } static void nv17_tv_save(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); nouveau_encoder(encoder)->restore.output = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder)); nv17_tv_state_save(dev, &tv_enc->saved_state); tv_enc->state.ptv_200 = tv_enc->saved_state.ptv_200; } static void nv17_tv_restore(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder), nouveau_encoder(encoder)->restore.output); nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state); nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED; } static int nv17_tv_create_resources(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct drm_mode_config *conf = &dev->mode_config; struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; int num_tv_norms = 
dcb->tvconf.has_component_output ? NUM_TV_NORMS : NUM_LD_TV_NORMS; int i; if (nouveau_tv_norm) { for (i = 0; i < num_tv_norms; i++) { if (!strcmp(nv17_tv_norm_names[i], nouveau_tv_norm)) { tv_enc->tv_norm = i; break; } } if (i == num_tv_norms) NV_WARN(dev, "Invalid TV norm setting \"%s\"\n", nouveau_tv_norm); } drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names); drm_connector_attach_property(connector, conf->tv_select_subconnector_property, tv_enc->select_subconnector); drm_connector_attach_property(connector, conf->tv_subconnector_property, tv_enc->subconnector); drm_connector_attach_property(connector, conf->tv_mode_property, tv_enc->tv_norm); drm_connector_attach_property(connector, conf->tv_flicker_reduction_property, tv_enc->flicker); drm_connector_attach_property(connector, conf->tv_saturation_property, tv_enc->saturation); drm_connector_attach_property(connector, conf->tv_hue_property, tv_enc->hue); drm_connector_attach_property(connector, conf->tv_overscan_property, tv_enc->overscan); return 0; } static int nv17_tv_set_property(struct drm_encoder *encoder, struct drm_connector *connector, struct drm_property *property, uint64_t val) { struct drm_mode_config *conf = &encoder->dev->mode_config; struct drm_crtc *crtc = encoder->crtc; struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); bool modes_changed = false; if (property == conf->tv_overscan_property) { tv_enc->overscan = val; if (encoder->crtc) { if (tv_norm->kind == CTV_ENC_MODE) nv17_ctv_update_rescaler(encoder); else nv17_tv_update_rescaler(encoder); } } else if (property == conf->tv_saturation_property) { if (tv_norm->kind != TV_ENC_MODE) return -EINVAL; tv_enc->saturation = val; nv17_tv_update_properties(encoder); } else if (property == conf->tv_hue_property) { if (tv_norm->kind != TV_ENC_MODE) return -EINVAL; tv_enc->hue = val; nv17_tv_update_properties(encoder); } else if (property == conf->tv_flicker_reduction_property) { if (tv_norm->kind != TV_ENC_MODE) return -EINVAL; tv_enc->flicker = val; if (encoder->crtc) nv17_tv_update_rescaler(encoder); } else if (property == conf->tv_mode_property) { if (connector->dpms != DRM_MODE_DPMS_OFF) return -EINVAL; tv_enc->tv_norm = val; modes_changed = true; } else if (property == conf->tv_select_subconnector_property) { if (tv_norm->kind != TV_ENC_MODE) return -EINVAL; tv_enc->select_subconnector = val; nv17_tv_update_properties(encoder); } else { return -EINVAL; } if (modes_changed) { drm_helper_probe_single_connector_modes(connector, 0, 0); /* Disable the crtc to ensure a full modeset is * performed whenever it's turned on again. 
*/ if (crtc) { struct drm_mode_set modeset = { .crtc = crtc, }; crtc->funcs->set_config(&modeset); } } return 0; } static void nv17_tv_destroy(struct drm_encoder *encoder) { struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); NV_DEBUG_KMS(encoder->dev, "\n"); drm_encoder_cleanup(encoder); kfree(tv_enc); } static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = { .dpms = nv17_tv_dpms, .save = nv17_tv_save, .restore = nv17_tv_restore, .mode_fixup = nv17_tv_mode_fixup, .prepare = nv17_tv_prepare, .commit = nv17_tv_commit, .mode_set = nv17_tv_mode_set, .detect = nv17_tv_detect, }; static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = { .get_modes = nv17_tv_get_modes, .mode_valid = nv17_tv_mode_valid, .create_resources = nv17_tv_create_resources, .set_property = nv17_tv_set_property, }; static struct drm_encoder_funcs nv17_tv_funcs = { .destroy = nv17_tv_destroy, }; int nv17_tv_create(struct drm_connector *connector, struct dcb_entry *entry) { struct drm_device *dev = connector->dev; struct drm_encoder *encoder; struct nv17_tv_encoder *tv_enc = NULL; tv_enc = kzalloc(sizeof(*tv_enc), GFP_KERNEL); if (!tv_enc) return -ENOMEM; tv_enc->overscan = 50; tv_enc->flicker = 50; tv_enc->saturation = 50; tv_enc->hue = 0; tv_enc->tv_norm = TV_NORM_PAL; tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown; tv_enc->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic; tv_enc->pin_mask = 0; encoder = to_drm_encoder(&tv_enc->base); tv_enc->base.dcb = entry; tv_enc->base.or = ffs(entry->or) - 1; drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC); drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs); to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs; encoder->possible_crtcs = entry->heads; encoder->possible_clones = 0; nv17_tv_create_resources(encoder, connector); drm_mode_connector_attach_encoder(connector, encoder); return 0; }
gpl-2.0
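In nv17_tv_detect() above, the DAC load-detect result is reduced to a pin mask (sample >> 28 & 0xe) and then mapped to a DRM TV subconnector. The same mapping as a plain function, runnable in userspace; has_component stands in for dcb->tvconf.has_component_output and the sample value is made up for the demo:

#include <stdio.h>

static const char *subconnector_name(unsigned pin_mask, int has_component)
{
        switch (pin_mask) {
        case 0x2:
        case 0x4:
                return "Composite";        /* a single pin loaded */
        case 0xc:
                return "SVIDEO";           /* luma + chroma pins loaded */
        case 0xe:
                return has_component ? "Component" : "SCART";
        default:
                return "Unknown";
        }
}

int main(void)
{
        unsigned sample = 0xe0000000;      /* hypothetical load-detect sample */
        unsigned pin_mask = sample >> 28 & 0xe;

        printf("pin_mask=0x%x -> %s\n", pin_mask,
               subconnector_name(pin_mask, 1));
        return 0;
}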
dan-sw/linux-v2.6.38.8-dan3400
arch/s390/lib/div64.c
9246
3889
/* * arch/s390/lib/div64.c * * __div64_32 implementation for 31 bit. * * Copyright (C) IBM Corp. 2006 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), */ #include <linux/types.h> #include <linux/module.h> #ifdef CONFIG_MARCH_G5 /* * Function to divide an unsigned 64 bit integer by an unsigned * 31 bit integer using signed 64/32 bit division. */ static uint32_t __div64_31(uint64_t *n, uint32_t base) { register uint32_t reg2 asm("2"); register uint32_t reg3 asm("3"); uint32_t *words = (uint32_t *) n; uint32_t tmp; /* Special case base==1, remainder = 0, quotient = n */ if (base == 1) return 0; /* * Special case base==0 will cause a fixed point divide exception * on the dr instruction and may not happen anyway. For the * following calculation we can assume base > 1. The first * signed 64 / 32 bit division with an upper half of 0 will * give the correct upper half of the 64 bit quotient. */ reg2 = 0UL; reg3 = words[0]; asm volatile( " dr %0,%2\n" : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" ); words[0] = reg3; reg3 = words[1]; /* * To get the lower half of the 64 bit quotient and the 32 bit * remainder we have to use a little trick. Since we only have * a signed division the quotient can get too big. To avoid this * the 64 bit dividend is halved, then the signed division will * work. Afterwards the quotient and the remainder are doubled. * If the last bit of the dividend has been one the remainder * is increased by one then checked against the base. If the * remainder has overflown subtract base and increase the * quotient. Simple, no ? */ asm volatile( " nr %2,%1\n" " srdl %0,1\n" " dr %0,%3\n" " alr %0,%0\n" " alr %1,%1\n" " alr %0,%2\n" " clr %0,%3\n" " jl 0f\n" " slr %0,%3\n" " ahi %1,1\n" "0:\n" : "+d" (reg2), "+d" (reg3), "=d" (tmp) : "d" (base), "2" (1UL) : "cc" ); words[1] = reg3; return reg2; } /* * Function to divide an unsigned 64 bit integer by an unsigned * 32 bit integer using the unsigned 64/31 bit division. */ uint32_t __div64_32(uint64_t *n, uint32_t base) { uint32_t r; /* * If the most significant bit of base is set, divide n by * (base/2). That allows to use 64/31 bit division and gives a * good approximation of the result: n = (base/2)*q + r. The * result needs to be corrected with two simple transformations. * If base is already < 2^31-1 __div64_31 can be used directly. */ r = __div64_31(n, ((signed) base < 0) ? (base/2) : base); if ((signed) base < 0) { uint64_t q = *n; /* * First transformation: * n = (base/2)*q + r * = ((base/2)*2)*(q/2) + ((q&1) ? (base/2) : 0) + r * Since r < (base/2), r + (base/2) < base. * With q1 = (q/2) and r1 = r + ((q&1) ? (base/2) : 0) * n = ((base/2)*2)*q1 + r1 with r1 < base. */ if (q & 1) r += base/2; q >>= 1; /* * Second transformation. ((base/2)*2) could have lost the * last bit. * n = ((base/2)*2)*q1 + r1 * = base*q1 - ((base&1) ? q1 : 0) + r1 */ if (base & 1) { int64_t rx = r - q; /* * base is >= 2^31. The worst case for the while * loop is n=2^64-1 base=2^31+1. That gives a * maximum for q=(2^64-1)/2^31 = 0x1ffffffff. Since * base >= 2^31 the loop is finished after a maximum * of three iterations. 
*/ while (rx < 0) { rx += base; q--; } r = rx; } *n = q; } return r; } #else /* MARCH_G5 */ uint32_t __div64_32(uint64_t *n, uint32_t base) { register uint32_t reg2 asm("2"); register uint32_t reg3 asm("3"); uint32_t *words = (uint32_t *) n; reg2 = 0UL; reg3 = words[0]; asm volatile( " dlr %0,%2\n" : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" ); words[0] = reg3; reg3 = words[1]; asm volatile( " dlr %0,%2\n" : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" ); words[1] = reg3; return reg2; } #endif /* MARCH_G5 */
gpl-2.0
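The two "transformations" documented in __div64_32() above can be checked without any s390 assembly: divide by base/2 when the top bit of base is set, then fold the quotient's low bit back into the remainder and compensate for the odd bit of base. A userspace model, under the assumption that plain 64-bit division stands in for __div64_31(); native division also serves as the oracle for the final check, so this verifies the algebra only, not the inline assembly:

#include <stdio.h>
#include <stdint.h>

static uint32_t div64_32_model(uint64_t *n, uint32_t base)
{
        uint32_t half = ((int32_t)base < 0) ? base / 2 : base;
        uint64_t q = *n / half;            /* stands in for __div64_31() */
        uint32_t r = (uint32_t)(*n % half);

        if ((int32_t)base < 0) {
                /* first transformation: fold q's low bit into r */
                if (q & 1)
                        r += base / 2;
                q >>= 1;
                /* second transformation: (base/2)*2 lost base's odd bit */
                if (base & 1) {
                        int64_t rx = (int64_t)r - (int64_t)q;
                        while (rx < 0) {   /* a handful of iterations at most */
                                rx += base;
                                q--;
                        }
                        r = (uint32_t)rx;
                }
        }
        *n = q;
        return r;
}

int main(void)
{
        uint64_t q = 0xffffffffffffffffULL; /* worst case from the comment */
        uint32_t base = 0x80000001u;
        uint32_t r = div64_32_model(&q, base);

        printf("q=%llu r=%u check=%llu (expect 2^64-1)\n",
               (unsigned long long)q, r,
               (unsigned long long)(q * base + r));
        return 0;
}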
suncycheng/linux
drivers/parisc/asp.c
13854
3607
/* * ASP Device Driver * * (c) Copyright 2000 The Puffin Group Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * by Helge Deller <deller@gmx.de> */ #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/types.h> #include <asm/io.h> #include <asm/led.h> #include "gsc.h" #define ASP_GSC_IRQ 3 /* hardcoded interrupt for GSC */ #define ASP_VER_OFFSET 0x20 /* offset of ASP version */ #define ASP_LED_ADDR 0xf0800020 #define VIPER_INT_WORD 0xFFFBF088 /* addr of viper interrupt word */ static struct gsc_asic asp; static void asp_choose_irq(struct parisc_device *dev, void *ctrl) { int irq; switch (dev->id.sversion) { case 0x71: irq = 9; break; /* SCSI */ case 0x72: irq = 8; break; /* LAN */ case 0x73: irq = 1; break; /* HIL */ case 0x74: irq = 7; break; /* Centronics */ case 0x75: irq = (dev->hw_path == 4) ? 5 : 6; break; /* RS232 */ case 0x76: irq = 10; break; /* EISA BA */ case 0x77: irq = 11; break; /* Graphics1 */ case 0x7a: irq = 13; break; /* Audio (Bushmaster) */ case 0x7b: irq = 13; break; /* Audio (Scorpio) */ case 0x7c: irq = 3; break; /* FW SCSI */ case 0x7d: irq = 4; break; /* FDDI */ case 0x7f: irq = 13; break; /* Audio (Outfield) */ default: return; /* Unknown */ } gsc_asic_assign_irq(ctrl, irq, &dev->irq); switch (dev->id.sversion) { case 0x73: irq = 2; break; /* i8042 High-priority */ case 0x76: irq = 0; break; /* EISA BA */ default: return; /* Other */ } gsc_asic_assign_irq(ctrl, irq, &dev->aux_irq); } /* There are two register ranges we're interested in. Interrupt / * Status / LED are at 0xf080xxxx and Asp special registers are at * 0xf082fxxx. PDC only tells us that Asp is at 0xf082f000, so for * the purposes of interrupt handling, we have to tell other bits of * the kernel to look at the other registers. */ #define ASP_INTERRUPT_ADDR 0xf0800000 static int __init asp_init_chip(struct parisc_device *dev) { struct gsc_irq gsc_irq; int ret; asp.version = gsc_readb(dev->hpa.start + ASP_VER_OFFSET) & 0xf; asp.name = (asp.version == 1) ? "Asp" : "Cutoff"; asp.hpa = ASP_INTERRUPT_ADDR; printk(KERN_INFO "%s version %d at 0x%lx found.\n", asp.name, asp.version, (unsigned long)dev->hpa.start); /* the IRQ ASP should use */ ret = -EBUSY; dev->irq = gsc_claim_irq(&gsc_irq, ASP_GSC_IRQ); if (dev->irq < 0) { printk(KERN_ERR "%s(): cannot get GSC irq\n", __func__); goto out; } asp.eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data; ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "asp", &asp); if (ret < 0) goto out; /* Program VIPER to interrupt on the ASP irq */ gsc_writel((1 << (31 - ASP_GSC_IRQ)),VIPER_INT_WORD); /* Done init'ing, register this driver */ ret = gsc_common_setup(dev, &asp); if (ret) goto out; gsc_fixup_irqs(dev, &asp, asp_choose_irq); /* Mongoose is a sibling of Asp, not a child... */ gsc_fixup_irqs(parisc_parent(dev), &asp, asp_choose_irq); /* initialize the chassis LEDs */ #ifdef CONFIG_CHASSIS_LCD_LED register_led_driver(DISPLAY_MODEL_OLD_ASP, LED_CMD_REG_NONE, ASP_LED_ADDR); #endif out: return ret; } static struct parisc_device_id asp_tbl[] = { { HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00070 }, { 0, } }; struct parisc_driver asp_driver = { .name = "asp", .id_table = asp_tbl, .probe = asp_init_chip, };
gpl-2.0
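The asp_choose_irq() switch above is a sparse sversion-to-IRQ map with one path-dependent case (RS232). The same mapping expressed table-driven, as a sketch of an alternative structure only; entries are copied from the switch and -1 means no fixed IRQ is known:

#include <stdio.h>

struct irq_map { int sversion; int irq; };

static const struct irq_map asp_irqs[] = {
        { 0x71,  9 }, /* SCSI */
        { 0x72,  8 }, /* LAN */
        { 0x73,  1 }, /* HIL */
        { 0x74,  7 }, /* Centronics */
        { 0x76, 10 }, /* EISA BA */
        { 0x77, 11 }, /* Graphics1 */
        { 0x7a, 13 }, /* Audio (Bushmaster) */
        { 0x7b, 13 }, /* Audio (Scorpio) */
        { 0x7c,  3 }, /* FW SCSI */
        { 0x7d,  4 }, /* FDDI */
        { 0x7f, 13 }, /* Audio (Outfield) */
};

static int asp_irq(int sversion, int hw_path)
{
        unsigned i;

        if (sversion == 0x75)              /* RS232 depends on the hw path */
                return hw_path == 4 ? 5 : 6;
        for (i = 0; i < sizeof(asp_irqs) / sizeof(asp_irqs[0]); i++)
                if (asp_irqs[i].sversion == sversion)
                        return asp_irqs[i].irq;
        return -1;                         /* unknown device */
}

int main(void)
{
        printf("sversion 0x73 -> irq %d\n", asp_irq(0x73, 0));
        printf("sversion 0x75 path 4 -> irq %d\n", asp_irq(0x75, 4));
        return 0;
}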
0ida/coreboot
util/vgabios/int1a.c
31
5012
#include <stdio.h> #include "test.h" #include "pci-userspace.h" #define DEBUG_INT1A #define SUCCESSFUL 0x00 #define DEVICE_NOT_FOUND 0x86 #define BAD_REGISTER_NUMBER 0x87 void x86emu_dump_xregs(void); extern int verbose; int int1A_handler() { PCITAG tag; pciVideoPtr pvp = NULL; if (verbose) { printf("\nint1a encountered.\n"); //x86emu_dump_xregs(); } switch (X86_AX) { case 0xb101: X86_EAX = 0x00; /* no config space/special cycle support */ X86_AL = 0x01; /* config mechanism 1 */ X86_EDX = 0x20494350; /* " ICP" */ X86_EBX = 0x0210; /* Version 2.10 */ X86_ECX &= 0xFF00; X86_ECX |= (pciNumBuses & 0xFF); /* Max bus number in system */ X86_EFLAGS &= ~((unsigned long) 0x01); /* clear carry flag */ #ifdef DEBUG_INT1A if (verbose) printf("PCI bios present.\n"); #endif return 1; case 0xb102: if (X86_DX == pvp->vendor_id && X86_CX == pvp->device_id && X86_ESI == 0) { X86_EAX = X86_AL | (SUCCESSFUL << 8); X86_EFLAGS &= ~((unsigned long) 0x01); /* clear carry flag */ X86_EBX = pciSlotBX(pvp); } #ifdef SHOW_ALL_DEVICES else if ((pvp = xf86FindPciDeviceVendor(X86_EDX, X86_ECX, X86_ESI, pvp))) { X86_EAX = X86_AL | (SUCCESSFUL << 8); X86_EFLAGS &= ~((unsigned long) 0x01); /* clear carry flag */ X86_EBX = pciSlotBX(pvp); } #endif else { X86_EAX = X86_AL | (DEVICE_NOT_FOUND << 8); X86_EFLAGS |= ((unsigned long) 0x01); /* set carry flag */ } #ifdef DEBUG_INT1A printf("eax=0x%x ebx=0x%x eflags=0x%x\n", X86_EAX, X86_EBX, X86_EFLAGS); #endif return 1; case 0xb103: #if 0 if (X86_CL == pvp->interface && X86_CH == pvp->subclass && ((X86_ECX & 0xFFFF0000) >> 16) == pvp->class) { X86_EAX = X86_AL | (SUCCESSFUL << 8); X86_EBX = pciSlotBX(pvp); X86_EFLAGS &= ~((unsigned long) 0x01); /* clear carry flag */ } #else /* FIXME: dirty hack */ if (0); #endif #ifdef SHOW_ALL_DEVICES else if ((pvp = FindPciClass(X86_CL, X86_CH, (X86_ECX & 0xffff0000) >> 16, X86_ESI, pvp))) { X86_EAX = X86_AL | (SUCCESSFUL << 8); X86_EFLAGS &= ~((unsigned long) 0x01); /* clear carry flag */ X86_EBX = pciSlotBX(pvp); } #endif else { X86_EAX = X86_AL | (DEVICE_NOT_FOUND << 8); X86_EFLAGS |= ((unsigned long) 0x01); /* set carry flag */ } #ifdef DEBUG_INT1A printf("eax=0x%x eflags=0x%x\n", X86_EAX, X86_EFLAGS); #endif return 1; case 0xb108: if ((tag = findPci(X86_EBX))) { X86_CL = pciReadByte(tag, X86_EDI); X86_EAX = X86_AL | (SUCCESSFUL << 8); X86_EFLAGS &= ~((unsigned long) 0x01); /* clear carry flag */ } else { X86_EAX = X86_AL | (BAD_REGISTER_NUMBER << 8); X86_EFLAGS |= ((unsigned long) 0x01); /* set carry flag */ } #ifdef DEBUG_INT1A printf("eax=0x%x ecx=0x%x eflags=0x%x\n", X86_EAX, X86_ECX, X86_EFLAGS); #endif return 1; case 0xb109: if ((tag = findPci(X86_EBX))) { X86_CX = pciReadWord(tag, X86_EDI); X86_EAX = X86_AL | (SUCCESSFUL << 8); X86_EFLAGS &= ~((unsigned long) 0x01); /* clear carry flag */ } else { X86_EAX = X86_AL | (BAD_REGISTER_NUMBER << 8); X86_EFLAGS |= ((unsigned long) 0x01); /* set carry flag */ } #ifdef DEBUG_INT1A printf("eax=0x%x ecx=0x%x eflags=0x%x\n", X86_EAX, X86_ECX, X86_EFLAGS); #endif return 1; case 0xb10a: if ((tag = findPci(X86_EBX))) { X86_ECX = pciReadLong(tag, X86_EDI); X86_EAX = X86_AL | (SUCCESSFUL << 8); X86_EFLAGS &= ~((unsigned long) 0x01); /* clear carry flag */ } else { X86_EAX = X86_AL | (BAD_REGISTER_NUMBER << 8); X86_EFLAGS |= ((unsigned long) 0x01); /* set carry flag */ } #ifdef DEBUG_INT1A printf("eax=0x%x ecx=0x%x eflags=0x%x\n", X86_EAX, X86_ECX, X86_EFLAGS); #endif return 1; case 0xb10b: if ((tag = findPci(X86_EBX))) { pciWriteByte(tag, X86_EDI, X86_CL); X86_EAX = X86_AL | (SUCCESSFUL << 8); 
X86_EFLAGS &= ~((unsigned long) 0x01); /* clear carry flag */ } else { X86_EAX = X86_AL | (BAD_REGISTER_NUMBER << 8); X86_EFLAGS |= ((unsigned long) 0x01); /* set carry flag */ } #ifdef DEBUG_INT1A printf("eax=0x%x eflags=0x%x\n", X86_EAX, X86_EFLAGS); #endif return 1; case 0xb10c: if ((tag = findPci(X86_EBX))) { pciWriteWord(tag, X86_EDI, X86_CX); X86_EAX = X86_AL | (SUCCESSFUL << 8); X86_EFLAGS &= ~((unsigned long) 0x01); /* clear carry flag */ } else { X86_EAX = X86_AL | (BAD_REGISTER_NUMBER << 8); X86_EFLAGS |= ((unsigned long) 0x01); /* set carry flag */ } #ifdef DEBUG_INT1A printf("eax=0x%x eflags=0x%x\n", X86_EAX, X86_EFLAGS); #endif return 1; case 0xb10d: if ((tag = findPci(X86_EBX))) { pciWriteLong(tag, X86_EDI, X86_ECX); X86_EAX = X86_AL | (SUCCESSFUL << 8); X86_EFLAGS &= ~((unsigned long) 0x01); /* clear carry flag */ } else { X86_EAX = X86_AL | (BAD_REGISTER_NUMBER << 8); X86_EFLAGS |= ((unsigned long) 0x01); /* set carry flag */ } #ifdef DEBUG_INT1A printf("eax=0x%x eflags=0x%x\n", X86_EAX, X86_EFLAGS); #endif return 1; default: printf("int1a: subfunction not implemented.\n"); return 0; } }
gpl-2.0
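Every service in int1A_handler() above reports status the same way: AL is preserved, AH carries the PCI BIOS status code, and the carry flag in EFLAGS signals failure. (Also worth noting: the 0xb102 path compares against pvp->vendor_id while pvp is still NULL unless the SHOW_ALL_DEVICES branch assigns it, a pre-existing bug in this utility.) A small model of the return convention, with plain variables standing in for the emulator's X86_EAX/X86_EFLAGS:

#include <stdio.h>
#include <stdint.h>

#define SUCCESSFUL       0x00
#define DEVICE_NOT_FOUND 0x86
#define CARRY_FLAG       0x01u

static void pcibios_return(uint32_t *eax, uint32_t *eflags, int status)
{
        /* keep AL, put the status code in AH */
        *eax = (*eax & 0xff) | ((uint32_t)status << 8);
        if (status == SUCCESSFUL)
                *eflags &= ~CARRY_FLAG;    /* clear carry: success */
        else
                *eflags |= CARRY_FLAG;     /* set carry: failure */
}

int main(void)
{
        uint32_t eax = 0xb102, eflags = 0;

        pcibios_return(&eax, &eflags, DEVICE_NOT_FOUND);
        printf("eax=0x%x eflags=0x%x\n", eax, eflags);
        return 0;
}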
bloyl/fieldtrip
peer/src/localhost.c
31
2252
/* * Copyright (C) 2010, Robert Oostenveld * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/ * */ #include <stdio.h> #include <stdlib.h> #include "platform_includes.h" #include "peer.h" #include "extern.h" /* this function returns 1 if the IP address corresponds with the local host or 0 if the IP address cannot be associated with the local host */ int check_localhost(const char *ipaddr) { #if defined (PLATFORM_WIN32) || defined(PLATFORM_WIN64) return 0; #elif defined (PLATFORM_LINUX) || defined(PLATFORM_OSX) int family, s, found = 0; struct ifaddrs *ifaddr = NULL; struct ifaddrs *ifa; char host[NI_MAXHOST]; /* get the list with network interfaces */ if (getifaddrs(&ifaddr) == -1) { perror("getifaddrs"); DEBUG(LOG_ERR, "error: getifaddrs"); } /* walk through the linked list, maintaining head pointer so we can free list later */ for (ifa = ifaddr; ifa != NULL; ifa = ifa->ifa_next) { /* the following line fails when -fpack-struct is used during compilation */ family = ifa->ifa_addr->sa_family; if (family == AF_INET) s = getnameinfo(ifa->ifa_addr, sizeof(struct sockaddr_in ), host, NI_MAXHOST, NULL, 0, NI_NUMERICHOST); else if (family == AF_INET6) s = getnameinfo(ifa->ifa_addr, sizeof(struct sockaddr_in6), host, NI_MAXHOST, NULL, 0, NI_NUMERICHOST); else s = -1; /* compare this hosts address with the user-specified address */ found = (s==0) && (strcmp(host, ipaddr)==0); if (found) break; } /* for looping over list */ freeifaddrs(ifaddr); if (found) DEBUG(LOG_DEBUG, "localhost: <%s>", ipaddr); return found; #endif }
gpl-2.0
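check_localhost() above walks the getifaddrs() list and string-compares numeric addresses obtained via getnameinfo(). The same walk as a self-contained program that prints every local address (builds on Linux/OSX); one hedge: this version checks ifa_addr for NULL, which getifaddrs() can legitimately return and which the code above does not guard against:

#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <ifaddrs.h>
#include <netdb.h>

int main(void)
{
        struct ifaddrs *ifaddr, *ifa;
        char host[NI_MAXHOST];

        if (getifaddrs(&ifaddr) == -1) {
                perror("getifaddrs");
                return 1;
        }
        for (ifa = ifaddr; ifa != NULL; ifa = ifa->ifa_next) {
                int family;

                if (ifa->ifa_addr == NULL)  /* can happen, e.g. ppp links */
                        continue;
                family = ifa->ifa_addr->sa_family;
                if (family != AF_INET && family != AF_INET6)
                        continue;
                if (getnameinfo(ifa->ifa_addr,
                                family == AF_INET ? sizeof(struct sockaddr_in)
                                                  : sizeof(struct sockaddr_in6),
                                host, sizeof(host), NULL, 0,
                                NI_NUMERICHOST) == 0)
                        printf("%s: %s\n", ifa->ifa_name, host);
        }
        freeifaddrs(ifaddr);
        return 0;
}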
lisa-project/lisa-kernel
drivers/cpufreq/acpi-cpufreq.c
31
25917
/* * acpi-cpufreq.c - ACPI Processor P-States Driver * * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de> * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/smp.h> #include <linux/sched.h> #include <linux/cpufreq.h> #include <linux/compiler.h> #include <linux/dmi.h> #include <linux/slab.h> #include <linux/acpi.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/uaccess.h> #include <acpi/processor.h> #include <asm/msr.h> #include <asm/processor.h> #include <asm/cpufeature.h> #include "mperf.h" MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); MODULE_DESCRIPTION("ACPI Processor P-States Driver"); MODULE_LICENSE("GPL"); #define PFX "acpi-cpufreq: " enum { UNDEFINED_CAPABLE = 0, SYSTEM_INTEL_MSR_CAPABLE, SYSTEM_AMD_MSR_CAPABLE, SYSTEM_IO_CAPABLE, }; #define INTEL_MSR_RANGE (0xffff) #define AMD_MSR_RANGE (0x7) #define MSR_K7_HWCR_CPB_DIS (1ULL << 25) struct acpi_cpufreq_data { struct acpi_processor_performance *acpi_data; struct cpufreq_frequency_table *freq_table; unsigned int resume; unsigned int cpu_feature; }; static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data); /* acpi_perf_data is a pointer to percpu data. 
*/ static struct acpi_processor_performance __percpu *acpi_perf_data; static struct cpufreq_driver acpi_cpufreq_driver; static unsigned int acpi_pstate_strict; static bool boost_enabled, boost_supported; static struct msr __percpu *msrs; static bool boost_state(unsigned int cpu) { u32 lo, hi; u64 msr; switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_INTEL: rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi); msr = lo | ((u64)hi << 32); return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); case X86_VENDOR_AMD: rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi); msr = lo | ((u64)hi << 32); return !(msr & MSR_K7_HWCR_CPB_DIS); } return false; } static void boost_set_msrs(bool enable, const struct cpumask *cpumask) { u32 cpu; u32 msr_addr; u64 msr_mask; switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_INTEL: msr_addr = MSR_IA32_MISC_ENABLE; msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE; break; case X86_VENDOR_AMD: msr_addr = MSR_K7_HWCR; msr_mask = MSR_K7_HWCR_CPB_DIS; break; default: return; } rdmsr_on_cpus(cpumask, msr_addr, msrs); for_each_cpu(cpu, cpumask) { struct msr *reg = per_cpu_ptr(msrs, cpu); if (enable) reg->q &= ~msr_mask; else reg->q |= msr_mask; } wrmsr_on_cpus(cpumask, msr_addr, msrs); } static ssize_t _store_boost(const char *buf, size_t count) { int ret; unsigned long val = 0; if (!boost_supported) return -EINVAL; ret = kstrtoul(buf, 10, &val); if (ret || (val > 1)) return -EINVAL; if ((val && boost_enabled) || (!val && !boost_enabled)) return count; get_online_cpus(); boost_set_msrs(val, cpu_online_mask); put_online_cpus(); boost_enabled = val; pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis"); return count; } static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { return _store_boost(buf, count); } static ssize_t show_global_boost(struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%u\n", boost_enabled); } static struct global_attr global_boost = __ATTR(boost, 0644, show_global_boost, store_global_boost); #ifdef CONFIG_X86_ACPI_CPUFREQ_CPB static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf, size_t count) { return _store_boost(buf, count); } static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf) { return sprintf(buf, "%u\n", boost_enabled); } static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb); #endif static int check_est_cpu(unsigned int cpuid) { struct cpuinfo_x86 *cpu = &cpu_data(cpuid); return cpu_has(cpu, X86_FEATURE_EST); } static int check_amd_hwpstate_cpu(unsigned int cpuid) { struct cpuinfo_x86 *cpu = &cpu_data(cpuid); return cpu_has(cpu, X86_FEATURE_HW_PSTATE); } static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data) { struct acpi_processor_performance *perf; int i; perf = data->acpi_data; for (i = 0; i < perf->state_count; i++) { if (value == perf->states[i].status) return data->freq_table[i].frequency; } return 0; } static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) { int i; struct acpi_processor_performance *perf; if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) msr &= AMD_MSR_RANGE; else msr &= INTEL_MSR_RANGE; perf = data->acpi_data; for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { if (msr == perf->states[data->freq_table[i].index].status) return data->freq_table[i].frequency; } return data->freq_table[0].frequency; } static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data) { switch (data->cpu_feature) { case SYSTEM_INTEL_MSR_CAPABLE: case SYSTEM_AMD_MSR_CAPABLE: 
return extract_msr(val, data); case SYSTEM_IO_CAPABLE: return extract_io(val, data); default: return 0; } } struct msr_addr { u32 reg; }; struct io_addr { u16 port; u8 bit_width; }; struct drv_cmd { unsigned int type; const struct cpumask *mask; union { struct msr_addr msr; struct io_addr io; } addr; u32 val; }; /* Called via smp_call_function_single(), on the target CPU */ static void do_drv_read(void *_cmd) { struct drv_cmd *cmd = _cmd; u32 h; switch (cmd->type) { case SYSTEM_INTEL_MSR_CAPABLE: case SYSTEM_AMD_MSR_CAPABLE: rdmsr(cmd->addr.msr.reg, cmd->val, h); break; case SYSTEM_IO_CAPABLE: acpi_os_read_port((acpi_io_address)cmd->addr.io.port, &cmd->val, (u32)cmd->addr.io.bit_width); break; default: break; } } /* Called via smp_call_function_many(), on the target CPUs */ static void do_drv_write(void *_cmd) { struct drv_cmd *cmd = _cmd; u32 lo, hi; switch (cmd->type) { case SYSTEM_INTEL_MSR_CAPABLE: rdmsr(cmd->addr.msr.reg, lo, hi); lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE); wrmsr(cmd->addr.msr.reg, lo, hi); break; case SYSTEM_AMD_MSR_CAPABLE: wrmsr(cmd->addr.msr.reg, cmd->val, 0); break; case SYSTEM_IO_CAPABLE: acpi_os_write_port((acpi_io_address)cmd->addr.io.port, cmd->val, (u32)cmd->addr.io.bit_width); break; default: break; } } static void drv_read(struct drv_cmd *cmd) { int err; cmd->val = 0; err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1); WARN_ON_ONCE(err); /* smp_call_function_any() was buggy? */ } static void drv_write(struct drv_cmd *cmd) { int this_cpu; this_cpu = get_cpu(); if (cpumask_test_cpu(this_cpu, cmd->mask)) do_drv_write(cmd); smp_call_function_many(cmd->mask, do_drv_write, cmd, 1); put_cpu(); } static u32 get_cur_val(const struct cpumask *mask) { struct acpi_processor_performance *perf; struct drv_cmd cmd; if (unlikely(cpumask_empty(mask))) return 0; switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) { case SYSTEM_INTEL_MSR_CAPABLE: cmd.type = SYSTEM_INTEL_MSR_CAPABLE; cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; break; case SYSTEM_AMD_MSR_CAPABLE: cmd.type = SYSTEM_AMD_MSR_CAPABLE; cmd.addr.msr.reg = MSR_AMD_PERF_STATUS; break; case SYSTEM_IO_CAPABLE: cmd.type = SYSTEM_IO_CAPABLE; perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data; cmd.addr.io.port = perf->control_register.address; cmd.addr.io.bit_width = perf->control_register.bit_width; break; default: return 0; } cmd.mask = mask; drv_read(&cmd); pr_debug("get_cur_val = %u\n", cmd.val); return cmd.val; } static unsigned int get_cur_freq_on_cpu(unsigned int cpu) { struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu); unsigned int freq; unsigned int cached_freq; pr_debug("get_cur_freq_on_cpu (%d)\n", cpu); if (unlikely(data == NULL || data->acpi_data == NULL || data->freq_table == NULL)) { return 0; } cached_freq = data->freq_table[data->acpi_data->state].frequency; freq = extract_freq(get_cur_val(cpumask_of(cpu)), data); if (freq != cached_freq) { /* * The dreaded BIOS frequency change behind our back. * Force set the frequency on next target call. 
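* (Setting data->resume below forces the next acpi_cpufreq_target() call to rewrite the control register even when the cached P-state already matches the request.)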
*/ data->resume = 1; } pr_debug("cur freq = %u\n", freq); return freq; } static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq, struct acpi_cpufreq_data *data) { unsigned int cur_freq; unsigned int i; for (i = 0; i < 100; i++) { cur_freq = extract_freq(get_cur_val(mask), data); if (cur_freq == freq) return 1; udelay(10); } return 0; } static int acpi_cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); struct acpi_processor_performance *perf; struct cpufreq_freqs freqs; struct drv_cmd cmd; unsigned int next_state = 0; /* Index into freq_table */ unsigned int next_perf_state = 0; /* Index into perf table */ unsigned int i; int result = 0; pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); if (unlikely(data == NULL || data->acpi_data == NULL || data->freq_table == NULL)) { return -ENODEV; } perf = data->acpi_data; result = cpufreq_frequency_table_target(policy, data->freq_table, target_freq, relation, &next_state); if (unlikely(result)) { result = -ENODEV; goto out; } next_perf_state = data->freq_table[next_state].index; if (perf->state == next_perf_state) { if (unlikely(data->resume)) { pr_debug("Called after resume, resetting to P%d\n", next_perf_state); data->resume = 0; } else { pr_debug("Already at target state (P%d)\n", next_perf_state); goto out; } } switch (data->cpu_feature) { case SYSTEM_INTEL_MSR_CAPABLE: cmd.type = SYSTEM_INTEL_MSR_CAPABLE; cmd.addr.msr.reg = MSR_IA32_PERF_CTL; cmd.val = (u32) perf->states[next_perf_state].control; break; case SYSTEM_AMD_MSR_CAPABLE: cmd.type = SYSTEM_AMD_MSR_CAPABLE; cmd.addr.msr.reg = MSR_AMD_PERF_CTL; cmd.val = (u32) perf->states[next_perf_state].control; break; case SYSTEM_IO_CAPABLE: cmd.type = SYSTEM_IO_CAPABLE; cmd.addr.io.port = perf->control_register.address; cmd.addr.io.bit_width = perf->control_register.bit_width; cmd.val = (u32) perf->states[next_perf_state].control; break; default: result = -ENODEV; goto out; } /* cpufreq holds the hotplug lock, so we are safe from here on */ if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY) cmd.mask = policy->cpus; else cmd.mask = cpumask_of(policy->cpu); freqs.old = perf->states[perf->state].core_frequency * 1000; freqs.new = data->freq_table[next_state].frequency; for_each_cpu(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } drv_write(&cmd); if (acpi_pstate_strict) { if (!check_freqs(cmd.mask, freqs.new, data)) { pr_debug("acpi_cpufreq_target failed (%d)\n", policy->cpu); result = -EAGAIN; goto out; } } for_each_cpu(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } perf->state = next_perf_state; out: return result; } static int acpi_cpufreq_verify(struct cpufreq_policy *policy) { struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); pr_debug("acpi_cpufreq_verify\n"); return cpufreq_frequency_table_verify(policy, data->freq_table); } static unsigned long acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) { struct acpi_processor_performance *perf = data->acpi_data; if (cpu_khz) { /* search the closest match to cpu_khz */ unsigned int i; unsigned long freq; unsigned long freqn = perf->states[0].core_frequency * 1000; for (i = 0; i < (perf->state_count-1); i++) { freq = freqn; freqn = perf->states[i+1].core_frequency * 1000; if ((2 * cpu_khz) > (freqn + freq)) { perf->state = i; return freq; } } perf->state = perf->state_count-1; 
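/* cpu_khz fell below the midpoint of every adjacent pair of (descending) table entries, so the lowest P-state is the closest match. */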
return freqn; } else { /* assume CPU is at P0... */ perf->state = 0; return perf->states[0].core_frequency * 1000; } } static void free_acpi_perf_data(void) { unsigned int i; /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */ for_each_possible_cpu(i) free_cpumask_var(per_cpu_ptr(acpi_perf_data, i) ->shared_cpu_map); free_percpu(acpi_perf_data); } static int boost_notify(struct notifier_block *nb, unsigned long action, void *hcpu) { unsigned cpu = (long)hcpu; const struct cpumask *cpumask; cpumask = get_cpu_mask(cpu); /* * Clear the boost-disable bit on the CPU_DOWN path so that * this cpu cannot block the remaining ones from boosting. On * the CPU_UP path we simply keep the boost-disable flag in * sync with the current global state. */ switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: boost_set_msrs(boost_enabled, cpumask); break; case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: boost_set_msrs(1, cpumask); break; default: break; } return NOTIFY_OK; } static struct notifier_block boost_nb = { .notifier_call = boost_notify, }; /* * acpi_cpufreq_early_init - initialize ACPI P-States library * * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c) * in order to determine correct frequency and voltage pairings. We can * do _PDC and _PSD and find out the processor dependency for the * actual init that will happen later... */ static int __init acpi_cpufreq_early_init(void) { unsigned int i; pr_debug("acpi_cpufreq_early_init\n"); acpi_perf_data = alloc_percpu(struct acpi_processor_performance); if (!acpi_perf_data) { pr_debug("Memory allocation error for acpi_perf_data.\n"); return -ENOMEM; } for_each_possible_cpu(i) { if (!zalloc_cpumask_var_node( &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, GFP_KERNEL, cpu_to_node(i))) { /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */ free_acpi_perf_data(); return -ENOMEM; } } /* Do initialization in ACPI core */ acpi_processor_preregister_performance(acpi_perf_data); return 0; } #ifdef CONFIG_SMP /* * Some BIOSes do SW_ANY coordination internally, either set it up in hw * or do it in BIOS firmware and won't inform about it to OS. If not * detected, this has a side effect of making CPU run at a different speed * than OS intended it to run at. Detect it and handle it cleanly. */ static int bios_with_sw_any_bug; static int sw_any_bug_found(const struct dmi_system_id *d) { bios_with_sw_any_bug = 1; return 0; } static const struct dmi_system_id sw_any_bug_dmi_table[] = { { .callback = sw_any_bug_found, .ident = "Supermicro Server X6DLP", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), DMI_MATCH(DMI_BIOS_VERSION, "080010"), DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"), }, }, { } }; static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) { /* Intel Xeon Processor 7100 Series Specification Update * http://www.intel.com/Assets/PDF/specupdate/314554.pdf * AL30: A Machine Check Exception (MCE) Occurring during an * Enhanced Intel SpeedStep Technology Ratio Change May Cause * Both Processor Cores to Lock Up. 
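* The check below matches family 15, model 6, stepping 8 parts and refuses to load the driver on them.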
*/ if (c->x86_vendor == X86_VENDOR_INTEL) { if ((c->x86 == 15) && (c->x86_model == 6) && (c->x86_mask == 8)) { printk(KERN_INFO "acpi-cpufreq: Intel(R) " "Xeon(R) 7100 Errata AL30, processors may " "lock up on frequency changes: disabling " "acpi-cpufreq.\n"); return -ENODEV; } } return 0; } #endif static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) { unsigned int i; unsigned int valid_states = 0; unsigned int cpu = policy->cpu; struct acpi_cpufreq_data *data; unsigned int result = 0; struct cpuinfo_x86 *c = &cpu_data(policy->cpu); struct acpi_processor_performance *perf; #ifdef CONFIG_SMP static int blacklisted; #endif pr_debug("acpi_cpufreq_cpu_init\n"); #ifdef CONFIG_SMP if (blacklisted) return blacklisted; blacklisted = acpi_cpufreq_blacklist(c); if (blacklisted) return blacklisted; #endif data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL); if (!data) return -ENOMEM; data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu); per_cpu(acfreq_data, cpu) = data; if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; result = acpi_processor_register_performance(data->acpi_data, cpu); if (result) goto err_free; perf = data->acpi_data; policy->shared_type = perf->shared_type; /* * Will let policy->cpus know about dependency only when software * coordination is required. */ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { cpumask_copy(policy->cpus, perf->shared_cpu_map); } #ifdef CONFIG_SMP dmi_check_system(sw_any_bug_dmi_table); if (bios_with_sw_any_bug && !policy_is_shared(policy)) { policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; cpumask_copy(policy->cpus, cpu_core_mask(cpu)); } if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) { cpumask_clear(policy->cpus); cpumask_set_cpu(cpu, policy->cpus); policy->shared_type = CPUFREQ_SHARED_TYPE_HW; pr_info_once(PFX "overriding BIOS provided _PSD data\n"); } #endif /* capability check */ if (perf->state_count <= 1) { pr_debug("No P-States\n"); result = -ENODEV; goto err_unreg; } if (perf->control_register.space_id != perf->status_register.space_id) { result = -ENODEV; goto err_unreg; } switch (perf->control_register.space_id) { case ACPI_ADR_SPACE_SYSTEM_IO: if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && boot_cpu_data.x86 == 0xf) { pr_debug("AMD K8 systems must use native drivers.\n"); result = -ENODEV; goto err_unreg; } pr_debug("SYSTEM IO addr space\n"); data->cpu_feature = SYSTEM_IO_CAPABLE; break; case ACPI_ADR_SPACE_FIXED_HARDWARE: pr_debug("HARDWARE addr space\n"); if (check_est_cpu(cpu)) { data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE; break; } if (check_amd_hwpstate_cpu(cpu)) { data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE; break; } result = -ENODEV; goto err_unreg; default: pr_debug("Unknown addr space %d\n", (u32) (perf->control_register.space_id)); result = -ENODEV; goto err_unreg; } data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (perf->state_count+1), GFP_KERNEL); if (!data->freq_table) { result = -ENOMEM; goto err_unreg; } /* detect transition latency */ policy->cpuinfo.transition_latency = 0; for (i = 0; i < perf->state_count; i++) { if ((perf->states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency) policy->cpuinfo.transition_latency = perf->states[i].transition_latency * 1000; } /* Check for high latency (>20uS) from buggy BIOSes, like on T42 */ if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE && policy->cpuinfo.transition_latency > 20 * 1000) { 
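/* Cap it: on FIXED_HARDWARE systems a latency above 20 uS is taken to be bogus BIOS data rather than a real hardware limit. */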
policy->cpuinfo.transition_latency = 20 * 1000; printk_once(KERN_INFO "P-state transition latency capped at 20 uS\n"); } /* table init */ for (i = 0; i < perf->state_count; i++) { if (i > 0 && perf->states[i].core_frequency >= data->freq_table[valid_states-1].frequency / 1000) continue; data->freq_table[valid_states].index = i; data->freq_table[valid_states].frequency = perf->states[i].core_frequency * 1000; valid_states++; } data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END; perf->state = 0; result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); if (result) goto err_freqfree; if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq) printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n"); switch (perf->control_register.space_id) { case ACPI_ADR_SPACE_SYSTEM_IO: /* Current speed is unknown and not detectable by IO port */ policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu); break; case ACPI_ADR_SPACE_FIXED_HARDWARE: acpi_cpufreq_driver.get = get_cur_freq_on_cpu; policy->cur = get_cur_freq_on_cpu(cpu); break; default: break; } /* notify BIOS that we exist */ acpi_processor_notify_smm(THIS_MODULE); /* Check for APERF/MPERF support in hardware */ if (boot_cpu_has(X86_FEATURE_APERFMPERF)) acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf; pr_debug("CPU%u - ACPI performance management activated.\n", cpu); for (i = 0; i < perf->state_count; i++) pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n", (i == perf->state ? '*' : ' '), i, (u32) perf->states[i].core_frequency, (u32) perf->states[i].power, (u32) perf->states[i].transition_latency); cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); /* * the first call to ->target() should result in us actually * writing something to the appropriate registers. */ data->resume = 1; return result; err_freqfree: kfree(data->freq_table); err_unreg: acpi_processor_unregister_performance(perf, cpu); err_free: kfree(data); per_cpu(acfreq_data, cpu) = NULL; return result; } static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) { struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); pr_debug("acpi_cpufreq_cpu_exit\n"); if (data) { cpufreq_frequency_table_put_attr(policy->cpu); per_cpu(acfreq_data, policy->cpu) = NULL; acpi_processor_unregister_performance(data->acpi_data, policy->cpu); kfree(data->freq_table); kfree(data); } return 0; } static int acpi_cpufreq_resume(struct cpufreq_policy *policy) { struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); pr_debug("acpi_cpufreq_resume\n"); data->resume = 1; return 0; } static struct freq_attr *acpi_cpufreq_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, /* this is a placeholder for cpb, do not remove */ NULL, }; static struct cpufreq_driver acpi_cpufreq_driver = { .verify = acpi_cpufreq_verify, .target = acpi_cpufreq_target, .bios_limit = acpi_processor_get_bios_limit, .init = acpi_cpufreq_cpu_init, .exit = acpi_cpufreq_cpu_exit, .resume = acpi_cpufreq_resume, .name = "acpi-cpufreq", .owner = THIS_MODULE, .attr = acpi_cpufreq_attr, }; static void __init acpi_cpufreq_boost_init(void) { if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) { msrs = msrs_alloc(); if (!msrs) return; boost_supported = true; boost_enabled = boost_state(0); get_online_cpus(); /* Force all MSRs to the same value */ boost_set_msrs(boost_enabled, cpu_online_mask); register_cpu_notifier(&boost_nb); put_online_cpus(); } else global_boost.attr.mode = 0444; /* We create the boost file in any case, though for systems without * 
hardware support it will be read-only and hardwired to return 0. */ if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr))) pr_warn(PFX "could not register global boost sysfs file\n"); else pr_debug("registered global boost sysfs file\n"); } static void __exit acpi_cpufreq_boost_exit(void) { sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr)); if (msrs) { unregister_cpu_notifier(&boost_nb); msrs_free(msrs); msrs = NULL; } } static int __init acpi_cpufreq_init(void) { int ret; if (acpi_disabled) return 0; pr_debug("acpi_cpufreq_init\n"); ret = acpi_cpufreq_early_init(); if (ret) return ret; #ifdef CONFIG_X86_ACPI_CPUFREQ_CPB /* this is a sysfs file with a strange name and an even stranger * semantic - per CPU instantiation, but system global effect. * Lets enable it only on AMD CPUs for compatibility reasons and * only if configured. This is considered legacy code, which * will probably be removed at some point in the future. */ if (check_amd_hwpstate_cpu(0)) { struct freq_attr **iter; pr_debug("adding sysfs entry for cpb\n"); for (iter = acpi_cpufreq_attr; *iter != NULL; iter++) ; /* make sure there is a terminator behind it */ if (iter[1] == NULL) *iter = &cpb; } #endif ret = cpufreq_register_driver(&acpi_cpufreq_driver); if (ret) free_acpi_perf_data(); else acpi_cpufreq_boost_init(); return ret; } static void __exit acpi_cpufreq_exit(void) { pr_debug("acpi_cpufreq_exit\n"); acpi_cpufreq_boost_exit(); cpufreq_unregister_driver(&acpi_cpufreq_driver); free_acpi_perf_data(); } module_param(acpi_pstate_strict, uint, 0644); MODULE_PARM_DESC(acpi_pstate_strict, "value 0 or non-zero. non-zero -> strict ACPI checks are " "performed during frequency changes."); late_initcall(acpi_cpufreq_init); module_exit(acpi_cpufreq_exit); static const struct x86_cpu_id acpi_cpufreq_ids[] = { X86_FEATURE_MATCH(X86_FEATURE_ACPI), X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE), {} }; MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids); MODULE_ALIAS("acpi");
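For reference, the global "boost" attribute registered above can be flipped from userspace. A minimal sketch, assuming the attribute appears at the usual cpufreq global-kobject path (/sys/devices/system/cpu/cpufreq/boost); _store_boost() accepts only 0 or 1, and on hardware without boost support the file is created read-only (mode 0444):

#include <stdio.h>

#define BOOST_PATH "/sys/devices/system/cpu/cpufreq/boost"	/* assumed sysfs location */

int main(int argc, char **argv)
{
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s 0|1\n", argv[0]);
		return 1;
	}
	f = fopen(BOOST_PATH, "w");
	if (!f) {
		perror(BOOST_PATH);	/* fails when the file is read-only (no boost support) */
		return 1;
	}
	fprintf(f, "%s\n", argv[1]);	/* _store_boost() rejects anything but 0 or 1 */
	return fclose(f) ? 1 : 0;
}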
gpl-2.0
tntdynamight/fieldtrip
external/dmlt/external/gpstuff/SuiteSparse/KLU/Demo/kluldemo.c
31
11396
/* ========================================================================== */ /* === KLU DEMO (long integer version) ====================================== */ /* ========================================================================== */ /* Read in a Matrix Market matrix (using CHOLMOD) and solve a linear system. * UF_long is normally a "long", but it becomes "_int64" on Windows 64. */ #include <math.h> #include <stdio.h> #include "klu.h" /* for handling complex matrices */ #define REAL(X,i) (X [2*(i)]) #define IMAG(X,i) (X [2*(i)+1]) #define CABS(X,i) (sqrt (REAL (X,i) * REAL (X,i) + IMAG (X,i) * IMAG (X,i))) #define MAX(a,b) (((a) > (b)) ? (a) : (b)) /* ========================================================================== */ /* === klu_l_backslash ====================================================== */ /* ========================================================================== */ static UF_long klu_l_backslash /* return 1 if successful, 0 otherwise */ ( /* --- input ---- */ UF_long n, /* A is n-by-n */ UF_long *Ap, /* size n+1, column pointers */ UF_long *Ai, /* size nz = Ap [n], row indices */ double *Ax, /* size nz, numerical values */ UF_long isreal, /* nonzero if A is real, 0 otherwise */ double *B, /* size n, right-hand-side */ /* --- output ---- */ double *X, /* size n, solution to Ax=b */ double *R, /* size n, residual r = b-A*x */ /* --- scalar output --- */ UF_long *lunz, /* nnz (L+U+F) */ double *rnorm, /* norm (b-A*x,1) / norm (A,1) */ /* --- workspace - */ klu_l_common *Common /* default parameters and statistics */ ) { double anorm = 0, asum ; klu_l_symbolic *Symbolic ; klu_l_numeric *Numeric ; UF_long i, j, p ; if (!Ap || !Ai || !Ax || !B || !X || !R) return (0) ; /* ---------------------------------------------------------------------- */ /* symbolic ordering and analysis */ /* ---------------------------------------------------------------------- */ Symbolic = klu_l_analyze (n, Ap, Ai, Common) ; if (!Symbolic) return (0) ; if (isreal) { /* ------------------------------------------------------------------ */ /* factorization */ /* ------------------------------------------------------------------ */ Numeric = klu_l_factor (Ap, Ai, Ax, Symbolic, Common) ; if (!Numeric) { klu_l_free_symbolic (&Symbolic, Common) ; return (0) ; } /* ------------------------------------------------------------------ */ /* statistics (not required to solve Ax=b) */ /* ------------------------------------------------------------------ */ klu_l_rgrowth (Ap, Ai, Ax, Symbolic, Numeric, Common) ; klu_l_condest (Ap, Ax, Symbolic, Numeric, Common) ; klu_l_rcond (Symbolic, Numeric, Common) ; klu_l_flops (Symbolic, Numeric, Common) ; *lunz = Numeric->lnz + Numeric->unz - n + ((Numeric->Offp) ?
(Numeric->Offp [n]) : 0) ; /* ------------------------------------------------------------------ */ /* solve Ax=b */ /* ------------------------------------------------------------------ */ for (i = 0 ; i < n ; i++) { X [i] = B [i] ; } klu_l_solve (Symbolic, Numeric, n, 1, X, Common) ; /* ------------------------------------------------------------------ */ /* compute residual, rnorm = norm(b-Ax,1) / norm(A,1) */ /* ------------------------------------------------------------------ */ for (i = 0 ; i < n ; i++) { R [i] = B [i] ; } for (j = 0 ; j < n ; j++) { asum = 0 ; for (p = Ap [j] ; p < Ap [j+1] ; p++) { /* R (i) -= A (i,j) * X (j) */ R [Ai [p]] -= Ax [p] * X [j] ; asum += fabs (Ax [p]) ; } anorm = MAX (anorm, asum) ; } *rnorm = 0 ; for (i = 0 ; i < n ; i++) { *rnorm = MAX (*rnorm, fabs (R [i])) ; } /* ------------------------------------------------------------------ */ /* free numeric factorization */ /* ------------------------------------------------------------------ */ klu_l_free_numeric (&Numeric, Common) ; } else { /* ------------------------------------------------------------------ */ /* factorization */ /* ------------------------------------------------------------------ */ Numeric = klu_zl_factor (Ap, Ai, Ax, Symbolic, Common) ; if (!Numeric) { klu_l_free_symbolic (&Symbolic, Common) ; return (0) ; } /* ------------------------------------------------------------------ */ /* statistics (not required to solve Ax=b) */ /* ------------------------------------------------------------------ */ klu_zl_rgrowth (Ap, Ai, Ax, Symbolic, Numeric, Common) ; klu_zl_condest (Ap, Ax, Symbolic, Numeric, Common) ; klu_zl_rcond (Symbolic, Numeric, Common) ; klu_zl_flops (Symbolic, Numeric, Common) ; *lunz = Numeric->lnz + Numeric->unz - n + ((Numeric->Offp) ?
(Numeric->Offp [n]) : 0) ; /* ------------------------------------------------------------------ */ /* solve Ax=b */ /* ------------------------------------------------------------------ */ for (i = 0 ; i < 2*n ; i++) { X [i] = B [i] ; } klu_zl_solve (Symbolic, Numeric, n, 1, X, Common) ; /* ------------------------------------------------------------------ */ /* compute residual, rnorm = norm(b-Ax,1) / norm(A,1) */ /* ------------------------------------------------------------------ */ for (i = 0 ; i < 2*n ; i++) { R [i] = B [i] ; } for (j = 0 ; j < n ; j++) { asum = 0 ; for (p = Ap [j] ; p < Ap [j+1] ; p++) { /* R (i) -= A (i,j) * X (j) */ i = Ai [p] ; REAL (R,i) -= REAL(Ax,p) * REAL(X,j) - IMAG(Ax,p) * IMAG(X,j) ; IMAG (R,i) -= IMAG(Ax,p) * REAL(X,j) + REAL(Ax,p) * IMAG(X,j) ; asum += CABS (Ax, p) ; } anorm = MAX (anorm, asum) ; } *rnorm = 0 ; for (i = 0 ; i < n ; i++) { *rnorm = MAX (*rnorm, CABS (R, i)) ; } /* ------------------------------------------------------------------ */ /* free numeric factorization */ /* ------------------------------------------------------------------ */ klu_zl_free_numeric (&Numeric, Common) ; } /* ---------------------------------------------------------------------- */ /* free symbolic analysis, and residual */ /* ---------------------------------------------------------------------- */ klu_l_free_symbolic (&Symbolic, Common) ; return (1) ; } /* ========================================================================== */ /* === klu_l_demo =========================================================== */ /* ========================================================================== */ /* Given a sparse matrix A, set up a right-hand-side and solve X = A\b */ static void klu_l_demo (UF_long n, UF_long *Ap, UF_long *Ai, double *Ax, UF_long isreal) { double rnorm ; klu_l_common Common ; double *B, *X, *R ; UF_long i, lunz ; printf ("KLU: %s, version: %d.%d.%d\n", KLU_DATE, KLU_MAIN_VERSION, KLU_SUB_VERSION, KLU_SUBSUB_VERSION) ; /* ---------------------------------------------------------------------- */ /* set defaults */ /* ---------------------------------------------------------------------- */ klu_l_defaults (&Common) ; /* ---------------------------------------------------------------------- */ /* create a right-hand-side */ /* ---------------------------------------------------------------------- */ if (isreal) { /* B = 1 + (1:n)/n */ B = klu_l_malloc (n, sizeof (double), &Common) ; X = klu_l_malloc (n, sizeof (double), &Common) ; R = klu_l_malloc (n, sizeof (double), &Common) ; if (B) { for (i = 0 ; i < n ; i++) { B [i] = 1 + ((double) i+1) / ((double) n) ; } } } else { /* real (B) = 1 + (1:n)/n, imag(B) = (n:-1:1)/n */ B = klu_l_malloc (n, 2 * sizeof (double), &Common) ; X = klu_l_malloc (n, 2 * sizeof (double), &Common) ; R = klu_l_malloc (n, 2 * sizeof (double), &Common) ; if (B) { for (i = 0 ; i < n ; i++) { REAL (B, i) = 1 + ((double) i+1) / ((double) n) ; IMAG (B, i) = ((double) n-i) / ((double) n) ; } } } /* ---------------------------------------------------------------------- */ /* X = A\b using KLU and print statistics */ /* ---------------------------------------------------------------------- */ if (!klu_l_backslash (n, Ap, Ai, Ax, isreal, B, X, R, &lunz, &rnorm, &Common)) { printf ("KLU failed\n") ; } else { printf ("n %ld nnz(A) %ld nnz(L+U+F) %ld resid %g\n" "recip growth %g condest %g rcond %g flops %g\n", n, Ap [n], lunz, rnorm, Common.rgrowth, Common.condest, Common.rcond, Common.flops) ; } /* 
---------------------------------------------------------------------- */ /* free the problem */ /* ---------------------------------------------------------------------- */ if (isreal) { klu_l_free (B, n, sizeof (double), &Common) ; klu_l_free (X, n, sizeof (double), &Common) ; klu_l_free (R, n, sizeof (double), &Common) ; } else { klu_l_free (B, 2*n, sizeof (double), &Common) ; klu_l_free (X, 2*n, sizeof (double), &Common) ; klu_l_free (R, 2*n, sizeof (double), &Common) ; } printf ("peak memory usage: %g bytes\n\n", (double) (Common.mempeak)) ; } /* ========================================================================== */ /* === main ================================================================= */ /* ========================================================================== */ /* Read in a sparse matrix in Matrix Market format using CHOLMOD, and then * solve Ax=b with KLU. Note that CHOLMOD is only used to read the matrix. */ #include "cholmod.h" int main (void) { cholmod_sparse *A ; cholmod_common ch ; cholmod_l_start (&ch) ; A = cholmod_l_read_sparse (stdin, &ch) ; if (A) { if (A->nrow != A->ncol || A->stype != 0 || (!(A->xtype == CHOLMOD_REAL || A->xtype == CHOLMOD_COMPLEX))) { printf ("invalid matrix\n") ; } else { klu_l_demo (A->nrow, A->p, A->i, A->x, A->xtype == CHOLMOD_REAL) ; } cholmod_l_free_sparse (&A, &ch) ; } cholmod_l_finish (&ch) ; return (0) ; }
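The demo above depends on CHOLMOD only to read Matrix Market input. For a self-contained illustration of the same klu_l_* call sequence, here is a sketch that factors and solves the small 5-by-5 system used by SuiteSparse's klu_simple example (expected solution x = 1..5); error handling is trimmed for brevity and the matrix values are purely illustrative:

#include <stdio.h>
#include "klu.h"

static UF_long n = 5 ;
static UF_long Ap [ ] = {0, 2, 5, 9, 10, 12} ;
static UF_long Ai [ ] = {0, 1, 0, 2, 4, 1, 2, 3, 4, 2, 1, 4} ;
static double Ax [ ] = {2., 3., 3., -1., 4., 4., -3., 1., 2., 2., 6., 1.} ;
static double b [ ] = {8., 45., -3., 3., 19.} ;

int main (void)
{
    klu_l_symbolic *Symbolic ;
    klu_l_numeric *Numeric ;
    klu_l_common Common ;
    UF_long i ;

    klu_l_defaults (&Common) ;
    Symbolic = klu_l_analyze (n, Ap, Ai, &Common) ;
    Numeric = klu_l_factor (Ap, Ai, Ax, Symbolic, &Common) ;
    klu_l_solve (Symbolic, Numeric, n, 1, b, &Common) ;	/* b is overwritten with x */
    klu_l_free_symbolic (&Symbolic, &Common) ;
    klu_l_free_numeric (&Numeric, &Common) ;
    for (i = 0 ; i < n ; i++) printf ("x [%ld] = %g\n", (long) i, b [i]) ;
    return (0) ;
}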
gpl-2.0
hroark13/Z750C_2_WARP
drivers/media/video/msm/csi/msm_csiphy.c
287
11173
/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/delay.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/module.h> #include <mach/board.h> #include <mach/camera.h> #include <mach/vreg.h> #include <media/msm_isp.h> #include "msm_csiphy.h" #include "msm.h" #define DBG_CSIPHY 0 #define V4L2_IDENT_CSIPHY 50003 /*MIPI CSI PHY registers*/ #define MIPI_CSIPHY_LNn_CFG1_ADDR 0x0 #define MIPI_CSIPHY_LNn_CFG2_ADDR 0x4 #define MIPI_CSIPHY_LNn_CFG3_ADDR 0x8 #define MIPI_CSIPHY_LNn_CFG4_ADDR 0xC #define MIPI_CSIPHY_LNn_CFG5_ADDR 0x10 #define MIPI_CSIPHY_LNCK_CFG1_ADDR 0x100 #define MIPI_CSIPHY_LNCK_CFG2_ADDR 0x104 #define MIPI_CSIPHY_LNCK_CFG3_ADDR 0x108 #define MIPI_CSIPHY_LNCK_CFG4_ADDR 0x10C #define MIPI_CSIPHY_LNCK_CFG5_ADDR 0x110 #define MIPI_CSIPHY_LNCK_MISC1_ADDR 0x128 #define MIPI_CSIPHY_GLBL_T_INIT_CFG0_ADDR 0x1E0 #define MIPI_CSIPHY_T_WAKEUP_CFG0_ADDR 0x1E8 #define MIPI_CSIPHY_T_WAKEUP_CFG1_ADDR 0x1EC #define MIPI_CSIPHY_GLBL_RESET_ADDR 0x0140 #define MIPI_CSIPHY_GLBL_PWR_CFG_ADDR 0x0144 #define MIPI_CSIPHY_INTERRUPT_STATUS0_ADDR 0x0180 #define MIPI_CSIPHY_INTERRUPT_STATUS1_ADDR 0x0184 #define MIPI_CSIPHY_INTERRUPT_STATUS2_ADDR 0x0188 #define MIPI_CSIPHY_INTERRUPT_STATUS3_ADDR 0x018C #define MIPI_CSIPHY_INTERRUPT_STATUS4_ADDR 0x0190 #define MIPI_CSIPHY_INTERRUPT_MASK0_ADDR 0x01A0 #define MIPI_CSIPHY_INTERRUPT_MASK1_ADDR 0x01A4 #define MIPI_CSIPHY_INTERRUPT_MASK2_ADDR 0x01A8 #define MIPI_CSIPHY_INTERRUPT_MASK3_ADDR 0x01AC #define MIPI_CSIPHY_INTERRUPT_MASK4_ADDR 0x01B0 #define MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR 0x01C0 #define MIPI_CSIPHY_INTERRUPT_CLEAR1_ADDR 0x01C4 #define MIPI_CSIPHY_INTERRUPT_CLEAR2_ADDR 0x01C8 #define MIPI_CSIPHY_INTERRUPT_CLEAR3_ADDR 0x01CC #define MIPI_CSIPHY_INTERRUPT_CLEAR4_ADDR 0x01D0 int msm_csiphy_config(struct csiphy_cfg_params *cfg_params) { int rc = 0; int j = 0; uint32_t val = 0; uint8_t lane_cnt = 0, lane_mask = 0; struct csiphy_device *csiphy_dev; struct msm_camera_csiphy_params *csiphy_params; void __iomem *csiphybase; csiphy_dev = v4l2_get_subdevdata(cfg_params->subdev); csiphybase = csiphy_dev->base; if (csiphybase == NULL) return -ENOMEM; csiphy_params = cfg_params->parms; lane_mask = csiphy_params->lane_mask; lane_cnt = csiphy_params->lane_cnt; if (csiphy_params->lane_cnt < 1 || csiphy_params->lane_cnt > 4) { CDBG("%s: unsupported lane cnt %d\n", __func__, csiphy_params->lane_cnt); return rc; } val = 0x3; msm_camera_io_w((csiphy_params->lane_mask << 2) | val, csiphybase + MIPI_CSIPHY_GLBL_PWR_CFG_ADDR); msm_camera_io_w(0x1, csiphybase + MIPI_CSIPHY_GLBL_T_INIT_CFG0_ADDR); msm_camera_io_w(0x1, csiphybase + MIPI_CSIPHY_T_WAKEUP_CFG0_ADDR); while (lane_mask & 0xf) { if (!(lane_mask & 0x1)) { j++; lane_mask >>= 1; continue; } msm_camera_io_w(0x10, csiphybase + MIPI_CSIPHY_LNn_CFG2_ADDR + 0x40*j); msm_camera_io_w(csiphy_params->settle_cnt, csiphybase + MIPI_CSIPHY_LNn_CFG3_ADDR + 0x40*j); msm_camera_io_w(0x6F, csiphybase + MIPI_CSIPHY_INTERRUPT_MASK0_ADDR + 0x4*(j+1)); msm_camera_io_w(0x6F, csiphybase + MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR + 0x4*(j+1)); j++; lane_mask >>= 1; 
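/* Per-lane register blocks are 0x40 apart; the interrupt mask/clear slots are 4 bytes apart, with slot 0 reserved for the clock lane, which is programmed right after this loop. */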
} msm_camera_io_w(0x10, csiphybase + MIPI_CSIPHY_LNCK_CFG2_ADDR); msm_camera_io_w(csiphy_params->settle_cnt, csiphybase + MIPI_CSIPHY_LNCK_CFG3_ADDR); msm_camera_io_w(0x24, csiphybase + MIPI_CSIPHY_INTERRUPT_MASK0_ADDR); msm_camera_io_w(0x24, csiphybase + MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR); return rc; } static irqreturn_t msm_csiphy_irq(int irq_num, void *data) { uint32_t irq; struct csiphy_device *csiphy_dev = data; irq = msm_camera_io_r( csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_STATUS0_ADDR); msm_camera_io_w(irq, csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR); CDBG("%s MIPI_CSIPHY%d_INTERRUPT_STATUS0 = 0x%x\n", __func__, csiphy_dev->pdev->id, irq); irq = msm_camera_io_r( csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_STATUS1_ADDR); msm_camera_io_w(irq, csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_CLEAR1_ADDR); CDBG("%s MIPI_CSIPHY%d_INTERRUPT_STATUS1 = 0x%x\n", __func__, csiphy_dev->pdev->id, irq); irq = msm_camera_io_r( csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_STATUS2_ADDR); msm_camera_io_w(irq, csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_CLEAR2_ADDR); CDBG("%s MIPI_CSIPHY%d_INTERRUPT_STATUS2 = 0x%x\n", __func__, csiphy_dev->pdev->id, irq); irq = msm_camera_io_r( csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_STATUS3_ADDR); msm_camera_io_w(irq, csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_CLEAR3_ADDR); CDBG("%s MIPI_CSIPHY%d_INTERRUPT_STATUS3 = 0x%x\n", __func__, csiphy_dev->pdev->id, irq); irq = msm_camera_io_r( csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_STATUS4_ADDR); msm_camera_io_w(irq, csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_CLEAR4_ADDR); CDBG("%s MIPI_CSIPHY%d_INTERRUPT_STATUS4 = 0x%x\n", __func__, csiphy_dev->pdev->id, irq); msm_camera_io_w(0x1, csiphy_dev->base + 0x164); msm_camera_io_w(0x0, csiphy_dev->base + 0x164); return IRQ_HANDLED; } static void msm_csiphy_reset(struct csiphy_device *csiphy_dev) { msm_camera_io_w(0x1, csiphy_dev->base + MIPI_CSIPHY_GLBL_RESET_ADDR); usleep_range(5000, 8000); msm_camera_io_w(0x0, csiphy_dev->base + MIPI_CSIPHY_GLBL_RESET_ADDR); } static int msm_csiphy_subdev_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { BUG_ON(!chip); chip->ident = V4L2_IDENT_CSIPHY; chip->revision = 0; return 0; } static struct msm_cam_clk_info csiphy_clk_info[] = { {"csiphy_timer_src_clk", 177780000}, {"csiphy_timer_clk", -1}, }; static int msm_csiphy_init(struct v4l2_subdev *sd) { int rc = 0; struct csiphy_device *csiphy_dev; csiphy_dev = v4l2_get_subdevdata(sd); if (csiphy_dev == NULL) { rc = -ENOMEM; return rc; } csiphy_dev->base = ioremap(csiphy_dev->mem->start, resource_size(csiphy_dev->mem)); if (!csiphy_dev->base) { rc = -ENOMEM; return rc; } rc = msm_cam_clk_enable(&csiphy_dev->pdev->dev, csiphy_clk_info, csiphy_dev->csiphy_clk, ARRAY_SIZE(csiphy_clk_info), 1); if (rc < 0) { iounmap(csiphy_dev->base); csiphy_dev->base = NULL; return rc; } #if DBG_CSIPHY enable_irq(csiphy_dev->irq->start); #endif msm_csiphy_reset(csiphy_dev); return 0; } static int msm_csiphy_release(struct v4l2_subdev *sd) { struct csiphy_device *csiphy_dev; int i; csiphy_dev = v4l2_get_subdevdata(sd); for (i = 0; i < 4; i++) msm_camera_io_w(0x0, csiphy_dev->base + MIPI_CSIPHY_LNn_CFG2_ADDR + 0x40*i); msm_camera_io_w(0x0, csiphy_dev->base + MIPI_CSIPHY_LNCK_CFG2_ADDR); msm_camera_io_w(0x0, csiphy_dev->base + MIPI_CSIPHY_GLBL_PWR_CFG_ADDR); #if DBG_CSIPHY disable_irq(csiphy_dev->irq->start); #endif msm_cam_clk_enable(&csiphy_dev->pdev->dev, csiphy_clk_info, csiphy_dev->csiphy_clk, ARRAY_SIZE(csiphy_clk_info), 0); iounmap(csiphy_dev->base); csiphy_dev->base = NULL; return 0; } static long 
msm_csiphy_subdev_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) { int rc = -ENOIOCTLCMD; struct csiphy_cfg_params cfg_params; struct csiphy_device *csiphy_dev = v4l2_get_subdevdata(sd); mutex_lock(&csiphy_dev->mutex); switch (cmd) { case VIDIOC_MSM_CSIPHY_CFG: cfg_params.subdev = sd; cfg_params.parms = arg; rc = msm_csiphy_config( (struct csiphy_cfg_params *)&cfg_params); break; case VIDIOC_MSM_CSIPHY_INIT: rc = msm_csiphy_init(sd); break; case VIDIOC_MSM_CSIPHY_RELEASE: rc = msm_csiphy_release(sd); break; default: pr_err("%s: command not found\n", __func__); } mutex_unlock(&csiphy_dev->mutex); return rc; } static const struct v4l2_subdev_internal_ops msm_csiphy_internal_ops; static struct v4l2_subdev_core_ops msm_csiphy_subdev_core_ops = { .g_chip_ident = &msm_csiphy_subdev_g_chip_ident, .ioctl = &msm_csiphy_subdev_ioctl, }; static const struct v4l2_subdev_ops msm_csiphy_subdev_ops = { .core = &msm_csiphy_subdev_core_ops, }; static int __devinit csiphy_probe(struct platform_device *pdev) { struct csiphy_device *new_csiphy_dev; int rc = 0; CDBG("%s: device id = %d\n", __func__, pdev->id); new_csiphy_dev = kzalloc(sizeof(struct csiphy_device), GFP_KERNEL); if (!new_csiphy_dev) { pr_err("%s: no enough memory\n", __func__); return -ENOMEM; } v4l2_subdev_init(&new_csiphy_dev->subdev, &msm_csiphy_subdev_ops); new_csiphy_dev->subdev.internal_ops = &msm_csiphy_internal_ops; new_csiphy_dev->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; snprintf(new_csiphy_dev->subdev.name, ARRAY_SIZE(new_csiphy_dev->subdev.name), "msm_csiphy"); v4l2_set_subdevdata(&new_csiphy_dev->subdev, new_csiphy_dev); platform_set_drvdata(pdev, &new_csiphy_dev->subdev); mutex_init(&new_csiphy_dev->mutex); new_csiphy_dev->mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csiphy"); if (!new_csiphy_dev->mem) { pr_err("%s: no mem resource?\n", __func__); rc = -ENODEV; goto csiphy_no_resource; } new_csiphy_dev->irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "csiphy"); if (!new_csiphy_dev->irq) { pr_err("%s: no irq resource?\n", __func__); rc = -ENODEV; goto csiphy_no_resource; } new_csiphy_dev->io = request_mem_region(new_csiphy_dev->mem->start, resource_size(new_csiphy_dev->mem), pdev->name); if (!new_csiphy_dev->io) { pr_err("%s: no valid mem region\n", __func__); rc = -EBUSY; goto csiphy_no_resource; } rc = request_irq(new_csiphy_dev->irq->start, msm_csiphy_irq, IRQF_TRIGGER_RISING, "csiphy", new_csiphy_dev); if (rc < 0) { release_mem_region(new_csiphy_dev->mem->start, resource_size(new_csiphy_dev->mem)); pr_err("%s: irq request fail\n", __func__); rc = -EBUSY; goto csiphy_no_resource; } disable_irq(new_csiphy_dev->irq->start); new_csiphy_dev->pdev = pdev; msm_cam_register_subdev_node( &new_csiphy_dev->subdev, CSIPHY_DEV, pdev->id); return 0; csiphy_no_resource: mutex_destroy(&new_csiphy_dev->mutex); kfree(new_csiphy_dev); return 0; } static struct platform_driver csiphy_driver = { .probe = csiphy_probe, .driver = { .name = MSM_CSIPHY_DRV_NAME, .owner = THIS_MODULE, }, }; static int __init msm_csiphy_init_module(void) { return platform_driver_register(&csiphy_driver); } static void __exit msm_csiphy_exit_module(void) { platform_driver_unregister(&csiphy_driver); } module_init(msm_csiphy_init_module); module_exit(msm_csiphy_exit_module); MODULE_DESCRIPTION("MSM CSIPHY driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
gp-b2g/gp-keon-kernel
drivers/cpufreq/cpufreq_interactive.c
543
17977
/* * drivers/cpufreq/cpufreq_interactive.c * * Copyright (C) 2010 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Author: Mike Chan (mike@android.com) * */ #include <linux/cpu.h> #include <linux/cpumask.h> #include <linux/cpufreq.h> #include <linux/mutex.h> #include <linux/sched.h> #include <linux/tick.h> #include <linux/time.h> #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/kthread.h> #include <linux/mutex.h> #include <asm/cputime.h> static atomic_t active_count = ATOMIC_INIT(0); struct cpufreq_interactive_cpuinfo { struct timer_list cpu_timer; int timer_idlecancel; u64 time_in_idle; u64 idle_exit_time; u64 timer_run_time; int idling; u64 freq_change_time; u64 freq_change_time_in_idle; struct cpufreq_policy *policy; struct cpufreq_frequency_table *freq_table; unsigned int target_freq; int governor_enabled; }; static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo); /* Workqueues handle frequency scaling */ static struct task_struct *up_task; static struct workqueue_struct *down_wq; static struct work_struct freq_scale_down_work; static cpumask_t up_cpumask; static spinlock_t up_cpumask_lock; static cpumask_t down_cpumask; static spinlock_t down_cpumask_lock; static struct mutex set_speed_lock; /* Hi speed to bump to from lo speed when load burst (default max) */ static u64 hispeed_freq; /* Go to hi speed when CPU load at or above this value. */ #define DEFAULT_GO_HISPEED_LOAD 95 static unsigned long go_hispeed_load; /* * The minimum amount of time to spend at a frequency before we can ramp down. */ #define DEFAULT_MIN_SAMPLE_TIME 20 * USEC_PER_MSEC static unsigned long min_sample_time; /* * The sample rate of the timer used to increase frequency */ #define DEFAULT_TIMER_RATE 20 * USEC_PER_MSEC static unsigned long timer_rate; static int cpufreq_governor_interactive(struct cpufreq_policy *policy, unsigned int event); #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE static #endif struct cpufreq_governor cpufreq_gov_interactive = { .name = "interactive", .governor = cpufreq_governor_interactive, .max_transition_latency = 10000000, .owner = THIS_MODULE, }; static void cpufreq_interactive_timer(unsigned long data) { unsigned int delta_idle; unsigned int delta_time; int cpu_load; int load_since_change; u64 time_in_idle; u64 idle_exit_time; struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, data); u64 now_idle; unsigned int new_freq; unsigned int index; unsigned long flags; smp_rmb(); if (!pcpu->governor_enabled) goto exit; /* * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time, * this lets idle exit know the current idle time sample has * been processed, and idle exit can generate a new sample and * re-arm the timer. This prevents a concurrent idle * exit on that CPU from writing a new set of info at the same time * the timer function runs (the timer function can't use that info * until more time passes). */ time_in_idle = pcpu->time_in_idle; idle_exit_time = pcpu->idle_exit_time; now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time); smp_wmb(); /* If we raced with cancelling a timer, skip. 
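* (idle_exit_time is zeroed when the timer is cancelled in idle_start and on GOV_STOP; that is the case this test catches.)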
*/ if (!idle_exit_time) goto exit; delta_idle = (unsigned int) cputime64_sub(now_idle, time_in_idle); delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time, idle_exit_time); /* * If timer ran less than 1ms after short-term sample started, retry. */ if (delta_time < 1000) goto rearm; if (delta_idle > delta_time) cpu_load = 0; else cpu_load = 100 * (delta_time - delta_idle) / delta_time; delta_idle = (unsigned int) cputime64_sub(now_idle, pcpu->freq_change_time_in_idle); delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time); if ((delta_time == 0) || (delta_idle > delta_time)) load_since_change = 0; else load_since_change = 100 * (delta_time - delta_idle) / delta_time; /* * Choose greater of short-term load (since last idle timer * started or timer function re-armed itself) or long-term load * (since last frequency change). */ if (load_since_change > cpu_load) cpu_load = load_since_change; if (cpu_load >= go_hispeed_load) { if (pcpu->policy->cur == pcpu->policy->min) new_freq = hispeed_freq; else new_freq = pcpu->policy->max * cpu_load / 100; } else { new_freq = pcpu->policy->cur * cpu_load / 100; } if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table, new_freq, CPUFREQ_RELATION_H, &index)) { pr_warn_once("timer %d: cpufreq_frequency_table_target error\n", (int) data); goto rearm; } new_freq = pcpu->freq_table[index].frequency; if (pcpu->target_freq == new_freq) goto rearm_if_notmax; /* * Do not scale down unless we have been at this frequency for the * minimum sample time. */ if (new_freq < pcpu->target_freq) { if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time) < min_sample_time) goto rearm; } if (new_freq < pcpu->target_freq) { pcpu->target_freq = new_freq; spin_lock_irqsave(&down_cpumask_lock, flags); cpumask_set_cpu(data, &down_cpumask); spin_unlock_irqrestore(&down_cpumask_lock, flags); queue_work(down_wq, &freq_scale_down_work); } else { pcpu->target_freq = new_freq; spin_lock_irqsave(&up_cpumask_lock, flags); cpumask_set_cpu(data, &up_cpumask); spin_unlock_irqrestore(&up_cpumask_lock, flags); wake_up_process(up_task); } rearm_if_notmax: /* * Already set max speed and don't see a need to change that, * wait until next idle to re-evaluate, don't need timer. */ if (pcpu->target_freq == pcpu->policy->max) goto exit; rearm: if (!timer_pending(&pcpu->cpu_timer)) { /* * If already at min: if that CPU is idle, don't set timer. * Else cancel the timer if that CPU goes idle. We don't * need to re-evaluate speed until the next idle exit. */ if (pcpu->target_freq == pcpu->policy->min) { smp_rmb(); if (pcpu->idling) goto exit; pcpu->timer_idlecancel = 1; } pcpu->time_in_idle = get_cpu_idle_time_us( data, &pcpu->idle_exit_time); mod_timer(&pcpu->cpu_timer, jiffies + usecs_to_jiffies(timer_rate)); } exit: return; } static void cpufreq_interactive_idle_start(void) { struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, smp_processor_id()); int pending; if (!pcpu->governor_enabled) return; pcpu->idling = 1; smp_wmb(); pending = timer_pending(&pcpu->cpu_timer); if (pcpu->target_freq != pcpu->policy->min) { #ifdef CONFIG_SMP /* * Entering idle while not at lowest speed. On some * platforms this can hold the other CPU(s) at that speed * even though the CPU is idle. Set a timer to re-evaluate * speed so this idle CPU doesn't hold the other CPUs above * min indefinitely. This should probably be a quirk of * the CPUFreq driver. 
*/ if (!pending) { pcpu->time_in_idle = get_cpu_idle_time_us( smp_processor_id(), &pcpu->idle_exit_time); pcpu->timer_idlecancel = 0; mod_timer(&pcpu->cpu_timer, jiffies + usecs_to_jiffies(timer_rate)); } #endif } else { /* * If at min speed and entering idle after load has * already been evaluated, and a timer has been set just in * case the CPU suddenly goes busy, cancel that timer. The * CPU didn't go busy; we'll recheck things upon idle exit. */ if (pending && pcpu->timer_idlecancel) { del_timer(&pcpu->cpu_timer); /* * Ensure last timer run time is after current idle * sample start time, so next idle exit will always * start a new idle sampling period. */ pcpu->idle_exit_time = 0; pcpu->timer_idlecancel = 0; } } } static void cpufreq_interactive_idle_end(void) { struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, smp_processor_id()); pcpu->idling = 0; smp_wmb(); /* * Arm the timer for 1-2 ticks later if not already, and if the timer * function has already processed the previous load sampling * interval. (If the timer is not pending but has not processed * the previous interval, it is probably racing with us on another * CPU. Let it compute load based on the previous sample and then * re-arm the timer for another interval when it's done, rather * than updating the interval start time to be "now", which doesn't * give the timer function enough time to make a decision on this * run.) */ if (timer_pending(&pcpu->cpu_timer) == 0 && pcpu->timer_run_time >= pcpu->idle_exit_time && pcpu->governor_enabled) { pcpu->time_in_idle = get_cpu_idle_time_us(smp_processor_id(), &pcpu->idle_exit_time); pcpu->timer_idlecancel = 0; mod_timer(&pcpu->cpu_timer, jiffies + usecs_to_jiffies(timer_rate)); } } static int cpufreq_interactive_up_task(void *data) { unsigned int cpu; cpumask_t tmp_mask; unsigned long flags; struct cpufreq_interactive_cpuinfo *pcpu; while (1) { set_current_state(TASK_INTERRUPTIBLE); spin_lock_irqsave(&up_cpumask_lock, flags); if (cpumask_empty(&up_cpumask)) { spin_unlock_irqrestore(&up_cpumask_lock, flags); schedule(); if (kthread_should_stop()) break; spin_lock_irqsave(&up_cpumask_lock, flags); } set_current_state(TASK_RUNNING); tmp_mask = up_cpumask; cpumask_clear(&up_cpumask); spin_unlock_irqrestore(&up_cpumask_lock, flags); for_each_cpu(cpu, &tmp_mask) { unsigned int j; unsigned int max_freq = 0; pcpu = &per_cpu(cpuinfo, cpu); smp_rmb(); if (!pcpu->governor_enabled) continue; mutex_lock(&set_speed_lock); for_each_cpu(j, pcpu->policy->cpus) { struct cpufreq_interactive_cpuinfo *pjcpu = &per_cpu(cpuinfo, j); if (pjcpu->target_freq > max_freq) max_freq = pjcpu->target_freq; } if (max_freq != pcpu->policy->cur) __cpufreq_driver_target(pcpu->policy, max_freq, CPUFREQ_RELATION_H); mutex_unlock(&set_speed_lock); pcpu->freq_change_time_in_idle = get_cpu_idle_time_us(cpu, &pcpu->freq_change_time); } } return 0; } static void cpufreq_interactive_freq_down(struct work_struct *work) { unsigned int cpu; cpumask_t tmp_mask; unsigned long flags; struct cpufreq_interactive_cpuinfo *pcpu; spin_lock_irqsave(&down_cpumask_lock, flags); tmp_mask = down_cpumask; cpumask_clear(&down_cpumask); spin_unlock_irqrestore(&down_cpumask_lock, flags); for_each_cpu(cpu, &tmp_mask) { unsigned int j; unsigned int max_freq = 0; pcpu = &per_cpu(cpuinfo, cpu); smp_rmb(); if (!pcpu->governor_enabled) continue; mutex_lock(&set_speed_lock); for_each_cpu(j, pcpu->policy->cpus) { struct cpufreq_interactive_cpuinfo *pjcpu = &per_cpu(cpuinfo, j); if (pjcpu->target_freq > max_freq) max_freq = pjcpu->target_freq; } 
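/* Scale the shared policy to the fastest target requested by any member CPU; anything lower would undercut a busier sibling. */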
if (max_freq != pcpu->policy->cur) __cpufreq_driver_target(pcpu->policy, max_freq, CPUFREQ_RELATION_H); mutex_unlock(&set_speed_lock); pcpu->freq_change_time_in_idle = get_cpu_idle_time_us(cpu, &pcpu->freq_change_time); } } static ssize_t show_hispeed_freq(struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%llu\n", hispeed_freq); } static ssize_t store_hispeed_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { int ret; u64 val; ret = strict_strtoull(buf, 0, &val); if (ret < 0) return ret; hispeed_freq = val; return count; } static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644, show_hispeed_freq, store_hispeed_freq); static ssize_t show_go_hispeed_load(struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%lu\n", go_hispeed_load); } static ssize_t store_go_hispeed_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { int ret; unsigned long val; ret = strict_strtoul(buf, 0, &val); if (ret < 0) return ret; go_hispeed_load = val; return count; } static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644, show_go_hispeed_load, store_go_hispeed_load); static ssize_t show_min_sample_time(struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%lu\n", min_sample_time); } static ssize_t store_min_sample_time(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { int ret; unsigned long val; ret = strict_strtoul(buf, 0, &val); if (ret < 0) return ret; min_sample_time = val; return count; } static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644, show_min_sample_time, store_min_sample_time); static ssize_t show_timer_rate(struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%lu\n", timer_rate); } static ssize_t store_timer_rate(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { int ret; unsigned long val; ret = strict_strtoul(buf, 0, &val); if (ret < 0) return ret; timer_rate = val; return count; } static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644, show_timer_rate, store_timer_rate); static struct attribute *interactive_attributes[] = { &hispeed_freq_attr.attr, &go_hispeed_load_attr.attr, &min_sample_time_attr.attr, &timer_rate_attr.attr, NULL, }; static struct attribute_group interactive_attr_group = { .attrs = interactive_attributes, .name = "interactive", }; static int cpufreq_governor_interactive(struct cpufreq_policy *policy, unsigned int event) { int rc; unsigned int j; struct cpufreq_interactive_cpuinfo *pcpu; struct cpufreq_frequency_table *freq_table; switch (event) { case CPUFREQ_GOV_START: if (!cpu_online(policy->cpu)) return -EINVAL; freq_table = cpufreq_frequency_get_table(policy->cpu); for_each_cpu(j, policy->cpus) { pcpu = &per_cpu(cpuinfo, j); pcpu->policy = policy; pcpu->target_freq = policy->cur; pcpu->freq_table = freq_table; pcpu->freq_change_time_in_idle = get_cpu_idle_time_us(j, &pcpu->freq_change_time); pcpu->governor_enabled = 1; smp_wmb(); } if (!hispeed_freq) hispeed_freq = policy->max; /* * Do not register the idle hook and create sysfs * entries if we have already done so. 
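* (active_count counts how many policies have started this governor; only the first arrival registers the sysfs group.)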
*/ if (atomic_inc_return(&active_count) > 1) return 0; rc = sysfs_create_group(cpufreq_global_kobject, &interactive_attr_group); if (rc) return rc; break; case CPUFREQ_GOV_STOP: for_each_cpu(j, policy->cpus) { pcpu = &per_cpu(cpuinfo, j); pcpu->governor_enabled = 0; smp_wmb(); del_timer_sync(&pcpu->cpu_timer); /* * Reset idle exit time since we may cancel the timer * before it can run after the last idle exit time, * to avoid tripping the check in idle exit for a timer * that is trying to run. */ pcpu->idle_exit_time = 0; } flush_work(&freq_scale_down_work); if (atomic_dec_return(&active_count) > 0) return 0; sysfs_remove_group(cpufreq_global_kobject, &interactive_attr_group); break; case CPUFREQ_GOV_LIMITS: if (policy->max < policy->cur) __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); else if (policy->min > policy->cur) __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); break; } return 0; } static int cpufreq_interactive_idle_notifier(struct notifier_block *nb, unsigned long val, void *data) { switch (val) { case IDLE_START: cpufreq_interactive_idle_start(); break; case IDLE_END: cpufreq_interactive_idle_end(); break; } return 0; } static struct notifier_block cpufreq_interactive_idle_nb = { .notifier_call = cpufreq_interactive_idle_notifier, }; static int __init cpufreq_interactive_init(void) { unsigned int i; struct cpufreq_interactive_cpuinfo *pcpu; struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; go_hispeed_load = DEFAULT_GO_HISPEED_LOAD; min_sample_time = DEFAULT_MIN_SAMPLE_TIME; timer_rate = DEFAULT_TIMER_RATE; /* Initalize per-cpu timers */ for_each_possible_cpu(i) { pcpu = &per_cpu(cpuinfo, i); init_timer(&pcpu->cpu_timer); pcpu->cpu_timer.function = cpufreq_interactive_timer; pcpu->cpu_timer.data = i; } up_task = kthread_create(cpufreq_interactive_up_task, NULL, "kinteractiveup"); if (IS_ERR(up_task)) return PTR_ERR(up_task); sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param); get_task_struct(up_task); /* No rescuer thread, bind to CPU queuing the work for possibly warm cache (probably doesn't matter much). */ down_wq = alloc_workqueue("knteractive_down", 0, 1); if (!down_wq) goto err_freeuptask; INIT_WORK(&freq_scale_down_work, cpufreq_interactive_freq_down); spin_lock_init(&up_cpumask_lock); spin_lock_init(&down_cpumask_lock); mutex_init(&set_speed_lock); idle_notifier_register(&cpufreq_interactive_idle_nb); return cpufreq_register_governor(&cpufreq_gov_interactive); err_freeuptask: put_task_struct(up_task); return -ENOMEM; } #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE fs_initcall(cpufreq_interactive_init); #else module_init(cpufreq_interactive_init); #endif static void __exit cpufreq_interactive_exit(void) { cpufreq_unregister_governor(&cpufreq_gov_interactive); kthread_stop(up_task); put_task_struct(up_task); destroy_workqueue(down_wq); } module_exit(cpufreq_interactive_exit); MODULE_AUTHOR("Mike Chan <mike@android.com>"); MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for " "Latency sensitive workloads"); MODULE_LICENSE("GPL");
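The heart of the governor is the load arithmetic in cpufreq_interactive_timer(): load is the busy fraction of the sample window, and the larger of the short-term and since-last-frequency-change figures is used. A standalone sketch of that calculation (function and variable names here are illustrative, not the driver's):

#include <stdio.h>

/* Busy percentage over one sample window, as the timer computes it:
 * idle time is subtracted from the window length and clamped at 0. */
static int window_load(unsigned int delta_time_us, unsigned int delta_idle_us)
{
	if (delta_time_us == 0 || delta_idle_us > delta_time_us)
		return 0;
	return 100 * (delta_time_us - delta_idle_us) / delta_time_us;
}

int main(void)
{
	/* A 20 ms window with 3 ms idle -> 85%, below the default
	 * go_hispeed_load of 95, so the governor would scale
	 * proportionally (cur * load / 100) instead of jumping
	 * to hispeed_freq. */
	printf("load = %d%%\n", window_load(20000, 3000));
	return 0;
}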
gpl-2.0
gauravdatir/linux
drivers/media/dvb-frontends/isl6421.c
543
4549
/* * isl6421.h - driver for lnb supply and control ic ISL6421 * * Copyright (C) 2006 Andrew de Quincey * Copyright (C) 2006 Oliver Endriss * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * * the project's page is at http://www.linuxtv.org */ #include <linux/delay.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include <linux/slab.h> #include "dvb_frontend.h" #include "isl6421.h" struct isl6421 { u8 config; u8 override_or; u8 override_and; struct i2c_adapter *i2c; u8 i2c_addr; }; static int isl6421_set_voltage(struct dvb_frontend *fe, enum fe_sec_voltage voltage) { struct isl6421 *isl6421 = (struct isl6421 *) fe->sec_priv; struct i2c_msg msg = { .addr = isl6421->i2c_addr, .flags = 0, .buf = &isl6421->config, .len = sizeof(isl6421->config) }; isl6421->config &= ~(ISL6421_VSEL1 | ISL6421_EN1); switch(voltage) { case SEC_VOLTAGE_OFF: break; case SEC_VOLTAGE_13: isl6421->config |= ISL6421_EN1; break; case SEC_VOLTAGE_18: isl6421->config |= (ISL6421_EN1 | ISL6421_VSEL1); break; default: return -EINVAL; } isl6421->config |= isl6421->override_or; isl6421->config &= isl6421->override_and; return (i2c_transfer(isl6421->i2c, &msg, 1) == 1) ? 0 : -EIO; } static int isl6421_enable_high_lnb_voltage(struct dvb_frontend *fe, long arg) { struct isl6421 *isl6421 = (struct isl6421 *) fe->sec_priv; struct i2c_msg msg = { .addr = isl6421->i2c_addr, .flags = 0, .buf = &isl6421->config, .len = sizeof(isl6421->config) }; if (arg) isl6421->config |= ISL6421_LLC1; else isl6421->config &= ~ISL6421_LLC1; isl6421->config |= isl6421->override_or; isl6421->config &= isl6421->override_and; return (i2c_transfer(isl6421->i2c, &msg, 1) == 1) ? 0 : -EIO; } static int isl6421_set_tone(struct dvb_frontend *fe, enum fe_sec_tone_mode tone) { struct isl6421 *isl6421 = (struct isl6421 *) fe->sec_priv; struct i2c_msg msg = { .addr = isl6421->i2c_addr, .flags = 0, .buf = &isl6421->config, .len = sizeof(isl6421->config) }; switch (tone) { case SEC_TONE_ON: isl6421->config |= ISL6421_ENT1; break; case SEC_TONE_OFF: isl6421->config &= ~ISL6421_ENT1; break; default: return -EINVAL; } isl6421->config |= isl6421->override_or; isl6421->config &= isl6421->override_and; return (i2c_transfer(isl6421->i2c, &msg, 1) == 1) ? 
0 : -EIO; } static void isl6421_release(struct dvb_frontend *fe) { /* power off */ isl6421_set_voltage(fe, SEC_VOLTAGE_OFF); /* free */ kfree(fe->sec_priv); fe->sec_priv = NULL; } struct dvb_frontend *isl6421_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, u8 i2c_addr, u8 override_set, u8 override_clear, bool override_tone) { struct isl6421 *isl6421 = kmalloc(sizeof(struct isl6421), GFP_KERNEL); if (!isl6421) return NULL; /* default configuration */ isl6421->config = ISL6421_ISEL1; isl6421->i2c = i2c; isl6421->i2c_addr = i2c_addr; fe->sec_priv = isl6421; /* bits which should be forced to '1' */ isl6421->override_or = override_set; /* bits which should be forced to '0' */ isl6421->override_and = ~override_clear; /* detect if it is present or not */ if (isl6421_set_voltage(fe, SEC_VOLTAGE_OFF)) { kfree(isl6421); fe->sec_priv = NULL; return NULL; } /* install release callback */ fe->ops.release_sec = isl6421_release; /* override frontend ops */ fe->ops.set_voltage = isl6421_set_voltage; fe->ops.enable_high_lnb_voltage = isl6421_enable_high_lnb_voltage; if (override_tone) fe->ops.set_tone = isl6421_set_tone; return fe; } EXPORT_SYMBOL(isl6421_attach); MODULE_DESCRIPTION("Driver for lnb supply and control ic isl6421"); MODULE_AUTHOR("Andrew de Quincey & Oliver Endriss"); MODULE_LICENSE("GPL");
gpl-2.0
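Editor's note: isl6421_set_voltage above builds a single I2C config byte from the EN1/VSEL1 bits plus board-specific override masks applied last. A minimal sketch of that bit arithmetic; the bit positions below are hypothetical placeholders (the real values live in isl6421.h):

#include <stdint.h>
#include <stdio.h>

/* hypothetical bit positions, standing in for ISL6421_EN1/VSEL1 */
#define EN1   0x01  /* enable LNB output          */
#define VSEL1 0x02  /* 0 selects 13 V, 1 selects 18 V */

static uint8_t lnb_config(int voltage, uint8_t force_set, uint8_t force_clear)
{
	uint8_t cfg = 0;

	if (voltage == 13)
		cfg |= EN1;              /* 13 V: output on, low range  */
	else if (voltage == 18)
		cfg |= EN1 | VSEL1;      /* 18 V: output on, high range */
	/* 0 V: both bits stay clear, output off */

	cfg |= force_set;                /* board wiring may force bits to 1 */
	cfg &= (uint8_t)~force_clear;    /* ...or force bits to 0            */
	return cfg;
}

int main(void)
{
	printf("18V config: %#x\n", lnb_config(18, 0, 0));
	return 0;
}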
kelcecil/linux
sound/soc/samsung/speyside.c
543
8657
/* * Speyside audio support * * Copyright 2011 Wolfson Microelectronics * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/jack.h> #include <linux/gpio.h> #include <linux/module.h> #include "../codecs/wm8996.h" #include "../codecs/wm9081.h" #define WM8996_HPSEL_GPIO 214 #define MCLK_AUDIO_RATE (512 * 48000) static int speyside_set_bias_level(struct snd_soc_card *card, struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level) { struct snd_soc_dai *codec_dai = card->rtd[1].codec_dai; int ret; if (dapm->dev != codec_dai->dev) return 0; switch (level) { case SND_SOC_BIAS_STANDBY: ret = snd_soc_dai_set_sysclk(codec_dai, WM8996_SYSCLK_MCLK2, 32768, SND_SOC_CLOCK_IN); if (ret < 0) return ret; ret = snd_soc_dai_set_pll(codec_dai, WM8996_FLL_MCLK2, 0, 0, 0); if (ret < 0) { pr_err("Failed to stop FLL\n"); return ret; } break; default: break; } return 0; } static int speyside_set_bias_level_post(struct snd_soc_card *card, struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level) { struct snd_soc_dai *codec_dai = card->rtd[1].codec_dai; int ret; if (dapm->dev != codec_dai->dev) return 0; switch (level) { case SND_SOC_BIAS_PREPARE: if (card->dapm.bias_level == SND_SOC_BIAS_STANDBY) { ret = snd_soc_dai_set_pll(codec_dai, 0, WM8996_FLL_MCLK2, 32768, MCLK_AUDIO_RATE); if (ret < 0) { pr_err("Failed to start FLL\n"); return ret; } ret = snd_soc_dai_set_sysclk(codec_dai, WM8996_SYSCLK_FLL, MCLK_AUDIO_RATE, SND_SOC_CLOCK_IN); if (ret < 0) return ret; } break; default: break; } card->dapm.bias_level = level; return 0; } static struct snd_soc_jack speyside_headset; /* Headset jack detection DAPM pins */ static struct snd_soc_jack_pin speyside_headset_pins[] = { { .pin = "Headset Mic", .mask = SND_JACK_MICROPHONE, }, }; /* Default the headphone selection to active high */ static int speyside_jack_polarity; static int speyside_get_micbias(struct snd_soc_dapm_widget *source, struct snd_soc_dapm_widget *sink) { if (speyside_jack_polarity && (strcmp(source->name, "MICB1") == 0)) return 1; if (!speyside_jack_polarity && (strcmp(source->name, "MICB2") == 0)) return 1; return 0; } static void speyside_set_polarity(struct snd_soc_codec *codec, int polarity) { speyside_jack_polarity = !polarity; gpio_direction_output(WM8996_HPSEL_GPIO, speyside_jack_polarity); /* Re-run DAPM to make sure we're using the correct mic bias */ snd_soc_dapm_sync(snd_soc_codec_get_dapm(codec)); } static int speyside_wm0010_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_dai *dai = rtd->codec_dai; int ret; ret = snd_soc_dai_set_sysclk(dai, 0, MCLK_AUDIO_RATE, 0); if (ret < 0) return ret; return 0; } static int speyside_wm8996_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_dai *dai = rtd->codec_dai; struct snd_soc_codec *codec = rtd->codec; int ret; ret = snd_soc_dai_set_sysclk(dai, WM8996_SYSCLK_MCLK2, 32768, 0); if (ret < 0) return ret; ret = gpio_request(WM8996_HPSEL_GPIO, "HP_SEL"); if (ret != 0) pr_err("Failed to request HP_SEL GPIO: %d\n", ret); gpio_direction_output(WM8996_HPSEL_GPIO, speyside_jack_polarity); ret = snd_soc_card_jack_new(rtd->card, "Headset", SND_JACK_LINEOUT | SND_JACK_HEADSET | SND_JACK_BTN_0, &speyside_headset, speyside_headset_pins, ARRAY_SIZE(speyside_headset_pins)); if (ret) return ret; wm8996_detect(codec, 
&speyside_headset, speyside_set_polarity); return 0; } static int speyside_late_probe(struct snd_soc_card *card) { snd_soc_dapm_ignore_suspend(&card->dapm, "Headphone"); snd_soc_dapm_ignore_suspend(&card->dapm, "Headset Mic"); snd_soc_dapm_ignore_suspend(&card->dapm, "Main AMIC"); snd_soc_dapm_ignore_suspend(&card->dapm, "Main DMIC"); snd_soc_dapm_ignore_suspend(&card->dapm, "Main Speaker"); snd_soc_dapm_ignore_suspend(&card->dapm, "WM1250 Output"); snd_soc_dapm_ignore_suspend(&card->dapm, "WM1250 Input"); return 0; } static const struct snd_soc_pcm_stream dsp_codec_params = { .formats = SNDRV_PCM_FMTBIT_S32_LE, .rate_min = 48000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, }; static struct snd_soc_dai_link speyside_dai[] = { { .name = "CPU-DSP", .stream_name = "CPU-DSP", .cpu_dai_name = "samsung-i2s.0", .codec_dai_name = "wm0010-sdi1", .platform_name = "samsung-i2s.0", .codec_name = "spi0.0", .init = speyside_wm0010_init, .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM, }, { .name = "DSP-CODEC", .stream_name = "DSP-CODEC", .cpu_dai_name = "wm0010-sdi2", .codec_dai_name = "wm8996-aif1", .codec_name = "wm8996.1-001a", .init = speyside_wm8996_init, .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM, .params = &dsp_codec_params, .ignore_suspend = 1, }, { .name = "Baseband", .stream_name = "Baseband", .cpu_dai_name = "wm8996-aif2", .codec_dai_name = "wm1250-ev1", .codec_name = "wm1250-ev1.1-0027", .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM, .ignore_suspend = 1, }, }; static int speyside_wm9081_init(struct snd_soc_component *component) { struct snd_soc_codec *codec = snd_soc_component_to_codec(component); /* At any time the WM9081 is active it will have this clock */ return snd_soc_codec_set_sysclk(codec, WM9081_SYSCLK_MCLK, 0, MCLK_AUDIO_RATE, 0); } static struct snd_soc_aux_dev speyside_aux_dev[] = { { .name = "wm9081", .codec_name = "wm9081.1-006c", .init = speyside_wm9081_init, }, }; static struct snd_soc_codec_conf speyside_codec_conf[] = { { .dev_name = "wm9081.1-006c", .name_prefix = "Sub", }, }; static const struct snd_kcontrol_new controls[] = { SOC_DAPM_PIN_SWITCH("Main Speaker"), SOC_DAPM_PIN_SWITCH("Main DMIC"), SOC_DAPM_PIN_SWITCH("Main AMIC"), SOC_DAPM_PIN_SWITCH("WM1250 Input"), SOC_DAPM_PIN_SWITCH("WM1250 Output"), SOC_DAPM_PIN_SWITCH("Headphone"), }; static struct snd_soc_dapm_widget widgets[] = { SND_SOC_DAPM_HP("Headphone", NULL), SND_SOC_DAPM_MIC("Headset Mic", NULL), SND_SOC_DAPM_SPK("Main Speaker", NULL), SND_SOC_DAPM_MIC("Main AMIC", NULL), SND_SOC_DAPM_MIC("Main DMIC", NULL), }; static struct snd_soc_dapm_route audio_paths[] = { { "IN1RN", NULL, "MICB1" }, { "IN1RP", NULL, "MICB1" }, { "IN1RN", NULL, "MICB2" }, { "IN1RP", NULL, "MICB2" }, { "MICB1", NULL, "Headset Mic", speyside_get_micbias }, { "MICB2", NULL, "Headset Mic", speyside_get_micbias }, { "IN1LP", NULL, "MICB2" }, { "IN1RN", NULL, "MICB1" }, { "MICB2", NULL, "Main AMIC" }, { "DMIC1DAT", NULL, "MICB1" }, { "DMIC2DAT", NULL, "MICB1" }, { "MICB1", NULL, "Main DMIC" }, { "Headphone", NULL, "HPOUT1L" }, { "Headphone", NULL, "HPOUT1R" }, { "Sub IN1", NULL, "HPOUT2L" }, { "Sub IN2", NULL, "HPOUT2R" }, { "Main Speaker", NULL, "Sub SPKN" }, { "Main Speaker", NULL, "Sub SPKP" }, { "Main Speaker", NULL, "SPKDAT" }, }; static struct snd_soc_card speyside = { .name = "Speyside", .owner = THIS_MODULE, .dai_link = speyside_dai, .num_links = ARRAY_SIZE(speyside_dai), .aux_dev = speyside_aux_dev, .num_aux_devs = 
ARRAY_SIZE(speyside_aux_dev), .codec_conf = speyside_codec_conf, .num_configs = ARRAY_SIZE(speyside_codec_conf), .set_bias_level = speyside_set_bias_level, .set_bias_level_post = speyside_set_bias_level_post, .controls = controls, .num_controls = ARRAY_SIZE(controls), .dapm_widgets = widgets, .num_dapm_widgets = ARRAY_SIZE(widgets), .dapm_routes = audio_paths, .num_dapm_routes = ARRAY_SIZE(audio_paths), .fully_routed = true, .late_probe = speyside_late_probe, }; static int speyside_probe(struct platform_device *pdev) { struct snd_soc_card *card = &speyside; int ret; card->dev = &pdev->dev; ret = devm_snd_soc_register_card(&pdev->dev, card); if (ret) dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret); return ret; } static struct platform_driver speyside_driver = { .driver = { .name = "speyside", .pm = &snd_soc_pm_ops, }, .probe = speyside_probe, }; module_platform_driver(speyside_driver); MODULE_DESCRIPTION("Speyside audio support"); MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:speyside");
gpl-2.0
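Editor's note: the machine driver above selects between the codec's two mic bias supplies according to the detected headset jack polarity; speyside_get_micbias gates each DAPM route at runtime. A compact sketch of the same selection logic in plain C (the bias names follow the driver, everything else is hypothetical):

#include <stdio.h>
#include <string.h>

static int jack_polarity; /* set by the jack-detection callback */

/* a route is enabled only when its bias source matches the polarity */
static int micbias_connected(const char *source)
{
	if (jack_polarity && strcmp(source, "MICB1") == 0)
		return 1;
	if (!jack_polarity && strcmp(source, "MICB2") == 0)
		return 1;
	return 0;
}

int main(void)
{
	jack_polarity = 0;
	printf("MICB1: %d, MICB2: %d\n",
	       micbias_connected("MICB1"), micbias_connected("MICB2"));
	return 0;
}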
sunrunning/ok6410_linux
arch/x86/kernel/apic/bigsmp_32.c
543
6272
/* * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs. * * Drives the local APIC in "clustered mode". */ #include <linux/threads.h> #include <linux/cpumask.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/dmi.h> #include <linux/smp.h> #include <asm/apicdef.h> #include <asm/fixmap.h> #include <asm/mpspec.h> #include <asm/apic.h> #include <asm/ipi.h> static unsigned bigsmp_get_apic_id(unsigned long x) { return (x >> 24) & 0xFF; } static int bigsmp_apic_id_registered(void) { return 1; } static const struct cpumask *bigsmp_target_cpus(void) { #ifdef CONFIG_SMP return cpu_online_mask; #else return cpumask_of(0); #endif } static unsigned long bigsmp_check_apicid_used(physid_mask_t *map, int apicid) { return 0; } static unsigned long bigsmp_check_apicid_present(int bit) { return 1; } static int bigsmp_early_logical_apicid(int cpu) { /* on bigsmp, logical apicid is the same as physical */ return early_per_cpu(x86_cpu_to_apicid, cpu); } static inline unsigned long calculate_ldr(int cpu) { unsigned long val, id; val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; id = per_cpu(x86_bios_cpu_apicid, cpu); val |= SET_APIC_LOGICAL_ID(id); return val; } /* * Set up the logical destination ID. * * Intel recommends to set DFR, LDR and TPR before enabling * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel * document number 292116). So here it goes... */ static void bigsmp_init_apic_ldr(void) { unsigned long val; int cpu = smp_processor_id(); apic_write(APIC_DFR, APIC_DFR_FLAT); val = calculate_ldr(cpu); apic_write(APIC_LDR, val); } static void bigsmp_setup_apic_routing(void) { printk(KERN_INFO "Enabling APIC mode: Physflat. Using %d I/O APICs\n", nr_ioapics); } static int bigsmp_cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < nr_cpu_ids) return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); return BAD_APICID; } static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) { /* For clustered we don't have a good way to do this yet - hack */ physids_promote(0xFFL, retmap); } static int bigsmp_check_phys_apicid_present(int phys_apicid) { return 1; } /* As we are using single CPU as destination, pick only one CPU here */ static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask) { int cpu = cpumask_first(cpumask); if (cpu < nr_cpu_ids) return cpu_physical_id(cpu); return BAD_APICID; } static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, const struct cpumask *andmask) { int cpu; /* * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. 
*/ for_each_cpu_and(cpu, cpumask, andmask) { if (cpumask_test_cpu(cpu, cpu_online_mask)) return cpu_physical_id(cpu); } return BAD_APICID; } static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; } static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector) { default_send_IPI_mask_sequence_phys(mask, vector); } static void bigsmp_send_IPI_allbutself(int vector) { default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); } static void bigsmp_send_IPI_all(int vector) { bigsmp_send_IPI_mask(cpu_online_mask, vector); } static int dmi_bigsmp; /* can be set by dmi scanners */ static int hp_ht_bigsmp(const struct dmi_system_id *d) { printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident); dmi_bigsmp = 1; return 0; } static const struct dmi_system_id bigsmp_dmi_table[] = { { hp_ht_bigsmp, "HP ProLiant DL760 G2", { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), DMI_MATCH(DMI_BIOS_VERSION, "P44-"), } }, { hp_ht_bigsmp, "HP ProLiant DL740", { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), DMI_MATCH(DMI_BIOS_VERSION, "P47-"), } }, { } /* NULL entry stops DMI scanning */ }; static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask) { cpumask_clear(retmask); cpumask_set_cpu(cpu, retmask); } static int probe_bigsmp(void) { if (def_to_bigsmp) dmi_bigsmp = 1; else dmi_check_system(bigsmp_dmi_table); return dmi_bigsmp; } static struct apic apic_bigsmp = { .name = "bigsmp", .probe = probe_bigsmp, .acpi_madt_oem_check = NULL, .apic_id_registered = bigsmp_apic_id_registered, .irq_delivery_mode = dest_Fixed, /* phys delivery to target CPU: */ .irq_dest_mode = 0, .target_cpus = bigsmp_target_cpus, .disable_esr = 1, .dest_logical = 0, .check_apicid_used = bigsmp_check_apicid_used, .check_apicid_present = bigsmp_check_apicid_present, .vector_allocation_domain = bigsmp_vector_allocation_domain, .init_apic_ldr = bigsmp_init_apic_ldr, .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map, .setup_apic_routing = bigsmp_setup_apic_routing, .multi_timer_check = NULL, .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid, .apicid_to_cpu_present = physid_set_mask_of_physid, .setup_portio_remap = NULL, .check_phys_apicid_present = bigsmp_check_phys_apicid_present, .enable_apic_mode = NULL, .phys_pkg_id = bigsmp_phys_pkg_id, .mps_oem_check = NULL, .get_apic_id = bigsmp_get_apic_id, .set_apic_id = NULL, .apic_id_mask = 0xFF << 24, .cpu_mask_to_apicid = bigsmp_cpu_mask_to_apicid, .cpu_mask_to_apicid_and = bigsmp_cpu_mask_to_apicid_and, .send_IPI_mask = bigsmp_send_IPI_mask, .send_IPI_mask_allbutself = NULL, .send_IPI_allbutself = bigsmp_send_IPI_allbutself, .send_IPI_all = bigsmp_send_IPI_all, .send_IPI_self = default_send_IPI_self, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = default_wait_for_init_deassert, .smp_callin_clear_local_apic = NULL, .inquire_remote_apic = default_inquire_remote_apic, .read = native_apic_mem_read, .write = native_apic_mem_write, .icr_read = native_apic_icr_read, .icr_write = native_apic_icr_write, .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, .x86_32_early_logical_apicid = bigsmp_early_logical_apicid, }; struct apic * __init generic_bigsmp_probe(void) { if (probe_bigsmp()) return &apic_bigsmp; return NULL; } apic_driver(apic_bigsmp);
gpl-2.0
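Editor's note: calculate_ldr above programs the logical destination register by masking off APIC_LDR_MASK and shifting the BIOS-assigned APIC ID into the top byte (what SET_APIC_LOGICAL_ID does). A standalone sketch of that register update, with the flat-mode layout (logical ID in bits 31:24) written out explicitly as an assumption:

#include <stdint.h>
#include <stdio.h>

#define APIC_LDR_MASK 0xFF000000u  /* logical APIC ID field, bits 31:24 */

static uint32_t calculate_ldr(uint32_t old_ldr, uint32_t apic_id)
{
	uint32_t val = old_ldr & ~APIC_LDR_MASK; /* keep the other bits    */
	val |= (apic_id & 0xFF) << 24;           /* install the logical ID */
	return val;
}

int main(void)
{
	printf("LDR for APIC ID 5: %#010x\n", calculate_ldr(0, 5));
	return 0;
}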
kratos1988/operating_systems
arch/x86/kernel/crash_dump_32.c
543
2666
/* * Memory preserving reboot related code. * * Created by: Hariprasad Nellitheertha (hari@in.ibm.com) * Copyright (C) IBM Corporation, 2004. All rights reserved */ #include <linux/errno.h> #include <linux/highmem.h> #include <linux/crash_dump.h> #include <asm/uaccess.h> static void *kdump_buf_page; /* Stores the physical address of elf header of crash image. */ unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; static inline bool is_crashed_pfn_valid(unsigned long pfn) { #ifndef CONFIG_X86_PAE /* * A non-PAE kdump kernel executed from a PAE one will crop high pte * bits and poke unwanted space counting again from address 0; we * don't want that. The pte must fit into unsigned long. In fact the * test checks high 12 bits for being zero (pfn will be shifted left * by PAGE_SHIFT). */ return pte_pfn(pfn_pte(pfn, __pgprot(0))) == pfn; #else return true; #endif } /** * copy_oldmem_page - copy one page from "oldmem" * @pfn: page frame number to be copied * @buf: target memory address for the copy; this can be in kernel address * space or user address space (see @userbuf) * @csize: number of bytes to copy * @offset: offset in bytes into the page (based on pfn) to begin the copy * @userbuf: if set, @buf is in user address space, use copy_to_user(), * otherwise @buf is in kernel address space, use memcpy(). * * Copy a page from "oldmem". For this page, there is no pte mapped * in the current kernel. We stitch up a pte, similar to kmap_atomic. * * Calling copy_to_user() in atomic context is not desirable. Hence first * copying the data to a pre-allocated kernel page and then copying to user * space in non-atomic context. */ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, unsigned long offset, int userbuf) { void *vaddr; if (!csize) return 0; if (!is_crashed_pfn_valid(pfn)) return -EFAULT; vaddr = kmap_atomic_pfn(pfn, KM_PTE0); if (!userbuf) { memcpy(buf, (vaddr + offset), csize); kunmap_atomic(vaddr, KM_PTE0); } else { if (!kdump_buf_page) { printk(KERN_WARNING "Kdump: Kdump buffer page not" " allocated\n"); kunmap_atomic(vaddr, KM_PTE0); return -EFAULT; } copy_page(kdump_buf_page, vaddr); kunmap_atomic(vaddr, KM_PTE0); if (copy_to_user(buf, (kdump_buf_page + offset), csize)) return -EFAULT; } return csize; } static int __init kdump_buf_page_init(void) { int ret = 0; kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!kdump_buf_page) { printk(KERN_WARNING "Kdump: Failed to allocate kdump buffer" " page\n"); ret = -ENOMEM; } return ret; } arch_initcall(kdump_buf_page_init);
gpl-2.0
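Editor's note: is_crashed_pfn_valid above guards against a non-PAE capture kernel truncating PAE physical addresses: a pfn is usable only if shifting it left by PAGE_SHIFT still fits in a 32-bit unsigned long, i.e. the round trip through a pte preserves it. A sketch of the same high-bits check without any page-table types (PAGE_SHIFT of 12 assumed, as on x86):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* valid only if pfn << PAGE_SHIFT fits in 32 bits, i.e. the
 * top PAGE_SHIFT bits of the pfn are zero */
static bool pfn_fits_32bit(uint64_t pfn)
{
	uint32_t truncated = (uint32_t)(pfn << PAGE_SHIFT);
	return (truncated >> PAGE_SHIFT) == pfn;
}

int main(void)
{
	printf("%d %d\n",
	       pfn_fits_32bit(0x000FFFFF),  /* just below 4 GB: ok      */
	       pfn_fits_32bit(0x00100000)); /* 4 GB: would be truncated */
	return 0;
}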
garyd9/linux_kernel_sgh-i317
arch/arm/plat-samsung/irq-vic-timer.c
543
2484
/* arch/arm/plat-samsung/irq-vic-timer.c * originally part of arch/arm/plat-s3c64xx/irq.c * * Copyright 2008 Openmoko, Inc. * Copyright 2008 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * * S3C64XX - Interrupt handling * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/io.h> #include <mach/map.h> #include <plat/irq-vic-timer.h> #include <plat/regs-timer.h> #include <asm/mach/irq.h> static void s3c_irq_demux_vic_timer(unsigned int irq, struct irq_desc *desc) { struct irq_chip *chip = irq_get_chip(irq); chained_irq_enter(chip, desc); generic_handle_irq((int)desc->irq_data.handler_data); chained_irq_exit(chip, desc); } /* We assume the IRQ_TIMER0..IRQ_TIMER4 range is contiguous. */ static void s3c_irq_timer_ack(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); u32 mask = (1 << 5) << (d->irq - gc->irq_base); irq_reg_writel(mask | gc->mask_cache, gc->reg_base); } /** * s3c_init_vic_timer_irq() - initialise timer irq chained off the VIC. * @num: Number of timers to initialize * @timer_irq: Base IRQ number to be used for the timers. * * Register the necessary IRQ chaining and support for the timer IRQs * chained off the VIC. */ void __init s3c_init_vic_timer_irq(unsigned int num, unsigned int timer_irq) { unsigned int pirq[5] = { IRQ_TIMER0_VIC, IRQ_TIMER1_VIC, IRQ_TIMER2_VIC, IRQ_TIMER3_VIC, IRQ_TIMER4_VIC }; struct irq_chip_generic *s3c_tgc; struct irq_chip_type *ct; unsigned int i; s3c_tgc = irq_alloc_generic_chip("s3c-timer", 1, timer_irq, S3C64XX_TINT_CSTAT, handle_level_irq); if (!s3c_tgc) { pr_err("%s: irq_alloc_generic_chip for IRQ %d failed\n", __func__, timer_irq); return; } ct = s3c_tgc->chip_types; ct->chip.irq_mask = irq_gc_mask_clr_bit; ct->chip.irq_unmask = irq_gc_mask_set_bit; ct->chip.irq_ack = s3c_irq_timer_ack; irq_setup_generic_chip(s3c_tgc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST | IRQ_NOPROBE, 0); /* Clear the upper bits of the mask_cache */ s3c_tgc->mask_cache &= 0x1f; for (i = 0; i < num; i++, timer_irq++) { irq_set_chained_handler(pirq[i], s3c_irq_demux_vic_timer); irq_set_handler_data(pirq[i], (void *)timer_irq); } }
gpl-2.0
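Editor's note: s3c_irq_timer_ack above acknowledges a timer interrupt by writing one of five status bits that sit above the five mask bits in TINT_CSTAT: bit 5 + (irq - irq_base), OR'ed with the cached mask so the mask bits are preserved. A sketch of that register-value computation (the register layout is taken from the driver's usage; names are hypothetical):

#include <stdio.h>

/* assumed TINT_CSTAT layout, per the driver: bits 0..4 mask the five
 * timer IRQs, bits 5..9 are write-to-clear status bits for the same
 * timers */
static unsigned int ack_value(unsigned int mask_cache,
			      unsigned int irq, unsigned int irq_base)
{
	unsigned int ack = (1u << 5) << (irq - irq_base); /* this timer's status bit */
	return ack | mask_cache;                          /* keep the mask bits set  */
}

int main(void)
{
	/* ack timer 2 while timers 0..4 are unmasked (mask_cache = 0x1f) */
	printf("%#x\n", ack_value(0x1f, 34, 32));
	return 0;
}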
flaming-toast/unrm
drivers/net/usb/cx82310_eth.c
1055
9333
/* * Driver for USB ethernet port of Conexant CX82310-based ADSL routers * Copyright (C) 2010 by Ondrej Zary * some parts inspired by the cxacru driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/workqueue.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/usb/usbnet.h> enum cx82310_cmd { CMD_START = 0x84, /* no effect? */ CMD_STOP = 0x85, /* no effect? */ CMD_GET_STATUS = 0x90, /* returns nothing? */ CMD_GET_MAC_ADDR = 0x91, /* read MAC address */ CMD_GET_LINK_STATUS = 0x92, /* not useful, link is always up */ CMD_ETHERNET_MODE = 0x99, /* unknown, needed during init */ }; enum cx82310_status { STATUS_UNDEFINED, STATUS_SUCCESS, STATUS_ERROR, STATUS_UNSUPPORTED, STATUS_UNIMPLEMENTED, STATUS_PARAMETER_ERROR, STATUS_DBG_LOOPBACK, }; #define CMD_PACKET_SIZE 64 #define CMD_TIMEOUT 100 #define CMD_REPLY_RETRY 5 #define CX82310_MTU 1514 #define CMD_EP 0x01 /* * execute control command * - optionally send some data (command parameters) * - optionally wait for the reply * - optionally read some data from the reply */ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply, u8 *wdata, int wlen, u8 *rdata, int rlen) { int actual_len, retries, ret; struct usb_device *udev = dev->udev; u8 *buf = kzalloc(CMD_PACKET_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; /* create command packet */ buf[0] = cmd; if (wdata) memcpy(buf + 4, wdata, min_t(int, wlen, CMD_PACKET_SIZE - 4)); /* send command packet */ ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf, CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT); if (ret < 0) { if (cmd != CMD_GET_LINK_STATUS) dev_err(&dev->udev->dev, "send command %#x: error %d\n", cmd, ret); goto end; } if (reply) { /* wait for reply, retry if it's empty */ for (retries = 0; retries < CMD_REPLY_RETRY; retries++) { ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, CMD_EP), buf, CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT); if (ret < 0) { if (cmd != CMD_GET_LINK_STATUS) dev_err(&dev->udev->dev, "reply receive error %d\n", ret); goto end; } if (actual_len > 0) break; } if (actual_len == 0) { dev_err(&dev->udev->dev, "no reply to command %#x\n", cmd); ret = -EIO; goto end; } if (buf[0] != cmd) { dev_err(&dev->udev->dev, "got reply to command %#x, expected: %#x\n", buf[0], cmd); ret = -EIO; goto end; } if (buf[1] != STATUS_SUCCESS) { dev_err(&dev->udev->dev, "command %#x failed: %#x\n", cmd, buf[1]); ret = -EIO; goto end; } if (rdata) memcpy(rdata, buf + 4, min_t(int, rlen, CMD_PACKET_SIZE - 4)); } end: kfree(buf); return ret; } #define partial_len data[0] /* length of partial packet data */ #define partial_rem data[1] /* remaining (missing) data length */ #define partial_data data[2] /* partial packet data */ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf) { int ret; char buf[15]; struct usb_device *udev = dev->udev; u8 
link[3]; int timeout = 50; /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */ if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0 && strcmp(buf, "USB NET CARD")) { dev_info(&udev->dev, "ignoring: probably an ADSL modem\n"); return -ENODEV; } ret = usbnet_get_endpoints(dev, intf); if (ret) return ret; /* * this must not include ethernet header as the device can send partial * packets with no header (and sometimes even empty URBs) */ dev->net->hard_header_len = 0; /* we can send at most 1514 bytes of data (+ 2-byte header) per URB */ dev->hard_mtu = CX82310_MTU + 2; /* we can receive URBs up to 4KB from the device */ dev->rx_urb_size = 4096; dev->partial_data = (unsigned long) kmalloc(dev->hard_mtu, GFP_KERNEL); if (!dev->partial_data) return -ENOMEM; /* wait for firmware to become ready (indicated by the link being up) */ while (--timeout) { ret = cx82310_cmd(dev, CMD_GET_LINK_STATUS, true, NULL, 0, link, sizeof(link)); /* the command can time out during boot - it's not an error */ if (!ret && link[0] == 1 && link[2] == 1) break; msleep(500); } if (!timeout) { dev_err(&udev->dev, "firmware not ready in time\n"); return -ETIMEDOUT; } /* enable ethernet mode (?) */ ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0); if (ret) { dev_err(&udev->dev, "unable to enable ethernet mode: %d\n", ret); goto err; } /* get the MAC address */ ret = cx82310_cmd(dev, CMD_GET_MAC_ADDR, true, NULL, 0, dev->net->dev_addr, ETH_ALEN); if (ret) { dev_err(&udev->dev, "unable to read MAC address: %d\n", ret); goto err; } /* start (does not seem to have any effect?) */ ret = cx82310_cmd(dev, CMD_START, false, NULL, 0, NULL, 0); if (ret) goto err; return 0; err: kfree((void *)dev->partial_data); return ret; } static void cx82310_unbind(struct usbnet *dev, struct usb_interface *intf) { kfree((void *)dev->partial_data); } /* * RX is NOT easy - we can receive multiple packets per skb, each having 2-byte * packet length at the beginning. * The last packet might be incomplete (when it crosses the 4KB URB size), * continuing in the next skb (without any headers). * If a packet has odd length, there is one extra byte at the end (before next * packet or at the end of the URB). */ static int cx82310_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { int len; struct sk_buff *skb2; /* * If the last skb ended with an incomplete packet, this skb contains * end of that packet at the beginning. 
*/ if (dev->partial_rem) { len = dev->partial_len + dev->partial_rem; skb2 = alloc_skb(len, GFP_ATOMIC); if (!skb2) return 0; skb_put(skb2, len); memcpy(skb2->data, (void *)dev->partial_data, dev->partial_len); memcpy(skb2->data + dev->partial_len, skb->data, dev->partial_rem); usbnet_skb_return(dev, skb2); skb_pull(skb, (dev->partial_rem + 1) & ~1); dev->partial_rem = 0; if (skb->len < 2) return 1; } /* a skb can contain multiple packets */ while (skb->len > 1) { /* first two bytes are packet length */ len = skb->data[0] | (skb->data[1] << 8); skb_pull(skb, 2); /* if last packet in the skb, let usbnet process it */ if (len == skb->len || len + 1 == skb->len) { skb_trim(skb, len); break; } if (len > CX82310_MTU) { dev_err(&dev->udev->dev, "RX packet too long: %d B\n", len); return 0; } /* incomplete packet, save it for the next skb */ if (len > skb->len) { dev->partial_len = skb->len; dev->partial_rem = len - skb->len; memcpy((void *)dev->partial_data, skb->data, dev->partial_len); skb_pull(skb, skb->len); break; } skb2 = alloc_skb(len, GFP_ATOMIC); if (!skb2) return 0; skb_put(skb2, len); memcpy(skb2->data, skb->data, len); /* process the packet */ usbnet_skb_return(dev, skb2); skb_pull(skb, (len + 1) & ~1); } /* let usbnet process the last packet */ return 1; } /* TX is easy, just add 2 bytes of length at the beginning */ static struct sk_buff *cx82310_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { int len = skb->len; if (skb_headroom(skb) < 2) { struct sk_buff *skb2 = skb_copy_expand(skb, 2, 0, flags); dev_kfree_skb_any(skb); skb = skb2; if (!skb) return NULL; } skb_push(skb, 2); skb->data[0] = len; skb->data[1] = len >> 8; return skb; } static const struct driver_info cx82310_info = { .description = "Conexant CX82310 USB ethernet", .flags = FLAG_ETHER, .bind = cx82310_bind, .unbind = cx82310_unbind, .rx_fixup = cx82310_rx_fixup, .tx_fixup = cx82310_tx_fixup, }; #define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ USB_DEVICE_ID_MATCH_DEV_INFO, \ .idVendor = (vend), \ .idProduct = (prod), \ .bDeviceClass = (cl), \ .bDeviceSubClass = (sc), \ .bDeviceProtocol = (pr) static const struct usb_device_id products[] = { { USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0), .driver_info = (unsigned long) &cx82310_info }, { }, }; MODULE_DEVICE_TABLE(usb, products); static struct usb_driver cx82310_driver = { .name = "cx82310_eth", .id_table = products, .probe = usbnet_probe, .disconnect = usbnet_disconnect, .suspend = usbnet_suspend, .resume = usbnet_resume, .disable_hub_initiated_lpm = 1, }; module_usb_driver(cx82310_driver); MODULE_AUTHOR("Ondrej Zary"); MODULE_DESCRIPTION("Conexant CX82310-based ADSL router USB ethernet driver"); MODULE_LICENSE("GPL");
gpl-2.0
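Editor's note: cx82310_rx_fixup above parses URBs that carry several packets back to back, each prefixed with a 2-byte little-endian length and padded to an even boundary. A self-contained sketch of the same de-framing loop over a plain byte buffer (the cross-URB partial-packet handling is deliberately omitted; this only shows the framing):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* walk a buffer of [len_lo][len_hi][payload][pad?] frames */
static void deframe(const uint8_t *buf, size_t total)
{
	size_t off = 0;

	while (off + 2 <= total) {
		size_t len = buf[off] | (buf[off + 1] << 8); /* little endian */
		off += 2;
		if (off + len > total)
			break;                     /* partial frame: would wait for next URB */
		printf("frame of %zu bytes\n", len);
		off += (len + 1) & ~(size_t)1;     /* odd lengths carry one pad byte */
	}
}

int main(void)
{
	/* two frames: 3 bytes (padded to 4), then 2 bytes */
	const uint8_t buf[] = { 3, 0, 'a', 'b', 'c', 0, 2, 0, 'x', 'y' };
	deframe(buf, sizeof(buf));
	return 0;
}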
dmachaty/linux-bananapro
arch/mips/loongson32/common/reset.c
1055
1091
/* * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/io.h> #include <linux/pm.h> #include <asm/idle.h> #include <asm/reboot.h> #include <loongson1.h> static void __iomem *wdt_base; static void ls1x_halt(void) { while (1) { if (cpu_wait) cpu_wait(); } } static void ls1x_restart(char *command) { __raw_writel(0x1, wdt_base + WDT_EN); __raw_writel(0x1, wdt_base + WDT_TIMER); __raw_writel(0x1, wdt_base + WDT_SET); ls1x_halt(); } static void ls1x_power_off(void) { ls1x_halt(); } static int __init ls1x_reboot_setup(void) { wdt_base = ioremap_nocache(LS1X_WDT_BASE, 0x0f); if (!wdt_base) panic("Failed to remap watchdog registers"); _machine_restart = ls1x_restart; _machine_halt = ls1x_halt; pm_power_off = ls1x_power_off; return 0; } arch_initcall(ls1x_reboot_setup);
gpl-2.0
hsarkanen/linux-imx6
arch/cris/mm/init.c
1055
1632
/* * linux/arch/cris/mm/init.c * * Copyright (C) 1995 Linus Torvalds * Copyright (C) 2000,2001 Axis Communications AB * * Authors: Bjorn Wesen (bjornw@axis.com) * */ #include <linux/gfp.h> #include <linux/init.h> #include <linux/bootmem.h> #include <asm/tlb.h> #include <asm/sections.h> unsigned long empty_zero_page; void __init mem_init(void) { int codesize, reservedpages, datasize, initsize; unsigned long tmp; BUG_ON(!mem_map); /* max/min_low_pfn was set by setup.c * now we just copy it to some other necessary places... * * high_memory was also set in setup.c */ max_mapnr = num_physpages = max_low_pfn - min_low_pfn; /* this will put all memory onto the freelists */ totalram_pages = free_all_bootmem(); reservedpages = 0; for (tmp = 0; tmp < max_mapnr; tmp++) { /* * Only count reserved RAM pages */ if (PageReserved(mem_map + tmp)) reservedpages++; } codesize = (unsigned long) &_etext - (unsigned long) &_stext; datasize = (unsigned long) &_edata - (unsigned long) &_etext; initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, " "%dk init)\n" , nr_free_pages() << (PAGE_SHIFT-10), max_mapnr << (PAGE_SHIFT-10), codesize >> 10, reservedpages << (PAGE_SHIFT-10), datasize >> 10, initsize >> 10 ); } /* free the pages occupied by initialization code */ void free_initmem(void) { free_initmem_default(0); }
gpl-2.0
cosmicexplorer/linux
arch/powerpc/lib/xor_vmx.c
1823
3480
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) IBM Corporation, 2012 * * Author: Anton Blanchard <anton@au.ibm.com> */ #include <altivec.h> #include <linux/preempt.h> #include <linux/export.h> #include <linux/sched.h> #include <asm/switch_to.h> typedef vector signed char unative_t; #define DEFINE(V) \ unative_t *V = (unative_t *)V##_in; \ unative_t V##_0, V##_1, V##_2, V##_3 #define LOAD(V) \ do { \ V##_0 = V[0]; \ V##_1 = V[1]; \ V##_2 = V[2]; \ V##_3 = V[3]; \ } while (0) #define STORE(V) \ do { \ V[0] = V##_0; \ V[1] = V##_1; \ V[2] = V##_2; \ V[3] = V##_3; \ } while (0) #define XOR(V1, V2) \ do { \ V1##_0 = vec_xor(V1##_0, V2##_0); \ V1##_1 = vec_xor(V1##_1, V2##_1); \ V1##_2 = vec_xor(V1##_2, V2##_2); \ V1##_3 = vec_xor(V1##_3, V2##_3); \ } while (0) void xor_altivec_2(unsigned long bytes, unsigned long *v1_in, unsigned long *v2_in) { DEFINE(v1); DEFINE(v2); unsigned long lines = bytes / (sizeof(unative_t)) / 4; preempt_disable(); enable_kernel_altivec(); do { LOAD(v1); LOAD(v2); XOR(v1, v2); STORE(v1); v1 += 4; v2 += 4; } while (--lines > 0); preempt_enable(); } EXPORT_SYMBOL(xor_altivec_2); void xor_altivec_3(unsigned long bytes, unsigned long *v1_in, unsigned long *v2_in, unsigned long *v3_in) { DEFINE(v1); DEFINE(v2); DEFINE(v3); unsigned long lines = bytes / (sizeof(unative_t)) / 4; preempt_disable(); enable_kernel_altivec(); do { LOAD(v1); LOAD(v2); LOAD(v3); XOR(v1, v2); XOR(v1, v3); STORE(v1); v1 += 4; v2 += 4; v3 += 4; } while (--lines > 0); preempt_enable(); } EXPORT_SYMBOL(xor_altivec_3); void xor_altivec_4(unsigned long bytes, unsigned long *v1_in, unsigned long *v2_in, unsigned long *v3_in, unsigned long *v4_in) { DEFINE(v1); DEFINE(v2); DEFINE(v3); DEFINE(v4); unsigned long lines = bytes / (sizeof(unative_t)) / 4; preempt_disable(); enable_kernel_altivec(); do { LOAD(v1); LOAD(v2); LOAD(v3); LOAD(v4); XOR(v1, v2); XOR(v3, v4); XOR(v1, v3); STORE(v1); v1 += 4; v2 += 4; v3 += 4; v4 += 4; } while (--lines > 0); preempt_enable(); } EXPORT_SYMBOL(xor_altivec_4); void xor_altivec_5(unsigned long bytes, unsigned long *v1_in, unsigned long *v2_in, unsigned long *v3_in, unsigned long *v4_in, unsigned long *v5_in) { DEFINE(v1); DEFINE(v2); DEFINE(v3); DEFINE(v4); DEFINE(v5); unsigned long lines = bytes / (sizeof(unative_t)) / 4; preempt_disable(); enable_kernel_altivec(); do { LOAD(v1); LOAD(v2); LOAD(v3); LOAD(v4); LOAD(v5); XOR(v1, v2); XOR(v3, v4); XOR(v1, v5); XOR(v1, v3); STORE(v1); v1 += 4; v2 += 4; v3 += 4; v4 += 4; v5 += 4; } while (--lines > 0); preempt_enable(); } EXPORT_SYMBOL(xor_altivec_5);
gpl-2.0
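Editor's note: the AltiVec routines above unroll each XOR pass four vectors at a time inside a preempt-disabled region with the vector unit enabled. The same loop structure in portable scalar C, over unsigned long words instead of vector registers (a sketch of the unrolling only, not of the AltiVec context handling; bytes is assumed to be a multiple of 4 * sizeof(unsigned long), as the kernel callers guarantee):

#include <stdio.h>

/* XOR v2 into v1, four words per iteration, mirroring the
 * LOAD/XOR/STORE shape of xor_altivec_2() */
static void xor_2(unsigned long bytes, unsigned long *v1,
		  const unsigned long *v2)
{
	unsigned long lines = bytes / sizeof(unsigned long) / 4;

	do {
		v1[0] ^= v2[0];
		v1[1] ^= v2[1];
		v1[2] ^= v2[2];
		v1[3] ^= v2[3];
		v1 += 4;
		v2 += 4;
	} while (--lines > 0);
}

int main(void)
{
	unsigned long a[4] = { 1, 2, 3, 4 }, b[4] = { 4, 3, 2, 1 };
	xor_2(sizeof(a), a, b);
	printf("%lu %lu %lu %lu\n", a[0], a[1], a[2], a[3]);
	return 0;
}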
glewarne/S6-UniBase
drivers/net/ethernet/3com/3c59x.c
2079
104489
/* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux. */ /* Written 1996-1999 by Donald Becker. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. This driver is for the 3Com "Vortex" and "Boomerang" series ethercards. Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597 and the EtherLink XL 3c900 and 3c905 cards. Problem reports and questions should be directed to vortex@scyld.com The author may be reached as becker@scyld.com, or C/O Scyld Computing Corporation 410 Severn Ave., Suite 210 Annapolis MD 21403 */ /* * FIXME: This driver _could_ support MTU changing, but doesn't. See Don's hamachi.c implementation * as well as other drivers * * NOTE: If you make 'vortex_debug' a constant (#define vortex_debug 0) the driver shrinks by 2k * due to dead code elimination. There will be some performance benefits from this due to * elimination of all the tests and reduced cache footprint. */ #define DRV_NAME "3c59x" /* A few values that may be tweaked. */ /* Keep the ring sizes a power of two for efficiency. */ #define TX_RING_SIZE 16 #define RX_RING_SIZE 32 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ /* "Knobs" that adjust features and parameters. */ /* Set the copy breakpoint for the copy-only-tiny-frames scheme. Setting to > 1512 effectively disables this feature. */ #ifndef __arm__ static int rx_copybreak = 200; #else /* ARM systems perform better by disregarding the bus-master transfer capability of these cards. -- rmk */ static int rx_copybreak = 1513; #endif /* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */ static const int mtu = 1500; /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ static int max_interrupt_work = 32; /* Tx timeout interval (millisecs) */ static int watchdog = 5000; /* Allow aggregation of Tx interrupts. Saves CPU load at the cost * of possible Tx stalls if the system is blocking interrupts * somewhere else. Undefine this to disable. */ #define tx_interrupt_mitigation 1 /* Put out somewhat more debugging messages. (0: no msg, 1 minimal .. 6). */ #define vortex_debug debug #ifdef VORTEX_DEBUG static int vortex_debug = VORTEX_DEBUG; #else static int vortex_debug = 1; #endif #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/errno.h> #include <linux/in.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/mii.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/highmem.h> #include <linux/eisa.h> #include <linux/bitops.h> #include <linux/jiffies.h> #include <linux/gfp.h> #include <asm/irq.h> /* For nr_irqs only. */ #include <asm/io.h> #include <asm/uaccess.h> /* Kernel compatibility defines, some common to David Hinds' PCMCIA package. This is only in the support-all-kernels source code. */ #define RUN_AT(x) (jiffies + (x)) #include <linux/delay.h> static const char version[] = DRV_NAME ": Donald Becker and others.\n"; MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver "); MODULE_LICENSE("GPL"); /* Operational parameters that usually are not changed. */ /* The Vortex size is twice that of the original EtherLinkIII series: the runtime register window, window 1, is now always mapped in.
The Boomerang size is twice as large as the Vortex -- it has additional bus master control registers. */ #define VORTEX_TOTAL_SIZE 0x20 #define BOOMERANG_TOTAL_SIZE 0x40 /* Set iff a MII transceiver on any interface requires mdio preamble. This is only set with the original DP83840 on older 3c905 boards, so the extra code size of a per-interface flag is not worthwhile. */ static char mii_preamble_required; #define PFX DRV_NAME ": " /* Theory of Operation I. Board Compatibility This device driver is designed for the 3Com FastEtherLink and FastEtherLink XL, 3Com's PCI to 10/100baseT adapters. It also works with the 10Mbs versions of the FastEtherLink cards. The supported product IDs are 3c590, 3c592, 3c595, 3c597, 3c900, 3c905 The related ISA 3c515 is supported with a separate driver, 3c515.c, included with the kernel source or available from cesdis.gsfc.nasa.gov:/pub/linux/drivers/3c515.html II. Board-specific settings PCI bus devices are configured by the system at boot time, so no jumpers need to be set on the board. The system BIOS should be set to assign the PCI INTA signal to an otherwise unused system IRQ line. The EEPROM settings for media type and forced-full-duplex are observed. The EEPROM media type should be left at the default "autoselect" unless using 10base2 or AUI connections which cannot be reliably detected. III. Driver operation The 3c59x series use an interface that's very similar to the previous 3c5x9 series. The primary interface is two programmed-I/O FIFOs, with an alternate single-contiguous-region bus-master transfer (see next). The 3c900 "Boomerang" series uses a full-bus-master interface with separate lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet, DEC Tulip and Intel Speedo3. The first chip version retains a compatible programmed-I/O interface that has been removed in 'B' and subsequent board revisions. One extension that is advertised in a very large font is that the adapters are capable of being bus masters. On the Vortex chip this capability was only for a single contiguous region making it far less useful than the full bus master capability. There is a significant performance impact of taking an extra interrupt or polling for the completion of each transfer, as well as difficulty sharing the single transfer engine between the transmit and receive threads. Using DMA transfers is a win only with large blocks or with the flawed versions of the Intel Orion motherboard PCI controller. The Boomerang chip's full-bus-master interface is useful, and has the currently-unused advantages over other similar chips that queued transmit packets may be reordered and receive buffer groups are associated with a single frame. With full-bus-master support, this driver uses a "RX_COPYBREAK" scheme. Rather than a fixed intermediate receive buffer, this scheme allocates full-sized skbuffs as receive buffers. The value RX_COPYBREAK is used as the copying breakpoint: it is chosen to trade-off the memory wasted by passing the full-sized skbuff to the queue layer for all frames vs. the copying cost of copying a frame to a correctly-sized skbuff. IIIC. Synchronization The driver runs as two independent, single-threaded flows of control. One is the send-packet routine, which enforces single-threaded use by the dev->tbusy flag. The other thread is the interrupt handler, which is single threaded by the hardware and other software. IV. Notes Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development 3c590, 3c595, and 3c900 boards.
The name "Vortex" is the internal 3Com project name for the PCI ASIC, and the EISA version is called "Demon". According to Terry these names come from rides at the local amusement park. The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes! This driver only supports ethernet packets because of the skbuff allocation limit of 4K. */ /* This table drives the PCI probe routines. It's mostly boilerplate in all of the drivers, and will likely be provided by some future kernel. */ enum pci_flags_bit { PCI_USES_MASTER=4, }; enum { IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8, EEPROM_8BIT=0x10, /* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */ HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100, INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800, EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000, WNO_XCVR_PWR=0x4000, EXTRA_PREAMBLE=0x8000, EEPROM_RESET=0x10000, }; enum vortex_chips { CH_3C590 = 0, CH_3C592, CH_3C597, CH_3C595_1, CH_3C595_2, CH_3C595_3, CH_3C900_1, CH_3C900_2, CH_3C900_3, CH_3C900_4, CH_3C900_5, CH_3C900B_FL, CH_3C905_1, CH_3C905_2, CH_3C905B_TX, CH_3C905B_1, CH_3C905B_2, CH_3C905B_FX, CH_3C905C, CH_3C9202, CH_3C980, CH_3C9805, CH_3CSOHO100_TX, CH_3C555, CH_3C556, CH_3C556B, CH_3C575, CH_3C575_1, CH_3CCFE575, CH_3CCFE575CT, CH_3CCFE656, CH_3CCFEM656, CH_3CCFEM656_1, CH_3C450, CH_3C920, CH_3C982A, CH_3C982B, CH_905BT4, CH_920B_EMB_WNM, }; /* note: this array directly indexed by above enums, and MUST * be kept in sync with both the enums above, and the PCI device * table below */ static struct vortex_chip_info { const char *name; int flags; int drv_flags; int io_size; } vortex_info_tbl[] = { {"3c590 Vortex 10Mbps", PCI_USES_MASTER, IS_VORTEX, 32, }, {"3c592 EISA 10Mbps Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */ PCI_USES_MASTER, IS_VORTEX, 32, }, {"3c597 EISA Fast Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */ PCI_USES_MASTER, IS_VORTEX, 32, }, {"3c595 Vortex 100baseTx", PCI_USES_MASTER, IS_VORTEX, 32, }, {"3c595 Vortex 100baseT4", PCI_USES_MASTER, IS_VORTEX, 32, }, {"3c595 Vortex 100base-MII", PCI_USES_MASTER, IS_VORTEX, 32, }, {"3c900 Boomerang 10baseT", PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, }, {"3c900 Boomerang 10Mbps Combo", PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, }, {"3c900 Cyclone 10Mbps TPO", /* AKPM: from Don's 0.99M */ PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, {"3c900 Cyclone 10Mbps Combo", PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, {"3c900 Cyclone 10Mbps TPC", /* AKPM: from Don's 0.99M */ PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, {"3c900B-FL Cyclone 10base-FL", PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, {"3c905 Boomerang 100baseTx", PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, }, {"3c905 Boomerang 100baseT4", PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, }, {"3C905B-TX Fast Etherlink XL PCI", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, {"3c905B Cyclone 100baseTx", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, {"3c905B Cyclone 10/100/BNC", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, {"3c905B-FX Cyclone 100baseFx", PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, {"3c905C Tornado", PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, {"3c920B-EMB-WNM (ATI Radeon 9100 IGP)", PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, }, {"3c980 Cyclone", PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, {"3c980C Python-T", PCI_USES_MASTER, 
IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, {"3cSOHO100-TX Hurricane", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, {"3c555 Laptop Hurricane", PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, }, {"3c556 Laptop Tornado", PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR| HAS_HWCKSM, 128, }, {"3c556B Laptop Hurricane", PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR| WNO_XCVR_PWR|HAS_HWCKSM, 128, }, {"3c575 [Megahertz] 10/100 LAN CardBus", PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, }, {"3c575 Boomerang CardBus", PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, }, {"3CCFE575BT Cyclone CardBus", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT| INVERT_LED_PWR|HAS_HWCKSM, 128, }, {"3CCFE575CT Tornado CardBus", PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| MAX_COLLISION_RESET|HAS_HWCKSM, 128, }, {"3CCFE656 Cyclone CardBus", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| INVERT_LED_PWR|HAS_HWCKSM, 128, }, {"3CCFEM656B Cyclone+Winmodem CardBus", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| INVERT_LED_PWR|HAS_HWCKSM, 128, }, {"3CXFEM656C Tornado+Winmodem CardBus", /* From pcmcia-cs-3.1.5 */ PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| MAX_COLLISION_RESET|HAS_HWCKSM, 128, }, {"3c450 HomePNA Tornado", /* AKPM: from Don's 0.99Q */ PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, {"3c920 Tornado", PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, {"3c982 Hydra Dual Port A", PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, }, {"3c982 Hydra Dual Port B", PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, }, {"3c905B-T4", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, {"3c920B-EMB-WNM Tornado", PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, {NULL,}, /* NULL terminated list. 
*/ }; static DEFINE_PCI_DEVICE_TABLE(vortex_pci_tbl) = { { 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 }, { 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 }, { 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 }, { 0x10B7, 0x5950, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_1 }, { 0x10B7, 0x5951, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_2 }, { 0x10B7, 0x5952, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_3 }, { 0x10B7, 0x9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_1 }, { 0x10B7, 0x9001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_2 }, { 0x10B7, 0x9004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_3 }, { 0x10B7, 0x9005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_4 }, { 0x10B7, 0x9006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_5 }, { 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL }, { 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 }, { 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 }, { 0x10B7, 0x9054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_TX }, { 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 }, { 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 }, { 0x10B7, 0x905A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_FX }, { 0x10B7, 0x9200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905C }, { 0x10B7, 0x9202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9202 }, { 0x10B7, 0x9800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C980 }, { 0x10B7, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9805 }, { 0x10B7, 0x7646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CSOHO100_TX }, { 0x10B7, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C555 }, { 0x10B7, 0x6055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556 }, { 0x10B7, 0x6056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556B }, { 0x10B7, 0x5b57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575 }, { 0x10B7, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575_1 }, { 0x10B7, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575 }, { 0x10B7, 0x5257, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575CT }, { 0x10B7, 0x6560, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE656 }, { 0x10B7, 0x6562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656 }, { 0x10B7, 0x6564, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656_1 }, { 0x10B7, 0x4500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C450 }, { 0x10B7, 0x9201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C920 }, { 0x10B7, 0x1201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982A }, { 0x10B7, 0x1202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982B }, { 0x10B7, 0x9056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_905BT4 }, { 0x10B7, 0x9210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_920B_EMB_WNM }, {0,} /* 0 terminated list. */ }; MODULE_DEVICE_TABLE(pci, vortex_pci_tbl); /* Operational definitions. These are not used by other compilation units and thus are not exported in a ".h" file. First the windows. There are eight register windows, with the command and status registers available in each. */ #define EL3_CMD 0x0e #define EL3_STATUS 0x0e /* The top five bits written to EL3_CMD are a command, the lower 11 bits are the parameter, if applicable. Note that 11 parameter bits were fine for ethernet, but the new chip can handle FDDI length frames (~4500 octets) and now parameters count 32-bit 'Dwords' rather than octets.
*/ enum vortex_cmd { TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11, RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, UpStall = 6<<11, UpUnstall = (6<<11)+1, DownStall = (6<<11)+2, DownUnstall = (6<<11)+3, RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11, FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11, SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11, SetTxThreshold = 18<<11, SetTxStart = 19<<11, StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11, StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,}; /* The SetRxFilter command accepts the following classes: */ enum RxFilter { RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 }; /* Bits in the general status register. */ enum vortex_status { IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004, TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020, IntReq = 0x0040, StatsFull = 0x0080, DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10, DMAInProgress = 1<<11, /* DMA controller is still busy.*/ CmdInProgress = 1<<12, /* EL3_CMD is still busy.*/ }; /* Register window 1 offsets, the window used in normal operation. On the Vortex this window is always mapped at offsets 0x10-0x1f. */ enum Window1 { TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14, RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B, TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */ }; enum Window0 { Wn0EepromCmd = 10, /* Window 0: EEPROM command register. */ Wn0EepromData = 12, /* Window 0: EEPROM results register. */ IntrStatus=0x0E, /* Valid in all windows. */ }; enum Win0_EEPROM_bits { EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0, EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */ EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */ }; /* EEPROM locations. */ enum eeprom_offset { PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3, EtherLink3ID=7, IFXcvrIO=8, IRQLine=9, NodeAddr01=10, NodeAddr23=11, NodeAddr45=12, DriverTune=13, Checksum=15}; enum Window2 { /* Window 2. */ Wn2_ResetOptions=12, }; enum Window3 { /* Window 3: MAC/config bits. */ Wn3_Config=0, Wn3_MaxPktSize=4, Wn3_MAC_Ctrl=6, Wn3_Options=8, }; #define BFEXT(value, offset, bitcount) \ ((((unsigned long)(value)) >> (offset)) & ((1 << (bitcount)) - 1)) #define BFINS(lhs, rhs, offset, bitcount) \ (((lhs) & ~((((1 << (bitcount)) - 1)) << (offset))) | \ (((rhs) & ((1 << (bitcount)) - 1)) << (offset))) #define RAM_SIZE(v) BFEXT(v, 0, 3) #define RAM_WIDTH(v) BFEXT(v, 3, 1) #define RAM_SPEED(v) BFEXT(v, 4, 2) #define ROM_SIZE(v) BFEXT(v, 6, 2) #define RAM_SPLIT(v) BFEXT(v, 16, 2) #define XCVR(v) BFEXT(v, 20, 4) #define AUTOSELECT(v) BFEXT(v, 24, 1) enum Window4 { /* Window 4: Xcvr/media bits. */ Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10, }; enum Win4_Media_bits { Media_SQE = 0x0008, /* Enable SQE error counting for AUI. */ Media_10TP = 0x00C0, /* Enable link beat and jabber for 10baseT. */ Media_Lnk = 0x0080, /* Enable just link beat for 100TX/100FX. */ Media_LnkBeat = 0x0800, }; enum Window7 { /* Window 7: Bus Master control. */ Wn7_MasterAddr = 0, Wn7_VlanEtherType=4, Wn7_MasterLen = 6, Wn7_MasterStatus = 12, }; /* Boomerang bus master control registers. */ enum MasterCtrl { PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c, TxFreeThreshold = 0x2f, UpPktStatus = 0x30, UpListPtr = 0x38, }; /* The Rx and Tx descriptor lists. Caution Alpha hackers: these types are 32 bits! 
Note also the 8 byte alignment constraint on tx_ring[] and rx_ring[]. */ #define LAST_FRAG 0x80000000 /* Last Addr/Len pair in descriptor. */ #define DN_COMPLETE 0x00010000 /* This packet has been downloaded */ struct boom_rx_desc { __le32 next; /* Last entry points to 0. */ __le32 status; __le32 addr; /* Up to 63 addr/len pairs possible. */ __le32 length; /* Set LAST_FRAG to indicate last pair. */ }; /* Values for the Rx status entry. */ enum rx_desc_status { RxDComplete=0x00008000, RxDError=0x4000, /* See boomerang_rx() for actual error bits */ IPChksumErr=1<<25, TCPChksumErr=1<<26, UDPChksumErr=1<<27, IPChksumValid=1<<29, TCPChksumValid=1<<30, UDPChksumValid=1<<31, }; #ifdef MAX_SKB_FRAGS #define DO_ZEROCOPY 1 #else #define DO_ZEROCOPY 0 #endif struct boom_tx_desc { __le32 next; /* Last entry points to 0. */ __le32 status; /* bits 0:12 length, others see below. */ #if DO_ZEROCOPY struct { __le32 addr; __le32 length; } frag[1+MAX_SKB_FRAGS]; #else __le32 addr; __le32 length; #endif }; /* Values for the Tx status entry. */ enum tx_desc_status { CRCDisable=0x2000, TxDComplete=0x8000, AddIPChksum=0x02000000, AddTCPChksum=0x04000000, AddUDPChksum=0x08000000, TxIntrUploaded=0x80000000, /* IRQ when in FIFO, but maybe not sent. */ }; /* Chip features we care about in vp->capabilities, read from the EEPROM. */ enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 }; struct vortex_extra_stats { unsigned long tx_deferred; unsigned long tx_max_collisions; unsigned long tx_multiple_collisions; unsigned long tx_single_collisions; unsigned long rx_bad_ssd; }; struct vortex_private { /* The Rx and Tx rings should be quad-word-aligned. */ struct boom_rx_desc* rx_ring; struct boom_tx_desc* tx_ring; dma_addr_t rx_ring_dma; dma_addr_t tx_ring_dma; /* The addresses of transmit- and receive-in-place skbuffs. */ struct sk_buff* rx_skbuff[RX_RING_SIZE]; struct sk_buff* tx_skbuff[TX_RING_SIZE]; unsigned int cur_rx, cur_tx; /* The next free ring entry */ unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ struct vortex_extra_stats xstats; /* NIC-specific extra stats */ struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */ dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */ /* PCI configuration space information. */ struct device *gendev; void __iomem *ioaddr; /* IO address space */ void __iomem *cb_fn_base; /* CardBus function status addr space. */ /* Some values here only for performance evaluation and path-coverage */ int rx_nocopy, rx_copy, queued_packet, rx_csumhits; int card_idx; /* The remainder are related to chip state, mostly media selection. */ struct timer_list timer; /* Media selection timer. */ struct timer_list rx_oom_timer; /* Rx skb allocation retry timer */ int options; /* User-settable misc. driver options. */ unsigned int media_override:4, /* Passed-in media type. */ default_media:4, /* Read from the EEPROM/Wn3_Config. */ full_duplex:1, autoselect:1, bus_master:1, /* Vortex can only do a fragment bus-m. */ full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang */ flow_ctrl:1, /* Use 802.3x flow control (PAUSE only) */ partner_flow_ctrl:1, /* Partner supports flow control */ has_nway:1, enable_wol:1, /* Wake-on-LAN is enabled */ pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */ open:1, medialock:1, large_frames:1, /* accept large frames */ handling_irq:1; /* private in_irq indicator */ /* {get|set}_wol operations are already serialized by rtnl.
* no additional locking is required for the enable_wol and acpi_set_WOL() */ int drv_flags; u16 status_enable; u16 intr_enable; u16 available_media; /* From Wn3_Options. */ u16 capabilities, info1, info2; /* Various, from EEPROM. */ u16 advertising; /* NWay media advertisement */ unsigned char phys[2]; /* MII device addresses. */ u16 deferred; /* Resend these interrupts when we * bale from the ISR */ u16 io_size; /* Size of PCI region (for release_region) */ /* Serialises access to hardware other than MII and variables below. * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */ spinlock_t lock; spinlock_t mii_lock; /* Serialises access to MII */ struct mii_if_info mii; /* MII lib hooks/info */ spinlock_t window_lock; /* Serialises access to windowed regs */ int window; /* Register window */ }; static void window_set(struct vortex_private *vp, int window) { if (window != vp->window) { iowrite16(SelectWindow + window, vp->ioaddr + EL3_CMD); vp->window = window; } } #define DEFINE_WINDOW_IO(size) \ static u ## size \ window_read ## size(struct vortex_private *vp, int window, int addr) \ { \ unsigned long flags; \ u ## size ret; \ spin_lock_irqsave(&vp->window_lock, flags); \ window_set(vp, window); \ ret = ioread ## size(vp->ioaddr + addr); \ spin_unlock_irqrestore(&vp->window_lock, flags); \ return ret; \ } \ static void \ window_write ## size(struct vortex_private *vp, u ## size value, \ int window, int addr) \ { \ unsigned long flags; \ spin_lock_irqsave(&vp->window_lock, flags); \ window_set(vp, window); \ iowrite ## size(value, vp->ioaddr + addr); \ spin_unlock_irqrestore(&vp->window_lock, flags); \ } DEFINE_WINDOW_IO(8) DEFINE_WINDOW_IO(16) DEFINE_WINDOW_IO(32) #ifdef CONFIG_PCI #define DEVICE_PCI(dev) (((dev)->bus == &pci_bus_type) ? to_pci_dev((dev)) : NULL) #else #define DEVICE_PCI(dev) NULL #endif #define VORTEX_PCI(vp) \ ((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL)) #ifdef CONFIG_EISA #define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL) #else #define DEVICE_EISA(dev) NULL #endif #define VORTEX_EISA(vp) \ ((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL)) /* The action to take with a media selection timer tick. Note that we deviate from the 3Com order by checking 10base2 before AUI. */ enum xcvr_types { XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx, XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10, }; static const struct media_table { char *name; unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */ mask:8, /* The transceiver-present bit in Wn3_Config.*/ next:8; /* The media type to try next. */ int wait; /* Time before we check media status. 
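Expressed in jiffies.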
*/ } media_tbl[] = { { "10baseT", Media_10TP,0x08, XCVR_10base2, (14*HZ)/10}, { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10}, { "undefined", 0, 0x80, XCVR_10baseT, 10000}, { "10base2", 0, 0x10, XCVR_AUI, (1*HZ)/10}, { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10}, { "100baseFX", Media_Lnk, 0x04, XCVR_MII, (14*HZ)/10}, { "MII", 0, 0x41, XCVR_10baseT, 3*HZ }, { "undefined", 0, 0x01, XCVR_10baseT, 10000}, { "Autonegotiate", 0, 0x41, XCVR_10baseT, 3*HZ}, { "MII-External", 0, 0x41, XCVR_10baseT, 3*HZ }, { "Default", 0, 0xFF, XCVR_10baseT, 10000}, }; static struct { const char str[ETH_GSTRING_LEN]; } ethtool_stats_keys[] = { { "tx_deferred" }, { "tx_max_collisions" }, { "tx_multiple_collisions" }, { "tx_single_collisions" }, { "rx_bad_ssd" }, }; /* number of ETHTOOL_GSTATS u64's */ #define VORTEX_NUM_STATS 5 static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, int chip_idx, int card_idx); static int vortex_up(struct net_device *dev); static void vortex_down(struct net_device *dev, int final); static int vortex_open(struct net_device *dev); static void mdio_sync(struct vortex_private *vp, int bits); static int mdio_read(struct net_device *dev, int phy_id, int location); static void mdio_write(struct net_device *vp, int phy_id, int location, int value); static void vortex_timer(unsigned long arg); static void rx_oom_timer(unsigned long arg); static netdev_tx_t vortex_start_xmit(struct sk_buff *skb, struct net_device *dev); static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev); static int vortex_rx(struct net_device *dev); static int boomerang_rx(struct net_device *dev); static irqreturn_t vortex_interrupt(int irq, void *dev_id); static irqreturn_t boomerang_interrupt(int irq, void *dev_id); static int vortex_close(struct net_device *dev); static void dump_tx_ring(struct net_device *dev); static void update_stats(void __iomem *ioaddr, struct net_device *dev); static struct net_device_stats *vortex_get_stats(struct net_device *dev); static void set_rx_mode(struct net_device *dev); #ifdef CONFIG_PCI static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); #endif static void vortex_tx_timeout(struct net_device *dev); static void acpi_set_WOL(struct net_device *dev); static const struct ethtool_ops vortex_ethtool_ops; static void set_8021q_mode(struct net_device *dev, int enable); /* This driver uses 'options' to pass the media type, full-duplex flag, etc. */ /* Option count limit only -- unlimited interfaces are supported. */ #define MAX_UNITS 8 static int options[MAX_UNITS] = { [0 ... MAX_UNITS-1] = -1 }; static int full_duplex[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 }; static int hw_checksums[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 }; static int flow_ctrl[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 }; static int enable_wol[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 }; static int use_mmio[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 }; static int global_options = -1; static int global_full_duplex = -1; static int global_enable_wol = -1; static int global_use_mmio = -1; /* Variables to work-around the Compaq PCI BIOS32 problem. 
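Some Compaq BIOSes fail to enumerate the NIC, so its I/O base, IRQ and device ID can instead be passed in by hand via the module parameters below.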
*/ static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900; static struct net_device *compaq_net_device; static int vortex_cards_found; module_param(debug, int, 0); module_param(global_options, int, 0); module_param_array(options, int, NULL, 0); module_param(global_full_duplex, int, 0); module_param_array(full_duplex, int, NULL, 0); module_param_array(hw_checksums, int, NULL, 0); module_param_array(flow_ctrl, int, NULL, 0); module_param(global_enable_wol, int, 0); module_param_array(enable_wol, int, NULL, 0); module_param(rx_copybreak, int, 0); module_param(max_interrupt_work, int, 0); module_param(compaq_ioaddr, int, 0); module_param(compaq_irq, int, 0); module_param(compaq_device_id, int, 0); module_param(watchdog, int, 0); module_param(global_use_mmio, int, 0); module_param_array(use_mmio, int, NULL, 0); MODULE_PARM_DESC(debug, "3c59x debug level (0-6)"); MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex"); MODULE_PARM_DESC(global_options, "3c59x: same as options, but applies to all NICs if options is unset"); MODULE_PARM_DESC(full_duplex, "3c59x full duplex setting(s) (1)"); MODULE_PARM_DESC(global_full_duplex, "3c59x: same as full_duplex, but applies to all NICs if full_duplex is unset"); MODULE_PARM_DESC(hw_checksums, "3c59x Hardware checksum checking by adapter(s) (0-1)"); MODULE_PARM_DESC(flow_ctrl, "3c59x 802.3x flow control usage (PAUSE only) (0-1)"); MODULE_PARM_DESC(enable_wol, "3c59x: Turn on Wake-on-LAN for adapter(s) (0-1)"); MODULE_PARM_DESC(global_enable_wol, "3c59x: same as enable_wol, but applies to all NICs if enable_wol is unset"); MODULE_PARM_DESC(rx_copybreak, "3c59x copy breakpoint for copy-only-tiny-frames"); MODULE_PARM_DESC(max_interrupt_work, "3c59x maximum events handled per interrupt"); MODULE_PARM_DESC(compaq_ioaddr, "3c59x PCI I/O base address (Compaq BIOS problem workaround)"); MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)"); MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)"); MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds"); MODULE_PARM_DESC(global_use_mmio, "3c59x: same as use_mmio, but applies to all NICs if use_mmio is unset"); MODULE_PARM_DESC(use_mmio, "3c59x: use memory-mapped PCI I/O resource (0-1)"); #ifdef CONFIG_NET_POLL_CONTROLLER static void poll_vortex(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); unsigned long flags; local_irq_save(flags); (vp->full_bus_master_rx ?
boomerang_interrupt:vortex_interrupt)(dev->irq,dev); local_irq_restore(flags); } #endif #ifdef CONFIG_PM static int vortex_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *ndev = pci_get_drvdata(pdev); if (!ndev || !netif_running(ndev)) return 0; netif_device_detach(ndev); vortex_down(ndev, 1); return 0; } static int vortex_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *ndev = pci_get_drvdata(pdev); int err; if (!ndev || !netif_running(ndev)) return 0; err = vortex_up(ndev); if (err) return err; netif_device_attach(ndev); return 0; } static const struct dev_pm_ops vortex_pm_ops = { .suspend = vortex_suspend, .resume = vortex_resume, .freeze = vortex_suspend, .thaw = vortex_resume, .poweroff = vortex_suspend, .restore = vortex_resume, }; #define VORTEX_PM_OPS (&vortex_pm_ops) #else /* !CONFIG_PM */ #define VORTEX_PM_OPS NULL #endif /* !CONFIG_PM */ #ifdef CONFIG_EISA static struct eisa_device_id vortex_eisa_ids[] = { { "TCM5920", CH_3C592 }, { "TCM5970", CH_3C597 }, { "" } }; MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids); static int __init vortex_eisa_probe(struct device *device) { void __iomem *ioaddr; struct eisa_device *edev; edev = to_eisa_device(device); if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME)) return -EBUSY; ioaddr = ioport_map(edev->base_addr, VORTEX_TOTAL_SIZE); if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 0xC88) >> 12, edev->id.driver_data, vortex_cards_found)) { release_region(edev->base_addr, VORTEX_TOTAL_SIZE); return -ENODEV; } vortex_cards_found++; return 0; } static int vortex_eisa_remove(struct device *device) { struct eisa_device *edev; struct net_device *dev; struct vortex_private *vp; void __iomem *ioaddr; edev = to_eisa_device(device); dev = eisa_get_drvdata(edev); if (!dev) { pr_err("vortex_eisa_remove called for Compaq device!\n"); BUG(); } vp = netdev_priv(dev); ioaddr = vp->ioaddr; unregister_netdev(dev); iowrite16(TotalReset|0x14, ioaddr + EL3_CMD); release_region(edev->base_addr, VORTEX_TOTAL_SIZE); free_netdev(dev); return 0; } static struct eisa_driver vortex_eisa_driver = { .id_table = vortex_eisa_ids, .driver = { .name = "3c59x", .probe = vortex_eisa_probe, .remove = vortex_eisa_remove } }; #endif /* CONFIG_EISA */ /* returns count found (>= 0), or negative on error */ static int __init vortex_eisa_init(void) { int eisa_found = 0; int orig_cards_found = vortex_cards_found; #ifdef CONFIG_EISA int err; err = eisa_driver_register (&vortex_eisa_driver); if (!err) { /* * Because of the way the EISA bus is probed, we cannot assume * any device has been found when we exit from * eisa_driver_register (the bus root driver may not be * initialized yet). So we blindly assume something was * found, and let the sysfs magic happen... */ eisa_found = 1; } #endif /* Special code to work-around the Compaq PCI BIOS32 problem.
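If the user supplied compaq_ioaddr, probe that address directly rather than relying on PCI enumeration.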
*/ if (compaq_ioaddr) { vortex_probe1(NULL, ioport_map(compaq_ioaddr, VORTEX_TOTAL_SIZE), compaq_irq, compaq_device_id, vortex_cards_found++); } return vortex_cards_found - orig_cards_found + eisa_found; } /* returns count (>= 0), or negative on error */ static int vortex_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int rc, unit, pci_bar; struct vortex_chip_info *vci; void __iomem *ioaddr; /* wake up and enable device */ rc = pci_enable_device(pdev); if (rc < 0) goto out; rc = pci_request_regions(pdev, DRV_NAME); if (rc < 0) { pci_disable_device(pdev); goto out; } unit = vortex_cards_found; if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) { /* Determine the default if the user didn't override us */ vci = &vortex_info_tbl[ent->driver_data]; pci_bar = vci->drv_flags & (IS_CYCLONE | IS_TORNADO) ? 1 : 0; } else if (unit < MAX_UNITS && use_mmio[unit] >= 0) pci_bar = use_mmio[unit] ? 1 : 0; else pci_bar = global_use_mmio ? 1 : 0; ioaddr = pci_iomap(pdev, pci_bar, 0); if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */ ioaddr = pci_iomap(pdev, 0, 0); if (!ioaddr) { pci_release_regions(pdev); pci_disable_device(pdev); rc = -ENOMEM; goto out; } rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq, ent->driver_data, unit); if (rc < 0) { pci_iounmap(pdev, ioaddr); pci_release_regions(pdev); pci_disable_device(pdev); goto out; } vortex_cards_found++; out: return rc; } static const struct net_device_ops boomrang_netdev_ops = { .ndo_open = vortex_open, .ndo_stop = vortex_close, .ndo_start_xmit = boomerang_start_xmit, .ndo_tx_timeout = vortex_tx_timeout, .ndo_get_stats = vortex_get_stats, #ifdef CONFIG_PCI .ndo_do_ioctl = vortex_ioctl, #endif .ndo_set_rx_mode = set_rx_mode, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = poll_vortex, #endif }; static const struct net_device_ops vortex_netdev_ops = { .ndo_open = vortex_open, .ndo_stop = vortex_close, .ndo_start_xmit = vortex_start_xmit, .ndo_tx_timeout = vortex_tx_timeout, .ndo_get_stats = vortex_get_stats, #ifdef CONFIG_PCI .ndo_do_ioctl = vortex_ioctl, #endif .ndo_set_rx_mode = set_rx_mode, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = poll_vortex, #endif }; /* * Start up the PCI/EISA device which is described by *gendev. * Return 0 on success. * * NOTE: pdev can be NULL, for the case of a Compaq device */ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, int chip_idx, int card_idx) { struct vortex_private *vp; int option; unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */ int i, step; struct net_device *dev; static int printed_version; int retval, print_info; struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx]; const char *print_name = "3c59x"; struct pci_dev *pdev = NULL; struct eisa_device *edev = NULL; if (!printed_version) { pr_info("%s", version); printed_version = 1; } if (gendev) { if ((pdev = DEVICE_PCI(gendev))) { print_name = pci_name(pdev); } if ((edev = DEVICE_EISA(gendev))) { print_name = dev_name(&edev->dev); } } dev = alloc_etherdev(sizeof(*vp)); retval = -ENOMEM; if (!dev) goto out; SET_NETDEV_DEV(dev, gendev); vp = netdev_priv(dev); option = global_options; /* The lower four bits are the media type. 
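Bit 4 requests bus mastering, bit 9 full duplex, bit 10 Wake-on-LAN, and bits 14/15 force the debug level, as decoded below.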
*/ if (dev->mem_start) { /* * The 'options' param is passed in as the third arg to the * LILO 'ether=' argument for non-modular use */ option = dev->mem_start; } else if (card_idx < MAX_UNITS) { if (options[card_idx] >= 0) option = options[card_idx]; } if (option > 0) { if (option & 0x8000) vortex_debug = 7; if (option & 0x4000) vortex_debug = 2; if (option & 0x0400) vp->enable_wol = 1; } print_info = (vortex_debug > 1); if (print_info) pr_info("See Documentation/networking/vortex.txt\n"); pr_info("%s: 3Com %s %s at %p.\n", print_name, pdev ? "PCI" : "EISA", vci->name, ioaddr); dev->base_addr = (unsigned long)ioaddr; dev->irq = irq; dev->mtu = mtu; vp->ioaddr = ioaddr; vp->large_frames = mtu > 1500; vp->drv_flags = vci->drv_flags; vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0; vp->io_size = vci->io_size; vp->card_idx = card_idx; vp->window = -1; /* module list only for Compaq device */ if (gendev == NULL) { compaq_net_device = dev; } /* PCI-only startup logic */ if (pdev) { /* enable bus-mastering if necessary */ if (vci->flags & PCI_USES_MASTER) pci_set_master(pdev); if (vci->drv_flags & IS_VORTEX) { u8 pci_latency; u8 new_latency = 248; /* Check the PCI latency value. On the 3c590 series the latency timer must be set to the maximum value to avoid data corruption that occurs when the timer expires during a transfer. This bug exists in the Vortex chip only. */ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency); if (pci_latency < new_latency) { pr_info("%s: Overriding PCI latency timer (CFLT) setting of %d, new value is %d.\n", print_name, pci_latency, new_latency); pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency); } } } spin_lock_init(&vp->lock); spin_lock_init(&vp->mii_lock); spin_lock_init(&vp->window_lock); vp->gendev = gendev; vp->mii.dev = dev; vp->mii.mdio_read = mdio_read; vp->mii.mdio_write = mdio_write; vp->mii.phy_id_mask = 0x1f; vp->mii.reg_num_mask = 0x1f; /* Makes sure rings are at least 16 byte aligned. */ vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE + sizeof(struct boom_tx_desc) * TX_RING_SIZE, &vp->rx_ring_dma); retval = -ENOMEM; if (!vp->rx_ring) goto free_device; vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE); vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE; /* if we are a PCI driver, we store info in pdev->driver_data * instead of a module list */ if (pdev) pci_set_drvdata(pdev, dev); if (edev) eisa_set_drvdata(edev, dev); vp->media_override = 7; if (option >= 0) { vp->media_override = ((option & 7) == 2) ? 0 : option & 15; if (vp->media_override != 7) vp->medialock = 1; vp->full_duplex = (option & 0x200) ? 1 : 0; vp->bus_master = (option & 16) ? 1 : 0; } if (global_full_duplex > 0) vp->full_duplex = 1; if (global_enable_wol > 0) vp->enable_wol = 1; if (card_idx < MAX_UNITS) { if (full_duplex[card_idx] > 0) vp->full_duplex = 1; if (flow_ctrl[card_idx] > 0) vp->flow_ctrl = 1; if (enable_wol[card_idx] > 0) vp->enable_wol = 1; } vp->mii.force_media = vp->full_duplex; vp->options = option; /* Read the station address from the EEPROM. */ { int base; if (vci->drv_flags & EEPROM_8BIT) base = 0x230; else if (vci->drv_flags & EEPROM_OFFSET) base = EEPROM_Read + 0x30; else base = EEPROM_Read; for (i = 0; i < 0x40; i++) { int timer; window_write16(vp, base + i, 0, Wn0EepromCmd); /* Pause for at least 162 us for the read to take place.
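The chip holds the busy bit (0x8000) in Wn0EepromCmd while the access is in flight; the loop below polls until it clears.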
*/ for (timer = 10; timer >= 0; timer--) { udelay(162); if ((window_read16(vp, 0, Wn0EepromCmd) & 0x8000) == 0) break; } eeprom[i] = window_read16(vp, 0, Wn0EepromData); } } for (i = 0; i < 0x18; i++) checksum ^= eeprom[i]; checksum = (checksum ^ (checksum >> 8)) & 0xff; if (checksum != 0x00) { /* Grrr, needless incompatible change 3Com. */ while (i < 0x21) checksum ^= eeprom[i++]; checksum = (checksum ^ (checksum >> 8)) & 0xff; } if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO)) pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum); for (i = 0; i < 3; i++) ((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]); if (print_info) pr_cont(" %pM", dev->dev_addr); /* Unfortunately an all zero eeprom passes the checksum and this gets found in the wild in failure cases. Crypto is hard 8) */ if (!is_valid_ether_addr(dev->dev_addr)) { retval = -EINVAL; pr_err("*** EEPROM MAC address is invalid.\n"); goto free_ring; /* With every pack */ } for (i = 0; i < 6; i++) window_write8(vp, dev->dev_addr[i], 2, i); if (print_info) pr_cont(", IRQ %d\n", dev->irq); /* Tell them about an invalid IRQ. */ if (dev->irq <= 0 || dev->irq >= nr_irqs) pr_warning(" *** Warning: IRQ %d is unlikely to work! ***\n", dev->irq); step = (window_read8(vp, 4, Wn4_NetDiag) & 0x1e) >> 1; if (print_info) { pr_info(" product code %02x%02x rev %02x.%d date %02d-%02d-%02d\n", eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14], step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9); } if (pdev && vci->drv_flags & HAS_CB_FNS) { unsigned short n; vp->cb_fn_base = pci_iomap(pdev, 2, 0); if (!vp->cb_fn_base) { retval = -ENOMEM; goto free_ring; } if (print_info) { pr_info("%s: CardBus functions mapped %16.16llx->%p\n", print_name, (unsigned long long)pci_resource_start(pdev, 2), vp->cb_fn_base); } n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010; if (vp->drv_flags & INVERT_LED_PWR) n |= 0x10; if (vp->drv_flags & INVERT_MII_PWR) n |= 0x4000; window_write16(vp, n, 2, Wn2_ResetOptions); if (vp->drv_flags & WNO_XCVR_PWR) { window_write16(vp, 0x0800, 0, 0); } } /* Extract our information from the EEPROM data. */ vp->info1 = eeprom[13]; vp->info2 = eeprom[15]; vp->capabilities = eeprom[16]; if (vp->info1 & 0x8000) { vp->full_duplex = 1; if (print_info) pr_info("Full duplex capable\n"); } { static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; unsigned int config; vp->available_media = window_read16(vp, 3, Wn3_Options); if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */ vp->available_media = 0x40; config = window_read32(vp, 3, Wn3_Config); if (print_info) { pr_debug(" Internal config register is %4.4x, transceivers %#x.\n", config, window_read16(vp, 3, Wn3_Options)); pr_info(" %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n", 8 << RAM_SIZE(config), RAM_WIDTH(config) ? "word" : "byte", ram_split[RAM_SPLIT(config)], AUTOSELECT(config) ? "autoselect/" : "", XCVR(config) > XCVR_ExtMII ? 
"<invalid transceiver>" : media_tbl[XCVR(config)].name); } vp->default_media = XCVR(config); if (vp->default_media == XCVR_NWAY) vp->has_nway = 1; vp->autoselect = AUTOSELECT(config); } if (vp->media_override != 7) { pr_info("%s: Media override to transceiver type %d (%s).\n", print_name, vp->media_override, media_tbl[vp->media_override].name); dev->if_port = vp->media_override; } else dev->if_port = vp->default_media; if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) || dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) { int phy, phy_idx = 0; mii_preamble_required++; if (vp->drv_flags & EXTRA_PREAMBLE) mii_preamble_required++; mdio_sync(vp, 32); mdio_read(dev, 24, MII_BMSR); for (phy = 0; phy < 32 && phy_idx < 1; phy++) { int mii_status, phyx; /* * For the 3c905CX we look at index 24 first, because it bogusly * reports an external PHY at all indices */ if (phy == 0) phyx = 24; else if (phy <= 24) phyx = phy - 1; else phyx = phy; mii_status = mdio_read(dev, phyx, MII_BMSR); if (mii_status && mii_status != 0xffff) { vp->phys[phy_idx++] = phyx; if (print_info) { pr_info(" MII transceiver found at address %d, status %4x.\n", phyx, mii_status); } if ((mii_status & 0x0040) == 0) mii_preamble_required++; } } mii_preamble_required--; if (phy_idx == 0) { pr_warning(" ***WARNING*** No MII transceivers found!\n"); vp->phys[0] = 24; } else { vp->advertising = mdio_read(dev, vp->phys[0], MII_ADVERTISE); if (vp->full_duplex) { /* Only advertise the FD media types. */ vp->advertising &= ~0x02A0; mdio_write(dev, vp->phys[0], 4, vp->advertising); } } vp->mii.phy_id = vp->phys[0]; } if (vp->capabilities & CapBusMaster) { vp->full_bus_master_tx = 1; if (print_info) { pr_info(" Enabling bus-master transmits and %s receives.\n", (vp->info2 & 1) ? "early" : "whole-frame" ); } vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2; vp->bus_master = 0; /* AKPM: vortex only */ } /* The 3c59x-specific entries in the device structure. */ if (vp->full_bus_master_tx) { dev->netdev_ops = &boomrang_netdev_ops; /* Actually, it still should work with iommu. */ if (card_idx < MAX_UNITS && ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) || hw_checksums[card_idx] == 1)) { dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; } } else dev->netdev_ops = &vortex_netdev_ops; if (print_info) { pr_info("%s: scatter/gather %sabled. h/w checksums %sabled\n", print_name, (dev->features & NETIF_F_SG) ? "en":"dis", (dev->features & NETIF_F_IP_CSUM) ? "en":"dis"); } dev->ethtool_ops = &vortex_ethtool_ops; dev->watchdog_timeo = (watchdog * HZ) / 1000; if (pdev) { vp->pm_state_valid = 1; pci_save_state(VORTEX_PCI(vp)); acpi_set_WOL(dev); } retval = register_netdev(dev); if (retval == 0) return 0; free_ring: pci_free_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE + sizeof(struct boom_tx_desc) * TX_RING_SIZE, vp->rx_ring, vp->rx_ring_dma); free_device: free_netdev(dev); pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval); out: return retval; } static void issue_and_wait(struct net_device *dev, int cmd) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; int i; iowrite16(cmd, ioaddr + EL3_CMD); for (i = 0; i < 2000; i++) { if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) return; } /* OK, that didn't work. Do it the slow way. 
One second */ for (i = 0; i < 100000; i++) { if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) { if (vortex_debug > 1) pr_info("%s: command 0x%04x took %d usecs\n", dev->name, cmd, i * 10); return; } udelay(10); } pr_err("%s: command 0x%04x did not complete! Status=0x%x\n", dev->name, cmd, ioread16(ioaddr + EL3_STATUS)); } static void vortex_set_duplex(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); pr_info("%s: setting %s-duplex.\n", dev->name, (vp->full_duplex) ? "full" : "half"); /* Set the full-duplex bit. */ window_write16(vp, ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) | (vp->large_frames ? 0x40 : 0) | ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0), 3, Wn3_MAC_Ctrl); } static void vortex_check_media(struct net_device *dev, unsigned int init) { struct vortex_private *vp = netdev_priv(dev); unsigned int ok_to_print = 0; if (vortex_debug > 3) ok_to_print = 1; if (mii_check_media(&vp->mii, ok_to_print, init)) { vp->full_duplex = vp->mii.full_duplex; vortex_set_duplex(dev); } else if (init) { vortex_set_duplex(dev); } } static int vortex_up(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; unsigned int config; int i, mii_reg1, mii_reg5, err = 0; if (VORTEX_PCI(vp)) { pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */ if (vp->pm_state_valid) pci_restore_state(VORTEX_PCI(vp)); err = pci_enable_device(VORTEX_PCI(vp)); if (err) { pr_warning("%s: Could not enable device\n", dev->name); goto err_out; } } /* Before initializing select the active media port. */ config = window_read32(vp, 3, Wn3_Config); if (vp->media_override != 7) { pr_info("%s: Media override to transceiver %d (%s).\n", dev->name, vp->media_override, media_tbl[vp->media_override].name); dev->if_port = vp->media_override; } else if (vp->autoselect) { if (vp->has_nway) { if (vortex_debug > 1) pr_info("%s: using NWAY device table, not %d\n", dev->name, dev->if_port); dev->if_port = XCVR_NWAY; } else { /* Find first available media type, starting with 100baseTx. */ dev->if_port = XCVR_100baseTx; while (! (vp->available_media & media_tbl[dev->if_port].mask)) dev->if_port = media_tbl[dev->if_port].next; if (vortex_debug > 1) pr_info("%s: first available media type: %s\n", dev->name, media_tbl[dev->if_port].name); } } else { dev->if_port = vp->default_media; if (vortex_debug > 1) pr_info("%s: using default media %s\n", dev->name, media_tbl[dev->if_port].name); } init_timer(&vp->timer); vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait); vp->timer.data = (unsigned long)dev; vp->timer.function = vortex_timer; /* timer handler */ add_timer(&vp->timer); init_timer(&vp->rx_oom_timer); vp->rx_oom_timer.data = (unsigned long)dev; vp->rx_oom_timer.function = rx_oom_timer; if (vortex_debug > 1) pr_debug("%s: Initial media type %s.\n", dev->name, media_tbl[dev->if_port].name); vp->full_duplex = vp->mii.force_media; config = BFINS(config, dev->if_port, 20, 4); if (vortex_debug > 6) pr_debug("vortex_up(): writing 0x%x to InternalConfig\n", config); window_write32(vp, config, 3, Wn3_Config); if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) { mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR); mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA); vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0); vp->mii.full_duplex = vp->full_duplex; vortex_check_media(dev, 1); } else vortex_set_duplex(dev); issue_and_wait(dev, TxReset); /* * Don't reset the PHY - that upsets autonegotiation during DHCP operations. 
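* (the 0x04 option bit on the RxReset below presumably tells the chip to skip the PHY portion of the reset).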
*/ issue_and_wait(dev, RxReset|0x04); iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD); if (vortex_debug > 1) { pr_debug("%s: vortex_up() irq %d media status %4.4x.\n", dev->name, dev->irq, window_read16(vp, 4, Wn4_Media)); } /* Set the station address and mask in window 2 each time opened. */ for (i = 0; i < 6; i++) window_write8(vp, dev->dev_addr[i], 2, i); for (; i < 12; i+=2) window_write16(vp, 0, 2, i); if (vp->cb_fn_base) { unsigned short n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010; if (vp->drv_flags & INVERT_LED_PWR) n |= 0x10; if (vp->drv_flags & INVERT_MII_PWR) n |= 0x4000; window_write16(vp, n, 2, Wn2_ResetOptions); } if (dev->if_port == XCVR_10base2) /* Start the thinnet transceiver. We should really wait 50ms...*/ iowrite16(StartCoax, ioaddr + EL3_CMD); if (dev->if_port != XCVR_NWAY) { window_write16(vp, (window_read16(vp, 4, Wn4_Media) & ~(Media_10TP|Media_SQE)) | media_tbl[dev->if_port].media_bits, 4, Wn4_Media); } /* Switch to the stats window, and clear all stats by reading. */ iowrite16(StatsDisable, ioaddr + EL3_CMD); for (i = 0; i < 10; i++) window_read8(vp, 6, i); window_read16(vp, 6, 10); window_read16(vp, 6, 12); /* New: On the Vortex we must also clear the BadSSD counter. */ window_read8(vp, 4, 12); /* ..and on the Boomerang we enable the extra statistics bits. */ window_write16(vp, 0x0040, 4, Wn4_NetDiag); if (vp->full_bus_master_rx) { /* Boomerang bus master. */ vp->cur_rx = vp->dirty_rx = 0; /* Initialize the RxEarly register as recommended. */ iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD); iowrite32(0x0020, ioaddr + PktStatus); iowrite32(vp->rx_ring_dma, ioaddr + UpListPtr); } if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */ vp->cur_tx = vp->dirty_tx = 0; if (vp->drv_flags & IS_BOOMERANG) iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */ /* Clear the Rx, Tx rings. */ for (i = 0; i < RX_RING_SIZE; i++) /* AKPM: this is done in vortex_open, too */ vp->rx_ring[i].status = 0; for (i = 0; i < TX_RING_SIZE; i++) vp->tx_skbuff[i] = NULL; iowrite32(0, ioaddr + DownListPtr); } /* Set receiver mode: presumably accept b-case and phys addr only. */ set_rx_mode(dev); /* enable 802.1q tagged frames */ set_8021q_mode(dev, 1); iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ /* Allow status bits to be seen. */ vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete| (vp->full_bus_master_tx ? DownComplete : TxAvailable) | (vp->full_bus_master_rx ? UpComplete : RxComplete) | (vp->bus_master ? DMADone : 0); vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable | (vp->full_bus_master_rx ? 0 : RxComplete) | StatsFull | HostError | TxComplete | IntReq | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete; iowrite16(vp->status_enable, ioaddr + EL3_CMD); /* Ack all pending events, and set active indicator mask. */ iowrite16(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, ioaddr + EL3_CMD); iowrite16(vp->intr_enable, ioaddr + EL3_CMD); if (vp->cb_fn_base) /* The PCMCIA people are idiots. */ iowrite32(0x8000, vp->cb_fn_base + 4); netif_start_queue (dev); err_out: return err; } static int vortex_open(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); int i; int retval; /* Use the now-standard shared IRQ implementation. */ if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ? 
boomerang_interrupt : vortex_interrupt, IRQF_SHARED, dev->name, dev))) { pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq); goto err; } if (vp->full_bus_master_rx) { /* Boomerang bus master. */ if (vortex_debug > 2) pr_debug("%s: Filling in the Rx ring.\n", dev->name); for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *skb; vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1)); vp->rx_ring[i].status = 0; /* Clear complete bit. */ vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG); skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN, GFP_KERNEL); vp->rx_skbuff[i] = skb; if (skb == NULL) break; /* Bad news! */ skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */ vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); } if (i != RX_RING_SIZE) { int j; pr_emerg("%s: no memory for rx ring\n", dev->name); for (j = 0; j < i; j++) { if (vp->rx_skbuff[j]) { dev_kfree_skb(vp->rx_skbuff[j]); vp->rx_skbuff[j] = NULL; } } retval = -ENOMEM; goto err_free_irq; } /* Wrap the ring. */ vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma); } retval = vortex_up(dev); if (!retval) goto out; err_free_irq: free_irq(dev->irq, dev); err: if (vortex_debug > 1) pr_err("%s: vortex_open() fails: returning %d\n", dev->name, retval); out: return retval; } static void vortex_timer(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; int next_tick = 60*HZ; int ok = 0; int media_status; if (vortex_debug > 2) { pr_debug("%s: Media selection timer tick happened, %s.\n", dev->name, media_tbl[dev->if_port].name); pr_debug("dev->watchdog_timeo=%d\n", dev->watchdog_timeo); } media_status = window_read16(vp, 4, Wn4_Media); switch (dev->if_port) { case XCVR_10baseT: case XCVR_100baseTx: case XCVR_100baseFx: if (media_status & Media_LnkBeat) { netif_carrier_on(dev); ok = 1; if (vortex_debug > 1) pr_debug("%s: Media %s has link beat, %x.\n", dev->name, media_tbl[dev->if_port].name, media_status); } else { netif_carrier_off(dev); if (vortex_debug > 1) { pr_debug("%s: Media %s has no link beat, %x.\n", dev->name, media_tbl[dev->if_port].name, media_status); } } break; case XCVR_MII: case XCVR_NWAY: { ok = 1; vortex_check_media(dev, 0); } break; default: /* Other media types handled by Tx timeouts. */ if (vortex_debug > 1) pr_debug("%s: Media %s has no indication, %x.\n", dev->name, media_tbl[dev->if_port].name, media_status); ok = 1; } if (dev->flags & IFF_SLAVE || !netif_carrier_ok(dev)) next_tick = 5*HZ; if (vp->medialock) goto leave_media_alone; if (!ok) { unsigned int config; spin_lock_irq(&vp->lock); do { dev->if_port = media_tbl[dev->if_port].next; } while ( ! (vp->available_media & media_tbl[dev->if_port].mask)); if (dev->if_port == XCVR_Default) { /* Go back to default. */ dev->if_port = vp->default_media; if (vortex_debug > 1) pr_debug("%s: Media selection failing, using default %s port.\n", dev->name, media_tbl[dev->if_port].name); } else { if (vortex_debug > 1) pr_debug("%s: Media selection failed, now trying %s port.\n", dev->name, media_tbl[dev->if_port].name); next_tick = media_tbl[dev->if_port].wait; } window_write16(vp, (media_status & ~(Media_10TP|Media_SQE)) | media_tbl[dev->if_port].media_bits, 4, Wn4_Media); config = window_read32(vp, 3, Wn3_Config); config = BFINS(config, dev->if_port, 20, 4); window_write32(vp, config, 3, Wn3_Config); iowrite16(dev->if_port == XCVR_10base2 ? 
StartCoax : StopCoax, ioaddr + EL3_CMD); if (vortex_debug > 1) pr_debug("wrote 0x%08x to Wn3_Config\n", config); /* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */ spin_unlock_irq(&vp->lock); } leave_media_alone: if (vortex_debug > 2) pr_debug("%s: Media selection timer finished, %s.\n", dev->name, media_tbl[dev->if_port].name); mod_timer(&vp->timer, RUN_AT(next_tick)); if (vp->deferred) iowrite16(FakeIntr, ioaddr + EL3_CMD); } static void vortex_tx_timeout(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; pr_err("%s: transmit timed out, tx_status %2.2x status %4.4x.\n", dev->name, ioread8(ioaddr + TxStatus), ioread16(ioaddr + EL3_STATUS)); pr_err(" diagnostics: net %04x media %04x dma %08x fifo %04x\n", window_read16(vp, 4, Wn4_NetDiag), window_read16(vp, 4, Wn4_Media), ioread32(ioaddr + PktStatus), window_read16(vp, 4, Wn4_FIFODiag)); /* Slight code bloat to be user friendly. */ if ((ioread8(ioaddr + TxStatus) & 0x88) == 0x88) pr_err("%s: Transmitter encountered 16 collisions --" " network cable problem?\n", dev->name); if (ioread16(ioaddr + EL3_STATUS) & IntLatch) { pr_err("%s: Interrupt posted but not delivered --" " IRQ blocked by another device?\n", dev->name); /* Bad idea here.. but we might as well handle a few events. */ { /* * Block interrupts because vortex_interrupt does a bare spin_lock() */ unsigned long flags; local_irq_save(flags); if (vp->full_bus_master_tx) boomerang_interrupt(dev->irq, dev); else vortex_interrupt(dev->irq, dev); local_irq_restore(flags); } } if (vortex_debug > 0) dump_tx_ring(dev); issue_and_wait(dev, TxReset); dev->stats.tx_errors++; if (vp->full_bus_master_tx) { pr_debug("%s: Resetting the Tx ring pointer.\n", dev->name); if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0) iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc), ioaddr + DownListPtr); if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE) netif_wake_queue (dev); if (vp->drv_flags & IS_BOOMERANG) iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); iowrite16(DownUnstall, ioaddr + EL3_CMD); } else { dev->stats.tx_dropped++; netif_wake_queue(dev); } /* Issue Tx Enable */ iowrite16(TxEnable, ioaddr + EL3_CMD); dev->trans_start = jiffies; /* prevent tx timeout */ } /* * Handle uncommon interrupt sources. This is a separate routine to minimize * the cache impact. */ static void vortex_error(struct net_device *dev, int status) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; int do_tx_reset = 0, reset_mask = 0; unsigned char tx_status = 0; if (vortex_debug > 2) { pr_err("%s: vortex_error(), status=0x%x\n", dev->name, status); } if (status & TxComplete) { /* Really "TxError" for us. */ tx_status = ioread8(ioaddr + TxStatus); /* Presumably a tx-timeout. We must merely re-enable. */ if (vortex_debug > 2 || (tx_status != 0x88 && vortex_debug > 0)) { pr_err("%s: Transmit error, Tx status register %2.2x.\n", dev->name, tx_status); if (tx_status == 0x82) { pr_err("Probably a duplex mismatch. 
See " "Documentation/networking/vortex.txt\n"); } dump_tx_ring(dev); } if (tx_status & 0x14) dev->stats.tx_fifo_errors++; if (tx_status & 0x38) dev->stats.tx_aborted_errors++; if (tx_status & 0x08) vp->xstats.tx_max_collisions++; iowrite8(0, ioaddr + TxStatus); if (tx_status & 0x30) { /* txJabber or txUnderrun */ do_tx_reset = 1; } else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) { /* maxCollisions */ do_tx_reset = 1; reset_mask = 0x0108; /* Reset interface logic, but not download logic */ } else { /* Merely re-enable the transmitter. */ iowrite16(TxEnable, ioaddr + EL3_CMD); } } if (status & RxEarly) /* Rx early is unused. */ iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD); if (status & StatsFull) { /* Empty statistics. */ static int DoneDidThat; if (vortex_debug > 4) pr_debug("%s: Updating stats.\n", dev->name); update_stats(ioaddr, dev); /* HACK: Disable statistics as an interrupt source. */ /* This occurs when we have the wrong media type! */ if (DoneDidThat == 0 && ioread16(ioaddr + EL3_STATUS) & StatsFull) { pr_warning("%s: Updating statistics failed, disabling " "stats as an interrupt source.\n", dev->name); iowrite16(SetIntrEnb | (window_read16(vp, 5, 10) & ~StatsFull), ioaddr + EL3_CMD); vp->intr_enable &= ~StatsFull; DoneDidThat++; } } if (status & IntReq) { /* Restore all interrupt sources. */ iowrite16(vp->status_enable, ioaddr + EL3_CMD); iowrite16(vp->intr_enable, ioaddr + EL3_CMD); } if (status & HostError) { u16 fifo_diag; fifo_diag = window_read16(vp, 4, Wn4_FIFODiag); pr_err("%s: Host error, FIFO diagnostic register %4.4x.\n", dev->name, fifo_diag); /* Adapter failure requires Tx/Rx reset and reinit. */ if (vp->full_bus_master_tx) { int bus_status = ioread32(ioaddr + PktStatus); /* 0x80000000 PCI master abort. */ /* 0x40000000 PCI target abort. */ if (vortex_debug) pr_err("%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status); /* In this case, blow the card away */ /* Must not enter D3 or we can't legally issue the reset! */ vortex_down(dev, 0); issue_and_wait(dev, TotalReset | 0xff); vortex_up(dev); /* AKPM: bug. vortex_up() assumes that the rx ring is full. It may not be. */ } else if (fifo_diag & 0x0400) do_tx_reset = 1; if (fifo_diag & 0x3000) { /* Reset Rx fifo and upload logic */ issue_and_wait(dev, RxReset|0x07); /* Set the Rx filter to the current state. */ set_rx_mode(dev); /* enable 802.1q VLAN tagged frames */ set_8021q_mode(dev, 1); iowrite16(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */ iowrite16(AckIntr | HostError, ioaddr + EL3_CMD); } } if (do_tx_reset) { issue_and_wait(dev, TxReset|reset_mask); iowrite16(TxEnable, ioaddr + EL3_CMD); if (!vp->full_bus_master_tx) netif_wake_queue(dev); } } static netdev_tx_t vortex_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; /* Put out the doubleword header... */ iowrite32(skb->len, ioaddr + TX_FIFO); if (vp->bus_master) { /* Set the bus-master controller to transfer the packet. */ int len = (skb->len + 3) & ~3; vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE); spin_lock_irq(&vp->window_lock); window_set(vp, 7); iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr); iowrite16(len, ioaddr + Wn7_MasterLen); spin_unlock_irq(&vp->window_lock); vp->tx_skb = skb; iowrite16(StartDMADown, ioaddr + EL3_CMD); /* netif_wake_queue() will be called at the DMADone interrupt. */ } else { /* ... and the packet rounded to a doubleword. 
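The PIO path below uses iowrite32_rep() to push (skb->len + 3) >> 2 32-bit words straight into the Tx FIFO.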
*/ iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); dev_kfree_skb (skb); if (ioread16(ioaddr + TxFree) > 1536) { netif_start_queue (dev); /* AKPM: redundant? */ } else { /* Interrupt us when the FIFO has room for max-sized packet. */ netif_stop_queue(dev); iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD); } } /* Clear the Tx status stack. */ { int tx_status; int i = 32; while (--i > 0 && (tx_status = ioread8(ioaddr + TxStatus)) > 0) { if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */ if (vortex_debug > 2) pr_debug("%s: Tx error, status %2.2x.\n", dev->name, tx_status); if (tx_status & 0x04) dev->stats.tx_fifo_errors++; if (tx_status & 0x38) dev->stats.tx_aborted_errors++; if (tx_status & 0x30) { issue_and_wait(dev, TxReset); } iowrite16(TxEnable, ioaddr + EL3_CMD); } iowrite8(0x00, ioaddr + TxStatus); /* Pop the status stack. */ } } return NETDEV_TX_OK; } static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; /* Calculate the next Tx descriptor entry. */ int entry = vp->cur_tx % TX_RING_SIZE; struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE]; unsigned long flags; if (vortex_debug > 6) { pr_debug("boomerang_start_xmit()\n"); pr_debug("%s: Trying to send a packet, Tx index %d.\n", dev->name, vp->cur_tx); } /* * We can't allow a recursion from our interrupt handler back into the * tx routine, as they take the same spin lock, and that causes * deadlock. Just return NETDEV_TX_BUSY and let the stack try again in * a bit */ if (vp->handling_irq) return NETDEV_TX_BUSY; if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) { if (vortex_debug > 0) pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n", dev->name); netif_stop_queue(dev); return NETDEV_TX_BUSY; } vp->tx_skbuff[entry] = skb; vp->tx_ring[entry].next = 0; #if DO_ZEROCOPY if (skb->ip_summed != CHECKSUM_PARTIAL) vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); else vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum); if (!skb_shinfo(skb)->nr_frags) { vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE)); vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG); } else { int i; vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb_headlen(skb), PCI_DMA_TODEVICE)); vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb)); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; vp->tx_ring[entry].frag[i+1].addr = cpu_to_le32(pci_map_single( VORTEX_PCI(vp), (void *)skb_frag_address(frag), skb_frag_size(frag), PCI_DMA_TODEVICE)); if (i == skb_shinfo(skb)->nr_frags-1) vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG); else vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)); } } #else vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE)); vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG); vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); #endif spin_lock_irqsave(&vp->lock, flags); /* Wait for the stall to complete. 
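DownStall freezes the download (Tx DMA) engine so the new descriptor can be linked in safely; DownUnstall below restarts it.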
*/ issue_and_wait(dev, DownStall); prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc)); if (ioread32(ioaddr + DownListPtr) == 0) { iowrite32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr); vp->queued_packet++; } vp->cur_tx++; if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) { netif_stop_queue (dev); } else { /* Clear previous interrupt enable. */ #if defined(tx_interrupt_mitigation) /* Dubious. If in boomerang_interrupt "faster" cyclone ifdef * were selected, this would corrupt DN_COMPLETE. No? */ prev_entry->status &= cpu_to_le32(~TxIntrUploaded); #endif } iowrite16(DownUnstall, ioaddr + EL3_CMD); spin_unlock_irqrestore(&vp->lock, flags); return NETDEV_TX_OK; } /* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */ /* * This is the ISR for the vortex series chips. * full_bus_master_tx == 0 && full_bus_master_rx == 0 */ static irqreturn_t vortex_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr; int status; int work_done = max_interrupt_work; int handled = 0; ioaddr = vp->ioaddr; spin_lock(&vp->lock); status = ioread16(ioaddr + EL3_STATUS); if (vortex_debug > 6) pr_debug("vortex_interrupt(). status=0x%4x\n", status); if ((status & IntLatch) == 0) goto handler_exit; /* No interrupt: shared IRQs cause this */ handled = 1; if (status & IntReq) { status |= vp->deferred; vp->deferred = 0; } if (status == 0xffff) /* h/w no longer present (hotplug)? */ goto handler_exit; if (vortex_debug > 4) pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n", dev->name, status, ioread8(ioaddr + Timer)); spin_lock(&vp->window_lock); window_set(vp, 7); do { if (vortex_debug > 5) pr_debug("%s: In interrupt loop, status %4.4x.\n", dev->name, status); if (status & RxComplete) vortex_rx(dev); if (status & TxAvailable) { if (vortex_debug > 5) pr_debug(" TX room bit was handled.\n"); /* There's room in the FIFO for a full-sized packet. */ iowrite16(AckIntr | TxAvailable, ioaddr + EL3_CMD); netif_wake_queue (dev); } if (status & DMADone) { if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) { iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */ pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE); dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */ if (ioread16(ioaddr + TxFree) > 1536) { /* * AKPM: FIXME: I don't think we need this. If the queue was stopped due to * insufficient FIFO room, the TxAvailable test will succeed and call * netif_wake_queue() */ netif_wake_queue(dev); } else { /* Interrupt when FIFO has room for max-sized packet. */ iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD); netif_stop_queue(dev); } } } /* Check for all uncommon interrupts at once. */ if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) { if (status == 0xffff) break; if (status & RxEarly) vortex_rx(dev); spin_unlock(&vp->window_lock); vortex_error(dev, status); spin_lock(&vp->window_lock); window_set(vp, 7); } if (--work_done < 0) { pr_warning("%s: Too much work in interrupt, status %4.4x.\n", dev->name, status); /* Disable all pending interrupts. */ do { vp->deferred |= status; iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable), ioaddr + EL3_CMD); iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD); } while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch); /* The timer will reenable interrupts.
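vortex_timer() notices vp->deferred and posts a FakeIntr; the resulting IntReq pass through this handler restores the saved masks.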
*/ mod_timer(&vp->timer, jiffies + 1*HZ); break; } /* Acknowledge the IRQ. */ iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); } while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete)); spin_unlock(&vp->window_lock); if (vortex_debug > 4) pr_debug("%s: exiting interrupt, status %4.4x.\n", dev->name, status); handler_exit: spin_unlock(&vp->lock); return IRQ_RETVAL(handled); } /* * This is the ISR for the boomerang series chips. * full_bus_master_tx == 1 && full_bus_master_rx == 1 */ static irqreturn_t boomerang_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr; int status; int work_done = max_interrupt_work; ioaddr = vp->ioaddr; /* * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout * and boomerang_start_xmit */ spin_lock(&vp->lock); vp->handling_irq = 1; status = ioread16(ioaddr + EL3_STATUS); if (vortex_debug > 6) pr_debug("boomerang_interrupt. status=0x%4x\n", status); if ((status & IntLatch) == 0) goto handler_exit; /* No interrupt: shared IRQs can cause this */ if (status == 0xffff) { /* h/w no longer present (hotplug)? */ if (vortex_debug > 1) pr_debug("boomerang_interrupt(1): status = 0xffff\n"); goto handler_exit; } if (status & IntReq) { status |= vp->deferred; vp->deferred = 0; } if (vortex_debug > 4) pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n", dev->name, status, ioread8(ioaddr + Timer)); do { if (vortex_debug > 5) pr_debug("%s: In interrupt loop, status %4.4x.\n", dev->name, status); if (status & UpComplete) { iowrite16(AckIntr | UpComplete, ioaddr + EL3_CMD); if (vortex_debug > 5) pr_debug("boomerang_interrupt->boomerang_rx\n"); boomerang_rx(dev); } if (status & DownComplete) { unsigned int dirty_tx = vp->dirty_tx; iowrite16(AckIntr | DownComplete, ioaddr + EL3_CMD); while (vp->cur_tx - dirty_tx > 0) { int entry = dirty_tx % TX_RING_SIZE; #if 1 /* AKPM: the latter is faster, but cyclone-only */ if (ioread32(ioaddr + DownListPtr) == vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc)) break; /* It still hasn't been processed. */ #else if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0) break; /* It still hasn't been processed. */ #endif if (vp->tx_skbuff[entry]) { struct sk_buff *skb = vp->tx_skbuff[entry]; #if DO_ZEROCOPY int i; for (i=0; i<=skb_shinfo(skb)->nr_frags; i++) pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[entry].frag[i].addr), le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF, PCI_DMA_TODEVICE); #else pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE); #endif dev_kfree_skb_irq(skb); vp->tx_skbuff[entry] = NULL; } else { pr_debug("boomerang_interrupt: no skb!\n"); } /* dev->stats.tx_packets++; Counted below. */ dirty_tx++; } vp->dirty_tx = dirty_tx; if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) { if (vortex_debug > 6) pr_debug("boomerang_interrupt: wake queue\n"); netif_wake_queue (dev); } } /* Check for all uncommon interrupts at once. */ if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) vortex_error(dev, status); if (--work_done < 0) { pr_warning("%s: Too much work in interrupt, status %4.4x.\n", dev->name, status); /* Disable all pending interrupts. 
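As above, they are accumulated in vp->deferred so the media timer can replay them with a FakeIntr.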
*/ do { vp->deferred |= status; iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable), ioaddr + EL3_CMD); iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD); } while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch); /* The timer will reenable interrupts. */ mod_timer(&vp->timer, jiffies + 1*HZ); break; } /* Acknowledge the IRQ. */ iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); if (vp->cb_fn_base) /* The PCMCIA people are idiots. */ iowrite32(0x8000, vp->cb_fn_base + 4); } while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch); if (vortex_debug > 4) pr_debug("%s: exiting interrupt, status %4.4x.\n", dev->name, status); handler_exit: vp->handling_irq = 0; spin_unlock(&vp->lock); return IRQ_HANDLED; } static int vortex_rx(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; int i; short rx_status; if (vortex_debug > 5) pr_debug("vortex_rx(): status %4.4x, rx_status %4.4x.\n", ioread16(ioaddr+EL3_STATUS), ioread16(ioaddr+RxStatus)); while ((rx_status = ioread16(ioaddr + RxStatus)) > 0) { if (rx_status & 0x4000) { /* Error, update stats. */ unsigned char rx_error = ioread8(ioaddr + RxErrors); if (vortex_debug > 2) pr_debug(" Rx error: status %2.2x.\n", rx_error); dev->stats.rx_errors++; if (rx_error & 0x01) dev->stats.rx_over_errors++; if (rx_error & 0x02) dev->stats.rx_length_errors++; if (rx_error & 0x04) dev->stats.rx_frame_errors++; if (rx_error & 0x08) dev->stats.rx_crc_errors++; if (rx_error & 0x10) dev->stats.rx_length_errors++; } else { /* The packet length: up to 4.5K!. */ int pkt_len = rx_status & 0x1fff; struct sk_buff *skb; skb = netdev_alloc_skb(dev, pkt_len + 5); if (vortex_debug > 4) pr_debug("Receiving packet size %d status %4.4x.\n", pkt_len, rx_status); if (skb != NULL) { skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ /* 'skb_put()' points to the start of sk_buff data area. */ if (vp->bus_master && ! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) { dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len), pkt_len, PCI_DMA_FROMDEVICE); iowrite32(dma, ioaddr + Wn7_MasterAddr); iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen); iowrite16(StartDMAUp, ioaddr + EL3_CMD); while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000) ; pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE); } else { ioread32_rep(ioaddr + RX_FIFO, skb_put(skb, pkt_len), (pkt_len + 3) >> 2); } iowrite16(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */ skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->stats.rx_packets++; /* Wait a limited time to go to next packet. */ for (i = 200; i >= 0; i--) if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) break; continue; } else if (vortex_debug > 0) pr_notice("%s: No memory to allocate a sk_buff of size %d.\n", dev->name, pkt_len); dev->stats.rx_dropped++; } issue_and_wait(dev, RxDiscard); } return 0; } static int boomerang_rx(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); int entry = vp->cur_rx % RX_RING_SIZE; void __iomem *ioaddr = vp->ioaddr; int rx_status; int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx; if (vortex_debug > 5) pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS)); while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){ if (--rx_work_limit < 0) break; if (rx_status & RxDError) { /* Error, update stats. 
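The error bits live in bits 16-23 of the descriptor status word.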
*/ unsigned char rx_error = rx_status >> 16; if (vortex_debug > 2) pr_debug(" Rx error: status %2.2x.\n", rx_error); dev->stats.rx_errors++; if (rx_error & 0x01) dev->stats.rx_over_errors++; if (rx_error & 0x02) dev->stats.rx_length_errors++; if (rx_error & 0x04) dev->stats.rx_frame_errors++; if (rx_error & 0x08) dev->stats.rx_crc_errors++; if (rx_error & 0x10) dev->stats.rx_length_errors++; } else { /* The packet length: up to 4.5K!. */ int pkt_len = rx_status & 0x1fff; struct sk_buff *skb; dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr); if (vortex_debug > 4) pr_debug("Receiving packet size %d status %4.4x.\n", pkt_len, rx_status); /* Check if the packet is long enough to just accept without copying to a properly sized skbuff. */ if (pkt_len < rx_copybreak && (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); /* 'skb_put()' points to the start of sk_buff data area. */ memcpy(skb_put(skb, pkt_len), vp->rx_skbuff[entry]->data, pkt_len); pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); vp->rx_copy++; } else { /* Pass up the skbuff already on the Rx ring. */ skb = vp->rx_skbuff[entry]; vp->rx_skbuff[entry] = NULL; skb_put(skb, pkt_len); pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); vp->rx_nocopy++; } skb->protocol = eth_type_trans(skb, dev); { /* Use hardware checksum info. */ int csum_bits = rx_status & 0xee000000; if (csum_bits && (csum_bits == (IPChksumValid | TCPChksumValid) || csum_bits == (IPChksumValid | UDPChksumValid))) { skb->ip_summed = CHECKSUM_UNNECESSARY; vp->rx_csumhits++; } } netif_rx(skb); dev->stats.rx_packets++; } entry = (++vp->cur_rx) % RX_RING_SIZE; } /* Refill the Rx ring buffers. */ for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) { struct sk_buff *skb; entry = vp->dirty_rx % RX_RING_SIZE; if (vp->rx_skbuff[entry] == NULL) { skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ); if (skb == NULL) { static unsigned long last_jif; if (time_after(jiffies, last_jif + 10 * HZ)) { pr_warning("%s: memory shortage\n", dev->name); last_jif = jiffies; } if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1)); break; /* Bad news! */ } vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); vp->rx_skbuff[entry] = skb; } vp->rx_ring[entry].status = 0; /* Clear complete bit. */ iowrite16(UpUnstall, ioaddr + EL3_CMD); } return 0; } /* * If we've hit a total OOM refilling the Rx ring we poll once a second * for some memory. Otherwise there is no way to restart the rx process. */ static void rx_oom_timer(unsigned long arg) { struct net_device *dev = (struct net_device *)arg; struct vortex_private *vp = netdev_priv(dev); spin_lock_irq(&vp->lock); if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */ boomerang_rx(dev); if (vortex_debug > 1) { pr_debug("%s: rx_oom_timer %s\n", dev->name, ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying"); } spin_unlock_irq(&vp->lock); } static void vortex_down(struct net_device *dev, int final_down) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; netif_stop_queue (dev); del_timer_sync(&vp->rx_oom_timer); del_timer_sync(&vp->timer); /* Turn off statistics ASAP. We update dev->stats below. 
*/ iowrite16(StatsDisable, ioaddr + EL3_CMD); /* Disable the receiver and transmitter. */ iowrite16(RxDisable, ioaddr + EL3_CMD); iowrite16(TxDisable, ioaddr + EL3_CMD); /* Disable receiving 802.1q tagged frames */ set_8021q_mode(dev, 0); if (dev->if_port == XCVR_10base2) /* Turn off thinnet power. Green! */ iowrite16(StopCoax, ioaddr + EL3_CMD); iowrite16(SetIntrEnb | 0x0000, ioaddr + EL3_CMD); update_stats(ioaddr, dev); if (vp->full_bus_master_rx) iowrite32(0, ioaddr + UpListPtr); if (vp->full_bus_master_tx) iowrite32(0, ioaddr + DownListPtr); if (final_down && VORTEX_PCI(vp)) { vp->pm_state_valid = 1; pci_save_state(VORTEX_PCI(vp)); acpi_set_WOL(dev); } } static int vortex_close(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; int i; if (netif_device_present(dev)) vortex_down(dev, 1); if (vortex_debug > 1) { pr_debug("%s: vortex_close() status %4.4x, Tx status %2.2x.\n", dev->name, ioread16(ioaddr + EL3_STATUS), ioread8(ioaddr + TxStatus)); pr_debug("%s: vortex close stats: rx_nocopy %d rx_copy %d" " tx_queued %d Rx pre-checksummed %d.\n", dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits); } #if DO_ZEROCOPY if (vp->rx_csumhits && (vp->drv_flags & HAS_HWCKSM) == 0 && (vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) { pr_warning("%s supports hardware checksums, and we're not using them!\n", dev->name); } #endif free_irq(dev->irq, dev); if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */ for (i = 0; i < RX_RING_SIZE; i++) if (vp->rx_skbuff[i]) { pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr), PKT_BUF_SZ, PCI_DMA_FROMDEVICE); dev_kfree_skb(vp->rx_skbuff[i]); vp->rx_skbuff[i] = NULL; } } if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */ for (i = 0; i < TX_RING_SIZE; i++) { if (vp->tx_skbuff[i]) { struct sk_buff *skb = vp->tx_skbuff[i]; #if DO_ZEROCOPY int k; for (k=0; k<=skb_shinfo(skb)->nr_frags; k++) pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].frag[k].addr), le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF, PCI_DMA_TODEVICE); #else pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE); #endif dev_kfree_skb(skb); vp->tx_skbuff[i] = NULL; } } } return 0; } static void dump_tx_ring(struct net_device *dev) { if (vortex_debug > 0) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; if (vp->full_bus_master_tx) { int i; int stalled = ioread32(ioaddr + PktStatus) & 0x04; /* Possible racy. But it's only debug stuff */ pr_err(" Flags; bus-master %d, dirty %d(%d) current %d(%d)\n", vp->full_bus_master_tx, vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE, vp->cur_tx, vp->cur_tx % TX_RING_SIZE); pr_err(" Transmit list %8.8x vs. 
%p.\n", ioread32(ioaddr + DownListPtr), &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]); issue_and_wait(dev, DownStall); for (i = 0; i < TX_RING_SIZE; i++) { unsigned int length; #if DO_ZEROCOPY length = le32_to_cpu(vp->tx_ring[i].frag[0].length); #else length = le32_to_cpu(vp->tx_ring[i].length); #endif pr_err(" %d: @%p length %8.8x status %8.8x\n", i, &vp->tx_ring[i], length, le32_to_cpu(vp->tx_ring[i].status)); } if (!stalled) iowrite16(DownUnstall, ioaddr + EL3_CMD); } } } static struct net_device_stats *vortex_get_stats(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; unsigned long flags; if (netif_device_present(dev)) { /* AKPM: Used to be netif_running */ spin_lock_irqsave (&vp->lock, flags); update_stats(ioaddr, dev); spin_unlock_irqrestore (&vp->lock, flags); } return &dev->stats; } /* Update statistics. Unlike with the EL3 we need not worry about interrupts changing the window setting from underneath us, but we must still guard against a race condition with a StatsUpdate interrupt updating the table. This is done by checking that the ASM (!) code generated uses atomic updates with '+='. */ static void update_stats(void __iomem *ioaddr, struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); /* Unlike the 3c5x9 we need not turn off stats updates while reading. */ /* Switch to the stats window, and read everything. */ dev->stats.tx_carrier_errors += window_read8(vp, 6, 0); dev->stats.tx_heartbeat_errors += window_read8(vp, 6, 1); dev->stats.tx_window_errors += window_read8(vp, 6, 4); dev->stats.rx_fifo_errors += window_read8(vp, 6, 5); dev->stats.tx_packets += window_read8(vp, 6, 6); dev->stats.tx_packets += (window_read8(vp, 6, 9) & 0x30) << 4; /* Rx packets */ window_read8(vp, 6, 7); /* Must read to clear */ /* Don't bother with register 9, an extension of registers 6&7. If we do use the 6&7 values the atomic update assumption above is invalid. 
*/ dev->stats.rx_bytes += window_read16(vp, 6, 10); dev->stats.tx_bytes += window_read16(vp, 6, 12); /* Extra stats for get_ethtool_stats() */ vp->xstats.tx_multiple_collisions += window_read8(vp, 6, 2); vp->xstats.tx_single_collisions += window_read8(vp, 6, 3); vp->xstats.tx_deferred += window_read8(vp, 6, 8); vp->xstats.rx_bad_ssd += window_read8(vp, 4, 12); dev->stats.collisions = vp->xstats.tx_multiple_collisions + vp->xstats.tx_single_collisions + vp->xstats.tx_max_collisions; { u8 up = window_read8(vp, 4, 13); dev->stats.rx_bytes += (up & 0x0f) << 16; dev->stats.tx_bytes += (up & 0xf0) << 12; } } static int vortex_nway_reset(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); return mii_nway_restart(&vp->mii); } static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct vortex_private *vp = netdev_priv(dev); return mii_ethtool_gset(&vp->mii, cmd); } static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct vortex_private *vp = netdev_priv(dev); return mii_ethtool_sset(&vp->mii, cmd); } static u32 vortex_get_msglevel(struct net_device *dev) { return vortex_debug; } static void vortex_set_msglevel(struct net_device *dev, u32 dbg) { vortex_debug = dbg; } static int vortex_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return VORTEX_NUM_STATS; default: return -EOPNOTSUPP; } } static void vortex_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; unsigned long flags; spin_lock_irqsave(&vp->lock, flags); update_stats(ioaddr, dev); spin_unlock_irqrestore(&vp->lock, flags); data[0] = vp->xstats.tx_deferred; data[1] = vp->xstats.tx_max_collisions; data[2] = vp->xstats.tx_multiple_collisions; data[3] = vp->xstats.tx_single_collisions; data[4] = vp->xstats.rx_bad_ssd; } static void vortex_get_strings(struct net_device *dev, u32 stringset, u8 *data) { switch (stringset) { case ETH_SS_STATS: memcpy(data, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); break; default: WARN_ON(1); break; } } static void vortex_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct vortex_private *vp = netdev_priv(dev); strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); if (VORTEX_PCI(vp)) { strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)), sizeof(info->bus_info)); } else { if (VORTEX_EISA(vp)) strlcpy(info->bus_info, dev_name(vp->gendev), sizeof(info->bus_info)); else snprintf(info->bus_info, sizeof(info->bus_info), "EISA 0x%lx %d", dev->base_addr, dev->irq); } } static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct vortex_private *vp = netdev_priv(dev); if (!VORTEX_PCI(vp)) return; wol->supported = WAKE_MAGIC; wol->wolopts = 0; if (vp->enable_wol) wol->wolopts |= WAKE_MAGIC; } static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct vortex_private *vp = netdev_priv(dev); if (!VORTEX_PCI(vp)) return -EOPNOTSUPP; if (wol->wolopts & ~WAKE_MAGIC) return -EINVAL; if (wol->wolopts & WAKE_MAGIC) vp->enable_wol = 1; else vp->enable_wol = 0; acpi_set_WOL(dev); return 0; } static const struct ethtool_ops vortex_ethtool_ops = { .get_drvinfo = vortex_get_drvinfo, .get_strings = vortex_get_strings, .get_msglevel = vortex_get_msglevel, .set_msglevel = vortex_set_msglevel, .get_ethtool_stats = vortex_get_ethtool_stats, .get_sset_count = vortex_get_sset_count, .get_settings = vortex_get_settings, .set_settings 
= vortex_set_settings, .get_link = ethtool_op_get_link, .nway_reset = vortex_nway_reset, .get_wol = vortex_get_wol, .set_wol = vortex_set_wol, }; #ifdef CONFIG_PCI /* * Must power the device up to do MDIO operations */ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { int err; struct vortex_private *vp = netdev_priv(dev); pci_power_t state = 0; if(VORTEX_PCI(vp)) state = VORTEX_PCI(vp)->current_state; /* The kernel core really should have pci_get_power_state() */ if(state != 0) pci_set_power_state(VORTEX_PCI(vp), PCI_D0); err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL); if(state != 0) pci_set_power_state(VORTEX_PCI(vp), state); return err; } #endif /* Pre-Cyclone chips have no documented multicast filter, so the only multicast setting is to receive all multicast frames. At least the chip has a very clean way to set the mode, unlike many others. */ static void set_rx_mode(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; int new_mode; if (dev->flags & IFF_PROMISC) { if (vortex_debug > 3) pr_notice("%s: Setting promiscuous mode.\n", dev->name); new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm; } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) { new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast; } else new_mode = SetRxFilter | RxStation | RxBroadcast; iowrite16(new_mode, ioaddr + EL3_CMD); } #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) /* Setup the card so that it can receive frames with an 802.1q VLAN tag. Note that this must be done after each RxReset due to some backwards compatibility logic in the Cyclone and Tornado ASICs */ /* The Ethernet Type used for 802.1q tagged frames */ #define VLAN_ETHER_TYPE 0x8100 static void set_8021q_mode(struct net_device *dev, int enable) { struct vortex_private *vp = netdev_priv(dev); int mac_ctrl; if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) { /* cyclone and tornado chipsets can recognize 802.1q * tagged frames and treat them correctly */ int max_pkt_size = dev->mtu+14; /* MTU+Ethernet header */ if (enable) max_pkt_size += 4; /* 802.1Q VLAN tag */ window_write16(vp, max_pkt_size, 3, Wn3_MaxPktSize); /* set VlanEtherType to let the hardware checksumming treat tagged frames correctly */ window_write16(vp, VLAN_ETHER_TYPE, 7, Wn7_VlanEtherType); } else { /* on older cards we have to enable large frames */ vp->large_frames = dev->mtu > 1500 || enable; mac_ctrl = window_read16(vp, 3, Wn3_MAC_Ctrl); if (vp->large_frames) mac_ctrl |= 0x40; else mac_ctrl &= ~0x40; window_write16(vp, mac_ctrl, 3, Wn3_MAC_Ctrl); } } #else static void set_8021q_mode(struct net_device *dev, int enable) { } #endif /* MII transceiver control section. Read and write the MII registers using software-generated serial MDIO protocol. See the MII specifications or DP83840A data sheet for details. */ /* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually met by back-to-back PCI I/O cycles, but we insert a delay to avoid "overclocking" issues. */ static void mdio_delay(struct vortex_private *vp) { window_read32(vp, 4, Wn4_PhysicalMgmt); } #define MDIO_SHIFT_CLK 0x01 #define MDIO_DIR_WRITE 0x04 #define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE) #define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE) #define MDIO_DATA_READ 0x02 #define MDIO_ENB_IN 0x00 /* Generate the preamble required for initial synchronization and a few older transceivers. 
*/ static void mdio_sync(struct vortex_private *vp, int bits) { /* Establish sync by sending at least 32 logic ones. */ while (-- bits >= 0) { window_write16(vp, MDIO_DATA_WRITE1, 4, Wn4_PhysicalMgmt); mdio_delay(vp); window_write16(vp, MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, 4, Wn4_PhysicalMgmt); mdio_delay(vp); } } static int mdio_read(struct net_device *dev, int phy_id, int location) { int i; struct vortex_private *vp = netdev_priv(dev); int read_cmd = (0xf6 << 10) | (phy_id << 5) | location; unsigned int retval = 0; spin_lock_bh(&vp->mii_lock); if (mii_preamble_required) mdio_sync(vp, 32); /* Shift the read command bits out. */ for (i = 14; i >= 0; i--) { int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0; window_write16(vp, dataval, 4, Wn4_PhysicalMgmt); mdio_delay(vp); window_write16(vp, dataval | MDIO_SHIFT_CLK, 4, Wn4_PhysicalMgmt); mdio_delay(vp); } /* Read the two transition, 16 data, and wire-idle bits. */ for (i = 19; i > 0; i--) { window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt); mdio_delay(vp); retval = (retval << 1) | ((window_read16(vp, 4, Wn4_PhysicalMgmt) & MDIO_DATA_READ) ? 1 : 0); window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK, 4, Wn4_PhysicalMgmt); mdio_delay(vp); } spin_unlock_bh(&vp->mii_lock); return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff; } static void mdio_write(struct net_device *dev, int phy_id, int location, int value) { struct vortex_private *vp = netdev_priv(dev); int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value; int i; spin_lock_bh(&vp->mii_lock); if (mii_preamble_required) mdio_sync(vp, 32); /* Shift the command bits out. */ for (i = 31; i >= 0; i--) { int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0; window_write16(vp, dataval, 4, Wn4_PhysicalMgmt); mdio_delay(vp); window_write16(vp, dataval | MDIO_SHIFT_CLK, 4, Wn4_PhysicalMgmt); mdio_delay(vp); } /* Leave the interface idle. */ for (i = 1; i >= 0; i--) { window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt); mdio_delay(vp); window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK, 4, Wn4_PhysicalMgmt); mdio_delay(vp); } spin_unlock_bh(&vp->mii_lock); } /* ACPI: Advanced Configuration and Power Interface. */ /* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */ static void acpi_set_WOL(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; device_set_wakeup_enable(vp->gendev, vp->enable_wol); if (vp->enable_wol) { /* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */ window_write16(vp, 2, 7, 0x0c); /* The RxFilter must accept the WOL frames. */ iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD); iowrite16(RxEnable, ioaddr + EL3_CMD); if (pci_enable_wake(VORTEX_PCI(vp), PCI_D3hot, 1)) { pr_info("%s: WOL not supported.\n", pci_name(VORTEX_PCI(vp))); vp->enable_wol = 0; return; } if (VORTEX_PCI(vp)->current_state < PCI_D3hot) return; /* Change the power state to D3; RxEnable doesn't take effect. 
*/ pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot); } } static void vortex_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct vortex_private *vp; if (!dev) { pr_err("vortex_remove_one called for Compaq device!\n"); BUG(); } vp = netdev_priv(dev); if (vp->cb_fn_base) pci_iounmap(VORTEX_PCI(vp), vp->cb_fn_base); unregister_netdev(dev); if (VORTEX_PCI(vp)) { pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */ if (vp->pm_state_valid) pci_restore_state(VORTEX_PCI(vp)); pci_disable_device(VORTEX_PCI(vp)); } /* Should really use issue_and_wait() here */ iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14), vp->ioaddr + EL3_CMD); pci_iounmap(VORTEX_PCI(vp), vp->ioaddr); pci_free_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE + sizeof(struct boom_tx_desc) * TX_RING_SIZE, vp->rx_ring, vp->rx_ring_dma); pci_release_regions(pdev); free_netdev(dev); } static struct pci_driver vortex_driver = { .name = "3c59x", .probe = vortex_init_one, .remove = vortex_remove_one, .id_table = vortex_pci_tbl, .driver.pm = VORTEX_PM_OPS, }; static int vortex_have_pci; static int vortex_have_eisa; static int __init vortex_init(void) { int pci_rc, eisa_rc; pci_rc = pci_register_driver(&vortex_driver); eisa_rc = vortex_eisa_init(); if (pci_rc == 0) vortex_have_pci = 1; if (eisa_rc > 0) vortex_have_eisa = 1; return (vortex_have_pci + vortex_have_eisa) ? 0 : -ENODEV; } static void __exit vortex_eisa_cleanup(void) { struct vortex_private *vp; void __iomem *ioaddr; #ifdef CONFIG_EISA /* Take care of the EISA devices */ eisa_driver_unregister(&vortex_eisa_driver); #endif if (compaq_net_device) { vp = netdev_priv(compaq_net_device); ioaddr = ioport_map(compaq_net_device->base_addr, VORTEX_TOTAL_SIZE); unregister_netdev(compaq_net_device); iowrite16(TotalReset, ioaddr + EL3_CMD); release_region(compaq_net_device->base_addr, VORTEX_TOTAL_SIZE); free_netdev(compaq_net_device); } } static void __exit vortex_cleanup(void) { if (vortex_have_pci) pci_unregister_driver(&vortex_driver); if (vortex_have_eisa) vortex_eisa_cleanup(); } module_init(vortex_init); module_exit(vortex_cleanup);
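/*
 * Illustrative sketch, not part of the driver above: how the bit-banged
 * MDIO read command used by mdio_read() is assembled and clocked out.
 * The mgmt_write_fn callback is a hypothetical stand-in for the driver's
 * window_write16(vp, val, 4, Wn4_PhysicalMgmt) register access; the
 * MDIO_* constants and the shift loop mirror the code above.
 */
#include <stdint.h>

#define MDIO_SHIFT_CLK   0x01
#define MDIO_DIR_WRITE   0x04
#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)

typedef void (*mgmt_write_fn)(uint16_t val);	/* hypothetical register hook */

/* Shift out the low 15 bits of the read command, MSB first, exactly as
 * the i = 14..0 loop in mdio_read() does: present each data bit with the
 * bus direction set to write, then pulse the clock bit to latch it. */
static void mdio_shift_read_cmd(mgmt_write_fn write_mgmt, int phy_id,
				int location)
{
	int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i;

	for (i = 14; i >= 0; i--) {
		int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1
						    : MDIO_DATA_WRITE0;

		write_mgmt(dataval);			/* data bit on the wire */
		write_mgmt(dataval | MDIO_SHIFT_CLK);	/* clock edge latches it */
	}
}
/* After these 15 command bits, mdio_read() above turns the bus around and
 * clocks in 19 bits (2 turnaround, 16 data, 1 idle) to collect the value. */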
gpl-2.0
frenkowski/Tyrannus_Kernel_MM_SM-G925F
drivers/rapidio/rio-driver.c
2079
6267
/* * RapidIO driver support * * Copyright 2005 MontaVista Software, Inc. * Matt Porter <mporter@kernel.crashing.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/init.h> #include <linux/module.h> #include <linux/rio.h> #include <linux/rio_ids.h> #include "rio.h" /** * rio_match_device - Tell if a RIO device has a matching RIO device id structure * @id: the RIO device id structure to match against * @rdev: the RIO device structure to match against * * Used from driver probe and bus matching to check whether a RIO device * matches a device id structure provided by a RIO driver. Returns the * matching &struct rio_device_id or %NULL if there is no match. */ static const struct rio_device_id *rio_match_device(const struct rio_device_id *id, const struct rio_dev *rdev) { while (id->vid || id->asm_vid) { if (((id->vid == RIO_ANY_ID) || (id->vid == rdev->vid)) && ((id->did == RIO_ANY_ID) || (id->did == rdev->did)) && ((id->asm_vid == RIO_ANY_ID) || (id->asm_vid == rdev->asm_vid)) && ((id->asm_did == RIO_ANY_ID) || (id->asm_did == rdev->asm_did))) return id; id++; } return NULL; } /** * rio_dev_get - Increments the reference count of the RIO device structure * * @rdev: RIO device being referenced * * Each live reference to a device should be refcounted. * * Drivers for RIO devices should normally record such references in * their probe() methods, when they bind to a device, and release * them by calling rio_dev_put(), in their disconnect() methods. */ struct rio_dev *rio_dev_get(struct rio_dev *rdev) { if (rdev) get_device(&rdev->dev); return rdev; } /** * rio_dev_put - Release a use of the RIO device structure * * @rdev: RIO device being disconnected * * Must be called when a user of a device is finished with it. * When the last user of the device calls this function, the * memory of the device is freed. */ void rio_dev_put(struct rio_dev *rdev) { if (rdev) put_device(&rdev->dev); } /** * rio_device_probe - Tell if a RIO device structure has a matching RIO device id structure * @dev: the RIO device structure to match against * * return 0 and set rio_dev->driver when drv claims rio_dev, else error */ static int rio_device_probe(struct device *dev) { struct rio_driver *rdrv = to_rio_driver(dev->driver); struct rio_dev *rdev = to_rio_dev(dev); int error = -ENODEV; const struct rio_device_id *id; if (!rdev->driver && rdrv->probe) { if (!rdrv->id_table) return error; id = rio_match_device(rdrv->id_table, rdev); rio_dev_get(rdev); if (id) error = rdrv->probe(rdev, id); if (error >= 0) { rdev->driver = rdrv; error = 0; } else rio_dev_put(rdev); } return error; } /** * rio_device_remove - Remove a RIO device from the system * * @dev: the RIO device structure to match against * * Remove a RIO device from the system. If it has an associated * driver, then run the driver remove() method. Then update * the reference count. */ static int rio_device_remove(struct device *dev) { struct rio_dev *rdev = to_rio_dev(dev); struct rio_driver *rdrv = rdev->driver; if (rdrv) { if (rdrv->remove) rdrv->remove(rdev); rdev->driver = NULL; } rio_dev_put(rdev); return 0; } /** * rio_register_driver - register a new RIO driver * @rdrv: the RIO driver structure to register * * Adds a &struct rio_driver to the list of registered drivers. * Returns a negative value on error, otherwise 0. 
If no error * occurred, the driver remains registered even if no device * was claimed during registration. */ int rio_register_driver(struct rio_driver *rdrv) { /* initialize common driver fields */ rdrv->driver.name = rdrv->name; rdrv->driver.bus = &rio_bus_type; /* register with core */ return driver_register(&rdrv->driver); } /** * rio_unregister_driver - unregister a RIO driver * @rdrv: the RIO driver structure to unregister * * Deletes the &struct rio_driver from the list of registered RIO * drivers, gives it a chance to clean up by calling its remove() * function for each device it was responsible for, and marks those * devices as driverless. */ void rio_unregister_driver(struct rio_driver *rdrv) { driver_unregister(&rdrv->driver); } void rio_attach_device(struct rio_dev *rdev) { rdev->dev.bus = &rio_bus_type; rdev->dev.parent = &rio_bus; } EXPORT_SYMBOL_GPL(rio_attach_device); /** * rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure * @dev: the standard device structure to match against * @drv: the standard driver structure containing the ids to match against * * Used by a driver to check whether a RIO device present in the * system is in its list of supported devices. Returns 1 if * there is a matching &struct rio_device_id or 0 if there is * no match. */ static int rio_match_bus(struct device *dev, struct device_driver *drv) { struct rio_dev *rdev = to_rio_dev(dev); struct rio_driver *rdrv = to_rio_driver(drv); const struct rio_device_id *id = rdrv->id_table; const struct rio_device_id *found_id; if (!id) goto out; found_id = rio_match_device(id, rdev); if (found_id) return 1; out:return 0; } struct device rio_bus = { .init_name = "rapidio", }; struct bus_type rio_bus_type = { .name = "rapidio", .match = rio_match_bus, .dev_attrs = rio_dev_attrs, .bus_attrs = rio_bus_attrs, .probe = rio_device_probe, .remove = rio_device_remove, }; /** * rio_bus_init - Register the RapidIO bus with the device model * * Registers the RIO bus device and RIO bus type with the Linux * device model. */ static int __init rio_bus_init(void) { if (device_register(&rio_bus) < 0) printk("RIO: failed to register RIO bus device\n"); return bus_register(&rio_bus_type); } postcore_initcall(rio_bus_init); EXPORT_SYMBOL_GPL(rio_register_driver); EXPORT_SYMBOL_GPL(rio_unregister_driver); EXPORT_SYMBOL_GPL(rio_bus_type); EXPORT_SYMBOL_GPL(rio_dev_get); EXPORT_SYMBOL_GPL(rio_dev_put);
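/*
 * Illustrative sketch, not part of this file: the shape of a client
 * driver as seen from rio_match_device()'s side. The vendor/device
 * numbers and the probe/remove bodies are invented for illustration;
 * the RIO_ANY_ID wildcards and the zero-terminated table are the parts
 * the matching loop above actually relies on.
 */
static int example_probe(struct rio_dev *rdev, const struct rio_device_id *id)
{
	/* claim the device; a real driver would set up resources here */
	return 0;
}

static void example_remove(struct rio_dev *rdev)
{
}

static const struct rio_device_id example_rio_ids[] = {
	/* one exact vendor:device pair, any assembly vendor/device */
	{ .did = 0x5a5a, .vid = 0x0038,
	  .asm_did = RIO_ANY_ID, .asm_vid = RIO_ANY_ID },
	{ 0, }	/* vid == 0 && asm_vid == 0 ends rio_match_device()'s scan */
};

static struct rio_driver example_rio_driver = {
	.name     = "example_rio",
	.id_table = example_rio_ids,
	.probe    = example_probe,
	.remove   = example_remove,
};
/* rio_register_driver(&example_rio_driver) would wire this into
 * rio_bus_type, after which rio_match_bus() and rio_device_probe()
 * above perform the matching and binding. */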
gpl-2.0
str90/RK3188_tablet_kernel_sources
arch/m68k/platform/coldfire/intc-2.c
2335
5387
/* * intc-2.c * * General interrupt controller code for the many ColdFire cores that use * interrupt controllers with 63 interrupt sources, organized as 56 fully- * programmable + 7 fixed-level interrupt sources. This includes the 523x * family, the 5270, 5271, 5274, 5275, and the 528x family which have two such * controllers, and the 547x and 548x families which have only one of them. * * The external 7 fixed interrupts are part of the Edge Port unit of these * ColdFire parts. They can be configured as level or edge triggered. * * (C) Copyright 2009-2011, Greg Ungerer <gerg@snapgear.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/types.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/io.h> #include <asm/coldfire.h> #include <asm/mcfsim.h> #include <asm/traps.h> /* * Bit definitions for the ICR family of registers. */ #define MCFSIM_ICR_LEVEL(l) ((l)<<3) /* Level l intr */ #define MCFSIM_ICR_PRI(p) (p) /* Priority p intr */ /* * The EDGE Port interrupts are the fixed 7 external interrupts. * They need some special treatment, for example they need to be acked. */ #define EINT0 64 /* Is not actually used, but spot reserved for it */ #define EINT1 65 /* EDGE Port interrupt 1 */ #define EINT7 71 /* EDGE Port interrupt 7 */ #ifdef MCFICM_INTC1 #define NR_VECS 128 #else #define NR_VECS 64 #endif static void intc_irq_mask(struct irq_data *d) { unsigned int irq = d->irq - MCFINT_VECBASE; unsigned long imraddr; u32 val, imrbit; #ifdef MCFICM_INTC1 imraddr = (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0; #else imraddr = MCFICM_INTC0; #endif imraddr += (irq & 0x20) ? MCFINTC_IMRH : MCFINTC_IMRL; imrbit = 0x1 << (irq & 0x1f); val = __raw_readl(imraddr); __raw_writel(val | imrbit, imraddr); } static void intc_irq_unmask(struct irq_data *d) { unsigned int irq = d->irq - MCFINT_VECBASE; unsigned long imraddr; u32 val, imrbit; #ifdef MCFICM_INTC1 imraddr = (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0; #else imraddr = MCFICM_INTC0; #endif imraddr += ((irq & 0x20) ? MCFINTC_IMRH : MCFINTC_IMRL); imrbit = 0x1 << (irq & 0x1f); /* Don't set the "maskall" bit! */ if ((irq & 0x20) == 0) imrbit |= 0x1; val = __raw_readl(imraddr); __raw_writel(val & ~imrbit, imraddr); } /* * Only the external (or EDGE Port) interrupts need to be acknowledged * here, as part of the IRQ handler. They only really need to be ack'ed * if they are in edge triggered mode, but there is no harm in doing it * for all types. */ static void intc_irq_ack(struct irq_data *d) { unsigned int irq = d->irq; __raw_writeb(0x1 << (irq - EINT0), MCFEPORT_EPFR); } /* * Each vector needs a unique priority and level associated with it. * We don't really care so much what they are, we don't rely on the * traditional priority interrupt scheme of the m68k/ColdFire. This * only needs to be set once for an interrupt, and we will never change * these values once we have set them. */ static u8 intc_intpri = MCFSIM_ICR_LEVEL(6) | MCFSIM_ICR_PRI(6); static unsigned int intc_irq_startup(struct irq_data *d) { unsigned int irq = d->irq - MCFINT_VECBASE; unsigned long icraddr; #ifdef MCFICM_INTC1 icraddr = (irq & 0x40) ? 
MCFICM_INTC1 : MCFICM_INTC0; #else icraddr = MCFICM_INTC0; #endif icraddr += MCFINTC_ICR0 + (irq & 0x3f); if (__raw_readb(icraddr) == 0) __raw_writeb(intc_intpri--, icraddr); irq = d->irq; if ((irq >= EINT1) && (irq <= EINT7)) { u8 v; irq -= EINT0; /* Set EPORT line as input */ v = __raw_readb(MCFEPORT_EPDDR); __raw_writeb(v & ~(0x1 << irq), MCFEPORT_EPDDR); /* Set EPORT line as interrupt source */ v = __raw_readb(MCFEPORT_EPIER); __raw_writeb(v | (0x1 << irq), MCFEPORT_EPIER); } intc_irq_unmask(d); return 0; } static int intc_irq_set_type(struct irq_data *d, unsigned int type) { unsigned int irq = d->irq; u16 pa, tb; switch (type) { case IRQ_TYPE_EDGE_RISING: tb = 0x1; break; case IRQ_TYPE_EDGE_FALLING: tb = 0x2; break; case IRQ_TYPE_EDGE_BOTH: tb = 0x3; break; default: /* Level triggered */ tb = 0; break; } if (tb) irq_set_handler(irq, handle_edge_irq); irq -= EINT0; pa = __raw_readw(MCFEPORT_EPPAR); pa = (pa & ~(0x3 << (irq * 2))) | (tb << (irq * 2)); __raw_writew(pa, MCFEPORT_EPPAR); return 0; } static struct irq_chip intc_irq_chip = { .name = "CF-INTC", .irq_startup = intc_irq_startup, .irq_mask = intc_irq_mask, .irq_unmask = intc_irq_unmask, }; static struct irq_chip intc_irq_chip_edge_port = { .name = "CF-INTC-EP", .irq_startup = intc_irq_startup, .irq_mask = intc_irq_mask, .irq_unmask = intc_irq_unmask, .irq_ack = intc_irq_ack, .irq_set_type = intc_irq_set_type, }; void __init init_IRQ(void) { int irq; init_vectors(); /* Mask all interrupt sources */ __raw_writel(0x1, MCFICM_INTC0 + MCFINTC_IMRL); #ifdef MCFICM_INTC1 __raw_writel(0x1, MCFICM_INTC1 + MCFINTC_IMRL); #endif for (irq = MCFINT_VECBASE; (irq < MCFINT_VECBASE + NR_VECS); irq++) { if ((irq >= EINT1) && (irq <=EINT7)) irq_set_chip(irq, &intc_irq_chip_edge_port); else irq_set_chip(irq, &intc_irq_chip); irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); irq_set_handler(irq, handle_level_irq); } }
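/*
 * Illustrative sketch, not part of this file: the IMR addressing rule
 * used by intc_irq_mask() and intc_irq_unmask() above, written out as a
 * pure function. The struct and names are invented for clarity; the bit
 * arithmetic is the part taken from the driver.
 */
#include <stdint.h>

struct imr_slot {
	int      controller;	/* 0 = INTC0, 1 = INTC1 (where present) */
	int      high_word;	/* 1 = IMRH, 0 = IMRL */
	uint32_t bit;		/* mask bit within the selected 32-bit IMR */
};

static struct imr_slot imr_decode(unsigned int vec, unsigned int vecbase)
{
	unsigned int irq = vec - vecbase;	/* as irq = d->irq - MCFINT_VECBASE */
	struct imr_slot s;

	s.controller = (irq >> 6) & 1;		/* sources 64..127 live in INTC1 */
	s.high_word  = (irq >> 5) & 1;		/* sources 32..63 of a bank use IMRH */
	s.bit        = 1u << (irq & 0x1f);	/* one bit per source in each word */
	return s;
}
/* One wrinkle the sketch leaves out: the unmask path above takes care
 * never to clear bit 0 of IMRL, the "maskall" bit for the whole bank. */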
gpl-2.0
GearCM/android_kernel_samsung_exynos5410
drivers/tty/serial/ifx6x60.c
3359
37279
/**************************************************************************** * * Driver for the IFX 6x60 spi modem. * * Copyright (C) 2008 Option International * Copyright (C) 2008 Filip Aben <f.aben@option.com> * Denis Joseph Barrow <d.barow@option.com> * Jan Dumon <j.dumon@option.com> * * Copyright (C) 2009, 2010 Intel Corp * Russ Gorby <russ.gorby@intel.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA * * Driver modified by Intel from Option gtm501l_spi.c * * Notes * o The driver currently assumes a single device only. If you need to * change this then look for saved_ifx_dev and add a device lookup * o The driver is intended to be big-endian safe but has never been * tested that way (no suitable hardware). There are a couple of FIXME * notes by areas that may need addressing * o Some of the GPIO naming/setup assumptions may need revisiting if * you need to use this driver for another platform. * *****************************************************************************/ #include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/termios.h> #include <linux/tty.h> #include <linux/device.h> #include <linux/spi/spi.h> #include <linux/kfifo.h> #include <linux/tty_flip.h> #include <linux/timer.h> #include <linux/serial.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/rfkill.h> #include <linux/fs.h> #include <linux/ip.h> #include <linux/dmapool.h> #include <linux/gpio.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/spi/ifx_modem.h> #include <linux/delay.h> #include "ifx6x60.h" #define IFX_SPI_MORE_MASK 0x10 #define IFX_SPI_MORE_BIT 12 /* bit position in u16 */ #define IFX_SPI_CTS_BIT 13 /* bit position in u16 */ #define IFX_SPI_MODE SPI_MODE_1 #define IFX_SPI_TTY_ID 0 #define IFX_SPI_TIMEOUT_SEC 2 #define IFX_SPI_HEADER_0 (-1) #define IFX_SPI_HEADER_F (-2) /* forward reference */ static void ifx_spi_handle_srdy(struct ifx_spi_device *ifx_dev); /* local variables */ static int spi_bpw = 16; /* 8, 16 or 32 bit word length */ static struct tty_driver *tty_drv; static struct ifx_spi_device *saved_ifx_dev; static struct lock_class_key ifx_spi_key; /* GPIO/GPE settings */ /** * mrdy_set_high - set MRDY GPIO * @ifx: device we are controlling * */ static inline void mrdy_set_high(struct ifx_spi_device *ifx) { gpio_set_value(ifx->gpio.mrdy, 1); } /** * mrdy_set_low - clear MRDY GPIO * @ifx: device we are controlling * */ static inline void mrdy_set_low(struct ifx_spi_device *ifx) { gpio_set_value(ifx->gpio.mrdy, 0); } /** * ifx_spi_power_state_set * @ifx_dev: our SPI device * @val: bits to set * * Set bit in power status and signal power system if status becomes non-0 */ static void ifx_spi_power_state_set(struct ifx_spi_device *ifx_dev, unsigned char val) { unsigned long flags; spin_lock_irqsave(&ifx_dev->power_lock, flags); /* * if power status is already non-0, just update, else 
* tell power system */ if (!ifx_dev->power_status) pm_runtime_get(&ifx_dev->spi_dev->dev); ifx_dev->power_status |= val; spin_unlock_irqrestore(&ifx_dev->power_lock, flags); } /** * ifx_spi_power_state_clear - clear power bit * @ifx_dev: our SPI device * @val: bits to clear * * clear bit in power status and signal power system if status becomes 0 */ static void ifx_spi_power_state_clear(struct ifx_spi_device *ifx_dev, unsigned char val) { unsigned long flags; spin_lock_irqsave(&ifx_dev->power_lock, flags); if (ifx_dev->power_status) { ifx_dev->power_status &= ~val; if (!ifx_dev->power_status) pm_runtime_put(&ifx_dev->spi_dev->dev); } spin_unlock_irqrestore(&ifx_dev->power_lock, flags); } /** * swap_buf * @buf: our buffer * @len: number of bytes (not words) in the buffer * @end: end of buffer * * Swap the contents of a buffer into big endian format */ static inline void swap_buf(u16 *buf, int len, void *end) { int n; len = ((len + 1) >> 1); if ((void *)&buf[len] > end) { pr_err("swap_buf: swap exceeds boundary (%p > %p)!", &buf[len], end); return; } for (n = 0; n < len; n++) { *buf = cpu_to_be16(*buf); buf++; } } /** * mrdy_assert - assert MRDY line * @ifx_dev: our SPI device * * Assert mrdy and set timer to wait for SRDY interrupt, if SRDY is low * now. * * FIXME: Can SRDY even go high as we are running this code ? */ static void mrdy_assert(struct ifx_spi_device *ifx_dev) { int val = gpio_get_value(ifx_dev->gpio.srdy); if (!val) { if (!test_and_set_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags)) { ifx_dev->spi_timer.expires = jiffies + IFX_SPI_TIMEOUT_SEC*HZ; add_timer(&ifx_dev->spi_timer); } } ifx_spi_power_state_set(ifx_dev, IFX_SPI_POWER_DATA_PENDING); mrdy_set_high(ifx_dev); } /** * ifx_spi_ttyhangup - hang up an IFX device * @ifx_dev: our SPI device * * Hang up the tty attached to the IFX device if one is currently * open. If not, take no action */ static void ifx_spi_ttyhangup(struct ifx_spi_device *ifx_dev) { struct tty_port *pport = &ifx_dev->tty_port; struct tty_struct *tty = tty_port_tty_get(pport); if (tty) { tty_hangup(tty); tty_kref_put(tty); } } /** * ifx_spi_timeout - SPI timeout * @arg: our SPI device * * The SPI has timed out: hang up the tty. Users will then see a hangup * and error events. */ static void ifx_spi_timeout(unsigned long arg) { struct ifx_spi_device *ifx_dev = (struct ifx_spi_device *)arg; dev_warn(&ifx_dev->spi_dev->dev, "*** SPI Timeout ***"); ifx_spi_ttyhangup(ifx_dev); mrdy_set_low(ifx_dev); clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags); } /* char/tty operations */ /** * ifx_spi_tiocmget - get modem lines * @tty: our tty device * * Map the signal state into Linux modem flags and report the value * in Linux terms */ static int ifx_spi_tiocmget(struct tty_struct *tty) { unsigned int value; struct ifx_spi_device *ifx_dev = tty->driver_data; value = (test_bit(IFX_SPI_RTS, &ifx_dev->signal_state) ? TIOCM_RTS : 0) | (test_bit(IFX_SPI_DTR, &ifx_dev->signal_state) ? TIOCM_DTR : 0) | (test_bit(IFX_SPI_CTS, &ifx_dev->signal_state) ? TIOCM_CTS : 0) | (test_bit(IFX_SPI_DSR, &ifx_dev->signal_state) ? TIOCM_DSR : 0) | (test_bit(IFX_SPI_DCD, &ifx_dev->signal_state) ? TIOCM_CAR : 0) | (test_bit(IFX_SPI_RI, &ifx_dev->signal_state) ? TIOCM_RNG : 0); return value; } /** * ifx_spi_tiocmset - set modem bits * @tty: the tty structure * @set: bits to set * @clear: bits to clear * * The IFX6x60 only supports DTR and RTS. Set them accordingly * and flag that an update to the modem is needed. 
* * FIXME: do we need to kick the transfers when we do this ? */ static int ifx_spi_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct ifx_spi_device *ifx_dev = tty->driver_data; if (set & TIOCM_RTS) set_bit(IFX_SPI_RTS, &ifx_dev->signal_state); if (set & TIOCM_DTR) set_bit(IFX_SPI_DTR, &ifx_dev->signal_state); if (clear & TIOCM_RTS) clear_bit(IFX_SPI_RTS, &ifx_dev->signal_state); if (clear & TIOCM_DTR) clear_bit(IFX_SPI_DTR, &ifx_dev->signal_state); set_bit(IFX_SPI_UPDATE, &ifx_dev->signal_state); return 0; } /** * ifx_spi_open - called on tty open * @tty: our tty device * @filp: file handle being associated with the tty * * Open the tty interface. We let the tty_port layer do all the work * for us. * * FIXME: Remove single device assumption and saved_ifx_dev */ static int ifx_spi_open(struct tty_struct *tty, struct file *filp) { return tty_port_open(&saved_ifx_dev->tty_port, tty, filp); } /** * ifx_spi_close - called when our tty closes * @tty: the tty being closed * @filp: the file handle being closed * * Perform the close of the tty. We use the tty_port layer to do all * our hard work. */ static void ifx_spi_close(struct tty_struct *tty, struct file *filp) { struct ifx_spi_device *ifx_dev = tty->driver_data; tty_port_close(&ifx_dev->tty_port, tty, filp); /* FIXME: should we do an ifx_spi_reset here ? */ } /** * ifx_spi_decode_spi_header - decode received header * @buffer: the received data * @length: decoded length * @more: decoded more flag * @received_cts: status of cts we received * * Note how received_cts is handled -- if header is all F it is left * the same as it was, if header is all 0 it is set to 0 otherwise it is * taken from the incoming header. * * FIXME: endianness */ static int ifx_spi_decode_spi_header(unsigned char *buffer, int *length, unsigned char *more, unsigned char *received_cts) { u16 h1; u16 h2; u16 *in_buffer = (u16 *)buffer; h1 = *in_buffer; h2 = *(in_buffer+1); if (h1 == 0 && h2 == 0) { *received_cts = 0; return IFX_SPI_HEADER_0; } else if (h1 == 0xffff && h2 == 0xffff) { /* spi_slave_cts remains as it was */ return IFX_SPI_HEADER_F; } *length = h1 & 0xfff; /* upper bits of byte are flags */ *more = (buffer[1] >> IFX_SPI_MORE_BIT) & 1; *received_cts = (buffer[3] >> IFX_SPI_CTS_BIT) & 1; return 0; } /** * ifx_spi_setup_spi_header - set header fields * @txbuffer: pointer to start of SPI buffer * @tx_count: bytes * @more: indicate if more to follow * * Format up an SPI header for a transfer * * FIXME: endianness? */ static void ifx_spi_setup_spi_header(unsigned char *txbuffer, int tx_count, unsigned char more) { *(u16 *)(txbuffer) = tx_count; *(u16 *)(txbuffer+2) = IFX_SPI_PAYLOAD_SIZE; txbuffer[1] |= (more << IFX_SPI_MORE_BIT) & IFX_SPI_MORE_MASK; } /** * ifx_spi_wakeup_serial - SPI space made * @ifx_dev: our SPI device * * We have emptied the FIFO enough that we want to get more data * queued into it. Poke the line discipline via tty_wakeup so that * it will feed us more bits */ static void ifx_spi_wakeup_serial(struct ifx_spi_device *ifx_dev) { struct tty_struct *tty; tty = tty_port_tty_get(&ifx_dev->tty_port); if (!tty) return; tty_wakeup(tty); tty_kref_put(tty); } /** * ifx_spi_prepare_tx_buffer - prepare transmit frame * @ifx_dev: our SPI device * * The transmit buffer needs a header and various other bits of * information followed by as much data as we can pull from the FIFO * and transfer. 
This function formats up a suitable buffer in the * ifx_dev->tx_buffer * * FIXME: performance - should we wake the tty when the queue is half * empty ? */ static int ifx_spi_prepare_tx_buffer(struct ifx_spi_device *ifx_dev) { int temp_count; int queue_length; int tx_count; unsigned char *tx_buffer; tx_buffer = ifx_dev->tx_buffer; memset(tx_buffer, 0, IFX_SPI_TRANSFER_SIZE); /* make room for required SPI header */ tx_buffer += IFX_SPI_HEADER_OVERHEAD; tx_count = IFX_SPI_HEADER_OVERHEAD; /* clear to signal no more data if this turns out to be the * last buffer sent in a sequence */ ifx_dev->spi_more = 0; /* if modem cts is set, just send empty buffer */ if (!ifx_dev->spi_slave_cts) { /* see if there's tx data */ queue_length = kfifo_len(&ifx_dev->tx_fifo); if (queue_length != 0) { /* data to mux -- see if there's room for it */ temp_count = min(queue_length, IFX_SPI_PAYLOAD_SIZE); temp_count = kfifo_out_locked(&ifx_dev->tx_fifo, tx_buffer, temp_count, &ifx_dev->fifo_lock); /* update buffer pointer and data count in message */ tx_buffer += temp_count; tx_count += temp_count; if (temp_count == queue_length) /* poke port to get more data */ ifx_spi_wakeup_serial(ifx_dev); else /* more data in port, use next SPI message */ ifx_dev->spi_more = 1; } } /* have data and info for header -- set up SPI header in buffer */ /* spi header needs payload size, not entire buffer size */ ifx_spi_setup_spi_header(ifx_dev->tx_buffer, tx_count-IFX_SPI_HEADER_OVERHEAD, ifx_dev->spi_more); /* swap actual data in the buffer */ swap_buf((u16 *)(ifx_dev->tx_buffer), tx_count, &ifx_dev->tx_buffer[IFX_SPI_TRANSFER_SIZE]); return tx_count; } /** * ifx_spi_write - line discipline write * @tty: our tty device * @buf: pointer to buffer to write (kernel space) * @count: size of buffer * * Write the characters we have been given into the FIFO. If the device * is not active then activate it, when the SRDY line is asserted back * this will commence I/O */ static int ifx_spi_write(struct tty_struct *tty, const unsigned char *buf, int count) { struct ifx_spi_device *ifx_dev = tty->driver_data; unsigned char *tmp_buf = (unsigned char *)buf; int tx_count = kfifo_in_locked(&ifx_dev->tx_fifo, tmp_buf, count, &ifx_dev->fifo_lock); mrdy_assert(ifx_dev); return tx_count; } /** * ifx_spi_write_room - line discipline helper * @tty: our tty device * * Report how much data we can accept before we drop bytes. As we use * a simple FIFO this is nice and easy. */ static int ifx_spi_write_room(struct tty_struct *tty) { struct ifx_spi_device *ifx_dev = tty->driver_data; return IFX_SPI_FIFO_SIZE - kfifo_len(&ifx_dev->tx_fifo); } /** * ifx_spi_chars_in_buffer - line discipline helper * @tty: our tty device * * Report how many characters we have buffered. In our case this is the * number of bytes sitting in our transmit FIFO. */ static int ifx_spi_chars_in_buffer(struct tty_struct *tty) { struct ifx_spi_device *ifx_dev = tty->driver_data; return kfifo_len(&ifx_dev->tx_fifo); } /** * ifx_spi_hangup * @tty: our tty device * * tty port hang up. Called when tty_hangup processing is invoked either * by loss of carrier, or by software (eg vhangup). Serialized against * activate/shutdown by the tty layer. */ static void ifx_spi_hangup(struct tty_struct *tty) { struct ifx_spi_device *ifx_dev = tty->driver_data; tty_port_hangup(&ifx_dev->tty_port); } /** * ifx_port_activate * @port: our tty port * * tty port activate method - called for first open. Serialized * with hangup and shutdown by the tty layer. 
*/ static int ifx_port_activate(struct tty_port *port, struct tty_struct *tty) { struct ifx_spi_device *ifx_dev = container_of(port, struct ifx_spi_device, tty_port); /* clear any old data; can't do this in 'close' */ kfifo_reset(&ifx_dev->tx_fifo); /* put port data into this tty */ tty->driver_data = ifx_dev; /* allows flip string push from int context */ tty->low_latency = 1; return 0; } /** * ifx_port_shutdown * @port: our tty port * * tty port shutdown method - called for last port close. Serialized * with hangup and activate by the tty layer. */ static void ifx_port_shutdown(struct tty_port *port) { struct ifx_spi_device *ifx_dev = container_of(port, struct ifx_spi_device, tty_port); mrdy_set_low(ifx_dev); clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags); tasklet_kill(&ifx_dev->io_work_tasklet); } static const struct tty_port_operations ifx_tty_port_ops = { .activate = ifx_port_activate, .shutdown = ifx_port_shutdown, }; static const struct tty_operations ifx_spi_serial_ops = { .open = ifx_spi_open, .close = ifx_spi_close, .write = ifx_spi_write, .hangup = ifx_spi_hangup, .write_room = ifx_spi_write_room, .chars_in_buffer = ifx_spi_chars_in_buffer, .tiocmget = ifx_spi_tiocmget, .tiocmset = ifx_spi_tiocmset, }; /** * ifx_spi_insert_flip_string - queue received data * @ifx_dev: our SPI device * @chars: buffer we have received * @size: number of chars received * * Queue bytes to the tty assuming the tty side is currently open. If * not then discard the data. */ static void ifx_spi_insert_flip_string(struct ifx_spi_device *ifx_dev, unsigned char *chars, size_t size) { struct tty_struct *tty = tty_port_tty_get(&ifx_dev->tty_port); if (!tty) return; tty_insert_flip_string(tty, chars, size); tty_flip_buffer_push(tty); tty_kref_put(tty); } /** * ifx_spi_complete - SPI transfer completed * @ctx: our SPI device * * An SPI transfer has completed. Process any received data and kick off * any further transmits we can commence. 
*/ static void ifx_spi_complete(void *ctx) { struct ifx_spi_device *ifx_dev = ctx; struct tty_struct *tty; struct tty_ldisc *ldisc = NULL; int length; int actual_length; unsigned char more; unsigned char cts; int local_write_pending = 0; int queue_length; int srdy; int decode_result; mrdy_set_low(ifx_dev); if (!ifx_dev->spi_msg.status) { /* check header validity, get comm flags */ swap_buf((u16 *)ifx_dev->rx_buffer, IFX_SPI_HEADER_OVERHEAD, &ifx_dev->rx_buffer[IFX_SPI_HEADER_OVERHEAD]); decode_result = ifx_spi_decode_spi_header(ifx_dev->rx_buffer, &length, &more, &cts); if (decode_result == IFX_SPI_HEADER_0) { dev_dbg(&ifx_dev->spi_dev->dev, "ignore input: invalid header 0"); ifx_dev->spi_slave_cts = 0; goto complete_exit; } else if (decode_result == IFX_SPI_HEADER_F) { dev_dbg(&ifx_dev->spi_dev->dev, "ignore input: invalid header F"); goto complete_exit; } ifx_dev->spi_slave_cts = cts; actual_length = min((unsigned int)length, ifx_dev->spi_msg.actual_length); swap_buf((u16 *)(ifx_dev->rx_buffer + IFX_SPI_HEADER_OVERHEAD), actual_length, &ifx_dev->rx_buffer[IFX_SPI_TRANSFER_SIZE]); ifx_spi_insert_flip_string( ifx_dev, ifx_dev->rx_buffer + IFX_SPI_HEADER_OVERHEAD, (size_t)actual_length); } else { dev_dbg(&ifx_dev->spi_dev->dev, "SPI transfer error %d", ifx_dev->spi_msg.status); } complete_exit: if (ifx_dev->write_pending) { ifx_dev->write_pending = 0; local_write_pending = 1; } clear_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &(ifx_dev->flags)); queue_length = kfifo_len(&ifx_dev->tx_fifo); srdy = gpio_get_value(ifx_dev->gpio.srdy); if (!srdy) ifx_spi_power_state_clear(ifx_dev, IFX_SPI_POWER_SRDY); /* schedule output if there is more to do */ if (test_and_clear_bit(IFX_SPI_STATE_IO_READY, &ifx_dev->flags)) tasklet_schedule(&ifx_dev->io_work_tasklet); else { if (more || ifx_dev->spi_more || queue_length > 0 || local_write_pending) { if (ifx_dev->spi_slave_cts) { if (more) mrdy_assert(ifx_dev); } else mrdy_assert(ifx_dev); } else { /* * poke line discipline driver if any for more data * may or may not get more data to write * for now, say not busy */ ifx_spi_power_state_clear(ifx_dev, IFX_SPI_POWER_DATA_PENDING); tty = tty_port_tty_get(&ifx_dev->tty_port); if (tty) { ldisc = tty_ldisc_ref(tty); if (ldisc) { ldisc->ops->write_wakeup(tty); tty_ldisc_deref(ldisc); } tty_kref_put(tty); } } } } /** * ifx_spio_io - I/O tasklet * @data: our SPI device * * Queue data for transmission if possible and then kick off the * transfer. 
*/ static void ifx_spi_io(unsigned long data) { int retval; struct ifx_spi_device *ifx_dev = (struct ifx_spi_device *) data; if (!test_and_set_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &ifx_dev->flags)) { if (ifx_dev->gpio.unack_srdy_int_nb > 0) ifx_dev->gpio.unack_srdy_int_nb--; ifx_spi_prepare_tx_buffer(ifx_dev); spi_message_init(&ifx_dev->spi_msg); INIT_LIST_HEAD(&ifx_dev->spi_msg.queue); ifx_dev->spi_msg.context = ifx_dev; ifx_dev->spi_msg.complete = ifx_spi_complete; /* set up our spi transfer */ /* note len is BYTES, not transfers */ ifx_dev->spi_xfer.len = IFX_SPI_TRANSFER_SIZE; ifx_dev->spi_xfer.cs_change = 0; ifx_dev->spi_xfer.speed_hz = ifx_dev->spi_dev->max_speed_hz; /* ifx_dev->spi_xfer.speed_hz = 390625; */ ifx_dev->spi_xfer.bits_per_word = spi_bpw; ifx_dev->spi_xfer.tx_buf = ifx_dev->tx_buffer; ifx_dev->spi_xfer.rx_buf = ifx_dev->rx_buffer; /* * setup dma pointers */ if (ifx_dev->use_dma) { ifx_dev->spi_msg.is_dma_mapped = 1; ifx_dev->tx_dma = ifx_dev->tx_bus; ifx_dev->rx_dma = ifx_dev->rx_bus; ifx_dev->spi_xfer.tx_dma = ifx_dev->tx_dma; ifx_dev->spi_xfer.rx_dma = ifx_dev->rx_dma; } else { ifx_dev->spi_msg.is_dma_mapped = 0; ifx_dev->tx_dma = (dma_addr_t)0; ifx_dev->rx_dma = (dma_addr_t)0; ifx_dev->spi_xfer.tx_dma = (dma_addr_t)0; ifx_dev->spi_xfer.rx_dma = (dma_addr_t)0; } spi_message_add_tail(&ifx_dev->spi_xfer, &ifx_dev->spi_msg); /* Assert MRDY. This may have already been done by the write * routine. */ mrdy_assert(ifx_dev); retval = spi_async(ifx_dev->spi_dev, &ifx_dev->spi_msg); if (retval) { clear_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &ifx_dev->flags); tasklet_schedule(&ifx_dev->io_work_tasklet); return; } } else ifx_dev->write_pending = 1; } /** * ifx_spi_free_port - free up the tty side * @ifx_dev: IFX device going away * * Unregister and free up a port when the device goes away */ static void ifx_spi_free_port(struct ifx_spi_device *ifx_dev) { if (ifx_dev->tty_dev) tty_unregister_device(tty_drv, ifx_dev->minor); kfifo_free(&ifx_dev->tx_fifo); } /** * ifx_spi_create_port - create a new port * @ifx_dev: our spi device * * Allocate and initialise the tty port that goes with this interface * and add it to the tty layer so that it can be opened. */ static int ifx_spi_create_port(struct ifx_spi_device *ifx_dev) { int ret = 0; struct tty_port *pport = &ifx_dev->tty_port; spin_lock_init(&ifx_dev->fifo_lock); lockdep_set_class_and_subclass(&ifx_dev->fifo_lock, &ifx_spi_key, 0); if (kfifo_alloc(&ifx_dev->tx_fifo, IFX_SPI_FIFO_SIZE, GFP_KERNEL)) { ret = -ENOMEM; goto error_ret; } tty_port_init(pport); pport->ops = &ifx_tty_port_ops; ifx_dev->minor = IFX_SPI_TTY_ID; ifx_dev->tty_dev = tty_register_device(tty_drv, ifx_dev->minor, &ifx_dev->spi_dev->dev); if (IS_ERR(ifx_dev->tty_dev)) { dev_dbg(&ifx_dev->spi_dev->dev, "%s: registering tty device failed", __func__); ret = PTR_ERR(ifx_dev->tty_dev); goto error_ret; } return 0; error_ret: ifx_spi_free_port(ifx_dev); return ret; } /** * ifx_spi_handle_srdy - handle SRDY * @ifx_dev: device asserting SRDY * * Check our device state and see what we need to kick off when SRDY * is asserted. This usually means killing the timer and firing off the * I/O processing. 
*/ static void ifx_spi_handle_srdy(struct ifx_spi_device *ifx_dev) { if (test_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags)) { del_timer_sync(&ifx_dev->spi_timer); clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags); } ifx_spi_power_state_set(ifx_dev, IFX_SPI_POWER_SRDY); if (!test_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &ifx_dev->flags)) tasklet_schedule(&ifx_dev->io_work_tasklet); else set_bit(IFX_SPI_STATE_IO_READY, &ifx_dev->flags); } /** * ifx_spi_srdy_interrupt - SRDY asserted * @irq: our IRQ number * @dev: our ifx device * * The modem asserted SRDY. Handle the srdy event */ static irqreturn_t ifx_spi_srdy_interrupt(int irq, void *dev) { struct ifx_spi_device *ifx_dev = dev; ifx_dev->gpio.unack_srdy_int_nb++; ifx_spi_handle_srdy(ifx_dev); return IRQ_HANDLED; } /** * ifx_spi_reset_interrupt - Modem has changed reset state * @irq: interrupt number * @dev: our device pointer * * The modem has either entered or left reset state. Check the GPIO * line to see which. * * FIXME: review locking on MR_INPROGRESS versus * parallel unsolicited reset/solicited reset */ static irqreturn_t ifx_spi_reset_interrupt(int irq, void *dev) { struct ifx_spi_device *ifx_dev = dev; int val = gpio_get_value(ifx_dev->gpio.reset_out); int solreset = test_bit(MR_START, &ifx_dev->mdm_reset_state); if (val == 0) { /* entered reset */ set_bit(MR_INPROGRESS, &ifx_dev->mdm_reset_state); if (!solreset) { /* unsolicited reset */ ifx_spi_ttyhangup(ifx_dev); } } else { /* exited reset */ clear_bit(MR_INPROGRESS, &ifx_dev->mdm_reset_state); if (solreset) { set_bit(MR_COMPLETE, &ifx_dev->mdm_reset_state); wake_up(&ifx_dev->mdm_reset_wait); } } return IRQ_HANDLED; } /** * ifx_spi_free_device - free device * @ifx_dev: device to free * * Free the IFX device */ static void ifx_spi_free_device(struct ifx_spi_device *ifx_dev) { ifx_spi_free_port(ifx_dev); dma_free_coherent(&ifx_dev->spi_dev->dev, IFX_SPI_TRANSFER_SIZE, ifx_dev->tx_buffer, ifx_dev->tx_bus); dma_free_coherent(&ifx_dev->spi_dev->dev, IFX_SPI_TRANSFER_SIZE, ifx_dev->rx_buffer, ifx_dev->rx_bus); } /** * ifx_spi_reset - reset modem * @ifx_dev: modem to reset * * Perform a reset on the modem */ static int ifx_spi_reset(struct ifx_spi_device *ifx_dev) { int ret; /* * set up modem power, reset * * delays are required on some platforms for the modem * to reset properly */ set_bit(MR_START, &ifx_dev->mdm_reset_state); gpio_set_value(ifx_dev->gpio.po, 0); gpio_set_value(ifx_dev->gpio.reset, 0); msleep(25); gpio_set_value(ifx_dev->gpio.reset, 1); msleep(1); gpio_set_value(ifx_dev->gpio.po, 1); msleep(1); gpio_set_value(ifx_dev->gpio.po, 0); ret = wait_event_timeout(ifx_dev->mdm_reset_wait, test_bit(MR_COMPLETE, &ifx_dev->mdm_reset_state), IFX_RESET_TIMEOUT); if (!ret) dev_warn(&ifx_dev->spi_dev->dev, "Modem reset timeout: (state:%lx)", ifx_dev->mdm_reset_state); ifx_dev->mdm_reset_state = 0; return ret; } /** * ifx_spi_spi_probe - probe callback * @spi: our possible matching SPI device * * Probe for a 6x60 modem on SPI bus. Perform any needed device and * GPIO setup. 
* * FIXME: * - Support for multiple devices * - Split out MID specific GPIO handling eventually */ static int ifx_spi_spi_probe(struct spi_device *spi) { int ret; int srdy; struct ifx_modem_platform_data *pl_data; struct ifx_spi_device *ifx_dev; if (saved_ifx_dev) { dev_dbg(&spi->dev, "ignoring subsequent detection"); return -ENODEV; } pl_data = (struct ifx_modem_platform_data *)spi->dev.platform_data; if (!pl_data) { dev_err(&spi->dev, "missing platform data!"); return -ENODEV; } /* initialize structure to hold our device variables */ ifx_dev = kzalloc(sizeof(struct ifx_spi_device), GFP_KERNEL); if (!ifx_dev) { dev_err(&spi->dev, "spi device allocation failed"); return -ENOMEM; } saved_ifx_dev = ifx_dev; ifx_dev->spi_dev = spi; clear_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &ifx_dev->flags); spin_lock_init(&ifx_dev->write_lock); spin_lock_init(&ifx_dev->power_lock); ifx_dev->power_status = 0; init_timer(&ifx_dev->spi_timer); ifx_dev->spi_timer.function = ifx_spi_timeout; ifx_dev->spi_timer.data = (unsigned long)ifx_dev; ifx_dev->modem = pl_data->modem_type; ifx_dev->use_dma = pl_data->use_dma; ifx_dev->max_hz = pl_data->max_hz; /* initialize spi mode, etc */ spi->max_speed_hz = ifx_dev->max_hz; spi->mode = IFX_SPI_MODE | (SPI_LOOP & spi->mode); spi->bits_per_word = spi_bpw; ret = spi_setup(spi); if (ret) { dev_err(&spi->dev, "SPI setup wasn't successful %d", ret); return -ENODEV; } /* ensure SPI protocol flags are initialized to enable transfer */ ifx_dev->spi_more = 0; ifx_dev->spi_slave_cts = 0; /*initialize transfer and dma buffers */ ifx_dev->tx_buffer = dma_alloc_coherent(ifx_dev->spi_dev->dev.parent, IFX_SPI_TRANSFER_SIZE, &ifx_dev->tx_bus, GFP_KERNEL); if (!ifx_dev->tx_buffer) { dev_err(&spi->dev, "DMA-TX buffer allocation failed"); ret = -ENOMEM; goto error_ret; } ifx_dev->rx_buffer = dma_alloc_coherent(ifx_dev->spi_dev->dev.parent, IFX_SPI_TRANSFER_SIZE, &ifx_dev->rx_bus, GFP_KERNEL); if (!ifx_dev->rx_buffer) { dev_err(&spi->dev, "DMA-RX buffer allocation failed"); ret = -ENOMEM; goto error_ret; } /* initialize waitq for modem reset */ init_waitqueue_head(&ifx_dev->mdm_reset_wait); spi_set_drvdata(spi, ifx_dev); tasklet_init(&ifx_dev->io_work_tasklet, ifx_spi_io, (unsigned long)ifx_dev); set_bit(IFX_SPI_STATE_PRESENT, &ifx_dev->flags); /* create our tty port */ ret = ifx_spi_create_port(ifx_dev); if (ret != 0) { dev_err(&spi->dev, "create default tty port failed"); goto error_ret; } ifx_dev->gpio.reset = pl_data->rst_pmu; ifx_dev->gpio.po = pl_data->pwr_on; ifx_dev->gpio.mrdy = pl_data->mrdy; ifx_dev->gpio.srdy = pl_data->srdy; ifx_dev->gpio.reset_out = pl_data->rst_out; dev_info(&spi->dev, "gpios %d, %d, %d, %d, %d", ifx_dev->gpio.reset, ifx_dev->gpio.po, ifx_dev->gpio.mrdy, ifx_dev->gpio.srdy, ifx_dev->gpio.reset_out); /* Configure gpios */ ret = gpio_request(ifx_dev->gpio.reset, "ifxModem"); if (ret < 0) { dev_err(&spi->dev, "Unable to allocate GPIO%d (RESET)", ifx_dev->gpio.reset); goto error_ret; } ret += gpio_direction_output(ifx_dev->gpio.reset, 0); ret += gpio_export(ifx_dev->gpio.reset, 1); if (ret) { dev_err(&spi->dev, "Unable to configure GPIO%d (RESET)", ifx_dev->gpio.reset); ret = -EBUSY; goto error_ret2; } ret = gpio_request(ifx_dev->gpio.po, "ifxModem"); ret += gpio_direction_output(ifx_dev->gpio.po, 0); ret += gpio_export(ifx_dev->gpio.po, 1); if (ret) { dev_err(&spi->dev, "Unable to configure GPIO%d (ON)", ifx_dev->gpio.po); ret = -EBUSY; goto error_ret3; } ret = gpio_request(ifx_dev->gpio.mrdy, "ifxModem"); if (ret < 0) { dev_err(&spi->dev, "Unable to allocate GPIO%d 
(MRDY)", ifx_dev->gpio.mrdy); goto error_ret3; } ret += gpio_export(ifx_dev->gpio.mrdy, 1); ret += gpio_direction_output(ifx_dev->gpio.mrdy, 0); if (ret) { dev_err(&spi->dev, "Unable to configure GPIO%d (MRDY)", ifx_dev->gpio.mrdy); ret = -EBUSY; goto error_ret4; } ret = gpio_request(ifx_dev->gpio.srdy, "ifxModem"); if (ret < 0) { dev_err(&spi->dev, "Unable to allocate GPIO%d (SRDY)", ifx_dev->gpio.srdy); ret = -EBUSY; goto error_ret4; } ret += gpio_export(ifx_dev->gpio.srdy, 1); ret += gpio_direction_input(ifx_dev->gpio.srdy); if (ret) { dev_err(&spi->dev, "Unable to configure GPIO%d (SRDY)", ifx_dev->gpio.srdy); ret = -EBUSY; goto error_ret5; } ret = gpio_request(ifx_dev->gpio.reset_out, "ifxModem"); if (ret < 0) { dev_err(&spi->dev, "Unable to allocate GPIO%d (RESET_OUT)", ifx_dev->gpio.reset_out); goto error_ret5; } ret += gpio_export(ifx_dev->gpio.reset_out, 1); ret += gpio_direction_input(ifx_dev->gpio.reset_out); if (ret) { dev_err(&spi->dev, "Unable to configure GPIO%d (RESET_OUT)", ifx_dev->gpio.reset_out); ret = -EBUSY; goto error_ret6; } ret = request_irq(gpio_to_irq(ifx_dev->gpio.reset_out), ifx_spi_reset_interrupt, IRQF_TRIGGER_RISING|IRQF_TRIGGER_FALLING, DRVNAME, (void *)ifx_dev); if (ret) { dev_err(&spi->dev, "Unable to get irq %x\n", gpio_to_irq(ifx_dev->gpio.reset_out)); goto error_ret6; } ret = ifx_spi_reset(ifx_dev); ret = request_irq(gpio_to_irq(ifx_dev->gpio.srdy), ifx_spi_srdy_interrupt, IRQF_TRIGGER_RISING, DRVNAME, (void *)ifx_dev); if (ret) { dev_err(&spi->dev, "Unable to get irq %x", gpio_to_irq(ifx_dev->gpio.srdy)); goto error_ret7; } /* set pm runtime power state and register with power system */ pm_runtime_set_active(&spi->dev); pm_runtime_enable(&spi->dev); /* handle case that modem is already signaling SRDY */ /* no outgoing tty open at this point, this just satisfies the * modem's read and should reset communication properly */ srdy = gpio_get_value(ifx_dev->gpio.srdy); if (srdy) { mrdy_assert(ifx_dev); ifx_spi_handle_srdy(ifx_dev); } else mrdy_set_low(ifx_dev); return 0; error_ret7: free_irq(gpio_to_irq(ifx_dev->gpio.reset_out), (void *)ifx_dev); error_ret6: gpio_free(ifx_dev->gpio.srdy); error_ret5: gpio_free(ifx_dev->gpio.mrdy); error_ret4: gpio_free(ifx_dev->gpio.reset); error_ret3: gpio_free(ifx_dev->gpio.po); error_ret2: gpio_free(ifx_dev->gpio.reset_out); error_ret: ifx_spi_free_device(ifx_dev); saved_ifx_dev = NULL; return ret; } /** * ifx_spi_spi_remove - SPI device was removed * @spi: SPI device * * FIXME: We should be shutting the device down here not in * the module unload path. */ static int ifx_spi_spi_remove(struct spi_device *spi) { struct ifx_spi_device *ifx_dev = spi_get_drvdata(spi); /* stop activity */ tasklet_kill(&ifx_dev->io_work_tasklet); /* free irq */ free_irq(gpio_to_irq(ifx_dev->gpio.reset_out), (void *)ifx_dev); free_irq(gpio_to_irq(ifx_dev->gpio.srdy), (void *)ifx_dev); gpio_free(ifx_dev->gpio.srdy); gpio_free(ifx_dev->gpio.mrdy); gpio_free(ifx_dev->gpio.reset); gpio_free(ifx_dev->gpio.po); gpio_free(ifx_dev->gpio.reset_out); /* free allocations */ ifx_spi_free_device(ifx_dev); saved_ifx_dev = NULL; return 0; } /** * ifx_spi_spi_shutdown - called on SPI shutdown * @spi: SPI device * * No action needs to be taken here */ static void ifx_spi_spi_shutdown(struct spi_device *spi) { } /* * various suspends and resumes have nothing to do * no hardware to save state for */ /** * ifx_spi_spi_suspend - suspend SPI on system suspend * @dev: device being suspended * * Suspend the SPI side. 
No action needed on Intel MID platforms, may * need extending for other systems. */ static int ifx_spi_spi_suspend(struct spi_device *spi, pm_message_t msg) { return 0; } /** * ifx_spi_spi_resume - resume SPI side on system resume * @spi: SPI device being resumed * * Resume the SPI side. No action needed on Intel MID platforms, may * need extending for other systems. */ static int ifx_spi_spi_resume(struct spi_device *spi) { return 0; } /** * ifx_spi_pm_suspend - suspend modem on system suspend * @dev: device being suspended * * Suspend the modem. No action needed on Intel MID platforms, may * need extending for other systems. */ static int ifx_spi_pm_suspend(struct device *dev) { return 0; } /** * ifx_spi_pm_resume - resume modem on system resume * @dev: device being resumed * * Allow the modem to resume. No action needed. * * FIXME: do we need to reset anything here ? */ static int ifx_spi_pm_resume(struct device *dev) { return 0; } /** * ifx_spi_pm_runtime_resume - runtime-resume modem * @dev: device being resumed * * Allow the modem to resume. No action needed. */ static int ifx_spi_pm_runtime_resume(struct device *dev) { return 0; } /** * ifx_spi_pm_runtime_suspend - runtime-suspend modem * @dev: device being suspended * * Allow the modem to suspend, so that the suspend can continue up the * device tree. */ static int ifx_spi_pm_runtime_suspend(struct device *dev) { return 0; } /** * ifx_spi_pm_runtime_idle - check if modem idle * @dev: our device * * Check conditions and queue runtime suspend if idle. */ static int ifx_spi_pm_runtime_idle(struct device *dev) { struct spi_device *spi = to_spi_device(dev); struct ifx_spi_device *ifx_dev = spi_get_drvdata(spi); if (!ifx_dev->power_status) pm_runtime_suspend(dev); return 0; } static const struct dev_pm_ops ifx_spi_pm = { .resume = ifx_spi_pm_resume, .suspend = ifx_spi_pm_suspend, .runtime_resume = ifx_spi_pm_runtime_resume, .runtime_suspend = ifx_spi_pm_runtime_suspend, .runtime_idle = ifx_spi_pm_runtime_idle }; static const struct spi_device_id ifx_id_table[] = { {"ifx6160", 0}, {"ifx6260", 0}, { } }; MODULE_DEVICE_TABLE(spi, ifx_id_table); /* spi operations */ static const struct spi_driver ifx_spi_driver = { .driver = { .name = DRVNAME, .pm = &ifx_spi_pm, .owner = THIS_MODULE}, .probe = ifx_spi_spi_probe, .shutdown = ifx_spi_spi_shutdown, .remove = __devexit_p(ifx_spi_spi_remove), .suspend = ifx_spi_spi_suspend, .resume = ifx_spi_spi_resume, .id_table = ifx_id_table }; /** * ifx_spi_exit - module exit * * Unload the module. 
*/ static void __exit ifx_spi_exit(void) { /* unregister */ tty_unregister_driver(tty_drv); spi_unregister_driver((void *)&ifx_spi_driver); } /** * ifx_spi_init - module entry point * * Initialise the SPI and tty interfaces for the IFX SPI driver * We need to initialize upper-edge spi driver after the tty * driver because otherwise the spi probe will race */ static int __init ifx_spi_init(void) { int result; tty_drv = alloc_tty_driver(1); if (!tty_drv) { pr_err("%s: alloc_tty_driver failed", DRVNAME); return -ENOMEM; } tty_drv->driver_name = DRVNAME; tty_drv->name = TTYNAME; tty_drv->minor_start = IFX_SPI_TTY_ID; tty_drv->type = TTY_DRIVER_TYPE_SERIAL; tty_drv->subtype = SERIAL_TYPE_NORMAL; tty_drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; tty_drv->init_termios = tty_std_termios; tty_set_operations(tty_drv, &ifx_spi_serial_ops); result = tty_register_driver(tty_drv); if (result) { pr_err("%s: tty_register_driver failed(%d)", DRVNAME, result); put_tty_driver(tty_drv); return result; } result = spi_register_driver((void *)&ifx_spi_driver); if (result) { pr_err("%s: spi_register_driver failed(%d)", DRVNAME, result); tty_unregister_driver(tty_drv); } return result; } module_init(ifx_spi_init); module_exit(ifx_spi_exit); MODULE_AUTHOR("Intel"); MODULE_DESCRIPTION("IFX6x60 spi driver"); MODULE_LICENSE("GPL"); MODULE_INFO(Version, "0.1-IFX6x60");
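The probe path above requests and configures five GPIOs with repeated request/direction/export sequences, accumulating status via "ret +=", which can mask which individual step failed. As a hedged illustration only (the helper name and its use are assumptions, not part of the driver), the same pattern can be factored so each step's error is checked explicitly:

/*
 * Sketch of a per-GPIO setup helper for the probe sequence above.
 * Hypothetical; the driver does this inline with "ret +=" accumulation.
 */
static int ifx_spi_gpio_out_init(struct spi_device *spi, unsigned int gpio,
				 int value, const char *label)
{
	int ret;

	ret = gpio_request(gpio, "ifxModem");
	if (ret < 0) {
		dev_err(&spi->dev, "Unable to allocate GPIO%d (%s)",
			gpio, label);
		return ret;
	}
	/* drive the line to its initial level, then expose it via sysfs */
	ret = gpio_direction_output(gpio, value);
	if (!ret)
		ret = gpio_export(gpio, 1);
	if (ret) {
		dev_err(&spi->dev, "Unable to configure GPIO%d (%s)",
			gpio, label);
		gpio_free(gpio);
		return -EBUSY;
	}
	return 0;
}

A call such as ifx_spi_gpio_out_init(spi, ifx_dev->gpio.reset, 0, "RESET") would then stand in for one request/direction/export block, with the caller unwinding only the GPIOs already claimed.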
gpl-2.0
semdoc/kernel_moto_shamu
arch/mips/jazz/irq.c
3615
4294
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1992 Linus Torvalds * Copyright (C) 1994 - 2001, 2003, 07 Ralf Baechle */ #include <linux/clockchips.h> #include <linux/i8253.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/smp.h> #include <linux/spinlock.h> #include <linux/irq.h> #include <asm/irq_cpu.h> #include <asm/i8259.h> #include <asm/io.h> #include <asm/jazz.h> #include <asm/pgtable.h> #include <asm/tlbmisc.h> static DEFINE_RAW_SPINLOCK(r4030_lock); static void enable_r4030_irq(struct irq_data *d) { unsigned int mask = 1 << (d->irq - JAZZ_IRQ_START); unsigned long flags; raw_spin_lock_irqsave(&r4030_lock, flags); mask |= r4030_read_reg16(JAZZ_IO_IRQ_ENABLE); r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, mask); raw_spin_unlock_irqrestore(&r4030_lock, flags); } void disable_r4030_irq(struct irq_data *d) { unsigned int mask = ~(1 << (d->irq - JAZZ_IRQ_START)); unsigned long flags; raw_spin_lock_irqsave(&r4030_lock, flags); mask &= r4030_read_reg16(JAZZ_IO_IRQ_ENABLE); r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, mask); raw_spin_unlock_irqrestore(&r4030_lock, flags); } static struct irq_chip r4030_irq_type = { .name = "R4030", .irq_mask = disable_r4030_irq, .irq_unmask = enable_r4030_irq, }; void __init init_r4030_ints(void) { int i; for (i = JAZZ_IRQ_START; i <= JAZZ_IRQ_END; i++) irq_set_chip_and_handler(i, &r4030_irq_type, handle_level_irq); r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, 0); r4030_read_reg16(JAZZ_IO_IRQ_SOURCE); /* clear pending IRQs */ r4030_read_reg32(JAZZ_R4030_INVAL_ADDR); /* clear error bits */ } /* * On systems with i8259-style interrupt controllers we assume for * driver compatibility reasons interrupts 0 - 15 to be the i8259 * interrupts even if the hardware uses a different interrupt numbering. */ void __init arch_init_irq(void) { /* * this is a hack to get back the still needed wired mapping * killed by init_mm() */ /* Map 0xe0000000 -> 0x0:800005C0, 0xe0010000 -> 0x1:30000580 */ add_wired_entry(0x02000017, 0x03c00017, 0xe0000000, PM_64K); /* Map 0xe2000000 -> 0x0:900005C0, 0xe3010000 -> 0x0:910005C0 */ add_wired_entry(0x02400017, 0x02440017, 0xe2000000, PM_16M); /* Map 0xe4000000 -> 0x0:600005C0, 0xe4100000 -> 400005C0 */ add_wired_entry(0x01800017, 0x01000017, 0xe4000000, PM_4M); init_i8259_irqs(); /* Integrated i8259 */ mips_cpu_irq_init(); init_r4030_ints(); change_c0_status(ST0_IM, IE_IRQ2 | IE_IRQ1); } asmlinkage void plat_irq_dispatch(void) { unsigned int pending = read_c0_cause() & read_c0_status(); unsigned int irq; if (pending & IE_IRQ4) { r4030_read_reg32(JAZZ_TIMER_REGISTER); do_IRQ(JAZZ_TIMER_IRQ); } else if (pending & IE_IRQ2) { irq = *(volatile u8 *)JAZZ_EISA_IRQ_ACK; do_IRQ(irq); } else if (pending & IE_IRQ1) { irq = *(volatile u8 *)JAZZ_IO_IRQ_SOURCE >> 2; if (likely(irq > 0)) do_IRQ(irq + JAZZ_IRQ_START - 1); else panic("Unimplemented loc_no_irq handler"); } } static void r4030_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { /* Nothing to do ... 
*/ } struct clock_event_device r4030_clockevent = { .name = "r4030", .features = CLOCK_EVT_FEAT_PERIODIC, .rating = 300, .irq = JAZZ_TIMER_IRQ, .set_mode = r4030_set_mode, }; static irqreturn_t r4030_timer_interrupt(int irq, void *dev_id) { struct clock_event_device *cd = dev_id; cd->event_handler(cd); return IRQ_HANDLED; } static struct irqaction r4030_timer_irqaction = { .handler = r4030_timer_interrupt, .flags = IRQF_TIMER, .name = "R4030 timer", }; void __init plat_time_init(void) { struct clock_event_device *cd = &r4030_clockevent; struct irqaction *action = &r4030_timer_irqaction; unsigned int cpu = smp_processor_id(); BUG_ON(HZ != 100); cd->cpumask = cpumask_of(cpu); clockevents_register_device(cd); action->dev_id = cd; setup_irq(JAZZ_TIMER_IRQ, action); /* * Set clock to 100Hz. * * The R4030 timer receives an input clock of 1kHz which is divided by * a programmable 4-bit divider. This makes it fairly inflexible. */ r4030_write_reg32(JAZZ_TIMER_INTERVAL, 9); setup_pit_timer(); }
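For context, a peripheral driver on a Jazz machine consumes these lines through the normal Linux IRQ API; the R4030 irq_chip registered in init_r4030_ints() supplies the mask/unmask operations. A minimal, hypothetical consumer (the handler and names are illustrative, not from this file) would look like:

static irqreturn_t jazz_example_isr(int irq, void *dev_id)
{
	/* device-specific acknowledge/handling goes here */
	return IRQ_HANDLED;
}

static int jazz_example_claim(unsigned int local_line, void *dev_id)
{
	/*
	 * local_line is the 1-based value read from JAZZ_IO_IRQ_SOURCE >> 2
	 * in plat_irq_dispatch() above, so the Linux IRQ numbers start at
	 * JAZZ_IRQ_START for local_line == 1.
	 */
	return request_irq(JAZZ_IRQ_START + local_line - 1, jazz_example_isr,
			   0, "jazz-example", dev_id);
}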
gpl-2.0
Quallenauge/kernel-archos
sound/usb/usx2y/usbusx2y.c
4127
13768
/* * usbusx2y.c - ALSA USB US-428 Driver * 2005-04-14 Karsten Wiese Version 0.8.7.2: Call snd_card_free() instead of snd_card_free_in_thread() to prevent oops with dead keyboard symptom. Tested ok with kernel 2.6.12-rc2. 2004-12-14 Karsten Wiese Version 0.8.7.1: snd_pcm_open for rawusb pcm-devices now returns -EBUSY if called without rawusb's hwdep device being open. 2004-12-02 Karsten Wiese Version 0.8.7: Use macro usb_maxpacket() for portability. 2004-10-26 Karsten Wiese Version 0.8.6: wake_up() process waiting in usX2Y_urbs_start() on error. 2004-10-21 Karsten Wiese Version 0.8.5: nrpacks is runtime or compile-time configurable now with tested values from 1 to 4. 2004-10-03 Karsten Wiese Version 0.8.2: Avoid any possible racing while in prepare callback. 2004-09-30 Karsten Wiese Version 0.8.0: Simplified things and made ohci work again. 2004-09-20 Karsten Wiese Version 0.7.3: Use usb_kill_urb() instead of deprecated (kernel 2.6.9) usb_unlink_urb(). 2004-07-13 Karsten Wiese Version 0.7.1: Don't sleep in START/STOP callbacks anymore. us428 channels C/D not handled just for this version, sorry. 2004-06-21 Karsten Wiese Version 0.6.4: Temporarily suspend midi input to sanely call usb_set_interface() when setting format. 2004-06-12 Karsten Wiese Version 0.6.3: Made it so that the following rule is enforced: "All pcm substreams of one usX2Y have to operate at the same rate & format." 2004-04-06 Karsten Wiese Version 0.6.0: Runs on 2.6.5 kernel without any "--with-debug=" things. us224 reported running. 2004-01-14 Karsten Wiese Version 0.5.1: Runs with 2.6.1 kernel. 2003-12-30 Karsten Wiese Version 0.4.1: Fix 24Bit 4Channel capturing for the us428. 2003-11-27 Karsten Wiese, Martin Langer Version 0.4: us122 support. us224 could be tested by uncommenting the sections containing USB_ID_US224 2003-11-03 Karsten Wiese Version 0.3: 24Bit support. "arecord -D hw:1 -c 2 -r 48000 -M -f S24_3LE|aplay -D hw:1 -c 2 -r 48000 -M -f S24_3LE" works. 2003-08-22 Karsten Wiese Version 0.0.8: Removed EZUSB Firmware. First stage firmware download is now done by tascam-firmware downloader. See: http://usb-midi-fw.sourceforge.net/tascam-firmware.tar.gz 2003-06-18 Karsten Wiese Version 0.0.5: changed to compile with kernel 2.4.21 and alsa 0.9.4 2002-10-16 Karsten Wiese Version 0.0.4: compiles again with alsa-current. USB_ISO_ASAP not used anymore (most of the time), instead urb->start_frame is calculated here now, some calls inside usb-driver don't need to happen anymore. To get the best out of this: Disable APM-support in the kernel as APM-BIOS calls (once each second) hard disable interrupt for many precious milliseconds. This helped me much on my slowish PII 400 & PIII 500. ACPI yet untested but might cause the same bad behaviour. Use a kernel with lowlatency and preemptive patches applied. To autoload snd-usb-midi append a line post-install snd-usb-us428 modprobe snd-usb-midi to /etc/modules.conf. known problems: sliders, knobs, lights not yet handled except MASTER Volume slider. "pcm -c 2" doesn't work. "pcm -c 2 -m direct_interleaved" does. KDE3: "Enable full duplex operation" deadlocks. 2002-08-31 Karsten Wiese Version 0.0.3: audio also simplex; simplifying: iso urbs only 1 packet, melted structs. ASYNC_UNLINK not used anymore: no more crashes so far..... for alsa 0.9 rc3. 2002-08-09 Karsten Wiese Version 0.0.2: midi works with snd-usb-midi, audio (only fullduplex now) with e.g. bristol. The firmware has been sniffed from win2k us-428 driver 3.09. 
* Copyright (c) 2002 - 2004 Karsten Wiese * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/usb.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/pcm.h> #include <sound/rawmidi.h> #include "usx2y.h" #include "usbusx2y.h" #include "usX2Yhwdep.h" MODULE_AUTHOR("Karsten Wiese <annabellesgarden@yahoo.de>"); MODULE_DESCRIPTION("TASCAM "NAME_ALLCAPS" Version 0.8.7.2"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{TASCAM(0x1604), "NAME_ALLCAPS"(0x8001)(0x8005)(0x8007) }}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */ static char* id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for this card */ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for "NAME_ALLCAPS"."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for "NAME_ALLCAPS"."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable "NAME_ALLCAPS"."); static int snd_usX2Y_card_used[SNDRV_CARDS]; static void usX2Y_usb_disconnect(struct usb_device* usb_device, void* ptr); static void snd_usX2Y_card_private_free(struct snd_card *card); /* * pipe 4 is used for switching the lamps, setting samplerate, volumes .... */ static void i_usX2Y_Out04Int(struct urb *urb) { #ifdef CONFIG_SND_DEBUG if (urb->status) { int i; struct usX2Ydev *usX2Y = urb->context; for (i = 0; i < 10 && usX2Y->AS04.urb[i] != urb; i++); snd_printdd("i_usX2Y_Out04Int() urb %i status=%i\n", i, urb->status); } #endif } static void i_usX2Y_In04Int(struct urb *urb) { int err = 0; struct usX2Ydev *usX2Y = urb->context; struct us428ctls_sharedmem *us428ctls = usX2Y->us428ctls_sharedmem; usX2Y->In04IntCalls++; if (urb->status) { snd_printdd("Interrupt Pipe 4 came back with status=%i\n", urb->status); return; } // printk("%i:0x%02X ", 8, (int)((unsigned char*)usX2Y->In04Buf)[8]); Master volume shows 0 here if fader is at max during boot ?!? 
if (us428ctls) { int diff = -1; if (-2 == us428ctls->CtlSnapShotLast) { diff = 0; memcpy(usX2Y->In04Last, usX2Y->In04Buf, sizeof(usX2Y->In04Last)); us428ctls->CtlSnapShotLast = -1; } else { int i; for (i = 0; i < 21; i++) { if (usX2Y->In04Last[i] != ((char*)usX2Y->In04Buf)[i]) { if (diff < 0) diff = i; usX2Y->In04Last[i] = ((char*)usX2Y->In04Buf)[i]; } } } if (0 <= diff) { int n = us428ctls->CtlSnapShotLast + 1; if (n >= N_us428_ctl_BUFS || n < 0) n = 0; memcpy(us428ctls->CtlSnapShot + n, usX2Y->In04Buf, sizeof(us428ctls->CtlSnapShot[0])); us428ctls->CtlSnapShotDiffersAt[n] = diff; us428ctls->CtlSnapShotLast = n; wake_up(&usX2Y->us428ctls_wait_queue_head); } } if (usX2Y->US04) { if (0 == usX2Y->US04->submitted) do { err = usb_submit_urb(usX2Y->US04->urb[usX2Y->US04->submitted++], GFP_ATOMIC); } while (!err && usX2Y->US04->submitted < usX2Y->US04->len); } else if (us428ctls && us428ctls->p4outLast >= 0 && us428ctls->p4outLast < N_us428_p4out_BUFS) { if (us428ctls->p4outLast != us428ctls->p4outSent) { int j, send = us428ctls->p4outSent + 1; if (send >= N_us428_p4out_BUFS) send = 0; for (j = 0; j < URBS_AsyncSeq && !err; ++j) if (0 == usX2Y->AS04.urb[j]->status) { struct us428_p4out *p4out = us428ctls->p4out + send; // FIXME if more than 1 p4out is new, 1 gets lost. usb_fill_bulk_urb(usX2Y->AS04.urb[j], usX2Y->dev, usb_sndbulkpipe(usX2Y->dev, 0x04), &p4out->val.vol, p4out->type == eLT_Light ? sizeof(struct us428_lights) : 5, i_usX2Y_Out04Int, usX2Y); err = usb_submit_urb(usX2Y->AS04.urb[j], GFP_ATOMIC); us428ctls->p4outSent = send; break; } } } if (err) snd_printk(KERN_ERR "In04Int() usb_submit_urb err=%i\n", err); urb->dev = usX2Y->dev; usb_submit_urb(urb, GFP_ATOMIC); } /* * Prepare some urbs */ int usX2Y_AsyncSeq04_init(struct usX2Ydev *usX2Y) { int err = 0, i; if (NULL == (usX2Y->AS04.buffer = kmalloc(URB_DataLen_AsyncSeq*URBS_AsyncSeq, GFP_KERNEL))) { err = -ENOMEM; } else for (i = 0; i < URBS_AsyncSeq; ++i) { if (NULL == (usX2Y->AS04.urb[i] = usb_alloc_urb(0, GFP_KERNEL))) { err = -ENOMEM; break; } usb_fill_bulk_urb( usX2Y->AS04.urb[i], usX2Y->dev, usb_sndbulkpipe(usX2Y->dev, 0x04), usX2Y->AS04.buffer + URB_DataLen_AsyncSeq*i, 0, i_usX2Y_Out04Int, usX2Y ); } return err; } int usX2Y_In04_init(struct usX2Ydev *usX2Y) { if (! (usX2Y->In04urb = usb_alloc_urb(0, GFP_KERNEL))) return -ENOMEM; if (! 
(usX2Y->In04Buf = kmalloc(21, GFP_KERNEL))) { usb_free_urb(usX2Y->In04urb); return -ENOMEM; } init_waitqueue_head(&usX2Y->In04WaitQueue); usb_fill_int_urb(usX2Y->In04urb, usX2Y->dev, usb_rcvintpipe(usX2Y->dev, 0x4), usX2Y->In04Buf, 21, i_usX2Y_In04Int, usX2Y, 10); return usb_submit_urb(usX2Y->In04urb, GFP_KERNEL); } static void usX2Y_unlinkSeq(struct snd_usX2Y_AsyncSeq *S) { int i; for (i = 0; i < URBS_AsyncSeq; ++i) { if (S->urb[i]) { /* index the urb array, not an array of structs */ usb_kill_urb(S->urb[i]); usb_free_urb(S->urb[i]); S->urb[i] = NULL; } } kfree(S->buffer); } static struct usb_device_id snd_usX2Y_usb_id_table[] = { { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x1604, .idProduct = USB_ID_US428 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x1604, .idProduct = USB_ID_US122 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x1604, .idProduct = USB_ID_US224 }, { /* terminator */ } }; static int usX2Y_create_card(struct usb_device *device, struct snd_card **cardp) { int dev; struct snd_card * card; int err; for (dev = 0; dev < SNDRV_CARDS; ++dev) if (enable[dev] && !snd_usX2Y_card_used[dev]) break; if (dev >= SNDRV_CARDS) return -ENODEV; err = snd_card_create(index[dev], id[dev], THIS_MODULE, sizeof(struct usX2Ydev), &card); if (err < 0) return err; snd_usX2Y_card_used[usX2Y(card)->card_index = dev] = 1; card->private_free = snd_usX2Y_card_private_free; usX2Y(card)->dev = device; init_waitqueue_head(&usX2Y(card)->prepare_wait_queue); mutex_init(&usX2Y(card)->prepare_mutex); INIT_LIST_HEAD(&usX2Y(card)->midi_list); strcpy(card->driver, "USB "NAME_ALLCAPS""); sprintf(card->shortname, "TASCAM "NAME_ALLCAPS""); sprintf(card->longname, "%s (%x:%x if %d at %03d/%03d)", card->shortname, le16_to_cpu(device->descriptor.idVendor), le16_to_cpu(device->descriptor.idProduct), 0,//us428(card)->usbmidi.ifnum, usX2Y(card)->dev->bus->busnum, usX2Y(card)->dev->devnum ); *cardp = card; return 0; } static int usX2Y_usb_probe(struct usb_device *device, struct usb_interface *intf, const struct usb_device_id *device_id, struct snd_card **cardp) { int err; struct snd_card * card; *cardp = NULL; if (le16_to_cpu(device->descriptor.idVendor) != 0x1604 || (le16_to_cpu(device->descriptor.idProduct) != USB_ID_US122 && le16_to_cpu(device->descriptor.idProduct) != USB_ID_US224 && le16_to_cpu(device->descriptor.idProduct) != USB_ID_US428)) return -EINVAL; err = usX2Y_create_card(device, &card); if (err < 0) return err; snd_card_set_dev(card, &intf->dev); if ((err = usX2Y_hwdep_new(card, device)) < 0 || (err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } *cardp = card; return 0; } /* * new 2.5 USB kernel API */ static int snd_usX2Y_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct snd_card *card; int err; err = usX2Y_usb_probe(interface_to_usbdev(intf), intf, id, &card); if (err < 0) return err; dev_set_drvdata(&intf->dev, card); return 0; } static void snd_usX2Y_disconnect(struct usb_interface *intf) { usX2Y_usb_disconnect(interface_to_usbdev(intf), usb_get_intfdata(intf)); } MODULE_DEVICE_TABLE(usb, snd_usX2Y_usb_id_table); static struct usb_driver snd_usX2Y_usb_driver = { .name = "snd-usb-usx2y", .probe = snd_usX2Y_probe, .disconnect = snd_usX2Y_disconnect, .id_table = snd_usX2Y_usb_id_table, }; static void snd_usX2Y_card_private_free(struct snd_card *card) { kfree(usX2Y(card)->In04Buf); usb_free_urb(usX2Y(card)->In04urb); if (usX2Y(card)->us428ctls_sharedmem) snd_free_pages(usX2Y(card)->us428ctls_sharedmem, sizeof(*usX2Y(card)->us428ctls_sharedmem)); if 
(usX2Y(card)->card_index >= 0 && usX2Y(card)->card_index < SNDRV_CARDS) snd_usX2Y_card_used[usX2Y(card)->card_index] = 0; } /* * Frees the device. */ static void usX2Y_usb_disconnect(struct usb_device *device, void* ptr) { if (ptr) { struct snd_card *card = ptr; struct usX2Ydev *usX2Y = usX2Y(card); struct list_head *p; usX2Y->chip_status = USX2Y_STAT_CHIP_HUP; usX2Y_unlinkSeq(&usX2Y->AS04); usb_kill_urb(usX2Y->In04urb); snd_card_disconnect(card); /* release the midi resources */ list_for_each(p, &usX2Y->midi_list) { snd_usbmidi_disconnect(p); } if (usX2Y->us428ctls_sharedmem) wake_up(&usX2Y->us428ctls_wait_queue_head); snd_card_free(card); } } static int __init snd_usX2Y_module_init(void) { return usb_register(&snd_usX2Y_usb_driver); } static void __exit snd_usX2Y_module_exit(void) { usb_deregister(&snd_usX2Y_usb_driver); } module_init(snd_usX2Y_module_init) module_exit(snd_usX2Y_module_exit)
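The i_usX2Y_In04Int() handler above diffs each 21-byte interrupt packet against the previous one and stores changed packets in a small snapshot ring shared with user space. A standalone sketch of that ring logic (buffer sizes and names here are stand-ins, assuming the same -1 "empty" sentinel as the driver):

#include <string.h>

#define PKT_LEN 21
#define N_SNAP 16			/* stand-in for N_us428_ctl_BUFS */

struct snap_ring {
	unsigned char buf[N_SNAP][PKT_LEN];
	int diff_at[N_SNAP];		/* first changed byte per snapshot */
	int last;			/* -1 while empty */
};

/* Store pkt if it differs from prev; return slot index, or -1 if unchanged. */
static int snap_store(struct snap_ring *r, const unsigned char *pkt,
		      const unsigned char *prev)
{
	int i, diff = -1, n;

	for (i = 0; i < PKT_LEN; i++)
		if (pkt[i] != prev[i]) {
			diff = i;
			break;
		}
	if (diff < 0)
		return -1;		/* nothing changed, nothing stored */
	n = r->last + 1;
	if (n >= N_SNAP || n < 0)	/* wrap, mirroring the driver's check */
		n = 0;
	memcpy(r->buf[n], pkt, PKT_LEN);
	r->diff_at[n] = diff;
	r->last = n;
	return n;
}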
gpl-2.0
TheStrix/android_kernel_xiaomi_armani
arch/m68k/platform/528x/config.c
4383
2583
/***************************************************************************/ /* * linux/arch/m68knommu/platform/528x/config.c * * Sub-architecture dependent initialization code for the Freescale * 5280, 5281 and 5282 CPUs. * * Copyright (C) 1999-2003, Greg Ungerer (gerg@snapgear.com) * Copyright (C) 2001-2003, SnapGear Inc. (www.snapgear.com) */ /***************************************************************************/ #include <linux/kernel.h> #include <linux/param.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <asm/machdep.h> #include <asm/coldfire.h> #include <asm/mcfsim.h> #include <asm/mcfuart.h> /***************************************************************************/ #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m528x_qspi_init(void) { /* setup Port QS for QSPI with gpio CS control */ __raw_writeb(0x07, MCFGPIO_PQSPAR); } #endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ static void __init m528x_uarts_init(void) { u8 port; /* make sure PUAPAR is set for UART0 and UART1 */ port = readb(MCF5282_GPIO_PUAPAR); port |= 0x03 | (0x03 << 2); writeb(port, MCF5282_GPIO_PUAPAR); } /***************************************************************************/ static void __init m528x_fec_init(void) { u16 v16; /* Set multi-function pins to ethernet mode for fec0 */ v16 = readw(MCF_IPSBAR + 0x100056); writew(v16 | 0xf00, MCF_IPSBAR + 0x100056); writeb(0xc0, MCF_IPSBAR + 0x100058); } /***************************************************************************/ #ifdef CONFIG_WILDFIRE void wildfire_halt(void) { writeb(0, 0x30000007); writeb(0x2, 0x30000007); } #endif #ifdef CONFIG_WILDFIREMOD void wildfiremod_halt(void) { printk(KERN_INFO "WildFireMod hibernating...\n"); /* Set portE.5 to Digital IO */ MCF5282_GPIO_PEPAR &= ~(1 << (5 * 2)); /* Make portE.5 an output */ MCF5282_GPIO_DDRE |= (1 << 5); /* Now toggle portE.5 from low to high */ MCF5282_GPIO_PORTE &= ~(1 << 5); MCF5282_GPIO_PORTE |= (1 << 5); printk(KERN_EMERG "Failed to hibernate. Halting!\n"); } #endif void __init config_BSP(char *commandp, int size) { #ifdef CONFIG_WILDFIRE mach_halt = wildfire_halt; #endif #ifdef CONFIG_WILDFIREMOD mach_halt = wildfiremod_halt; #endif mach_sched_init = hw_timer_init; m528x_uarts_init(); m528x_fec_init(); #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m528x_qspi_init(); #endif } /***************************************************************************/
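All of the pin-routing code above follows the same read-modify-write shape on 8- or 16-bit pin-assignment (PAR) registers. The file defines no generic helper, but as a sketch (helper name hypothetical; the address-as-integer typing mirrors the loose style this platform code already uses) the UART setup reduces to a one-liner:

/* OR extra function bits into an 8-bit pin-assignment register */
static void __init m528x_par_or8(unsigned int par, u8 bits)
{
	u8 v;

	v = readb(par);
	writeb(v | bits, par);
}

With that, m528x_uarts_init() becomes m528x_par_or8(MCF5282_GPIO_PUAPAR, 0x03 | (0x03 << 2)), and a 16-bit sibling would cover the FEC pin setup the same way.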
gpl-2.0
yank555-lu/N3-CM11
drivers/staging/comedi/drivers/ni_at_a2150.c
4895
25844
/* comedi/drivers/ni_at_a2150.c Driver for National Instruments AT-A2150 boards Copyright (C) 2001, 2002 Frank Mori Hess <fmhess@users.sourceforge.net> COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ************************************************************************ */ /* Driver: ni_at_a2150 Description: National Instruments AT-A2150 Author: Frank Mori Hess Status: works Devices: [National Instruments] AT-A2150C (at_a2150c), AT-A2150S (at_a2150s) If you want to ac couple the board's inputs, use AREF_OTHER. Configuration options: [0] - I/O port base address [1] - IRQ (optional, required for timed conversions) [2] - DMA (optional, required for timed conversions) */ /* Yet another driver for obsolete hardware brought to you by Frank Hess. Testing and debugging help provided by Dave Andruczyk. This driver supports the boards: AT-A2150C AT-A2150S The only difference is their master clock frequencies. Options: [0] - base io address [1] - irq [2] - dma channel References (from ftp://ftp.natinst.com/support/manuals): 320360.pdf AT-A2150 User Manual TODO: analog level triggering TRIG_WAKE_EOS */ #include <linux/interrupt.h> #include <linux/slab.h> #include "../comedidev.h" #include <linux/ioport.h> #include <linux/io.h> #include <asm/dma.h> #include "8253.h" #include "comedi_fc.h" #define A2150_SIZE 28 #define A2150_DMA_BUFFER_SIZE 0xff00 /* size in bytes of dma buffer */ /* #define A2150_DEBUG enable debugging code */ #undef A2150_DEBUG /* disable debugging code */ /* Registers and bits */ #define CONFIG_REG 0x0 #define CHANNEL_BITS(x) ((x) & 0x7) #define CHANNEL_MASK 0x7 #define CLOCK_SELECT_BITS(x) (((x) & 0x3) << 3) #define CLOCK_DIVISOR_BITS(x) (((x) & 0x3) << 5) #define CLOCK_MASK (0xf << 3) #define ENABLE0_BIT 0x80 /* enable (don't internally ground) channels 0 and 1 */ #define ENABLE1_BIT 0x100 /* enable (don't internally ground) channels 2 and 3 */ #define AC0_BIT 0x200 /* ac couple channels 0,1 */ #define AC1_BIT 0x400 /* ac couple channels 2,3 */ #define APD_BIT 0x800 /* analog power down */ #define DPD_BIT 0x1000 /* digital power down */ #define TRIGGER_REG 0x2 /* trigger config register */ #define POST_TRIGGER_BITS 0x2 #define DELAY_TRIGGER_BITS 0x3 #define HW_TRIG_EN 0x10 /* enable hardware trigger */ #define FIFO_START_REG 0x6 /* software start acquisition trigger */ #define FIFO_RESET_REG 0x8 /* clears fifo + fifo flags */ #define FIFO_DATA_REG 0xa /* read data */ #define DMA_TC_CLEAR_REG 0xe /* clear dma terminal count interrupt */ #define STATUS_REG 0x12 /* read only */ #define FNE_BIT 0x1 /* fifo not empty */ #define OVFL_BIT 0x8 /* fifo overflow */ #define EDAQ_BIT 0x10 /* end of acquisition interrupt */ #define DCAL_BIT 0x20 /* offset calibration in progress */ #define INTR_BIT 0x40 /* interrupt has occurred */ #define DMA_TC_BIT 0x80 /* dma terminal count interrupt has 
occurred */ #define ID_BITS(x) (((x) >> 8) & 0x3) #define IRQ_DMA_CNTRL_REG 0x12 /* write only */ #define DMA_CHAN_BITS(x) ((x) & 0x7) /* sets dma channel */ #define DMA_EN_BIT 0x8 /* enables dma */ #define IRQ_LVL_BITS(x) (((x) & 0xf) << 4) /* sets irq level */ #define FIFO_INTR_EN_BIT 0x100 /* enable fifo interrupts */ #define FIFO_INTR_FHF_BIT 0x200 /* interrupt fifo half full */ #define DMA_INTR_EN_BIT 0x800 /* enable interrupt on dma terminal count */ #define DMA_DEM_EN_BIT 0x1000 /* enables demand mode dma */ #define I8253_BASE_REG 0x14 #define I8253_MODE_REG 0x17 #define HW_COUNT_DISABLE 0x30 /* disable hardware counting of conversions */ struct a2150_board { const char *name; int clock[4]; /* master clock periods, in nanoseconds */ int num_clocks; /* number of available master clock speeds */ int ai_speed; /* maximum conversion rate in nanoseconds */ }; /* analog input range */ static const struct comedi_lrange range_a2150 = { 1, { RANGE(-2.828, 2.828), } }; /* enum must match board indices */ enum { a2150_c, a2150_s }; static const struct a2150_board a2150_boards[] = { { .name = "at-a2150c", .clock = {31250, 22676, 20833, 19531}, .num_clocks = 4, .ai_speed = 19531, }, { .name = "at-a2150s", .clock = {62500, 50000, 41667, 0}, .num_clocks = 3, .ai_speed = 41667, }, }; /* * Useful for shorthand access to the particular board structure */ #define thisboard ((const struct a2150_board *)dev->board_ptr) struct a2150_private { volatile unsigned int count; /* number of data points left to be taken */ unsigned int dma; /* dma channel */ s16 *dma_buffer; /* dma buffer */ unsigned int dma_transfer_size; /* size in bytes of dma transfers */ int irq_dma_bits; /* irq/dma register bits */ int config_bits; /* config register bits */ }; #define devpriv ((struct a2150_private *)dev->private) static int a2150_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int a2150_detach(struct comedi_device *dev); static int a2150_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static struct comedi_driver driver_a2150 = { .driver_name = "ni_at_a2150", .module = THIS_MODULE, .attach = a2150_attach, .detach = a2150_detach, }; static irqreturn_t a2150_interrupt(int irq, void *d); static int a2150_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); static int a2150_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s); static int a2150_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int a2150_get_timing(struct comedi_device *dev, unsigned int *period, int flags); static int a2150_probe(struct comedi_device *dev); static int a2150_set_chanlist(struct comedi_device *dev, unsigned int start_channel, unsigned int num_channels); /* * A convenient macro that defines init_module() and cleanup_module(), * as necessary. 
*/ static int __init driver_a2150_init_module(void) { return comedi_driver_register(&driver_a2150); } static void __exit driver_a2150_cleanup_module(void) { comedi_driver_unregister(&driver_a2150); } module_init(driver_a2150_init_module); module_exit(driver_a2150_cleanup_module); #ifdef A2150_DEBUG static void ni_dump_regs(struct comedi_device *dev) { printk("config bits 0x%x\n", devpriv->config_bits); printk("irq dma bits 0x%x\n", devpriv->irq_dma_bits); printk("status bits 0x%x\n", inw(dev->iobase + STATUS_REG)); } #endif /* interrupt service routine */ static irqreturn_t a2150_interrupt(int irq, void *d) { int i; int status; unsigned long flags; struct comedi_device *dev = d; struct comedi_subdevice *s = dev->read_subdev; struct comedi_async *async; struct comedi_cmd *cmd; unsigned int max_points, num_points, residue, leftover; short dpnt; static const int sample_size = sizeof(devpriv->dma_buffer[0]); if (dev->attached == 0) { comedi_error(dev, "premature interrupt"); return IRQ_HANDLED; } /* initialize async here to make sure s is not NULL */ async = s->async; async->events = 0; cmd = &async->cmd; status = inw(dev->iobase + STATUS_REG); if ((status & INTR_BIT) == 0) { comedi_error(dev, "spurious interrupt"); return IRQ_NONE; } if (status & OVFL_BIT) { comedi_error(dev, "fifo overflow"); a2150_cancel(dev, s); async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA; } if ((status & DMA_TC_BIT) == 0) { comedi_error(dev, "caught non-dma interrupt? Aborting."); a2150_cancel(dev, s); async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA; comedi_event(dev, s); return IRQ_HANDLED; } flags = claim_dma_lock(); disable_dma(devpriv->dma); /* clear flip-flop to make sure 2-byte registers for * count and address get set correctly */ clear_dma_ff(devpriv->dma); /* figure out how many points to read */ max_points = devpriv->dma_transfer_size / sample_size; /* residue is the number of points left to be done on the dma * transfer. It should always be zero at this point unless * the stop_src is set to external triggering. 
*/ residue = get_dma_residue(devpriv->dma) / sample_size; num_points = max_points - residue; if (devpriv->count < num_points && cmd->stop_src == TRIG_COUNT) num_points = devpriv->count; /* figure out how many points will be stored next time */ leftover = 0; if (cmd->stop_src == TRIG_NONE) { leftover = devpriv->dma_transfer_size / sample_size; } else if (devpriv->count > max_points) { leftover = devpriv->count - max_points; if (leftover > max_points) leftover = max_points; } /* there should only be a residue if collection was stopped by having * the stop_src set to an external trigger, in which case there * will be no more data */ if (residue) leftover = 0; for (i = 0; i < num_points; i++) { /* write data point to comedi buffer */ dpnt = devpriv->dma_buffer[i]; /* convert from 2's complement to unsigned coding */ dpnt ^= 0x8000; cfc_write_to_buffer(s, dpnt); if (cmd->stop_src == TRIG_COUNT) { if (--devpriv->count == 0) { /* end of acquisition */ a2150_cancel(dev, s); async->events |= COMEDI_CB_EOA; break; } } } /* re-enable dma */ if (leftover) { set_dma_addr(devpriv->dma, virt_to_bus(devpriv->dma_buffer)); set_dma_count(devpriv->dma, leftover * sample_size); enable_dma(devpriv->dma); } release_dma_lock(flags); async->events |= COMEDI_CB_BLOCK; comedi_event(dev, s); /* clear interrupt */ outw(0x00, dev->iobase + DMA_TC_CLEAR_REG); return IRQ_HANDLED; } /* probes board type, returns offset */ static int a2150_probe(struct comedi_device *dev) { int status = inw(dev->iobase + STATUS_REG); return ID_BITS(status); } static int a2150_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; unsigned long iobase = it->options[0]; unsigned int irq = it->options[1]; unsigned int dma = it->options[2]; static const int timeout = 2000; int i; printk("comedi%d: %s: io 0x%lx", dev->minor, driver_a2150.driver_name, iobase); if (irq) { printk(", irq %u", irq); } else { printk(", no irq"); } if (dma) { printk(", dma %u", dma); } else { printk(", no dma"); } printk("\n"); /* allocate and initialize dev->private */ if (alloc_private(dev, sizeof(struct a2150_private)) < 0) return -ENOMEM; if (iobase == 0) { printk(" io base address required\n"); return -EINVAL; } /* check if io addresses are available */ if (!request_region(iobase, A2150_SIZE, driver_a2150.driver_name)) { printk(" I/O port conflict\n"); return -EIO; } dev->iobase = iobase; /* grab our IRQ */ if (irq) { /* check that irq is supported */ if (irq < 3 || irq == 8 || irq == 13 || irq > 15) { printk(" invalid irq line %u\n", irq); return -EINVAL; } if (request_irq(irq, a2150_interrupt, 0, driver_a2150.driver_name, dev)) { printk("unable to allocate irq %u\n", irq); return -EINVAL; } devpriv->irq_dma_bits |= IRQ_LVL_BITS(irq); dev->irq = irq; } /* initialize dma */ if (dma) { if (dma == 4 || dma > 7) { printk(" invalid dma channel %u\n", dma); return -EINVAL; } if (request_dma(dma, driver_a2150.driver_name)) { printk(" failed to allocate dma channel %u\n", dma); return -EINVAL; } devpriv->dma = dma; devpriv->dma_buffer = kmalloc(A2150_DMA_BUFFER_SIZE, GFP_KERNEL | GFP_DMA); if (devpriv->dma_buffer == NULL) return -ENOMEM; disable_dma(dma); set_dma_mode(dma, DMA_MODE_READ); devpriv->irq_dma_bits |= DMA_CHAN_BITS(dma); } dev->board_ptr = a2150_boards + a2150_probe(dev); dev->board_name = thisboard->name; if (alloc_subdevices(dev, 1) < 0) return -ENOMEM; /* analog input subdevice */ s = dev->subdevices + 0; dev->read_subdev = s; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_OTHER | 
SDF_CMD_READ; s->n_chan = 4; s->len_chanlist = 4; s->maxdata = 0xffff; s->range_table = &range_a2150; s->do_cmd = a2150_ai_cmd; s->do_cmdtest = a2150_ai_cmdtest; s->insn_read = a2150_ai_rinsn; s->cancel = a2150_cancel; /* need to do this for software counting of completed conversions, to * prevent hardware count from stopping acquisition */ outw(HW_COUNT_DISABLE, dev->iobase + I8253_MODE_REG); /* set card's irq and dma levels */ outw(devpriv->irq_dma_bits, dev->iobase + IRQ_DMA_CNTRL_REG); /* reset and sync adc clock circuitry */ outw_p(DPD_BIT | APD_BIT, dev->iobase + CONFIG_REG); outw_p(DPD_BIT, dev->iobase + CONFIG_REG); /* initialize configuration register */ devpriv->config_bits = 0; outw(devpriv->config_bits, dev->iobase + CONFIG_REG); /* wait until offset calibration is done, then enable analog inputs */ for (i = 0; i < timeout; i++) { if ((DCAL_BIT & inw(dev->iobase + STATUS_REG)) == 0) break; udelay(1000); } if (i == timeout) { printk (" timed out waiting for offset calibration to complete\n"); return -ETIME; } devpriv->config_bits |= ENABLE0_BIT | ENABLE1_BIT; outw(devpriv->config_bits, dev->iobase + CONFIG_REG); return 0; }; static int a2150_detach(struct comedi_device *dev) { printk("comedi%d: %s: remove\n", dev->minor, driver_a2150.driver_name); /* only free stuff if it has been allocated by _attach */ if (dev->iobase) { /* put board in power-down mode */ outw(APD_BIT | DPD_BIT, dev->iobase + CONFIG_REG); release_region(dev->iobase, A2150_SIZE); } if (dev->irq) free_irq(dev->irq, dev); if (devpriv) { if (devpriv->dma) free_dma(devpriv->dma); kfree(devpriv->dma_buffer); } return 0; }; static int a2150_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { /* disable dma on card */ devpriv->irq_dma_bits &= ~DMA_INTR_EN_BIT & ~DMA_EN_BIT; outw(devpriv->irq_dma_bits, dev->iobase + IRQ_DMA_CNTRL_REG); /* disable computer's dma */ disable_dma(devpriv->dma); /* clear fifo and reset triggering circuitry */ outw(0, dev->iobase + FIFO_RESET_REG); return 0; } static int a2150_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; int tmp; int startChan; int i; /* step 1: make sure trigger sources are trivially valid */ tmp = cmd->start_src; cmd->start_src &= TRIG_NOW | TRIG_EXT; if (!cmd->start_src || tmp != cmd->start_src) err++; tmp = cmd->scan_begin_src; cmd->scan_begin_src &= TRIG_TIMER; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; cmd->convert_src &= TRIG_NOW; if (!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; cmd->stop_src &= TRIG_COUNT | TRIG_NONE; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /* step 2: make sure trigger sources are unique and mutually compatible */ if (cmd->start_src != TRIG_NOW && cmd->start_src != TRIG_EXT) err++; if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE) err++; if (err) return 2; /* step 3: make sure arguments are trivially compatible */ if (cmd->start_arg != 0) { cmd->start_arg = 0; err++; } if (cmd->convert_src == TRIG_TIMER) { if (cmd->convert_arg < thisboard->ai_speed) { cmd->convert_arg = thisboard->ai_speed; err++; } } if (!cmd->chanlist_len) { cmd->chanlist_len = 1; err++; } if (cmd->scan_end_arg != cmd->chanlist_len) { cmd->scan_end_arg = cmd->chanlist_len; err++; } if (cmd->stop_src == TRIG_COUNT) { if (!cmd->stop_arg) { cmd->stop_arg = 1; err++; } } else { /* 
TRIG_NONE */ if (cmd->stop_arg != 0) { cmd->stop_arg = 0; err++; } } if (err) return 3; /* step 4: fix up any arguments */ if (cmd->scan_begin_src == TRIG_TIMER) { tmp = cmd->scan_begin_arg; a2150_get_timing(dev, &cmd->scan_begin_arg, cmd->flags); if (tmp != cmd->scan_begin_arg) err++; } if (err) return 4; /* check channel/gain list against card's limitations */ if (cmd->chanlist) { startChan = CR_CHAN(cmd->chanlist[0]); for (i = 1; i < cmd->chanlist_len; i++) { if (CR_CHAN(cmd->chanlist[i]) != (startChan + i)) { comedi_error(dev, "entries in chanlist must be consecutive channels, counting upwards\n"); err++; } } if (cmd->chanlist_len == 2 && CR_CHAN(cmd->chanlist[0]) == 1) { comedi_error(dev, "length 2 chanlist must be channels 0,1 or channels 2,3"); err++; } if (cmd->chanlist_len == 3) { comedi_error(dev, "chanlist must have 1,2 or 4 channels"); err++; } if (CR_AREF(cmd->chanlist[0]) != CR_AREF(cmd->chanlist[1]) || CR_AREF(cmd->chanlist[2]) != CR_AREF(cmd->chanlist[3])) { comedi_error(dev, "channels 0/1 and 2/3 must have the same analog reference"); err++; } } if (err) return 5; return 0; } static int a2150_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; unsigned long lock_flags; unsigned int old_config_bits = devpriv->config_bits; unsigned int trigger_bits; if (!dev->irq || !devpriv->dma) { comedi_error(dev, " irq and dma required, cannot do hardware conversions"); return -1; } if (cmd->flags & TRIG_RT) { comedi_error(dev, " dma incompatible with hard real-time interrupt (TRIG_RT), aborting"); return -1; } /* clear fifo and reset triggering circuitry */ outw(0, dev->iobase + FIFO_RESET_REG); /* setup chanlist */ if (a2150_set_chanlist(dev, CR_CHAN(cmd->chanlist[0]), cmd->chanlist_len) < 0) return -1; /* setup ac/dc coupling */ if (CR_AREF(cmd->chanlist[0]) == AREF_OTHER) devpriv->config_bits |= AC0_BIT; else devpriv->config_bits &= ~AC0_BIT; if (CR_AREF(cmd->chanlist[2]) == AREF_OTHER) devpriv->config_bits |= AC1_BIT; else devpriv->config_bits &= ~AC1_BIT; /* setup timing */ a2150_get_timing(dev, &cmd->scan_begin_arg, cmd->flags); /* send timing, channel, config bits */ outw(devpriv->config_bits, dev->iobase + CONFIG_REG); /* initialize number of samples remaining */ devpriv->count = cmd->stop_arg * cmd->chanlist_len; /* enable computer's dma */ lock_flags = claim_dma_lock(); disable_dma(devpriv->dma); /* clear flip-flop to make sure 2-byte registers for * count and address get set correctly */ clear_dma_ff(devpriv->dma); set_dma_addr(devpriv->dma, virt_to_bus(devpriv->dma_buffer)); /* set size of transfer to fill in 1/3 second */ #define ONE_THIRD_SECOND 333333333 devpriv->dma_transfer_size = sizeof(devpriv->dma_buffer[0]) * cmd->chanlist_len * ONE_THIRD_SECOND / cmd->scan_begin_arg; if (devpriv->dma_transfer_size > A2150_DMA_BUFFER_SIZE) devpriv->dma_transfer_size = A2150_DMA_BUFFER_SIZE; if (devpriv->dma_transfer_size < sizeof(devpriv->dma_buffer[0])) devpriv->dma_transfer_size = sizeof(devpriv->dma_buffer[0]); devpriv->dma_transfer_size -= devpriv->dma_transfer_size % sizeof(devpriv->dma_buffer[0]); set_dma_count(devpriv->dma, devpriv->dma_transfer_size); enable_dma(devpriv->dma); release_dma_lock(lock_flags); /* clear dma interrupt before enabling it, to try and get rid of that * one spurious interrupt that has been happening */ outw(0x00, dev->iobase + DMA_TC_CLEAR_REG); /* enable dma on card */ devpriv->irq_dma_bits |= DMA_INTR_EN_BIT | DMA_EN_BIT; outw(devpriv->irq_dma_bits, dev->iobase 
+ IRQ_DMA_CNTRL_REG); /* may need to wait 72 sampling periods if timing was changed */ i8254_load(dev->iobase + I8253_BASE_REG, 0, 2, 72, 0); /* setup start triggering */ trigger_bits = 0; /* decide if we need to wait 72 periods for valid data */ if (cmd->start_src == TRIG_NOW && (old_config_bits & CLOCK_MASK) != (devpriv->config_bits & CLOCK_MASK)) { /* set trigger source to delay trigger */ trigger_bits |= DELAY_TRIGGER_BITS; } else { /* otherwise no delay */ trigger_bits |= POST_TRIGGER_BITS; } /* enable external hardware trigger */ if (cmd->start_src == TRIG_EXT) { trigger_bits |= HW_TRIG_EN; } else if (cmd->start_src == TRIG_OTHER) { /* XXX add support for level/slope start trigger using TRIG_OTHER */ comedi_error(dev, "you shouldn't see this?"); } /* send trigger config bits */ outw(trigger_bits, dev->iobase + TRIGGER_REG); /* start acquisition for soft trigger */ if (cmd->start_src == TRIG_NOW) outw(0, dev->iobase + FIFO_START_REG); #ifdef A2150_DEBUG ni_dump_regs(dev); #endif return 0; } static int a2150_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int i, n; static const int timeout = 100000; static const int filter_delay = 36; /* clear fifo and reset triggering circuitry */ outw(0, dev->iobase + FIFO_RESET_REG); /* setup chanlist */ if (a2150_set_chanlist(dev, CR_CHAN(insn->chanspec), 1) < 0) return -1; /* set dc coupling */ devpriv->config_bits &= ~AC0_BIT; devpriv->config_bits &= ~AC1_BIT; /* send timing, channel, config bits */ outw(devpriv->config_bits, dev->iobase + CONFIG_REG); /* disable dma on card */ devpriv->irq_dma_bits &= ~DMA_INTR_EN_BIT & ~DMA_EN_BIT; outw(devpriv->irq_dma_bits, dev->iobase + IRQ_DMA_CNTRL_REG); /* setup start triggering */ outw(0, dev->iobase + TRIGGER_REG); /* start acquisition for soft trigger */ outw(0, dev->iobase + FIFO_START_REG); /* there is a 35.6 sample delay for data to get through the antialias filter */ for (n = 0; n < filter_delay; n++) { for (i = 0; i < timeout; i++) { if (inw(dev->iobase + STATUS_REG) & FNE_BIT) break; udelay(1); } if (i == timeout) { comedi_error(dev, "timeout"); return -ETIME; } inw(dev->iobase + FIFO_DATA_REG); } /* read data */ for (n = 0; n < insn->n; n++) { for (i = 0; i < timeout; i++) { if (inw(dev->iobase + STATUS_REG) & FNE_BIT) break; udelay(1); } if (i == timeout) { comedi_error(dev, "timeout"); return -ETIME; } #ifdef A2150_DEBUG ni_dump_regs(dev); #endif data[n] = inw(dev->iobase + FIFO_DATA_REG); #ifdef A2150_DEBUG printk(" data is %i\n", data[n]); #endif data[n] ^= 0x8000; } /* clear fifo and reset triggering circuitry */ outw(0, dev->iobase + FIFO_RESET_REG); return n; } /* sets bits in devpriv->clock_bits to nearest approximation of requested period, * adjusts requested period to actual timing. 
*/ static int a2150_get_timing(struct comedi_device *dev, unsigned int *period, int flags) { int lub, glb, temp; int lub_divisor_shift, lub_index, glb_divisor_shift, glb_index; int i, j; /* initialize greatest lower and least upper bounds */ lub_divisor_shift = 3; lub_index = 0; lub = thisboard->clock[lub_index] * (1 << lub_divisor_shift); glb_divisor_shift = 0; glb_index = thisboard->num_clocks - 1; glb = thisboard->clock[glb_index] * (1 << glb_divisor_shift); /* make sure period is in available range */ if (*period < glb) *period = glb; if (*period > lub) *period = lub; /* we can multiply period by 1, 2, 4, or 8, using (1 << i) */ for (i = 0; i < 4; i++) { /* there are a maximum of 4 master clocks */ for (j = 0; j < thisboard->num_clocks; j++) { /* temp is the period in nanosec we are evaluating */ temp = thisboard->clock[j] * (1 << i); /* if it is the best match yet */ if (temp < lub && temp >= *period) { lub_divisor_shift = i; lub_index = j; lub = temp; } if (temp > glb && temp <= *period) { glb_divisor_shift = i; glb_index = j; glb = temp; } } } flags &= TRIG_ROUND_MASK; switch (flags) { case TRIG_ROUND_NEAREST: default: /* if least upper bound is better approximation */ if (lub - *period < *period - glb) *period = lub; else *period = glb; break; case TRIG_ROUND_UP: *period = lub; break; case TRIG_ROUND_DOWN: *period = glb; break; } /* set clock bits for config register appropriately */ devpriv->config_bits &= ~CLOCK_MASK; if (*period == lub) { devpriv->config_bits |= CLOCK_SELECT_BITS(lub_index) | CLOCK_DIVISOR_BITS(lub_divisor_shift); } else { devpriv->config_bits |= CLOCK_SELECT_BITS(glb_index) | CLOCK_DIVISOR_BITS(glb_divisor_shift); } return 0; } static int a2150_set_chanlist(struct comedi_device *dev, unsigned int start_channel, unsigned int num_channels) { if (start_channel + num_channels > 4) return -1; devpriv->config_bits &= ~CHANNEL_MASK; switch (num_channels) { case 1: devpriv->config_bits |= CHANNEL_BITS(0x4 | start_channel); break; case 2: if (start_channel == 0) { devpriv->config_bits |= CHANNEL_BITS(0x2); } else if (start_channel == 2) { devpriv->config_bits |= CHANNEL_BITS(0x3); } else { return -1; } break; case 4: devpriv->config_bits |= CHANNEL_BITS(0x1); break; default: return -1; break; } return 0; } MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
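The period search in a2150_get_timing() scans every master clock multiplied by the divisors 1, 2, 4 and 8, keeping the tightest achievable bounds around the requested period. A self-contained model of that search for the TRIG_ROUND_NEAREST case (a sketch only; the table values and function name are illustrative, not the driver's):

/* Return the achievable period (ns) nearest to request_ns.
 * clocks[] holds master clock periods in descending order, as in
 * struct a2150_board above. */
static unsigned int a2150_nearest_period(const int *clocks, int num_clocks,
					 unsigned int request_ns)
{
	unsigned int lub = clocks[0] << 3;		/* slowest candidate */
	unsigned int glb = clocks[num_clocks - 1];	/* fastest candidate */
	int i, j;

	/* clamp the request into the achievable range */
	if (request_ns < glb)
		request_ns = glb;
	if (request_ns > lub)
		request_ns = lub;
	for (i = 0; i < 4; i++)			/* divisor shifts: x1..x8 */
		for (j = 0; j < num_clocks; j++) {
			unsigned int t = clocks[j] << i;

			if (t >= request_ns && t < lub)
				lub = t;	/* tighter upper bound */
			if (t <= request_ns && t > glb)
				glb = t;	/* tighter lower bound */
		}
	return (lub - request_ns < request_ns - glb) ? lub : glb;
}

For the at-a2150c table {31250, 22676, 20833, 19531}, a request of 25000 ns would resolve to 22676 ns; the driver then encodes the chosen clock index and divisor shift into CLOCK_SELECT_BITS()/CLOCK_DIVISOR_BITS().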
gpl-2.0
ShinySide/G900T_Permissive_FOJ4
drivers/staging/comedi/drivers/ni_daq_dio24.c
4895
8780
/* comedi/drivers/ni_daq_dio24.c Driver for National Instruments PCMCIA DAQ-Card DIO-24 Copyright (C) 2002 Daniel Vecino Castel <dvecino@able.es> PCMCIA crap at end of file is adapted from dummy_cs.c 1.31 2001/08/24 12:13:13 from the pcmcia package. The initial developer of the pcmcia dummy_cs.c code is David A. Hinds <dahinds@users.sourceforge.net>. Portions created by David A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ************************************************************************ */ /* Driver: ni_daq_dio24 Description: National Instruments PCMCIA DAQ-Card DIO-24 Author: Daniel Vecino Castel <dvecino@able.es> Devices: [National Instruments] PCMCIA DAQ-Card DIO-24 (ni_daq_dio24) Status: ? Updated: Thu, 07 Nov 2002 21:53:06 -0800 This is just a wrapper around the 8255.o driver to properly handle the PCMCIA interface. */ /* #define LABPC_DEBUG *//* enable debugging messages */ #undef LABPC_DEBUG #include <linux/interrupt.h> #include <linux/slab.h> #include "../comedidev.h" #include <linux/ioport.h> #include "8255.h" #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> static struct pcmcia_device *pcmcia_cur_dev; #define DIO24_SIZE 4 /* size of io region used by board */ static int dio24_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int dio24_detach(struct comedi_device *dev); enum dio24_bustype { pcmcia_bustype }; struct dio24_board_struct { const char *name; int device_id; /* device id for pcmcia board */ enum dio24_bustype bustype; /* PCMCIA */ int have_dio; /* have 8255 chip */ /* function pointers so we can use inb/outb or readb/writeb as appropriate */ unsigned int (*read_byte) (unsigned int address); void (*write_byte) (unsigned int byte, unsigned int address); }; static const struct dio24_board_struct dio24_boards[] = { { .name = "daqcard-dio24", .device_id = 0x475c, /* 0x10b is manufacturer id, 0x475c is device id */ .bustype = pcmcia_bustype, .have_dio = 1, }, { .name = "ni_daq_dio24", .device_id = 0x475c, /* 0x10b is manufacturer id, 0x475c is device id */ .bustype = pcmcia_bustype, .have_dio = 1, }, }; /* * Useful for shorthand access to the particular board structure */ #define thisboard ((const struct dio24_board_struct *)dev->board_ptr) struct dio24_private { int data; /* number of data points left to be taken */ }; #define devpriv ((struct dio24_private *)dev->private) static struct comedi_driver driver_dio24 = { .driver_name = "ni_daq_dio24", .module = THIS_MODULE, .attach = dio24_attach, .detach = dio24_detach, .num_names = ARRAY_SIZE(dio24_boards), .board_name = &dio24_boards[0].name, .offset = sizeof(struct dio24_board_struct), }; static int dio24_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; unsigned long iobase = 0; #ifdef incomplete unsigned int irq = 0; #endif struct pcmcia_device *link; /* 
allocate and initialize dev->private */ if (alloc_private(dev, sizeof(struct dio24_private)) < 0) return -ENOMEM; /* get base address, irq etc. based on bustype */ switch (thisboard->bustype) { case pcmcia_bustype: link = pcmcia_cur_dev; /* XXX hack */ if (!link) return -EIO; iobase = link->resource[0]->start; #ifdef incomplete irq = link->irq; #endif break; default: pr_err("bug! couldn't determine board type\n"); return -EINVAL; break; } pr_debug("comedi%d: ni_daq_dio24: %s, io 0x%lx", dev->minor, thisboard->name, iobase); #ifdef incomplete if (irq) pr_debug("irq %u\n", irq); #endif if (iobase == 0) { pr_err("io base address is zero!\n"); return -EINVAL; } dev->iobase = iobase; #ifdef incomplete /* grab our IRQ */ dev->irq = irq; #endif dev->board_name = thisboard->name; if (alloc_subdevices(dev, 1) < 0) return -ENOMEM; /* 8255 dio */ s = dev->subdevices + 0; subdev_8255_init(dev, s, NULL, dev->iobase); return 0; }; static int dio24_detach(struct comedi_device *dev) { dev_info(dev->hw_dev, "comedi%d: ni_daq_dio24: remove\n", dev->minor); if (dev->subdevices) subdev_8255_cleanup(dev, dev->subdevices + 0); if (thisboard->bustype != pcmcia_bustype && dev->iobase) release_region(dev->iobase, DIO24_SIZE); if (dev->irq) free_irq(dev->irq, dev); return 0; }; static void dio24_config(struct pcmcia_device *link); static void dio24_release(struct pcmcia_device *link); static int dio24_cs_suspend(struct pcmcia_device *p_dev); static int dio24_cs_resume(struct pcmcia_device *p_dev); static int dio24_cs_attach(struct pcmcia_device *); static void dio24_cs_detach(struct pcmcia_device *); struct local_info_t { struct pcmcia_device *link; int stop; struct bus_operations *bus; }; static int dio24_cs_attach(struct pcmcia_device *link) { struct local_info_t *local; printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO - CS-attach!\n"); dev_dbg(&link->dev, "dio24_cs_attach()\n"); /* Allocate space for private device-specific data */ local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL); if (!local) return -ENOMEM; local->link = link; link->priv = local; pcmcia_cur_dev = link; dio24_config(link); return 0; } /* dio24_cs_attach */ static void dio24_cs_detach(struct pcmcia_device *link) { printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO - cs-detach!\n"); dev_dbg(&link->dev, "dio24_cs_detach\n"); ((struct local_info_t *)link->priv)->stop = 1; dio24_release(link); /* This points to the parent local_info_t struct */ kfree(link->priv); } /* dio24_cs_detach */ static int dio24_pcmcia_config_loop(struct pcmcia_device *p_dev, void *priv_data) { if (p_dev->config_index == 0) return -EINVAL; return pcmcia_request_io(p_dev); } static void dio24_config(struct pcmcia_device *link) { int ret; printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO! 
- config\n"); dev_dbg(&link->dev, "dio24_config\n"); link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_AUDIO | CONF_AUTO_SET_IO; ret = pcmcia_loop_config(link, dio24_pcmcia_config_loop, NULL); if (ret) { dev_warn(&link->dev, "no configuration found\n"); goto failed; } if (!link->irq) goto failed; ret = pcmcia_enable_device(link); if (ret) goto failed; return; failed: printk(KERN_INFO "Fallo"); dio24_release(link); } /* dio24_config */ static void dio24_release(struct pcmcia_device *link) { dev_dbg(&link->dev, "dio24_release\n"); pcmcia_disable_device(link); } /* dio24_release */ static int dio24_cs_suspend(struct pcmcia_device *link) { struct local_info_t *local = link->priv; /* Mark the device as stopped, to block IO until later */ local->stop = 1; return 0; } /* dio24_cs_suspend */ static int dio24_cs_resume(struct pcmcia_device *link) { struct local_info_t *local = link->priv; local->stop = 0; return 0; } /* dio24_cs_resume */ /*====================================================================*/ static const struct pcmcia_device_id dio24_cs_ids[] = { /* N.B. These IDs should match those in dio24_boards */ PCMCIA_DEVICE_MANF_CARD(0x010b, 0x475c), /* daqcard-dio24 */ PCMCIA_DEVICE_NULL }; MODULE_DEVICE_TABLE(pcmcia, dio24_cs_ids); MODULE_AUTHOR("Daniel Vecino Castel <dvecino@able.es>"); MODULE_DESCRIPTION("Comedi driver for National Instruments " "PCMCIA DAQ-Card DIO-24"); MODULE_LICENSE("GPL"); struct pcmcia_driver dio24_cs_driver = { .probe = dio24_cs_attach, .remove = dio24_cs_detach, .suspend = dio24_cs_suspend, .resume = dio24_cs_resume, .id_table = dio24_cs_ids, .owner = THIS_MODULE, .name = "ni_daq_dio24", }; static int __init init_dio24_cs(void) { printk("ni_daq_dio24: HOLA SOY YO!\n"); pcmcia_register_driver(&dio24_cs_driver); return 0; } static void __exit exit_dio24_cs(void) { pcmcia_unregister_driver(&dio24_cs_driver); } int __init init_module(void) { int ret; ret = init_dio24_cs(); if (ret < 0) return ret; return comedi_driver_register(&driver_dio24); } void __exit cleanup_module(void) { exit_dio24_cs(); comedi_driver_unregister(&driver_dio24); }
gpl-2.0
KylinUI/android_kernel_samsung_exynos5410
drivers/uio/uio_pdrv_genirq.c
4895
7284
/* * drivers/uio/uio_pdrv_genirq.c * * Userspace I/O platform driver with generic IRQ handling code. * * Copyright (C) 2008 Magnus Damm * * Based on uio_pdrv.c by Uwe Kleine-Koenig, * Copyright (C) 2008 by Digi International Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/platform_device.h> #include <linux/uio_driver.h> #include <linux/spinlock.h> #include <linux/bitops.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/stringify.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/of_address.h> #define DRIVER_NAME "uio_pdrv_genirq" struct uio_pdrv_genirq_platdata { struct uio_info *uioinfo; spinlock_t lock; unsigned long flags; struct platform_device *pdev; }; static int uio_pdrv_genirq_open(struct uio_info *info, struct inode *inode) { struct uio_pdrv_genirq_platdata *priv = info->priv; /* Wait until the Runtime PM code has woken up the device */ pm_runtime_get_sync(&priv->pdev->dev); return 0; } static int uio_pdrv_genirq_release(struct uio_info *info, struct inode *inode) { struct uio_pdrv_genirq_platdata *priv = info->priv; /* Tell the Runtime PM code that the device has become idle */ pm_runtime_put_sync(&priv->pdev->dev); return 0; } static irqreturn_t uio_pdrv_genirq_handler(int irq, struct uio_info *dev_info) { struct uio_pdrv_genirq_platdata *priv = dev_info->priv; /* Just disable the interrupt in the interrupt controller, and * remember the state so we can allow user space to enable it later. */ if (!test_and_set_bit(0, &priv->flags)) disable_irq_nosync(irq); return IRQ_HANDLED; } static int uio_pdrv_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on) { struct uio_pdrv_genirq_platdata *priv = dev_info->priv; unsigned long flags; /* Allow user space to enable and disable the interrupt * in the interrupt controller, but keep track of the * state to prevent per-irq depth damage. * * Serialize this operation to support multiple tasks. 
*/ spin_lock_irqsave(&priv->lock, flags); if (irq_on) { if (test_and_clear_bit(0, &priv->flags)) enable_irq(dev_info->irq); } else { if (!test_and_set_bit(0, &priv->flags)) disable_irq(dev_info->irq); } spin_unlock_irqrestore(&priv->lock, flags); return 0; } static int uio_pdrv_genirq_probe(struct platform_device *pdev) { struct uio_info *uioinfo = pdev->dev.platform_data; struct uio_pdrv_genirq_platdata *priv; struct uio_mem *uiomem; int ret = -EINVAL; int i; if (!uioinfo) { int irq; /* alloc uioinfo for one device */ uioinfo = kzalloc(sizeof(*uioinfo), GFP_KERNEL); if (!uioinfo) { ret = -ENOMEM; dev_err(&pdev->dev, "unable to kmalloc\n"); goto bad2; } uioinfo->name = pdev->dev.of_node->name; uioinfo->version = "devicetree"; /* Multiple IRQs are not supported */ irq = platform_get_irq(pdev, 0); if (irq == -ENXIO) uioinfo->irq = UIO_IRQ_NONE; else uioinfo->irq = irq; } if (!uioinfo || !uioinfo->name || !uioinfo->version) { dev_err(&pdev->dev, "missing platform_data\n"); goto bad0; } if (uioinfo->handler || uioinfo->irqcontrol || uioinfo->irq_flags & IRQF_SHARED) { dev_err(&pdev->dev, "interrupt configuration error\n"); goto bad0; } priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { ret = -ENOMEM; dev_err(&pdev->dev, "unable to kmalloc\n"); goto bad0; } priv->uioinfo = uioinfo; spin_lock_init(&priv->lock); priv->flags = 0; /* interrupt is enabled to begin with */ priv->pdev = pdev; uiomem = &uioinfo->mem[0]; for (i = 0; i < pdev->num_resources; ++i) { struct resource *r = &pdev->resource[i]; if (r->flags != IORESOURCE_MEM) continue; if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) { dev_warn(&pdev->dev, "device has more than " __stringify(MAX_UIO_MAPS) " I/O memory resources.\n"); break; } uiomem->memtype = UIO_MEM_PHYS; uiomem->addr = r->start; uiomem->size = resource_size(r); ++uiomem; } while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) { uiomem->size = 0; ++uiomem; } /* This driver requires no hardware specific kernel code to handle * interrupts. Instead, the interrupt handler simply disables the * interrupt in the interrupt controller. User space is responsible * for performing hardware specific acknowledge and re-enabling of * the interrupt in the interrupt controller. * * Interrupt sharing is not supported. */ uioinfo->handler = uio_pdrv_genirq_handler; uioinfo->irqcontrol = uio_pdrv_genirq_irqcontrol; uioinfo->open = uio_pdrv_genirq_open; uioinfo->release = uio_pdrv_genirq_release; uioinfo->priv = priv; /* Enable Runtime PM for this device: * The device starts in suspended state to allow the hardware to be * turned off by default. The Runtime PM bus code should power on the * hardware and enable clocks at open(). */ pm_runtime_enable(&pdev->dev); ret = uio_register_device(&pdev->dev, priv->uioinfo); if (ret) { dev_err(&pdev->dev, "unable to register uio device\n"); goto bad1; } platform_set_drvdata(pdev, priv); return 0; bad1: kfree(priv); pm_runtime_disable(&pdev->dev); bad0: /* kfree uioinfo for OF */ if (pdev->dev.of_node) kfree(uioinfo); bad2: return ret; } static int uio_pdrv_genirq_remove(struct platform_device *pdev) { struct uio_pdrv_genirq_platdata *priv = platform_get_drvdata(pdev); uio_unregister_device(priv->uioinfo); pm_runtime_disable(&pdev->dev); priv->uioinfo->handler = NULL; priv->uioinfo->irqcontrol = NULL; /* kfree uioinfo for OF */ if (pdev->dev.of_node) kfree(priv->uioinfo); kfree(priv); return 0; } static int uio_pdrv_genirq_runtime_nop(struct device *dev) { /* Runtime PM callback shared between ->runtime_suspend() * and ->runtime_resume(). Simply returns success. 
* * In this driver pm_runtime_get_sync() and pm_runtime_put_sync() * are used at open() and release() time. This allows the * Runtime PM code to turn off power to the device while the * device is unused, i.e. before open() and after release(). * * This Runtime PM callback does not need to save or restore * any registers since user space is responsible for hardware * register reinitialization after open(). */ return 0; } static const struct dev_pm_ops uio_pdrv_genirq_dev_pm_ops = { .runtime_suspend = uio_pdrv_genirq_runtime_nop, .runtime_resume = uio_pdrv_genirq_runtime_nop, }; #ifdef CONFIG_OF static const struct of_device_id uio_of_genirq_match[] = { { /* empty for now */ }, }; MODULE_DEVICE_TABLE(of, uio_of_genirq_match); #else # define uio_of_genirq_match NULL #endif static struct platform_driver uio_pdrv_genirq = { .probe = uio_pdrv_genirq_probe, .remove = uio_pdrv_genirq_remove, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .pm = &uio_pdrv_genirq_dev_pm_ops, .of_match_table = uio_of_genirq_match, }, }; module_platform_driver(uio_pdrv_genirq); MODULE_AUTHOR("Magnus Damm"); MODULE_DESCRIPTION("Userspace I/O platform driver with generic IRQ handling"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRIVER_NAME);
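A minimal userspace sketch of how this driver is typically consumed, assuming the device appears as /dev/uio0 (the path is a placeholder): per the UIO character-device interface, read() blocks until uio_pdrv_genirq_handler() fires and returns the 32-bit event count, and writing a non-zero s32 reaches uio_pdrv_genirq_irqcontrol() to re-enable the interrupt.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/uio0", O_RDWR);	/* placeholder device node */
	int32_t unmask = 1;
	uint32_t count;

	if (fd < 0)
		return 1;
	for (;;) {
		/* reaches uio_pdrv_genirq_irqcontrol(info, 1): enable_irq() */
		if (write(fd, &unmask, sizeof(unmask)) != sizeof(unmask))
			break;
		/* blocks until the handler disables the irq and wakes us */
		if (read(fd, &count, sizeof(count)) != sizeof(count))
			break;
		printf("interrupt #%u\n", count);
		/* device-specific acknowledge would go here, via mmap()ed regs */
	}
	close(fd);
	return 0;
}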
gpl-2.0
TeamTwisted/kernel_lge_hammerhead
drivers/staging/comedi/drivers/das16m1.c
4895
21368
/* comedi/drivers/das16m1.c CIO-DAS16/M1 driver Author: Frank Mori Hess, based on code from the das16 driver. Copyright (C) 2001 Frank Mori Hess <fmhess@users.sourceforge.net> COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ************************************************************************ */ /* Driver: das16m1 Description: CIO-DAS16/M1 Author: Frank Mori Hess <fmhess@users.sourceforge.net> Devices: [Measurement Computing] CIO-DAS16/M1 (cio-das16/m1) Status: works This driver supports a single board - the CIO-DAS16/M1. As far as I know, there are no other boards that have the same register layout. Even the CIO-DAS16/M1/16 is significantly different. I was _barely_ able to reach the full 1 MHz capability of this board, using a hard real-time interrupt (set the TRIG_RT flag in your struct comedi_cmd and use rtlinux or RTAI). The board can't do DMA, so the bottleneck is pulling the data across the ISA bus. I timed the interrupt handler, and it took my computer ~470 microseconds to pull 512 samples from the board. So at a 1 MHz sampling rate, expect your CPU to be spending almost all of its time in the interrupt handler. This board has some unusual restrictions for its channel/gain list. If the list has 2 or more channels in it, then two conditions must be satisfied: (1) - even/odd channels must appear at even/odd indices in the list (2) - the list must have an even number of entries. Options: [0] - base io address [1] - irq (optional, but you probably want it) irq can be omitted, although the cmd interface will not work without it.
*/ #include <linux/ioport.h> #include <linux/interrupt.h> #include "../comedidev.h" #include "8255.h" #include "8253.h" #include "comedi_fc.h" #define DAS16M1_SIZE 16 #define DAS16M1_SIZE2 8 #define DAS16M1_XTAL 100 /* 10 MHz master clock */ #define FIFO_SIZE 1024 /* 1024 sample fifo */ /* CIO-DAS16_M1.pdf "cio-das16/m1" 0 a/d bits 0-3, mux start 12 bit 1 a/d bits 4-11 unused 2 status control 3 di 4 bit do 4 bit 4 unused clear interrupt 5 interrupt, pacer 6 channel/gain queue address 7 channel/gain queue data 89ab 8254 cdef 8254 400 8255 404-407 8254 */ #define DAS16M1_AI 0 /* 16-bit wide register */ #define AI_CHAN(x) ((x) & 0xf) #define DAS16M1_CS 2 #define EXT_TRIG_BIT 0x1 #define OVRUN 0x20 #define IRQDATA 0x80 #define DAS16M1_DIO 3 #define DAS16M1_CLEAR_INTR 4 #define DAS16M1_INTR_CONTROL 5 #define EXT_PACER 0x2 #define INT_PACER 0x3 #define PACER_MASK 0x3 #define INTE 0x80 #define DAS16M1_QUEUE_ADDR 6 #define DAS16M1_QUEUE_DATA 7 #define Q_CHAN(x) ((x) & 0x7) #define Q_RANGE(x) (((x) & 0xf) << 4) #define UNIPOLAR 0x40 #define DAS16M1_8254_FIRST 0x8 #define DAS16M1_8254_FIRST_CNTRL 0xb #define TOTAL_CLEAR 0x30 #define DAS16M1_8254_SECOND 0xc #define DAS16M1_82C55 0x400 #define DAS16M1_8254_THIRD 0x404 static const struct comedi_lrange range_das16m1 = { 9, { BIP_RANGE(5), BIP_RANGE(2.5), BIP_RANGE(1.25), BIP_RANGE(0.625), UNI_RANGE(10), UNI_RANGE(5), UNI_RANGE(2.5), UNI_RANGE(1.25), BIP_RANGE(10), } }; static int das16m1_do_wbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int das16m1_di_rbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int das16m1_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int das16m1_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); static int das16m1_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s); static int das16m1_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static int das16m1_poll(struct comedi_device *dev, struct comedi_subdevice *s); static irqreturn_t das16m1_interrupt(int irq, void *d); static void das16m1_handler(struct comedi_device *dev, unsigned int status); static unsigned int das16m1_set_pacer(struct comedi_device *dev, unsigned int ns, int round_flag); static int das16m1_irq_bits(unsigned int irq); struct das16m1_board { const char *name; unsigned int ai_speed; }; static const struct das16m1_board das16m1_boards[] = { { .name = "cio-das16/m1", /* CIO-DAS16_M1.pdf */ .ai_speed = 1000, /* 1MHz max speed */ }, }; static int das16m1_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int das16m1_detach(struct comedi_device *dev); static struct comedi_driver driver_das16m1 = { .driver_name = "das16m1", .module = THIS_MODULE, .attach = das16m1_attach, .detach = das16m1_detach, .board_name = &das16m1_boards[0].name, .num_names = ARRAY_SIZE(das16m1_boards), .offset = sizeof(das16m1_boards[0]), }; struct das16m1_private_struct { unsigned int control_state; volatile unsigned int adc_count; /* number of samples completed */ /* initial value in lower half of hardware conversion counter, * needed to keep track of whether new count has been loaded into * counter yet (loaded by first sample conversion) */ u16 initial_hw_count; short ai_buffer[FIFO_SIZE]; unsigned int do_bits; /* saves status of digital output bits */ unsigned int divisor1; /* divides master clock to obtain 
conversion speed */ unsigned int divisor2; /* divides master clock to obtain conversion speed */ }; #define devpriv ((struct das16m1_private_struct *)(dev->private)) #define thisboard ((const struct das16m1_board *)(dev->board_ptr)) static int __init driver_das16m1_init_module(void) { return comedi_driver_register(&driver_das16m1); } static void __exit driver_das16m1_cleanup_module(void) { comedi_driver_unregister(&driver_das16m1); } module_init(driver_das16m1_init_module); module_exit(driver_das16m1_cleanup_module); static inline short munge_sample(short data) { return (data >> 4) & 0xfff; } static int das16m1_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { unsigned int err = 0, tmp, i; /* make sure triggers are valid */ tmp = cmd->start_src; cmd->start_src &= TRIG_NOW | TRIG_EXT; if (!cmd->start_src || tmp != cmd->start_src) err++; tmp = cmd->scan_begin_src; cmd->scan_begin_src &= TRIG_FOLLOW; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; cmd->convert_src &= TRIG_TIMER | TRIG_EXT; if (!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; cmd->stop_src &= TRIG_COUNT | TRIG_NONE; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /* step 2: make sure trigger sources are unique and mutually compatible */ if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE) err++; if (cmd->start_src != TRIG_NOW && cmd->start_src != TRIG_EXT) err++; if (cmd->convert_src != TRIG_TIMER && cmd->convert_src != TRIG_EXT) err++; if (err) return 2; /* step 3: make sure arguments are trivially compatible */ if (cmd->start_arg != 0) { cmd->start_arg = 0; err++; } if (cmd->scan_begin_src == TRIG_FOLLOW) { /* internal trigger */ if (cmd->scan_begin_arg != 0) { cmd->scan_begin_arg = 0; err++; } } if (cmd->convert_src == TRIG_TIMER) { if (cmd->convert_arg < thisboard->ai_speed) { cmd->convert_arg = thisboard->ai_speed; err++; } } if (cmd->scan_end_arg != cmd->chanlist_len) { cmd->scan_end_arg = cmd->chanlist_len; err++; } if (cmd->stop_src == TRIG_COUNT) { /* any count is allowed */ } else { /* TRIG_NONE */ if (cmd->stop_arg != 0) { cmd->stop_arg = 0; err++; } } if (err) return 3; /* step 4: fix up arguments */ if (cmd->convert_src == TRIG_TIMER) { tmp = cmd->convert_arg; /* calculate counter values that give desired timing */ i8253_cascade_ns_to_timer_2div(DAS16M1_XTAL, &(devpriv->divisor1), &(devpriv->divisor2), &(cmd->convert_arg), cmd->flags & TRIG_ROUND_MASK); if (tmp != cmd->convert_arg) err++; } if (err) return 4; /* check chanlist against board's peculiarities */ if (cmd->chanlist && cmd->chanlist_len > 1) { for (i = 0; i < cmd->chanlist_len; i++) { /* even/odd channels must go into even/odd queue addresses */ if ((i % 2) != (CR_CHAN(cmd->chanlist[i]) % 2)) { comedi_error(dev, "bad chanlist:\n" " even/odd channels must have even/odd chanlist indices"); err++; } } if ((cmd->chanlist_len % 2) != 0) { comedi_error(dev, "chanlist must be of even length or length 1"); err++; } } if (err) return 5; return 0; } static int das16m1_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; unsigned int byte, i; if (dev->irq == 0) { comedi_error(dev, "irq required to execute comedi_cmd"); return -1; } /* disable interrupts and internal pacer */ devpriv->control_state &= ~INTE & ~PACER_MASK;
outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL); /* set software count */ devpriv->adc_count = 0; /* Initialize lower half of hardware counter, used to determine how * many samples are in fifo. Value doesn't actually load into counter * until counter's next clock (the next a/d conversion) */ i8254_load(dev->iobase + DAS16M1_8254_FIRST, 0, 1, 0, 2); /* remember current reading of counter so we know when counter has * actually been loaded */ devpriv->initial_hw_count = i8254_read(dev->iobase + DAS16M1_8254_FIRST, 0, 1); /* setup channel/gain queue */ for (i = 0; i < cmd->chanlist_len; i++) { outb(i, dev->iobase + DAS16M1_QUEUE_ADDR); byte = Q_CHAN(CR_CHAN(cmd->chanlist[i])) | Q_RANGE(CR_RANGE(cmd->chanlist[i])); outb(byte, dev->iobase + DAS16M1_QUEUE_DATA); } /* set counter mode and counts */ cmd->convert_arg = das16m1_set_pacer(dev, cmd->convert_arg, cmd->flags & TRIG_ROUND_MASK); /* set control & status register */ byte = 0; /* if we are using external start trigger (also board dislikes having * both start and conversion triggers external simultaneously) */ if (cmd->start_src == TRIG_EXT && cmd->convert_src != TRIG_EXT) byte |= EXT_TRIG_BIT; outb(byte, dev->iobase + DAS16M1_CS); /* clear interrupt bit */ outb(0, dev->iobase + DAS16M1_CLEAR_INTR); /* enable interrupts and internal pacer */ devpriv->control_state &= ~PACER_MASK; if (cmd->convert_src == TRIG_TIMER) devpriv->control_state |= INT_PACER; else devpriv->control_state |= EXT_PACER; devpriv->control_state |= INTE; outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL); return 0; } static int das16m1_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { devpriv->control_state &= ~INTE & ~PACER_MASK; outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL); return 0; } static int das16m1_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i, n; int byte; const int timeout = 1000; /* disable interrupts and internal pacer */ devpriv->control_state &= ~INTE & ~PACER_MASK; outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL); /* setup channel/gain queue */ outb(0, dev->iobase + DAS16M1_QUEUE_ADDR); byte = Q_CHAN(CR_CHAN(insn->chanspec)) | Q_RANGE(CR_RANGE(insn->chanspec)); outb(byte, dev->iobase + DAS16M1_QUEUE_DATA); for (n = 0; n < insn->n; n++) { /* clear IRQDATA bit */ outb(0, dev->iobase + DAS16M1_CLEAR_INTR); /* trigger conversion */ outb(0, dev->iobase); for (i = 0; i < timeout; i++) { if (inb(dev->iobase + DAS16M1_CS) & IRQDATA) break; } if (i == timeout) { comedi_error(dev, "timeout"); return -ETIME; } data[n] = munge_sample(inw(dev->iobase)); } return n; } static int das16m1_di_rbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int bits; bits = inb(dev->iobase + DAS16M1_DIO) & 0xf; data[1] = bits; data[0] = 0; return 2; } static int das16m1_do_wbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int wbits; /* only set bits that have been masked */ data[0] &= 0xf; wbits = devpriv->do_bits; /* zero bits that have been masked */ wbits &= ~data[0]; /* set masked bits */ wbits |= data[0] & data[1]; devpriv->do_bits = wbits; data[1] = wbits; outb(devpriv->do_bits, dev->iobase + DAS16M1_DIO); return 2; } static int das16m1_poll(struct comedi_device *dev, struct comedi_subdevice *s) { unsigned long flags; unsigned int status; /* prevent race with interrupt handler */ 
spin_lock_irqsave(&dev->spinlock, flags); status = inb(dev->iobase + DAS16M1_CS); das16m1_handler(dev, status); spin_unlock_irqrestore(&dev->spinlock, flags); return s->async->buf_write_count - s->async->buf_read_count; } static irqreturn_t das16m1_interrupt(int irq, void *d) { int status; struct comedi_device *dev = d; if (dev->attached == 0) { comedi_error(dev, "premature interrupt"); return IRQ_HANDLED; } /* prevent race with comedi_poll() */ spin_lock(&dev->spinlock); status = inb(dev->iobase + DAS16M1_CS); if ((status & (IRQDATA | OVRUN)) == 0) { comedi_error(dev, "spurious interrupt"); spin_unlock(&dev->spinlock); return IRQ_NONE; } das16m1_handler(dev, status); /* clear interrupt */ outb(0, dev->iobase + DAS16M1_CLEAR_INTR); spin_unlock(&dev->spinlock); return IRQ_HANDLED; } static void munge_sample_array(short *array, unsigned int num_elements) { unsigned int i; for (i = 0; i < num_elements; i++) array[i] = munge_sample(array[i]); } static void das16m1_handler(struct comedi_device *dev, unsigned int status) { struct comedi_subdevice *s; struct comedi_async *async; struct comedi_cmd *cmd; u16 num_samples; u16 hw_counter; s = dev->read_subdev; async = s->async; async->events = 0; cmd = &async->cmd; /* figure out how many samples are in fifo */ hw_counter = i8254_read(dev->iobase + DAS16M1_8254_FIRST, 0, 1); /* make sure hardware counter reading is not bogus due to initial value * not having been loaded yet */ if (devpriv->adc_count == 0 && hw_counter == devpriv->initial_hw_count) { num_samples = 0; } else { /* The calculation of num_samples looks odd, but it uses the following facts. * 16 bit hardware counter is initialized with value of zero (which really * means 0x10000). The counter decrements by one on each conversion * (when the counter decrements from zero it goes to 0xffff). num_samples * is a 16 bit variable, so it will roll over in a similar fashion to the * hardware counter. Work it out, and this is what you get. */ num_samples = -hw_counter - devpriv->adc_count; } /* check if we only need some of the points */ if (cmd->stop_src == TRIG_COUNT) { if (num_samples > cmd->stop_arg * cmd->chanlist_len) num_samples = cmd->stop_arg * cmd->chanlist_len; } /* make sure we don't try to get too many points if fifo has overrun */ if (num_samples > FIFO_SIZE) num_samples = FIFO_SIZE; insw(dev->iobase, devpriv->ai_buffer, num_samples); munge_sample_array(devpriv->ai_buffer, num_samples); cfc_write_array_to_buffer(s, devpriv->ai_buffer, num_samples * sizeof(short)); devpriv->adc_count += num_samples; if (cmd->stop_src == TRIG_COUNT) { if (devpriv->adc_count >= cmd->stop_arg * cmd->chanlist_len) { /* end of acquisition */ das16m1_cancel(dev, s); async->events |= COMEDI_CB_EOA; } } /* this probably won't catch overruns since the card doesn't generate * overrun interrupts, but we might as well try */ if (status & OVRUN) { das16m1_cancel(dev, s); async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; comedi_error(dev, "fifo overflow"); } comedi_event(dev, s); } /* This function takes a time in nanoseconds and sets the * 2 pacer clocks to the closest frequency possible. It also * returns the actual sampling period.
*/ static unsigned int das16m1_set_pacer(struct comedi_device *dev, unsigned int ns, int rounding_flags) { i8253_cascade_ns_to_timer_2div(DAS16M1_XTAL, &(devpriv->divisor1), &(devpriv->divisor2), &ns, rounding_flags & TRIG_ROUND_MASK); /* Write the values of ctr1 and ctr2 into counters 1 and 2 */ i8254_load(dev->iobase + DAS16M1_8254_SECOND, 0, 1, devpriv->divisor1, 2); i8254_load(dev->iobase + DAS16M1_8254_SECOND, 0, 2, devpriv->divisor2, 2); return ns; } static int das16m1_irq_bits(unsigned int irq) { int ret; switch (irq) { case 10: ret = 0x0; break; case 11: ret = 0x1; break; case 12: ret = 0x2; break; case 15: ret = 0x3; break; case 2: ret = 0x4; break; case 3: ret = 0x5; break; case 5: ret = 0x6; break; case 7: ret = 0x7; break; default: return -1; break; } return ret << 4; } /* * Options list: * 0 I/O base * 1 IRQ */ static int das16m1_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; int ret; unsigned int irq; unsigned long iobase; iobase = it->options[0]; ret = alloc_private(dev, sizeof(struct das16m1_private_struct)); if (ret < 0) return ret; dev->board_name = thisboard->name; if (!request_region(iobase, DAS16M1_SIZE, driver_das16m1.driver_name)) { comedi_error(dev, "I/O port conflict\n"); return -EIO; } if (!request_region(iobase + DAS16M1_82C55, DAS16M1_SIZE2, driver_das16m1.driver_name)) { release_region(iobase, DAS16M1_SIZE); comedi_error(dev, "I/O port conflict\n"); return -EIO; } dev->iobase = iobase; /* now for the irq */ irq = it->options[1]; /* make sure it is valid */ if (das16m1_irq_bits(irq) >= 0) { ret = request_irq(irq, das16m1_interrupt, 0, driver_das16m1.driver_name, dev); if (ret < 0) return ret; dev->irq = irq; printk ("irq %u\n", irq); } else if (irq == 0) { printk (", no irq\n"); } else { comedi_error(dev, "invalid irq\n" " valid irqs are 2, 3, 5, 7, 10, 11, 12, or 15\n"); return -EINVAL; } ret = alloc_subdevices(dev, 4); if (ret < 0) return ret; s = dev->subdevices + 0; dev->read_subdev = s; /* ai */ s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_CMD_READ; s->n_chan = 8; s->len_chanlist = 256; s->maxdata = (1 << 12) - 1; s->range_table = &range_das16m1; s->insn_read = das16m1_ai_rinsn; s->do_cmdtest = das16m1_cmd_test; s->do_cmd = das16m1_cmd_exec; s->cancel = das16m1_cancel; s->poll = das16m1_poll; s = dev->subdevices + 1; /* di */ s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE; s->n_chan = 4; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = das16m1_di_rbits; s = dev->subdevices + 2; /* do */ s->type = COMEDI_SUBD_DO; s->subdev_flags = SDF_WRITABLE | SDF_READABLE; s->n_chan = 4; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = das16m1_do_wbits; s = dev->subdevices + 3; /* 8255 */ subdev_8255_init(dev, s, NULL, dev->iobase + DAS16M1_82C55); /* disable upper half of hardware conversion counter so it doesn't mess with us */ outb(TOTAL_CLEAR, dev->iobase + DAS16M1_8254_FIRST_CNTRL); /* initialize digital output lines */ outb(devpriv->do_bits, dev->iobase + DAS16M1_DIO); /* set the interrupt level */ if (dev->irq) devpriv->control_state = das16m1_irq_bits(dev->irq); else devpriv->control_state = 0; outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL); return 0; } static int das16m1_detach(struct comedi_device *dev) { /* das16m1_reset(dev); */ if (dev->subdevices) subdev_8255_cleanup(dev, dev->subdevices + 3); if (dev->irq) free_irq(dev->irq, dev); if (dev->iobase) { release_region(dev->iobase, DAS16M1_SIZE);
release_region(dev->iobase + DAS16M1_82C55, DAS16M1_SIZE2); } return 0; } MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
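A standalone sketch of the fifo-depth arithmetic used in das16m1_handler() above: the 8254 counter is loaded with 0 (which the hardware treats as 0x10000) and decrements once per conversion, so 16-bit wraparound subtraction recovers the number of samples still in the fifo. The sample figures below are made up for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t conversions = 70000;	/* total a/d conversions so far */
	uint32_t drained = 69500;	/* samples already read out of the fifo */
	/* what i8254_read() would return after that many decrements */
	uint16_t hw_counter = (uint16_t)(0 - conversions);
	/* the driver's expression: num_samples = -hw_counter - adc_count */
	uint16_t num_samples = (uint16_t)(-hw_counter - drained);

	/* prints 500: the rollovers on both sides cancel out */
	printf("samples in fifo: %u\n", num_samples);
	return 0;
}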
gpl-2.0
Entropy512/kernel_motorola_falcon_reference
drivers/gpu/drm/nouveau/nv40_fb.c
5407
4572
#include "drmP.h" #include "drm.h" #include "nouveau_drv.h" #include "nouveau_drm.h" void nv40_fb_set_tile_region(struct drm_device *dev, int i) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; switch (dev_priv->chipset) { case 0x40: nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit); nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch); nv_wr32(dev, NV10_PFB_TILE(i), tile->addr); break; default: nv_wr32(dev, NV40_PFB_TLIMIT(i), tile->limit); nv_wr32(dev, NV40_PFB_TSIZE(i), tile->pitch); nv_wr32(dev, NV40_PFB_TILE(i), tile->addr); break; } } static void nv40_fb_init_gart(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma; if (dev_priv->gart_info.type != NOUVEAU_GART_HW) { nv_wr32(dev, 0x100800, 0x00000001); return; } nv_wr32(dev, 0x100800, gart->pinst | 0x00000002); nv_mask(dev, 0x10008c, 0x00000100, 0x00000100); nv_wr32(dev, 0x100820, 0x00000000); } static void nv44_fb_init_gart(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma; u32 vinst; if (dev_priv->gart_info.type != NOUVEAU_GART_HW) { nv_wr32(dev, 0x100850, 0x80000000); nv_wr32(dev, 0x100800, 0x00000001); return; } /* calculate vram address of this PRAMIN block, object * must be allocated on 512KiB alignment, and not exceed * a total size of 512KiB for this to work correctly */ vinst = nv_rd32(dev, 0x10020c); vinst -= ((gart->pinst >> 19) + 1) << 19; nv_wr32(dev, 0x100850, 0x80000000); nv_wr32(dev, 0x100818, dev_priv->gart_info.dummy.addr); nv_wr32(dev, 0x100804, dev_priv->gart_info.aper_size); nv_wr32(dev, 0x100850, 0x00008000); nv_mask(dev, 0x10008c, 0x00000200, 0x00000200); nv_wr32(dev, 0x100820, 0x00000000); nv_wr32(dev, 0x10082c, 0x00000001); nv_wr32(dev, 0x100800, vinst | 0x00000010); } int nv40_fb_vram_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; /* 0x001218 is actually present on a few other NV4X I looked at, * and even contains sane values matching 0x100474. From looking * at various vbios images however, this isn't the case everywhere. 
* So, I chose to use the same regs I've seen NVIDIA reading around * the memory detection, hopefully that'll get us the right numbers */ if (dev_priv->chipset == 0x40) { u32 pbus1218 = nv_rd32(dev, 0x001218); switch (pbus1218 & 0x00000300) { case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_SDRAM; break; case 0x00000100: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break; case 0x00000200: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break; case 0x00000300: dev_priv->vram_type = NV_MEM_TYPE_DDR2; break; } } else if (dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) { u32 pfb914 = nv_rd32(dev, 0x100914); switch (pfb914 & 0x00000003) { case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break; case 0x00000001: dev_priv->vram_type = NV_MEM_TYPE_DDR2; break; case 0x00000002: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break; case 0x00000003: break; } } else if (dev_priv->chipset != 0x4e) { u32 pfb474 = nv_rd32(dev, 0x100474); if (pfb474 & 0x00000004) dev_priv->vram_type = NV_MEM_TYPE_GDDR3; if (pfb474 & 0x00000002) dev_priv->vram_type = NV_MEM_TYPE_DDR2; if (pfb474 & 0x00000001) dev_priv->vram_type = NV_MEM_TYPE_DDR1; } else { dev_priv->vram_type = NV_MEM_TYPE_STOLEN; } dev_priv->vram_size = nv_rd32(dev, 0x10020c) & 0xff000000; return 0; } int nv40_fb_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; uint32_t tmp; int i; if (dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) { if (nv44_graph_class(dev)) nv44_fb_init_gart(dev); else nv40_fb_init_gart(dev); } switch (dev_priv->chipset) { case 0x40: case 0x45: tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2); nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15)); pfb->num_tiles = NV10_PFB_TILE__SIZE; break; case 0x46: /* G72 */ case 0x47: /* G70 */ case 0x49: /* G71 */ case 0x4b: /* G73 */ case 0x4c: /* C51 (G7X version) */ pfb->num_tiles = NV40_PFB_TILE__SIZE_1; break; default: pfb->num_tiles = NV40_PFB_TILE__SIZE_0; break; } /* Turn all the tiling regions off. */ for (i = 0; i < pfb->num_tiles; i++) pfb->set_tile_region(dev, i); return 0; } void nv40_fb_takedown(struct drm_device *dev) { }
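A worked example of the vram_size line in nv40_fb_vram_init() above: only the top byte of register 0x10020c carries the memory amount, so sizes come in 16 MiB steps. The readback value here is hypothetical.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reg_10020c = 0x2000d0c0;	/* hypothetical readback */
	uint32_t vram_size = reg_10020c & 0xff000000;

	printf("%u MiB\n", vram_size >> 20);	/* prints 512 */
	return 0;
}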
gpl-2.0
Blechd0se/mako_kernel
arch/powerpc/platforms/wsp/h8.c
6943
2649
/* * Copyright 2008-2011, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/of.h> #include <linux/io.h> #include "wsp.h" /* * The UART connection to the H8 is over ttyS1 which is just a 16550. * We assume that FW has it set up right and no one messes with it. */ static u8 __iomem *h8; #define RBR 0 /* Receiver Buffer Register */ #define THR 0 /* Transmitter Holding Register */ #define LSR 5 /* Line Status Register */ #define LSR_DR 0x01 /* LSR value for Data-Ready */ #define LSR_THRE 0x20 /* LSR value for Transmitter-Holding-Register-Empty */ static void wsp_h8_putc(int c) { u8 lsr; do { lsr = readb(h8 + LSR); } while ((lsr & LSR_THRE) != LSR_THRE); writeb(c, h8 + THR); } static int wsp_h8_getc(void) { u8 lsr; do { lsr = readb(h8 + LSR); } while ((lsr & LSR_DR) != LSR_DR); return readb(h8 + RBR); } static void wsp_h8_puts(const char *s, int sz) { int i; for (i = 0; i < sz; i++) { wsp_h8_putc(s[i]); /* no flow control so wait for echo */ wsp_h8_getc(); } wsp_h8_putc('\r'); wsp_h8_putc('\n'); } static void wsp_h8_terminal_cmd(const char *cmd, int sz) { hard_irq_disable(); wsp_h8_puts(cmd, sz); /* should never return, but just in case */ for (;;) continue; } void wsp_h8_restart(char *cmd) { static const char restart[] = "warm-reset"; (void)cmd; wsp_h8_terminal_cmd(restart, sizeof(restart) - 1); } void wsp_h8_power_off(void) { static const char off[] = "power-off"; wsp_h8_terminal_cmd(off, sizeof(off) - 1); } static void __iomem *wsp_h8_getaddr(void) { struct device_node *aliases; struct device_node *uart; struct property *path; void __iomem *va = NULL; /* * there is nothing in the devtree to tell us which is mapped * to the H8, but we know it is the second serial port. */ aliases = of_find_node_by_path("/aliases"); if (aliases == NULL) return NULL; path = of_find_property(aliases, "serial1", NULL); if (path == NULL) goto out; uart = of_find_node_by_path(path->value); if (uart == NULL) goto out; va = of_iomap(uart, 0); /* remove it so no one messes with it */ of_detach_node(uart); of_node_put(uart); out: of_node_put(aliases); return va; } void __init wsp_setup_h8(void) { h8 = wsp_h8_getaddr(); /* Devtree change? Let's hard map it anyway */ if (h8 == NULL) { pr_warn("UART to H8 could not be found\n"); h8 = ioremap(0xffc0008000ULL, 0x100); } }
gpl-2.0
LeeDroid-/Ace-2.6.35
arch/mips/pci/fixup-lemote2f.c
8479
4879
/* * Copyright (C) 2008 Lemote Technology * Copyright (C) 2004 ICT CAS * Author: Li xiaoyu, lixy@ict.ac.cn * * Copyright (C) 2007 Lemote, Inc. * Author: Fuxin Zhang, zhangfx@lemote.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/init.h> #include <linux/pci.h> #include <loongson.h> #include <cs5536/cs5536.h> #include <cs5536/cs5536_pci.h> /* PCI interrupt pins * * These should not be changed unless you have checked the Loongson2F * interrupt register setup and your PCI card's interrupt dispatch. */ #define PCIA 4 #define PCIB 5 #define PCIC 6 #define PCID 7 /* all PCI devices have the PCIA pin; check the datasheet. */ static char irq_tab[][5] __initdata = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0}, /* 11: Unused */ {0, 0, 0, 0, 0}, /* 12: Unused */ {0, 0, 0, 0, 0}, /* 13: Unused */ {0, 0, 0, 0, 0}, /* 14: Unused */ {0, 0, 0, 0, 0}, /* 15: Unused */ {0, 0, 0, 0, 0}, /* 16: Unused */ {0, PCIA, 0, 0, 0}, /* 17: RTL8110-0 */ {0, PCIB, 0, 0, 0}, /* 18: RTL8110-1 */ {0, PCIC, 0, 0, 0}, /* 19: SiI3114 */ {0, PCID, 0, 0, 0}, /* 20: 3-ports nec usb */ {0, PCIA, PCIB, PCIC, PCID}, /* 21: PCI-SLOT */ {0, 0, 0, 0, 0}, /* 22: Unused */ {0, 0, 0, 0, 0}, /* 23: Unused */ {0, 0, 0, 0, 0}, /* 24: Unused */ {0, 0, 0, 0, 0}, /* 25: Unused */ {0, 0, 0, 0, 0}, /* 26: Unused */ {0, 0, 0, 0, 0}, /* 27: Unused */ }; int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int virq; if ((PCI_SLOT(dev->devfn) != PCI_IDSEL_CS5536) && (PCI_SLOT(dev->devfn) < 32)) { virq = irq_tab[slot][pin]; printk(KERN_INFO "slot: %d, pin: %d, irq: %d\n", slot, pin, virq + LOONGSON_IRQ_BASE); if (virq != 0) return LOONGSON_IRQ_BASE + virq; else return 0; } else if (PCI_SLOT(dev->devfn) == PCI_IDSEL_CS5536) { /* cs5536 */ switch (PCI_FUNC(dev->devfn)) { case 2: pci_write_config_byte(dev, PCI_INTERRUPT_LINE, CS5536_IDE_INTR); return CS5536_IDE_INTR; /* for IDE */ case 3: pci_write_config_byte(dev, PCI_INTERRUPT_LINE, CS5536_ACC_INTR); return CS5536_ACC_INTR; /* for AUDIO */ case 4: /* for OHCI */ case 5: /* for EHCI */ case 6: /* for UDC */ case 7: /* for OTG */ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, CS5536_USB_INTR); return CS5536_USB_INTR; } return dev->irq; } else { printk(KERN_INFO "unexpected PCI slot number.\n"); return 0; } } /* Do platform specific device initialization at pci_enable_device() time */ int pcibios_plat_dev_init(struct pci_dev *dev) { return 0; } /* CS5536 SPEC.
fixup */ static void __init loongson_cs5536_isa_fixup(struct pci_dev *pdev) { /* the uart1 and uart2 interrupts in the PIC are enabled by default */ pci_write_config_dword(pdev, PCI_UART1_INT_REG, 1); pci_write_config_dword(pdev, PCI_UART2_INT_REG, 1); } static void __init loongson_cs5536_ide_fixup(struct pci_dev *pdev) { /* set the mutex pin to its IDE function */ pci_write_config_dword(pdev, PCI_IDE_CFG_REG, CS5536_IDE_FLASH_SIGNATURE); } static void __init loongson_cs5536_acc_fixup(struct pci_dev *pdev) { /* enable the AUDIO interrupt in the PIC */ pci_write_config_dword(pdev, PCI_ACC_INT_REG, 1); pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xc0); } static void __init loongson_cs5536_ohci_fixup(struct pci_dev *pdev) { /* enable the OHCI interrupt in the PIC */ /* the OHCI, EHCI, UDC and OTG share one interrupt in the PIC */ pci_write_config_dword(pdev, PCI_OHCI_INT_REG, 1); } static void __init loongson_cs5536_ehci_fixup(struct pci_dev *pdev) { u32 hi, lo; /* Serial short detect enable */ _rdmsr(USB_MSR_REG(USB_CONFIG), &hi, &lo); _wrmsr(USB_MSR_REG(USB_CONFIG), (1 << 1) | (1 << 3), lo); /* set the USB2.0 micro frame length */ pci_write_config_dword(pdev, PCI_EHCI_FLADJ_REG, 0x2000); } static void __init loongson_nec_fixup(struct pci_dev *pdev) { unsigned int val; pci_read_config_dword(pdev, 0xe0, &val); /* only 2 of the ports are used */ pci_write_config_dword(pdev, 0xe0, (val & ~3) | 0x2); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, loongson_cs5536_isa_fixup); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_OHC, loongson_cs5536_ohci_fixup); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_EHC, loongson_cs5536_ehci_fixup); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_AUDIO, loongson_cs5536_acc_fixup); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE, loongson_cs5536_ide_fixup); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB, loongson_nec_fixup);
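A worked example of the table lookup in pcibios_map_irq() above, using the RTL8110-1 row: PCI interrupt pins are 1-based (INTA == 1) and column 0 of each row is a pad, so the lookup yields PCIB and the returned irq is LOONGSON_IRQ_BASE + 5. The base value below is a placeholder; the real one comes from loongson.h.

#include <stdio.h>

#define PCIB 5
#define LOONGSON_IRQ_BASE 32	/* placeholder; the platform header defines it */

static const char rtl8110_1_row[5] = {0, PCIB, 0, 0, 0};	/* 18: RTL8110-1 */

int main(void)
{
	int pin = 1;	/* INTA */

	printf("virq = %d\n", LOONGSON_IRQ_BASE + rtl8110_1_row[pin]);
	return 0;
}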
gpl-2.0
INFAMOUS-LOLLIPOP/android_kernel_htc_msm8974
arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
8735
3569
/***********************license start*************** * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2008 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. * Contact Cavium Networks for more information ***********************license end**************************************/ /* * This module provides system/board/application information obtained * by the bootloader. */ #include <linux/module.h> #include <asm/octeon/cvmx.h> #include <asm/octeon/cvmx-spinlock.h> #include <asm/octeon/cvmx-sysinfo.h> /** * This structure defines the private state maintained by the sysinfo module. * */ static struct { struct cvmx_sysinfo sysinfo; /* system information */ cvmx_spinlock_t lock; /* mutex spinlock */ } state = { .lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER }; /* * Global variables that define the min/max of the memory region set * up for 32 bit userspace access. */ uint64_t linux_mem32_min; uint64_t linux_mem32_max; uint64_t linux_mem32_wired; uint64_t linux_mem32_offset; /** * This function returns the application information as obtained * by the bootloader. This provides the core mask of the cores * running the same application image, as well as the physical * memory regions available to the core. * * Returns Pointer to the boot information structure * */ struct cvmx_sysinfo *cvmx_sysinfo_get(void) { return &(state.sysinfo); } EXPORT_SYMBOL(cvmx_sysinfo_get); /** * This function is used in non-simple executive environments (such as * Linux kernel, u-boot, etc.) to configure the minimal fields that * are required to use simple executive files directly. * * Locking (if required) must be handled outside of this * function * * @phy_mem_desc_ptr: * Pointer to global physical memory descriptor * (bootmem descriptor) @board_type: Octeon board * type enumeration * * @board_rev_major: * Board major revision * @board_rev_minor: * Board minor revision * @cpu_clock_hz: * CPU clock frequency in hertz * * Returns 0: Failure * 1: Success */ int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr, uint16_t board_type, uint8_t board_rev_major, uint8_t board_rev_minor, uint32_t cpu_clock_hz) { /* The sysinfo structure was already initialized */ if (state.sysinfo.board_type) return 0; memset(&(state.sysinfo), 0x0, sizeof(state.sysinfo)); state.sysinfo.phy_mem_desc_ptr = phy_mem_desc_ptr; state.sysinfo.board_type = board_type; state.sysinfo.board_rev_major = board_rev_major; state.sysinfo.board_rev_minor = board_rev_minor; state.sysinfo.cpu_clock_hz = cpu_clock_hz; return 1; }
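A minimal usage sketch of the two entry points above, assuming a caller early in platform setup; the board type, revision and clock values are placeholders, not real board data.

/* Sketch only: placeholder board values, not a real configuration. */
void example_sysinfo_setup(void)
{
	struct cvmx_sysinfo *si;

	/* returns 1 on success, 0 if sysinfo was already initialized */
	if (cvmx_sysinfo_minimal_initialize(NULL,	/* bootmem descriptor */
					    0x0800,	/* board type (placeholder) */
					    1, 0,	/* rev major/minor */
					    500000000))	/* 500 MHz */
		si = cvmx_sysinfo_get();
	else
		si = NULL;
	(void)si;
}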
gpl-2.0
smarkwell/asuswrt-merlin
release/src-rt/linux/linux-2.6/drivers/net/r8169.c
32
75758
/* ========================================================================= r8169.c: A RealTek RTL-8169 Gigabit Ethernet driver for Linux kernel 2.4.x. -------------------------------------------------------------------- History: Feb 4 2002 - created initially by ShuChen <shuchen@realtek.com.tw>. May 20 2002 - Add link status force-mode and TBI mode support. 2004 - Massive updates. See kernel SCM system for details. ========================================================================= 1. [DEPRECATED: use ethtool instead] The media can be forced in 5 modes. Command: 'insmod r8169 media = SET_MEDIA' Ex: 'insmod r8169 media = 0x04' will force PHY to operate in 100Mbps Half-duplex. SET_MEDIA can be: _10_Half = 0x01 _10_Full = 0x02 _100_Half = 0x04 _100_Full = 0x08 _1000_Full = 0x10 2. Support TBI mode. ========================================================================= VERSION 1.1 <2002/10/4> Bits 4:0 of MII register 4 are called the "selector field", and have to be 00001b to indicate support of IEEE std 802.3 during NWay process of exchanging Link Code Word (FLP). VERSION 1.2 <2002/11/30> - Large style cleanup - Use ether_crc in stock kernel (linux/crc32.h) - Copy mc_filter setup code from 8139cp (includes an optimization, and avoids set_bit use) VERSION 1.6LK <2004/04/14> - Merge of Realtek's version 1.6 - Conversion to DMA API - Suspend/resume - Endianness - Misc Rx/Tx bugs VERSION 2.2LK <2005/01/25> - RX csum, TX csum/SG, TSO - VLAN - baby (< 7200) Jumbo frames support - Merge of Realtek's version 2.2 (new phy) */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/delay.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/if_vlan.h> #include <linux/crc32.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/init.h> #include <linux/dma-mapping.h> #include <asm/system.h> #include <asm/io.h> #include <asm/irq.h> #ifdef CONFIG_R8169_NAPI #define NAPI_SUFFIX "-NAPI" #else #define NAPI_SUFFIX "" #endif #define RTL8169_VERSION "2.2LK" NAPI_SUFFIX #define MODULENAME "r8169" #define PFX MODULENAME ": " #ifdef RTL8169_DEBUG #define assert(expr) \ if (!(expr)) { \ printk( "Assertion failed! %s,%s,%s,line=%d\n", \ #expr,__FILE__,__FUNCTION__,__LINE__); \ } #define dprintk(fmt, args...) do { printk(PFX fmt, ## args); } while (0) #else #define assert(expr) do {} while (0) #define dprintk(fmt, args...) do {} while (0) #endif /* RTL8169_DEBUG */ #define R8169_MSG_DEFAULT \ (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) #define TX_BUFFS_AVAIL(tp) \ (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1) #ifdef CONFIG_R8169_NAPI #define rtl8169_rx_skb netif_receive_skb #define rtl8169_rx_hwaccel_skb vlan_hwaccel_receive_skb #define rtl8169_rx_quota(count, quota) min(count, quota) #else #define rtl8169_rx_skb netif_rx #define rtl8169_rx_hwaccel_skb vlan_hwaccel_rx #define rtl8169_rx_quota(count, quota) count #endif /* media options */ #define MAX_UNITS 8 static int media[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 }; static int num_media = 0; /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ static const int max_interrupt_work = 20; /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). The RTL chips use a 64 element hash table based on the Ethernet CRC.
*/ static const int multicast_filter_limit = 32; /* MAC address length */ #define MAC_ADDR_LEN 6 #define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */ #define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ #define EarlyTxThld 0x3F /* 0x3F means NO early transmit */ #define RxPacketMaxSize 0x3FE8 /* 16K - 1 - ETH_HLEN - VLAN - CRC... */ #define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */ #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ #define R8169_REGS_SIZE 256 #define R8169_NAPI_WEIGHT 64 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */ #define NUM_RX_DESC 256 /* Number of Rx descriptor registers */ #define RX_BUF_SIZE 1536 /* Rx Buffer size */ #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) #define RTL8169_TX_TIMEOUT (6*HZ) #define RTL8169_PHY_TIMEOUT (10*HZ) /* write/read MMIO register */ #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg)) #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg)) #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg)) #define RTL_R8(reg) readb (ioaddr + (reg)) #define RTL_R16(reg) readw (ioaddr + (reg)) #define RTL_R32(reg) ((unsigned long) readl (ioaddr + (reg))) enum mac_version { RTL_GIGA_MAC_VER_01 = 0x00, RTL_GIGA_MAC_VER_02 = 0x01, RTL_GIGA_MAC_VER_03 = 0x02, RTL_GIGA_MAC_VER_04 = 0x03, RTL_GIGA_MAC_VER_05 = 0x04, RTL_GIGA_MAC_VER_11 = 0x0b, RTL_GIGA_MAC_VER_12 = 0x0c, RTL_GIGA_MAC_VER_13 = 0x0d, RTL_GIGA_MAC_VER_14 = 0x0e, RTL_GIGA_MAC_VER_15 = 0x0f }; enum phy_version { RTL_GIGA_PHY_VER_C = 0x03, /* PHY Reg 0x03 bit0-3 == 0x0000 */ RTL_GIGA_PHY_VER_D = 0x04, /* PHY Reg 0x03 bit0-3 == 0x0000 */ RTL_GIGA_PHY_VER_E = 0x05, /* PHY Reg 0x03 bit0-3 == 0x0000 */ RTL_GIGA_PHY_VER_F = 0x06, /* PHY Reg 0x03 bit0-3 == 0x0001 */ RTL_GIGA_PHY_VER_G = 0x07, /* PHY Reg 0x03 bit0-3 == 0x0002 */ RTL_GIGA_PHY_VER_H = 0x08, /* PHY Reg 0x03 bit0-3 == 0x0003 */ }; #define _R(NAME,MAC,MASK) \ { .name = NAME, .mac_version = MAC, .RxConfigMask = MASK } static const struct { const char *name; u8 mac_version; u32 RxConfigMask; /* Clears the bits supported by this chip */ } rtl_chip_info[] = { _R("RTL8169", RTL_GIGA_MAC_VER_01, 0xff7e1880), _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_02, 0xff7e1880), _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_03, 0xff7e1880), _R("RTL8169sb/8110sb", RTL_GIGA_MAC_VER_04, 0xff7e1880), _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_05, 0xff7e1880), _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139 _R("RTL8100e", RTL_GIGA_MAC_VER_14, 0xff7e1880), // PCI-E 8139 _R("RTL8100e", RTL_GIGA_MAC_VER_15, 0xff7e1880) // PCI-E 8139 }; #undef _R enum cfg_version { RTL_CFG_0 = 0x00, RTL_CFG_1, RTL_CFG_2 }; static const struct { unsigned int region; unsigned int align; } rtl_cfg_info[] = { [RTL_CFG_0] = { 1, NET_IP_ALIGN }, [RTL_CFG_1] = { 2, NET_IP_ALIGN }, [RTL_CFG_2] = { 2, 8 } }; static struct pci_device_id rtl8169_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_2 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 
0, 0, RTL_CFG_0 }, { PCI_DEVICE(0x1259, 0xc107), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 }, { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 }, {0,}, }; MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); static int rx_copybreak = 200; static int use_dac; static struct { u32 msg_enable; } debug = { -1 }; enum RTL8169_registers { MAC0 = 0, /* Ethernet hardware address. */ MAR0 = 8, /* Multicast filter. */ CounterAddrLow = 0x10, CounterAddrHigh = 0x14, TxDescStartAddrLow = 0x20, TxDescStartAddrHigh = 0x24, TxHDescStartAddrLow = 0x28, TxHDescStartAddrHigh = 0x2c, FLASH = 0x30, ERSR = 0x36, ChipCmd = 0x37, TxPoll = 0x38, IntrMask = 0x3C, IntrStatus = 0x3E, TxConfig = 0x40, RxConfig = 0x44, RxMissed = 0x4C, Cfg9346 = 0x50, Config0 = 0x51, Config1 = 0x52, Config2 = 0x53, Config3 = 0x54, Config4 = 0x55, Config5 = 0x56, MultiIntr = 0x5C, PHYAR = 0x60, TBICSR = 0x64, TBI_ANAR = 0x68, TBI_LPAR = 0x6A, PHYstatus = 0x6C, RxMaxSize = 0xDA, CPlusCmd = 0xE0, IntrMitigate = 0xE2, RxDescAddrLow = 0xE4, RxDescAddrHigh = 0xE8, EarlyTxThres = 0xEC, FuncEvent = 0xF0, FuncEventMask = 0xF4, FuncPresetState = 0xF8, FuncForceEvent = 0xFC, }; enum RTL8169_register_content { /* InterruptStatusBits */ SYSErr = 0x8000, PCSTimeout = 0x4000, SWInt = 0x0100, TxDescUnavail = 0x80, RxFIFOOver = 0x40, LinkChg = 0x20, RxOverflow = 0x10, TxErr = 0x08, TxOK = 0x04, RxErr = 0x02, RxOK = 0x01, /* RxStatusDesc */ RxFOVF = (1 << 23), RxRWT = (1 << 22), RxRES = (1 << 21), RxRUNT = (1 << 20), RxCRC = (1 << 19), /* ChipCmdBits */ CmdReset = 0x10, CmdRxEnb = 0x08, CmdTxEnb = 0x04, RxBufEmpty = 0x01, /* Cfg9346Bits */ Cfg9346_Lock = 0x00, Cfg9346_Unlock = 0xC0, /* rx_mode_bits */ AcceptErr = 0x20, AcceptRunt = 0x10, AcceptBroadcast = 0x08, AcceptMulticast = 0x04, AcceptMyPhys = 0x02, AcceptAllPhys = 0x01, /* RxConfigBits */ RxCfgFIFOShift = 13, RxCfgDMAShift = 8, /* TxConfigBits */ TxInterFrameGapShift = 24, TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ /* Config1 register p.24 */ PMEnable = (1 << 0), /* Power Management Enable */ /* Config3 register p.25 */ MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ /* Config5 register p.27 */ BWF = (1 << 6), /* Accept Broadcast wakeup frame */ MWF = (1 << 5), /* Accept Multicast wakeup frame */ UWF = (1 << 4), /* Accept Unicast wakeup frame */ LanWake = (1 << 1), /* LanWake enable/disable */ PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ /* TBICSR p.28 */ TBIReset = 0x80000000, TBILoopback = 0x40000000, TBINwEnable = 0x20000000, TBINwRestart = 0x10000000, TBILinkOk = 0x02000000, TBINwComplete = 0x01000000, /* CPlusCmd p.31 */ RxVlan = (1 << 6), RxChkSum = (1 << 5), PCIDAC = (1 << 4), PCIMulRW = (1 << 3), /* rtl8169_PHYstatus */ TBI_Enable = 0x80, TxFlowCtrl = 0x40, RxFlowCtrl = 0x20, _1000bpsF = 0x10, _100bps = 0x08, _10bps = 0x04, LinkStatus = 0x02, FullDup = 0x01, /* _MediaType */ _10_Half = 0x01, _10_Full = 0x02, _100_Half = 0x04, _100_Full = 0x08, _1000_Full = 0x10, /* _TBICSRBit */ TBILinkOK = 0x02000000, /* DumpCounterCommand */ CounterDump = 0x8, }; enum _DescStatusBit { DescOwn = (1 << 31), /* Descriptor is owned by NIC */ RingEnd = (1 << 30), /* End of descriptor ring */ FirstFrag = (1 << 29), /* First segment of a packet */ LastFrag = (1 << 28), /* Final segment of a packet */ /* Tx private */ LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */ MSSShift = 16, /* MSS value position */ MSSMask = 0xfff, /* MSS 
value + LargeSend bit: 12 bits */ IPCS = (1 << 18), /* Calculate IP checksum */ UDPCS = (1 << 17), /* Calculate UDP/IP checksum */ TCPCS = (1 << 16), /* Calculate TCP/IP checksum */ TxVlanTag = (1 << 17), /* Add VLAN tag */ /* Rx private */ PID1 = (1 << 18), /* Protocol ID bit 1/2 */ PID0 = (1 << 17), /* Protocol ID bit 2/2 */ #define RxProtoUDP (PID1) #define RxProtoTCP (PID0) #define RxProtoIP (PID1 | PID0) #define RxProtoMask RxProtoIP IPFail = (1 << 16), /* IP checksum failed */ UDPFail = (1 << 15), /* UDP/IP checksum failed */ TCPFail = (1 << 14), /* TCP/IP checksum failed */ RxVlanTag = (1 << 16), /* VLAN tag available */ }; #define RsvdMask 0x3fffc000 struct TxDesc { u32 opts1; u32 opts2; u64 addr; }; struct RxDesc { u32 opts1; u32 opts2; u64 addr; }; struct ring_info { struct sk_buff *skb; u32 len; u8 __pad[sizeof(void *) - sizeof(u32)]; }; struct rtl8169_private { void __iomem *mmio_addr; /* memory map physical address */ struct pci_dev *pci_dev; /* Index of PCI device */ struct net_device *dev; struct net_device_stats stats; /* statistics of net device */ spinlock_t lock; /* spin lock flag */ u32 msg_enable; int chipset; int mac_version; int phy_version; u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */ u32 dirty_rx; u32 dirty_tx; struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ dma_addr_t TxPhyAddr; dma_addr_t RxPhyAddr; struct sk_buff *Rx_skbuff[NUM_RX_DESC]; /* Rx data buffers */ struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ unsigned align; unsigned rx_buf_sz; struct timer_list timer; u16 cp_cmd; u16 intr_mask; int phy_auto_nego_reg; int phy_1000_ctrl_reg; #ifdef CONFIG_R8169_VLAN struct vlan_group *vlgrp; #endif int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex); void (*get_settings)(struct net_device *, struct ethtool_cmd *); void (*phy_reset_enable)(void __iomem *); unsigned int (*phy_reset_pending)(void __iomem *); unsigned int (*link_ok)(void __iomem *); struct delayed_work task; unsigned wol_enabled : 1; }; MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); module_param_array(media, int, &num_media, 0); MODULE_PARM_DESC(media, "force phy operation. Deprecated by ethtool (8)."); module_param(rx_copybreak, int, 0); MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); module_param(use_dac, int, 0); MODULE_PARM_DESC(use_dac, "Enable PCI DAC. 
Unsafe on 32 bit PCI slot."); module_param_named(debug, debug.msg_enable, int, 0); MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); MODULE_LICENSE("GPL"); MODULE_VERSION(RTL8169_VERSION); static int rtl8169_open(struct net_device *dev); static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev); static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance); static int rtl8169_init_ring(struct net_device *dev); static void rtl8169_hw_start(struct net_device *dev); static int rtl8169_close(struct net_device *dev); static void rtl8169_set_rx_mode(struct net_device *dev); static void rtl8169_tx_timeout(struct net_device *dev); static struct net_device_stats *rtl8169_get_stats(struct net_device *dev); static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *, void __iomem *); static int rtl8169_change_mtu(struct net_device *dev, int new_mtu); static void rtl8169_down(struct net_device *dev); static void rtl8169_rx_clear(struct rtl8169_private *tp); #ifdef CONFIG_R8169_NAPI static int rtl8169_poll(struct net_device *dev, int *budget); #endif static const u16 rtl8169_intr_mask = SYSErr | LinkChg | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK; static const u16 rtl8169_napi_event = RxOK | RxOverflow | RxFIFOOver | TxOK | TxErr; static const unsigned int rtl8169_rx_config = (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift); static void mdio_write(void __iomem *ioaddr, int RegAddr, int value) { int i; RTL_W32(PHYAR, 0x80000000 | (RegAddr & 0xFF) << 16 | value); for (i = 20; i > 0; i--) { /* Check if the RTL8169 has completed writing to the specified MII register */ if (!(RTL_R32(PHYAR) & 0x80000000)) break; udelay(25); } } static int mdio_read(void __iomem *ioaddr, int RegAddr) { int i, value = -1; RTL_W32(PHYAR, 0x0 | (RegAddr & 0xFF) << 16); for (i = 20; i > 0; i--) { /* Check if the RTL8169 has completed retrieving data from the specified MII register */ if (RTL_R32(PHYAR) & 0x80000000) { value = (int) (RTL_R32(PHYAR) & 0xFFFF); break; } udelay(25); } return value; } static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr) { RTL_W16(IntrMask, 0x0000); RTL_W16(IntrStatus, 0xffff); } static void rtl8169_asic_down(void __iomem *ioaddr) { RTL_W8(ChipCmd, 0x00); rtl8169_irq_mask_and_ack(ioaddr); RTL_R16(CPlusCmd); } static unsigned int rtl8169_tbi_reset_pending(void __iomem *ioaddr) { return RTL_R32(TBICSR) & TBIReset; } static unsigned int rtl8169_xmii_reset_pending(void __iomem *ioaddr) { return mdio_read(ioaddr, MII_BMCR) & BMCR_RESET; } static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr) { return RTL_R32(TBICSR) & TBILinkOk; } static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr) { return RTL_R8(PHYstatus) & LinkStatus; } static void rtl8169_tbi_reset_enable(void __iomem *ioaddr) { RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset); } static void rtl8169_xmii_reset_enable(void __iomem *ioaddr) { unsigned int val; val = mdio_read(ioaddr, MII_BMCR) | BMCR_RESET; mdio_write(ioaddr, MII_BMCR, val & 0xffff); } static void rtl8169_check_link_status(struct net_device *dev, struct rtl8169_private *tp, void __iomem *ioaddr) { unsigned long flags; spin_lock_irqsave(&tp->lock, flags); if (tp->link_ok(ioaddr)) { netif_carrier_on(dev); if (netif_msg_ifup(tp)) printk(KERN_INFO PFX "%s: link up\n", dev->name); } else { if (netif_msg_ifdown(tp)) printk(KERN_INFO PFX "%s: link down\n", dev->name); netif_carrier_off(dev); } spin_unlock_irqrestore(&tp->lock, flags); } static void rtl8169_link_option(int idx, u8 
*autoneg, u16 *speed, u8 *duplex) { struct { u16 speed; u8 duplex; u8 autoneg; u8 media; } link_settings[] = { { SPEED_10, DUPLEX_HALF, AUTONEG_DISABLE, _10_Half }, { SPEED_10, DUPLEX_FULL, AUTONEG_DISABLE, _10_Full }, { SPEED_100, DUPLEX_HALF, AUTONEG_DISABLE, _100_Half }, { SPEED_100, DUPLEX_FULL, AUTONEG_DISABLE, _100_Full }, { SPEED_1000, DUPLEX_FULL, AUTONEG_DISABLE, _1000_Full }, /* Make TBI happy */ { SPEED_1000, DUPLEX_FULL, AUTONEG_ENABLE, 0xff } }, *p; unsigned char option; option = ((idx < MAX_UNITS) && (idx >= 0)) ? media[idx] : 0xff; if ((option != 0xff) && !idx && netif_msg_drv(&debug)) printk(KERN_WARNING PFX "media option is deprecated.\n"); for (p = link_settings; p->media != 0xff; p++) { if (p->media == option) break; } *autoneg = p->autoneg; *speed = p->speed; *duplex = p->duplex; } static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; u8 options; wol->wolopts = 0; #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) wol->supported = WAKE_ANY; spin_lock_irq(&tp->lock); options = RTL_R8(Config1); if (!(options & PMEnable)) goto out_unlock; options = RTL_R8(Config3); if (options & LinkUp) wol->wolopts |= WAKE_PHY; if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC; options = RTL_R8(Config5); if (options & UWF) wol->wolopts |= WAKE_UCAST; if (options & BWF) wol->wolopts |= WAKE_BCAST; if (options & MWF) wol->wolopts |= WAKE_MCAST; out_unlock: spin_unlock_irq(&tp->lock); } static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; int i; static struct { u32 opt; u16 reg; u8 mask; } cfg[] = { { WAKE_ANY, Config1, PMEnable }, { WAKE_PHY, Config3, LinkUp }, { WAKE_MAGIC, Config3, MagicPacket }, { WAKE_UCAST, Config5, UWF }, { WAKE_BCAST, Config5, BWF }, { WAKE_MCAST, Config5, MWF }, { WAKE_ANY, Config5, LanWake } }; spin_lock_irq(&tp->lock); RTL_W8(Cfg9346, Cfg9346_Unlock); for (i = 0; i < ARRAY_SIZE(cfg); i++) { u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; if (wol->wolopts & cfg[i].opt) options |= cfg[i].mask; RTL_W8(cfg[i].reg, options); } RTL_W8(Cfg9346, Cfg9346_Lock); tp->wol_enabled = (wol->wolopts) ? 
1 : 0; spin_unlock_irq(&tp->lock); return 0; } static void rtl8169_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct rtl8169_private *tp = netdev_priv(dev); strcpy(info->driver, MODULENAME); strcpy(info->version, RTL8169_VERSION); strcpy(info->bus_info, pci_name(tp->pci_dev)); } static int rtl8169_get_regs_len(struct net_device *dev) { return R8169_REGS_SIZE; } static int rtl8169_set_speed_tbi(struct net_device *dev, u8 autoneg, u16 speed, u8 duplex) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; int ret = 0; u32 reg; reg = RTL_R32(TBICSR); if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) && (duplex == DUPLEX_FULL)) { RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart)); } else if (autoneg == AUTONEG_ENABLE) RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart); else { if (netif_msg_link(tp)) { printk(KERN_WARNING "%s: " "incorrect speed setting refused in TBI mode\n", dev->name); } ret = -EOPNOTSUPP; } return ret; } static int rtl8169_set_speed_xmii(struct net_device *dev, u8 autoneg, u16 speed, u8 duplex) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; int auto_nego, giga_ctrl; auto_nego = mdio_read(ioaddr, MII_ADVERTISE); auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL | ADVERTISE_100HALF | ADVERTISE_100FULL); giga_ctrl = mdio_read(ioaddr, MII_CTRL1000); giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); if (autoneg == AUTONEG_ENABLE) { auto_nego |= (ADVERTISE_10HALF | ADVERTISE_10FULL | ADVERTISE_100HALF | ADVERTISE_100FULL); giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF; } else { if (speed == SPEED_10) auto_nego |= ADVERTISE_10HALF | ADVERTISE_10FULL; else if (speed == SPEED_100) auto_nego |= ADVERTISE_100HALF | ADVERTISE_100FULL; else if (speed == SPEED_1000) giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF; if (duplex == DUPLEX_HALF) auto_nego &= ~(ADVERTISE_10FULL | ADVERTISE_100FULL); if (duplex == DUPLEX_FULL) auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_100HALF); /* This tweak comes straight from Realtek's driver. */ if ((speed == SPEED_100) && (duplex == DUPLEX_HALF) && (tp->mac_version == RTL_GIGA_MAC_VER_13)) { auto_nego = ADVERTISE_100HALF | ADVERTISE_CSMA; } } /* The 8100e/8101e do Fast Ethernet only. 
*/ if ((tp->mac_version == RTL_GIGA_MAC_VER_13) || (tp->mac_version == RTL_GIGA_MAC_VER_14) || (tp->mac_version == RTL_GIGA_MAC_VER_15)) { if ((giga_ctrl & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) && netif_msg_link(tp)) { printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n", dev->name); } giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); } auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; tp->phy_auto_nego_reg = auto_nego; tp->phy_1000_ctrl_reg = giga_ctrl; mdio_write(ioaddr, MII_ADVERTISE, auto_nego); mdio_write(ioaddr, MII_CTRL1000, giga_ctrl); mdio_write(ioaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART); return 0; } static int rtl8169_set_speed(struct net_device *dev, u8 autoneg, u16 speed, u8 duplex) { struct rtl8169_private *tp = netdev_priv(dev); int ret; ret = tp->set_speed(dev, autoneg, speed, duplex); if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)) mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT); return ret; } static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct rtl8169_private *tp = netdev_priv(dev); unsigned long flags; int ret; spin_lock_irqsave(&tp->lock, flags); ret = rtl8169_set_speed(dev, cmd->autoneg, cmd->speed, cmd->duplex); spin_unlock_irqrestore(&tp->lock, flags); return ret; } static u32 rtl8169_get_rx_csum(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); return tp->cp_cmd & RxChkSum; } static int rtl8169_set_rx_csum(struct net_device *dev, u32 data) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; unsigned long flags; spin_lock_irqsave(&tp->lock, flags); if (data) tp->cp_cmd |= RxChkSum; else tp->cp_cmd &= ~RxChkSum; RTL_W16(CPlusCmd, tp->cp_cmd); RTL_R16(CPlusCmd); spin_unlock_irqrestore(&tp->lock, flags); return 0; } #ifdef CONFIG_R8169_VLAN static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp, struct sk_buff *skb) { return (tp->vlgrp && vlan_tx_tag_present(skb)) ? TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; } static void rtl8169_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; unsigned long flags; spin_lock_irqsave(&tp->lock, flags); tp->vlgrp = grp; if (tp->vlgrp) tp->cp_cmd |= RxVlan; else tp->cp_cmd &= ~RxVlan; RTL_W16(CPlusCmd, tp->cp_cmd); RTL_R16(CPlusCmd); spin_unlock_irqrestore(&tp->lock, flags); } static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc, struct sk_buff *skb) { u32 opts2 = le32_to_cpu(desc->opts2); int ret; if (tp->vlgrp && (opts2 & RxVlanTag)) { rtl8169_rx_hwaccel_skb(skb, tp->vlgrp, swab16(opts2 & 0xffff)); ret = 0; } else ret = -1; desc->opts2 = 0; return ret; } #else /* !CONFIG_R8169_VLAN */ static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp, struct sk_buff *skb) { return 0; } static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc, struct sk_buff *skb) { return -1; } #endif static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; u32 status; cmd->supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE; cmd->port = PORT_FIBRE; cmd->transceiver = XCVR_INTERNAL; status = RTL_R32(TBICSR); cmd->advertising = (status & TBINwEnable) ? 
ADVERTISED_Autoneg : 0; cmd->autoneg = !!(status & TBINwEnable); cmd->speed = SPEED_1000; cmd->duplex = DUPLEX_FULL; /* Always set */ } static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; u8 status; cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP; cmd->autoneg = 1; cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg; if (tp->phy_auto_nego_reg & ADVERTISE_10HALF) cmd->advertising |= ADVERTISED_10baseT_Half; if (tp->phy_auto_nego_reg & ADVERTISE_10FULL) cmd->advertising |= ADVERTISED_10baseT_Full; if (tp->phy_auto_nego_reg & ADVERTISE_100HALF) cmd->advertising |= ADVERTISED_100baseT_Half; if (tp->phy_auto_nego_reg & ADVERTISE_100FULL) cmd->advertising |= ADVERTISED_100baseT_Full; if (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL) cmd->advertising |= ADVERTISED_1000baseT_Full; status = RTL_R8(PHYstatus); if (status & _1000bpsF) cmd->speed = SPEED_1000; else if (status & _100bps) cmd->speed = SPEED_100; else if (status & _10bps) cmd->speed = SPEED_10; if (status & TxFlowCtrl) cmd->advertising |= ADVERTISED_Asym_Pause; if (status & RxFlowCtrl) cmd->advertising |= ADVERTISED_Pause; cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ? DUPLEX_FULL : DUPLEX_HALF; } static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct rtl8169_private *tp = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&tp->lock, flags); tp->get_settings(dev, cmd); spin_unlock_irqrestore(&tp->lock, flags); return 0; } static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { struct rtl8169_private *tp = netdev_priv(dev); unsigned long flags; if (regs->len > R8169_REGS_SIZE) regs->len = R8169_REGS_SIZE; spin_lock_irqsave(&tp->lock, flags); memcpy_fromio(p, tp->mmio_addr, regs->len); spin_unlock_irqrestore(&tp->lock, flags); } static u32 rtl8169_get_msglevel(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); return tp->msg_enable; } static void rtl8169_set_msglevel(struct net_device *dev, u32 value) { struct rtl8169_private *tp = netdev_priv(dev); tp->msg_enable = value; } static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = { "tx_packets", "rx_packets", "tx_errors", "rx_errors", "rx_missed", "align_errors", "tx_single_collisions", "tx_multi_collisions", "unicast", "broadcast", "multicast", "tx_aborted", "tx_underrun", }; struct rtl8169_counters { u64 tx_packets; u64 rx_packets; u64 tx_errors; u32 rx_errors; u16 rx_missed; u16 align_errors; u32 tx_one_collision; u32 tx_multi_collision; u64 rx_unicast; u64 rx_broadcast; u32 rx_multicast; u16 tx_aborted; u16 tx_underun; }; static int rtl8169_get_stats_count(struct net_device *dev) { return ARRAY_SIZE(rtl8169_gstrings); } static void rtl8169_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; struct rtl8169_counters *counters; dma_addr_t paddr; u32 cmd; ASSERT_RTNL(); counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr); if (!counters) return; RTL_W32(CounterAddrHigh, (u64)paddr >> 32); cmd = (u64)paddr & DMA_32BIT_MASK; RTL_W32(CounterAddrLow, cmd); RTL_W32(CounterAddrLow, cmd | CounterDump); while (RTL_R32(CounterAddrLow) & CounterDump) { if (msleep_interruptible(1)) break; } RTL_W32(CounterAddrLow, 
0); RTL_W32(CounterAddrHigh, 0); data[0] = le64_to_cpu(counters->tx_packets); data[1] = le64_to_cpu(counters->rx_packets); data[2] = le64_to_cpu(counters->tx_errors); data[3] = le32_to_cpu(counters->rx_errors); data[4] = le16_to_cpu(counters->rx_missed); data[5] = le16_to_cpu(counters->align_errors); data[6] = le32_to_cpu(counters->tx_one_collision); data[7] = le32_to_cpu(counters->tx_multi_collision); data[8] = le64_to_cpu(counters->rx_unicast); data[9] = le64_to_cpu(counters->rx_broadcast); data[10] = le32_to_cpu(counters->rx_multicast); data[11] = le16_to_cpu(counters->tx_aborted); data[12] = le16_to_cpu(counters->tx_underun); pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr); } static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) { switch(stringset) { case ETH_SS_STATS: memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings)); break; } } static const struct ethtool_ops rtl8169_ethtool_ops = { .get_drvinfo = rtl8169_get_drvinfo, .get_regs_len = rtl8169_get_regs_len, .get_link = ethtool_op_get_link, .get_settings = rtl8169_get_settings, .set_settings = rtl8169_set_settings, .get_msglevel = rtl8169_get_msglevel, .set_msglevel = rtl8169_set_msglevel, .get_rx_csum = rtl8169_get_rx_csum, .set_rx_csum = rtl8169_set_rx_csum, .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, .get_tso = ethtool_op_get_tso, .set_tso = ethtool_op_set_tso, .get_regs = rtl8169_get_regs, .get_wol = rtl8169_get_wol, .set_wol = rtl8169_set_wol, .get_strings = rtl8169_get_strings, .get_stats_count = rtl8169_get_stats_count, .get_ethtool_stats = rtl8169_get_ethtool_stats, .get_perm_addr = ethtool_op_get_perm_addr, }; static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, int bitnum, int bitval) { int val; val = mdio_read(ioaddr, reg); val = (bitval == 1) ? 
val | (bitval << bitnum) : val & ~(0x0001 << bitnum); mdio_write(ioaddr, reg, val & 0xffff); } static void rtl8169_get_mac_version(struct rtl8169_private *tp, void __iomem *ioaddr) { const struct { u32 mask; int mac_version; } mac_info[] = { { 0x38800000, RTL_GIGA_MAC_VER_15 }, { 0x38000000, RTL_GIGA_MAC_VER_12 }, { 0x34000000, RTL_GIGA_MAC_VER_13 }, { 0x30800000, RTL_GIGA_MAC_VER_14 }, { 0x30000000, RTL_GIGA_MAC_VER_11 }, { 0x18000000, RTL_GIGA_MAC_VER_05 }, { 0x10000000, RTL_GIGA_MAC_VER_04 }, { 0x04000000, RTL_GIGA_MAC_VER_03 }, { 0x00800000, RTL_GIGA_MAC_VER_02 }, { 0x00000000, RTL_GIGA_MAC_VER_01 } /* Catch-all */ }, *p = mac_info; u32 reg; reg = RTL_R32(TxConfig) & 0x7c800000; while ((reg & p->mask) != p->mask) p++; tp->mac_version = p->mac_version; } static void rtl8169_print_mac_version(struct rtl8169_private *tp) { dprintk("mac_version = 0x%02x\n", tp->mac_version); } static void rtl8169_get_phy_version(struct rtl8169_private *tp, void __iomem *ioaddr) { const struct { u16 mask; u16 set; int phy_version; } phy_info[] = { { 0x000f, 0x0002, RTL_GIGA_PHY_VER_G }, { 0x000f, 0x0001, RTL_GIGA_PHY_VER_F }, { 0x000f, 0x0000, RTL_GIGA_PHY_VER_E }, { 0x0000, 0x0000, RTL_GIGA_PHY_VER_D } /* Catch-all */ }, *p = phy_info; u16 reg; reg = mdio_read(ioaddr, MII_PHYSID2) & 0xffff; while ((reg & p->mask) != p->set) p++; tp->phy_version = p->phy_version; } static void rtl8169_print_phy_version(struct rtl8169_private *tp) { struct { int version; char *msg; u32 reg; } phy_print[] = { { RTL_GIGA_PHY_VER_G, "RTL_GIGA_PHY_VER_G", 0x0002 }, { RTL_GIGA_PHY_VER_F, "RTL_GIGA_PHY_VER_F", 0x0001 }, { RTL_GIGA_PHY_VER_E, "RTL_GIGA_PHY_VER_E", 0x0000 }, { RTL_GIGA_PHY_VER_D, "RTL_GIGA_PHY_VER_D", 0x0000 }, { 0, NULL, 0x0000 } }, *p; for (p = phy_print; p->msg; p++) { if (tp->phy_version == p->version) { dprintk("phy_version == %s (%04x)\n", p->msg, p->reg); return; } } dprintk("phy_version == Unknown\n"); } static void rtl8169_hw_phy_config(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; struct { u16 regs[5]; /* Beware of bit-sign propagation */ } phy_magic[5] = { { { 0x0000, //w 4 15 12 0 0x00a1, //w 3 15 0 00a1 0x0008, //w 2 15 0 0008 0x1020, //w 1 15 0 1020 0x1000 } },{ //w 0 15 0 1000 { 0x7000, //w 4 15 12 7 0xff41, //w 3 15 0 ff41 0xde60, //w 2 15 0 de60 0x0140, //w 1 15 0 0140 0x0077 } },{ //w 0 15 0 0077 { 0xa000, //w 4 15 12 a 0xdf01, //w 3 15 0 df01 0xdf20, //w 2 15 0 df20 0xff95, //w 1 15 0 ff95 0xfa00 } },{ //w 0 15 0 fa00 { 0xb000, //w 4 15 12 b 0xff41, //w 3 15 0 ff41 0xde20, //w 2 15 0 de20 0x0140, //w 1 15 0 0140 0x00bb } },{ //w 0 15 0 00bb { 0xf000, //w 4 15 12 f 0xdf01, //w 3 15 0 df01 0xdf20, //w 2 15 0 df20 0xff95, //w 1 15 0 ff95 0xbf00 } //w 0 15 0 bf00 } }, *p = phy_magic; int i; rtl8169_print_mac_version(tp); rtl8169_print_phy_version(tp); if (tp->mac_version <= RTL_GIGA_MAC_VER_01) return; if (tp->phy_version >= RTL_GIGA_PHY_VER_H) return; dprintk("MAC version != 0 && PHY version == 0 or 1\n"); dprintk("Do final_reg2.cfg\n"); /* Shazam ! 
*/ if (tp->mac_version == RTL_GIGA_MAC_VER_04) { mdio_write(ioaddr, 31, 0x0002); mdio_write(ioaddr, 1, 0x90d0); mdio_write(ioaddr, 31, 0x0000); return; } /* phy config for RTL8169s mac_version C chip */ mdio_write(ioaddr, 31, 0x0001); //w 31 2 0 1 mdio_write(ioaddr, 21, 0x1000); //w 21 15 0 1000 mdio_write(ioaddr, 24, 0x65c7); //w 24 15 0 65c7 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0 for (i = 0; i < ARRAY_SIZE(phy_magic); i++, p++) { int val, pos = 4; val = (mdio_read(ioaddr, pos) & 0x0fff) | (p->regs[0] & 0xffff); mdio_write(ioaddr, pos, val); while (--pos >= 0) mdio_write(ioaddr, pos, p->regs[4 - pos] & 0xffff); rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 1); //w 4 11 11 1 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0 } mdio_write(ioaddr, 31, 0x0000); //w 31 2 0 0 } static void rtl8169_phy_timer(unsigned long __opaque) { struct net_device *dev = (struct net_device *)__opaque; struct rtl8169_private *tp = netdev_priv(dev); struct timer_list *timer = &tp->timer; void __iomem *ioaddr = tp->mmio_addr; unsigned long timeout = RTL8169_PHY_TIMEOUT; assert(tp->mac_version > RTL_GIGA_MAC_VER_01); assert(tp->phy_version < RTL_GIGA_PHY_VER_H); if (!(tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)) return; spin_lock_irq(&tp->lock); if (tp->phy_reset_pending(ioaddr)) { /* * A busy loop could burn quite a few cycles on today's CPUs. * Let's delay the execution of the timer for a few ticks. */ timeout = HZ/10; goto out_mod_timer; } if (tp->link_ok(ioaddr)) goto out_unlock; if (netif_msg_link(tp)) printk(KERN_WARNING "%s: PHY reset until link up\n", dev->name); tp->phy_reset_enable(ioaddr); out_mod_timer: mod_timer(timer, jiffies + timeout); out_unlock: spin_unlock_irq(&tp->lock); } static inline void rtl8169_delete_timer(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); struct timer_list *timer = &tp->timer; if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) || (tp->phy_version >= RTL_GIGA_PHY_VER_H)) return; del_timer_sync(timer); } static inline void rtl8169_request_timer(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); struct timer_list *timer = &tp->timer; if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) || (tp->phy_version >= RTL_GIGA_PHY_VER_H)) return; mod_timer(timer, jiffies + RTL8169_PHY_TIMEOUT); } #ifdef CONFIG_NET_POLL_CONTROLLER /* * Polling 'interrupt' - used by things like netconsole to send skbs * without having to re-enable interrupts. It's not called while * the interrupt routine is executing.
*/ static void rtl8169_netpoll(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); struct pci_dev *pdev = tp->pci_dev; disable_irq(pdev->irq); rtl8169_interrupt(pdev->irq, dev); enable_irq(pdev->irq); } #endif static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev, void __iomem *ioaddr) { iounmap(ioaddr); pci_release_regions(pdev); pci_disable_device(pdev); free_netdev(dev); } static void rtl8169_phy_reset(struct net_device *dev, struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; int i; tp->phy_reset_enable(ioaddr); for (i = 0; i < 100; i++) { if (!tp->phy_reset_pending(ioaddr)) return; msleep(1); } if (netif_msg_link(tp)) printk(KERN_ERR "%s: PHY reset failed.\n", dev->name); } static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; static int board_idx = -1; u8 autoneg, duplex; u16 speed; board_idx++; rtl8169_hw_phy_config(dev); dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); RTL_W8(0x82, 0x01); if (tp->mac_version < RTL_GIGA_MAC_VER_03) { dprintk("Set PCI Latency=0x40\n"); pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); } if (tp->mac_version == RTL_GIGA_MAC_VER_02) { dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); RTL_W8(0x82, 0x01); dprintk("Set PHY Reg 0x0bh = 0x00h\n"); mdio_write(ioaddr, 0x0b, 0x0000); //w 0x0b 15 0 0 } rtl8169_link_option(board_idx, &autoneg, &speed, &duplex); rtl8169_phy_reset(dev, tp); rtl8169_set_speed(dev, autoneg, speed, duplex); if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp)) printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name); } static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct rtl8169_private *tp = netdev_priv(dev); struct mii_ioctl_data *data = if_mii(ifr); if (!netif_running(dev)) return -ENODEV; switch (cmd) { case SIOCGMIIPHY: data->phy_id = 32; /* Internal PHY */ return 0; case SIOCGMIIREG: data->val_out = mdio_read(tp->mmio_addr, data->reg_num & 0x1f); return 0; case SIOCSMIIREG: if (!capable(CAP_NET_ADMIN)) return -EPERM; mdio_write(tp->mmio_addr, data->reg_num & 0x1f, data->val_in); return 0; } return -EOPNOTSUPP; } static int __devinit rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { const unsigned int region = rtl_cfg_info[ent->driver_data].region; struct rtl8169_private *tp; struct net_device *dev; void __iomem *ioaddr; unsigned int pm_cap; int i, rc; if (netif_msg_drv(&debug)) { printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n", MODULENAME, RTL8169_VERSION); } dev = alloc_etherdev(sizeof (*tp)); if (!dev) { if (netif_msg_drv(&debug)) dev_err(&pdev->dev, "unable to alloc new ethernet\n"); rc = -ENOMEM; goto out; } SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); tp = netdev_priv(dev); tp->dev = dev; tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ rc = pci_enable_device(pdev); if (rc < 0) { if (netif_msg_probe(tp)) dev_err(&pdev->dev, "enable failure\n"); goto err_out_free_dev_1; } rc = pci_set_mwi(pdev); if (rc < 0) goto err_out_disable_2; /* save power state before pci_enable_device overwrites it */ pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); if (pm_cap) { u16 pwr_command, acpi_idle_state; pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command); acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK; } else { if (netif_msg_probe(tp)) { dev_err(&pdev->dev, "PowerManagement capability not found.\n"); } } /* make sure PCI base addr 1 is MMIO */ if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) { if (netif_msg_probe(tp)) { dev_err(&pdev->dev, "region #%d not an MMIO resource, aborting\n", region); } rc = -ENODEV; goto err_out_mwi_3; } /* check for weird/broken PCI region reporting */ if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) { if (netif_msg_probe(tp)) { dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n"); } rc = -ENODEV; goto err_out_mwi_3; } rc = pci_request_regions(pdev, MODULENAME); if (rc < 0) { if (netif_msg_probe(tp)) dev_err(&pdev->dev, "could not request regions.\n"); goto err_out_mwi_3; } tp->cp_cmd = PCIMulRW | RxChkSum; if ((sizeof(dma_addr_t) > 4) && !pci_set_dma_mask(pdev, DMA_64BIT_MASK) && use_dac) { tp->cp_cmd |= PCIDAC; dev->features |= NETIF_F_HIGHDMA; } else { rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); if (rc < 0) { if (netif_msg_probe(tp)) { dev_err(&pdev->dev, "DMA configuration failed.\n"); } goto err_out_free_res_4; } } pci_set_master(pdev); /* ioremap MMIO region */ ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE); if (!ioaddr) { if (netif_msg_probe(tp)) dev_err(&pdev->dev, "cannot remap MMIO, aborting\n"); rc = -EIO; goto err_out_free_res_4; } /* Unneeded ? Don't mess with Mrs. Murphy. */ rtl8169_irq_mask_and_ack(ioaddr); /* Soft reset the chip. */ RTL_W8(ChipCmd, CmdReset); /* Check that the chip has finished the reset. */ for (i = 100; i > 0; i--) { if ((RTL_R8(ChipCmd) & CmdReset) == 0) break; msleep_interruptible(1); } /* Identify chip attached to board */ rtl8169_get_mac_version(tp, ioaddr); rtl8169_get_phy_version(tp, ioaddr); rtl8169_print_mac_version(tp); rtl8169_print_phy_version(tp); for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) { if (tp->mac_version == rtl_chip_info[i].mac_version) break; } if (i < 0) { /* Unknown chip: assume array element #0, original RTL-8169 */ if (netif_msg_probe(tp)) { dev_printk(KERN_DEBUG, &pdev->dev, "unknown chip version, assuming %s\n", rtl_chip_info[0].name); } i++; } tp->chipset = i; RTL_W8(Cfg9346, Cfg9346_Unlock); RTL_W8(Config1, RTL_R8(Config1) | PMEnable); RTL_W8(Config5, RTL_R8(Config5) & PMEStatus); RTL_W8(Cfg9346, Cfg9346_Lock); if (RTL_R8(PHYstatus) & TBI_Enable) { tp->set_speed = rtl8169_set_speed_tbi; tp->get_settings = rtl8169_gset_tbi; tp->phy_reset_enable = rtl8169_tbi_reset_enable; tp->phy_reset_pending = rtl8169_tbi_reset_pending; tp->link_ok = rtl8169_tbi_link_ok; tp->phy_1000_ctrl_reg = ADVERTISE_1000FULL; /* Implied by TBI */ } else { tp->set_speed = rtl8169_set_speed_xmii; tp->get_settings = rtl8169_gset_xmii; tp->phy_reset_enable = rtl8169_xmii_reset_enable; tp->phy_reset_pending = rtl8169_xmii_reset_pending; tp->link_ok = rtl8169_xmii_link_ok; dev->do_ioctl = rtl8169_ioctl; } /* Get MAC address. 
FIXME: read EEPROM */ for (i = 0; i < MAC_ADDR_LEN; i++) dev->dev_addr[i] = RTL_R8(MAC0 + i); memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); dev->open = rtl8169_open; dev->hard_start_xmit = rtl8169_start_xmit; dev->get_stats = rtl8169_get_stats; SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops); dev->stop = rtl8169_close; dev->tx_timeout = rtl8169_tx_timeout; dev->set_multicast_list = rtl8169_set_rx_mode; dev->watchdog_timeo = RTL8169_TX_TIMEOUT; dev->irq = pdev->irq; dev->base_addr = (unsigned long) ioaddr; dev->change_mtu = rtl8169_change_mtu; #ifdef CONFIG_R8169_NAPI dev->poll = rtl8169_poll; dev->weight = R8169_NAPI_WEIGHT; #endif #ifdef CONFIG_R8169_VLAN dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; dev->vlan_rx_register = rtl8169_vlan_rx_register; #endif #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = rtl8169_netpoll; #endif tp->intr_mask = 0xffff; tp->pci_dev = pdev; tp->mmio_addr = ioaddr; tp->align = rtl_cfg_info[ent->driver_data].align; init_timer(&tp->timer); tp->timer.data = (unsigned long) dev; tp->timer.function = rtl8169_phy_timer; spin_lock_init(&tp->lock); rc = register_netdev(dev); if (rc < 0) goto err_out_unmap_5; pci_set_drvdata(pdev, dev); if (netif_msg_probe(tp)) { printk(KERN_INFO "%s: %s at 0x%lx, " "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, " "IRQ %d\n", dev->name, rtl_chip_info[tp->chipset].name, dev->base_addr, dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5], dev->irq); } rtl8169_init_phy(dev, tp); out: return rc; err_out_unmap_5: iounmap(ioaddr); err_out_free_res_4: pci_release_regions(pdev); err_out_mwi_3: pci_clear_mwi(pdev); err_out_disable_2: pci_disable_device(pdev); err_out_free_dev_1: free_netdev(dev); goto out; } static void __devexit rtl8169_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct rtl8169_private *tp = netdev_priv(dev); assert(dev != NULL); assert(tp != NULL); flush_scheduled_work(); unregister_netdev(dev); rtl8169_release_board(pdev, dev, tp->mmio_addr); pci_set_drvdata(pdev, NULL); } static void rtl8169_set_rxbufsize(struct rtl8169_private *tp, struct net_device *dev) { unsigned int mtu = dev->mtu; tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE; } static int rtl8169_open(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); struct pci_dev *pdev = tp->pci_dev; int retval = -ENOMEM; rtl8169_set_rxbufsize(tp, dev); /* * Rx and Tx descriptors need 256-byte alignment. * pci_alloc_consistent provides more.
*/ tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES, &tp->TxPhyAddr); if (!tp->TxDescArray) goto out; tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES, &tp->RxPhyAddr); if (!tp->RxDescArray) goto err_free_tx_0; retval = rtl8169_init_ring(dev); if (retval < 0) goto err_free_rx_1; INIT_DELAYED_WORK(&tp->task, NULL); smp_mb(); retval = request_irq(dev->irq, rtl8169_interrupt, IRQF_SHARED, dev->name, dev); if (retval < 0) goto err_release_ring_2; rtl8169_hw_start(dev); rtl8169_request_timer(dev); rtl8169_check_link_status(dev, tp, tp->mmio_addr); out: return retval; err_release_ring_2: rtl8169_rx_clear(tp); err_free_rx_1: pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray, tp->RxPhyAddr); err_free_tx_0: pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray, tp->TxPhyAddr); goto out; } static void rtl8169_hw_reset(void __iomem *ioaddr) { /* Disable interrupts */ rtl8169_irq_mask_and_ack(ioaddr); /* Reset the chipset */ RTL_W8(ChipCmd, CmdReset); /* PCI commit */ RTL_R8(ChipCmd); } static void rtl8169_set_rx_tx_config_registers(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; u32 cfg = rtl8169_rx_config; cfg |= (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask); RTL_W32(RxConfig, cfg); /* Set DMA burst size and Interframe Gap Time */ RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) | (InterFrameGap << TxInterFrameGapShift)); } static void rtl8169_hw_start(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; struct pci_dev *pdev = tp->pci_dev; u16 cmd; u32 i; /* Soft reset the chip. */ RTL_W8(ChipCmd, CmdReset); /* Check that the chip has finished the reset. */ for (i = 100; i > 0; i--) { if ((RTL_R8(ChipCmd) & CmdReset) == 0) break; msleep_interruptible(1); } if (tp->mac_version == RTL_GIGA_MAC_VER_05) { RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW); pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08); } if (tp->mac_version == RTL_GIGA_MAC_VER_13) { pci_write_config_word(pdev, 0x68, 0x00); pci_write_config_word(pdev, 0x69, 0x08); } /* Undocumented stuff. */ if (tp->mac_version == RTL_GIGA_MAC_VER_05) { /* Realtek's r1000_n.c driver uses '&& 0x01' here. Well... */ if ((RTL_R8(Config2) & 0x07) & 0x01) RTL_W32(0x7c, 0x0007ffff); RTL_W32(0x7c, 0x0007ff00); pci_read_config_word(pdev, PCI_COMMAND, &cmd); cmd = cmd & 0xef; pci_write_config_word(pdev, PCI_COMMAND, cmd); } RTL_W8(Cfg9346, Cfg9346_Unlock); if ((tp->mac_version == RTL_GIGA_MAC_VER_01) || (tp->mac_version == RTL_GIGA_MAC_VER_02) || (tp->mac_version == RTL_GIGA_MAC_VER_03) || (tp->mac_version == RTL_GIGA_MAC_VER_04)) RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); RTL_W8(EarlyTxThres, EarlyTxThld); /* Low hurts. Let's disable the filtering. */ RTL_W16(RxMaxSize, 16383); if ((tp->mac_version == RTL_GIGA_MAC_VER_01) || (tp->mac_version == RTL_GIGA_MAC_VER_02) || (tp->mac_version == RTL_GIGA_MAC_VER_03) || (tp->mac_version == RTL_GIGA_MAC_VER_04)) rtl8169_set_rx_tx_config_registers(tp); cmd = RTL_R16(CPlusCmd); RTL_W16(CPlusCmd, cmd); tp->cp_cmd |= cmd | PCIMulRW; if ((tp->mac_version == RTL_GIGA_MAC_VER_02) || (tp->mac_version == RTL_GIGA_MAC_VER_03)) { dprintk(KERN_INFO PFX "Set MAC Reg C+CR Offset 0xE0. " "Bit-3 and bit-14 MUST be 1\n"); tp->cp_cmd |= (1 << 14); } RTL_W16(CPlusCmd, tp->cp_cmd); /* * Undocumented corner. 
Supposedly: * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets */ RTL_W16(IntrMitigate, 0x0000); /* * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh * register to be written before TxDescAddrLow to work. * Switching from MMIO to I/O access fixes the issue as well. */ RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr >> 32)); RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr & DMA_32BIT_MASK)); RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr >> 32)); RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr & DMA_32BIT_MASK)); if ((tp->mac_version != RTL_GIGA_MAC_VER_01) && (tp->mac_version != RTL_GIGA_MAC_VER_02) && (tp->mac_version != RTL_GIGA_MAC_VER_03) && (tp->mac_version != RTL_GIGA_MAC_VER_04)) { RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); rtl8169_set_rx_tx_config_registers(tp); } RTL_W8(Cfg9346, Cfg9346_Lock); /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ RTL_R8(IntrMask); RTL_W32(RxMissed, 0); rtl8169_set_rx_mode(dev); /* no early-rx interrupts */ RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); /* Enable all known interrupts by setting the interrupt mask. */ RTL_W16(IntrMask, rtl8169_intr_mask); netif_start_queue(dev); } static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) { struct rtl8169_private *tp = netdev_priv(dev); int ret = 0; if (new_mtu < ETH_ZLEN || new_mtu > SafeMtu) return -EINVAL; dev->mtu = new_mtu; if (!netif_running(dev)) goto out; rtl8169_down(dev); rtl8169_set_rxbufsize(tp, dev); ret = rtl8169_init_ring(dev); if (ret < 0) goto out; netif_poll_enable(dev); rtl8169_hw_start(dev); rtl8169_request_timer(dev); out: return ret; } static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc) { desc->addr = 0x0badbadbadbadbadull; desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask); } static void rtl8169_free_rx_skb(struct rtl8169_private *tp, struct sk_buff **sk_buff, struct RxDesc *desc) { struct pci_dev *pdev = tp->pci_dev; pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz, PCI_DMA_FROMDEVICE); dev_kfree_skb(*sk_buff); *sk_buff = NULL; rtl8169_make_unusable_by_asic(desc); } static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz) { u32 eor = le32_to_cpu(desc->opts1) & RingEnd; desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz); } static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping, u32 rx_buf_sz) { desc->addr = cpu_to_le64(mapping); wmb(); rtl8169_mark_to_asic(desc, rx_buf_sz); } static int rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff, struct RxDesc *desc, int rx_buf_sz, unsigned int align) { struct sk_buff *skb; dma_addr_t mapping; int ret = 0; skb = dev_alloc_skb(rx_buf_sz + align); if (!skb) goto err_out; skb_reserve(skb, (align - 1) & (unsigned long)skb->data); *sk_buff = skb; mapping = pci_map_single(pdev, skb->data, rx_buf_sz, PCI_DMA_FROMDEVICE); rtl8169_map_to_asic(desc, mapping, rx_buf_sz); out: return ret; err_out: ret = -ENOMEM; rtl8169_make_unusable_by_asic(desc); goto out; } static void rtl8169_rx_clear(struct rtl8169_private *tp) { int i; for (i = 0; i < NUM_RX_DESC; i++) { if (tp->Rx_skbuff[i]) { rtl8169_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescArray + i); } } } static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev, u32 start, u32 end) { u32 cur; for (cur = start; end - cur > 0; cur++) { int ret, i = cur % NUM_RX_DESC; if (tp->Rx_skbuff[i]) continue; ret = rtl8169_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i, tp->RxDescArray + i, tp->rx_buf_sz, tp->align); if (ret < 0) break; } return cur - start; } static 
inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc) { desc->opts1 |= cpu_to_le32(RingEnd); } static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) { tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0; } static int rtl8169_init_ring(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); rtl8169_init_ring_indexes(tp); memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info)); memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *)); if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC) goto err_out; rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1); return 0; err_out: rtl8169_rx_clear(tp); return -ENOMEM; } static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb, struct TxDesc *desc) { unsigned int len = tx_skb->len; pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE); desc->opts1 = 0x00; desc->opts2 = 0x00; desc->addr = 0x00; tx_skb->len = 0; } static void rtl8169_tx_clear(struct rtl8169_private *tp) { unsigned int i; for (i = tp->dirty_tx; i < tp->dirty_tx + NUM_TX_DESC; i++) { unsigned int entry = i % NUM_TX_DESC; struct ring_info *tx_skb = tp->tx_skb + entry; unsigned int len = tx_skb->len; if (len) { struct sk_buff *skb = tx_skb->skb; rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry); if (skb) { dev_kfree_skb(skb); tx_skb->skb = NULL; } tp->stats.tx_dropped++; } } tp->cur_tx = tp->dirty_tx = 0; } static void rtl8169_schedule_work(struct net_device *dev, work_func_t task) { struct rtl8169_private *tp = netdev_priv(dev); PREPARE_DELAYED_WORK(&tp->task, task); schedule_delayed_work(&tp->task, 4); } static void rtl8169_wait_for_quiescence(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; synchronize_irq(dev->irq); /* Wait for any pending NAPI task to complete */ netif_poll_disable(dev); rtl8169_irq_mask_and_ack(ioaddr); netif_poll_enable(dev); } static void rtl8169_reinit_task(struct work_struct *work) { struct rtl8169_private *tp = container_of(work, struct rtl8169_private, task.work); struct net_device *dev = tp->dev; int ret; rtnl_lock(); if (!netif_running(dev)) goto out_unlock; rtl8169_wait_for_quiescence(dev); rtl8169_close(dev); ret = rtl8169_open(dev); if (unlikely(ret < 0)) { if (net_ratelimit()) { struct rtl8169_private *tp = netdev_priv(dev); if (netif_msg_drv(tp)) { printk(PFX KERN_ERR "%s: reinit failure (status = %d)." 
" Rescheduling.\n", dev->name, ret); } } rtl8169_schedule_work(dev, rtl8169_reinit_task); } out_unlock: rtnl_unlock(); } static void rtl8169_reset_task(struct work_struct *work) { struct rtl8169_private *tp = container_of(work, struct rtl8169_private, task.work); struct net_device *dev = tp->dev; rtnl_lock(); if (!netif_running(dev)) goto out_unlock; rtl8169_wait_for_quiescence(dev); rtl8169_rx_interrupt(dev, tp, tp->mmio_addr); rtl8169_tx_clear(tp); if (tp->dirty_rx == tp->cur_rx) { rtl8169_init_ring_indexes(tp); rtl8169_hw_start(dev); netif_wake_queue(dev); } else { if (net_ratelimit()) { struct rtl8169_private *tp = netdev_priv(dev); if (netif_msg_intr(tp)) { printk(PFX KERN_EMERG "%s: Rx buffers shortage\n", dev->name); } } rtl8169_schedule_work(dev, rtl8169_reset_task); } out_unlock: rtnl_unlock(); } static void rtl8169_tx_timeout(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); rtl8169_hw_reset(tp->mmio_addr); /* Let's wait a bit while any (async) irq lands on */ rtl8169_schedule_work(dev, rtl8169_reset_task); } static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, u32 opts1) { struct skb_shared_info *info = skb_shinfo(skb); unsigned int cur_frag, entry; struct TxDesc *txd; entry = tp->cur_tx; for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { skb_frag_t *frag = info->frags + cur_frag; dma_addr_t mapping; u32 status, len; void *addr; entry = (entry + 1) % NUM_TX_DESC; txd = tp->TxDescArray + entry; len = frag->size; addr = ((void *) page_address(frag->page)) + frag->page_offset; mapping = pci_map_single(tp->pci_dev, addr, len, PCI_DMA_TODEVICE); /* anti gcc 2.95.3 bugware (sic) */ status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC)); txd->opts1 = cpu_to_le32(status); txd->addr = cpu_to_le64(mapping); tp->tx_skb[entry].len = len; } if (cur_frag) { tp->tx_skb[entry].skb = skb; txd->opts1 |= cpu_to_le32(LastFrag); } return cur_frag; } static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev) { if (dev->features & NETIF_F_TSO) { u32 mss = skb_shinfo(skb)->gso_size; if (mss) return LargeSend | ((mss & MSSMask) << MSSShift); } if (skb->ip_summed == CHECKSUM_PARTIAL) { const struct iphdr *ip = ip_hdr(skb); if (ip->protocol == IPPROTO_TCP) return IPCS | TCPCS; else if (ip->protocol == IPPROTO_UDP) return IPCS | UDPCS; WARN_ON(1); /* we need a WARN() */ } return 0; } static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); unsigned int frags, entry = tp->cur_tx % NUM_TX_DESC; struct TxDesc *txd = tp->TxDescArray + entry; void __iomem *ioaddr = tp->mmio_addr; dma_addr_t mapping; u32 status, len; u32 opts1; int ret = NETDEV_TX_OK; if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) { if (netif_msg_drv(tp)) { printk(KERN_ERR "%s: BUG! 
Tx Ring full when queue awake!\n", dev->name); } goto err_stop; } if (unlikely(le32_to_cpu(txd->opts1) & DescOwn)) goto err_stop; opts1 = DescOwn | rtl8169_tso_csum(skb, dev); frags = rtl8169_xmit_frags(tp, skb, opts1); if (frags) { len = skb_headlen(skb); opts1 |= FirstFrag; } else { len = skb->len; if (unlikely(len < ETH_ZLEN)) { if (skb_padto(skb, ETH_ZLEN)) goto err_update_stats; len = ETH_ZLEN; } opts1 |= FirstFrag | LastFrag; tp->tx_skb[entry].skb = skb; } mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE); tp->tx_skb[entry].len = len; txd->addr = cpu_to_le64(mapping); txd->opts2 = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb)); wmb(); /* anti gcc 2.95.3 bugware (sic) */ status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC)); txd->opts1 = cpu_to_le32(status); dev->trans_start = jiffies; tp->cur_tx += frags + 1; smp_wmb(); RTL_W8(TxPoll, 0x40); /* set polling bit */ if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) { netif_stop_queue(dev); smp_rmb(); if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS) netif_wake_queue(dev); } out: return ret; err_stop: netif_stop_queue(dev); ret = NETDEV_TX_BUSY; err_update_stats: tp->stats.tx_dropped++; goto out; } static void rtl8169_pcierr_interrupt(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); struct pci_dev *pdev = tp->pci_dev; void __iomem *ioaddr = tp->mmio_addr; u16 pci_status, pci_cmd; pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); pci_read_config_word(pdev, PCI_STATUS, &pci_status); if (netif_msg_intr(tp)) { printk(KERN_ERR "%s: PCI error (cmd = 0x%04x, status = 0x%04x).\n", dev->name, pci_cmd, pci_status); } /* * The recovery sequence below admits a very elaborate explanation: * - it seems to work; * - I did not see what else could be done; * - it makes iop3xx happy. * * Feel free to adjust to your needs.
*/ if (pdev->broken_parity_status) pci_cmd &= ~PCI_COMMAND_PARITY; else pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY; pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); pci_write_config_word(pdev, PCI_STATUS, pci_status & (PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT)); /* The infamous DAC f*ckup only happens at boot time */ if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) { if (netif_msg_intr(tp)) printk(KERN_INFO "%s: disabling PCI DAC.\n", dev->name); tp->cp_cmd &= ~PCIDAC; RTL_W16(CPlusCmd, tp->cp_cmd); dev->features &= ~NETIF_F_HIGHDMA; } rtl8169_hw_reset(ioaddr); rtl8169_schedule_work(dev, rtl8169_reinit_task); } static void rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp, void __iomem *ioaddr) { unsigned int dirty_tx, tx_left; assert(dev != NULL); assert(tp != NULL); assert(ioaddr != NULL); dirty_tx = tp->dirty_tx; smp_rmb(); tx_left = tp->cur_tx - dirty_tx; while (tx_left > 0) { unsigned int entry = dirty_tx % NUM_TX_DESC; struct ring_info *tx_skb = tp->tx_skb + entry; u32 len = tx_skb->len; u32 status; rmb(); status = le32_to_cpu(tp->TxDescArray[entry].opts1); if (status & DescOwn) break; tp->stats.tx_bytes += len; tp->stats.tx_packets++; rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry); if (status & LastFrag) { dev_kfree_skb_irq(tx_skb->skb); tx_skb->skb = NULL; } dirty_tx++; tx_left--; } if (tp->dirty_tx != dirty_tx) { tp->dirty_tx = dirty_tx; smp_wmb(); if (netif_queue_stopped(dev) && (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) { netif_wake_queue(dev); } } } static inline int rtl8169_fragmented_frame(u32 status) { return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag); } static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc) { u32 opts1 = le32_to_cpu(desc->opts1); u32 status = opts1 & RxProtoMask; if (((status == RxProtoTCP) && !(opts1 & TCPFail)) || ((status == RxProtoUDP) && !(opts1 & UDPFail)) || ((status == RxProtoIP) && !(opts1 & IPFail))) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; } static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size, struct RxDesc *desc, int rx_buf_sz, unsigned int align) { int ret = -1; if (pkt_size < rx_copybreak) { struct sk_buff *skb; skb = dev_alloc_skb(pkt_size + align); if (skb) { skb_reserve(skb, (align - 1) & (unsigned long)skb->data); eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0); *sk_buff = skb; rtl8169_mark_to_asic(desc, rx_buf_sz); ret = 0; } } return ret; } static int rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp, void __iomem *ioaddr) { unsigned int cur_rx, rx_left; unsigned int delta, count; assert(dev != NULL); assert(tp != NULL); assert(ioaddr != NULL); cur_rx = tp->cur_rx; rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; rx_left = rtl8169_rx_quota(rx_left, (u32) dev->quota); for (; rx_left > 0; rx_left--, cur_rx++) { unsigned int entry = cur_rx % NUM_RX_DESC; struct RxDesc *desc = tp->RxDescArray + entry; u32 status; rmb(); status = le32_to_cpu(desc->opts1); if (status & DescOwn) break; if (unlikely(status & RxRES)) { if (netif_msg_rx_err(tp)) { printk(KERN_INFO "%s: Rx ERROR. 
status = %08x\n", dev->name, status); } tp->stats.rx_errors++; if (status & (RxRWT | RxRUNT)) tp->stats.rx_length_errors++; if (status & RxCRC) tp->stats.rx_crc_errors++; if (status & RxFOVF) { rtl8169_schedule_work(dev, rtl8169_reset_task); tp->stats.rx_fifo_errors++; } rtl8169_mark_to_asic(desc, tp->rx_buf_sz); } else { struct sk_buff *skb = tp->Rx_skbuff[entry]; int pkt_size = (status & 0x00001FFF) - 4; void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int) = pci_dma_sync_single_for_device; /* * The driver does not support incoming fragmented * frames. They are seen as a symptom of over-mtu * sized frames. */ if (unlikely(rtl8169_fragmented_frame(status))) { tp->stats.rx_dropped++; tp->stats.rx_length_errors++; rtl8169_mark_to_asic(desc, tp->rx_buf_sz); continue; } rtl8169_rx_csum(skb, desc); pci_dma_sync_single_for_cpu(tp->pci_dev, le64_to_cpu(desc->addr), tp->rx_buf_sz, PCI_DMA_FROMDEVICE); if (rtl8169_try_rx_copy(&skb, pkt_size, desc, tp->rx_buf_sz, tp->align)) { pci_action = pci_unmap_single; tp->Rx_skbuff[entry] = NULL; } pci_action(tp->pci_dev, le64_to_cpu(desc->addr), tp->rx_buf_sz, PCI_DMA_FROMDEVICE); skb_put(skb, pkt_size); skb->protocol = eth_type_trans(skb, dev); if (rtl8169_rx_vlan_skb(tp, desc, skb) < 0) rtl8169_rx_skb(skb); dev->last_rx = jiffies; tp->stats.rx_bytes += pkt_size; tp->stats.rx_packets++; } } count = cur_rx - tp->cur_rx; tp->cur_rx = cur_rx; delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx); if (!delta && count && netif_msg_intr(tp)) printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name); tp->dirty_rx += delta; /* * FIXME: until there is periodic timer to try and refill the ring, * a temporary shortage may definitely kill the Rx process. * - disable the asic to try and avoid an overflow and kick it again * after refill ? * - how do others driver handle this condition (Uh oh...). */ if ((tp->dirty_rx + NUM_RX_DESC == tp->cur_rx) && netif_msg_intr(tp)) printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name); return count; } /* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) { struct net_device *dev = (struct net_device *) dev_instance; struct rtl8169_private *tp = netdev_priv(dev); int boguscnt = max_interrupt_work; void __iomem *ioaddr = tp->mmio_addr; int status; int handled = 0; do { status = RTL_R16(IntrStatus); /* hotplug/major error/no more work/shared irq */ if ((status == 0xFFFF) || !status) break; handled = 1; if (unlikely(!netif_running(dev))) { rtl8169_asic_down(ioaddr); goto out; } status &= tp->intr_mask; RTL_W16(IntrStatus, (status & RxFIFOOver) ? 
(status | RxOverflow) : status); if (!(status & rtl8169_intr_mask)) break; if (unlikely(status & SYSErr)) { rtl8169_pcierr_interrupt(dev); break; } if (status & LinkChg) rtl8169_check_link_status(dev, tp, ioaddr); #ifdef CONFIG_R8169_NAPI if (status & rtl8169_napi_event) { RTL_W16(IntrMask, rtl8169_intr_mask & ~rtl8169_napi_event); tp->intr_mask = ~rtl8169_napi_event; if (likely(netif_rx_schedule_prep(dev))) __netif_rx_schedule(dev); else if (netif_msg_intr(tp)) { printk(KERN_INFO "%s: interrupt %04x in poll\n", dev->name, status); } } break; #else /* Rx interrupt */ if (status & (RxOK | RxOverflow | RxFIFOOver)) { rtl8169_rx_interrupt(dev, tp, ioaddr); } /* Tx interrupt */ if (status & (TxOK | TxErr)) rtl8169_tx_interrupt(dev, tp, ioaddr); #endif boguscnt--; } while (boguscnt > 0); if (boguscnt <= 0) { if (netif_msg_intr(tp) && net_ratelimit()) { printk(KERN_WARNING "%s: Too much work at interrupt!\n", dev->name); } /* Clear all interrupt sources. */ RTL_W16(IntrStatus, 0xffff); } out: return IRQ_RETVAL(handled); } #ifdef CONFIG_R8169_NAPI static int rtl8169_poll(struct net_device *dev, int *budget) { unsigned int work_done, work_to_do = min(*budget, dev->quota); struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; work_done = rtl8169_rx_interrupt(dev, tp, ioaddr); rtl8169_tx_interrupt(dev, tp, ioaddr); *budget -= work_done; dev->quota -= work_done; if (work_done < work_to_do) { netif_rx_complete(dev); tp->intr_mask = 0xffff; /* * 20040426: the barrier is not strictly required but the * behavior of the irq handler could be less predictable * without it. Btw, the lack of flush for the posted pci * write is safe - FR */ smp_wmb(); RTL_W16(IntrMask, rtl8169_intr_mask); } return (work_done >= work_to_do); } #endif static void rtl8169_down(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; unsigned int poll_locked = 0; unsigned int intrmask; rtl8169_delete_timer(dev); netif_stop_queue(dev); core_down: spin_lock_irq(&tp->lock); rtl8169_asic_down(ioaddr); /* Update the error counts. */ tp->stats.rx_missed_errors += RTL_R32(RxMissed); RTL_W32(RxMissed, 0); spin_unlock_irq(&tp->lock); synchronize_irq(dev->irq); if (!poll_locked) { netif_poll_disable(dev); poll_locked++; } /* Give a racing hard_start_xmit a few cycles to complete. */ synchronize_sched(); /* FIXME: should this be synchronize_irq()? */ /* * And now for the 50k$ question: are IRQs disabled or not? * * Two paths lead here: * 1) dev->close * -> netif_running() is available to sync the current code and the * IRQ handler. See rtl8169_interrupt for details. * 2) dev->change_mtu * -> rtl8169_poll cannot be issued again and re-enable the * interrupts. Let's simply issue the IRQ down sequence again. * * No loop if hotplugged or major error (0xffff).
*/ intrmask = RTL_R16(IntrMask); if (intrmask && (intrmask != 0xffff)) goto core_down; rtl8169_tx_clear(tp); rtl8169_rx_clear(tp); } static int rtl8169_close(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); struct pci_dev *pdev = tp->pci_dev; rtl8169_down(dev); free_irq(dev->irq, dev); netif_poll_enable(dev); pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray, tp->RxPhyAddr); pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray, tp->TxPhyAddr); tp->TxDescArray = NULL; tp->RxDescArray = NULL; return 0; } static void rtl8169_set_rx_mode(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; unsigned long flags; u32 mc_filter[2]; /* Multicast hash filter */ int i, rx_mode; u32 tmp = 0; if (dev->flags & IFF_PROMISC) { /* Unconditionally log net taps. */ if (netif_msg_link(tp)) { printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name); } rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys | AcceptAllPhys; mc_filter[1] = mc_filter[0] = 0xffffffff; } else if ((dev->mc_count > multicast_filter_limit) || (dev->flags & IFF_ALLMULTI)) { /* Too many to filter perfectly -- accept all multicasts. */ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0xffffffff; } else { struct dev_mc_list *mclist; rx_mode = AcceptBroadcast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0; for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) { int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); rx_mode |= AcceptMulticast; } } spin_lock_irqsave(&tp->lock, flags); tmp = rtl8169_rx_config | rx_mode | (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask); if ((tp->mac_version == RTL_GIGA_MAC_VER_11) || (tp->mac_version == RTL_GIGA_MAC_VER_12) || (tp->mac_version == RTL_GIGA_MAC_VER_13) || (tp->mac_version == RTL_GIGA_MAC_VER_14) || (tp->mac_version == RTL_GIGA_MAC_VER_15)) { mc_filter[0] = 0xffffffff; mc_filter[1] = 0xffffffff; } RTL_W32(RxConfig, tmp); RTL_W32(MAR0 + 0, mc_filter[0]); RTL_W32(MAR0 + 4, mc_filter[1]); spin_unlock_irqrestore(&tp->lock, flags); } /** * rtl8169_get_stats - Get rtl8169 read/write statistics * @dev: The Ethernet Device to get statistics for * * Get TX/RX statistics for rtl8169 */ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; unsigned long flags; if (netif_running(dev)) { spin_lock_irqsave(&tp->lock, flags); tp->stats.rx_missed_errors += RTL_R32(RxMissed); RTL_W32(RxMissed, 0); spin_unlock_irqrestore(&tp->lock, flags); } return &tp->stats; } #ifdef CONFIG_PM static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *dev = pci_get_drvdata(pdev); struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; if (!netif_running(dev)) goto out_pci_suspend; netif_device_detach(dev); netif_stop_queue(dev); spin_lock_irq(&tp->lock); rtl8169_asic_down(ioaddr); tp->stats.rx_missed_errors += RTL_R32(RxMissed); RTL_W32(RxMissed, 0); spin_unlock_irq(&tp->lock); out_pci_suspend: pci_save_state(pdev); pci_enable_wake(pdev, pci_choose_state(pdev, state), tp->wol_enabled); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } static int rtl8169_resume(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); 
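/* Back at D0 with config space restored: disarm the wake-up events that rtl8169_suspend() may have armed (editor's comment). */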
pci_enable_wake(pdev, PCI_D0, 0); if (!netif_running(dev)) goto out; netif_device_attach(dev); rtl8169_schedule_work(dev, rtl8169_reset_task); out: return 0; } #endif /* CONFIG_PM */ static struct pci_driver rtl8169_pci_driver = { .name = MODULENAME, .id_table = rtl8169_pci_tbl, .probe = rtl8169_init_one, .remove = __devexit_p(rtl8169_remove_one), #ifdef CONFIG_PM .suspend = rtl8169_suspend, .resume = rtl8169_resume, #endif }; static int __init rtl8169_init_module(void) { return pci_register_driver(&rtl8169_pci_driver); } static void __exit rtl8169_cleanup_module(void) { pci_unregister_driver(&rtl8169_pci_driver); } module_init(rtl8169_init_module); module_exit(rtl8169_cleanup_module);
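/*
 * Editor's note - illustrative sketch, not part of the original driver.
 * rtl8169_set_rx_mode() above builds its 64-bit multicast hash filter by
 * taking the top six bits of the bit-reversed CRC-32 (ether_crc()) of each
 * multicast address and setting the corresponding bit across the two
 * 32-bit MAR words. The helper name below is hypothetical; it only
 * restates that computation in isolation.
 */
static inline void example_rtl8169_hash_mc_addr(const u8 *addr, u32 mc_filter[2])
{
	int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;	/* keep top 6 of 32 CRC bits: 0..63 */

	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);	/* select MAR word, then bit within it */
}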
gpl-2.0
KangProject/N910T
drivers/video/backlight/atmel-pwm-bl.c
288
6002
/* * Copyright (C) 2008 Atmel Corporation * * Backlight driver using Atmel PWM peripheral. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/fb.h> #include <linux/clk.h> #include <linux/gpio.h> #include <linux/backlight.h> #include <linux/atmel_pwm.h> #include <linux/atmel-pwm-bl.h> #include <linux/slab.h> struct atmel_pwm_bl { const struct atmel_pwm_bl_platform_data *pdata; struct backlight_device *bldev; struct platform_device *pdev; struct pwm_channel pwmc; int gpio_on; }; static int atmel_pwm_bl_set_intensity(struct backlight_device *bd) { struct atmel_pwm_bl *pwmbl = bl_get_data(bd); int intensity = bd->props.brightness; int pwm_duty; if (bd->props.power != FB_BLANK_UNBLANK) intensity = 0; if (bd->props.fb_blank != FB_BLANK_UNBLANK) intensity = 0; if (pwmbl->pdata->pwm_active_low) pwm_duty = pwmbl->pdata->pwm_duty_min + intensity; else pwm_duty = pwmbl->pdata->pwm_duty_max - intensity; if (pwm_duty > pwmbl->pdata->pwm_duty_max) pwm_duty = pwmbl->pdata->pwm_duty_max; if (pwm_duty < pwmbl->pdata->pwm_duty_min) pwm_duty = pwmbl->pdata->pwm_duty_min; if (!intensity) { if (pwmbl->gpio_on != -1) { gpio_set_value(pwmbl->gpio_on, 0 ^ pwmbl->pdata->on_active_low); } pwm_channel_writel(&pwmbl->pwmc, PWM_CUPD, pwm_duty); pwm_channel_disable(&pwmbl->pwmc); } else { pwm_channel_enable(&pwmbl->pwmc); pwm_channel_writel(&pwmbl->pwmc, PWM_CUPD, pwm_duty); if (pwmbl->gpio_on != -1) { gpio_set_value(pwmbl->gpio_on, 1 ^ pwmbl->pdata->on_active_low); } } return 0; } static int atmel_pwm_bl_get_intensity(struct backlight_device *bd) { struct atmel_pwm_bl *pwmbl = bl_get_data(bd); u8 intensity; if (pwmbl->pdata->pwm_active_low) { intensity = pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY) - pwmbl->pdata->pwm_duty_min; } else { intensity = pwmbl->pdata->pwm_duty_max - pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY); } return intensity; } static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl) { unsigned long pwm_rate = pwmbl->pwmc.mck; unsigned long prescale = DIV_ROUND_UP(pwm_rate, (pwmbl->pdata->pwm_frequency * pwmbl->pdata->pwm_compare_max)) - 1; /* * Prescale must be power of two and maximum 0xf in size because of * hardware limit. PWM speed will be: * PWM module clock speed / (2 ^ prescale). 
*/ prescale = fls(prescale); if (prescale > 0xf) prescale = 0xf; pwm_channel_writel(&pwmbl->pwmc, PWM_CMR, prescale); pwm_channel_writel(&pwmbl->pwmc, PWM_CDTY, pwmbl->pdata->pwm_duty_min + pwmbl->bldev->props.brightness); pwm_channel_writel(&pwmbl->pwmc, PWM_CPRD, pwmbl->pdata->pwm_compare_max); dev_info(&pwmbl->pdev->dev, "Atmel PWM backlight driver (%lu Hz)\n", pwmbl->pwmc.mck / pwmbl->pdata->pwm_compare_max / (1 << prescale)); return pwm_channel_enable(&pwmbl->pwmc); } static const struct backlight_ops atmel_pwm_bl_ops = { .get_brightness = atmel_pwm_bl_get_intensity, .update_status = atmel_pwm_bl_set_intensity, }; static int __init atmel_pwm_bl_probe(struct platform_device *pdev) { struct backlight_properties props; const struct atmel_pwm_bl_platform_data *pdata; struct backlight_device *bldev; struct atmel_pwm_bl *pwmbl; int retval; pwmbl = devm_kzalloc(&pdev->dev, sizeof(struct atmel_pwm_bl), GFP_KERNEL); if (!pwmbl) return -ENOMEM; pwmbl->pdev = pdev; pdata = pdev->dev.platform_data; if (!pdata) { retval = -ENODEV; goto err_free_mem; } if (pdata->pwm_compare_max < pdata->pwm_duty_max || pdata->pwm_duty_min > pdata->pwm_duty_max || pdata->pwm_frequency == 0) { retval = -EINVAL; goto err_free_mem; } pwmbl->pdata = pdata; pwmbl->gpio_on = pdata->gpio_on; retval = pwm_channel_alloc(pdata->pwm_channel, &pwmbl->pwmc); if (retval) goto err_free_mem; if (pwmbl->gpio_on != -1) { retval = devm_gpio_request(&pdev->dev, pwmbl->gpio_on, "gpio_atmel_pwm_bl"); if (retval) { pwmbl->gpio_on = -1; goto err_free_pwm; } /* Turn display off by default. */ retval = gpio_direction_output(pwmbl->gpio_on, 0 ^ pdata->on_active_low); if (retval) goto err_free_pwm; } memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min; bldev = backlight_device_register("atmel-pwm-bl", &pdev->dev, pwmbl, &atmel_pwm_bl_ops, &props); if (IS_ERR(bldev)) { retval = PTR_ERR(bldev); goto err_free_pwm; } pwmbl->bldev = bldev; platform_set_drvdata(pdev, pwmbl); /* Power up the backlight by default at middle intensity. */ bldev->props.power = FB_BLANK_UNBLANK; bldev->props.brightness = bldev->props.max_brightness / 2; retval = atmel_pwm_bl_init_pwm(pwmbl); if (retval) goto err_free_bl_dev; atmel_pwm_bl_set_intensity(bldev); return 0; err_free_bl_dev: platform_set_drvdata(pdev, NULL); backlight_device_unregister(bldev); err_free_pwm: pwm_channel_free(&pwmbl->pwmc); err_free_mem: return retval; } static int __exit atmel_pwm_bl_remove(struct platform_device *pdev) { struct atmel_pwm_bl *pwmbl = platform_get_drvdata(pdev); if (pwmbl->gpio_on != -1) gpio_set_value(pwmbl->gpio_on, 0); pwm_channel_disable(&pwmbl->pwmc); pwm_channel_free(&pwmbl->pwmc); backlight_device_unregister(pwmbl->bldev); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver atmel_pwm_bl_driver = { .driver = { .name = "atmel-pwm-bl", }, /* REVISIT add suspend() and resume() */ .remove = __exit_p(atmel_pwm_bl_remove), }; module_platform_driver_probe(atmel_pwm_bl_driver, atmel_pwm_bl_probe); MODULE_AUTHOR("Hans-Christian egtvedt <hans-christian.egtvedt@atmel.com>"); MODULE_DESCRIPTION("Atmel PWM backlight driver"); MODULE_LICENSE("GPL");
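/*
 * Editor's note - illustrative sketch, not part of the original driver.
 * atmel_pwm_bl_init_pwm() above picks the PWM prescaler like this: the
 * ideal divider mck / (pwm_frequency * pwm_compare_max) is rounded up to
 * the next power of two via fls() (fls(divider - 1) is the smallest e
 * with 2^e >= divider), and the exponent is clamped to the 4-bit hardware
 * field (0xf). The helper name below is hypothetical; it only isolates
 * that computation.
 */
static unsigned long example_atmel_pwm_bl_prescale(unsigned long mck,
						   unsigned long pwm_frequency,
						   unsigned long pwm_compare_max)
{
	unsigned long prescale;

	/* ideal divider, rounded up, minus one so fls() yields ceil(log2) */
	prescale = DIV_ROUND_UP(mck, pwm_frequency * pwm_compare_max) - 1;
	prescale = fls(prescale);
	if (prescale > 0xf)		/* hardware limit: 4-bit prescaler field */
		prescale = 0xf;

	return prescale;		/* PWM rate becomes mck / (2 ^ prescale) */
}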
gpl-2.0
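A quick way to sanity-check the prescaler arithmetic in atmel_pwm_bl_init_pwm() above is to mirror it in a small userspace program. The sketch below is not part of the driver: the clock and platform-data values (a 66 MHz module clock, 200 Hz target frequency, compare_max of 345) are made-up examples, and div_round_up()/fls_ul() are stand-ins for the kernel's DIV_ROUND_UP() and fls().

#include <stdio.h>

/* Userspace stand-ins for the kernel helpers used by the driver. */
static unsigned long div_round_up(unsigned long n, unsigned long d)
{
        return (n + d - 1) / d;
}

static int fls_ul(unsigned long x)      /* highest set bit, 1-based */
{
        int r = 0;
        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

int main(void)
{
        unsigned long mck = 66000000;           /* assumed module clock, Hz */
        unsigned long pwm_frequency = 200;      /* assumed platform data */
        unsigned long pwm_compare_max = 345;    /* assumed platform data */
        unsigned long prescale;

        prescale = fls_ul(div_round_up(mck,
                        pwm_frequency * pwm_compare_max) - 1);
        if (prescale > 0xf)
                prescale = 0xf;                 /* hardware limit */

        printf("prescale=%lu -> %lu Hz\n", prescale,
               mck / pwm_compare_max / (1UL << prescale));
        return 0;
}

With those assumed numbers the program prints prescale=10 and an effective rate of about 186 Hz, which is the closest power-of-two division that does not exceed the requested 200 Hz.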
PureNexusProject/android_kernel_asus_flo
arch/arm/mach-msm/board-8960-regulator.c
288
23129
/* * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/regulator/pm8xxx-regulator.h> #include <linux/regulator/msm-gpio-regulator.h> #include <mach/rpm-regulator.h> #include <mach/socinfo.h> #include "board-8960.h" #define VREG_CONSUMERS(_id) \ static struct regulator_consumer_supply vreg_consumers_##_id[] /* * Consumer specific regulator names: * regulator name consumer dev_name */ VREG_CONSUMERS(L1) = { REGULATOR_SUPPLY("8921_l1", NULL), }; VREG_CONSUMERS(L2) = { REGULATOR_SUPPLY("8921_l2", NULL), REGULATOR_SUPPLY("dsi_vdda", "mipi_dsi.1"), REGULATOR_SUPPLY("mipi_csi_vdd", "msm_csid.0"), REGULATOR_SUPPLY("mipi_csi_vdd", "msm_csid.1"), REGULATOR_SUPPLY("mipi_csi_vdd", "msm_csid.2"), }; VREG_CONSUMERS(L3) = { REGULATOR_SUPPLY("8921_l3", NULL), REGULATOR_SUPPLY("HSUSB_3p3", "msm_otg"), }; VREG_CONSUMERS(L4) = { REGULATOR_SUPPLY("8921_l4", NULL), REGULATOR_SUPPLY("HSUSB_1p8", "msm_otg"), REGULATOR_SUPPLY("iris_vddxo", "wcnss_wlan.0"), }; VREG_CONSUMERS(L5) = { REGULATOR_SUPPLY("8921_l5", NULL), REGULATOR_SUPPLY("sdc_vdd", "msm_sdcc.1"), }; VREG_CONSUMERS(L6) = { REGULATOR_SUPPLY("8921_l6", NULL), REGULATOR_SUPPLY("sdc_vdd", "msm_sdcc.3"), }; VREG_CONSUMERS(L7) = { REGULATOR_SUPPLY("8921_l7", NULL), REGULATOR_SUPPLY("sdc_vdd_io", "msm_sdcc.3"), }; VREG_CONSUMERS(L8) = { REGULATOR_SUPPLY("8921_l8", NULL), REGULATOR_SUPPLY("dsi_vdc", "mipi_dsi.1"), }; VREG_CONSUMERS(L9) = { REGULATOR_SUPPLY("8921_l9", NULL), REGULATOR_SUPPLY("vdd", "3-0024"), REGULATOR_SUPPLY("vdd_ana", "3-004a"), }; VREG_CONSUMERS(L10) = { REGULATOR_SUPPLY("8921_l10", NULL), REGULATOR_SUPPLY("iris_vddpa", "wcnss_wlan.0"), }; VREG_CONSUMERS(L11) = { REGULATOR_SUPPLY("8921_l11", NULL), REGULATOR_SUPPLY("cam_vana", "4-001a"), REGULATOR_SUPPLY("cam_vana", "4-006c"), REGULATOR_SUPPLY("cam_vana", "4-0048"), REGULATOR_SUPPLY("cam_vana", "4-0020"), REGULATOR_SUPPLY("cam_vana", "4-0034"), }; VREG_CONSUMERS(L12) = { REGULATOR_SUPPLY("8921_l12", NULL), REGULATOR_SUPPLY("cam_vdig", "4-001a"), REGULATOR_SUPPLY("cam_vdig", "4-006c"), REGULATOR_SUPPLY("cam_vdig", "4-0048"), REGULATOR_SUPPLY("cam_vdig", "4-0020"), REGULATOR_SUPPLY("cam_vdig", "4-0034"), }; VREG_CONSUMERS(L14) = { REGULATOR_SUPPLY("8921_l14", NULL), REGULATOR_SUPPLY("pa_therm", "pm8xxx-adc"), }; VREG_CONSUMERS(L15) = { REGULATOR_SUPPLY("8921_l15", NULL), }; VREG_CONSUMERS(L16) = { REGULATOR_SUPPLY("8921_l16", NULL), REGULATOR_SUPPLY("cam_vaf", "4-001a"), REGULATOR_SUPPLY("cam_vaf", "4-006c"), REGULATOR_SUPPLY("cam_vaf", "4-0048"), REGULATOR_SUPPLY("cam_vaf", "4-0020"), REGULATOR_SUPPLY("cam_vaf", "4-0034"), }; VREG_CONSUMERS(L17) = { REGULATOR_SUPPLY("8921_l17", NULL), }; VREG_CONSUMERS(L18) = { REGULATOR_SUPPLY("8921_l18", NULL), }; VREG_CONSUMERS(L21) = { REGULATOR_SUPPLY("8921_l21", NULL), }; VREG_CONSUMERS(L22) = { REGULATOR_SUPPLY("8921_l22", NULL), }; VREG_CONSUMERS(L23) = { REGULATOR_SUPPLY("8921_l23", NULL), REGULATOR_SUPPLY("dsi_vddio", "mipi_dsi.1"), REGULATOR_SUPPLY("hdmi_avdd", "hdmi_msm.0"), REGULATOR_SUPPLY("pll_vdd", "pil_riva"), REGULATOR_SUPPLY("pll_vdd", "pil_qdsp6v4.1"), 
REGULATOR_SUPPLY("pll_vdd", "pil_qdsp6v4.2"), }; VREG_CONSUMERS(L24) = { REGULATOR_SUPPLY("8921_l24", NULL), REGULATOR_SUPPLY("riva_vddmx", "wcnss_wlan.0"), }; VREG_CONSUMERS(L25) = { REGULATOR_SUPPLY("8921_l25", NULL), REGULATOR_SUPPLY("VDDD_CDC_D", "tabla-slim"), REGULATOR_SUPPLY("CDC_VDDA_A_1P2V", "tabla-slim"), REGULATOR_SUPPLY("VDDD_CDC_D", "tabla2x-slim"), REGULATOR_SUPPLY("CDC_VDDA_A_1P2V", "tabla2x-slim"), }; VREG_CONSUMERS(L26) = { REGULATOR_SUPPLY("8921_l26", NULL), REGULATOR_SUPPLY("core_vdd", "pil_qdsp6v4.0"), }; VREG_CONSUMERS(L27) = { REGULATOR_SUPPLY("8921_l27", NULL), REGULATOR_SUPPLY("core_vdd", "pil_qdsp6v4.2"), }; VREG_CONSUMERS(L28) = { REGULATOR_SUPPLY("8921_l28", NULL), REGULATOR_SUPPLY("core_vdd", "pil_qdsp6v4.1"), }; VREG_CONSUMERS(L29) = { REGULATOR_SUPPLY("8921_l29", NULL), }; VREG_CONSUMERS(S1) = { REGULATOR_SUPPLY("8921_s1", NULL), }; VREG_CONSUMERS(S2) = { REGULATOR_SUPPLY("8921_s2", NULL), REGULATOR_SUPPLY("iris_vddrfa", "wcnss_wlan.0"), }; VREG_CONSUMERS(S3) = { REGULATOR_SUPPLY("8921_s3", NULL), REGULATOR_SUPPLY("HSUSB_VDDCX", "msm_otg"), REGULATOR_SUPPLY("riva_vddcx", "wcnss_wlan.0"), REGULATOR_SUPPLY("HSIC_VDDCX", "msm_hsic_host"), }; VREG_CONSUMERS(S4) = { REGULATOR_SUPPLY("8921_s4", NULL), REGULATOR_SUPPLY("sdc_vdd_io", "msm_sdcc.1"), REGULATOR_SUPPLY("sdc_vdd", "msm_sdcc.2"), REGULATOR_SUPPLY("sdc_vdd_io", "msm_sdcc.4"), REGULATOR_SUPPLY("riva_vddpx", "wcnss_wlan.0"), REGULATOR_SUPPLY("hdmi_vcc", "hdmi_msm.0"), REGULATOR_SUPPLY("VDDIO_CDC", "tabla-slim"), REGULATOR_SUPPLY("CDC_VDD_CP", "tabla-slim"), REGULATOR_SUPPLY("CDC_VDDA_TX", "tabla-slim"), REGULATOR_SUPPLY("CDC_VDDA_RX", "tabla-slim"), REGULATOR_SUPPLY("VDDIO_CDC", "tabla2x-slim"), REGULATOR_SUPPLY("CDC_VDD_CP", "tabla2x-slim"), REGULATOR_SUPPLY("CDC_VDDA_TX", "tabla2x-slim"), REGULATOR_SUPPLY("CDC_VDDA_RX", "tabla2x-slim"), REGULATOR_SUPPLY("vcc_i2c", "3-005b"), REGULATOR_SUPPLY("EXT_HUB_VDDIO", "msm_smsc_hub"), REGULATOR_SUPPLY("vcc_i2c", "10-0048"), }; VREG_CONSUMERS(S5) = { REGULATOR_SUPPLY("8921_s5", NULL), REGULATOR_SUPPLY("krait0", "acpuclk-8960"), REGULATOR_SUPPLY("krait0", "acpuclk-8960ab"), }; VREG_CONSUMERS(S6) = { REGULATOR_SUPPLY("8921_s6", NULL), REGULATOR_SUPPLY("krait1", "acpuclk-8960"), REGULATOR_SUPPLY("krait1", "acpuclk-8960ab"), }; VREG_CONSUMERS(S7) = { REGULATOR_SUPPLY("8921_s7", NULL), }; VREG_CONSUMERS(S8) = { REGULATOR_SUPPLY("8921_s8", NULL), }; VREG_CONSUMERS(LVS1) = { REGULATOR_SUPPLY("8921_lvs1", NULL), REGULATOR_SUPPLY("iris_vddio", "wcnss_wlan.0"), }; VREG_CONSUMERS(LVS2) = { REGULATOR_SUPPLY("8921_lvs2", NULL), REGULATOR_SUPPLY("iris_vdddig", "wcnss_wlan.0"), }; VREG_CONSUMERS(LVS3) = { REGULATOR_SUPPLY("8921_lvs3", NULL), }; VREG_CONSUMERS(LVS4) = { REGULATOR_SUPPLY("8921_lvs4", NULL), REGULATOR_SUPPLY("vcc_i2c", "3-0024"), REGULATOR_SUPPLY("vcc_i2c", "3-004a"), }; VREG_CONSUMERS(LVS5) = { REGULATOR_SUPPLY("8921_lvs5", NULL), REGULATOR_SUPPLY("cam_vio", "4-001a"), REGULATOR_SUPPLY("cam_vio", "4-006c"), REGULATOR_SUPPLY("cam_vio", "4-0048"), REGULATOR_SUPPLY("cam_vio", "4-0020"), REGULATOR_SUPPLY("cam_vio", "4-0034"), }; /* This mapping is used for CDP only. */ VREG_CONSUMERS(CDP_LVS6) = { REGULATOR_SUPPLY("8921_lvs6", NULL), REGULATOR_SUPPLY("vdd-io", "spi0.0"), }; /* This mapping is used for non-CDP targets only. 
*/ VREG_CONSUMERS(LVS6) = { REGULATOR_SUPPLY("8921_lvs6", NULL), REGULATOR_SUPPLY("vdd-io", "spi0.0"), REGULATOR_SUPPLY("vdd-phy", "spi0.0"), }; VREG_CONSUMERS(LVS7) = { REGULATOR_SUPPLY("8921_lvs7", NULL), }; VREG_CONSUMERS(USB_OTG) = { REGULATOR_SUPPLY("8921_usb_otg", NULL), }; VREG_CONSUMERS(HDMI_MVS) = { REGULATOR_SUPPLY("8921_hdmi_mvs", NULL), REGULATOR_SUPPLY("hdmi_mvs", "hdmi_msm.0"), }; VREG_CONSUMERS(NCP) = { REGULATOR_SUPPLY("8921_ncp", NULL), }; VREG_CONSUMERS(EXT_5V) = { REGULATOR_SUPPLY("ext_5v", NULL), }; VREG_CONSUMERS(EXT_L2) = { REGULATOR_SUPPLY("ext_l2", NULL), REGULATOR_SUPPLY("vdd-phy", "spi0.0"), }; VREG_CONSUMERS(EXT_3P3V) = { REGULATOR_SUPPLY("ext_3p3v", NULL), REGULATOR_SUPPLY("vdd_ana", "3-005b"), REGULATOR_SUPPLY("vdd_lvds_3p3v", "mipi_dsi.1"), REGULATOR_SUPPLY("mhl_usb_hs_switch", "msm_otg"), }; VREG_CONSUMERS(EXT_OTG_SW) = { REGULATOR_SUPPLY("ext_otg_sw", NULL), REGULATOR_SUPPLY("vbus_otg", "msm_otg"), }; #define PM8XXX_VREG_INIT(_id, _name, _min_uV, _max_uV, _modes, _ops, \ _apply_uV, _pull_down, _always_on, _supply_regulator, \ _system_uA, _enable_time, _reg_id) \ { \ .init_data = { \ .constraints = { \ .valid_modes_mask = _modes, \ .valid_ops_mask = _ops, \ .min_uV = _min_uV, \ .max_uV = _max_uV, \ .input_uV = _max_uV, \ .apply_uV = _apply_uV, \ .always_on = _always_on, \ .name = _name, \ }, \ .num_consumer_supplies = \ ARRAY_SIZE(vreg_consumers_##_id), \ .consumer_supplies = vreg_consumers_##_id, \ .supply_regulator = _supply_regulator, \ }, \ .id = _reg_id, \ .pull_down_enable = _pull_down, \ .system_uA = _system_uA, \ .enable_time = _enable_time, \ } #define PM8XXX_LDO(_id, _name, _always_on, _pull_down, _min_uV, _max_uV, \ _enable_time, _supply_regulator, _system_uA, _reg_id) \ PM8XXX_VREG_INIT(_id, _name, _min_uV, _max_uV, REGULATOR_MODE_NORMAL \ | REGULATOR_MODE_IDLE, REGULATOR_CHANGE_VOLTAGE | \ REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE | \ REGULATOR_CHANGE_DRMS, 0, _pull_down, _always_on, \ _supply_regulator, _system_uA, _enable_time, _reg_id) #define PM8XXX_NLDO1200(_id, _name, _always_on, _pull_down, _min_uV, \ _max_uV, _enable_time, _supply_regulator, _system_uA, _reg_id) \ PM8XXX_VREG_INIT(_id, _name, _min_uV, _max_uV, REGULATOR_MODE_NORMAL \ | REGULATOR_MODE_IDLE, REGULATOR_CHANGE_VOLTAGE | \ REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE | \ REGULATOR_CHANGE_DRMS, 0, _pull_down, _always_on, \ _supply_regulator, _system_uA, _enable_time, _reg_id) #define PM8XXX_SMPS(_id, _name, _always_on, _pull_down, _min_uV, _max_uV, \ _enable_time, _supply_regulator, _system_uA, _reg_id) \ PM8XXX_VREG_INIT(_id, _name, _min_uV, _max_uV, REGULATOR_MODE_NORMAL \ | REGULATOR_MODE_IDLE, REGULATOR_CHANGE_VOLTAGE | \ REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE | \ REGULATOR_CHANGE_DRMS, 0, _pull_down, _always_on, \ _supply_regulator, _system_uA, _enable_time, _reg_id) #define PM8XXX_FTSMPS(_id, _name, _always_on, _pull_down, _min_uV, _max_uV, \ _enable_time, _supply_regulator, _system_uA, _reg_id) \ PM8XXX_VREG_INIT(_id, _name, _min_uV, _max_uV, REGULATOR_MODE_NORMAL, \ REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS \ | REGULATOR_CHANGE_MODE, 0, _pull_down, _always_on, \ _supply_regulator, _system_uA, _enable_time, _reg_id) #define PM8XXX_VS(_id, _name, _always_on, _pull_down, _enable_time, \ _supply_regulator, _reg_id) \ PM8XXX_VREG_INIT(_id, _name, 0, 0, 0, REGULATOR_CHANGE_STATUS, 0, \ _pull_down, _always_on, _supply_regulator, 0, _enable_time, \ _reg_id) #define PM8XXX_VS300(_id, _name, _always_on, _pull_down, _enable_time, \ _supply_regulator, 
_reg_id) \ PM8XXX_VREG_INIT(_id, _name, 0, 0, 0, REGULATOR_CHANGE_STATUS, 0, \ _pull_down, _always_on, _supply_regulator, 0, _enable_time, \ _reg_id) #define PM8XXX_NCP(_id, _name, _always_on, _min_uV, _max_uV, _enable_time, \ _supply_regulator, _reg_id) \ PM8XXX_VREG_INIT(_id, _name, _min_uV, _max_uV, 0, \ REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS, 0, 0, \ _always_on, _supply_regulator, 0, _enable_time, _reg_id) /* Pin control initialization */ #define PM8XXX_PC(_id, _name, _always_on, _pin_fn, _pin_ctrl, \ _supply_regulator, _reg_id) \ { \ .init_data = { \ .constraints = { \ .valid_ops_mask = REGULATOR_CHANGE_STATUS, \ .always_on = _always_on, \ .name = _name, \ }, \ .num_consumer_supplies = \ ARRAY_SIZE(vreg_consumers_##_id##_PC), \ .consumer_supplies = vreg_consumers_##_id##_PC, \ .supply_regulator = _supply_regulator, \ }, \ .id = _reg_id, \ .pin_fn = PM8XXX_VREG_PIN_FN_##_pin_fn, \ .pin_ctrl = _pin_ctrl, \ } #define GPIO_VREG(_id, _reg_name, _gpio_label, _gpio, _supply_regulator) \ [GPIO_VREG_ID_##_id] = { \ .init_data = { \ .constraints = { \ .valid_ops_mask = REGULATOR_CHANGE_STATUS, \ }, \ .num_consumer_supplies = \ ARRAY_SIZE(vreg_consumers_##_id), \ .consumer_supplies = vreg_consumers_##_id, \ .supply_regulator = _supply_regulator, \ }, \ .regulator_name = _reg_name, \ .gpio_label = _gpio_label, \ .gpio = _gpio, \ } #define SAW_VREG_INIT(_id, _name, _min_uV, _max_uV) \ { \ .constraints = { \ .name = _name, \ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, \ .min_uV = _min_uV, \ .max_uV = _max_uV, \ }, \ .num_consumer_supplies = ARRAY_SIZE(vreg_consumers_##_id), \ .consumer_supplies = vreg_consumers_##_id, \ } #define RPM_INIT(_id, _min_uV, _max_uV, _modes, _ops, _apply_uV, _default_uV, \ _peak_uA, _avg_uA, _pull_down, _pin_ctrl, _freq, _pin_fn, \ _force_mode, _sleep_set_force_mode, _power_mode, _state, \ _sleep_selectable, _always_on, _supply_regulator, _system_uA) \ { \ .init_data = { \ .constraints = { \ .valid_modes_mask = _modes, \ .valid_ops_mask = _ops, \ .min_uV = _min_uV, \ .max_uV = _max_uV, \ .input_uV = _min_uV, \ .apply_uV = _apply_uV, \ .always_on = _always_on, \ }, \ .num_consumer_supplies = \ ARRAY_SIZE(vreg_consumers_##_id), \ .consumer_supplies = vreg_consumers_##_id, \ .supply_regulator = _supply_regulator, \ }, \ .id = RPM_VREG_ID_PM8921_##_id, \ .default_uV = _default_uV, \ .peak_uA = _peak_uA, \ .avg_uA = _avg_uA, \ .pull_down_enable = _pull_down, \ .pin_ctrl = _pin_ctrl, \ .freq = RPM_VREG_FREQ_##_freq, \ .pin_fn = _pin_fn, \ .force_mode = _force_mode, \ .sleep_set_force_mode = _sleep_set_force_mode, \ .power_mode = _power_mode, \ .state = _state, \ .sleep_selectable = _sleep_selectable, \ .system_uA = _system_uA, \ } #define RPM_LDO(_id, _always_on, _pd, _sleep_selectable, _min_uV, _max_uV, \ _supply_regulator, _system_uA, _init_peak_uA) \ RPM_INIT(_id, _min_uV, _max_uV, REGULATOR_MODE_NORMAL \ | REGULATOR_MODE_IDLE, REGULATOR_CHANGE_VOLTAGE \ | REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE \ | REGULATOR_CHANGE_DRMS, 0, _max_uV, _init_peak_uA, 0, _pd, \ RPM_VREG_PIN_CTRL_NONE, NONE, RPM_VREG_PIN_FN_8960_NONE, \ RPM_VREG_FORCE_MODE_8960_NONE, \ RPM_VREG_FORCE_MODE_8960_NONE, RPM_VREG_POWER_MODE_8960_PWM, \ RPM_VREG_STATE_OFF, _sleep_selectable, _always_on, \ _supply_regulator, _system_uA) #define RPM_SMPS(_id, _always_on, _pd, _sleep_selectable, _min_uV, _max_uV, \ _supply_regulator, _system_uA, _freq, _force_mode, \ _sleep_set_force_mode) \ RPM_INIT(_id, _min_uV, _max_uV, REGULATOR_MODE_NORMAL \ | REGULATOR_MODE_IDLE, REGULATOR_CHANGE_VOLTAGE \ | 
REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE \ | REGULATOR_CHANGE_DRMS, 0, _max_uV, _system_uA, 0, _pd, \ RPM_VREG_PIN_CTRL_NONE, _freq, RPM_VREG_PIN_FN_8960_NONE, \ RPM_VREG_FORCE_MODE_8960_##_force_mode, \ RPM_VREG_FORCE_MODE_8960_##_sleep_set_force_mode, \ RPM_VREG_POWER_MODE_8960_PWM, RPM_VREG_STATE_OFF, \ _sleep_selectable, _always_on, _supply_regulator, _system_uA) #define RPM_VS(_id, _always_on, _pd, _sleep_selectable, _supply_regulator) \ RPM_INIT(_id, 0, 0, 0, REGULATOR_CHANGE_STATUS, 0, 0, 1000, 1000, _pd, \ RPM_VREG_PIN_CTRL_NONE, NONE, RPM_VREG_PIN_FN_8960_NONE, \ RPM_VREG_FORCE_MODE_8960_NONE, \ RPM_VREG_FORCE_MODE_8960_NONE, RPM_VREG_POWER_MODE_8960_PWM, \ RPM_VREG_STATE_OFF, _sleep_selectable, _always_on, \ _supply_regulator, 0) #define RPM_NCP(_id, _always_on, _sleep_selectable, _min_uV, _max_uV, \ _supply_regulator, _freq) \ RPM_INIT(_id, _min_uV, _max_uV, 0, REGULATOR_CHANGE_VOLTAGE \ | REGULATOR_CHANGE_STATUS, 0, _max_uV, 1000, 1000, 0, \ RPM_VREG_PIN_CTRL_NONE, _freq, RPM_VREG_PIN_FN_8960_NONE, \ RPM_VREG_FORCE_MODE_8960_NONE, \ RPM_VREG_FORCE_MODE_8960_NONE, RPM_VREG_POWER_MODE_8960_PWM, \ RPM_VREG_STATE_OFF, _sleep_selectable, _always_on, \ _supply_regulator, 0) /* Pin control initialization */ #define RPM_PC_INIT(_id, _always_on, _pin_fn, _pin_ctrl, _supply_regulator) \ { \ .init_data = { \ .constraints = { \ .valid_ops_mask = REGULATOR_CHANGE_STATUS, \ .always_on = _always_on, \ }, \ .num_consumer_supplies = \ ARRAY_SIZE(vreg_consumers_##_id##_PC), \ .consumer_supplies = vreg_consumers_##_id##_PC, \ .supply_regulator = _supply_regulator, \ }, \ .id = RPM_VREG_ID_PM8921_##_id##_PC, \ .pin_fn = RPM_VREG_PIN_FN_8960_##_pin_fn, \ .pin_ctrl = _pin_ctrl, \ } /* GPIO regulator constraints */ struct gpio_regulator_platform_data msm_gpio_regulator_pdata[] __devinitdata = { /* ID vreg_name gpio_label gpio supply */ GPIO_VREG(EXT_5V, "ext_5v", "ext_5v_en", PM8921_MPP_PM_TO_SYS(7), NULL), GPIO_VREG(EXT_L2, "ext_l2", "ext_l2_en", 91, NULL), GPIO_VREG(EXT_3P3V, "ext_3p3v", "ext_3p3v_en", PM8921_GPIO_PM_TO_SYS(17), NULL), GPIO_VREG(EXT_OTG_SW, "ext_otg_sw", "ext_otg_sw_en", PM8921_GPIO_PM_TO_SYS(42), "8921_usb_otg"), }; /* SAW regulator constraints */ struct regulator_init_data msm_saw_regulator_pdata_s5 = /* ID vreg_name min_uV max_uV */ SAW_VREG_INIT(S5, "8921_s5", 850000, 1300000); struct regulator_init_data msm_saw_regulator_pdata_s6 = SAW_VREG_INIT(S6, "8921_s6", 850000, 1300000); /* PM8921 regulator constraints */ struct pm8xxx_regulator_platform_data msm_pm8921_regulator_pdata[] __devinitdata = { /* * ID name always_on pd min_uV max_uV en_t supply * system_uA reg_ID */ PM8XXX_NLDO1200(L26, "8921_l26", 0, 1, 375000, 1050000, 200, "8921_s7", 0, 1), PM8XXX_NLDO1200(L27, "8921_l27", 0, 1, 375000, 1050000, 200, "8921_s7", 0, 2), PM8XXX_NLDO1200(L28, "8921_l28", 0, 1, 375000, 1050000, 200, "8921_s7", 0, 3), PM8XXX_LDO(L29, "8921_l29", 0, 1, 2050000, 2100000, 200, "8921_s8", 0, 4), /* ID name always_on pd en_t supply reg_ID */ PM8XXX_VS300(USB_OTG, "8921_usb_otg", 0, 1, 0, "ext_5v", 5), PM8XXX_VS300(HDMI_MVS, "8921_hdmi_mvs", 0, 1, 0, "ext_5v", 6), }; static struct rpm_regulator_init_data msm_rpm_regulator_init_data[] __devinitdata = { /* ID a_on pd ss min_uV max_uV supply sys_uA freq fm ss_fm */ RPM_SMPS(S1, 1, 1, 0, 1225000, 1225000, NULL, 100000, 3p20, NONE, NONE), RPM_SMPS(S2, 0, 1, 0, 1300000, 1300000, NULL, 0, 1p60, NONE, NONE), RPM_SMPS(S3, 0, 1, 1, 500000, 1150000, NULL, 100000, 4p80, NONE, NONE), RPM_SMPS(S4, 1, 1, 0, 1800000, 1800000, NULL, 100000, 1p60, AUTO, AUTO), 
RPM_SMPS(S7, 0, 1, 0, 1150000, 1150000, NULL, 100000, 3p20, NONE, NONE), RPM_SMPS(S8, 1, 1, 1, 2050000, 2050000, NULL, 100000, 1p60, NONE, NONE), /* ID a_on pd ss min_uV max_uV supply sys_uA init_ip */ RPM_LDO(L1, 1, 1, 0, 1050000, 1050000, "8921_s4", 0, 10000), RPM_LDO(L2, 0, 1, 0, 1200000, 1200000, "8921_s4", 0, 0), RPM_LDO(L3, 0, 1, 0, 3075000, 3075000, NULL, 0, 0), RPM_LDO(L4, 1, 1, 0, 1800000, 1800000, NULL, 10000, 10000), RPM_LDO(L5, 0, 1, 0, 2950000, 2950000, NULL, 0, 0), RPM_LDO(L6, 0, 1, 0, 2950000, 2950000, NULL, 0, 0), RPM_LDO(L7, 1, 1, 0, 1850000, 2950000, NULL, 10000, 10000), RPM_LDO(L8, 0, 1, 0, 2800000, 3000000, NULL, 0, 0), RPM_LDO(L9, 0, 1, 0, 3000000, 3000000, NULL, 0, 0), RPM_LDO(L10, 0, 1, 0, 3000000, 3000000, NULL, 0, 0), RPM_LDO(L11, 0, 1, 0, 2850000, 2850000, NULL, 0, 0), RPM_LDO(L12, 0, 1, 0, 1200000, 1200000, "8921_s4", 0, 0), RPM_LDO(L14, 0, 1, 0, 1800000, 1800000, NULL, 0, 0), RPM_LDO(L15, 0, 1, 0, 1800000, 2950000, NULL, 0, 0), RPM_LDO(L16, 0, 1, 0, 2800000, 2800000, NULL, 0, 0), RPM_LDO(L17, 0, 1, 0, 1800000, 2950000, NULL, 0, 0), RPM_LDO(L18, 0, 1, 0, 1300000, 1300000, "8921_s4", 0, 0), RPM_LDO(L21, 0, 1, 0, 1900000, 1900000, "8921_s8", 0, 0), RPM_LDO(L22, 0, 1, 0, 2750000, 2750000, NULL, 0, 0), RPM_LDO(L23, 1, 1, 1, 1800000, 1800000, "8921_s8", 10000, 10000), RPM_LDO(L24, 0, 1, 1, 750000, 1150000, "8921_s1", 10000, 10000), RPM_LDO(L25, 1, 1, 0, 1250000, 1250000, "8921_s1", 10000, 10000), /* ID a_on pd ss supply */ RPM_VS(LVS1, 0, 1, 0, "8921_s4"), RPM_VS(LVS2, 0, 1, 0, "8921_s1"), RPM_VS(LVS3, 0, 1, 0, "8921_s4"), RPM_VS(LVS4, 0, 1, 0, "8921_s4"), RPM_VS(LVS5, 0, 1, 0, "8921_s4"), RPM_VS(LVS6, 0, 1, 0, "8921_s4"), RPM_VS(LVS7, 0, 1, 0, "8921_s4"), /* ID a_on ss min_uV max_uV supply freq */ RPM_NCP(NCP, 0, 0, 1800000, 1800000, "8921_l6", 1p60), }; int msm_pm8921_regulator_pdata_len __devinitdata = ARRAY_SIZE(msm_pm8921_regulator_pdata); #define RPM_REG_MAP(_id, _sleep_also, _voter, _supply, _dev_name) \ { \ .vreg_id = RPM_VREG_ID_PM8921_##_id, \ .sleep_also = _sleep_also, \ .voter = _voter, \ .supply = _supply, \ .dev_name = _dev_name, \ } static struct rpm_regulator_consumer_mapping msm_rpm_regulator_consumer_mapping[] __devinitdata = { RPM_REG_MAP(L23, 0, 1, "krait0_l23", "acpuclk-8960"), RPM_REG_MAP(L23, 0, 2, "krait1_l23", "acpuclk-8960"), RPM_REG_MAP(L23, 0, 6, "l2_l23", "acpuclk-8960"), RPM_REG_MAP(L24, 0, 1, "krait0_mem", "acpuclk-8960"), RPM_REG_MAP(L24, 0, 2, "krait1_mem", "acpuclk-8960"), RPM_REG_MAP(S3, 0, 1, "krait0_dig", "acpuclk-8960"), RPM_REG_MAP(S3, 0, 2, "krait1_dig", "acpuclk-8960"), RPM_REG_MAP(S8, 0, 1, "krait0_s8", "acpuclk-8960"), RPM_REG_MAP(S8, 0, 2, "krait1_s8", "acpuclk-8960"), RPM_REG_MAP(S8, 0, 6, "l2_s8", "acpuclk-8960"), RPM_REG_MAP(L23, 0, 1, "krait0_l23", "acpuclk-8960ab"), RPM_REG_MAP(L23, 0, 2, "krait1_l23", "acpuclk-8960ab"), RPM_REG_MAP(L23, 0, 6, "l2_l23", "acpuclk-8960ab"), RPM_REG_MAP(L24, 0, 1, "krait0_mem", "acpuclk-8960ab"), RPM_REG_MAP(L24, 0, 2, "krait1_mem", "acpuclk-8960ab"), RPM_REG_MAP(S3, 0, 1, "krait0_dig", "acpuclk-8960ab"), RPM_REG_MAP(S3, 0, 2, "krait1_dig", "acpuclk-8960ab"), RPM_REG_MAP(S8, 0, 1, "krait0_s8", "acpuclk-8960ab"), RPM_REG_MAP(S8, 0, 2, "krait1_s8", "acpuclk-8960ab"), RPM_REG_MAP(S8, 0, 6, "l2_s8", "acpuclk-8960ab"), }; struct rpm_regulator_platform_data msm_rpm_regulator_pdata __devinitdata = { .init_data = msm_rpm_regulator_init_data, .num_regulators = ARRAY_SIZE(msm_rpm_regulator_init_data), .version = RPM_VREG_VERSION_8960, .vreg_id_vdd_mem = RPM_VREG_ID_PM8921_L24, .vreg_id_vdd_dig = 
RPM_VREG_ID_PM8921_S3, .consumer_map = msm_rpm_regulator_consumer_mapping, .consumer_map_len = ARRAY_SIZE(msm_rpm_regulator_consumer_mapping), }; /* * Fix up regulator consumer data that moves to a different regulator based on * the current target. */ void __init configure_msm8960_power_grid(void) { static struct rpm_regulator_init_data *rpm_data; int i; if (machine_is_msm8960_cdp()) { /* Only modify LVS6 consumers for CDP targets. */ for (i = 0; i < ARRAY_SIZE(msm_rpm_regulator_init_data); i++) { rpm_data = &msm_rpm_regulator_init_data[i]; if (rpm_data->id == RPM_VREG_ID_PM8921_LVS6) { rpm_data->init_data.consumer_supplies = vreg_consumers_CDP_LVS6; rpm_data->init_data.num_consumer_supplies = ARRAY_SIZE(vreg_consumers_CDP_LVS6); } } } }
gpl-2.0
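The VREG_CONSUMERS tables in the board file above are only half of the picture: a consumer driver never names "8921_l2" directly, it asks for its own supply string and the regulator core matches (supply name, dev_name) against these tables. A minimal sketch of what a hypothetical consumer bound to the device "mipi_dsi.1" might do; demo_probe() is illustrative and not from the source tree.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

/*
 * Hypothetical consumer sketch: a driver whose device is named
 * "mipi_dsi.1" requests "dsi_vdda", which the core resolves to
 * PM8921 L2 via the VREG_CONSUMERS(L2) table above.
 */
static int demo_probe(struct platform_device *pdev)
{
        struct regulator *vdda;
        int ret;

        vdda = regulator_get(&pdev->dev, "dsi_vdda");
        if (IS_ERR(vdda))
                return PTR_ERR(vdda);

        /* 1.2 V, within the RPM_LDO(L2, ...) constraints above. */
        ret = regulator_set_voltage(vdda, 1200000, 1200000);
        if (!ret)
                ret = regulator_enable(vdda);
        if (ret)
                regulator_put(vdda);
        return ret;
}

Note that the match only happens because the REGULATOR_SUPPLY() entry carries the exact dev_name "mipi_dsi.1"; entries with a NULL dev_name, such as "8921_l2", match any device and exist mainly for debug consumers.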
Kra1o5/android_kernel_huawei_u8815-gb
drivers/base/firmware_class.c
288
17472
/* * firmware_class.c - Multi purpose firmware loading support * * Copyright (c) 2003 Manuel Estrada Sainz * * Please see Documentation/firmware_class/ for more information. * */ #include <linux/capability.h> #include <linux/device.h> #include <linux/module.h> #include <linux/init.h> #include <linux/timer.h> #include <linux/vmalloc.h> #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/mutex.h> #include <linux/kthread.h> #include <linux/highmem.h> #include <linux/firmware.h> #include <linux/slab.h> #define to_dev(obj) container_of(obj, struct device, kobj) MODULE_AUTHOR("Manuel Estrada Sainz"); MODULE_DESCRIPTION("Multi purpose firmware loading support"); MODULE_LICENSE("GPL"); /* Builtin firmware support */ #ifdef CONFIG_FW_LOADER extern struct builtin_fw __start_builtin_fw[]; extern struct builtin_fw __end_builtin_fw[]; static bool fw_get_builtin_firmware(struct firmware *fw, const char *name) { struct builtin_fw *b_fw; for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) { if (strcmp(name, b_fw->name) == 0) { fw->size = b_fw->size; fw->data = b_fw->data; return true; } } return false; } static bool fw_is_builtin_firmware(const struct firmware *fw) { struct builtin_fw *b_fw; for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) if (fw->data == b_fw->data) return true; return false; } #else /* Module case - no builtin firmware support */ static inline bool fw_get_builtin_firmware(struct firmware *fw, const char *name) { return false; } static inline bool fw_is_builtin_firmware(const struct firmware *fw) { return false; } #endif enum { FW_STATUS_LOADING, FW_STATUS_DONE, FW_STATUS_ABORT, }; static int loading_timeout = 60; /* In seconds */ /* fw_lock could be moved to 'struct firmware_priv' but since it is just * guarding for corner cases a global lock should be OK */ static DEFINE_MUTEX(fw_lock); struct firmware_priv { struct completion completion; struct firmware *fw; unsigned long status; struct page **pages; int nr_pages; int page_array_size; struct timer_list timeout; struct device dev; bool nowait; char fw_id[]; }; static struct firmware_priv *to_firmware_priv(struct device *dev) { return container_of(dev, struct firmware_priv, dev); } static void fw_load_abort(struct firmware_priv *fw_priv) { set_bit(FW_STATUS_ABORT, &fw_priv->status); wmb(); complete(&fw_priv->completion); } static ssize_t firmware_timeout_show(struct class *class, struct class_attribute *attr, char *buf) { return sprintf(buf, "%d\n", loading_timeout); } /** * firmware_timeout_store - set number of seconds to wait for firmware * @class: device class pointer * @attr: device attribute pointer * @buf: buffer to scan for timeout value * @count: number of bytes in @buf * * Sets the number of seconds to wait for the firmware. Once * this expires an error will be returned to the driver and no * firmware will be provided. * * Note: zero means 'wait forever'. 
**/ static ssize_t firmware_timeout_store(struct class *class, struct class_attribute *attr, const char *buf, size_t count) { loading_timeout = simple_strtol(buf, NULL, 10); if (loading_timeout < 0) loading_timeout = 0; return count; } static struct class_attribute firmware_class_attrs[] = { __ATTR(timeout, S_IWUSR | S_IRUGO, firmware_timeout_show, firmware_timeout_store), __ATTR_NULL }; static void fw_dev_release(struct device *dev) { struct firmware_priv *fw_priv = to_firmware_priv(dev); int i; for (i = 0; i < fw_priv->nr_pages; i++) __free_page(fw_priv->pages[i]); kfree(fw_priv->pages); kfree(fw_priv); module_put(THIS_MODULE); } static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env) { struct firmware_priv *fw_priv = to_firmware_priv(dev); if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->fw_id)) return -ENOMEM; if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout)) return -ENOMEM; if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait)) return -ENOMEM; return 0; } static struct class firmware_class = { .name = "firmware", .class_attrs = firmware_class_attrs, .dev_uevent = firmware_uevent, .dev_release = fw_dev_release, }; static ssize_t firmware_loading_show(struct device *dev, struct device_attribute *attr, char *buf) { struct firmware_priv *fw_priv = to_firmware_priv(dev); int loading = test_bit(FW_STATUS_LOADING, &fw_priv->status); return sprintf(buf, "%d\n", loading); } static void firmware_free_data(const struct firmware *fw) { int i; vunmap(fw->data); if (fw->pages) { for (i = 0; i < PFN_UP(fw->size); i++) __free_page(fw->pages[i]); kfree(fw->pages); } } /* Some architectures don't have PAGE_KERNEL_RO */ #ifndef PAGE_KERNEL_RO #define PAGE_KERNEL_RO PAGE_KERNEL #endif /** * firmware_loading_store - set value in the 'loading' control file * @dev: device pointer * @attr: device attribute pointer * @buf: buffer to scan for loading control value * @count: number of bytes in @buf * * The relevant values are: * * 1: Start a load, discarding any previous partial load. * 0: Conclude the load and hand the data to the driver code. * -1: Conclude the load with an error and discard any written data. 
**/ static ssize_t firmware_loading_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct firmware_priv *fw_priv = to_firmware_priv(dev); int loading = simple_strtol(buf, NULL, 10); int i; switch (loading) { case 1: mutex_lock(&fw_lock); if (!fw_priv->fw) { mutex_unlock(&fw_lock); break; } firmware_free_data(fw_priv->fw); memset(fw_priv->fw, 0, sizeof(struct firmware)); /* If the pages are not owned by 'struct firmware' */ for (i = 0; i < fw_priv->nr_pages; i++) __free_page(fw_priv->pages[i]); kfree(fw_priv->pages); fw_priv->pages = NULL; fw_priv->page_array_size = 0; fw_priv->nr_pages = 0; set_bit(FW_STATUS_LOADING, &fw_priv->status); mutex_unlock(&fw_lock); break; case 0: if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) { vunmap(fw_priv->fw->data); fw_priv->fw->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0, PAGE_KERNEL_RO); if (!fw_priv->fw->data) { dev_err(dev, "%s: vmap() failed\n", __func__); goto err; } /* Pages are now owned by 'struct firmware' */ fw_priv->fw->pages = fw_priv->pages; fw_priv->pages = NULL; fw_priv->page_array_size = 0; fw_priv->nr_pages = 0; complete(&fw_priv->completion); clear_bit(FW_STATUS_LOADING, &fw_priv->status); break; } /* fallthrough */ default: dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading); /* fallthrough */ case -1: err: fw_load_abort(fw_priv); break; } return count; } static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store); static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t offset, size_t count) { struct device *dev = to_dev(kobj); struct firmware_priv *fw_priv = to_firmware_priv(dev); struct firmware *fw; ssize_t ret_count; mutex_lock(&fw_lock); fw = fw_priv->fw; if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->status)) { ret_count = -ENODEV; goto out; } if (offset > fw->size) { ret_count = 0; goto out; } if (count > fw->size - offset) count = fw->size - offset; ret_count = count; while (count) { void *page_data; int page_nr = offset >> PAGE_SHIFT; int page_ofs = offset & (PAGE_SIZE-1); int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); page_data = kmap(fw_priv->pages[page_nr]); memcpy(buffer, page_data + page_ofs, page_cnt); kunmap(fw_priv->pages[page_nr]); buffer += page_cnt; offset += page_cnt; count -= page_cnt; } out: mutex_unlock(&fw_lock); return ret_count; } static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size) { int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT; /* If the array of pages is too small, grow it... 
*/ if (fw_priv->page_array_size < pages_needed) { int new_array_size = max(pages_needed, fw_priv->page_array_size * 2); struct page **new_pages; new_pages = kmalloc(new_array_size * sizeof(void *), GFP_KERNEL); if (!new_pages) { fw_load_abort(fw_priv); return -ENOMEM; } memcpy(new_pages, fw_priv->pages, fw_priv->page_array_size * sizeof(void *)); memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) * (new_array_size - fw_priv->page_array_size)); kfree(fw_priv->pages); fw_priv->pages = new_pages; fw_priv->page_array_size = new_array_size; } while (fw_priv->nr_pages < pages_needed) { fw_priv->pages[fw_priv->nr_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); if (!fw_priv->pages[fw_priv->nr_pages]) { fw_load_abort(fw_priv); return -ENOMEM; } fw_priv->nr_pages++; } return 0; } /** * firmware_data_write - write method for firmware * @filp: open sysfs file * @kobj: kobject for the device * @bin_attr: bin_attr structure * @buffer: buffer being written * @offset: buffer offset for write in total data store area * @count: buffer size * * Data written to the 'data' attribute will be later handed to * the driver as a firmware image. **/ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t offset, size_t count) { struct device *dev = to_dev(kobj); struct firmware_priv *fw_priv = to_firmware_priv(dev); struct firmware *fw; ssize_t retval; if (!capable(CAP_SYS_RAWIO)) return -EPERM; mutex_lock(&fw_lock); fw = fw_priv->fw; if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->status)) { retval = -ENODEV; goto out; } retval = fw_realloc_buffer(fw_priv, offset + count); if (retval) goto out; retval = count; while (count) { void *page_data; int page_nr = offset >> PAGE_SHIFT; int page_ofs = offset & (PAGE_SIZE - 1); int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); page_data = kmap(fw_priv->pages[page_nr]); memcpy(page_data + page_ofs, buffer, page_cnt); kunmap(fw_priv->pages[page_nr]); buffer += page_cnt; offset += page_cnt; count -= page_cnt; } fw->size = max_t(size_t, offset, fw->size); out: mutex_unlock(&fw_lock); return retval; } static struct bin_attribute firmware_attr_data = { .attr = { .name = "data", .mode = 0644 }, .size = 0, .read = firmware_data_read, .write = firmware_data_write, }; static void firmware_class_timeout(u_long data) { struct firmware_priv *fw_priv = (struct firmware_priv *) data; fw_load_abort(fw_priv); } static struct firmware_priv * fw_create_instance(struct firmware *firmware, const char *fw_name, struct device *device, bool uevent, bool nowait) { struct firmware_priv *fw_priv; struct device *f_dev; int error; fw_priv = kzalloc(sizeof(*fw_priv) + strlen(fw_name) + 1 , GFP_KERNEL); if (!fw_priv) { dev_err(device, "%s: kmalloc failed\n", __func__); error = -ENOMEM; goto err_out; } fw_priv->fw = firmware; fw_priv->nowait = nowait; strcpy(fw_priv->fw_id, fw_name); init_completion(&fw_priv->completion); setup_timer(&fw_priv->timeout, firmware_class_timeout, (u_long) fw_priv); f_dev = &fw_priv->dev; device_initialize(f_dev); dev_set_name(f_dev, "%s", dev_name(device)); f_dev->parent = device; f_dev->class = &firmware_class; dev_set_uevent_suppress(f_dev, true); /* Need to pin this module until class device is destroyed */ __module_get(THIS_MODULE); error = device_add(f_dev); if (error) { dev_err(device, "%s: device_register failed\n", __func__); goto err_put_dev; } error = device_create_bin_file(f_dev, &firmware_attr_data); if (error) { dev_err(device, "%s: sysfs_create_bin_file failed\n", 
__func__); goto err_del_dev; } error = device_create_file(f_dev, &dev_attr_loading); if (error) { dev_err(device, "%s: device_create_file failed\n", __func__); goto err_del_bin_attr; } if (uevent) dev_set_uevent_suppress(f_dev, false); return fw_priv; err_del_bin_attr: device_remove_bin_file(f_dev, &firmware_attr_data); err_del_dev: device_del(f_dev); err_put_dev: put_device(f_dev); err_out: return ERR_PTR(error); } static void fw_destroy_instance(struct firmware_priv *fw_priv) { struct device *f_dev = &fw_priv->dev; device_remove_file(f_dev, &dev_attr_loading); device_remove_bin_file(f_dev, &firmware_attr_data); device_unregister(f_dev); } static int _request_firmware(const struct firmware **firmware_p, const char *name, struct device *device, bool uevent, bool nowait) { struct firmware_priv *fw_priv; struct firmware *firmware; int retval = 0; if (!firmware_p) return -EINVAL; *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); if (!firmware) { dev_err(device, "%s: kmalloc(struct firmware) failed\n", __func__); retval = -ENOMEM; goto out; } if (fw_get_builtin_firmware(firmware, name)) { dev_dbg(device, "firmware: using built-in firmware %s\n", name); return 0; } if (uevent) dev_dbg(device, "firmware: requesting %s\n", name); fw_priv = fw_create_instance(firmware, name, device, uevent, nowait); if (IS_ERR(fw_priv)) { retval = PTR_ERR(fw_priv); goto out; } if (uevent) { if (loading_timeout > 0) mod_timer(&fw_priv->timeout, round_jiffies_up(jiffies + loading_timeout * HZ)); kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD); } wait_for_completion(&fw_priv->completion); set_bit(FW_STATUS_DONE, &fw_priv->status); del_timer_sync(&fw_priv->timeout); mutex_lock(&fw_lock); if (!fw_priv->fw->size || test_bit(FW_STATUS_ABORT, &fw_priv->status)) retval = -ENOENT; fw_priv->fw = NULL; mutex_unlock(&fw_lock); fw_destroy_instance(fw_priv); out: if (retval) { release_firmware(firmware); *firmware_p = NULL; } return retval; } /** * request_firmware: - send firmware request and wait for it * @firmware_p: pointer to firmware image * @name: name of firmware file * @device: device for which firmware is being loaded * * @firmware_p will be used to return a firmware image by the name * of @name for device @device. * * Should be called from user context where sleeping is allowed. * * @name will be used as $FIRMWARE in the uevent environment and * should be distinctive enough not to be confused with any other * firmware image for this or any other device. 
**/ int request_firmware(const struct firmware **firmware_p, const char *name, struct device *device) { int uevent = 1; return _request_firmware(firmware_p, name, device, uevent, false); } /** * release_firmware: - release the resource associated with a firmware image * @fw: firmware resource to release **/ void release_firmware(const struct firmware *fw) { if (fw) { if (!fw_is_builtin_firmware(fw)) firmware_free_data(fw); kfree(fw); } } /* Async support */ struct firmware_work { struct work_struct work; struct module *module; const char *name; struct device *device; void *context; void (*cont)(const struct firmware *fw, void *context); int uevent; }; static int request_firmware_work_func(void *arg) { struct firmware_work *fw_work = arg; const struct firmware *fw; int ret; if (!arg) { WARN_ON(1); return 0; } ret = _request_firmware(&fw, fw_work->name, fw_work->device, fw_work->uevent, true); fw_work->cont(fw, fw_work->context); module_put(fw_work->module); kfree(fw_work); return ret; } /** * request_firmware_nowait - asynchronous version of request_firmware * @module: module requesting the firmware * @uevent: sends uevent to copy the firmware image if this flag * is non-zero else the firmware copy must be done manually. * @name: name of firmware file * @device: device for which firmware is being loaded * @gfp: allocation flags * @context: will be passed over to @cont, and * @fw may be %NULL if firmware request fails. * @cont: function will be called asynchronously when the firmware * request is over. * * Asynchronous variant of request_firmware() for user contexts where * it is not possible to sleep for long time. It can't be called * in atomic contexts. **/ int request_firmware_nowait( struct module *module, int uevent, const char *name, struct device *device, gfp_t gfp, void *context, void (*cont)(const struct firmware *fw, void *context)) { struct task_struct *task; struct firmware_work *fw_work; fw_work = kzalloc(sizeof (struct firmware_work), gfp); if (!fw_work) return -ENOMEM; fw_work->module = module; fw_work->name = name; fw_work->device = device; fw_work->context = context; fw_work->cont = cont; fw_work->uevent = uevent; if (!try_module_get(module)) { kfree(fw_work); return -EFAULT; } task = kthread_run(request_firmware_work_func, fw_work, "firmware/%s", name); if (IS_ERR(task)) { fw_work->cont(NULL, fw_work->context); module_put(fw_work->module); kfree(fw_work); return PTR_ERR(task); } return 0; } static int __init firmware_class_init(void) { return class_register(&firmware_class); } static void __exit firmware_class_exit(void) { class_unregister(&firmware_class); } fs_initcall(firmware_class_init); module_exit(firmware_class_exit); EXPORT_SYMBOL(release_firmware); EXPORT_SYMBOL(request_firmware); EXPORT_SYMBOL(request_firmware_nowait);
gpl-2.0
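From a driver's point of view, all of the sysfs plumbing in firmware_class.c above (the loading/data attributes, the uevent, the 60-second timeout) collapses into one blocking call. A minimal consumer sketch; demo_load_fw() and the firmware name "demo/fw.bin" are hypothetical, only request_firmware()/release_firmware() come from the file above.

#include <linux/device.h>
#include <linux/firmware.h>

/*
 * Hypothetical consumer: request_firmware() blocks until userspace
 * feeds the image through the loading/data sysfs files, a built-in
 * image is found, or the loading timeout aborts the request.
 */
static int demo_load_fw(struct device *dev)
{
        const struct firmware *fw;
        int ret;

        ret = request_firmware(&fw, "demo/fw.bin", dev);
        if (ret)
                return ret;

        dev_info(dev, "got %zu bytes of firmware\n", fw->size);
        /* ... hand fw->data to the hardware here ... */

        release_firmware(fw);
        return 0;
}

Drivers that cannot sleep in probe would use request_firmware_nowait() from the same file instead, supplying a continuation callback.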
cleech/linux
arch/alpha/kernel/es1888.c
800
1361
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/alpha/kernel/es1888.c
 *
 * Init the built-in ES1888 sound chip (SB16 compatible)
 */

#include <linux/init.h>
#include <asm/io.h>
#include "proto.h"

void __init
es1888_init(void)
{
        /* Sequence of IO reads to init the audio controller */
        inb(0x0229);
        inb(0x0229);
        inb(0x0229);
        inb(0x022b);
        inb(0x0229);
        inb(0x022b);
        inb(0x0229);
        inb(0x0229);
        inb(0x022b);
        inb(0x0229);
        inb(0x0220);                    /* This sets the base address to 0x220 */

        /* Sequence to set DMA channels */
        outb(0x01, 0x0226);             /* reset */
        inb(0x0226);                    /* pause */
        outb(0x00, 0x0226);             /* release reset */
        while (!(inb(0x022e) & 0x80))   /* wait for bit 7 to assert */
                continue;
        inb(0x022a);                    /* pause */
        outb(0xc6, 0x022c);             /* enable extended mode */
        inb(0x022a);                    /* pause, also forces the write */
        while (inb(0x022c) & 0x80)      /* wait for bit 7 to deassert */
                continue;
        outb(0xb1, 0x022c);             /* setup for write to Interrupt CR */
        while (inb(0x022c) & 0x80)      /* wait for bit 7 to deassert */
                continue;
        outb(0x14, 0x022c);             /* set IRQ 5 */
        while (inb(0x022c) & 0x80)      /* wait for bit 7 to deassert */
                continue;
        outb(0xb2, 0x022c);             /* setup for write to DMA CR */
        while (inb(0x022c) & 0x80)      /* wait for bit 7 to deassert */
                continue;
        outb(0x18, 0x022c);             /* set DMA channel 1 */
        inb(0x022c);                    /* force the write */
}
gpl-2.0
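es1888_init() above open-codes the same handshake several times: wait for bit 7 of a read from the DSP write port (0x22c) to deassert, then write a command byte. If one were refactoring it, the pattern could be captured in a helper like the sketch below; es1888_write_cmd() is hypothetical and not in the source, and the busy-flag interpretation assumes standard SB16 DSP semantics.

#include <asm/io.h>

/*
 * Hypothetical refactor: on SB16-compatible hardware, bit 7 of a read
 * from port 0x22c is the "write buffer busy" flag, so a command byte
 * may only be written once it deasserts.
 */
static void es1888_write_cmd(unsigned char cmd)
{
        while (inb(0x022c) & 0x80)      /* wait for bit 7 to deassert */
                continue;
        outb(cmd, 0x022c);
}

With such a helper, the IRQ/DMA setup would reduce to pairs of calls such as es1888_write_cmd(0xb1); es1888_write_cmd(0x14); for the interrupt configuration register.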
mkasick/android_kernel_samsung_jfltespr
drivers/media/video/msm_wfd/enc-venus-subdev.c
1056
30435
/* Copyright (c) 2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/bitmap.h> #include <linux/completion.h> #include <linux/ion.h> #include <linux/kthread.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/wait.h> #include <mach/iommu_domains.h> #include <media/msm_vidc.h> #include <media/v4l2-subdev.h> #include "enc-subdev.h" #include "wfd-util.h" #define BUF_TYPE_OUTPUT V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE #define BUF_TYPE_INPUT V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE static struct ion_client *venc_ion_client; struct index_bitmap { unsigned long *bitmap; int size; int size_bits; /*Size in bits, not necessarily size/8 */ }; struct venc_inst { void *vidc_context; struct mutex lock; struct venc_msg_ops vmops; struct mem_region registered_input_bufs, registered_output_bufs; struct index_bitmap free_input_indices, free_output_indices; int num_output_planes, num_input_planes; struct task_struct *callback_thread; bool callback_thread_running; struct completion dq_complete, cmd_complete; bool secure; int domain; }; int venc_load_fw(struct v4l2_subdev *sd) { /*No need to explicitly load the fw */ return 0; } int venc_init(struct v4l2_subdev *sd, u32 val) { if (!venc_ion_client) venc_ion_client = msm_ion_client_create(-1, "wfd_enc_subdev"); return venc_ion_client ? 0 : -ENOMEM; } static int next_free_index(struct index_bitmap *index_bitmap) { int index = find_first_zero_bit(index_bitmap->bitmap, index_bitmap->size_bits); return (index >= index_bitmap->size_bits) ? 
-1 : index; } static int mark_index_busy(struct index_bitmap *index_bitmap, int index) { if (index > index_bitmap->size_bits) { WFD_MSG_WARN("Marking unknown index as busy\n"); return -EINVAL; } set_bit(index, index_bitmap->bitmap); return 0; } static int mark_index_free(struct index_bitmap *index_bitmap, int index) { if (index > index_bitmap->size_bits) { WFD_MSG_WARN("Marking unknown index as free\n"); return -EINVAL; } clear_bit(index, index_bitmap->bitmap); return 0; } static int get_list_len(struct mem_region *list) { struct mem_region *curr = NULL; int index = 0; list_for_each_entry(curr, &list->list, list) { ++index; } return index; } static struct mem_region *get_registered_mregion(struct mem_region *list, struct mem_region *mregion) { struct mem_region *curr = NULL; list_for_each_entry(curr, &list->list, list) { if (unlikely(mem_region_equals(curr, mregion))) return curr; } return NULL; } static int venc_vidc_callback_thread(void *data) { struct venc_inst *inst = data; WFD_MSG_DBG("Starting callback thread\n"); while (!kthread_should_stop()) { bool dequeue_buf = false; struct v4l2_buffer buffer = {0}; struct v4l2_event event = {0}; int num_planes = 0; int flags = msm_vidc_wait(inst->vidc_context); if (flags & POLLERR) { WFD_MSG_ERR("Encoder reported error\n"); break; } if (flags & POLLPRI) { bool bail_out = false; msm_vidc_dqevent(inst->vidc_context, &event); if (event.type == V4L2_EVENT_MSM_VIDC_CLOSE_DONE) { WFD_MSG_ERR("enc callback thread shutting " \ "down normally\n"); bail_out = true; } else { WFD_MSG_ERR("Got unknown event %d, ignoring\n", event.id); } complete_all(&inst->cmd_complete); if (bail_out) break; } if (flags & POLLIN || flags & POLLRDNORM) { buffer.type = BUF_TYPE_OUTPUT; dequeue_buf = true; num_planes = inst->num_output_planes; WFD_MSG_DBG("Output buffer ready!\n"); } if (flags & POLLOUT || flags & POLLWRNORM) { buffer.type = BUF_TYPE_INPUT; dequeue_buf = true; num_planes = inst->num_input_planes; WFD_MSG_DBG("Input buffer ready!\n"); } if (dequeue_buf) { int rc = 0; struct v4l2_plane *planes = NULL; struct mem_region *curr = NULL, *mregion = NULL; struct list_head *reg_bufs = NULL; struct index_bitmap *bitmap = NULL; planes = kzalloc(sizeof(*planes) * num_planes, GFP_KERNEL); buffer.m.planes = planes; buffer.length = 1; buffer.memory = V4L2_MEMORY_USERPTR; rc = msm_vidc_dqbuf(inst->vidc_context, &buffer); if (rc) { WFD_MSG_ERR("Error dequeuing buffer " \ "from vidc: %d", rc); goto abort_dequeue; } reg_bufs = buffer.type == BUF_TYPE_OUTPUT ? &inst->registered_output_bufs.list : &inst->registered_input_bufs.list; bitmap = buffer.type == BUF_TYPE_OUTPUT ? 
&inst->free_output_indices : &inst->free_input_indices; list_for_each_entry(curr, reg_bufs, list) { if ((u32)curr->paddr == buffer.m.planes[0].m.userptr) { mregion = curr; break; } } if (!mregion) { WFD_MSG_ERR("Got done msg for unknown buf\n"); goto abort_dequeue; } if (buffer.type == BUF_TYPE_OUTPUT && inst->vmops.op_buffer_done) { struct vb2_buffer *vb = (struct vb2_buffer *)mregion->cookie; vb->v4l2_buf.flags = buffer.flags; vb->v4l2_buf.timestamp = buffer.timestamp; vb->v4l2_planes[0].bytesused = buffer.m.planes[0].bytesused; inst->vmops.op_buffer_done( inst->vmops.cbdata, 0, vb); } else if (buffer.type == BUF_TYPE_INPUT && inst->vmops.ip_buffer_done) { inst->vmops.ip_buffer_done( inst->vmops.cbdata, 0, mregion); } complete_all(&inst->dq_complete); mutex_lock(&inst->lock); mark_index_free(bitmap, buffer.index); mutex_unlock(&inst->lock); abort_dequeue: kfree(planes); } } WFD_MSG_DBG("Exiting callback thread\n"); mutex_lock(&inst->lock); inst->callback_thread_running = false; mutex_unlock(&inst->lock); return 0; } static long venc_open(struct v4l2_subdev *sd, void *arg) { struct venc_inst *inst = NULL; struct venc_msg_ops *vmops = arg; struct v4l2_event_subscription event = {0}; struct msm_vidc_iommu_info maps[MAX_MAP]; int rc = 0; if (!vmops) { WFD_MSG_ERR("Callbacks required for %s\n", __func__); rc = -EINVAL; goto venc_open_fail; } else if (!sd) { WFD_MSG_ERR("Subdevice required for %s\n", __func__); rc = -EINVAL; goto venc_open_fail; } inst = kzalloc(sizeof(*inst), GFP_KERNEL); if (!inst) { WFD_MSG_ERR("Failed to allocate memory\n"); rc = -EINVAL; goto venc_open_fail; } inst->secure = false; inst->vmops = *vmops; INIT_LIST_HEAD(&inst->registered_output_bufs.list); INIT_LIST_HEAD(&inst->registered_input_bufs.list); init_completion(&inst->dq_complete); init_completion(&inst->cmd_complete); mutex_init(&inst->lock); inst->vidc_context = msm_vidc_open(MSM_VIDC_CORE_0, MSM_VIDC_ENCODER); if (!inst->vidc_context) { WFD_MSG_ERR("Failed to create vidc context\n"); rc = -ENXIO; goto vidc_open_fail; } event.type = V4L2_EVENT_MSM_VIDC_CLOSE_DONE; rc = msm_vidc_subscribe_event(inst->vidc_context, &event); if (rc) { WFD_MSG_ERR("Failed to subscribe to CLOSE_DONE event\n"); goto vidc_subscribe_fail; } event.type = V4L2_EVENT_MSM_VIDC_FLUSH_DONE; rc = msm_vidc_subscribe_event(inst->vidc_context, &event); if (rc) { WFD_MSG_ERR("Failed to subscribe to FLUSH_DONE event\n"); goto vidc_subscribe_fail; } rc = msm_vidc_get_iommu_maps(inst->vidc_context, maps); if (rc) { WFD_MSG_ERR("Failed to retreive domain mappings\n"); rc = -ENODATA; goto vidc_subscribe_fail; } inst->domain = maps[inst->secure ? 
CP_MAP : NS_MAP].domain; inst->callback_thread = kthread_run(venc_vidc_callback_thread, inst, "venc_vidc_callback_thread"); if (IS_ERR(inst->callback_thread)) { WFD_MSG_ERR("Failed to create callback thread\n"); rc = PTR_ERR(inst->callback_thread); inst->callback_thread = NULL; goto vidc_kthread_create_fail; } inst->callback_thread_running = true; sd->dev_priv = inst; vmops->cookie = inst; return 0; vidc_kthread_create_fail: event.type = V4L2_EVENT_MSM_VIDC_CLOSE_DONE; msm_vidc_unsubscribe_event(inst->vidc_context, &event); event.type = V4L2_EVENT_MSM_VIDC_FLUSH_DONE; msm_vidc_unsubscribe_event(inst->vidc_context, &event); vidc_subscribe_fail: msm_vidc_close(inst->vidc_context); vidc_open_fail: kfree(inst); venc_open_fail: return rc; } static long venc_close(struct v4l2_subdev *sd, void *arg) { struct venc_inst *inst = NULL; struct v4l2_event_subscription event = {0}; struct v4l2_encoder_cmd enc_cmd = {0}; int rc = 0; if (!sd) { WFD_MSG_ERR("Subdevice required for %s\n", __func__); rc = -EINVAL; goto venc_close_fail; } inst = (struct venc_inst *)sd->dev_priv; enc_cmd.cmd = V4L2_ENC_CMD_STOP; msm_vidc_encoder_cmd(inst->vidc_context, &enc_cmd); wait_for_completion(&inst->cmd_complete); if (inst->callback_thread && inst->callback_thread_running) kthread_stop(inst->callback_thread); event.type = V4L2_EVENT_MSM_VIDC_CLOSE_DONE; rc = msm_vidc_unsubscribe_event(inst->vidc_context, &event); if (rc) WFD_MSG_WARN("Failed to unsubscribe close event\n"); event.type = V4L2_EVENT_MSM_VIDC_FLUSH_DONE; rc = msm_vidc_unsubscribe_event(inst->vidc_context, &event); if (rc) WFD_MSG_WARN("Failed to unsubscribe flush event\n"); rc = msm_vidc_close(inst->vidc_context); if (rc) WFD_MSG_WARN("Failed to close vidc context\n"); kfree(inst); sd->dev_priv = inst = NULL; venc_close_fail: return rc; } static long venc_get_buffer_req(struct v4l2_subdev *sd, void *arg) { int rc = 0; struct venc_inst *inst = NULL; struct bufreq *bufreq = arg; struct v4l2_requestbuffers v4l2_bufreq = {0}; struct v4l2_format v4l2_format = {0}; if (!sd) { WFD_MSG_ERR("Subdevice required for %s\n", __func__); rc = -EINVAL; goto venc_buf_req_fail; } else if (!arg) { WFD_MSG_ERR("Invalid buffer requirements\n"); rc = -EINVAL; goto venc_buf_req_fail; } inst = (struct venc_inst *)sd->dev_priv; /* Get buffer count */ v4l2_bufreq = (struct v4l2_requestbuffers) { .count = bufreq->count, .type = BUF_TYPE_OUTPUT, .memory = V4L2_MEMORY_USERPTR, }; rc = msm_vidc_reqbufs(inst->vidc_context, &v4l2_bufreq); if (rc) { WFD_MSG_ERR("Failed getting buffer requirements\n"); goto venc_buf_req_fail; } /* Get buffer size */ v4l2_format.type = BUF_TYPE_OUTPUT; rc = msm_vidc_g_fmt(inst->vidc_context, &v4l2_format); if (rc) { WFD_MSG_ERR("Failed getting OP buffer size\n"); goto venc_buf_req_fail; } bufreq->count = v4l2_bufreq.count; bufreq->size = v4l2_format.fmt.pix_mp.plane_fmt[0].sizeimage; inst->free_output_indices.size_bits = bufreq->count; inst->free_output_indices.size = roundup(bufreq->count, sizeof(unsigned long)) / sizeof(unsigned long); inst->free_output_indices.bitmap = kzalloc(inst->free_output_indices. 
size, GFP_KERNEL); venc_buf_req_fail: return rc; } static long venc_set_buffer_req(struct v4l2_subdev *sd, void *arg) { int rc = 0; struct venc_inst *inst = NULL; struct bufreq *bufreq = arg; struct v4l2_requestbuffers v4l2_bufreq = {0}; struct v4l2_format v4l2_format = {0}; if (!sd) { WFD_MSG_ERR("Subdevice required for %s\n", __func__); rc = -EINVAL; goto venc_buf_req_fail; } else if (!arg) { WFD_MSG_ERR("Invalid buffer requirements\n"); rc = -EINVAL; goto venc_buf_req_fail; } inst = (struct venc_inst *)sd->dev_priv; /* Attempt to set buffer count */ v4l2_bufreq = (struct v4l2_requestbuffers) { .count = bufreq->count, .type = BUF_TYPE_INPUT, .memory = V4L2_MEMORY_USERPTR, }; rc = msm_vidc_reqbufs(inst->vidc_context, &v4l2_bufreq); if (rc) { WFD_MSG_ERR("Failed getting buffer requirements"); goto venc_buf_req_fail; } /* Get buffer size */ v4l2_format.type = BUF_TYPE_INPUT; rc = msm_vidc_g_fmt(inst->vidc_context, &v4l2_format); if (rc) { WFD_MSG_ERR("Failed getting OP buffer size\n"); goto venc_buf_req_fail; } bufreq->count = v4l2_bufreq.count; bufreq->size = v4l2_format.fmt.pix_mp.plane_fmt[0].sizeimage; inst->free_input_indices.size_bits = bufreq->count; inst->free_input_indices.size = roundup(bufreq->count, sizeof(unsigned long)) / sizeof(unsigned long); inst->free_input_indices.bitmap = kzalloc(inst->free_input_indices. size, GFP_KERNEL); venc_buf_req_fail: return rc; } static long venc_start(struct v4l2_subdev *sd) { struct venc_inst *inst = NULL; int rc = 0; if (!sd) { WFD_MSG_ERR("Subdevice required for %s\n", __func__); rc = -EINVAL; goto venc_start_fail; } inst = (struct venc_inst *)sd->dev_priv; rc = msm_vidc_streamon(inst->vidc_context, BUF_TYPE_OUTPUT); if (rc) { WFD_MSG_ERR("Failed to streamon vidc's output port"); goto venc_start_fail; } rc = msm_vidc_streamon(inst->vidc_context, BUF_TYPE_INPUT); if (rc) { WFD_MSG_ERR("Failed to streamon vidc's input port"); goto venc_start_fail; } venc_start_fail: return rc; } static long venc_stop(struct v4l2_subdev *sd) { struct venc_inst *inst = NULL; int rc = 0; if (!sd) { WFD_MSG_ERR("Subdevice required for %s\n", __func__); rc = -EINVAL; goto venc_stop_fail; } inst = (struct venc_inst *)sd->dev_priv; rc = msm_vidc_streamoff(inst->vidc_context, BUF_TYPE_INPUT); if (rc) { WFD_MSG_ERR("Failed to streamoff vidc's input port"); goto venc_stop_fail; } rc = msm_vidc_streamoff(inst->vidc_context, BUF_TYPE_OUTPUT); if (rc) { WFD_MSG_ERR("Failed to streamoff vidc's output port"); goto venc_stop_fail; } venc_stop_fail: return rc; } static long venc_set_input_buffer(struct v4l2_subdev *sd, void *arg) { int rc = 0; struct venc_inst *inst = NULL; struct v4l2_buffer buf = {0}; struct v4l2_plane plane = {0}; struct mem_region *mregion = arg; if (!sd) { WFD_MSG_ERR("Subdevice required for %s\n", __func__); rc = -EINVAL; goto set_input_buffer_fail; } else if (!arg) { WFD_MSG_ERR("Invalid input buffer\n"); rc = -EINVAL; goto set_input_buffer_fail; } inst = (struct venc_inst *)sd->dev_priv; if (get_registered_mregion(&inst->registered_input_bufs, mregion)) { WFD_MSG_ERR("Duplicate input buffer\n"); rc = -EEXIST; goto set_input_buffer_fail; } mregion = kzalloc(sizeof(*mregion), GFP_KERNEL); *mregion = *(struct mem_region *)arg; plane = (struct v4l2_plane) { .length = mregion->size, .m.userptr = (u32)mregion->paddr, }; buf = (struct v4l2_buffer) { .index = get_list_len(&inst->registered_input_bufs), .type = BUF_TYPE_INPUT, .bytesused = 0, .memory = V4L2_MEMORY_USERPTR, .m.planes = &plane, .length = 1, }; WFD_MSG_DBG("Prepare %p with index, %d", (void 
*)buf.m.planes[0].m.userptr, buf.index); rc = msm_vidc_prepare_buf(inst->vidc_context, &buf); if (rc) { WFD_MSG_ERR("Failed to prepare input buffer\n"); goto set_input_buffer_fail; } list_add_tail(&mregion->list, &inst->registered_input_bufs.list); return 0; set_input_buffer_fail: kfree(mregion); return rc; } static int venc_map_user_to_kernel(struct venc_inst *inst, struct mem_region *mregion) { int rc = 0; unsigned long flags = 0, size = 0; if (!mregion) { rc = -EINVAL; goto venc_map_fail; } mregion->ion_handle = ion_import_dma_buf(venc_ion_client, mregion->fd); if (IS_ERR_OR_NULL(mregion->ion_handle)) { rc = PTR_ERR(mregion->ion_handle); WFD_MSG_ERR("Failed to get handle: %p, %d, %d, %d\n", venc_ion_client, mregion->fd, mregion->offset, rc); mregion->ion_handle = NULL; goto venc_map_fail; } rc = ion_handle_get_flags(venc_ion_client, mregion->ion_handle, &flags); if (rc) { WFD_MSG_ERR("Failed to get ion flags %d\n", rc); goto venc_map_fail; } mregion->kvaddr = ion_map_kernel(venc_ion_client, mregion->ion_handle, flags); if (IS_ERR_OR_NULL(mregion->kvaddr)) { WFD_MSG_ERR("Failed to map buffer into kernel\n"); rc = PTR_ERR(mregion->kvaddr); mregion->kvaddr = NULL; goto venc_map_fail; } rc = ion_map_iommu(venc_ion_client, mregion->ion_handle, inst->domain, 0, SZ_4K, 0, (unsigned long *)&mregion->paddr, &size, flags, 0); if (rc) { WFD_MSG_ERR("Failed to map into iommu\n"); goto venc_map_iommu_map_fail; } else if (size < mregion->size) { WFD_MSG_ERR("Failed to iommu map the correct size\n"); goto venc_map_iommu_size_fail; } return 0; venc_map_iommu_size_fail: ion_unmap_iommu(venc_ion_client, mregion->ion_handle, inst->domain, 0); venc_map_iommu_map_fail: ion_unmap_kernel(venc_ion_client, mregion->ion_handle); venc_map_fail: return rc; } static int venc_unmap_user_to_kernel(struct venc_inst *inst, struct mem_region *mregion) { if (!mregion || !mregion->ion_handle) return 0; if (mregion->paddr) { ion_unmap_iommu(venc_ion_client, mregion->ion_handle, inst->domain, 0); mregion->paddr = NULL; } if (mregion->kvaddr) { ion_unmap_kernel(venc_ion_client, mregion->ion_handle); mregion->kvaddr = NULL; } return 0; } static long venc_set_output_buffer(struct v4l2_subdev *sd, void *arg) { int rc = 0; struct venc_inst *inst = NULL; struct v4l2_buffer buf = {0}; struct v4l2_plane plane = {0}; struct mem_region *mregion = arg; if (!sd) { WFD_MSG_ERR("Subdevice required for %s\n", __func__); rc = -EINVAL; goto venc_set_output_buffer_fail; } else if (!mregion) { WFD_MSG_ERR("Invalid output buffer\n"); rc = -EINVAL; goto venc_set_output_buffer_fail; } inst = (struct venc_inst *)sd->dev_priv; /* Check if buf already registered */ if (get_registered_mregion(&inst->registered_output_bufs, mregion)) { WFD_MSG_ERR("Duplicate output buffer\n"); rc = -EEXIST; goto venc_set_output_buffer_fail; } mregion = kzalloc(sizeof(*mregion), GFP_KERNEL); if (!mregion) { WFD_MSG_ERR("Failed to allocate memory\n"); goto venc_set_output_buffer_fail; } *mregion = *(struct mem_region *)arg; INIT_LIST_HEAD(&mregion->list); rc = venc_map_user_to_kernel(inst, mregion); if (rc) { WFD_MSG_ERR("Failed to map output buffer\n"); goto venc_set_output_buffer_map_fail; } plane = (struct v4l2_plane) { .length = mregion->size, .m.userptr = (u32)mregion->paddr, }; buf = (struct v4l2_buffer) { .index = get_list_len(&inst->registered_output_bufs), .type = BUF_TYPE_OUTPUT, .bytesused = 0, .memory = V4L2_MEMORY_USERPTR, .m.planes = &plane, .length = 1, }; WFD_MSG_DBG("Prepare %p with index, %d", (void *)buf.m.planes[0].m.userptr, buf.index); rc = 
msm_vidc_prepare_buf(inst->vidc_context, &buf); if (rc) { WFD_MSG_ERR("Failed to prepare output buffer\n"); goto venc_set_output_buffer_prepare_fail; } list_add_tail(&mregion->list, &inst->registered_output_bufs.list); return rc; venc_set_output_buffer_prepare_fail: venc_unmap_user_to_kernel(inst, mregion); venc_set_output_buffer_map_fail: kfree(mregion); venc_set_output_buffer_fail: return rc; } static long venc_set_format(struct v4l2_subdev *sd, void *arg) { struct venc_inst *inst = NULL; struct v4l2_format *fmt = arg, temp; int rc = 0; if (!sd) { WFD_MSG_ERR("Subdevice required for %s\n", __func__); rc = -EINVAL; goto venc_set_format_fail; } else if (!fmt) { WFD_MSG_ERR("Invalid format\n"); rc = -EINVAL; goto venc_set_format_fail; } else if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { WFD_MSG_ERR("Invalid buffer type %d\n", fmt->type); rc = -ENOTSUPP; goto venc_set_format_fail; } inst = (struct venc_inst *)sd->dev_priv; temp = (struct v4l2_format) { .type = BUF_TYPE_OUTPUT, .fmt.pix_mp = (struct v4l2_pix_format_mplane) { .width = fmt->fmt.pix.width, .height = fmt->fmt.pix.height, .pixelformat = fmt->fmt.pix.pixelformat, }, }; rc = msm_vidc_s_fmt(inst->vidc_context, &temp); if (rc) { WFD_MSG_ERR("Failed to set format on output port\n"); goto venc_set_format_fail; } else if (!temp.fmt.pix_mp.num_planes) { WFD_MSG_ERR("Invalid number of planes for output buffers\n"); rc = -EINVAL; goto venc_set_format_fail; } fmt->fmt.pix.sizeimage = temp.fmt.pix_mp.plane_fmt[0].sizeimage; inst->num_output_planes = temp.fmt.pix_mp.num_planes; temp.type = BUF_TYPE_INPUT; temp.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12; rc = msm_vidc_s_fmt(inst->vidc_context, &temp); if (rc) { WFD_MSG_ERR("Failed to set format on input port\n"); goto venc_set_format_fail; } inst->num_input_planes = temp.fmt.pix_mp.num_planes; venc_set_format_fail: return rc; } static long venc_set_framerate(struct v4l2_subdev *sd, void *arg) { struct venc_inst *inst = NULL; struct v4l2_control ctrl = {0}; if (!sd) { WFD_MSG_ERR("Subdevice required for %s\n", __func__); return -EINVAL; } else if (!arg) { WFD_MSG_ERR("Invalid framerate\n"); return -EINVAL; } inst = (struct venc_inst *)sd->dev_priv; ctrl.id = V4L2_CID_MPEG_VIDC_VIDEO_FRAME_RATE; /* TODO: honour the framerate passed in arg; currently hard-coded to 30fps */ ctrl.value = 30; return msm_vidc_s_ctrl(inst->vidc_context, &ctrl); } static long venc_fill_outbuf(struct v4l2_subdev *sd, void *arg) { struct venc_inst *inst = NULL; struct mem_region *mregion = NULL; struct v4l2_buffer buffer = {0}; struct v4l2_plane plane = {0}; int index = 0, rc = 0; if (!sd) { WFD_MSG_ERR("Subdevice required for %s\n", __func__); return -EINVAL; } else if (!arg) { WFD_MSG_ERR("Invalid output buffer to fill\n"); return -EINVAL; } inst = (struct venc_inst *)sd->dev_priv; mregion = get_registered_mregion(&inst->registered_output_bufs, arg); if (!mregion) { WFD_MSG_ERR("Output buffer not registered\n"); return -ENOENT; } plane = (struct v4l2_plane) { .length = mregion->size, .m.userptr = (u32)mregion->paddr, }; while (true) { mutex_lock(&inst->lock); index = next_free_index(&inst->free_output_indices); mutex_unlock(&inst->lock); if (index < 0) wait_for_completion(&inst->dq_complete); else break; } buffer = (struct v4l2_buffer) { .index = index, .type = BUF_TYPE_OUTPUT, .memory = V4L2_MEMORY_USERPTR, .m.planes = &plane, .length = 1, }; WFD_MSG_DBG("Fill buffer %p with index, %d", (void *)buffer.m.planes[0].m.userptr, buffer.index); rc = msm_vidc_qbuf(inst->vidc_context, &buffer); if (!rc) { mutex_lock(&inst->lock); mark_index_busy(&inst->free_output_indices, index);
mutex_unlock(&inst->lock); } return rc; } static long venc_encode_frame(struct v4l2_subdev *sd, void *arg) { struct venc_inst *inst = NULL; struct venc_buf_info *venc_buf = arg; struct mem_region *mregion = NULL; struct v4l2_buffer buffer = {0}; struct v4l2_plane plane = {0}; int index = 0, rc = 0; if (!sd) { WFD_MSG_ERR("Subdevice required for %s\n", __func__); return -EINVAL; } else if (!venc_buf) { WFD_MSG_ERR("Invalid buffer to encode\n"); return -EINVAL; } inst = (struct venc_inst *)sd->dev_priv; mregion = venc_buf->mregion; plane = (struct v4l2_plane) { .length = mregion->size, .m.userptr = (u32)mregion->paddr, .bytesused = mregion->size, }; while (true) { mutex_lock(&inst->lock); index = next_free_index(&inst->free_input_indices); mutex_unlock(&inst->lock); if (index < 0) wait_for_completion(&inst->dq_complete); else break; } buffer = (struct v4l2_buffer) { .index = index, .type = BUF_TYPE_INPUT, .timestamp = ns_to_timeval(venc_buf->timestamp), .memory = V4L2_MEMORY_USERPTR, .m.planes = &plane, .length = 1, }; WFD_MSG_DBG("Encode buffer %p with index, %d", (void *)buffer.m.planes[0].m.userptr, buffer.index); rc = msm_vidc_qbuf(inst->vidc_context, &buffer); if (!rc) { mutex_lock(&inst->lock); mark_index_busy(&inst->free_input_indices, index); mutex_unlock(&inst->lock); } return rc; } static long venc_alloc_recon_buffers(struct v4l2_subdev *sd, void *arg) { /* vidc driver allocates internally on streamon */ return 0; } static long venc_free_buffer(struct venc_inst *inst, int type, struct mem_region *to_free, bool unmap_user_buffer) { struct mem_region *mregion = NULL; struct mem_region *buf_list = NULL; if (type == BUF_TYPE_OUTPUT) { buf_list = &inst->registered_output_bufs; } else if (type == BUF_TYPE_INPUT) { buf_list = &inst->registered_input_bufs; } else { WFD_MSG_ERR("Trying to free a buffer of unknown type\n"); return -EINVAL; } mregion = get_registered_mregion(buf_list, to_free); if (!mregion) { WFD_MSG_ERR("Buffer not registered, cannot free\n"); return -ENOENT; } if (unmap_user_buffer) { int rc = venc_unmap_user_to_kernel(inst, mregion); if (rc) WFD_MSG_WARN("Unable to unmap user buffer\n"); } list_del(&mregion->list); kfree(mregion); return 0; } static long venc_free_output_buffer(struct v4l2_subdev *sd, void *arg) { int rc = 0; struct venc_inst *inst = NULL; if (!sd) { WFD_MSG_ERR("Subdevice required for %s\n", __func__); rc = -EINVAL; goto venc_free_output_buffer_fail; } else if (!arg) { WFD_MSG_ERR("Invalid output buffer\n"); rc = -EINVAL; goto venc_free_output_buffer_fail; } inst = (struct venc_inst *)sd->dev_priv; return venc_free_buffer(inst, BUF_TYPE_OUTPUT, arg, true); venc_free_output_buffer_fail: return rc; } static long venc_flush_buffers(struct v4l2_subdev *sd, void *arg) { struct venc_inst *inst = NULL; struct v4l2_encoder_cmd enc_cmd = {0}; int rc = 0; if (!sd) { WFD_MSG_ERR("Subdevice required for %s\n", __func__); rc = -EINVAL; goto venc_flush_buffers_fail; } inst = (struct venc_inst *)sd->dev_priv; enc_cmd.cmd = V4L2_ENC_QCOM_CMD_FLUSH; enc_cmd.flags = V4L2_QCOM_CMD_FLUSH_OUTPUT | V4L2_QCOM_CMD_FLUSH_CAPTURE; rc = msm_vidc_encoder_cmd(inst->vidc_context, &enc_cmd); if (rc) { WFD_MSG_ERR("Failed to issue flush command\n"); goto venc_flush_buffers_fail; } wait_for_completion(&inst->cmd_complete); venc_flush_buffers_fail: return rc; } static long venc_free_input_buffer(struct v4l2_subdev *sd, void *arg) { int rc = 0; struct venc_inst *inst = NULL; if (!sd) { WFD_MSG_ERR("Subdevice required for %s\n", __func__); rc = -EINVAL; goto venc_free_input_buffer_fail; } else if (!arg) { WFD_MSG_ERR("Invalid input buffer\n"); rc = -EINVAL; goto
venc_free_input_buffer_fail; } inst = (struct venc_inst *)sd->dev_priv; return venc_free_buffer(inst, BUF_TYPE_INPUT, arg, false); venc_free_input_buffer_fail: return rc; } static long venc_free_recon_buffers(struct v4l2_subdev *sd, void *arg) { /* vidc driver takes care of this */ return 0; } static long venc_set_property(struct v4l2_subdev *sd, void *arg) { struct venc_inst *inst = NULL; struct v4l2_control *ctrl = arg; if (!sd) { WFD_MSG_ERR("Subdevice required for %s\n", __func__); return -EINVAL; } else if (!ctrl) { WFD_MSG_ERR("Invalid control\n"); return -EINVAL; } inst = (struct venc_inst *)sd->dev_priv; if (ctrl->id == V4L2_CID_MPEG_VIDEO_HEADER_MODE) { /* XXX: We don't support this yet, but to prevent unnecessary * target specific code for the client, we'll not error out. * The client ideally shouldn't notice this */ return 0; } return msm_vidc_s_ctrl(inst->vidc_context, (struct v4l2_control *)arg); } static long venc_get_property(struct v4l2_subdev *sd, void *arg) { struct venc_inst *inst = NULL; if (!sd) { WFD_MSG_ERR("Subdevice required for %s\n", __func__); return -EINVAL; } inst = (struct venc_inst *)sd->dev_priv; return msm_vidc_g_ctrl(inst->vidc_context, (struct v4l2_control *)arg); } long venc_mmap(struct v4l2_subdev *sd, void *arg) { struct mem_region_map *mmap = arg; struct mem_region *mregion = NULL; int rc = 0; unsigned long size = 0; void *paddr = NULL; struct venc_inst *inst = NULL; if (!sd) { WFD_MSG_ERR("Subdevice required for %s\n", __func__); return -EINVAL; } else if (!mmap || !mmap->mregion) { WFD_MSG_ERR("Memregion required for %s\n", __func__); return -EINVAL; } inst = (struct venc_inst *)sd->dev_priv; mregion = mmap->mregion; if (mregion->size % SZ_4K != 0) { WFD_MSG_ERR("Memregion size not aligned to %d\n", SZ_4K); return -EINVAL; } rc = ion_map_iommu(mmap->ion_client, mregion->ion_handle, inst->domain, 0, SZ_4K, 0, (unsigned long *)&paddr, &size, 0, 0); if (rc) { WFD_MSG_ERR("Failed to get physical addr\n"); paddr = NULL; } else if (size < mregion->size) { WFD_MSG_ERR("Failed to map enough memory\n"); rc = -ENOMEM; } mregion->paddr = paddr; return rc; } long venc_munmap(struct v4l2_subdev *sd, void *arg) { struct mem_region_map *mmap = arg; struct mem_region *mregion = NULL; struct venc_inst *inst = NULL; if (!sd) { WFD_MSG_ERR("Subdevice required for %s\n", __func__); return -EINVAL; } else if (!mmap || !mmap->mregion) { WFD_MSG_ERR("Memregion required for %s\n", __func__); return -EINVAL; } inst = (struct venc_inst *)sd->dev_priv; mregion = mmap->mregion; ion_unmap_iommu(mmap->ion_client, mregion->ion_handle, inst->domain, 0); return 0; } static long venc_set_framerate_mode(struct v4l2_subdev *sd, void *arg) { /* TODO: Unsupported for now, but return false success * to preserve binary compatibility for userspace apps * across targets */ return 0; } long venc_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) { long rc = 0; switch (cmd) { case OPEN: rc = venc_open(sd, arg); break; case CLOSE: rc = venc_close(sd, arg); break; case ENCODE_START: rc = venc_start(sd); break; case ENCODE_FRAME: rc = venc_encode_frame(sd, arg); break; case ENCODE_STOP: rc = venc_stop(sd); break; case SET_PROP: rc = venc_set_property(sd, arg); break; case GET_PROP: rc = venc_get_property(sd, arg); break; case GET_BUFFER_REQ: rc = venc_get_buffer_req(sd, arg); break; case SET_BUFFER_REQ: rc = venc_set_buffer_req(sd, arg); break; case FREE_BUFFER: /* no-op */ break; case FILL_OUTPUT_BUFFER: rc = venc_fill_outbuf(sd, arg); break; case SET_FORMAT: rc = venc_set_format(sd, arg); break; case SET_FRAMERATE: rc = venc_set_framerate(sd, arg); break; case SET_INPUT_BUFFER:
rc = venc_set_input_buffer(sd, arg); break; case SET_OUTPUT_BUFFER: rc = venc_set_output_buffer(sd, arg); break; case ALLOC_RECON_BUFFERS: rc = venc_alloc_recon_buffers(sd, arg); break; case FREE_OUTPUT_BUFFER: rc = venc_free_output_buffer(sd, arg); break; case FREE_INPUT_BUFFER: rc = venc_free_input_buffer(sd, arg); break; case FREE_RECON_BUFFERS: rc = venc_free_recon_buffers(sd, arg); break; case ENCODE_FLUSH: rc = venc_flush_buffers(sd, arg); break; case ENC_MMAP: rc = venc_mmap(sd, arg); break; case ENC_MUNMAP: rc = venc_munmap(sd, arg); break; case SET_FRAMERATE_MODE: rc = venc_set_framerate_mode(sd, arg); break; default: WFD_MSG_ERR("Unknown ioctl %d to enc-subdev\n", cmd); rc = -ENOTSUPP; break; } return rc; }
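/*
 * Editor's note: illustrative sketch, not part of the driver above.
 * venc_fill_outbuf() and venc_encode_frame() track free v4l2 buffer
 * indices in a bitmap (free_input_indices / free_output_indices) and
 * loop on next_free_index() until one is available. The standalone
 * userspace model below shows that index-pool pattern; the exact
 * semantics of the driver's next_free_index()/mark_index_busy()
 * helpers are assumptions inferred from their call sites.
 */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

struct index_pool {
	unsigned long *bitmap; /* bit set => index busy */
	int size_bits;         /* number of usable indices */
};

static int pool_init(struct index_pool *p, int nindices)
{
	p->size_bits = nindices;
	p->bitmap = calloc((nindices + BITS_PER_LONG - 1) / BITS_PER_LONG,
			   sizeof(unsigned long));
	return p->bitmap ? 0 : -1;
}

/* Return the first free index, or -1 if all are busy. */
static int next_free_index(struct index_pool *p)
{
	int i;

	for (i = 0; i < p->size_bits; i++)
		if (!(p->bitmap[i / BITS_PER_LONG] &
		      (1UL << (i % BITS_PER_LONG))))
			return i;
	return -1;
}

static void mark_index_busy(struct index_pool *p, int i)
{
	p->bitmap[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
}

static void mark_index_free(struct index_pool *p, int i)
{
	p->bitmap[i / BITS_PER_LONG] &= ~(1UL << (i % BITS_PER_LONG));
}

int main(void)
{
	struct index_pool p;

	if (pool_init(&p, 4))
		return 1;
	mark_index_busy(&p, 0);
	mark_index_busy(&p, 1);
	printf("next free index: %d\n", next_free_index(&p)); /* prints 2 */
	mark_index_free(&p, 0);
	printf("next free index: %d\n", next_free_index(&p)); /* prints 0 */
	free(p.bitmap);
	return 0;
}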
gpl-2.0
Kali-/lge-kernel-msm7x30
arch/powerpc/platforms/embedded6xx/flipper-pic.c
1056
5619
/* * arch/powerpc/platforms/embedded6xx/flipper-pic.c * * Nintendo GameCube/Wii "Flipper" interrupt controller support. * Copyright (C) 2004-2009 The GameCube Linux Team * Copyright (C) 2007,2008,2009 Albert Herranz * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * */ #define DRV_MODULE_NAME "flipper-pic" #define pr_fmt(fmt) DRV_MODULE_NAME ": " fmt #include <linux/kernel.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/of.h> #include <asm/io.h> #include "flipper-pic.h" #define FLIPPER_NR_IRQS 32 /* * Each interrupt has a corresponding bit in both * the Interrupt Cause (ICR) and Interrupt Mask (IMR) registers. * * Enabling/disabling an interrupt line involves setting/clearing * the corresponding bit in IMR. * Except for the RSW interrupt, all interrupts get deasserted automatically * when the source deasserts the interrupt. */ #define FLIPPER_ICR 0x00 #define FLIPPER_ICR_RSS (1<<16) /* reset switch state */ #define FLIPPER_IMR 0x04 #define FLIPPER_RESET 0x24 /* * IRQ chip hooks. * */ static void flipper_pic_mask_and_ack(unsigned int virq) { int irq = virq_to_hw(virq); void __iomem *io_base = get_irq_chip_data(virq); u32 mask = 1 << irq; clrbits32(io_base + FLIPPER_IMR, mask); /* this is at least needed for RSW */ out_be32(io_base + FLIPPER_ICR, mask); } static void flipper_pic_ack(unsigned int virq) { int irq = virq_to_hw(virq); void __iomem *io_base = get_irq_chip_data(virq); /* this is at least needed for RSW */ out_be32(io_base + FLIPPER_ICR, 1 << irq); } static void flipper_pic_mask(unsigned int virq) { int irq = virq_to_hw(virq); void __iomem *io_base = get_irq_chip_data(virq); clrbits32(io_base + FLIPPER_IMR, 1 << irq); } static void flipper_pic_unmask(unsigned int virq) { int irq = virq_to_hw(virq); void __iomem *io_base = get_irq_chip_data(virq); setbits32(io_base + FLIPPER_IMR, 1 << irq); } static struct irq_chip flipper_pic = { .name = "flipper-pic", .ack = flipper_pic_ack, .mask_ack = flipper_pic_mask_and_ack, .mask = flipper_pic_mask, .unmask = flipper_pic_unmask, }; /* * IRQ host hooks. * */ static struct irq_host *flipper_irq_host; static int flipper_pic_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hwirq) { set_irq_chip_data(virq, h->host_data); irq_to_desc(virq)->status |= IRQ_LEVEL; set_irq_chip_and_handler(virq, &flipper_pic, handle_level_irq); return 0; } static void flipper_pic_unmap(struct irq_host *h, unsigned int irq) { set_irq_chip_data(irq, NULL); set_irq_chip(irq, NULL); } static int flipper_pic_match(struct irq_host *h, struct device_node *np) { return 1; } static struct irq_host_ops flipper_irq_host_ops = { .map = flipper_pic_map, .unmap = flipper_pic_unmap, .match = flipper_pic_match, }; /* * Platform hooks. 
* */ static void __flipper_quiesce(void __iomem *io_base) { /* mask and ack all IRQs */ out_be32(io_base + FLIPPER_IMR, 0x00000000); out_be32(io_base + FLIPPER_ICR, 0xffffffff); } struct irq_host * __init flipper_pic_init(struct device_node *np) { struct device_node *pi; struct irq_host *irq_host = NULL; struct resource res; void __iomem *io_base; int retval; pi = of_get_parent(np); if (!pi) { pr_err("no parent found\n"); goto out; } if (!of_device_is_compatible(pi, "nintendo,flipper-pi")) { pr_err("unexpected parent compatible\n"); goto out; } retval = of_address_to_resource(pi, 0, &res); if (retval) { pr_err("no io memory range found\n"); goto out; } io_base = ioremap(res.start, resource_size(&res)); pr_info("controller at 0x%08x mapped to 0x%p\n", res.start, io_base); __flipper_quiesce(io_base); irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, FLIPPER_NR_IRQS, &flipper_irq_host_ops, -1); if (!irq_host) { pr_err("failed to allocate irq_host\n"); return NULL; } irq_host->host_data = io_base; out: return irq_host; } unsigned int flipper_pic_get_irq(void) { void __iomem *io_base = flipper_irq_host->host_data; int irq; u32 irq_status; irq_status = in_be32(io_base + FLIPPER_ICR) & in_be32(io_base + FLIPPER_IMR); if (irq_status == 0) return NO_IRQ; /* no more IRQs pending */ irq = __ffs(irq_status); return irq_linear_revmap(flipper_irq_host, irq); } /* * Probe function. * */ void __init flipper_pic_probe(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "nintendo,flipper-pic"); BUG_ON(!np); flipper_irq_host = flipper_pic_init(np); BUG_ON(!flipper_irq_host); irq_set_default_host(flipper_irq_host); of_node_put(np); } /* * Misc functions related to the flipper chipset. * */ /** * flipper_quiesce() - quiesce flipper irq controller * * Mask and ack all interrupt sources. * */ void flipper_quiesce(void) { void __iomem *io_base = flipper_irq_host->host_data; __flipper_quiesce(io_base); } /* * Resets the platform. */ void flipper_platform_reset(void) { void __iomem *io_base; if (flipper_irq_host && flipper_irq_host->host_data) { io_base = flipper_irq_host->host_data; out_8(io_base + FLIPPER_RESET, 0x00); } } /* * Returns non-zero if the reset button is pressed. */ int flipper_is_reset_button_pressed(void) { void __iomem *io_base; u32 icr; if (flipper_irq_host && flipper_irq_host->host_data) { io_base = flipper_irq_host->host_data; icr = in_be32(io_base + FLIPPER_ICR); return !(icr & FLIPPER_ICR_RSS); } return 0; }
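/*
 * Editor's note: illustrative sketch, not part of the kernel file above.
 * flipper_pic_get_irq() decodes the pending interrupt by AND-ing the
 * cause (ICR) and mask (IMR) registers and taking the lowest set bit
 * with __ffs(). This standalone model reproduces that decode on plain
 * integers; the register values below are made up for the demo.
 */
#include <stdio.h>
#include <stdint.h>

/* lowest set bit; same contract as the kernel's __ffs() (v != 0) */
static int lowest_set_bit(uint32_t v)
{
	int i = 0;

	while (!(v & 1)) {
		v >>= 1;
		i++;
	}
	return i;
}

static int flipper_decode(uint32_t icr, uint32_t imr)
{
	uint32_t pending = icr & imr; /* asserted AND enabled */

	if (!pending)
		return -1; /* stands in for NO_IRQ */
	return lowest_set_bit(pending);
}

int main(void)
{
	/* cause bits 3 and 5 asserted, but only bit 5 is unmasked */
	printf("irq = %d\n", flipper_decode((1u << 3) | (1u << 5), 1u << 5));
	return 0; /* prints: irq = 5 */
}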
gpl-2.0
MinimalOS/android_kernel_moto_shamu
sound/usb/caiaq/audio.c
2080
23476
/* * Copyright (c) 2006-2008 Daniel Mack, Karsten Wiese * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/device.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/usb.h> #include <sound/core.h> #include <sound/pcm.h> #include "device.h" #include "audio.h" #define N_URBS 32 #define CLOCK_DRIFT_TOLERANCE 5 #define FRAMES_PER_URB 8 #define BYTES_PER_FRAME 512 #define CHANNELS_PER_STREAM 2 #define BYTES_PER_SAMPLE 3 #define BYTES_PER_SAMPLE_USB 4 #define MAX_BUFFER_SIZE (128*1024) #define MAX_ENDPOINT_SIZE 512 #define ENDPOINT_CAPTURE 2 #define ENDPOINT_PLAYBACK 6 #define MAKE_CHECKBYTE(cdev,stream,i) \ (stream << 1) | (~(i / (cdev->n_streams * BYTES_PER_SAMPLE_USB)) & 1) static struct snd_pcm_hardware snd_usb_caiaq_pcm_hardware = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER), .formats = SNDRV_PCM_FMTBIT_S24_3BE, .rates = (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000), .rate_min = 44100, .rate_max = 0, /* will overwrite later */ .channels_min = CHANNELS_PER_STREAM, .channels_max = CHANNELS_PER_STREAM, .buffer_bytes_max = MAX_BUFFER_SIZE, .period_bytes_min = 128, .period_bytes_max = MAX_BUFFER_SIZE, .periods_min = 1, .periods_max = 1024, }; static void activate_substream(struct snd_usb_caiaqdev *cdev, struct snd_pcm_substream *sub) { spin_lock(&cdev->spinlock); if (sub->stream == SNDRV_PCM_STREAM_PLAYBACK) cdev->sub_playback[sub->number] = sub; else cdev->sub_capture[sub->number] = sub; spin_unlock(&cdev->spinlock); } static void deactivate_substream(struct snd_usb_caiaqdev *cdev, struct snd_pcm_substream *sub) { unsigned long flags; spin_lock_irqsave(&cdev->spinlock, flags); if (sub->stream == SNDRV_PCM_STREAM_PLAYBACK) cdev->sub_playback[sub->number] = NULL; else cdev->sub_capture[sub->number] = NULL; spin_unlock_irqrestore(&cdev->spinlock, flags); } static int all_substreams_zero(struct snd_pcm_substream **subs) { int i; for (i = 0; i < MAX_STREAMS; i++) if (subs[i] != NULL) return 0; return 1; } static int stream_start(struct snd_usb_caiaqdev *cdev) { int i, ret; struct device *dev = caiaqdev_to_dev(cdev); dev_dbg(dev, "%s(%p)\n", __func__, cdev); if (cdev->streaming) return -EINVAL; memset(cdev->sub_playback, 0, sizeof(cdev->sub_playback)); memset(cdev->sub_capture, 0, sizeof(cdev->sub_capture)); cdev->input_panic = 0; cdev->output_panic = 0; cdev->first_packet = 4; cdev->streaming = 1; cdev->warned = 0; for (i = 0; i < N_URBS; i++) { ret = usb_submit_urb(cdev->data_urbs_in[i], GFP_ATOMIC); if (ret) { dev_err(dev, "unable to trigger read #%d! 
(ret %d)\n", i, ret); cdev->streaming = 0; return -EPIPE; } } return 0; } static void stream_stop(struct snd_usb_caiaqdev *cdev) { int i; struct device *dev = caiaqdev_to_dev(cdev); dev_dbg(dev, "%s(%p)\n", __func__, cdev); if (!cdev->streaming) return; cdev->streaming = 0; for (i = 0; i < N_URBS; i++) { usb_kill_urb(cdev->data_urbs_in[i]); if (test_bit(i, &cdev->outurb_active_mask)) usb_kill_urb(cdev->data_urbs_out[i]); } cdev->outurb_active_mask = 0; } static int snd_usb_caiaq_substream_open(struct snd_pcm_substream *substream) { struct snd_usb_caiaqdev *cdev = snd_pcm_substream_chip(substream); struct device *dev = caiaqdev_to_dev(cdev); dev_dbg(dev, "%s(%p)\n", __func__, substream); substream->runtime->hw = cdev->pcm_info; snd_pcm_limit_hw_rates(substream->runtime); return 0; } static int snd_usb_caiaq_substream_close(struct snd_pcm_substream *substream) { struct snd_usb_caiaqdev *cdev = snd_pcm_substream_chip(substream); struct device *dev = caiaqdev_to_dev(cdev); dev_dbg(dev, "%s(%p)\n", __func__, substream); if (all_substreams_zero(cdev->sub_playback) && all_substreams_zero(cdev->sub_capture)) { /* when the last client has stopped streaming, * all sample rates are allowed again */ stream_stop(cdev); cdev->pcm_info.rates = cdev->samplerates; } return 0; } static int snd_usb_caiaq_pcm_hw_params(struct snd_pcm_substream *sub, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_malloc_pages(sub, params_buffer_bytes(hw_params)); } static int snd_usb_caiaq_pcm_hw_free(struct snd_pcm_substream *sub) { struct snd_usb_caiaqdev *cdev = snd_pcm_substream_chip(sub); deactivate_substream(cdev, sub); return snd_pcm_lib_free_pages(sub); } /* this should probably go upstream */ #if SNDRV_PCM_RATE_5512 != 1 << 0 || SNDRV_PCM_RATE_192000 != 1 << 12 #error "Change this table" #endif static unsigned int rates[] = { 5512, 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000, 88200, 96000, 176400, 192000 }; static int snd_usb_caiaq_pcm_prepare(struct snd_pcm_substream *substream) { int bytes_per_sample, bpp, ret, i; int index = substream->number; struct snd_usb_caiaqdev *cdev = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct device *dev = caiaqdev_to_dev(cdev); dev_dbg(dev, "%s(%p)\n", __func__, substream); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { int out_pos; switch (cdev->spec.data_alignment) { case 0: case 2: out_pos = BYTES_PER_SAMPLE + 1; break; case 3: default: out_pos = 0; break; } cdev->period_out_count[index] = out_pos; cdev->audio_out_buf_pos[index] = out_pos; } else { int in_pos; switch (cdev->spec.data_alignment) { case 0: in_pos = BYTES_PER_SAMPLE + 2; break; case 2: in_pos = BYTES_PER_SAMPLE; break; case 3: default: in_pos = 0; break; } cdev->period_in_count[index] = in_pos; cdev->audio_in_buf_pos[index] = in_pos; } if (cdev->streaming) return 0; /* the first client that opens a stream defines the sample rate * setting for all subsequent calls, until the last client closed. 
*/ for (i=0; i < ARRAY_SIZE(rates); i++) if (runtime->rate == rates[i]) cdev->pcm_info.rates = 1 << i; snd_pcm_limit_hw_rates(runtime); bytes_per_sample = BYTES_PER_SAMPLE; if (cdev->spec.data_alignment >= 2) bytes_per_sample++; bpp = ((runtime->rate / 8000) + CLOCK_DRIFT_TOLERANCE) * bytes_per_sample * CHANNELS_PER_STREAM * cdev->n_streams; if (bpp > MAX_ENDPOINT_SIZE) bpp = MAX_ENDPOINT_SIZE; ret = snd_usb_caiaq_set_audio_params(cdev, runtime->rate, runtime->sample_bits, bpp); if (ret) return ret; ret = stream_start(cdev); if (ret) return ret; cdev->output_running = 0; wait_event_timeout(cdev->prepare_wait_queue, cdev->output_running, HZ); if (!cdev->output_running) { stream_stop(cdev); return -EPIPE; } return 0; } static int snd_usb_caiaq_pcm_trigger(struct snd_pcm_substream *sub, int cmd) { struct snd_usb_caiaqdev *cdev = snd_pcm_substream_chip(sub); struct device *dev = caiaqdev_to_dev(cdev); dev_dbg(dev, "%s(%p) cmd %d\n", __func__, sub, cmd); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: activate_substream(cdev, sub); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: deactivate_substream(cdev, sub); break; default: return -EINVAL; } return 0; } static snd_pcm_uframes_t snd_usb_caiaq_pcm_pointer(struct snd_pcm_substream *sub) { int index = sub->number; struct snd_usb_caiaqdev *cdev = snd_pcm_substream_chip(sub); snd_pcm_uframes_t ptr; spin_lock(&cdev->spinlock); if (cdev->input_panic || cdev->output_panic) { ptr = SNDRV_PCM_POS_XRUN; goto unlock; } if (sub->stream == SNDRV_PCM_STREAM_PLAYBACK) ptr = bytes_to_frames(sub->runtime, cdev->audio_out_buf_pos[index]); else ptr = bytes_to_frames(sub->runtime, cdev->audio_in_buf_pos[index]); unlock: spin_unlock(&cdev->spinlock); return ptr; } /* operators for both playback and capture */ static struct snd_pcm_ops snd_usb_caiaq_ops = { .open = snd_usb_caiaq_substream_open, .close = snd_usb_caiaq_substream_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_usb_caiaq_pcm_hw_params, .hw_free = snd_usb_caiaq_pcm_hw_free, .prepare = snd_usb_caiaq_pcm_prepare, .trigger = snd_usb_caiaq_pcm_trigger, .pointer = snd_usb_caiaq_pcm_pointer }; static void check_for_elapsed_periods(struct snd_usb_caiaqdev *cdev, struct snd_pcm_substream **subs) { int stream, pb, *cnt; struct snd_pcm_substream *sub; for (stream = 0; stream < cdev->n_streams; stream++) { sub = subs[stream]; if (!sub) continue; pb = snd_pcm_lib_period_bytes(sub); cnt = (sub->stream == SNDRV_PCM_STREAM_PLAYBACK) ? 
&cdev->period_out_count[stream] : &cdev->period_in_count[stream]; if (*cnt >= pb) { snd_pcm_period_elapsed(sub); *cnt %= pb; } } } static void read_in_urb_mode0(struct snd_usb_caiaqdev *cdev, const struct urb *urb, const struct usb_iso_packet_descriptor *iso) { unsigned char *usb_buf = urb->transfer_buffer + iso->offset; struct snd_pcm_substream *sub; int stream, i; if (all_substreams_zero(cdev->sub_capture)) return; for (i = 0; i < iso->actual_length;) { for (stream = 0; stream < cdev->n_streams; stream++, i++) { sub = cdev->sub_capture[stream]; if (sub) { struct snd_pcm_runtime *rt = sub->runtime; char *audio_buf = rt->dma_area; int sz = frames_to_bytes(rt, rt->buffer_size); audio_buf[cdev->audio_in_buf_pos[stream]++] = usb_buf[i]; cdev->period_in_count[stream]++; if (cdev->audio_in_buf_pos[stream] == sz) cdev->audio_in_buf_pos[stream] = 0; } } } } static void read_in_urb_mode2(struct snd_usb_caiaqdev *cdev, const struct urb *urb, const struct usb_iso_packet_descriptor *iso) { unsigned char *usb_buf = urb->transfer_buffer + iso->offset; unsigned char check_byte; struct snd_pcm_substream *sub; int stream, i; for (i = 0; i < iso->actual_length;) { if (i % (cdev->n_streams * BYTES_PER_SAMPLE_USB) == 0) { for (stream = 0; stream < cdev->n_streams; stream++, i++) { if (cdev->first_packet) continue; check_byte = MAKE_CHECKBYTE(cdev, stream, i); if ((usb_buf[i] & 0x3f) != check_byte) cdev->input_panic = 1; if (usb_buf[i] & 0x80) cdev->output_panic = 1; } } cdev->first_packet = 0; for (stream = 0; stream < cdev->n_streams; stream++, i++) { sub = cdev->sub_capture[stream]; if (cdev->input_panic) usb_buf[i] = 0; if (sub) { struct snd_pcm_runtime *rt = sub->runtime; char *audio_buf = rt->dma_area; int sz = frames_to_bytes(rt, rt->buffer_size); audio_buf[cdev->audio_in_buf_pos[stream]++] = usb_buf[i]; cdev->period_in_count[stream]++; if (cdev->audio_in_buf_pos[stream] == sz) cdev->audio_in_buf_pos[stream] = 0; } } } } static void read_in_urb_mode3(struct snd_usb_caiaqdev *cdev, const struct urb *urb, const struct usb_iso_packet_descriptor *iso) { unsigned char *usb_buf = urb->transfer_buffer + iso->offset; struct device *dev = caiaqdev_to_dev(cdev); int stream, i; /* paranoia check */ if (iso->actual_length % (BYTES_PER_SAMPLE_USB * CHANNELS_PER_STREAM)) return; for (i = 0; i < iso->actual_length;) { for (stream = 0; stream < cdev->n_streams; stream++) { struct snd_pcm_substream *sub = cdev->sub_capture[stream]; char *audio_buf = NULL; int c, n, sz = 0; if (sub && !cdev->input_panic) { struct snd_pcm_runtime *rt = sub->runtime; audio_buf = rt->dma_area; sz = frames_to_bytes(rt, rt->buffer_size); } for (c = 0; c < CHANNELS_PER_STREAM; c++) { /* 3 audio data bytes, followed by 1 check byte */ if (audio_buf) { for (n = 0; n < BYTES_PER_SAMPLE; n++) { audio_buf[cdev->audio_in_buf_pos[stream]++] = usb_buf[i+n]; if (cdev->audio_in_buf_pos[stream] == sz) cdev->audio_in_buf_pos[stream] = 0; } cdev->period_in_count[stream] += BYTES_PER_SAMPLE; } i += BYTES_PER_SAMPLE; if (usb_buf[i] != ((stream << 1) | c) && !cdev->first_packet) { if (!cdev->input_panic) dev_warn(dev, " EXPECTED: %02x got %02x, c %d, stream %d, i %d\n", ((stream << 1) | c), usb_buf[i], c, stream, i); cdev->input_panic = 1; } i++; } } } if (cdev->first_packet > 0) cdev->first_packet--; } static void read_in_urb(struct snd_usb_caiaqdev *cdev, const struct urb *urb, const struct usb_iso_packet_descriptor *iso) { struct device *dev = caiaqdev_to_dev(cdev); if (!cdev->streaming) return; if (iso->actual_length < cdev->bpp) return; switch 
(cdev->spec.data_alignment) { case 0: read_in_urb_mode0(cdev, urb, iso); break; case 2: read_in_urb_mode2(cdev, urb, iso); break; case 3: read_in_urb_mode3(cdev, urb, iso); break; } if ((cdev->input_panic || cdev->output_panic) && !cdev->warned) { dev_warn(dev, "streaming error detected %s %s\n", cdev->input_panic ? "(input)" : "", cdev->output_panic ? "(output)" : ""); cdev->warned = 1; } } static void fill_out_urb_mode_0(struct snd_usb_caiaqdev *cdev, struct urb *urb, const struct usb_iso_packet_descriptor *iso) { unsigned char *usb_buf = urb->transfer_buffer + iso->offset; struct snd_pcm_substream *sub; int stream, i; for (i = 0; i < iso->length;) { for (stream = 0; stream < cdev->n_streams; stream++, i++) { sub = cdev->sub_playback[stream]; if (sub) { struct snd_pcm_runtime *rt = sub->runtime; char *audio_buf = rt->dma_area; int sz = frames_to_bytes(rt, rt->buffer_size); usb_buf[i] = audio_buf[cdev->audio_out_buf_pos[stream]]; cdev->period_out_count[stream]++; cdev->audio_out_buf_pos[stream]++; if (cdev->audio_out_buf_pos[stream] == sz) cdev->audio_out_buf_pos[stream] = 0; } else usb_buf[i] = 0; } /* fill in the check bytes */ if (cdev->spec.data_alignment == 2 && i % (cdev->n_streams * BYTES_PER_SAMPLE_USB) == (cdev->n_streams * CHANNELS_PER_STREAM)) for (stream = 0; stream < cdev->n_streams; stream++, i++) usb_buf[i] = MAKE_CHECKBYTE(cdev, stream, i); } } static void fill_out_urb_mode_3(struct snd_usb_caiaqdev *cdev, struct urb *urb, const struct usb_iso_packet_descriptor *iso) { unsigned char *usb_buf = urb->transfer_buffer + iso->offset; int stream, i; for (i = 0; i < iso->length;) { for (stream = 0; stream < cdev->n_streams; stream++) { struct snd_pcm_substream *sub = cdev->sub_playback[stream]; char *audio_buf = NULL; int c, n, sz = 0; if (sub) { struct snd_pcm_runtime *rt = sub->runtime; audio_buf = rt->dma_area; sz = frames_to_bytes(rt, rt->buffer_size); } for (c = 0; c < CHANNELS_PER_STREAM; c++) { for (n = 0; n < BYTES_PER_SAMPLE; n++) { if (audio_buf) { usb_buf[i+n] = audio_buf[cdev->audio_out_buf_pos[stream]++]; if (cdev->audio_out_buf_pos[stream] == sz) cdev->audio_out_buf_pos[stream] = 0; } else { usb_buf[i+n] = 0; } } if (audio_buf) cdev->period_out_count[stream] += BYTES_PER_SAMPLE; i += BYTES_PER_SAMPLE; /* fill in the check byte pattern */ usb_buf[i++] = (stream << 1) | c; } } } } static inline void fill_out_urb(struct snd_usb_caiaqdev *cdev, struct urb *urb, const struct usb_iso_packet_descriptor *iso) { switch (cdev->spec.data_alignment) { case 0: case 2: fill_out_urb_mode_0(cdev, urb, iso); break; case 3: fill_out_urb_mode_3(cdev, urb, iso); break; } } static void read_completed(struct urb *urb) { struct snd_usb_caiaq_cb_info *info = urb->context; struct snd_usb_caiaqdev *cdev; struct device *dev; struct urb *out = NULL; int i, frame, len, send_it = 0, outframe = 0; size_t offset = 0; if (urb->status || !info) return; cdev = info->cdev; dev = caiaqdev_to_dev(cdev); if (!cdev->streaming) return; /* find an unused output urb that is unused */ for (i = 0; i < N_URBS; i++) if (test_and_set_bit(i, &cdev->outurb_active_mask) == 0) { out = cdev->data_urbs_out[i]; break; } if (!out) { dev_err(dev, "Unable to find an output urb to use\n"); goto requeue; } /* read the recently received packet and send back one which has * the same layout */ for (frame = 0; frame < FRAMES_PER_URB; frame++) { if (urb->iso_frame_desc[frame].status) continue; len = urb->iso_frame_desc[outframe].actual_length; out->iso_frame_desc[outframe].length = len; out->iso_frame_desc[outframe].actual_length 
= 0; out->iso_frame_desc[outframe].offset = offset; offset += len; if (len > 0) { spin_lock(&cdev->spinlock); fill_out_urb(cdev, out, &out->iso_frame_desc[outframe]); read_in_urb(cdev, urb, &urb->iso_frame_desc[frame]); spin_unlock(&cdev->spinlock); check_for_elapsed_periods(cdev, cdev->sub_playback); check_for_elapsed_periods(cdev, cdev->sub_capture); send_it = 1; } outframe++; } if (send_it) { out->number_of_packets = outframe; usb_submit_urb(out, GFP_ATOMIC); } else { struct snd_usb_caiaq_cb_info *oinfo = out->context; clear_bit(oinfo->index, &cdev->outurb_active_mask); } requeue: /* re-submit inbound urb */ for (frame = 0; frame < FRAMES_PER_URB; frame++) { urb->iso_frame_desc[frame].offset = BYTES_PER_FRAME * frame; urb->iso_frame_desc[frame].length = BYTES_PER_FRAME; urb->iso_frame_desc[frame].actual_length = 0; } urb->number_of_packets = FRAMES_PER_URB; usb_submit_urb(urb, GFP_ATOMIC); } static void write_completed(struct urb *urb) { struct snd_usb_caiaq_cb_info *info = urb->context; struct snd_usb_caiaqdev *cdev = info->cdev; if (!cdev->output_running) { cdev->output_running = 1; wake_up(&cdev->prepare_wait_queue); } clear_bit(info->index, &cdev->outurb_active_mask); } static struct urb **alloc_urbs(struct snd_usb_caiaqdev *cdev, int dir, int *ret) { int i, frame; struct urb **urbs; struct usb_device *usb_dev = cdev->chip.dev; struct device *dev = caiaqdev_to_dev(cdev); unsigned int pipe; pipe = (dir == SNDRV_PCM_STREAM_PLAYBACK) ? usb_sndisocpipe(usb_dev, ENDPOINT_PLAYBACK) : usb_rcvisocpipe(usb_dev, ENDPOINT_CAPTURE); urbs = kmalloc(N_URBS * sizeof(*urbs), GFP_KERNEL); if (!urbs) { dev_err(dev, "unable to kmalloc() urbs, OOM!?\n"); *ret = -ENOMEM; return NULL; } for (i = 0; i < N_URBS; i++) { urbs[i] = usb_alloc_urb(FRAMES_PER_URB, GFP_KERNEL); if (!urbs[i]) { dev_err(dev, "unable to usb_alloc_urb(), OOM!?\n"); *ret = -ENOMEM; return urbs; } urbs[i]->transfer_buffer = kmalloc(FRAMES_PER_URB * BYTES_PER_FRAME, GFP_KERNEL); if (!urbs[i]->transfer_buffer) { dev_err(dev, "unable to kmalloc() transfer buffer, OOM!?\n"); *ret = -ENOMEM; return urbs; } for (frame = 0; frame < FRAMES_PER_URB; frame++) { struct usb_iso_packet_descriptor *iso = &urbs[i]->iso_frame_desc[frame]; iso->offset = BYTES_PER_FRAME * frame; iso->length = BYTES_PER_FRAME; } urbs[i]->dev = usb_dev; urbs[i]->pipe = pipe; urbs[i]->transfer_buffer_length = FRAMES_PER_URB * BYTES_PER_FRAME; urbs[i]->context = &cdev->data_cb_info[i]; urbs[i]->interval = 1; urbs[i]->number_of_packets = FRAMES_PER_URB; urbs[i]->complete = (dir == SNDRV_PCM_STREAM_CAPTURE) ? 
read_completed : write_completed; } *ret = 0; return urbs; } static void free_urbs(struct urb **urbs) { int i; if (!urbs) return; for (i = 0; i < N_URBS; i++) { if (!urbs[i]) continue; usb_kill_urb(urbs[i]); kfree(urbs[i]->transfer_buffer); usb_free_urb(urbs[i]); } kfree(urbs); } int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *cdev) { int i, ret; struct device *dev = caiaqdev_to_dev(cdev); cdev->n_audio_in = max(cdev->spec.num_analog_audio_in, cdev->spec.num_digital_audio_in) / CHANNELS_PER_STREAM; cdev->n_audio_out = max(cdev->spec.num_analog_audio_out, cdev->spec.num_digital_audio_out) / CHANNELS_PER_STREAM; cdev->n_streams = max(cdev->n_audio_in, cdev->n_audio_out); dev_dbg(dev, "cdev->n_audio_in = %d\n", cdev->n_audio_in); dev_dbg(dev, "cdev->n_audio_out = %d\n", cdev->n_audio_out); dev_dbg(dev, "cdev->n_streams = %d\n", cdev->n_streams); if (cdev->n_streams > MAX_STREAMS) { dev_err(dev, "unable to initialize device, too many streams.\n"); return -EINVAL; } ret = snd_pcm_new(cdev->chip.card, cdev->product_name, 0, cdev->n_audio_out, cdev->n_audio_in, &cdev->pcm); if (ret < 0) { dev_err(dev, "snd_pcm_new() returned %d\n", ret); return ret; } cdev->pcm->private_data = cdev; strlcpy(cdev->pcm->name, cdev->product_name, sizeof(cdev->pcm->name)); memset(cdev->sub_playback, 0, sizeof(cdev->sub_playback)); memset(cdev->sub_capture, 0, sizeof(cdev->sub_capture)); memcpy(&cdev->pcm_info, &snd_usb_caiaq_pcm_hardware, sizeof(snd_usb_caiaq_pcm_hardware)); /* setup samplerates */ cdev->samplerates = cdev->pcm_info.rates; switch (cdev->chip.usb_id) { case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AK1): case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_RIGKONTROL3): case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_SESSIONIO): case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_GUITARRIGMOBILE): cdev->samplerates |= SNDRV_PCM_RATE_192000; /* fall thru */ case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO2DJ): case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO4DJ): case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO8DJ): case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORAUDIO2): cdev->samplerates |= SNDRV_PCM_RATE_88200; break; } snd_pcm_set_ops(cdev->pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_usb_caiaq_ops); snd_pcm_set_ops(cdev->pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_usb_caiaq_ops); snd_pcm_lib_preallocate_pages_for_all(cdev->pcm, SNDRV_DMA_TYPE_CONTINUOUS, snd_dma_continuous_data(GFP_KERNEL), MAX_BUFFER_SIZE, MAX_BUFFER_SIZE); cdev->data_cb_info = kmalloc(sizeof(struct snd_usb_caiaq_cb_info) * N_URBS, GFP_KERNEL); if (!cdev->data_cb_info) return -ENOMEM; cdev->outurb_active_mask = 0; BUILD_BUG_ON(N_URBS > (sizeof(cdev->outurb_active_mask) * 8)); for (i = 0; i < N_URBS; i++) { cdev->data_cb_info[i].cdev = cdev; cdev->data_cb_info[i].index = i; } cdev->data_urbs_in = alloc_urbs(cdev, SNDRV_PCM_STREAM_CAPTURE, &ret); if (ret < 0) { kfree(cdev->data_cb_info); free_urbs(cdev->data_urbs_in); return ret; } cdev->data_urbs_out = alloc_urbs(cdev, SNDRV_PCM_STREAM_PLAYBACK, &ret); if (ret < 0) { kfree(cdev->data_cb_info); free_urbs(cdev->data_urbs_in); free_urbs(cdev->data_urbs_out); return ret; } return 0; } void snd_usb_caiaq_audio_free(struct snd_usb_caiaqdev *cdev) { struct device *dev = caiaqdev_to_dev(cdev); dev_dbg(dev, "%s(%p)\n", __func__, cdev); stream_stop(cdev); free_urbs(cdev->data_urbs_in); free_urbs(cdev->data_urbs_out); kfree(cdev->data_cb_info); }
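/*
 * Editor's note: illustrative sketch, not part of the driver above.
 * snd_usb_caiaq_pcm_prepare() sizes the isochronous packets as
 * (rate / 8000 samples per service interval, plus drift slack) *
 * bytes/sample * channels * streams, clamped to MAX_ENDPOINT_SIZE.
 * The interpretation of rate/8000 as "samples per 1/8000 s interval"
 * is an assumption; the arithmetic itself is copied from the driver
 * so the numbers can be checked by hand.
 */
#include <stdio.h>

#define CLOCK_DRIFT_TOLERANCE 5
#define CHANNELS_PER_STREAM 2
#define MAX_ENDPOINT_SIZE 512

static int caiaq_bpp(int rate, int bytes_per_sample, int n_streams)
{
	int bpp = ((rate / 8000) + CLOCK_DRIFT_TOLERANCE) *
		  bytes_per_sample * CHANNELS_PER_STREAM * n_streams;

	return bpp > MAX_ENDPOINT_SIZE ? MAX_ENDPOINT_SIZE : bpp;
}

int main(void)
{
	/* 48 kHz, 3 bytes/sample, 2 stereo streams: (6+5)*3*2*2 = 132 */
	printf("bpp = %d\n", caiaq_bpp(48000, 3, 2));
	/* 96 kHz, 4 bytes/sample, 4 streams: 544, clamps to 512 */
	printf("bpp = %d\n", caiaq_bpp(96000, 4, 4));
	return 0;
}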
gpl-2.0
gearslam/himawhlspcs
drivers/staging/comedi/drivers/ni_mio_common.c
2080
170214
/* comedi/drivers/ni_mio_common.c Hardware driver for DAQ-STC based boards COMEDI - Linux Control and Measurement Device Interface Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org> Copyright (C) 2002-2006 Frank Mori Hess <fmhess@users.sourceforge.net> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* This file is meant to be included by another file, e.g., ni_atmio.c or ni_pcimio.c. Interrupt support originally added by Truxton Fulton <trux@truxton.com> References (from ftp://ftp.natinst.com/support/manuals): 340747b.pdf AT-MIO E series Register Level Programmer Manual 341079b.pdf PCI E Series RLPM 340934b.pdf DAQ-STC reference manual 67xx and 611x registers (from ftp://ftp.ni.com/support/daq/mhddk/documentation/) release_ni611x.pdf release_ni67xx.pdf Other possibly relevant info: 320517c.pdf User manual (obsolete) 320517f.pdf User manual (new) 320889a.pdf delete 320906c.pdf maximum signal ratings 321066a.pdf about 16x 321791a.pdf discontinuation of at-mio-16e-10 rev. c 321808a.pdf about at-mio-16e-10 rev P 321837a.pdf discontinuation of at-mio-16de-10 rev d 321838a.pdf about at-mio-16de-10 rev N ISSUES: - the interrupt routine needs to be cleaned up 2006-02-07: S-Series PCI-6143: Support has been added but is not fully tested as yet. Terry Barnaby, BEAM Ltd. */ /* #define DEBUG_INTERRUPT */ /* #define DEBUG_STATUS_A */ /* #define DEBUG_STATUS_B */ #include <linux/interrupt.h> #include <linux/sched.h> #include "8255.h" #include "mite.h" #include "comedi_fc.h" #ifndef MDPRINTK #define MDPRINTK(format, args...) 
#endif /* A timeout count */ #define NI_TIMEOUT 1000 static const unsigned old_RTSI_clock_channel = 7; /* Note: this table must match the ai_gain_* definitions */ static const short ni_gainlkup[][16] = { [ai_gain_16] = {0, 1, 2, 3, 4, 5, 6, 7, 0x100, 0x101, 0x102, 0x103, 0x104, 0x105, 0x106, 0x107}, [ai_gain_8] = {1, 2, 4, 7, 0x101, 0x102, 0x104, 0x107}, [ai_gain_14] = {1, 2, 3, 4, 5, 6, 7, 0x101, 0x102, 0x103, 0x104, 0x105, 0x106, 0x107}, [ai_gain_4] = {0, 1, 4, 7}, [ai_gain_611x] = {0x00a, 0x00b, 0x001, 0x002, 0x003, 0x004, 0x005, 0x006}, [ai_gain_622x] = {0, 1, 4, 5}, [ai_gain_628x] = {1, 2, 3, 4, 5, 6, 7}, [ai_gain_6143] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, }; static const struct comedi_lrange range_ni_E_ai = { 16, { RANGE(-10, 10), RANGE(-5, 5), RANGE(-2.5, 2.5), RANGE(-1, 1), RANGE(-0.5, 0.5), RANGE(-0.25, 0.25), RANGE(-0.1, 0.1), RANGE(-0.05, 0.05), RANGE(0, 20), RANGE(0, 10), RANGE(0, 5), RANGE(0, 2), RANGE(0, 1), RANGE(0, 0.5), RANGE(0, 0.2), RANGE(0, 0.1), } }; static const struct comedi_lrange range_ni_E_ai_limited = { 8, { RANGE(-10, 10), RANGE(-5, 5), RANGE(-1, 1), RANGE(-0.1, 0.1), RANGE(0, 10), RANGE(0, 5), RANGE(0, 1), RANGE(0, 0.1), } }; static const struct comedi_lrange range_ni_E_ai_limited14 = { 14, { RANGE(-10, 10), RANGE(-5, 5), RANGE(-2, 2), RANGE(-1, 1), RANGE(-0.5, 0.5), RANGE(-0.2, 0.2), RANGE(-0.1, 0.1), RANGE(0, 10), RANGE(0, 5), RANGE(0, 2), RANGE(0, 1), RANGE(0, 0.5), RANGE(0, 0.2), RANGE(0, 0.1), } }; static const struct comedi_lrange range_ni_E_ai_bipolar4 = { 4, { RANGE(-10, 10), RANGE(-5, 5), RANGE(-0.5, 0.5), RANGE(-0.05, 0.05), } }; static const struct comedi_lrange range_ni_E_ai_611x = { 8, { RANGE(-50, 50), RANGE(-20, 20), RANGE(-10, 10), RANGE(-5, 5), RANGE(-2, 2), RANGE(-1, 1), RANGE(-0.5, 0.5), RANGE(-0.2, 0.2), } }; static const struct comedi_lrange range_ni_M_ai_622x = { 4, { RANGE(-10, 10), RANGE(-5, 5), RANGE(-1, 1), RANGE(-0.2, 0.2), } }; static const struct comedi_lrange range_ni_M_ai_628x = { 7, { RANGE(-10, 10), RANGE(-5, 5), RANGE(-2, 2), RANGE(-1, 1), RANGE(-0.5, 0.5), RANGE(-0.2, 0.2), RANGE(-0.1, 0.1), } }; static const struct comedi_lrange range_ni_E_ao_ext = { 4, { RANGE(-10, 10), RANGE(0, 10), RANGE_ext(-1, 1), RANGE_ext(0, 1), } }; static const struct comedi_lrange *const ni_range_lkup[] = { [ai_gain_16] = &range_ni_E_ai, [ai_gain_8] = &range_ni_E_ai_limited, [ai_gain_14] = &range_ni_E_ai_limited14, [ai_gain_4] = &range_ni_E_ai_bipolar4, [ai_gain_611x] = &range_ni_E_ai_611x, [ai_gain_622x] = &range_ni_M_ai_622x, [ai_gain_628x] = &range_ni_M_ai_628x, [ai_gain_6143] = &range_bipolar5 }; static int ni_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ni_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ni_cdio_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); static int ni_cdio_cmd(struct comedi_device *dev, struct comedi_subdevice *s); static int ni_cdio_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static void handle_cdio_interrupt(struct comedi_device *dev); static int ni_cdo_inttrig(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trignum); static int ni_serial_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ni_serial_hw_readwrite8(struct comedi_device *dev, struct comedi_subdevice *s, unsigned char data_out, 
unsigned char *data_in); static int ni_serial_sw_readwrite8(struct comedi_device *dev, struct comedi_subdevice *s, unsigned char data_out, unsigned char *data_in); static int ni_calib_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ni_calib_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ni_eeprom_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ni_m_series_eeprom_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ni_pfi_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ni_pfi_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static unsigned ni_old_get_pfi_routing(struct comedi_device *dev, unsigned chan); static void ni_rtsi_init(struct comedi_device *dev); static int ni_rtsi_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ni_rtsi_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static void caldac_setup(struct comedi_device *dev, struct comedi_subdevice *s); static int ni_read_eeprom(struct comedi_device *dev, int addr); #ifdef DEBUG_STATUS_A static void ni_mio_print_status_a(int status); #else #define ni_mio_print_status_a(a) #endif #ifdef DEBUG_STATUS_B static void ni_mio_print_status_b(int status); #else #define ni_mio_print_status_b(a) #endif static int ni_ai_reset(struct comedi_device *dev, struct comedi_subdevice *s); #ifndef PCIDMA static void ni_handle_fifo_half_full(struct comedi_device *dev); static int ni_ao_fifo_half_empty(struct comedi_device *dev, struct comedi_subdevice *s); #endif static void ni_handle_fifo_dregs(struct comedi_device *dev); static int ni_ai_inttrig(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trignum); static void ni_load_channelgain_list(struct comedi_device *dev, unsigned int n_chan, unsigned int *list); static void shutdown_ai_command(struct comedi_device *dev); static int ni_ao_inttrig(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trignum); static int ni_ao_reset(struct comedi_device *dev, struct comedi_subdevice *s); static int ni_8255_callback(int dir, int port, int data, unsigned long arg); static int ni_gpct_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ni_gpct_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ni_gpct_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); #ifdef PCIDMA static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s); static int ni_gpct_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); #endif static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static void handle_gpct_interrupt(struct comedi_device *dev, unsigned short counter_index); static int init_cs5529(struct comedi_device *dev); static int cs5529_do_conversion(struct comedi_device *dev, unsigned short *data); static int cs5529_ai_insn_read(struct comedi_device *dev, 
struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); #ifdef NI_CS5529_DEBUG static unsigned int cs5529_config_read(struct comedi_device *dev, unsigned int reg_select_bits); #endif static void cs5529_config_write(struct comedi_device *dev, unsigned int value, unsigned int reg_select_bits); static int ni_m_series_pwm_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ni_6143_pwm_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ni_set_master_clock(struct comedi_device *dev, unsigned source, unsigned period_ns); static void ack_a_interrupt(struct comedi_device *dev, unsigned short a_status); static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status); enum aimodes { AIMODE_NONE = 0, AIMODE_HALF_FULL = 1, AIMODE_SCAN = 2, AIMODE_SAMPLE = 3, }; enum ni_common_subdevices { NI_AI_SUBDEV, NI_AO_SUBDEV, NI_DIO_SUBDEV, NI_8255_DIO_SUBDEV, NI_UNUSED_SUBDEV, NI_CALIBRATION_SUBDEV, NI_EEPROM_SUBDEV, NI_PFI_DIO_SUBDEV, NI_CS5529_CALIBRATION_SUBDEV, NI_SERIAL_SUBDEV, NI_RTSI_SUBDEV, NI_GPCT0_SUBDEV, NI_GPCT1_SUBDEV, NI_FREQ_OUT_SUBDEV, NI_NUM_SUBDEVICES }; static inline unsigned NI_GPCT_SUBDEV(unsigned counter_index) { switch (counter_index) { case 0: return NI_GPCT0_SUBDEV; break; case 1: return NI_GPCT1_SUBDEV; break; default: break; } BUG(); return NI_GPCT0_SUBDEV; } enum timebase_nanoseconds { TIMEBASE_1_NS = 50, TIMEBASE_2_NS = 10000 }; #define SERIAL_DISABLED 0 #define SERIAL_600NS 600 #define SERIAL_1_2US 1200 #define SERIAL_10US 10000 static const int num_adc_stages_611x = 3; static void handle_a_interrupt(struct comedi_device *dev, unsigned short status, unsigned ai_mite_status); static void handle_b_interrupt(struct comedi_device *dev, unsigned short status, unsigned ao_mite_status); static void get_last_sample_611x(struct comedi_device *dev); static void get_last_sample_6143(struct comedi_device *dev); static inline void ni_set_bitfield(struct comedi_device *dev, int reg, unsigned bit_mask, unsigned bit_values) { struct ni_private *devpriv = dev->private; unsigned long flags; spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags); switch (reg) { case Interrupt_A_Enable_Register: devpriv->int_a_enable_reg &= ~bit_mask; devpriv->int_a_enable_reg |= bit_values & bit_mask; devpriv->stc_writew(dev, devpriv->int_a_enable_reg, Interrupt_A_Enable_Register); break; case Interrupt_B_Enable_Register: devpriv->int_b_enable_reg &= ~bit_mask; devpriv->int_b_enable_reg |= bit_values & bit_mask; devpriv->stc_writew(dev, devpriv->int_b_enable_reg, Interrupt_B_Enable_Register); break; case IO_Bidirection_Pin_Register: devpriv->io_bidirection_pin_reg &= ~bit_mask; devpriv->io_bidirection_pin_reg |= bit_values & bit_mask; devpriv->stc_writew(dev, devpriv->io_bidirection_pin_reg, IO_Bidirection_Pin_Register); break; case AI_AO_Select: devpriv->ai_ao_select_reg &= ~bit_mask; devpriv->ai_ao_select_reg |= bit_values & bit_mask; ni_writeb(devpriv->ai_ao_select_reg, AI_AO_Select); break; case G0_G1_Select: devpriv->g0_g1_select_reg &= ~bit_mask; devpriv->g0_g1_select_reg |= bit_values & bit_mask; ni_writeb(devpriv->g0_g1_select_reg, G0_G1_Select); break; default: printk("Warning %s() called with invalid register\n", __func__); printk("reg is %d\n", reg); break; } mmiowb(); spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags); } #ifdef PCIDMA static int ni_ai_drain_dma(struct comedi_device *dev); /* DMA channel setup */ /* negative 
channel means no channel */ static inline void ni_set_ai_dma_channel(struct comedi_device *dev, int channel) { unsigned bitfield; if (channel >= 0) { bitfield = (ni_stc_dma_channel_select_bitfield(channel) << AI_DMA_Select_Shift) & AI_DMA_Select_Mask; } else { bitfield = 0; } ni_set_bitfield(dev, AI_AO_Select, AI_DMA_Select_Mask, bitfield); } /* negative channel means no channel */ static inline void ni_set_ao_dma_channel(struct comedi_device *dev, int channel) { unsigned bitfield; if (channel >= 0) { bitfield = (ni_stc_dma_channel_select_bitfield(channel) << AO_DMA_Select_Shift) & AO_DMA_Select_Mask; } else { bitfield = 0; } ni_set_bitfield(dev, AI_AO_Select, AO_DMA_Select_Mask, bitfield); } /* negative mite_channel means no channel */ static inline void ni_set_gpct_dma_channel(struct comedi_device *dev, unsigned gpct_index, int mite_channel) { unsigned bitfield; if (mite_channel >= 0) { bitfield = GPCT_DMA_Select_Bits(gpct_index, mite_channel); } else { bitfield = 0; } ni_set_bitfield(dev, G0_G1_Select, GPCT_DMA_Select_Mask(gpct_index), bitfield); } /* negative mite_channel means no channel */ static inline void ni_set_cdo_dma_channel(struct comedi_device *dev, int mite_channel) { struct ni_private *devpriv = dev->private; unsigned long flags; spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags); devpriv->cdio_dma_select_reg &= ~CDO_DMA_Select_Mask; if (mite_channel >= 0) { /*XXX just guessing ni_stc_dma_channel_select_bitfield() returns the right bits, under the assumption the cdio dma selection works just like ai/ao/gpct. Definitely works for dma channels 0 and 1. */ devpriv->cdio_dma_select_reg |= (ni_stc_dma_channel_select_bitfield(mite_channel) << CDO_DMA_Select_Shift) & CDO_DMA_Select_Mask; } ni_writeb(devpriv->cdio_dma_select_reg, M_Offset_CDIO_DMA_Select); mmiowb(); spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags); } static int ni_request_ai_mite_channel(struct comedi_device *dev) { struct ni_private *devpriv = dev->private; unsigned long flags; spin_lock_irqsave(&devpriv->mite_channel_lock, flags); BUG_ON(devpriv->ai_mite_chan); devpriv->ai_mite_chan = mite_request_channel(devpriv->mite, devpriv->ai_mite_ring); if (devpriv->ai_mite_chan == NULL) { spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); comedi_error(dev, "failed to reserve mite dma channel for analog input."); return -EBUSY; } devpriv->ai_mite_chan->dir = COMEDI_INPUT; ni_set_ai_dma_channel(dev, devpriv->ai_mite_chan->channel); spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); return 0; } static int ni_request_ao_mite_channel(struct comedi_device *dev) { struct ni_private *devpriv = dev->private; unsigned long flags; spin_lock_irqsave(&devpriv->mite_channel_lock, flags); BUG_ON(devpriv->ao_mite_chan); devpriv->ao_mite_chan = mite_request_channel(devpriv->mite, devpriv->ao_mite_ring); if (devpriv->ao_mite_chan == NULL) { spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); comedi_error(dev, "failed to reserve mite dma channel for analog outut."); return -EBUSY; } devpriv->ao_mite_chan->dir = COMEDI_OUTPUT; ni_set_ao_dma_channel(dev, devpriv->ao_mite_chan->channel); spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); return 0; } static int ni_request_gpct_mite_channel(struct comedi_device *dev, unsigned gpct_index, enum comedi_io_direction direction) { struct ni_private *devpriv = dev->private; unsigned long flags; struct mite_channel *mite_chan; BUG_ON(gpct_index >= NUM_GPCT); spin_lock_irqsave(&devpriv->mite_channel_lock, flags); 
BUG_ON(devpriv->counter_dev->counters[gpct_index].mite_chan); mite_chan = mite_request_channel(devpriv->mite, devpriv->gpct_mite_ring[gpct_index]); if (mite_chan == NULL) { spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); comedi_error(dev, "failed to reserve mite dma channel for counter."); return -EBUSY; } mite_chan->dir = direction; ni_tio_set_mite_channel(&devpriv->counter_dev->counters[gpct_index], mite_chan); ni_set_gpct_dma_channel(dev, gpct_index, mite_chan->channel); spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); return 0; } #endif /* PCIDMA */ static int ni_request_cdo_mite_channel(struct comedi_device *dev) { #ifdef PCIDMA struct ni_private *devpriv = dev->private; unsigned long flags; spin_lock_irqsave(&devpriv->mite_channel_lock, flags); BUG_ON(devpriv->cdo_mite_chan); devpriv->cdo_mite_chan = mite_request_channel(devpriv->mite, devpriv->cdo_mite_ring); if (devpriv->cdo_mite_chan == NULL) { spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); comedi_error(dev, "failed to reserve mite dma channel for correlated digital outut."); return -EBUSY; } devpriv->cdo_mite_chan->dir = COMEDI_OUTPUT; ni_set_cdo_dma_channel(dev, devpriv->cdo_mite_chan->channel); spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); #endif /* PCIDMA */ return 0; } static void ni_release_ai_mite_channel(struct comedi_device *dev) { #ifdef PCIDMA struct ni_private *devpriv = dev->private; unsigned long flags; spin_lock_irqsave(&devpriv->mite_channel_lock, flags); if (devpriv->ai_mite_chan) { ni_set_ai_dma_channel(dev, -1); mite_release_channel(devpriv->ai_mite_chan); devpriv->ai_mite_chan = NULL; } spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); #endif /* PCIDMA */ } static void ni_release_ao_mite_channel(struct comedi_device *dev) { #ifdef PCIDMA struct ni_private *devpriv = dev->private; unsigned long flags; spin_lock_irqsave(&devpriv->mite_channel_lock, flags); if (devpriv->ao_mite_chan) { ni_set_ao_dma_channel(dev, -1); mite_release_channel(devpriv->ao_mite_chan); devpriv->ao_mite_chan = NULL; } spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); #endif /* PCIDMA */ } #ifdef PCIDMA static void ni_release_gpct_mite_channel(struct comedi_device *dev, unsigned gpct_index) { struct ni_private *devpriv = dev->private; unsigned long flags; BUG_ON(gpct_index >= NUM_GPCT); spin_lock_irqsave(&devpriv->mite_channel_lock, flags); if (devpriv->counter_dev->counters[gpct_index].mite_chan) { struct mite_channel *mite_chan = devpriv->counter_dev->counters[gpct_index].mite_chan; ni_set_gpct_dma_channel(dev, gpct_index, -1); ni_tio_set_mite_channel(&devpriv-> counter_dev->counters[gpct_index], NULL); mite_release_channel(mite_chan); } spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); } #endif /* PCIDMA */ static void ni_release_cdo_mite_channel(struct comedi_device *dev) { #ifdef PCIDMA struct ni_private *devpriv = dev->private; unsigned long flags; spin_lock_irqsave(&devpriv->mite_channel_lock, flags); if (devpriv->cdo_mite_chan) { ni_set_cdo_dma_channel(dev, -1); mite_release_channel(devpriv->cdo_mite_chan); devpriv->cdo_mite_chan = NULL; } spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); #endif /* PCIDMA */ } /* e-series boards use the second irq signals to generate dma requests for their counters */ #ifdef PCIDMA static void ni_e_series_enable_second_irq(struct comedi_device *dev, unsigned gpct_index, short enable) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; if (board->reg_type & 
ni_reg_m_series_mask) return; switch (gpct_index) { case 0: if (enable) { devpriv->stc_writew(dev, G0_Gate_Second_Irq_Enable, Second_IRQ_A_Enable_Register); } else { devpriv->stc_writew(dev, 0, Second_IRQ_A_Enable_Register); } break; case 1: if (enable) { devpriv->stc_writew(dev, G1_Gate_Second_Irq_Enable, Second_IRQ_B_Enable_Register); } else { devpriv->stc_writew(dev, 0, Second_IRQ_B_Enable_Register); } break; default: BUG(); break; } } #endif /* PCIDMA */ static void ni_clear_ai_fifo(struct comedi_device *dev) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; if (board->reg_type == ni_reg_6143) { /* Flush the 6143 data FIFO */ ni_writel(0x10, AIFIFO_Control_6143); /* Flush fifo */ ni_writel(0x00, AIFIFO_Control_6143); /* Flush fifo */ while (ni_readl(AIFIFO_Status_6143) & 0x10) ; /* Wait for complete */ } else { devpriv->stc_writew(dev, 1, ADC_FIFO_Clear); if (board->reg_type == ni_reg_625x) { ni_writeb(0, M_Offset_Static_AI_Control(0)); ni_writeb(1, M_Offset_Static_AI_Control(0)); #if 0 /* the NI example code does 3 convert pulses for 625x boards, but that appears to be wrong in practice. */ devpriv->stc_writew(dev, AI_CONVERT_Pulse, AI_Command_1_Register); devpriv->stc_writew(dev, AI_CONVERT_Pulse, AI_Command_1_Register); devpriv->stc_writew(dev, AI_CONVERT_Pulse, AI_Command_1_Register); #endif } } } static void win_out2(struct comedi_device *dev, uint32_t data, int reg) { struct ni_private *devpriv = dev->private; devpriv->stc_writew(dev, data >> 16, reg); devpriv->stc_writew(dev, data & 0xffff, reg + 1); } static uint32_t win_in2(struct comedi_device *dev, int reg) { struct ni_private *devpriv = dev->private; uint32_t bits; bits = devpriv->stc_readw(dev, reg) << 16; bits |= devpriv->stc_readw(dev, reg + 1); return bits; } #define ao_win_out(data, addr) ni_ao_win_outw(dev, data, addr) static inline void ni_ao_win_outw(struct comedi_device *dev, uint16_t data, int addr) { struct ni_private *devpriv = dev->private; unsigned long flags; spin_lock_irqsave(&devpriv->window_lock, flags); ni_writew(addr, AO_Window_Address_611x); ni_writew(data, AO_Window_Data_611x); spin_unlock_irqrestore(&devpriv->window_lock, flags); } static inline void ni_ao_win_outl(struct comedi_device *dev, uint32_t data, int addr) { struct ni_private *devpriv = dev->private; unsigned long flags; spin_lock_irqsave(&devpriv->window_lock, flags); ni_writew(addr, AO_Window_Address_611x); ni_writel(data, AO_Window_Data_611x); spin_unlock_irqrestore(&devpriv->window_lock, flags); } static inline unsigned short ni_ao_win_inw(struct comedi_device *dev, int addr) { struct ni_private *devpriv = dev->private; unsigned long flags; unsigned short data; spin_lock_irqsave(&devpriv->window_lock, flags); ni_writew(addr, AO_Window_Address_611x); data = ni_readw(AO_Window_Data_611x); spin_unlock_irqrestore(&devpriv->window_lock, flags); return data; } /* ni_set_bits( ) allows different parts of the ni_mio_common driver to * share registers (such as Interrupt_A_Register) without interfering with * each other. * * NOTE: the switch/case statements are optimized out for a constant argument * so this is actually quite fast--- If you must wrap another function around this * make it inline to avoid a large speed penalty. * * value should only be 1 or 0. 
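 *
 * For example, elsewhere in this file the AO FIFO and error interrupts
 * are masked off with:
 *
 *      ni_set_bits(dev, Interrupt_B_Enable_Register,
 *                  AO_FIFO_Interrupt_Enable | AO_Error_Interrupt_Enable, 0);
 *
 * which clears exactly those two bits and leaves the rest of the
 * register's software copy intact.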
*/ static inline void ni_set_bits(struct comedi_device *dev, int reg, unsigned bits, unsigned value) { unsigned bit_values; if (value) bit_values = bits; else bit_values = 0; ni_set_bitfield(dev, reg, bits, bit_values); } static irqreturn_t ni_E_interrupt(int irq, void *d) { struct comedi_device *dev = d; struct ni_private *devpriv = dev->private; unsigned short a_status; unsigned short b_status; unsigned int ai_mite_status = 0; unsigned int ao_mite_status = 0; unsigned long flags; #ifdef PCIDMA struct mite_struct *mite = devpriv->mite; #endif if (!dev->attached) return IRQ_NONE; smp_mb(); /* make sure dev->attached is checked before handler does anything else. */ /* lock to avoid race with comedi_poll */ spin_lock_irqsave(&dev->spinlock, flags); a_status = devpriv->stc_readw(dev, AI_Status_1_Register); b_status = devpriv->stc_readw(dev, AO_Status_1_Register); #ifdef PCIDMA if (mite) { unsigned long flags_too; spin_lock_irqsave(&devpriv->mite_channel_lock, flags_too); if (devpriv->ai_mite_chan) { ai_mite_status = mite_get_status(devpriv->ai_mite_chan); if (ai_mite_status & CHSR_LINKC) writel(CHOR_CLRLC, devpriv->mite->mite_io_addr + MITE_CHOR(devpriv-> ai_mite_chan->channel)); } if (devpriv->ao_mite_chan) { ao_mite_status = mite_get_status(devpriv->ao_mite_chan); if (ao_mite_status & CHSR_LINKC) writel(CHOR_CLRLC, mite->mite_io_addr + MITE_CHOR(devpriv-> ao_mite_chan->channel)); } spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags_too); } #endif ack_a_interrupt(dev, a_status); ack_b_interrupt(dev, b_status); if ((a_status & Interrupt_A_St) || (ai_mite_status & CHSR_INT)) handle_a_interrupt(dev, a_status, ai_mite_status); if ((b_status & Interrupt_B_St) || (ao_mite_status & CHSR_INT)) handle_b_interrupt(dev, b_status, ao_mite_status); handle_gpct_interrupt(dev, 0); handle_gpct_interrupt(dev, 1); handle_cdio_interrupt(dev); spin_unlock_irqrestore(&dev->spinlock, flags); return IRQ_HANDLED; } #ifdef PCIDMA static void ni_sync_ai_dma(struct comedi_device *dev) { struct ni_private *devpriv = dev->private; struct comedi_subdevice *s = &dev->subdevices[NI_AI_SUBDEV]; unsigned long flags; spin_lock_irqsave(&devpriv->mite_channel_lock, flags); if (devpriv->ai_mite_chan) mite_sync_input_dma(devpriv->ai_mite_chan, s->async); spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); } static void mite_handle_b_linkc(struct mite_struct *mite, struct comedi_device *dev) { struct ni_private *devpriv = dev->private; struct comedi_subdevice *s = &dev->subdevices[NI_AO_SUBDEV]; unsigned long flags; spin_lock_irqsave(&devpriv->mite_channel_lock, flags); if (devpriv->ao_mite_chan) { mite_sync_output_dma(devpriv->ao_mite_chan, s->async); } spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); } static int ni_ao_wait_for_dma_load(struct comedi_device *dev) { struct ni_private *devpriv = dev->private; static const int timeout = 10000; int i; for (i = 0; i < timeout; i++) { unsigned short b_status; b_status = devpriv->stc_readw(dev, AO_Status_1_Register); if (b_status & AO_FIFO_Half_Full_St) break; /* if we poll too often, the pci bus activity seems to slow the dma transfer down */ udelay(10); } if (i == timeout) { comedi_error(dev, "timed out waiting for dma load"); return -EPIPE; } return 0; } #endif /* PCIDMA */ static void ni_handle_eos(struct comedi_device *dev, struct comedi_subdevice *s) { struct ni_private *devpriv = dev->private; if (devpriv->aimode == AIMODE_SCAN) { #ifdef PCIDMA static const int timeout = 10; int i; for (i = 0; i < timeout; i++) { ni_sync_ai_dma(dev); if 
((s->async->events & COMEDI_CB_EOS)) break; udelay(1); } #else ni_handle_fifo_dregs(dev); s->async->events |= COMEDI_CB_EOS; #endif } /* handle special case of single scan using AI_End_On_End_Of_Scan */ if ((devpriv->ai_cmd2 & AI_End_On_End_Of_Scan)) { shutdown_ai_command(dev); } } static void shutdown_ai_command(struct comedi_device *dev) { struct comedi_subdevice *s = &dev->subdevices[NI_AI_SUBDEV]; #ifdef PCIDMA ni_ai_drain_dma(dev); #endif ni_handle_fifo_dregs(dev); get_last_sample_611x(dev); get_last_sample_6143(dev); s->async->events |= COMEDI_CB_EOA; } static void ni_event(struct comedi_device *dev, struct comedi_subdevice *s) { if (s-> async->events & (COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW | COMEDI_CB_EOA)) { switch (s->index) { case NI_AI_SUBDEV: ni_ai_reset(dev, s); break; case NI_AO_SUBDEV: ni_ao_reset(dev, s); break; case NI_GPCT0_SUBDEV: case NI_GPCT1_SUBDEV: ni_gpct_cancel(dev, s); break; case NI_DIO_SUBDEV: ni_cdio_cancel(dev, s); break; default: break; } } comedi_event(dev, s); } static void handle_gpct_interrupt(struct comedi_device *dev, unsigned short counter_index) { #ifdef PCIDMA struct ni_private *devpriv = dev->private; struct comedi_subdevice *s; s = &dev->subdevices[NI_GPCT_SUBDEV(counter_index)]; ni_tio_handle_interrupt(&devpriv->counter_dev->counters[counter_index], s); if (s->async->events) ni_event(dev, s); #endif } static void ack_a_interrupt(struct comedi_device *dev, unsigned short a_status) { struct ni_private *devpriv = dev->private; unsigned short ack = 0; if (a_status & AI_SC_TC_St) { ack |= AI_SC_TC_Interrupt_Ack; } if (a_status & AI_START1_St) { ack |= AI_START1_Interrupt_Ack; } if (a_status & AI_START_St) { ack |= AI_START_Interrupt_Ack; } if (a_status & AI_STOP_St) { /* not sure why we used to ack the START here also, instead of doing it independently. Frank Hess 2007-07-06 */ ack |= AI_STOP_Interrupt_Ack /*| AI_START_Interrupt_Ack */ ; } if (ack) devpriv->stc_writew(dev, ack, Interrupt_A_Ack_Register); } static void handle_a_interrupt(struct comedi_device *dev, unsigned short status, unsigned ai_mite_status) { struct ni_private *devpriv = dev->private; struct comedi_subdevice *s = &dev->subdevices[NI_AI_SUBDEV]; /* 67xx boards don't have ai subdevice, but their gpct0 might generate an a interrupt */ if (s->type == COMEDI_SUBD_UNUSED) return; #ifdef DEBUG_INTERRUPT printk ("ni_mio_common: interrupt: a_status=%04x ai_mite_status=%08x\n", status, ai_mite_status); ni_mio_print_status_a(status); #endif #ifdef PCIDMA if (ai_mite_status & CHSR_LINKC) { ni_sync_ai_dma(dev); } if (ai_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY | CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR | CHSR_SABORT | CHSR_XFERR | CHSR_LxERR_mask)) { printk ("unknown mite interrupt, ack! (ai_mite_status=%08x)\n", ai_mite_status); /* mite_print_chsr(ai_mite_status); */ s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA; /* disable_irq(dev->irq); */ } #endif /* test for all uncommon interrupt events at the same time */ if (status & (AI_Overrun_St | AI_Overflow_St | AI_SC_TC_Error_St | AI_SC_TC_St | AI_START1_St)) { if (status == 0xffff) { printk ("ni_mio_common: a_status=0xffff. Card removed?\n"); /* we probably aren't even running a command now, * so it's a good idea to be careful. 
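 * (A status register reading back as all ones usually means the device
 * has dropped off the bus, e.g. a pulled PCMCIA card, so no further
 * register access can be trusted.)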
*/ if (comedi_is_subdevice_running(s)) { s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA; ni_event(dev, s); } return; } if (status & (AI_Overrun_St | AI_Overflow_St | AI_SC_TC_Error_St)) { printk("ni_mio_common: ai error a_status=%04x\n", status); ni_mio_print_status_a(status); shutdown_ai_command(dev); s->async->events |= COMEDI_CB_ERROR; if (status & (AI_Overrun_St | AI_Overflow_St)) s->async->events |= COMEDI_CB_OVERFLOW; ni_event(dev, s); return; } if (status & AI_SC_TC_St) { #ifdef DEBUG_INTERRUPT printk("ni_mio_common: SC_TC interrupt\n"); #endif if (!devpriv->ai_continuous) { shutdown_ai_command(dev); } } } #ifndef PCIDMA if (status & AI_FIFO_Half_Full_St) { int i; static const int timeout = 10; /* pcmcia cards (at least 6036) seem to stop producing interrupts if we *fail to get the fifo less than half full, so loop to be sure.*/ for (i = 0; i < timeout; ++i) { ni_handle_fifo_half_full(dev); if ((devpriv->stc_readw(dev, AI_Status_1_Register) & AI_FIFO_Half_Full_St) == 0) break; } } #endif /* !PCIDMA */ if ((status & AI_STOP_St)) { ni_handle_eos(dev, s); } ni_event(dev, s); #ifdef DEBUG_INTERRUPT status = devpriv->stc_readw(dev, AI_Status_1_Register); if (status & Interrupt_A_St) { printk ("handle_a_interrupt: didn't clear interrupt? status=0x%x\n", status); } #endif } static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status) { struct ni_private *devpriv = dev->private; unsigned short ack = 0; if (b_status & AO_BC_TC_St) { ack |= AO_BC_TC_Interrupt_Ack; } if (b_status & AO_Overrun_St) { ack |= AO_Error_Interrupt_Ack; } if (b_status & AO_START_St) { ack |= AO_START_Interrupt_Ack; } if (b_status & AO_START1_St) { ack |= AO_START1_Interrupt_Ack; } if (b_status & AO_UC_TC_St) { ack |= AO_UC_TC_Interrupt_Ack; } if (b_status & AO_UI2_TC_St) { ack |= AO_UI2_TC_Interrupt_Ack; } if (b_status & AO_UPDATE_St) { ack |= AO_UPDATE_Interrupt_Ack; } if (ack) devpriv->stc_writew(dev, ack, Interrupt_B_Ack_Register); } static void handle_b_interrupt(struct comedi_device *dev, unsigned short b_status, unsigned ao_mite_status) { struct ni_private *devpriv = dev->private; struct comedi_subdevice *s = &dev->subdevices[NI_AO_SUBDEV]; /* unsigned short ack=0; */ #ifdef DEBUG_INTERRUPT printk("ni_mio_common: interrupt: b_status=%04x m1_status=%08x\n", b_status, ao_mite_status); ni_mio_print_status_b(b_status); #endif #ifdef PCIDMA /* Currently, mite.c requires us to handle LINKC */ if (ao_mite_status & CHSR_LINKC) { mite_handle_b_linkc(devpriv->mite, dev); } if (ao_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY | CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR | CHSR_SABORT | CHSR_XFERR | CHSR_LxERR_mask)) { printk ("unknown mite interrupt, ack! 
(ao_mite_status=%08x)\n", ao_mite_status); /* mite_print_chsr(ao_mite_status); */ s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; } #endif if (b_status == 0xffff) return; if (b_status & AO_Overrun_St) { printk ("ni_mio_common: AO FIFO underrun status=0x%04x status2=0x%04x\n", b_status, devpriv->stc_readw(dev, AO_Status_2_Register)); s->async->events |= COMEDI_CB_OVERFLOW; } if (b_status & AO_BC_TC_St) { MDPRINTK ("ni_mio_common: AO BC_TC status=0x%04x status2=0x%04x\n", b_status, devpriv->stc_readw(dev, AO_Status_2_Register)); s->async->events |= COMEDI_CB_EOA; } #ifndef PCIDMA if (b_status & AO_FIFO_Request_St) { int ret; ret = ni_ao_fifo_half_empty(dev, s); if (!ret) { printk("ni_mio_common: AO buffer underrun\n"); ni_set_bits(dev, Interrupt_B_Enable_Register, AO_FIFO_Interrupt_Enable | AO_Error_Interrupt_Enable, 0); s->async->events |= COMEDI_CB_OVERFLOW; } } #endif ni_event(dev, s); } #ifdef DEBUG_STATUS_A static const char *const status_a_strings[] = { "passthru0", "fifo", "G0_gate", "G0_TC", "stop", "start", "sc_tc", "start1", "start2", "sc_tc_error", "overflow", "overrun", "fifo_empty", "fifo_half_full", "fifo_full", "interrupt_a" }; static void ni_mio_print_status_a(int status) { int i; printk("A status:"); for (i = 15; i >= 0; i--) { if (status & (1 << i)) { printk(" %s", status_a_strings[i]); } } printk("\n"); } #endif #ifdef DEBUG_STATUS_B static const char *const status_b_strings[] = { "passthru1", "fifo", "G1_gate", "G1_TC", "UI2_TC", "UPDATE", "UC_TC", "BC_TC", "start1", "overrun", "start", "bc_tc_error", "fifo_empty", "fifo_half_full", "fifo_full", "interrupt_b" }; static void ni_mio_print_status_b(int status) { int i; printk("B status:"); for (i = 15; i >= 0; i--) { if (status & (1 << i)) { printk(" %s", status_b_strings[i]); } } printk("\n"); } #endif #ifndef PCIDMA static void ni_ao_fifo_load(struct comedi_device *dev, struct comedi_subdevice *s, int n) { const struct ni_board_struct *board = comedi_board(dev); struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; int chan; int i; short d; u32 packed_data; int range; int err = 1; chan = async->cur_chan; for (i = 0; i < n; i++) { err &= comedi_buf_get(async, &d); if (err == 0) break; range = CR_RANGE(cmd->chanlist[chan]); if (board->reg_type & ni_reg_6xxx_mask) { packed_data = d & 0xffff; /* 6711 only has 16 bit wide ao fifo */ if (board->reg_type != ni_reg_6711) { err &= comedi_buf_get(async, &d); if (err == 0) break; chan++; i++; packed_data |= (d << 16) & 0xffff0000; } ni_writel(packed_data, DAC_FIFO_Data_611x); } else { ni_writew(d, DAC_FIFO_Data); } chan++; chan %= cmd->chanlist_len; } async->cur_chan = chan; if (err == 0) { async->events |= COMEDI_CB_OVERFLOW; } } /* * There's a small problem if the FIFO gets really low and we * don't have the data to fill it. Basically, if after we fill * the FIFO with all the data available, the FIFO is _still_ * less than half full, we never clear the interrupt. If the * IRQ is in edge mode, we never get another interrupt, because * this one wasn't cleared. If in level mode, we get flooded * with interrupts that we can't fulfill, because nothing ever * gets put into the buffer. * * This kind of situation is recoverable, but it is easier to * just pretend we had a FIFO underrun, since there is a good * chance it will happen anyway. This is _not_ the case for * RT code, as RT code might purposely be running close to the * metal. Needs to be fixed eventually. 
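 * (The "pretend underrun" is what ni_ao_fifo_half_empty() below does:
 * with no data available it raises COMEDI_CB_OVERFLOW, which ends the
 * command cleanly instead of leaving a never-acked FIFO interrupt.)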
*/ static int ni_ao_fifo_half_empty(struct comedi_device *dev, struct comedi_subdevice *s) { const struct ni_board_struct *board = comedi_board(dev); int n; n = comedi_buf_read_n_available(s->async); if (n == 0) { s->async->events |= COMEDI_CB_OVERFLOW; return 0; } n /= sizeof(short); if (n > board->ao_fifo_depth / 2) n = board->ao_fifo_depth / 2; ni_ao_fifo_load(dev, s, n); s->async->events |= COMEDI_CB_BLOCK; return 1; } static int ni_ao_prep_fifo(struct comedi_device *dev, struct comedi_subdevice *s) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; int n; /* reset fifo */ devpriv->stc_writew(dev, 1, DAC_FIFO_Clear); if (board->reg_type & ni_reg_6xxx_mask) ni_ao_win_outl(dev, 0x6, AO_FIFO_Offset_Load_611x); /* load some data */ n = comedi_buf_read_n_available(s->async); if (n == 0) return 0; n /= sizeof(short); if (n > board->ao_fifo_depth) n = board->ao_fifo_depth; ni_ao_fifo_load(dev, s, n); return n; } static void ni_ai_fifo_read(struct comedi_device *dev, struct comedi_subdevice *s, int n) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; struct comedi_async *async = s->async; int i; if (board->reg_type == ni_reg_611x) { short data[2]; u32 dl; for (i = 0; i < n / 2; i++) { dl = ni_readl(ADC_FIFO_Data_611x); /* This may get the hi/lo data in the wrong order */ data[0] = (dl >> 16) & 0xffff; data[1] = dl & 0xffff; cfc_write_array_to_buffer(s, data, sizeof(data)); } /* Check if there's a single sample stuck in the FIFO */ if (n % 2) { dl = ni_readl(ADC_FIFO_Data_611x); data[0] = dl & 0xffff; cfc_write_to_buffer(s, data[0]); } } else if (board->reg_type == ni_reg_6143) { short data[2]; u32 dl; /* This just reads the FIFO assuming the data is present, no checks on the FIFO status are performed */ for (i = 0; i < n / 2; i++) { dl = ni_readl(AIFIFO_Data_6143); data[0] = (dl >> 16) & 0xffff; data[1] = dl & 0xffff; cfc_write_array_to_buffer(s, data, sizeof(data)); } if (n % 2) { /* Assume there is a single sample stuck in the FIFO */ ni_writel(0x01, AIFIFO_Control_6143); /* Get stranded sample into FIFO */ dl = ni_readl(AIFIFO_Data_6143); data[0] = (dl >> 16) & 0xffff; cfc_write_to_buffer(s, data[0]); } } else { if (n > sizeof(devpriv->ai_fifo_buffer) / sizeof(devpriv->ai_fifo_buffer[0])) { comedi_error(dev, "bug! 
ai_fifo_buffer too small"); async->events |= COMEDI_CB_ERROR; return; } for (i = 0; i < n; i++) { devpriv->ai_fifo_buffer[i] = ni_readw(ADC_FIFO_Data_Register); } cfc_write_array_to_buffer(s, devpriv->ai_fifo_buffer, n * sizeof(devpriv->ai_fifo_buffer[0])); } } static void ni_handle_fifo_half_full(struct comedi_device *dev) { const struct ni_board_struct *board = comedi_board(dev); struct comedi_subdevice *s = &dev->subdevices[NI_AI_SUBDEV]; int n; n = board->ai_fifo_depth / 2; ni_ai_fifo_read(dev, s, n); } #endif #ifdef PCIDMA static int ni_ai_drain_dma(struct comedi_device *dev) { struct ni_private *devpriv = dev->private; int i; static const int timeout = 10000; unsigned long flags; int retval = 0; spin_lock_irqsave(&devpriv->mite_channel_lock, flags); if (devpriv->ai_mite_chan) { for (i = 0; i < timeout; i++) { if ((devpriv->stc_readw(dev, AI_Status_1_Register) & AI_FIFO_Empty_St) && mite_bytes_in_transit(devpriv->ai_mite_chan) == 0) break; udelay(5); } if (i == timeout) { printk("ni_mio_common: wait for dma drain timed out\n"); printk ("mite_bytes_in_transit=%i, AI_Status1_Register=0x%x\n", mite_bytes_in_transit(devpriv->ai_mite_chan), devpriv->stc_readw(dev, AI_Status_1_Register)); retval = -1; } } spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); ni_sync_ai_dma(dev); return retval; } #endif /* Empties the AI fifo */ static void ni_handle_fifo_dregs(struct comedi_device *dev) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; struct comedi_subdevice *s = &dev->subdevices[NI_AI_SUBDEV]; short data[2]; u32 dl; short fifo_empty; int i; if (board->reg_type == ni_reg_611x) { while ((devpriv->stc_readw(dev, AI_Status_1_Register) & AI_FIFO_Empty_St) == 0) { dl = ni_readl(ADC_FIFO_Data_611x); /* This may get the hi/lo data in the wrong order */ data[0] = (dl >> 16); data[1] = (dl & 0xffff); cfc_write_array_to_buffer(s, data, sizeof(data)); } } else if (board->reg_type == ni_reg_6143) { i = 0; while (ni_readl(AIFIFO_Status_6143) & 0x04) { dl = ni_readl(AIFIFO_Data_6143); /* This may get the hi/lo data in the wrong order */ data[0] = (dl >> 16); data[1] = (dl & 0xffff); cfc_write_array_to_buffer(s, data, sizeof(data)); i += 2; } /* Check if stranded sample is present */ if (ni_readl(AIFIFO_Status_6143) & 0x01) { ni_writel(0x01, AIFIFO_Control_6143); /* Get stranded sample into FIFO */ dl = ni_readl(AIFIFO_Data_6143); data[0] = (dl >> 16) & 0xffff; cfc_write_to_buffer(s, data[0]); } } else { fifo_empty = devpriv->stc_readw(dev, AI_Status_1_Register) & AI_FIFO_Empty_St; while (fifo_empty == 0) { for (i = 0; i < sizeof(devpriv->ai_fifo_buffer) / sizeof(devpriv->ai_fifo_buffer[0]); i++) { fifo_empty = devpriv->stc_readw(dev, AI_Status_1_Register) & AI_FIFO_Empty_St; if (fifo_empty) break; devpriv->ai_fifo_buffer[i] = ni_readw(ADC_FIFO_Data_Register); } cfc_write_array_to_buffer(s, devpriv->ai_fifo_buffer, i * sizeof(devpriv-> ai_fifo_buffer[0])); } } } static void get_last_sample_611x(struct comedi_device *dev) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv __maybe_unused = dev->private; struct comedi_subdevice *s = &dev->subdevices[NI_AI_SUBDEV]; short data; u32 dl; if (board->reg_type != ni_reg_611x) return; /* Check if there's a single sample stuck in the FIFO */ if (ni_readb(XXX_Status) & 0x80) { dl = ni_readl(ADC_FIFO_Data_611x); data = (dl & 0xffff); cfc_write_to_buffer(s, data); } } static void get_last_sample_6143(struct comedi_device *dev) { const struct ni_board_struct *board = 
comedi_board(dev); struct ni_private *devpriv __maybe_unused = dev->private; struct comedi_subdevice *s = &dev->subdevices[NI_AI_SUBDEV]; short data; u32 dl; if (board->reg_type != ni_reg_6143) return; /* Check if there's a single sample stuck in the FIFO */ if (ni_readl(AIFIFO_Status_6143) & 0x01) { ni_writel(0x01, AIFIFO_Control_6143); /* Get stranded sample into FIFO */ dl = ni_readl(AIFIFO_Data_6143); /* This may get the hi/lo data in the wrong order */ data = (dl >> 16) & 0xffff; cfc_write_to_buffer(s, data); } } static void ni_ai_munge(struct comedi_device *dev, struct comedi_subdevice *s, void *data, unsigned int num_bytes, unsigned int chan_index) { struct ni_private *devpriv = dev->private; struct comedi_async *async = s->async; unsigned int i; unsigned int length = num_bytes / bytes_per_sample(s); short *array = data; unsigned int *larray = data; for (i = 0; i < length; i++) { #ifdef PCIDMA if (s->subdev_flags & SDF_LSAMPL) larray[i] = le32_to_cpu(larray[i]); else array[i] = le16_to_cpu(array[i]); #endif if (s->subdev_flags & SDF_LSAMPL) larray[i] += devpriv->ai_offset[chan_index]; else array[i] += devpriv->ai_offset[chan_index]; chan_index++; chan_index %= async->cmd.chanlist_len; } } #ifdef PCIDMA static int ni_ai_setup_MITE_dma(struct comedi_device *dev) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; struct comedi_subdevice *s = &dev->subdevices[NI_AI_SUBDEV]; int retval; unsigned long flags; retval = ni_request_ai_mite_channel(dev); if (retval) return retval; /* printk("comedi_debug: using mite channel %i for ai.\n", devpriv->ai_mite_chan->channel); */ /* write alloc the entire buffer */ comedi_buf_write_alloc(s->async, s->async->prealloc_bufsz); spin_lock_irqsave(&devpriv->mite_channel_lock, flags); if (devpriv->ai_mite_chan == NULL) { spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); return -EIO; } switch (board->reg_type) { case ni_reg_611x: case ni_reg_6143: mite_prep_dma(devpriv->ai_mite_chan, 32, 16); break; case ni_reg_628x: mite_prep_dma(devpriv->ai_mite_chan, 32, 32); break; default: mite_prep_dma(devpriv->ai_mite_chan, 16, 16); break; } /*start the MITE */ mite_dma_arm(devpriv->ai_mite_chan); spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); return 0; } static int ni_ao_setup_MITE_dma(struct comedi_device *dev) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; struct comedi_subdevice *s = &dev->subdevices[NI_AO_SUBDEV]; int retval; unsigned long flags; retval = ni_request_ao_mite_channel(dev); if (retval) return retval; /* read alloc the entire buffer */ comedi_buf_read_alloc(s->async, s->async->prealloc_bufsz); spin_lock_irqsave(&devpriv->mite_channel_lock, flags); if (devpriv->ao_mite_chan) { if (board->reg_type & (ni_reg_611x | ni_reg_6713)) { mite_prep_dma(devpriv->ao_mite_chan, 32, 32); } else { /* doing 32 instead of 16 bit wide transfers from memory makes the mite do 32 bit pci transfers, doubling pci bandwidth. */ mite_prep_dma(devpriv->ao_mite_chan, 16, 32); } mite_dma_arm(devpriv->ao_mite_chan); } else retval = -EIO; spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); return retval; } #endif /* PCIDMA */ /* used for both cancel ioctl and board initialization this is pretty harsh for a cancel, but it works... 
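   (Concretely, the reset below releases any MITE DMA channel, masks all
   AI interrupt sources, drains the FIFO, disarms the timing engine, and
   reprograms the static AI personal/output-control registers.)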
*/ static int ni_ai_reset(struct comedi_device *dev, struct comedi_subdevice *s) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; ni_release_ai_mite_channel(dev); /* ai configuration */ devpriv->stc_writew(dev, AI_Configuration_Start | AI_Reset, Joint_Reset_Register); ni_set_bits(dev, Interrupt_A_Enable_Register, AI_SC_TC_Interrupt_Enable | AI_START1_Interrupt_Enable | AI_START2_Interrupt_Enable | AI_START_Interrupt_Enable | AI_STOP_Interrupt_Enable | AI_Error_Interrupt_Enable | AI_FIFO_Interrupt_Enable, 0); ni_clear_ai_fifo(dev); if (board->reg_type != ni_reg_6143) ni_writeb(0, Misc_Command); devpriv->stc_writew(dev, AI_Disarm, AI_Command_1_Register); /* reset pulses */ devpriv->stc_writew(dev, AI_Start_Stop | AI_Mode_1_Reserved /*| AI_Trigger_Once */ , AI_Mode_1_Register); devpriv->stc_writew(dev, 0x0000, AI_Mode_2_Register); /* generate FIFO interrupts on non-empty */ devpriv->stc_writew(dev, (0 << 6) | 0x0000, AI_Mode_3_Register); if (board->reg_type == ni_reg_611x) { devpriv->stc_writew(dev, AI_SHIFTIN_Pulse_Width | AI_SOC_Polarity | AI_LOCALMUX_CLK_Pulse_Width, AI_Personal_Register); devpriv->stc_writew(dev, AI_SCAN_IN_PROG_Output_Select(3) | AI_EXTMUX_CLK_Output_Select(0) | AI_LOCALMUX_CLK_Output_Select(2) | AI_SC_TC_Output_Select(3) | AI_CONVERT_Output_Select (AI_CONVERT_Output_Enable_High), AI_Output_Control_Register); } else if (board->reg_type == ni_reg_6143) { devpriv->stc_writew(dev, AI_SHIFTIN_Pulse_Width | AI_SOC_Polarity | AI_LOCALMUX_CLK_Pulse_Width, AI_Personal_Register); devpriv->stc_writew(dev, AI_SCAN_IN_PROG_Output_Select(3) | AI_EXTMUX_CLK_Output_Select(0) | AI_LOCALMUX_CLK_Output_Select(2) | AI_SC_TC_Output_Select(3) | AI_CONVERT_Output_Select (AI_CONVERT_Output_Enable_Low), AI_Output_Control_Register); } else { unsigned ai_output_control_bits; devpriv->stc_writew(dev, AI_SHIFTIN_Pulse_Width | AI_SOC_Polarity | AI_CONVERT_Pulse_Width | AI_LOCALMUX_CLK_Pulse_Width, AI_Personal_Register); ai_output_control_bits = AI_SCAN_IN_PROG_Output_Select(3) | AI_EXTMUX_CLK_Output_Select(0) | AI_LOCALMUX_CLK_Output_Select(2) | AI_SC_TC_Output_Select(3); if (board->reg_type == ni_reg_622x) ai_output_control_bits |= AI_CONVERT_Output_Select (AI_CONVERT_Output_Enable_High); else ai_output_control_bits |= AI_CONVERT_Output_Select (AI_CONVERT_Output_Enable_Low); devpriv->stc_writew(dev, ai_output_control_bits, AI_Output_Control_Register); } /* the following registers should not be changed, because there * are no backup registers in devpriv. 
If you want to change * any of these, add a backup register and other appropriate code: * AI_Mode_1_Register * AI_Mode_3_Register * AI_Personal_Register * AI_Output_Control_Register */ devpriv->stc_writew(dev, AI_SC_TC_Error_Confirm | AI_START_Interrupt_Ack | AI_START2_Interrupt_Ack | AI_START1_Interrupt_Ack | AI_SC_TC_Interrupt_Ack | AI_Error_Interrupt_Ack | AI_STOP_Interrupt_Ack, Interrupt_A_Ack_Register); /* clear interrupts */ devpriv->stc_writew(dev, AI_Configuration_End, Joint_Reset_Register); return 0; } static int ni_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s) { unsigned long flags; int count; /* lock to avoid race with interrupt handler */ spin_lock_irqsave(&dev->spinlock, flags); #ifndef PCIDMA ni_handle_fifo_dregs(dev); #else ni_sync_ai_dma(dev); #endif count = s->async->buf_write_count - s->async->buf_read_count; spin_unlock_irqrestore(&dev->spinlock, flags); return count; } static int ni_ai_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; int i, n; const unsigned int mask = (1 << board->adbits) - 1; unsigned signbits; unsigned short d; unsigned long dl; ni_load_channelgain_list(dev, 1, &insn->chanspec); ni_clear_ai_fifo(dev); signbits = devpriv->ai_offset[0]; if (board->reg_type == ni_reg_611x) { for (n = 0; n < num_adc_stages_611x; n++) { devpriv->stc_writew(dev, AI_CONVERT_Pulse, AI_Command_1_Register); udelay(1); } for (n = 0; n < insn->n; n++) { devpriv->stc_writew(dev, AI_CONVERT_Pulse, AI_Command_1_Register); /* The 611x has screwy 32-bit FIFOs. */ d = 0; for (i = 0; i < NI_TIMEOUT; i++) { if (ni_readb(XXX_Status) & 0x80) { d = (ni_readl(ADC_FIFO_Data_611x) >> 16) & 0xffff; break; } if (!(devpriv->stc_readw(dev, AI_Status_1_Register) & AI_FIFO_Empty_St)) { d = ni_readl(ADC_FIFO_Data_611x) & 0xffff; break; } } if (i == NI_TIMEOUT) { printk ("ni_mio_common: timeout in 611x ni_ai_insn_read\n"); return -ETIME; } d += signbits; data[n] = d; } } else if (board->reg_type == ni_reg_6143) { for (n = 0; n < insn->n; n++) { devpriv->stc_writew(dev, AI_CONVERT_Pulse, AI_Command_1_Register); /* The 6143 has 32-bit FIFOs. 
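In streaming mode each 32-bit FIFO word carries two 16-bit samples (see
ni_ai_fifo_read() above).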
You need to strobe a bit to move a single 16bit stranded sample into the FIFO */ dl = 0; for (i = 0; i < NI_TIMEOUT; i++) { if (ni_readl(AIFIFO_Status_6143) & 0x01) { ni_writel(0x01, AIFIFO_Control_6143); /* Get stranded sample into FIFO */ dl = ni_readl(AIFIFO_Data_6143); break; } } if (i == NI_TIMEOUT) { printk ("ni_mio_common: timeout in 6143 ni_ai_insn_read\n"); return -ETIME; } data[n] = (((dl >> 16) & 0xFFFF) + signbits) & 0xFFFF; } } else { for (n = 0; n < insn->n; n++) { devpriv->stc_writew(dev, AI_CONVERT_Pulse, AI_Command_1_Register); for (i = 0; i < NI_TIMEOUT; i++) { if (!(devpriv->stc_readw(dev, AI_Status_1_Register) & AI_FIFO_Empty_St)) break; } if (i == NI_TIMEOUT) { printk ("ni_mio_common: timeout in ni_ai_insn_read\n"); return -ETIME; } if (board->reg_type & ni_reg_m_series_mask) { data[n] = ni_readl(M_Offset_AI_FIFO_Data) & mask; } else { d = ni_readw(ADC_FIFO_Data_Register); d += signbits; /* subtle: needs to be short addition */ data[n] = d; } } } return insn->n; } static void ni_prime_channelgain_list(struct comedi_device *dev) { struct ni_private *devpriv = dev->private; int i; devpriv->stc_writew(dev, AI_CONVERT_Pulse, AI_Command_1_Register); for (i = 0; i < NI_TIMEOUT; ++i) { if (!(devpriv->stc_readw(dev, AI_Status_1_Register) & AI_FIFO_Empty_St)) { devpriv->stc_writew(dev, 1, ADC_FIFO_Clear); return; } udelay(1); } printk("ni_mio_common: timeout loading channel/gain list\n"); } static void ni_m_series_load_channelgain_list(struct comedi_device *dev, unsigned int n_chan, unsigned int *list) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; unsigned int chan, range, aref; unsigned int i; unsigned offset; unsigned int dither; unsigned range_code; devpriv->stc_writew(dev, 1, Configuration_Memory_Clear); /* offset = 1 << (board->adbits - 1); */ if ((list[0] & CR_ALT_SOURCE)) { unsigned bypass_bits; chan = CR_CHAN(list[0]); range = CR_RANGE(list[0]); range_code = ni_gainlkup[board->gainlkup][range]; dither = ((list[0] & CR_ALT_FILTER) != 0); bypass_bits = MSeries_AI_Bypass_Config_FIFO_Bit; bypass_bits |= chan; bypass_bits |= (devpriv->ai_calib_source) & (MSeries_AI_Bypass_Cal_Sel_Pos_Mask | MSeries_AI_Bypass_Cal_Sel_Neg_Mask | MSeries_AI_Bypass_Mode_Mux_Mask | MSeries_AO_Bypass_AO_Cal_Sel_Mask); bypass_bits |= MSeries_AI_Bypass_Gain_Bits(range_code); if (dither) bypass_bits |= MSeries_AI_Bypass_Dither_Bit; /* don't use 2's complement encoding */ bypass_bits |= MSeries_AI_Bypass_Polarity_Bit; ni_writel(bypass_bits, M_Offset_AI_Config_FIFO_Bypass); } else { ni_writel(0, M_Offset_AI_Config_FIFO_Bypass); } offset = 0; for (i = 0; i < n_chan; i++) { unsigned config_bits = 0; chan = CR_CHAN(list[i]); aref = CR_AREF(list[i]); range = CR_RANGE(list[i]); dither = ((list[i] & CR_ALT_FILTER) != 0); range_code = ni_gainlkup[board->gainlkup][range]; devpriv->ai_offset[i] = offset; switch (aref) { case AREF_DIFF: config_bits |= MSeries_AI_Config_Channel_Type_Differential_Bits; break; case AREF_COMMON: config_bits |= MSeries_AI_Config_Channel_Type_Common_Ref_Bits; break; case AREF_GROUND: config_bits |= MSeries_AI_Config_Channel_Type_Ground_Ref_Bits; break; case AREF_OTHER: break; } config_bits |= MSeries_AI_Config_Channel_Bits(chan); config_bits |= MSeries_AI_Config_Bank_Bits(board->reg_type, chan); config_bits |= MSeries_AI_Config_Gain_Bits(range_code); if (i == n_chan - 1) config_bits |= MSeries_AI_Config_Last_Channel_Bit; if (dither) config_bits |= MSeries_AI_Config_Dither_Bit; /* don't use 2's complement encoding */ config_bits 
                    |= MSeries_AI_Config_Polarity_Bit;
                ni_writew(config_bits, M_Offset_AI_Config_FIFO_Data);
        }
        ni_prime_channelgain_list(dev);
}

/*
 * Notes on the 6110 and 6111:
 * These boards are slightly different from the rest of the series, since
 * they have multiple A/D converters.
 * From the driver side, the configuration memory is a
 * little different.
 * Configuration Memory Low:
 *      bits 15-9: same
 *      bit 8: unipolar/bipolar (should be 0 for bipolar)
 *      bits 0-3: gain.  This is 4 bits instead of 3 for the other boards
 *              1001 gain=0.1 (+/- 50)
 *              1010 0.2
 *              1011 0.1
 *              0001 1
 *              0010 2
 *              0011 5
 *              0100 10
 *              0101 20
 *              0110 50
 * Configuration Memory High:
 *      bits 12-14: Channel Type
 *              001 for differential
 *              000 for calibration
 *      bit 11: coupling (this is not currently handled)
 *              1 AC coupling
 *              0 DC coupling
 *      bits 0-2: channel
 *              valid channels are 0-3
 */
static void ni_load_channelgain_list(struct comedi_device *dev,
                                     unsigned int n_chan, unsigned int *list)
{
        const struct ni_board_struct *board = comedi_board(dev);
        struct ni_private *devpriv = dev->private;
        unsigned int chan, range, aref;
        unsigned int i;
        unsigned int hi, lo;
        unsigned offset;
        unsigned int dither;

        if (board->reg_type & ni_reg_m_series_mask) {
                ni_m_series_load_channelgain_list(dev, n_chan, list);
                return;
        }
        if (n_chan == 1 && (board->reg_type != ni_reg_611x)
            && (board->reg_type != ni_reg_6143)) {
                if (devpriv->changain_state
                    && devpriv->changain_spec == list[0]) {
                        /* ready to go. */
                        return;
                }
                devpriv->changain_state = 1;
                devpriv->changain_spec = list[0];
        } else {
                devpriv->changain_state = 0;
        }

        devpriv->stc_writew(dev, 1, Configuration_Memory_Clear);

        /* Set up Calibration mode if required */
        if (board->reg_type == ni_reg_6143) {
                if ((list[0] & CR_ALT_SOURCE)
                    && !devpriv->ai_calib_source_enabled) {
                        /* Strobe Relay enable bit */
                        ni_writew(devpriv->ai_calib_source |
                                  Calibration_Channel_6143_RelayOn,
                                  Calibration_Channel_6143);
                        ni_writew(devpriv->ai_calib_source,
                                  Calibration_Channel_6143);
                        devpriv->ai_calib_source_enabled = 1;
                        msleep_interruptible(100);      /* Allow relays to change */
                } else if (!(list[0] & CR_ALT_SOURCE)
                           && devpriv->ai_calib_source_enabled) {
                        /* Strobe Relay disable bit */
                        ni_writew(devpriv->ai_calib_source |
                                  Calibration_Channel_6143_RelayOff,
                                  Calibration_Channel_6143);
                        ni_writew(devpriv->ai_calib_source,
                                  Calibration_Channel_6143);
                        devpriv->ai_calib_source_enabled = 0;
                        msleep_interruptible(100);      /* Allow relays to change */
                }
        }

        offset = 1 << (board->adbits - 1);
        for (i = 0; i < n_chan; i++) {
                if ((board->reg_type != ni_reg_6143)
                    && (list[i] & CR_ALT_SOURCE)) {
                        chan = devpriv->ai_calib_source;
                } else {
                        chan = CR_CHAN(list[i]);
                }
                aref = CR_AREF(list[i]);
                range = CR_RANGE(list[i]);
                dither = ((list[i] & CR_ALT_FILTER) != 0);

                /* fix the external/internal range differences */
                range = ni_gainlkup[board->gainlkup][range];
                if (board->reg_type == ni_reg_611x)
                        devpriv->ai_offset[i] = offset;
                else
                        devpriv->ai_offset[i] = (range & 0x100) ?
0 : offset; hi = 0; if ((list[i] & CR_ALT_SOURCE)) { if (board->reg_type == ni_reg_611x) ni_writew(CR_CHAN(list[i]) & 0x0003, Calibration_Channel_Select_611x); } else { if (board->reg_type == ni_reg_611x) aref = AREF_DIFF; else if (board->reg_type == ni_reg_6143) aref = AREF_OTHER; switch (aref) { case AREF_DIFF: hi |= AI_DIFFERENTIAL; break; case AREF_COMMON: hi |= AI_COMMON; break; case AREF_GROUND: hi |= AI_GROUND; break; case AREF_OTHER: break; } } hi |= AI_CONFIG_CHANNEL(chan); ni_writew(hi, Configuration_Memory_High); if (board->reg_type != ni_reg_6143) { lo = range; if (i == n_chan - 1) lo |= AI_LAST_CHANNEL; if (dither) lo |= AI_DITHER; ni_writew(lo, Configuration_Memory_Low); } } /* prime the channel/gain list */ if ((board->reg_type != ni_reg_611x) && (board->reg_type != ni_reg_6143)) { ni_prime_channelgain_list(dev); } } static int ni_ns_to_timer(const struct comedi_device *dev, unsigned nanosec, int round_mode) { struct ni_private *devpriv = dev->private; int divider; switch (round_mode) { case TRIG_ROUND_NEAREST: default: divider = (nanosec + devpriv->clock_ns / 2) / devpriv->clock_ns; break; case TRIG_ROUND_DOWN: divider = (nanosec) / devpriv->clock_ns; break; case TRIG_ROUND_UP: divider = (nanosec + devpriv->clock_ns - 1) / devpriv->clock_ns; break; } return divider - 1; } static unsigned ni_timer_to_ns(const struct comedi_device *dev, int timer) { struct ni_private *devpriv = dev->private; return devpriv->clock_ns * (timer + 1); } static unsigned ni_min_ai_scan_period_ns(struct comedi_device *dev, unsigned num_channels) { const struct ni_board_struct *board = comedi_board(dev); switch (board->reg_type) { case ni_reg_611x: case ni_reg_6143: /* simultaneously-sampled inputs */ return board->ai_speed; break; default: /* multiplexed inputs */ break; } return board->ai_speed * num_channels; } static int ni_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; int err = 0; int tmp; unsigned int sources; /* Step 1 : check if triggers are trivially valid */ if ((cmd->flags & CMDF_WRITE)) cmd->flags &= ~CMDF_WRITE; err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT | TRIG_EXT); err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER | TRIG_EXT); sources = TRIG_TIMER | TRIG_EXT; if (board->reg_type == ni_reg_611x || board->reg_type == ni_reg_6143) sources |= TRIG_NOW; err |= cfc_check_trigger_src(&cmd->convert_src, sources); err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT); err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE); if (err) return 1; /* Step 2a : make sure trigger sources are unique */ err |= cfc_check_trigger_is_unique(cmd->start_src); err |= cfc_check_trigger_is_unique(cmd->scan_begin_src); err |= cfc_check_trigger_is_unique(cmd->convert_src); err |= cfc_check_trigger_is_unique(cmd->stop_src); /* Step 2b : and mutually compatible */ if (err) return 2; /* Step 3: check if arguments are trivially valid */ if (cmd->start_src == TRIG_EXT) { /* external trigger */ unsigned int tmp = CR_CHAN(cmd->start_arg); if (tmp > 16) tmp = 16; tmp |= (cmd->start_arg & (CR_INVERT | CR_EDGE)); err |= cfc_check_trigger_arg_is(&cmd->start_arg, tmp); } else { /* true for both TRIG_NOW and TRIG_INT */ err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0); } if (cmd->scan_begin_src == TRIG_TIMER) { err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg, ni_min_ai_scan_period_ns(dev, cmd->chanlist_len)); err |= 
cfc_check_trigger_arg_max(&cmd->scan_begin_arg, devpriv->clock_ns * 0xffffff); } else if (cmd->scan_begin_src == TRIG_EXT) { /* external trigger */ unsigned int tmp = CR_CHAN(cmd->scan_begin_arg); if (tmp > 16) tmp = 16; tmp |= (cmd->scan_begin_arg & (CR_INVERT | CR_EDGE)); err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, tmp); } else { /* TRIG_OTHER */ err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0); } if (cmd->convert_src == TRIG_TIMER) { if ((board->reg_type == ni_reg_611x) || (board->reg_type == ni_reg_6143)) { err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0); } else { err |= cfc_check_trigger_arg_min(&cmd->convert_arg, board->ai_speed); err |= cfc_check_trigger_arg_max(&cmd->convert_arg, devpriv->clock_ns * 0xffff); } } else if (cmd->convert_src == TRIG_EXT) { /* external trigger */ unsigned int tmp = CR_CHAN(cmd->convert_arg); if (tmp > 16) tmp = 16; tmp |= (cmd->convert_arg & (CR_ALT_FILTER | CR_INVERT)); err |= cfc_check_trigger_arg_is(&cmd->convert_arg, tmp); } else if (cmd->convert_src == TRIG_NOW) { err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0); } err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len); if (cmd->stop_src == TRIG_COUNT) { unsigned int max_count = 0x01000000; if (board->reg_type == ni_reg_611x) max_count -= num_adc_stages_611x; err |= cfc_check_trigger_arg_max(&cmd->stop_arg, max_count); err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1); } else { /* TRIG_NONE */ err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0); } if (err) return 3; /* step 4: fix up any arguments */ if (cmd->scan_begin_src == TRIG_TIMER) { tmp = cmd->scan_begin_arg; cmd->scan_begin_arg = ni_timer_to_ns(dev, ni_ns_to_timer(dev, cmd->scan_begin_arg, cmd-> flags & TRIG_ROUND_MASK)); if (tmp != cmd->scan_begin_arg) err++; } if (cmd->convert_src == TRIG_TIMER) { if ((board->reg_type != ni_reg_611x) && (board->reg_type != ni_reg_6143)) { tmp = cmd->convert_arg; cmd->convert_arg = ni_timer_to_ns(dev, ni_ns_to_timer(dev, cmd->convert_arg, cmd-> flags & TRIG_ROUND_MASK)); if (tmp != cmd->convert_arg) err++; if (cmd->scan_begin_src == TRIG_TIMER && cmd->scan_begin_arg < cmd->convert_arg * cmd->scan_end_arg) { cmd->scan_begin_arg = cmd->convert_arg * cmd->scan_end_arg; err++; } } } if (err) return 4; return 0; } static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; const struct comedi_cmd *cmd = &s->async->cmd; int timer; int mode1 = 0; /* mode1 is needed for both stop and convert */ int mode2 = 0; int start_stop_select = 0; unsigned int stop_count; int interrupt_a_enable = 0; MDPRINTK("ni_ai_cmd\n"); if (dev->irq == 0) { comedi_error(dev, "cannot run command without an irq"); return -EIO; } ni_clear_ai_fifo(dev); ni_load_channelgain_list(dev, cmd->chanlist_len, cmd->chanlist); /* start configuration */ devpriv->stc_writew(dev, AI_Configuration_Start, Joint_Reset_Register); /* disable analog triggering for now, since it * interferes with the use of pfi0 */ devpriv->an_trig_etc_reg &= ~Analog_Trigger_Enable; devpriv->stc_writew(dev, devpriv->an_trig_etc_reg, Analog_Trigger_Etc_Register); switch (cmd->start_src) { case TRIG_INT: case TRIG_NOW: devpriv->stc_writew(dev, AI_START2_Select(0) | AI_START1_Sync | AI_START1_Edge | AI_START1_Select(0), AI_Trigger_Select_Register); break; case TRIG_EXT: { int chan = CR_CHAN(cmd->start_arg); unsigned int bits = AI_START2_Select(0) | AI_START1_Sync | AI_START1_Select(chan + 1); if (cmd->start_arg & 
CR_INVERT) bits |= AI_START1_Polarity; if (cmd->start_arg & CR_EDGE) bits |= AI_START1_Edge; devpriv->stc_writew(dev, bits, AI_Trigger_Select_Register); break; } } mode2 &= ~AI_Pre_Trigger; mode2 &= ~AI_SC_Initial_Load_Source; mode2 &= ~AI_SC_Reload_Mode; devpriv->stc_writew(dev, mode2, AI_Mode_2_Register); if (cmd->chanlist_len == 1 || (board->reg_type == ni_reg_611x) || (board->reg_type == ni_reg_6143)) { start_stop_select |= AI_STOP_Polarity; start_stop_select |= AI_STOP_Select(31); /* logic low */ start_stop_select |= AI_STOP_Sync; } else { start_stop_select |= AI_STOP_Select(19); /* ai configuration memory */ } devpriv->stc_writew(dev, start_stop_select, AI_START_STOP_Select_Register); devpriv->ai_cmd2 = 0; switch (cmd->stop_src) { case TRIG_COUNT: stop_count = cmd->stop_arg - 1; if (board->reg_type == ni_reg_611x) { /* have to take 3 stage adc pipeline into account */ stop_count += num_adc_stages_611x; } /* stage number of scans */ devpriv->stc_writel(dev, stop_count, AI_SC_Load_A_Registers); mode1 |= AI_Start_Stop | AI_Mode_1_Reserved | AI_Trigger_Once; devpriv->stc_writew(dev, mode1, AI_Mode_1_Register); /* load SC (Scan Count) */ devpriv->stc_writew(dev, AI_SC_Load, AI_Command_1_Register); devpriv->ai_continuous = 0; if (stop_count == 0) { devpriv->ai_cmd2 |= AI_End_On_End_Of_Scan; interrupt_a_enable |= AI_STOP_Interrupt_Enable; /* this is required to get the last sample for chanlist_len > 1, not sure why */ if (cmd->chanlist_len > 1) start_stop_select |= AI_STOP_Polarity | AI_STOP_Edge; } break; case TRIG_NONE: /* stage number of scans */ devpriv->stc_writel(dev, 0, AI_SC_Load_A_Registers); mode1 |= AI_Start_Stop | AI_Mode_1_Reserved | AI_Continuous; devpriv->stc_writew(dev, mode1, AI_Mode_1_Register); /* load SC (Scan Count) */ devpriv->stc_writew(dev, AI_SC_Load, AI_Command_1_Register); devpriv->ai_continuous = 1; break; } switch (cmd->scan_begin_src) { case TRIG_TIMER: /* stop bits for non 611x boards AI_SI_Special_Trigger_Delay=0 AI_Pre_Trigger=0 AI_START_STOP_Select_Register: AI_START_Polarity=0 (?) rising edge AI_START_Edge=1 edge triggered AI_START_Sync=1 (?) 
AI_START_Select=0 SI_TC AI_STOP_Polarity=0 rising edge AI_STOP_Edge=0 level AI_STOP_Sync=1 AI_STOP_Select=19 external pin (configuration mem) */ start_stop_select |= AI_START_Edge | AI_START_Sync; devpriv->stc_writew(dev, start_stop_select, AI_START_STOP_Select_Register); mode2 |= AI_SI_Reload_Mode(0); /* AI_SI_Initial_Load_Source=A */ mode2 &= ~AI_SI_Initial_Load_Source; /* mode2 |= AI_SC_Reload_Mode; */ devpriv->stc_writew(dev, mode2, AI_Mode_2_Register); /* load SI */ timer = ni_ns_to_timer(dev, cmd->scan_begin_arg, TRIG_ROUND_NEAREST); devpriv->stc_writel(dev, timer, AI_SI_Load_A_Registers); devpriv->stc_writew(dev, AI_SI_Load, AI_Command_1_Register); break; case TRIG_EXT: if (cmd->scan_begin_arg & CR_EDGE) start_stop_select |= AI_START_Edge; /* AI_START_Polarity==1 is falling edge */ if (cmd->scan_begin_arg & CR_INVERT) start_stop_select |= AI_START_Polarity; if (cmd->scan_begin_src != cmd->convert_src || (cmd->scan_begin_arg & ~CR_EDGE) != (cmd->convert_arg & ~CR_EDGE)) start_stop_select |= AI_START_Sync; start_stop_select |= AI_START_Select(1 + CR_CHAN(cmd->scan_begin_arg)); devpriv->stc_writew(dev, start_stop_select, AI_START_STOP_Select_Register); break; } switch (cmd->convert_src) { case TRIG_TIMER: case TRIG_NOW: if (cmd->convert_arg == 0 || cmd->convert_src == TRIG_NOW) timer = 1; else timer = ni_ns_to_timer(dev, cmd->convert_arg, TRIG_ROUND_NEAREST); devpriv->stc_writew(dev, 1, AI_SI2_Load_A_Register); /* 0,0 does not work. */ devpriv->stc_writew(dev, timer, AI_SI2_Load_B_Register); /* AI_SI2_Reload_Mode = alternate */ /* AI_SI2_Initial_Load_Source = A */ mode2 &= ~AI_SI2_Initial_Load_Source; mode2 |= AI_SI2_Reload_Mode; devpriv->stc_writew(dev, mode2, AI_Mode_2_Register); /* AI_SI2_Load */ devpriv->stc_writew(dev, AI_SI2_Load, AI_Command_1_Register); mode2 |= AI_SI2_Reload_Mode; /* alternate */ mode2 |= AI_SI2_Initial_Load_Source; /* B */ devpriv->stc_writew(dev, mode2, AI_Mode_2_Register); break; case TRIG_EXT: mode1 |= AI_CONVERT_Source_Select(1 + cmd->convert_arg); if ((cmd->convert_arg & CR_INVERT) == 0) mode1 |= AI_CONVERT_Source_Polarity; devpriv->stc_writew(dev, mode1, AI_Mode_1_Register); mode2 |= AI_Start_Stop_Gate_Enable | AI_SC_Gate_Enable; devpriv->stc_writew(dev, mode2, AI_Mode_2_Register); break; } if (dev->irq) { /* interrupt on FIFO, errors, SC_TC */ interrupt_a_enable |= AI_Error_Interrupt_Enable | AI_SC_TC_Interrupt_Enable; #ifndef PCIDMA interrupt_a_enable |= AI_FIFO_Interrupt_Enable; #endif if (cmd->flags & TRIG_WAKE_EOS || (devpriv->ai_cmd2 & AI_End_On_End_Of_Scan)) { /* wake on end-of-scan */ devpriv->aimode = AIMODE_SCAN; } else { devpriv->aimode = AIMODE_HALF_FULL; } switch (devpriv->aimode) { case AIMODE_HALF_FULL: /*generate FIFO interrupts and DMA requests on half-full */ #ifdef PCIDMA devpriv->stc_writew(dev, AI_FIFO_Mode_HF_to_E, AI_Mode_3_Register); #else devpriv->stc_writew(dev, AI_FIFO_Mode_HF, AI_Mode_3_Register); #endif break; case AIMODE_SAMPLE: /*generate FIFO interrupts on non-empty */ devpriv->stc_writew(dev, AI_FIFO_Mode_NE, AI_Mode_3_Register); break; case AIMODE_SCAN: #ifdef PCIDMA devpriv->stc_writew(dev, AI_FIFO_Mode_NE, AI_Mode_3_Register); #else devpriv->stc_writew(dev, AI_FIFO_Mode_HF, AI_Mode_3_Register); #endif interrupt_a_enable |= AI_STOP_Interrupt_Enable; break; default: break; } devpriv->stc_writew(dev, AI_Error_Interrupt_Ack | AI_STOP_Interrupt_Ack | AI_START_Interrupt_Ack | AI_START2_Interrupt_Ack | AI_START1_Interrupt_Ack | AI_SC_TC_Interrupt_Ack | AI_SC_TC_Error_Confirm, Interrupt_A_Ack_Register); /* clear interrupts */ 
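                /* with the stale events acked, unmask the interrupt sources
                 * collected in interrupt_a_enable above */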
ni_set_bits(dev, Interrupt_A_Enable_Register, interrupt_a_enable, 1); MDPRINTK("Interrupt_A_Enable_Register = 0x%04x\n", devpriv->int_a_enable_reg); } else { /* interrupt on nothing */ ni_set_bits(dev, Interrupt_A_Enable_Register, ~0, 0); /* XXX start polling if necessary */ MDPRINTK("interrupting on nothing\n"); } /* end configuration */ devpriv->stc_writew(dev, AI_Configuration_End, Joint_Reset_Register); switch (cmd->scan_begin_src) { case TRIG_TIMER: devpriv->stc_writew(dev, AI_SI2_Arm | AI_SI_Arm | AI_DIV_Arm | AI_SC_Arm, AI_Command_1_Register); break; case TRIG_EXT: /* XXX AI_SI_Arm? */ devpriv->stc_writew(dev, AI_SI2_Arm | AI_SI_Arm | AI_DIV_Arm | AI_SC_Arm, AI_Command_1_Register); break; } #ifdef PCIDMA { int retval = ni_ai_setup_MITE_dma(dev); if (retval) return retval; } /* mite_dump_regs(devpriv->mite); */ #endif switch (cmd->start_src) { case TRIG_NOW: /* AI_START1_Pulse */ devpriv->stc_writew(dev, AI_START1_Pulse | devpriv->ai_cmd2, AI_Command_2_Register); s->async->inttrig = NULL; break; case TRIG_EXT: s->async->inttrig = NULL; break; case TRIG_INT: s->async->inttrig = &ni_ai_inttrig; break; } MDPRINTK("exit ni_ai_cmd\n"); return 0; } static int ni_ai_inttrig(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trignum) { struct ni_private *devpriv = dev->private; if (trignum != 0) return -EINVAL; devpriv->stc_writew(dev, AI_START1_Pulse | devpriv->ai_cmd2, AI_Command_2_Register); s->async->inttrig = NULL; return 1; } static int ni_ai_config_analog_trig(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ni_ai_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; if (insn->n < 1) return -EINVAL; switch (data[0]) { case INSN_CONFIG_ANALOG_TRIG: return ni_ai_config_analog_trig(dev, s, insn, data); case INSN_CONFIG_ALT_SOURCE: if (board->reg_type & ni_reg_m_series_mask) { if (data[1] & ~(MSeries_AI_Bypass_Cal_Sel_Pos_Mask | MSeries_AI_Bypass_Cal_Sel_Neg_Mask | MSeries_AI_Bypass_Mode_Mux_Mask | MSeries_AO_Bypass_AO_Cal_Sel_Mask)) { return -EINVAL; } devpriv->ai_calib_source = data[1]; } else if (board->reg_type == ni_reg_6143) { unsigned int calib_source; calib_source = data[1] & 0xf; if (calib_source > 0xF) return -EINVAL; devpriv->ai_calib_source = calib_source; ni_writew(calib_source, Calibration_Channel_6143); } else { unsigned int calib_source; unsigned int calib_source_adjust; calib_source = data[1] & 0xf; calib_source_adjust = (data[1] >> 4) & 0xff; if (calib_source >= 8) return -EINVAL; devpriv->ai_calib_source = calib_source; if (board->reg_type == ni_reg_611x) { ni_writeb(calib_source_adjust, Cal_Gain_Select_611x); } } return 2; default: break; } return -EINVAL; } static int ni_ai_config_analog_trig(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; unsigned int a, b, modebits; int err = 0; /* data[1] is flags * data[2] is analog line * data[3] is set level * data[4] is reset level */ if (!board->has_analog_trig) return -EINVAL; if ((data[1] & 0xffff0000) != COMEDI_EV_SCAN_BEGIN) { data[1] &= (COMEDI_EV_SCAN_BEGIN | 0xffff); err++; } if (data[2] >= board->n_adchan) { data[2] = board->n_adchan - 1; err++; } if (data[3] > 255) { /* a */ data[3] = 255; err++; } if (data[4] > 255) { /* b */ 
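                /* clamp the reset level to the valid 0-255 range */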
data[4] = 255; err++; } /* * 00 ignore * 01 set * 10 reset * * modes: * 1 level: +b- +a- * high mode 00 00 01 10 * low mode 00 00 10 01 * 2 level: (a<b) * hysteresis low mode 10 00 00 01 * hysteresis high mode 01 00 00 10 * middle mode 10 01 01 10 */ a = data[3]; b = data[4]; modebits = data[1] & 0xff; if (modebits & 0xf0) { /* two level mode */ if (b < a) { /* swap order */ a = data[4]; b = data[3]; modebits = ((data[1] & 0xf) << 4) | ((data[1] & 0xf0) >> 4); } devpriv->atrig_low = a; devpriv->atrig_high = b; switch (modebits) { case 0x81: /* low hysteresis mode */ devpriv->atrig_mode = 6; break; case 0x42: /* high hysteresis mode */ devpriv->atrig_mode = 3; break; case 0x96: /* middle window mode */ devpriv->atrig_mode = 2; break; default: data[1] &= ~0xff; err++; } } else { /* one level mode */ if (b != 0) { data[4] = 0; err++; } switch (modebits) { case 0x06: /* high window mode */ devpriv->atrig_high = a; devpriv->atrig_mode = 0; break; case 0x09: /* low window mode */ devpriv->atrig_low = a; devpriv->atrig_mode = 1; break; default: data[1] &= ~0xff; err++; } } if (err) return -EAGAIN; return 5; } /* munge data from unsigned to 2's complement for analog output bipolar modes */ static void ni_ao_munge(struct comedi_device *dev, struct comedi_subdevice *s, void *data, unsigned int num_bytes, unsigned int chan_index) { const struct ni_board_struct *board = comedi_board(dev); struct comedi_async *async = s->async; unsigned int range; unsigned int i; unsigned int offset; unsigned int length = num_bytes / sizeof(short); short *array = data; offset = 1 << (board->aobits - 1); for (i = 0; i < length; i++) { range = CR_RANGE(async->cmd.chanlist[chan_index]); if (board->ao_unipolar == 0 || (range & 1) == 0) array[i] -= offset; #ifdef PCIDMA array[i] = cpu_to_le16(array[i]); #endif chan_index++; chan_index %= async->cmd.chanlist_len; } } static int ni_m_series_ao_config_chanlist(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int chanspec[], unsigned int n_chans, int timed) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; unsigned int range; unsigned int chan; unsigned int conf; int i; int invert = 0; if (timed) { for (i = 0; i < board->n_aochan; ++i) { devpriv->ao_conf[i] &= ~MSeries_AO_Update_Timed_Bit; ni_writeb(devpriv->ao_conf[i], M_Offset_AO_Config_Bank(i)); ni_writeb(0xf, M_Offset_AO_Waveform_Order(i)); } } for (i = 0; i < n_chans; i++) { const struct comedi_krange *krange; chan = CR_CHAN(chanspec[i]); range = CR_RANGE(chanspec[i]); krange = s->range_table->range + range; invert = 0; conf = 0; switch (krange->max - krange->min) { case 20000000: conf |= MSeries_AO_DAC_Reference_10V_Internal_Bits; ni_writeb(0, M_Offset_AO_Reference_Attenuation(chan)); break; case 10000000: conf |= MSeries_AO_DAC_Reference_5V_Internal_Bits; ni_writeb(0, M_Offset_AO_Reference_Attenuation(chan)); break; case 4000000: conf |= MSeries_AO_DAC_Reference_10V_Internal_Bits; ni_writeb(MSeries_Attenuate_x5_Bit, M_Offset_AO_Reference_Attenuation(chan)); break; case 2000000: conf |= MSeries_AO_DAC_Reference_5V_Internal_Bits; ni_writeb(MSeries_Attenuate_x5_Bit, M_Offset_AO_Reference_Attenuation(chan)); break; default: printk("%s: bug! unhandled ao reference voltage\n", __func__); break; } switch (krange->max + krange->min) { case 0: conf |= MSeries_AO_DAC_Offset_0V_Bits; break; case 10000000: conf |= MSeries_AO_DAC_Offset_5V_Bits; break; default: printk("%s: bug! 
unhandled ao offset voltage\n", __func__); break; } if (timed) conf |= MSeries_AO_Update_Timed_Bit; ni_writeb(conf, M_Offset_AO_Config_Bank(chan)); devpriv->ao_conf[chan] = conf; ni_writeb(i, M_Offset_AO_Waveform_Order(chan)); } return invert; } static int ni_old_ao_config_chanlist(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int chanspec[], unsigned int n_chans) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; unsigned int range; unsigned int chan; unsigned int conf; int i; int invert = 0; for (i = 0; i < n_chans; i++) { chan = CR_CHAN(chanspec[i]); range = CR_RANGE(chanspec[i]); conf = AO_Channel(chan); if (board->ao_unipolar) { if ((range & 1) == 0) { conf |= AO_Bipolar; invert = (1 << (board->aobits - 1)); } else { invert = 0; } if (range & 2) conf |= AO_Ext_Ref; } else { conf |= AO_Bipolar; invert = (1 << (board->aobits - 1)); } /* not all boards can deglitch, but this shouldn't hurt */ if (chanspec[i] & CR_DEGLITCH) conf |= AO_Deglitch; /* analog reference */ /* AREF_OTHER connects AO ground to AI ground, i think */ conf |= (CR_AREF(chanspec[i]) == AREF_OTHER) ? AO_Ground_Ref : 0; ni_writew(conf, AO_Configuration); devpriv->ao_conf[chan] = conf; } return invert; } static int ni_ao_config_chanlist(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int chanspec[], unsigned int n_chans, int timed) { const struct ni_board_struct *board = comedi_board(dev); if (board->reg_type & ni_reg_m_series_mask) return ni_m_series_ao_config_chanlist(dev, s, chanspec, n_chans, timed); else return ni_old_ao_config_chanlist(dev, s, chanspec, n_chans); } static int ni_ao_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct ni_private *devpriv = dev->private; data[0] = devpriv->ao[CR_CHAN(insn->chanspec)]; return 1; } static int ni_ao_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; unsigned int chan = CR_CHAN(insn->chanspec); unsigned int invert; invert = ni_ao_config_chanlist(dev, s, &insn->chanspec, 1, 0); devpriv->ao[chan] = data[0]; if (board->reg_type & ni_reg_m_series_mask) { ni_writew(data[0], M_Offset_DAC_Direct_Data(chan)); } else ni_writew(data[0] ^ invert, (chan) ? 
DAC1_Direct_Data : DAC0_Direct_Data); return 1; } static int ni_ao_insn_write_671x(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; unsigned int chan = CR_CHAN(insn->chanspec); unsigned int invert; ao_win_out(1 << chan, AO_Immediate_671x); invert = 1 << (board->aobits - 1); ni_ao_config_chanlist(dev, s, &insn->chanspec, 1, 0); devpriv->ao[chan] = data[0]; ao_win_out(data[0] ^ invert, DACx_Direct_Data_671x(chan)); return 1; } static int ni_ao_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; switch (data[0]) { case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE: switch (data[1]) { case COMEDI_OUTPUT: data[2] = 1 + board->ao_fifo_depth * sizeof(short); if (devpriv->mite) data[2] += devpriv->mite->fifo_size; break; case COMEDI_INPUT: data[2] = 0; break; default: return -EINVAL; } return 0; default: break; } return -EINVAL; } static int ni_ao_inttrig(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trignum) { const struct ni_board_struct *board __maybe_unused = comedi_board(dev); struct ni_private *devpriv = dev->private; int ret; int interrupt_b_bits; int i; static const int timeout = 1000; if (trignum != 0) return -EINVAL; /* Nulling the trigger at the beginning prevents the ao start trigger from executing more than once per command (and doing things like trying to allocate the ao dma channel multiple times) */ s->async->inttrig = NULL; ni_set_bits(dev, Interrupt_B_Enable_Register, AO_FIFO_Interrupt_Enable | AO_Error_Interrupt_Enable, 0); interrupt_b_bits = AO_Error_Interrupt_Enable; #ifdef PCIDMA devpriv->stc_writew(dev, 1, DAC_FIFO_Clear); if (board->reg_type & ni_reg_6xxx_mask) ni_ao_win_outl(dev, 0x6, AO_FIFO_Offset_Load_611x); ret = ni_ao_setup_MITE_dma(dev); if (ret) return ret; ret = ni_ao_wait_for_dma_load(dev); if (ret < 0) return ret; #else ret = ni_ao_prep_fifo(dev, s); if (ret == 0) return -EPIPE; interrupt_b_bits |= AO_FIFO_Interrupt_Enable; #endif devpriv->stc_writew(dev, devpriv->ao_mode3 | AO_Not_An_UPDATE, AO_Mode_3_Register); devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register); /* wait for DACs to be loaded */ for (i = 0; i < timeout; i++) { udelay(1); if ((devpriv->stc_readw(dev, Joint_Status_2_Register) & AO_TMRDACWRs_In_Progress_St) == 0) break; } if (i == timeout) { comedi_error(dev, "timed out waiting for AO_TMRDACWRs_In_Progress_St to clear"); return -EIO; } /* the STC manual says we need to clear the error interrupt after AO_TMRDACWRs_In_Progress_St clears */ devpriv->stc_writew(dev, AO_Error_Interrupt_Ack, Interrupt_B_Ack_Register); ni_set_bits(dev, Interrupt_B_Enable_Register, interrupt_b_bits, 1); devpriv->stc_writew(dev, devpriv->ao_cmd1 | AO_UI_Arm | AO_UC_Arm | AO_BC_Arm | AO_DAC1_Update_Mode | AO_DAC0_Update_Mode, AO_Command_1_Register); devpriv->stc_writew(dev, devpriv->ao_cmd2 | AO_START1_Pulse, AO_Command_2_Register); return 0; } static int ni_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; const struct comedi_cmd *cmd = &s->async->cmd; int bits; int i; unsigned trigvar; if (dev->irq == 0) { comedi_error(dev, "cannot run command without an irq"); return -EIO; } devpriv->stc_writew(dev, AO_Configuration_Start, 
Joint_Reset_Register); devpriv->stc_writew(dev, AO_Disarm, AO_Command_1_Register); if (board->reg_type & ni_reg_6xxx_mask) { ao_win_out(CLEAR_WG, AO_Misc_611x); bits = 0; for (i = 0; i < cmd->chanlist_len; i++) { int chan; chan = CR_CHAN(cmd->chanlist[i]); bits |= 1 << chan; ao_win_out(chan, AO_Waveform_Generation_611x); } ao_win_out(bits, AO_Timed_611x); } ni_ao_config_chanlist(dev, s, cmd->chanlist, cmd->chanlist_len, 1); if (cmd->stop_src == TRIG_NONE) { devpriv->ao_mode1 |= AO_Continuous; devpriv->ao_mode1 &= ~AO_Trigger_Once; } else { devpriv->ao_mode1 &= ~AO_Continuous; devpriv->ao_mode1 |= AO_Trigger_Once; } devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register); switch (cmd->start_src) { case TRIG_INT: case TRIG_NOW: devpriv->ao_trigger_select &= ~(AO_START1_Polarity | AO_START1_Select(-1)); devpriv->ao_trigger_select |= AO_START1_Edge | AO_START1_Sync; devpriv->stc_writew(dev, devpriv->ao_trigger_select, AO_Trigger_Select_Register); break; case TRIG_EXT: devpriv->ao_trigger_select = AO_START1_Select(CR_CHAN(cmd->start_arg) + 1); if (cmd->start_arg & CR_INVERT) devpriv->ao_trigger_select |= AO_START1_Polarity; /* 0=active high, 1=active low. see daq-stc 3-24 (p186) */ if (cmd->start_arg & CR_EDGE) devpriv->ao_trigger_select |= AO_START1_Edge; /* 0=edge detection disabled, 1=enabled */ devpriv->stc_writew(dev, devpriv->ao_trigger_select, AO_Trigger_Select_Register); break; default: BUG(); break; } devpriv->ao_mode3 &= ~AO_Trigger_Length; devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register); devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register); devpriv->ao_mode2 &= ~AO_BC_Initial_Load_Source; devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register); if (cmd->stop_src == TRIG_NONE) { devpriv->stc_writel(dev, 0xffffff, AO_BC_Load_A_Register); } else { devpriv->stc_writel(dev, 0, AO_BC_Load_A_Register); } devpriv->stc_writew(dev, AO_BC_Load, AO_Command_1_Register); devpriv->ao_mode2 &= ~AO_UC_Initial_Load_Source; devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register); switch (cmd->stop_src) { case TRIG_COUNT: if (board->reg_type & ni_reg_m_series_mask) { /* this is how the NI example code does it for m-series boards, verified correct with 6259 */ devpriv->stc_writel(dev, cmd->stop_arg - 1, AO_UC_Load_A_Register); devpriv->stc_writew(dev, AO_UC_Load, AO_Command_1_Register); } else { devpriv->stc_writel(dev, cmd->stop_arg, AO_UC_Load_A_Register); devpriv->stc_writew(dev, AO_UC_Load, AO_Command_1_Register); devpriv->stc_writel(dev, cmd->stop_arg - 1, AO_UC_Load_A_Register); } break; case TRIG_NONE: devpriv->stc_writel(dev, 0xffffff, AO_UC_Load_A_Register); devpriv->stc_writew(dev, AO_UC_Load, AO_Command_1_Register); devpriv->stc_writel(dev, 0xffffff, AO_UC_Load_A_Register); break; default: devpriv->stc_writel(dev, 0, AO_UC_Load_A_Register); devpriv->stc_writew(dev, AO_UC_Load, AO_Command_1_Register); devpriv->stc_writel(dev, cmd->stop_arg, AO_UC_Load_A_Register); } devpriv->ao_mode1 &= ~(AO_UI_Source_Select(0x1f) | AO_UI_Source_Polarity | AO_UPDATE_Source_Select(0x1f) | AO_UPDATE_Source_Polarity); switch (cmd->scan_begin_src) { case TRIG_TIMER: devpriv->ao_cmd2 &= ~AO_BC_Gate_Enable; trigvar = ni_ns_to_timer(dev, cmd->scan_begin_arg, TRIG_ROUND_NEAREST); devpriv->stc_writel(dev, 1, AO_UI_Load_A_Register); devpriv->stc_writew(dev, AO_UI_Load, AO_Command_1_Register); devpriv->stc_writel(dev, trigvar, AO_UI_Load_A_Register); break; case TRIG_EXT: devpriv->ao_mode1 |= AO_UPDATE_Source_Select(cmd->scan_begin_arg); if (cmd->scan_begin_arg & 
CR_INVERT) devpriv->ao_mode1 |= AO_UPDATE_Source_Polarity; devpriv->ao_cmd2 |= AO_BC_Gate_Enable; break; default: BUG(); break; } devpriv->stc_writew(dev, devpriv->ao_cmd2, AO_Command_2_Register); devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register); devpriv->ao_mode2 &= ~(AO_UI_Reload_Mode(3) | AO_UI_Initial_Load_Source); devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register); if (cmd->scan_end_arg > 1) { devpriv->ao_mode1 |= AO_Multiple_Channels; devpriv->stc_writew(dev, AO_Number_Of_Channels(cmd->scan_end_arg - 1) | AO_UPDATE_Output_Select (AO_Update_Output_High_Z), AO_Output_Control_Register); } else { unsigned bits; devpriv->ao_mode1 &= ~AO_Multiple_Channels; bits = AO_UPDATE_Output_Select(AO_Update_Output_High_Z); if (board->reg_type & (ni_reg_m_series_mask | ni_reg_6xxx_mask)) { bits |= AO_Number_Of_Channels(0); } else { bits |= AO_Number_Of_Channels(CR_CHAN(cmd->chanlist[0])); } devpriv->stc_writew(dev, bits, AO_Output_Control_Register); } devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register); devpriv->stc_writew(dev, AO_DAC0_Update_Mode | AO_DAC1_Update_Mode, AO_Command_1_Register); devpriv->ao_mode3 |= AO_Stop_On_Overrun_Error; devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register); devpriv->ao_mode2 &= ~AO_FIFO_Mode_Mask; #ifdef PCIDMA devpriv->ao_mode2 |= AO_FIFO_Mode_HF_to_F; #else devpriv->ao_mode2 |= AO_FIFO_Mode_HF; #endif devpriv->ao_mode2 &= ~AO_FIFO_Retransmit_Enable; devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register); bits = AO_BC_Source_Select | AO_UPDATE_Pulse_Width | AO_TMRDACWR_Pulse_Width; if (board->ao_fifo_depth) bits |= AO_FIFO_Enable; else bits |= AO_DMA_PIO_Control; #if 0 /* F Hess: windows driver does not set AO_Number_Of_DAC_Packages bit for 6281, verified with bus analyzer. 
*/ if (board->reg_type & ni_reg_m_series_mask) bits |= AO_Number_Of_DAC_Packages; #endif devpriv->stc_writew(dev, bits, AO_Personal_Register); /* enable sending of ao dma requests */ devpriv->stc_writew(dev, AO_AOFREQ_Enable, AO_Start_Select_Register); devpriv->stc_writew(dev, AO_Configuration_End, Joint_Reset_Register); if (cmd->stop_src == TRIG_COUNT) { devpriv->stc_writew(dev, AO_BC_TC_Interrupt_Ack, Interrupt_B_Ack_Register); ni_set_bits(dev, Interrupt_B_Enable_Register, AO_BC_TC_Interrupt_Enable, 1); } s->async->inttrig = &ni_ao_inttrig; return 0; } static int ni_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; int err = 0; int tmp; /* Step 1 : check if triggers are trivially valid */ if ((cmd->flags & CMDF_WRITE) == 0) cmd->flags |= CMDF_WRITE; err |= cfc_check_trigger_src(&cmd->start_src, TRIG_INT | TRIG_EXT); err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER | TRIG_EXT); err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_NOW); err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT); err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE); if (err) return 1; /* Step 2a : make sure trigger sources are unique */ err |= cfc_check_trigger_is_unique(cmd->start_src); err |= cfc_check_trigger_is_unique(cmd->scan_begin_src); err |= cfc_check_trigger_is_unique(cmd->stop_src); /* Step 2b : and mutually compatible */ if (err) return 2; /* Step 3: check if arguments are trivially valid */ if (cmd->start_src == TRIG_EXT) { /* external trigger */ unsigned int tmp = CR_CHAN(cmd->start_arg); if (tmp > 18) tmp = 18; tmp |= (cmd->start_arg & (CR_INVERT | CR_EDGE)); err |= cfc_check_trigger_arg_is(&cmd->start_arg, tmp); } else { /* true for both TRIG_NOW and TRIG_INT */ err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0); } if (cmd->scan_begin_src == TRIG_TIMER) { err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg, board->ao_speed); err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg, devpriv->clock_ns * 0xffffff); } err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0); err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len); if (cmd->stop_src == TRIG_COUNT) err |= cfc_check_trigger_arg_max(&cmd->stop_arg, 0x00ffffff); else /* TRIG_NONE */ err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0); if (err) return 3; /* step 4: fix up any arguments */ if (cmd->scan_begin_src == TRIG_TIMER) { tmp = cmd->scan_begin_arg; cmd->scan_begin_arg = ni_timer_to_ns(dev, ni_ns_to_timer(dev, cmd->scan_begin_arg, cmd-> flags & TRIG_ROUND_MASK)); if (tmp != cmd->scan_begin_arg) err++; } if (err) return 4; /* step 5: fix up chanlist */ if (err) return 5; return 0; } static int ni_ao_reset(struct comedi_device *dev, struct comedi_subdevice *s) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; /* devpriv->ao0p=0x0000; */ /* ni_writew(devpriv->ao0p,AO_Configuration); */ /* devpriv->ao1p=AO_Channel(1); */ /* ni_writew(devpriv->ao1p,AO_Configuration); */ ni_release_ao_mite_channel(dev); devpriv->stc_writew(dev, AO_Configuration_Start, Joint_Reset_Register); devpriv->stc_writew(dev, AO_Disarm, AO_Command_1_Register); ni_set_bits(dev, Interrupt_B_Enable_Register, ~0, 0); devpriv->stc_writew(dev, AO_BC_Source_Select, AO_Personal_Register); devpriv->stc_writew(dev, 0x3f98, Interrupt_B_Ack_Register); devpriv->stc_writew(dev, AO_BC_Source_Select | AO_UPDATE_Pulse_Width | 
AO_TMRDACWR_Pulse_Width, AO_Personal_Register); devpriv->stc_writew(dev, 0, AO_Output_Control_Register); devpriv->stc_writew(dev, 0, AO_Start_Select_Register); devpriv->ao_cmd1 = 0; devpriv->stc_writew(dev, devpriv->ao_cmd1, AO_Command_1_Register); devpriv->ao_cmd2 = 0; devpriv->stc_writew(dev, devpriv->ao_cmd2, AO_Command_2_Register); devpriv->ao_mode1 = 0; devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register); devpriv->ao_mode2 = 0; devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register); if (board->reg_type & ni_reg_m_series_mask) devpriv->ao_mode3 = AO_Last_Gate_Disable; else devpriv->ao_mode3 = 0; devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register); devpriv->ao_trigger_select = 0; devpriv->stc_writew(dev, devpriv->ao_trigger_select, AO_Trigger_Select_Register); if (board->reg_type & ni_reg_6xxx_mask) { unsigned immediate_bits = 0; unsigned i; for (i = 0; i < s->n_chan; ++i) { immediate_bits |= 1 << i; } ao_win_out(immediate_bits, AO_Immediate_671x); ao_win_out(CLEAR_WG, AO_Misc_611x); } devpriv->stc_writew(dev, AO_Configuration_End, Joint_Reset_Register); return 0; } /* digital io */ static int ni_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct ni_private *devpriv = dev->private; #ifdef DEBUG_DIO printk("ni_dio_insn_config() chan=%d io=%d\n", CR_CHAN(insn->chanspec), data[0]); #endif switch (data[0]) { case INSN_CONFIG_DIO_OUTPUT: s->io_bits |= 1 << CR_CHAN(insn->chanspec); break; case INSN_CONFIG_DIO_INPUT: s->io_bits &= ~(1 << CR_CHAN(insn->chanspec)); break; case INSN_CONFIG_DIO_QUERY: data[1] = (s-> io_bits & (1 << CR_CHAN(insn->chanspec))) ? COMEDI_OUTPUT : COMEDI_INPUT; return insn->n; break; default: return -EINVAL; } devpriv->dio_control &= ~DIO_Pins_Dir_Mask; devpriv->dio_control |= DIO_Pins_Dir(s->io_bits); devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register); return 1; } static int ni_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct ni_private *devpriv = dev->private; #ifdef DEBUG_DIO printk("ni_dio_insn_bits() mask=0x%x bits=0x%x\n", data[0], data[1]); #endif if (data[0]) { /* Perform check to make sure we're not using the serial part of the dio */ if ((data[0] & (DIO_SDIN | DIO_SDOUT)) && devpriv->serial_interval_ns) return -EBUSY; s->state &= ~data[0]; s->state |= (data[0] & data[1]); devpriv->dio_output &= ~DIO_Parallel_Data_Mask; devpriv->dio_output |= DIO_Parallel_Data_Out(s->state); devpriv->stc_writew(dev, devpriv->dio_output, DIO_Output_Register); } data[1] = devpriv->stc_readw(dev, DIO_Parallel_Input_Register); return insn->n; } static int ni_m_series_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct ni_private *devpriv __maybe_unused = dev->private; #ifdef DEBUG_DIO printk("ni_m_series_dio_insn_config() chan=%d io=%d\n", CR_CHAN(insn->chanspec), data[0]); #endif switch (data[0]) { case INSN_CONFIG_DIO_OUTPUT: s->io_bits |= 1 << CR_CHAN(insn->chanspec); break; case INSN_CONFIG_DIO_INPUT: s->io_bits &= ~(1 << CR_CHAN(insn->chanspec)); break; case INSN_CONFIG_DIO_QUERY: data[1] = (s-> io_bits & (1 << CR_CHAN(insn->chanspec))) ? 
COMEDI_OUTPUT : COMEDI_INPUT; return insn->n; break; default: return -EINVAL; } ni_writel(s->io_bits, M_Offset_DIO_Direction); return 1; } static int ni_m_series_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct ni_private *devpriv __maybe_unused = dev->private; #ifdef DEBUG_DIO printk("ni_m_series_dio_insn_bits() mask=0x%x bits=0x%x\n", data[0], data[1]); #endif if (data[0]) { s->state &= ~data[0]; s->state |= (data[0] & data[1]); ni_writel(s->state, M_Offset_Static_Digital_Output); } data[1] = ni_readl(M_Offset_Static_Digital_Input); return insn->n; } static int ni_cdio_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; int tmp; unsigned i; /* Step 1 : check if triggers are trivially valid */ err |= cfc_check_trigger_src(&cmd->start_src, TRIG_INT); err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT); err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_NOW); err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT); err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_NONE); if (err) return 1; /* Step 2a : make sure trigger sources are unique */ /* Step 2b : and mutually compatible */ if (err) return 2; /* Step 3: check if arguments are trivially valid */ err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0); tmp = cmd->scan_begin_arg; tmp &= CR_PACK_FLAGS(CDO_Sample_Source_Select_Mask, 0, 0, CR_INVERT); if (tmp != cmd->scan_begin_arg) err |= -EINVAL; err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0); err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len); err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0); if (err) return 3; /* step 4: fix up any arguments */ if (err) return 4; /* step 5: check chanlist */ for (i = 0; i < cmd->chanlist_len; ++i) { if (cmd->chanlist[i] != i) err = 1; } if (err) return 5; return 0; } static int ni_cdio_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct ni_private *devpriv __maybe_unused = dev->private; const struct comedi_cmd *cmd = &s->async->cmd; unsigned cdo_mode_bits = CDO_FIFO_Mode_Bit | CDO_Halt_On_Error_Bit; int retval; ni_writel(CDO_Reset_Bit, M_Offset_CDIO_Command); switch (cmd->scan_begin_src) { case TRIG_EXT: cdo_mode_bits |= CR_CHAN(cmd->scan_begin_arg) & CDO_Sample_Source_Select_Mask; break; default: BUG(); break; } if (cmd->scan_begin_arg & CR_INVERT) cdo_mode_bits |= CDO_Polarity_Bit; ni_writel(cdo_mode_bits, M_Offset_CDO_Mode); if (s->io_bits) { ni_writel(s->state, M_Offset_CDO_FIFO_Data); ni_writel(CDO_SW_Update_Bit, M_Offset_CDIO_Command); ni_writel(s->io_bits, M_Offset_CDO_Mask_Enable); } else { comedi_error(dev, "attempted to run digital output command with no lines configured as outputs"); return -EIO; } retval = ni_request_cdo_mite_channel(dev); if (retval < 0) { return retval; } s->async->inttrig = &ni_cdo_inttrig; return 0; } static int ni_cdo_inttrig(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trignum) { #ifdef PCIDMA struct ni_private *devpriv = dev->private; unsigned long flags; #endif int retval = 0; unsigned i; const unsigned timeout = 1000; s->async->inttrig = NULL; /* read alloc the entire buffer */ comedi_buf_read_alloc(s->async, s->async->prealloc_bufsz); #ifdef PCIDMA spin_lock_irqsave(&devpriv->mite_channel_lock, flags); if (devpriv->cdo_mite_chan) { mite_prep_dma(devpriv->cdo_mite_chan, 32, 32); mite_dma_arm(devpriv->cdo_mite_chan); } else { comedi_error(dev, "BUG: no cdo mite channel?"); retval = -EIO; } 
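/* Note: devpriv->mite_channel_lock is held across mite_prep_dma() and mite_dma_arm() above, so the cdo mite channel cannot be released out from under us while it is being armed. */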
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); if (retval < 0) return retval; #endif /* * XXX not sure what interrupt C group does: * ni_writeb(Interrupt_Group_C_Enable_Bit, M_Offset_Interrupt_C_Enable); */ /* wait for dma to fill output fifo */ for (i = 0; i < timeout; ++i) { if (ni_readl(M_Offset_CDIO_Status) & CDO_FIFO_Full_Bit) break; udelay(10); } if (i == timeout) { comedi_error(dev, "dma failed to fill cdo fifo!"); ni_cdio_cancel(dev, s); return -EIO; } ni_writel(CDO_Arm_Bit | CDO_Error_Interrupt_Enable_Set_Bit | CDO_Empty_FIFO_Interrupt_Enable_Set_Bit, M_Offset_CDIO_Command); return retval; } static int ni_cdio_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { struct ni_private *devpriv __maybe_unused = dev->private; ni_writel(CDO_Disarm_Bit | CDO_Error_Interrupt_Enable_Clear_Bit | CDO_Empty_FIFO_Interrupt_Enable_Clear_Bit | CDO_FIFO_Request_Interrupt_Enable_Clear_Bit, M_Offset_CDIO_Command); /* * XXX not sure what interrupt C group does: * ni_writeb(0, M_Offset_Interrupt_C_Enable); */ ni_writel(0, M_Offset_CDO_Mask_Enable); ni_release_cdo_mite_channel(dev); return 0; } static void handle_cdio_interrupt(struct comedi_device *dev) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv __maybe_unused = dev->private; unsigned cdio_status; struct comedi_subdevice *s = &dev->subdevices[NI_DIO_SUBDEV]; #ifdef PCIDMA unsigned long flags; #endif if ((board->reg_type & ni_reg_m_series_mask) == 0) { return; } #ifdef PCIDMA spin_lock_irqsave(&devpriv->mite_channel_lock, flags); if (devpriv->cdo_mite_chan) { unsigned cdo_mite_status = mite_get_status(devpriv->cdo_mite_chan); if (cdo_mite_status & CHSR_LINKC) { writel(CHOR_CLRLC, devpriv->mite->mite_io_addr + MITE_CHOR(devpriv->cdo_mite_chan->channel)); } mite_sync_output_dma(devpriv->cdo_mite_chan, s->async); } spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); #endif cdio_status = ni_readl(M_Offset_CDIO_Status); if (cdio_status & (CDO_Overrun_Bit | CDO_Underflow_Bit)) { /* printk("cdio error: status=0x%x\n", cdio_status); */ ni_writel(CDO_Error_Interrupt_Confirm_Bit, M_Offset_CDIO_Command); /* XXX just guessing this is needed and does something useful */ s->async->events |= COMEDI_CB_OVERFLOW; } if (cdio_status & CDO_FIFO_Empty_Bit) { /* printk("cdio fifo empty\n"); */ ni_writel(CDO_Empty_FIFO_Interrupt_Enable_Clear_Bit, M_Offset_CDIO_Command); /* s->async->events |= COMEDI_CB_EOA; */ } ni_event(dev, s); } static int ni_serial_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct ni_private *devpriv = dev->private; int err = insn->n; unsigned char byte_out, byte_in = 0; if (insn->n != 2) return -EINVAL; switch (data[0]) { case INSN_CONFIG_SERIAL_CLOCK: #ifdef DEBUG_DIO printk("SPI serial clock Config %d\n", data[1]); #endif devpriv->serial_hw_mode = 1; devpriv->dio_control |= DIO_HW_Serial_Enable; if (data[1] == SERIAL_DISABLED) { devpriv->serial_hw_mode = 0; devpriv->dio_control &= ~(DIO_HW_Serial_Enable | DIO_Software_Serial_Control); data[1] = SERIAL_DISABLED; devpriv->serial_interval_ns = data[1]; } else if (data[1] <= SERIAL_600NS) { /* Warning: this clock speed is too fast to reliably control SCXI. 
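If reliable SCXI control is required, request an interval of SERIAL_1_2US or slower instead.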
*/ devpriv->dio_control &= ~DIO_HW_Serial_Timebase; devpriv->clock_and_fout |= Slow_Internal_Timebase; devpriv->clock_and_fout &= ~DIO_Serial_Out_Divide_By_2; data[1] = SERIAL_600NS; devpriv->serial_interval_ns = data[1]; } else if (data[1] <= SERIAL_1_2US) { devpriv->dio_control &= ~DIO_HW_Serial_Timebase; devpriv->clock_and_fout |= Slow_Internal_Timebase | DIO_Serial_Out_Divide_By_2; data[1] = SERIAL_1_2US; devpriv->serial_interval_ns = data[1]; } else if (data[1] <= SERIAL_10US) { devpriv->dio_control |= DIO_HW_Serial_Timebase; devpriv->clock_and_fout |= Slow_Internal_Timebase | DIO_Serial_Out_Divide_By_2; /* Note: DIO_Serial_Out_Divide_By_2 only affects 600ns/1.2us. If you turn divide_by_2 off with the slow clock, you will still get 10us, except then all your delays are wrong. */ data[1] = SERIAL_10US; devpriv->serial_interval_ns = data[1]; } else { devpriv->dio_control &= ~(DIO_HW_Serial_Enable | DIO_Software_Serial_Control); devpriv->serial_hw_mode = 0; data[1] = (data[1] / 1000) * 1000; devpriv->serial_interval_ns = data[1]; } devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register); devpriv->stc_writew(dev, devpriv->clock_and_fout, Clock_and_FOUT_Register); return 1; break; case INSN_CONFIG_BIDIRECTIONAL_DATA: if (devpriv->serial_interval_ns == 0) { return -EINVAL; } byte_out = data[1] & 0xFF; if (devpriv->serial_hw_mode) { err = ni_serial_hw_readwrite8(dev, s, byte_out, &byte_in); } else if (devpriv->serial_interval_ns > 0) { err = ni_serial_sw_readwrite8(dev, s, byte_out, &byte_in); } else { printk("ni_serial_insn_config: serial disabled!\n"); return -EINVAL; } if (err < 0) return err; data[1] = byte_in & 0xFF; return insn->n; break; default: return -EINVAL; } } static int ni_serial_hw_readwrite8(struct comedi_device *dev, struct comedi_subdevice *s, unsigned char data_out, unsigned char *data_in) { struct ni_private *devpriv = dev->private; unsigned int status1; int err = 0, count = 20; #ifdef DEBUG_DIO printk("ni_serial_hw_readwrite8: outputting 0x%x\n", data_out); #endif devpriv->dio_output &= ~DIO_Serial_Data_Mask; devpriv->dio_output |= DIO_Serial_Data_Out(data_out); devpriv->stc_writew(dev, devpriv->dio_output, DIO_Output_Register); status1 = devpriv->stc_readw(dev, Joint_Status_1_Register); if (status1 & DIO_Serial_IO_In_Progress_St) { err = -EBUSY; goto Error; } devpriv->dio_control |= DIO_HW_Serial_Start; devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register); devpriv->dio_control &= ~DIO_HW_Serial_Start; /* Wait until STC says we're done, but don't loop infinitely. */ while ((status1 = devpriv->stc_readw(dev, Joint_Status_1_Register)) & DIO_Serial_IO_In_Progress_St) { /* Delay one bit per loop */ udelay((devpriv->serial_interval_ns + 999) / 1000); if (--count < 0) { printk ("ni_serial_hw_readwrite8: SPI serial I/O didn't finish in time!\n"); err = -ETIME; goto Error; } } /* Delay for last bit. This delay is absolutely necessary, because DIO_Serial_IO_In_Progress_St goes high one bit too early. 
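The udelay() below therefore waits one more full serial clock period (serial_interval_ns, rounded up to whole microseconds) before reading DIO_Serial_Input_Register.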
*/ udelay((devpriv->serial_interval_ns + 999) / 1000); if (data_in != NULL) { *data_in = devpriv->stc_readw(dev, DIO_Serial_Input_Register); #ifdef DEBUG_DIO printk("ni_serial_hw_readwrite8: inputted 0x%x\n", *data_in); #endif } Error: devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register); return err; } static int ni_serial_sw_readwrite8(struct comedi_device *dev, struct comedi_subdevice *s, unsigned char data_out, unsigned char *data_in) { struct ni_private *devpriv = dev->private; unsigned char mask, input = 0; #ifdef DEBUG_DIO printk("ni_serial_sw_readwrite8: outputting 0x%x\n", data_out); #endif /* Wait for one bit before transfer */ udelay((devpriv->serial_interval_ns + 999) / 1000); for (mask = 0x80; mask; mask >>= 1) { /* Output current bit; note that we cannot touch s->state because it is a per-subdevice field, and serial is a separate subdevice from DIO. */ devpriv->dio_output &= ~DIO_SDOUT; if (data_out & mask) { devpriv->dio_output |= DIO_SDOUT; } devpriv->stc_writew(dev, devpriv->dio_output, DIO_Output_Register); /* Assert SDCLK (active low, inverted), wait for half of the delay, deassert SDCLK, and wait for the other half. */ devpriv->dio_control |= DIO_Software_Serial_Control; devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register); udelay((devpriv->serial_interval_ns + 999) / 2000); devpriv->dio_control &= ~DIO_Software_Serial_Control; devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register); udelay((devpriv->serial_interval_ns + 999) / 2000); /* Input current bit */ if (devpriv->stc_readw(dev, DIO_Parallel_Input_Register) & DIO_SDIN) { /* printk("DIO_P_I_R: 0x%x\n", devpriv->stc_readw(dev, DIO_Parallel_Input_Register)); */ input |= mask; } } #ifdef DEBUG_DIO printk("ni_serial_sw_readwrite8: inputted 0x%x\n", input); #endif if (data_in) *data_in = input; return 0; } static void mio_common_detach(struct comedi_device *dev) { struct ni_private *devpriv = dev->private; if (devpriv) { if (devpriv->counter_dev) { ni_gpct_device_destroy(devpriv->counter_dev); } } comedi_spriv_free(dev, NI_8255_DIO_SUBDEV); } static void init_ao_67xx(struct comedi_device *dev, struct comedi_subdevice *s) { int i; for (i = 0; i < s->n_chan; i++) { ni_ao_win_outw(dev, AO_Channel(i) | 0x0, AO_Configuration_2_67xx); } ao_win_out(0x0, AO_Later_Single_Point_Updates); } static unsigned ni_gpct_to_stc_register(enum ni_gpct_register reg) { unsigned stc_register; switch (reg) { case NITIO_G0_Autoincrement_Reg: stc_register = G_Autoincrement_Register(0); break; case NITIO_G1_Autoincrement_Reg: stc_register = G_Autoincrement_Register(1); break; case NITIO_G0_Command_Reg: stc_register = G_Command_Register(0); break; case NITIO_G1_Command_Reg: stc_register = G_Command_Register(1); break; case NITIO_G0_HW_Save_Reg: stc_register = G_HW_Save_Register(0); break; case NITIO_G1_HW_Save_Reg: stc_register = G_HW_Save_Register(1); break; case NITIO_G0_SW_Save_Reg: stc_register = G_Save_Register(0); break; case NITIO_G1_SW_Save_Reg: stc_register = G_Save_Register(1); break; case NITIO_G0_Mode_Reg: stc_register = G_Mode_Register(0); break; case NITIO_G1_Mode_Reg: stc_register = G_Mode_Register(1); break; case NITIO_G0_LoadA_Reg: stc_register = G_Load_A_Register(0); break; case NITIO_G1_LoadA_Reg: stc_register = G_Load_A_Register(1); break; case NITIO_G0_LoadB_Reg: stc_register = G_Load_B_Register(0); break; case NITIO_G1_LoadB_Reg: stc_register = G_Load_B_Register(1); break; case NITIO_G0_Input_Select_Reg: stc_register = G_Input_Select_Register(0); break; case 
NITIO_G1_Input_Select_Reg: stc_register = G_Input_Select_Register(1); break; case NITIO_G01_Status_Reg: stc_register = G_Status_Register; break; case NITIO_G01_Joint_Reset_Reg: stc_register = Joint_Reset_Register; break; case NITIO_G01_Joint_Status1_Reg: stc_register = Joint_Status_1_Register; break; case NITIO_G01_Joint_Status2_Reg: stc_register = Joint_Status_2_Register; break; case NITIO_G0_Interrupt_Acknowledge_Reg: stc_register = Interrupt_A_Ack_Register; break; case NITIO_G1_Interrupt_Acknowledge_Reg: stc_register = Interrupt_B_Ack_Register; break; case NITIO_G0_Status_Reg: stc_register = AI_Status_1_Register; break; case NITIO_G1_Status_Reg: stc_register = AO_Status_1_Register; break; case NITIO_G0_Interrupt_Enable_Reg: stc_register = Interrupt_A_Enable_Register; break; case NITIO_G1_Interrupt_Enable_Reg: stc_register = Interrupt_B_Enable_Register; break; default: printk("%s: unhandled register 0x%x in switch.\n", __func__, reg); BUG(); return 0; break; } return stc_register; } static void ni_gpct_write_register(struct ni_gpct *counter, unsigned bits, enum ni_gpct_register reg) { struct comedi_device *dev = counter->counter_dev->dev; struct ni_private *devpriv = dev->private; unsigned stc_register; /* bits in the join reset register which are relevant to counters */ static const unsigned gpct_joint_reset_mask = G0_Reset | G1_Reset; static const unsigned gpct_interrupt_a_enable_mask = G0_Gate_Interrupt_Enable | G0_TC_Interrupt_Enable; static const unsigned gpct_interrupt_b_enable_mask = G1_Gate_Interrupt_Enable | G1_TC_Interrupt_Enable; switch (reg) { /* m-series-only registers */ case NITIO_G0_Counting_Mode_Reg: ni_writew(bits, M_Offset_G0_Counting_Mode); break; case NITIO_G1_Counting_Mode_Reg: ni_writew(bits, M_Offset_G1_Counting_Mode); break; case NITIO_G0_Second_Gate_Reg: ni_writew(bits, M_Offset_G0_Second_Gate); break; case NITIO_G1_Second_Gate_Reg: ni_writew(bits, M_Offset_G1_Second_Gate); break; case NITIO_G0_DMA_Config_Reg: ni_writew(bits, M_Offset_G0_DMA_Config); break; case NITIO_G1_DMA_Config_Reg: ni_writew(bits, M_Offset_G1_DMA_Config); break; case NITIO_G0_ABZ_Reg: ni_writew(bits, M_Offset_G0_MSeries_ABZ); break; case NITIO_G1_ABZ_Reg: ni_writew(bits, M_Offset_G1_MSeries_ABZ); break; /* 32 bit registers */ case NITIO_G0_LoadA_Reg: case NITIO_G1_LoadA_Reg: case NITIO_G0_LoadB_Reg: case NITIO_G1_LoadB_Reg: stc_register = ni_gpct_to_stc_register(reg); devpriv->stc_writel(dev, bits, stc_register); break; /* 16 bit registers */ case NITIO_G0_Interrupt_Enable_Reg: BUG_ON(bits & ~gpct_interrupt_a_enable_mask); ni_set_bitfield(dev, Interrupt_A_Enable_Register, gpct_interrupt_a_enable_mask, bits); break; case NITIO_G1_Interrupt_Enable_Reg: BUG_ON(bits & ~gpct_interrupt_b_enable_mask); ni_set_bitfield(dev, Interrupt_B_Enable_Register, gpct_interrupt_b_enable_mask, bits); break; case NITIO_G01_Joint_Reset_Reg: BUG_ON(bits & ~gpct_joint_reset_mask); /* fall-through */ default: stc_register = ni_gpct_to_stc_register(reg); devpriv->stc_writew(dev, bits, stc_register); } } static unsigned ni_gpct_read_register(struct ni_gpct *counter, enum ni_gpct_register reg) { struct comedi_device *dev = counter->counter_dev->dev; struct ni_private *devpriv = dev->private; unsigned stc_register; switch (reg) { /* m-series only registers */ case NITIO_G0_DMA_Status_Reg: return ni_readw(M_Offset_G0_DMA_Status); break; case NITIO_G1_DMA_Status_Reg: return ni_readw(M_Offset_G1_DMA_Status); break; /* 32 bit registers */ case NITIO_G0_HW_Save_Reg: case NITIO_G1_HW_Save_Reg: case NITIO_G0_SW_Save_Reg: case 
NITIO_G1_SW_Save_Reg: stc_register = ni_gpct_to_stc_register(reg); return devpriv->stc_readl(dev, stc_register); break; /* 16 bit registers */ default: stc_register = ni_gpct_to_stc_register(reg); return devpriv->stc_readw(dev, stc_register); break; } return 0; } static int ni_freq_out_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct ni_private *devpriv = dev->private; data[0] = devpriv->clock_and_fout & FOUT_Divider_mask; return 1; } static int ni_freq_out_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct ni_private *devpriv = dev->private; devpriv->clock_and_fout &= ~FOUT_Enable; devpriv->stc_writew(dev, devpriv->clock_and_fout, Clock_and_FOUT_Register); devpriv->clock_and_fout &= ~FOUT_Divider_mask; devpriv->clock_and_fout |= FOUT_Divider(data[0]); devpriv->clock_and_fout |= FOUT_Enable; devpriv->stc_writew(dev, devpriv->clock_and_fout, Clock_and_FOUT_Register); return insn->n; } static int ni_set_freq_out_clock(struct comedi_device *dev, unsigned int clock_source) { struct ni_private *devpriv = dev->private; switch (clock_source) { case NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC: devpriv->clock_and_fout &= ~FOUT_Timebase_Select; break; case NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC: devpriv->clock_and_fout |= FOUT_Timebase_Select; break; default: return -EINVAL; } devpriv->stc_writew(dev, devpriv->clock_and_fout, Clock_and_FOUT_Register); return 3; } static void ni_get_freq_out_clock(struct comedi_device *dev, unsigned int *clock_source, unsigned int *clock_period_ns) { struct ni_private *devpriv = dev->private; if (devpriv->clock_and_fout & FOUT_Timebase_Select) { *clock_source = NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC; *clock_period_ns = TIMEBASE_2_NS; } else { *clock_source = NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC; *clock_period_ns = TIMEBASE_1_NS * 2; } } static int ni_freq_out_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { switch (data[0]) { case INSN_CONFIG_SET_CLOCK_SRC: return ni_set_freq_out_clock(dev, data[1]); break; case INSN_CONFIG_GET_CLOCK_SRC: ni_get_freq_out_clock(dev, &data[1], &data[2]); return 3; default: break; } return -EINVAL; } static int ni_alloc_private(struct comedi_device *dev) { struct ni_private *devpriv; devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL); if (!devpriv) return -ENOMEM; dev->private = devpriv; spin_lock_init(&devpriv->window_lock); spin_lock_init(&devpriv->soft_reg_copy_lock); spin_lock_init(&devpriv->mite_channel_lock); return 0; }; static int ni_E_init(struct comedi_device *dev) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; struct comedi_subdevice *s; unsigned j; enum ni_gpct_variant counter_variant; int ret; if (board->n_aochan > MAX_N_AO_CHAN) { printk("bug! 
n_aochan > MAX_N_AO_CHAN\n"); return -EINVAL; } ret = comedi_alloc_subdevices(dev, NI_NUM_SUBDEVICES); if (ret) return ret; /* analog input subdevice */ s = &dev->subdevices[NI_AI_SUBDEV]; dev->read_subdev = s; if (board->n_adchan) { s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_DITHER | SDF_CMD_READ; if (board->reg_type != ni_reg_611x) s->subdev_flags |= SDF_GROUND | SDF_COMMON | SDF_OTHER; if (board->adbits > 16) s->subdev_flags |= SDF_LSAMPL; if (board->reg_type & ni_reg_m_series_mask) s->subdev_flags |= SDF_SOFT_CALIBRATED; s->n_chan = board->n_adchan; s->len_chanlist = 512; s->maxdata = (1 << board->adbits) - 1; s->range_table = ni_range_lkup[board->gainlkup]; s->insn_read = &ni_ai_insn_read; s->insn_config = &ni_ai_insn_config; s->do_cmdtest = &ni_ai_cmdtest; s->do_cmd = &ni_ai_cmd; s->cancel = &ni_ai_reset; s->poll = &ni_ai_poll; s->munge = &ni_ai_munge; #ifdef PCIDMA s->async_dma_dir = DMA_FROM_DEVICE; #endif } else { s->type = COMEDI_SUBD_UNUSED; } /* analog output subdevice */ s = &dev->subdevices[NI_AO_SUBDEV]; if (board->n_aochan) { s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE | SDF_DEGLITCH | SDF_GROUND; if (board->reg_type & ni_reg_m_series_mask) s->subdev_flags |= SDF_SOFT_CALIBRATED; s->n_chan = board->n_aochan; s->maxdata = (1 << board->aobits) - 1; s->range_table = board->ao_range_table; s->insn_read = &ni_ao_insn_read; if (board->reg_type & ni_reg_6xxx_mask) { s->insn_write = &ni_ao_insn_write_671x; } else { s->insn_write = &ni_ao_insn_write; } s->insn_config = &ni_ao_insn_config; #ifdef PCIDMA if (board->n_aochan) { s->async_dma_dir = DMA_TO_DEVICE; #else if (board->ao_fifo_depth) { #endif dev->write_subdev = s; s->subdev_flags |= SDF_CMD_WRITE; s->do_cmd = &ni_ao_cmd; s->do_cmdtest = &ni_ao_cmdtest; s->len_chanlist = board->n_aochan; if ((board->reg_type & ni_reg_m_series_mask) == 0) s->munge = ni_ao_munge; } s->cancel = &ni_ao_reset; } else { s->type = COMEDI_SUBD_UNUSED; } if ((board->reg_type & ni_reg_67xx_mask)) init_ao_67xx(dev, s); /* digital i/o subdevice */ s = &dev->subdevices[NI_DIO_SUBDEV]; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_WRITABLE | SDF_READABLE; s->maxdata = 1; s->io_bits = 0; /* all bits input */ s->range_table = &range_digital; s->n_chan = board->num_p0_dio_channels; if (board->reg_type & ni_reg_m_series_mask) { s->subdev_flags |= SDF_LSAMPL | SDF_CMD_WRITE /* | SDF_CMD_READ */ ; s->insn_bits = &ni_m_series_dio_insn_bits; s->insn_config = &ni_m_series_dio_insn_config; s->do_cmd = &ni_cdio_cmd; s->do_cmdtest = &ni_cdio_cmdtest; s->cancel = &ni_cdio_cancel; s->async_dma_dir = DMA_BIDIRECTIONAL; s->len_chanlist = s->n_chan; ni_writel(CDO_Reset_Bit | CDI_Reset_Bit, M_Offset_CDIO_Command); ni_writel(s->io_bits, M_Offset_DIO_Direction); } else { s->insn_bits = &ni_dio_insn_bits; s->insn_config = &ni_dio_insn_config; devpriv->dio_control = DIO_Pins_Dir(s->io_bits); ni_writew(devpriv->dio_control, DIO_Control_Register); } /* 8255 device */ s = &dev->subdevices[NI_8255_DIO_SUBDEV]; if (board->has_8255) { subdev_8255_init(dev, s, ni_8255_callback, (unsigned long)dev); } else { s->type = COMEDI_SUBD_UNUSED; } /* formerly general purpose counter/timer device, but no longer used */ s = &dev->subdevices[NI_UNUSED_SUBDEV]; s->type = COMEDI_SUBD_UNUSED; /* calibration subdevice -- ai and ao */ s = &dev->subdevices[NI_CALIBRATION_SUBDEV]; s->type = COMEDI_SUBD_CALIB; if (board->reg_type & ni_reg_m_series_mask) { /* internal PWM analog output used for AI nonlinearity calibration */ s->subdev_flags = SDF_INTERNAL; 
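/* the PWM up/down periods are programmed via INSN_CONFIG_PWM_OUTPUT; ni_m_series_pwm_config() converts the requested times from nanoseconds into multiples of devpriv->clock_ns */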
s->insn_config = &ni_m_series_pwm_config; s->n_chan = 1; s->maxdata = 0; ni_writel(0x0, M_Offset_Cal_PWM); } else if (board->reg_type == ni_reg_6143) { /* internal PWM analog output used for AI nonlinearity calibration */ s->subdev_flags = SDF_INTERNAL; s->insn_config = &ni_6143_pwm_config; s->n_chan = 1; s->maxdata = 0; } else { s->subdev_flags = SDF_WRITABLE | SDF_INTERNAL; s->insn_read = &ni_calib_insn_read; s->insn_write = &ni_calib_insn_write; caldac_setup(dev, s); } /* EEPROM */ s = &dev->subdevices[NI_EEPROM_SUBDEV]; s->type = COMEDI_SUBD_MEMORY; s->subdev_flags = SDF_READABLE | SDF_INTERNAL; s->maxdata = 0xff; if (board->reg_type & ni_reg_m_series_mask) { s->n_chan = M_SERIES_EEPROM_SIZE; s->insn_read = &ni_m_series_eeprom_insn_read; } else { s->n_chan = 512; s->insn_read = &ni_eeprom_insn_read; } /* PFI */ s = &dev->subdevices[NI_PFI_DIO_SUBDEV]; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL; if (board->reg_type & ni_reg_m_series_mask) { unsigned i; s->n_chan = 16; ni_writew(s->state, M_Offset_PFI_DO); for (i = 0; i < NUM_PFI_OUTPUT_SELECT_REGS; ++i) { ni_writew(devpriv->pfi_output_select_reg[i], M_Offset_PFI_Output_Select(i + 1)); } } else { s->n_chan = 10; } s->maxdata = 1; if (board->reg_type & ni_reg_m_series_mask) { s->insn_bits = &ni_pfi_insn_bits; } s->insn_config = &ni_pfi_insn_config; ni_set_bits(dev, IO_Bidirection_Pin_Register, ~0, 0); /* cs5529 calibration adc */ s = &dev->subdevices[NI_CS5529_CALIBRATION_SUBDEV]; if (board->reg_type & ni_reg_67xx_mask) { s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_INTERNAL; /* one channel for each analog output channel */ s->n_chan = board->n_aochan; s->maxdata = (1 << 16) - 1; s->range_table = &range_unknown; /* XXX */ s->insn_read = cs5529_ai_insn_read; s->insn_config = NULL; init_cs5529(dev); } else { s->type = COMEDI_SUBD_UNUSED; } /* Serial */ s = &dev->subdevices[NI_SERIAL_SUBDEV]; s->type = COMEDI_SUBD_SERIAL; s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL; s->n_chan = 1; s->maxdata = 0xff; s->insn_config = ni_serial_insn_config; devpriv->serial_interval_ns = 0; devpriv->serial_hw_mode = 0; /* RTSI */ s = &dev->subdevices[NI_RTSI_SUBDEV]; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL; s->n_chan = 8; s->maxdata = 1; s->insn_bits = ni_rtsi_insn_bits; s->insn_config = ni_rtsi_insn_config; ni_rtsi_init(dev); if (board->reg_type & ni_reg_m_series_mask) { counter_variant = ni_gpct_variant_m_series; } else { counter_variant = ni_gpct_variant_e_series; } devpriv->counter_dev = ni_gpct_device_construct(dev, &ni_gpct_write_register, &ni_gpct_read_register, counter_variant, NUM_GPCT); /* General purpose counters */ for (j = 0; j < NUM_GPCT; ++j) { s = &dev->subdevices[NI_GPCT_SUBDEV(j)]; s->type = COMEDI_SUBD_COUNTER; s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL; s->n_chan = 3; if (board->reg_type & ni_reg_m_series_mask) s->maxdata = 0xffffffff; else s->maxdata = 0xffffff; s->insn_read = &ni_gpct_insn_read; s->insn_write = &ni_gpct_insn_write; s->insn_config = &ni_gpct_insn_config; #ifdef PCIDMA s->subdev_flags |= SDF_CMD_READ /* | SDF_CMD_WRITE */; s->do_cmd = &ni_gpct_cmd; s->len_chanlist = 1; s->do_cmdtest = &ni_gpct_cmdtest; s->cancel = &ni_gpct_cancel; s->async_dma_dir = DMA_BIDIRECTIONAL; #endif s->private = &devpriv->counter_dev->counters[j]; devpriv->counter_dev->counters[j].chip_index = 0; devpriv->counter_dev->counters[j].counter_index = j; 
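/* all counters sit on chip 0 of the single gpct device; ni_tio_init_counter() below brings each counter up in a known reset state before it is exposed as a subdevice */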
ni_tio_init_counter(&devpriv->counter_dev->counters[j]); } /* Frequency output */ s = &dev->subdevices[NI_FREQ_OUT_SUBDEV]; s->type = COMEDI_SUBD_COUNTER; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = 1; s->maxdata = 0xf; s->insn_read = &ni_freq_out_insn_read; s->insn_write = &ni_freq_out_insn_write; s->insn_config = &ni_freq_out_insn_config; /* ai configuration */ s = &dev->subdevices[NI_AI_SUBDEV]; ni_ai_reset(dev, s); if ((board->reg_type & ni_reg_6xxx_mask) == 0) { /* BEAM is this needed for PCI-6143 ?? */ devpriv->clock_and_fout = Slow_Internal_Time_Divide_By_2 | Slow_Internal_Timebase | Clock_To_Board_Divide_By_2 | Clock_To_Board | AI_Output_Divide_By_2 | AO_Output_Divide_By_2; } else { devpriv->clock_and_fout = Slow_Internal_Time_Divide_By_2 | Slow_Internal_Timebase | Clock_To_Board_Divide_By_2 | Clock_To_Board; } devpriv->stc_writew(dev, devpriv->clock_and_fout, Clock_and_FOUT_Register); /* analog output configuration */ s = &dev->subdevices[NI_AO_SUBDEV]; ni_ao_reset(dev, s); if (dev->irq) { devpriv->stc_writew(dev, (IRQ_POLARITY ? Interrupt_Output_Polarity : 0) | (Interrupt_Output_On_3_Pins & 0) | Interrupt_A_Enable | Interrupt_B_Enable | Interrupt_A_Output_Select(interrupt_pin (dev->irq)) | Interrupt_B_Output_Select(interrupt_pin (dev->irq)), Interrupt_Control_Register); } /* DMA setup */ ni_writeb(devpriv->ai_ao_select_reg, AI_AO_Select); ni_writeb(devpriv->g0_g1_select_reg, G0_G1_Select); if (board->reg_type & ni_reg_6xxx_mask) { ni_writeb(0, Magic_611x); } else if (board->reg_type & ni_reg_m_series_mask) { int channel; for (channel = 0; channel < board->n_aochan; ++channel) { ni_writeb(0xf, M_Offset_AO_Waveform_Order(channel)); ni_writeb(0x0, M_Offset_AO_Reference_Attenuation(channel)); } ni_writeb(0x0, M_Offset_AO_Calibration); } printk("\n"); return 0; } static int ni_8255_callback(int dir, int port, int data, unsigned long arg) { struct comedi_device *dev = (struct comedi_device *)arg; struct ni_private *devpriv __maybe_unused = dev->private; if (dir) { ni_writeb(data, Port_A + 2 * port); return 0; } else { return ni_readb(Port_A + 2 * port); } } /* presents the EEPROM as a subdevice */ static int ni_eeprom_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[0] = ni_read_eeprom(dev, CR_CHAN(insn->chanspec)); return 1; } /* reads bytes out of eeprom */ static int ni_read_eeprom(struct comedi_device *dev, int addr) { struct ni_private *devpriv __maybe_unused = dev->private; int bit; int bitstring; bitstring = 0x0300 | ((addr & 0x100) << 3) | (addr & 0xff); ni_writeb(0x04, Serial_Command); for (bit = 0x8000; bit; bit >>= 1) { ni_writeb(0x04 | ((bit & bitstring) ? 0x02 : 0), Serial_Command); ni_writeb(0x05 | ((bit & bitstring) ? 0x02 : 0), Serial_Command); } bitstring = 0; for (bit = 0x80; bit; bit >>= 1) { ni_writeb(0x04, Serial_Command); ni_writeb(0x05, Serial_Command); bitstring |= ((ni_readb(XXX_Status) & PROMOUT) ? 
bit : 0); } ni_writeb(0x00, Serial_Command); return bitstring; } static int ni_m_series_eeprom_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct ni_private *devpriv = dev->private; data[0] = devpriv->eeprom_buffer[CR_CHAN(insn->chanspec)]; return 1; } static int ni_get_pwm_config(struct comedi_device *dev, unsigned int *data) { struct ni_private *devpriv = dev->private; data[1] = devpriv->pwm_up_count * devpriv->clock_ns; data[2] = devpriv->pwm_down_count * devpriv->clock_ns; return 3; } static int ni_m_series_pwm_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct ni_private *devpriv = dev->private; unsigned up_count, down_count; switch (data[0]) { case INSN_CONFIG_PWM_OUTPUT: switch (data[1]) { case TRIG_ROUND_NEAREST: up_count = (data[2] + devpriv->clock_ns / 2) / devpriv->clock_ns; break; case TRIG_ROUND_DOWN: up_count = data[2] / devpriv->clock_ns; break; case TRIG_ROUND_UP: up_count = (data[2] + devpriv->clock_ns - 1) / devpriv->clock_ns; break; default: return -EINVAL; break; } switch (data[3]) { case TRIG_ROUND_NEAREST: down_count = (data[4] + devpriv->clock_ns / 2) / devpriv->clock_ns; break; case TRIG_ROUND_DOWN: down_count = data[4] / devpriv->clock_ns; break; case TRIG_ROUND_UP: down_count = (data[4] + devpriv->clock_ns - 1) / devpriv->clock_ns; break; default: return -EINVAL; break; } if (up_count * devpriv->clock_ns != data[2] || down_count * devpriv->clock_ns != data[4]) { data[2] = up_count * devpriv->clock_ns; data[4] = down_count * devpriv->clock_ns; return -EAGAIN; } ni_writel(MSeries_Cal_PWM_High_Time_Bits(up_count) | MSeries_Cal_PWM_Low_Time_Bits(down_count), M_Offset_Cal_PWM); devpriv->pwm_up_count = up_count; devpriv->pwm_down_count = down_count; return 5; break; case INSN_CONFIG_GET_PWM_OUTPUT: return ni_get_pwm_config(dev, data); break; default: return -EINVAL; break; } return 0; } static int ni_6143_pwm_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct ni_private *devpriv = dev->private; unsigned up_count, down_count; switch (data[0]) { case INSN_CONFIG_PWM_OUTPUT: switch (data[1]) { case TRIG_ROUND_NEAREST: up_count = (data[2] + devpriv->clock_ns / 2) / devpriv->clock_ns; break; case TRIG_ROUND_DOWN: up_count = data[2] / devpriv->clock_ns; break; case TRIG_ROUND_UP: up_count = (data[2] + devpriv->clock_ns - 1) / devpriv->clock_ns; break; default: return -EINVAL; break; } switch (data[3]) { case TRIG_ROUND_NEAREST: down_count = (data[4] + devpriv->clock_ns / 2) / devpriv->clock_ns; break; case TRIG_ROUND_DOWN: down_count = data[4] / devpriv->clock_ns; break; case TRIG_ROUND_UP: down_count = (data[4] + devpriv->clock_ns - 1) / devpriv->clock_ns; break; default: return -EINVAL; break; } if (up_count * devpriv->clock_ns != data[2] || down_count * devpriv->clock_ns != data[4]) { data[2] = up_count * devpriv->clock_ns; data[4] = down_count * devpriv->clock_ns; return -EAGAIN; } ni_writel(up_count, Calibration_HighTime_6143); devpriv->pwm_up_count = up_count; ni_writel(down_count, Calibration_LowTime_6143); devpriv->pwm_down_count = down_count; return 5; break; case INSN_CONFIG_GET_PWM_OUTPUT: return ni_get_pwm_config(dev, data); default: return -EINVAL; break; } return 0; } static void ni_write_caldac(struct comedi_device *dev, int addr, int val); /* calibration subdevice */ static int ni_calib_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct 
comedi_insn *insn, unsigned int *data) { ni_write_caldac(dev, CR_CHAN(insn->chanspec), data[0]); return 1; } static int ni_calib_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct ni_private *devpriv = dev->private; data[0] = devpriv->caldacs[CR_CHAN(insn->chanspec)]; return 1; } static int pack_mb88341(int addr, int val, int *bitstring); static int pack_dac8800(int addr, int val, int *bitstring); static int pack_dac8043(int addr, int val, int *bitstring); static int pack_ad8522(int addr, int val, int *bitstring); static int pack_ad8804(int addr, int val, int *bitstring); static int pack_ad8842(int addr, int val, int *bitstring); struct caldac_struct { int n_chans; int n_bits; int (*packbits) (int, int, int *); }; static struct caldac_struct caldacs[] = { [mb88341] = {12, 8, pack_mb88341}, [dac8800] = {8, 8, pack_dac8800}, [dac8043] = {1, 12, pack_dac8043}, [ad8522] = {2, 12, pack_ad8522}, [ad8804] = {12, 8, pack_ad8804}, [ad8842] = {8, 8, pack_ad8842}, [ad8804_debug] = {16, 8, pack_ad8804}, }; static void caldac_setup(struct comedi_device *dev, struct comedi_subdevice *s) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; int i, j; int n_dacs; int n_chans = 0; int n_bits; int diffbits = 0; int type; int chan; type = board->caldac[0]; if (type == caldac_none) return; n_bits = caldacs[type].n_bits; for (i = 0; i < 3; i++) { type = board->caldac[i]; if (type == caldac_none) break; if (caldacs[type].n_bits != n_bits) diffbits = 1; n_chans += caldacs[type].n_chans; } n_dacs = i; s->n_chan = n_chans; if (diffbits) { unsigned int *maxdata_list; if (n_chans > MAX_N_CALDACS) { printk("BUG! MAX_N_CALDACS too small\n"); } s->maxdata_list = maxdata_list = devpriv->caldac_maxdata_list; chan = 0; for (i = 0; i < n_dacs; i++) { type = board->caldac[i]; for (j = 0; j < caldacs[type].n_chans; j++) { maxdata_list[chan] = (1 << caldacs[type].n_bits) - 1; chan++; } } /* start all caldacs at mid-scale; index with chan, not the stale loop variable i */ for (chan = 0; chan < s->n_chan; chan++) ni_write_caldac(dev, chan, s->maxdata_list[chan] / 2); } else { type = board->caldac[0]; s->maxdata = (1 << caldacs[type].n_bits) - 1; for (chan = 0; chan < s->n_chan; chan++) ni_write_caldac(dev, chan, s->maxdata / 2); } } static void ni_write_caldac(struct comedi_device *dev, int addr, int val) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; unsigned int loadbit = 0, bits = 0, bit, bitstring = 0; int i; int type; /* printk("ni_write_caldac: chan=%d val=%d\n",addr,val); */ if (devpriv->caldacs[addr] == val) return; devpriv->caldacs[addr] = val; for (i = 0; i < 3; i++) { type = board->caldac[i]; if (type == caldac_none) break; if (addr < caldacs[type].n_chans) { bits = caldacs[type].packbits(addr, val, &bitstring); loadbit = SerDacLd(i); /* printk("caldac: using i=%d addr=%d %x\n",i,addr,bitstring); */ break; } addr -= caldacs[type].n_chans; } for (bit = 1 << (bits - 1); bit; bit >>= 1) { ni_writeb(((bit & bitstring) ? 0x02 : 0), Serial_Command); udelay(1); ni_writeb(1 | ((bit & bitstring) ? 0x02 : 0), Serial_Command); udelay(1); } ni_writeb(loadbit, Serial_Command); udelay(1); ni_writeb(0, Serial_Command); } static int pack_mb88341(int addr, int val, int *bitstring) { /* Fujitsu MB 88341 Note that address bits are reversed. Thanks to Ingo Keen for noticing this. Note also that the 88341 expects address values from 1-12, whereas we use channel numbers 0-11. The NI docs use 1-12, also, so be careful here. 
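For example, channel 0 becomes address 1 (binary 0001), which the shifts below reverse into binary 1000 in bits 11-8 of the bitstring, ahead of the 8 data bits.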
*/ addr++; *bitstring = ((addr & 0x1) << 11) | ((addr & 0x2) << 9) | ((addr & 0x4) << 7) | ((addr & 0x8) << 5) | (val & 0xff); return 12; } static int pack_dac8800(int addr, int val, int *bitstring) { *bitstring = ((addr & 0x7) << 8) | (val & 0xff); return 11; } static int pack_dac8043(int addr, int val, int *bitstring) { *bitstring = val & 0xfff; return 12; } static int pack_ad8522(int addr, int val, int *bitstring) { *bitstring = (val & 0xfff) | (addr ? 0xc000 : 0xa000); return 16; } static int pack_ad8804(int addr, int val, int *bitstring) { *bitstring = ((addr & 0xf) << 8) | (val & 0xff); return 12; } static int pack_ad8842(int addr, int val, int *bitstring) { *bitstring = ((addr + 1) << 8) | (val & 0xff); return 12; } #if 0 /* * Read the GPCTs current value. */ static int GPCT_G_Watch(struct comedi_device *dev, int chan) { unsigned int hi1, hi2, lo; devpriv->gpct_command[chan] &= ~G_Save_Trace; devpriv->stc_writew(dev, devpriv->gpct_command[chan], G_Command_Register(chan)); devpriv->gpct_command[chan] |= G_Save_Trace; devpriv->stc_writew(dev, devpriv->gpct_command[chan], G_Command_Register(chan)); /* This procedure is used because the two registers cannot * be read atomically. */ do { hi1 = devpriv->stc_readw(dev, G_Save_Register_High(chan)); lo = devpriv->stc_readw(dev, G_Save_Register_Low(chan)); hi2 = devpriv->stc_readw(dev, G_Save_Register_High(chan)); } while (hi1 != hi2); return (hi1 << 16) | lo; } static void GPCT_Reset(struct comedi_device *dev, int chan) { int temp_ack_reg = 0; /* printk("GPCT_Reset..."); */ devpriv->gpct_cur_operation[chan] = GPCT_RESET; switch (chan) { case 0: devpriv->stc_writew(dev, G0_Reset, Joint_Reset_Register); ni_set_bits(dev, Interrupt_A_Enable_Register, G0_TC_Interrupt_Enable, 0); ni_set_bits(dev, Interrupt_A_Enable_Register, G0_Gate_Interrupt_Enable, 0); temp_ack_reg |= G0_Gate_Error_Confirm; temp_ack_reg |= G0_TC_Error_Confirm; temp_ack_reg |= G0_TC_Interrupt_Ack; temp_ack_reg |= G0_Gate_Interrupt_Ack; devpriv->stc_writew(dev, temp_ack_reg, Interrupt_A_Ack_Register); /* problem...this interferes with the other ctr... 
*/ devpriv->an_trig_etc_reg |= GPFO_0_Output_Enable; devpriv->stc_writew(dev, devpriv->an_trig_etc_reg, Analog_Trigger_Etc_Register); break; case 1: devpriv->stc_writew(dev, G1_Reset, Joint_Reset_Register); ni_set_bits(dev, Interrupt_B_Enable_Register, G1_TC_Interrupt_Enable, 0); ni_set_bits(dev, Interrupt_B_Enable_Register, G1_Gate_Interrupt_Enable, 0); temp_ack_reg |= G1_Gate_Error_Confirm; temp_ack_reg |= G1_TC_Error_Confirm; temp_ack_reg |= G1_TC_Interrupt_Ack; temp_ack_reg |= G1_Gate_Interrupt_Ack; devpriv->stc_writew(dev, temp_ack_reg, Interrupt_B_Ack_Register); devpriv->an_trig_etc_reg |= GPFO_1_Output_Enable; devpriv->stc_writew(dev, devpriv->an_trig_etc_reg, Analog_Trigger_Etc_Register); break; } devpriv->gpct_mode[chan] = 0; devpriv->gpct_input_select[chan] = 0; devpriv->gpct_command[chan] = 0; devpriv->gpct_command[chan] |= G_Synchronized_Gate; devpriv->stc_writew(dev, devpriv->gpct_mode[chan], G_Mode_Register(chan)); devpriv->stc_writew(dev, devpriv->gpct_input_select[chan], G_Input_Select_Register(chan)); devpriv->stc_writew(dev, 0, G_Autoincrement_Register(chan)); /* printk("exit GPCT_Reset\n"); */ } #endif static int ni_gpct_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct ni_gpct *counter = s->private; return ni_tio_insn_config(counter, insn, data); } static int ni_gpct_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct ni_gpct *counter = s->private; return ni_tio_rinsn(counter, insn, data); } static int ni_gpct_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct ni_gpct *counter = s->private; return ni_tio_winsn(counter, insn, data); } #ifdef PCIDMA static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { int retval; struct ni_gpct *counter = s->private; /* const struct comedi_cmd *cmd = &s->async->cmd; */ retval = ni_request_gpct_mite_channel(dev, counter->counter_index, COMEDI_INPUT); if (retval) { comedi_error(dev, "no dma channel available for use by counter"); return retval; } ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL); ni_e_series_enable_second_irq(dev, counter->counter_index, 1); retval = ni_tio_cmd(counter, s->async); return retval; } #endif #ifdef PCIDMA static int ni_gpct_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { struct ni_gpct *counter = s->private; return ni_tio_cmdtest(counter, cmd); } #endif static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { #ifdef PCIDMA struct ni_gpct *counter = s->private; int retval; retval = ni_tio_cancel(counter); ni_e_series_enable_second_irq(dev, counter->counter_index, 0); ni_release_gpct_mite_channel(dev, counter->counter_index); return retval; #else return 0; #endif } /* * * Programmable Function Inputs * */ static int ni_m_series_set_pfi_routing(struct comedi_device *dev, unsigned chan, unsigned source) { struct ni_private *devpriv = dev->private; unsigned pfi_reg_index; unsigned array_offset; if ((source & 0x1f) != source) return -EINVAL; pfi_reg_index = 1 + chan / 3; array_offset = pfi_reg_index - 1; devpriv->pfi_output_select_reg[array_offset] &= ~MSeries_PFI_Output_Select_Mask(chan); devpriv->pfi_output_select_reg[array_offset] |= MSeries_PFI_Output_Select_Bits(chan, source); ni_writew(devpriv->pfi_output_select_reg[array_offset], M_Offset_PFI_Output_Select(pfi_reg_index)); return 
2; } static int ni_old_set_pfi_routing(struct comedi_device *dev, unsigned chan, unsigned source) { /* pre-m-series boards have fixed signals on pfi pins */ if (source != ni_old_get_pfi_routing(dev, chan)) return -EINVAL; return 2; } static int ni_set_pfi_routing(struct comedi_device *dev, unsigned chan, unsigned source) { const struct ni_board_struct *board = comedi_board(dev); if (board->reg_type & ni_reg_m_series_mask) return ni_m_series_set_pfi_routing(dev, chan, source); else return ni_old_set_pfi_routing(dev, chan, source); } static unsigned ni_m_series_get_pfi_routing(struct comedi_device *dev, unsigned chan) { struct ni_private *devpriv = dev->private; const unsigned array_offset = chan / 3; return MSeries_PFI_Output_Select_Source(chan, devpriv-> pfi_output_select_reg [array_offset]); } static unsigned ni_old_get_pfi_routing(struct comedi_device *dev, unsigned chan) { /* pre-m-series boards have fixed signals on pfi pins */ switch (chan) { case 0: return NI_PFI_OUTPUT_AI_START1; break; case 1: return NI_PFI_OUTPUT_AI_START2; break; case 2: return NI_PFI_OUTPUT_AI_CONVERT; break; case 3: return NI_PFI_OUTPUT_G_SRC1; break; case 4: return NI_PFI_OUTPUT_G_GATE1; break; case 5: return NI_PFI_OUTPUT_AO_UPDATE_N; break; case 6: return NI_PFI_OUTPUT_AO_START1; break; case 7: return NI_PFI_OUTPUT_AI_START_PULSE; break; case 8: return NI_PFI_OUTPUT_G_SRC0; break; case 9: return NI_PFI_OUTPUT_G_GATE0; break; default: printk("%s: bug, unhandled case in switch.\n", __func__); break; } return 0; } static unsigned ni_get_pfi_routing(struct comedi_device *dev, unsigned chan) { const struct ni_board_struct *board = comedi_board(dev); if (board->reg_type & ni_reg_m_series_mask) return ni_m_series_get_pfi_routing(dev, chan); else return ni_old_get_pfi_routing(dev, chan); } static int ni_config_filter(struct comedi_device *dev, unsigned pfi_channel, enum ni_pfi_filter_select filter) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv __maybe_unused = dev->private; unsigned bits; if ((board->reg_type & ni_reg_m_series_mask) == 0) { return -ENOTSUPP; } bits = ni_readl(M_Offset_PFI_Filter); bits &= ~MSeries_PFI_Filter_Select_Mask(pfi_channel); bits |= MSeries_PFI_Filter_Select_Bits(pfi_channel, filter); ni_writel(bits, M_Offset_PFI_Filter); return 0; } static int ni_pfi_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv __maybe_unused = dev->private; if ((board->reg_type & ni_reg_m_series_mask) == 0) { return -ENOTSUPP; } if (data[0]) { s->state &= ~data[0]; s->state |= (data[0] & data[1]); ni_writew(s->state, M_Offset_PFI_DO); } data[1] = ni_readw(M_Offset_PFI_DI); return insn->n; } static int ni_pfi_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct ni_private *devpriv = dev->private; unsigned int chan; if (insn->n < 1) return -EINVAL; chan = CR_CHAN(insn->chanspec); switch (data[0]) { case COMEDI_OUTPUT: ni_set_bits(dev, IO_Bidirection_Pin_Register, 1 << chan, 1); break; case COMEDI_INPUT: ni_set_bits(dev, IO_Bidirection_Pin_Register, 1 << chan, 0); break; case INSN_CONFIG_DIO_QUERY: data[1] = (devpriv->io_bidirection_pin_reg & (1 << chan)) ? 
COMEDI_OUTPUT : COMEDI_INPUT; return 0; break; case INSN_CONFIG_SET_ROUTING: return ni_set_pfi_routing(dev, chan, data[1]); break; case INSN_CONFIG_GET_ROUTING: data[1] = ni_get_pfi_routing(dev, chan); break; case INSN_CONFIG_FILTER: return ni_config_filter(dev, chan, data[1]); break; default: return -EINVAL; } return 0; } /* * * NI RTSI Bus Functions * */ static void ni_rtsi_init(struct comedi_device *dev) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; /* Initialises the RTSI bus signal switch to a default state */ /* Set clock mode to internal */ devpriv->clock_and_fout2 = MSeries_RTSI_10MHz_Bit; if (ni_set_master_clock(dev, NI_MIO_INTERNAL_CLOCK, 0) < 0) { printk("ni_set_master_clock failed, bug?"); } /* default internal lines routing to RTSI bus lines */ devpriv->rtsi_trig_a_output_reg = RTSI_Trig_Output_Bits(0, NI_RTSI_OUTPUT_ADR_START1) | RTSI_Trig_Output_Bits(1, NI_RTSI_OUTPUT_ADR_START2) | RTSI_Trig_Output_Bits(2, NI_RTSI_OUTPUT_SCLKG) | RTSI_Trig_Output_Bits(3, NI_RTSI_OUTPUT_DACUPDN); devpriv->stc_writew(dev, devpriv->rtsi_trig_a_output_reg, RTSI_Trig_A_Output_Register); devpriv->rtsi_trig_b_output_reg = RTSI_Trig_Output_Bits(4, NI_RTSI_OUTPUT_DA_START1) | RTSI_Trig_Output_Bits(5, NI_RTSI_OUTPUT_G_SRC0) | RTSI_Trig_Output_Bits(6, NI_RTSI_OUTPUT_G_GATE0); if (board->reg_type & ni_reg_m_series_mask) devpriv->rtsi_trig_b_output_reg |= RTSI_Trig_Output_Bits(7, NI_RTSI_OUTPUT_RTSI_OSC); devpriv->stc_writew(dev, devpriv->rtsi_trig_b_output_reg, RTSI_Trig_B_Output_Register); /* * Sets the source and direction of the 4 on board lines * devpriv->stc_writew(dev, 0x0000, RTSI_Board_Register); */ } static int ni_rtsi_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[1] = 0; return insn->n; } /* Find best multiplier/divider to try and get the PLL running at 80 MHz * given an arbitrary frequency input clock */ static int ni_mseries_get_pll_parameters(unsigned reference_period_ns, unsigned *freq_divider, unsigned *freq_multiplier, unsigned *actual_period_ns) { unsigned div; unsigned best_div = 1; static const unsigned max_div = 0x10; unsigned mult; unsigned best_mult = 1; static const unsigned max_mult = 0x100; static const unsigned pico_per_nano = 1000; const unsigned reference_picosec = reference_period_ns * pico_per_nano; /* m-series wants the phased-locked loop to output 80MHz, which is divided by 4 to * 20 MHz for most timing clocks */ static const unsigned target_picosec = 12500; static const unsigned fudge_factor_80_to_20Mhz = 4; int best_period_picosec = 0; for (div = 1; div <= max_div; ++div) { for (mult = 1; mult <= max_mult; ++mult) { unsigned new_period_ps = (reference_picosec * div) / mult; if (abs(new_period_ps - target_picosec) < abs(best_period_picosec - target_picosec)) { best_period_picosec = new_period_ps; best_div = div; best_mult = mult; } } } if (best_period_picosec == 0) { printk("%s: bug, failed to find pll parameters\n", __func__); return -EIO; } *freq_divider = best_div; *freq_multiplier = best_mult; *actual_period_ns = (best_period_picosec * fudge_factor_80_to_20Mhz + (pico_per_nano / 2)) / pico_per_nano; return 0; } static inline unsigned num_configurable_rtsi_channels(struct comedi_device *dev) { const struct ni_board_struct *board = comedi_board(dev); if (board->reg_type & ni_reg_m_series_mask) return 8; else return 7; } static int ni_mseries_set_pll_master_clock(struct comedi_device *dev, unsigned source, unsigned period_ns) { struct 
ni_private *devpriv = dev->private; static const unsigned min_period_ns = 50; static const unsigned max_period_ns = 1000; static const unsigned timeout = 1000; unsigned pll_control_bits; unsigned freq_divider; unsigned freq_multiplier; unsigned i; int retval; if (source == NI_MIO_PLL_PXI10_CLOCK) period_ns = 100; /* these limits are somewhat arbitrary, but NI advertises 1 to 20MHz range so we'll use that */ if (period_ns < min_period_ns || period_ns > max_period_ns) { printk ("%s: you must specify an input clock frequency between %i and %i nanosec " "for the phased-lock loop.\n", __func__, min_period_ns, max_period_ns); return -EINVAL; } devpriv->rtsi_trig_direction_reg &= ~Use_RTSI_Clock_Bit; devpriv->stc_writew(dev, devpriv->rtsi_trig_direction_reg, RTSI_Trig_Direction_Register); pll_control_bits = MSeries_PLL_Enable_Bit | MSeries_PLL_VCO_Mode_75_150MHz_Bits; devpriv->clock_and_fout2 |= MSeries_Timebase1_Select_Bit | MSeries_Timebase3_Select_Bit; devpriv->clock_and_fout2 &= ~MSeries_PLL_In_Source_Select_Mask; switch (source) { case NI_MIO_PLL_PXI_STAR_TRIGGER_CLOCK: devpriv->clock_and_fout2 |= MSeries_PLL_In_Source_Select_Star_Trigger_Bits; retval = ni_mseries_get_pll_parameters(period_ns, &freq_divider, &freq_multiplier, &devpriv->clock_ns); if (retval < 0) return retval; break; case NI_MIO_PLL_PXI10_CLOCK: /* pxi clock is 10MHz */ devpriv->clock_and_fout2 |= MSeries_PLL_In_Source_Select_PXI_Clock10; retval = ni_mseries_get_pll_parameters(period_ns, &freq_divider, &freq_multiplier, &devpriv->clock_ns); if (retval < 0) return retval; break; default: { unsigned rtsi_channel; static const unsigned max_rtsi_channel = 7; for (rtsi_channel = 0; rtsi_channel <= max_rtsi_channel; ++rtsi_channel) { if (source == NI_MIO_PLL_RTSI_CLOCK(rtsi_channel)) { devpriv->clock_and_fout2 |= MSeries_PLL_In_Source_Select_RTSI_Bits (rtsi_channel); break; } } if (rtsi_channel > max_rtsi_channel) return -EINVAL; retval = ni_mseries_get_pll_parameters(period_ns, &freq_divider, &freq_multiplier, &devpriv-> clock_ns); if (retval < 0) return retval; } break; } ni_writew(devpriv->clock_and_fout2, M_Offset_Clock_and_Fout2); pll_control_bits |= MSeries_PLL_Divisor_Bits(freq_divider) | MSeries_PLL_Multiplier_Bits(freq_multiplier); /* printk("using divider=%i, multiplier=%i for PLL. 
pll_control_bits = 0x%x\n", * freq_divider, freq_multiplier, pll_control_bits); */ /* printk("clock_ns=%d\n", devpriv->clock_ns); */ ni_writew(pll_control_bits, M_Offset_PLL_Control); devpriv->clock_source = source; /* it seems to typically take a few hundred microseconds for PLL to lock */ for (i = 0; i < timeout; ++i) { if (ni_readw(M_Offset_PLL_Status) & MSeries_PLL_Locked_Bit) { break; } udelay(1); } if (i == timeout) { printk ("%s: timed out waiting for PLL to lock to reference clock source %i with period %i ns.\n", __func__, source, period_ns); return -ETIMEDOUT; } return 3; } static int ni_set_master_clock(struct comedi_device *dev, unsigned source, unsigned period_ns) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; if (source == NI_MIO_INTERNAL_CLOCK) { devpriv->rtsi_trig_direction_reg &= ~Use_RTSI_Clock_Bit; devpriv->stc_writew(dev, devpriv->rtsi_trig_direction_reg, RTSI_Trig_Direction_Register); devpriv->clock_ns = TIMEBASE_1_NS; if (board->reg_type & ni_reg_m_series_mask) { devpriv->clock_and_fout2 &= ~(MSeries_Timebase1_Select_Bit | MSeries_Timebase3_Select_Bit); ni_writew(devpriv->clock_and_fout2, M_Offset_Clock_and_Fout2); ni_writew(0, M_Offset_PLL_Control); } devpriv->clock_source = source; } else { if (board->reg_type & ni_reg_m_series_mask) { return ni_mseries_set_pll_master_clock(dev, source, period_ns); } else { if (source == NI_MIO_RTSI_CLOCK) { devpriv->rtsi_trig_direction_reg |= Use_RTSI_Clock_Bit; devpriv->stc_writew(dev, devpriv-> rtsi_trig_direction_reg, RTSI_Trig_Direction_Register); if (period_ns == 0) { printk ("%s: we don't handle an unspecified clock period correctly yet, returning error.\n", __func__); return -EINVAL; } else { devpriv->clock_ns = period_ns; } devpriv->clock_source = source; } else return -EINVAL; } } return 3; } static int ni_valid_rtsi_output_source(struct comedi_device *dev, unsigned chan, unsigned source) { const struct ni_board_struct *board = comedi_board(dev); if (chan >= num_configurable_rtsi_channels(dev)) { if (chan == old_RTSI_clock_channel) { if (source == NI_RTSI_OUTPUT_RTSI_OSC) return 1; else { printk ("%s: invalid source for channel=%i, channel %i is always the RTSI clock for pre-m-series boards.\n", __func__, chan, old_RTSI_clock_channel); return 0; } } return 0; } switch (source) { case NI_RTSI_OUTPUT_ADR_START1: case NI_RTSI_OUTPUT_ADR_START2: case NI_RTSI_OUTPUT_SCLKG: case NI_RTSI_OUTPUT_DACUPDN: case NI_RTSI_OUTPUT_DA_START1: case NI_RTSI_OUTPUT_G_SRC0: case NI_RTSI_OUTPUT_G_GATE0: case NI_RTSI_OUTPUT_RGOUT0: case NI_RTSI_OUTPUT_RTSI_BRD_0: return 1; break; case NI_RTSI_OUTPUT_RTSI_OSC: if (board->reg_type & ni_reg_m_series_mask) return 1; else return 0; break; default: return 0; break; } } static int ni_set_rtsi_routing(struct comedi_device *dev, unsigned chan, unsigned source) { struct ni_private *devpriv = dev->private; if (ni_valid_rtsi_output_source(dev, chan, source) == 0) return -EINVAL; if (chan < 4) { devpriv->rtsi_trig_a_output_reg &= ~RTSI_Trig_Output_Mask(chan); devpriv->rtsi_trig_a_output_reg |= RTSI_Trig_Output_Bits(chan, source); devpriv->stc_writew(dev, devpriv->rtsi_trig_a_output_reg, RTSI_Trig_A_Output_Register); } else if (chan < 8) { devpriv->rtsi_trig_b_output_reg &= ~RTSI_Trig_Output_Mask(chan); devpriv->rtsi_trig_b_output_reg |= RTSI_Trig_Output_Bits(chan, source); devpriv->stc_writew(dev, devpriv->rtsi_trig_b_output_reg, RTSI_Trig_B_Output_Register); } return 2; } static unsigned ni_get_rtsi_routing(struct comedi_device *dev, unsigned chan) { 
struct ni_private *devpriv = dev->private; if (chan < 4) { return RTSI_Trig_Output_Source(chan, devpriv->rtsi_trig_a_output_reg); } else if (chan < num_configurable_rtsi_channels(dev)) { return RTSI_Trig_Output_Source(chan, devpriv->rtsi_trig_b_output_reg); } else { if (chan == old_RTSI_clock_channel) return NI_RTSI_OUTPUT_RTSI_OSC; printk("%s: bug! should never get here?\n", __func__); return 0; } } static int ni_rtsi_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { const struct ni_board_struct *board = comedi_board(dev); struct ni_private *devpriv = dev->private; unsigned int chan = CR_CHAN(insn->chanspec); switch (data[0]) { case INSN_CONFIG_DIO_OUTPUT: if (chan < num_configurable_rtsi_channels(dev)) { devpriv->rtsi_trig_direction_reg |= RTSI_Output_Bit(chan, (board->reg_type & ni_reg_m_series_mask) != 0); } else if (chan == old_RTSI_clock_channel) { devpriv->rtsi_trig_direction_reg |= Drive_RTSI_Clock_Bit; } devpriv->stc_writew(dev, devpriv->rtsi_trig_direction_reg, RTSI_Trig_Direction_Register); break; case INSN_CONFIG_DIO_INPUT: if (chan < num_configurable_rtsi_channels(dev)) { devpriv->rtsi_trig_direction_reg &= ~RTSI_Output_Bit(chan, (board->reg_type & ni_reg_m_series_mask) != 0); } else if (chan == old_RTSI_clock_channel) { devpriv->rtsi_trig_direction_reg &= ~Drive_RTSI_Clock_Bit; } devpriv->stc_writew(dev, devpriv->rtsi_trig_direction_reg, RTSI_Trig_Direction_Register); break; case INSN_CONFIG_DIO_QUERY: if (chan < num_configurable_rtsi_channels(dev)) { data[1] = (devpriv->rtsi_trig_direction_reg & RTSI_Output_Bit(chan, (board->reg_type & ni_reg_m_series_mask) != 0)) ? INSN_CONFIG_DIO_OUTPUT : INSN_CONFIG_DIO_INPUT; } else if (chan == old_RTSI_clock_channel) { data[1] = (devpriv->rtsi_trig_direction_reg & Drive_RTSI_Clock_Bit) ? INSN_CONFIG_DIO_OUTPUT : INSN_CONFIG_DIO_INPUT; } return 2; break; case INSN_CONFIG_SET_CLOCK_SRC: return ni_set_master_clock(dev, data[1], data[2]); break; case INSN_CONFIG_GET_CLOCK_SRC: data[1] = devpriv->clock_source; data[2] = devpriv->clock_ns; return 3; break; case INSN_CONFIG_SET_ROUTING: return ni_set_rtsi_routing(dev, chan, data[1]); break; case INSN_CONFIG_GET_ROUTING: data[1] = ni_get_rtsi_routing(dev, chan); return 2; break; default: return -EINVAL; break; } return 1; } static int cs5529_wait_for_idle(struct comedi_device *dev) { unsigned short status; const int timeout = HZ; int i; for (i = 0; i < timeout; i++) { status = ni_ao_win_inw(dev, CAL_ADC_Status_67xx); if ((status & CSS_ADC_BUSY) == 0) { break; } set_current_state(TASK_INTERRUPTIBLE); if (schedule_timeout(1)) { return -EIO; } } /* printk("looped %i times waiting for idle\n", i); */ if (i == timeout) { printk("%s: %s: timeout\n", __FILE__, __func__); return -ETIME; } return 0; } static void cs5529_command(struct comedi_device *dev, unsigned short value) { static const int timeout = 100; int i; ni_ao_win_outw(dev, value, CAL_ADC_Command_67xx); /* give time for command to start being serially clocked into cs5529. * this insures that the CSS_ADC_BUSY bit will get properly * set before we exit this function. 
*/ for (i = 0; i < timeout; i++) { if ((ni_ao_win_inw(dev, CAL_ADC_Status_67xx) & CSS_ADC_BUSY)) break; udelay(1); } /* printk("looped %i times writing command to cs5529\n", i); */ if (i == timeout) { comedi_error(dev, "possible problem - never saw adc go busy?"); } } /* write to cs5529 register */ static void cs5529_config_write(struct comedi_device *dev, unsigned int value, unsigned int reg_select_bits) { ni_ao_win_outw(dev, ((value >> 16) & 0xff), CAL_ADC_Config_Data_High_Word_67xx); ni_ao_win_outw(dev, (value & 0xffff), CAL_ADC_Config_Data_Low_Word_67xx); reg_select_bits &= CSCMD_REGISTER_SELECT_MASK; cs5529_command(dev, CSCMD_COMMAND | reg_select_bits); if (cs5529_wait_for_idle(dev)) comedi_error(dev, "time or signal in cs5529_config_write()"); } #ifdef NI_CS5529_DEBUG /* read from cs5529 register */ static unsigned int cs5529_config_read(struct comedi_device *dev, unsigned int reg_select_bits) { unsigned int value; reg_select_bits &= CSCMD_REGISTER_SELECT_MASK; cs5529_command(dev, CSCMD_COMMAND | CSCMD_READ | reg_select_bits); if (cs5529_wait_for_idle(dev)) comedi_error(dev, "timeout or signal in cs5529_config_read()"); value = (ni_ao_win_inw(dev, CAL_ADC_Config_Data_High_Word_67xx) << 16) & 0xff0000; value |= ni_ao_win_inw(dev, CAL_ADC_Config_Data_Low_Word_67xx) & 0xffff; return value; } #endif static int cs5529_do_conversion(struct comedi_device *dev, unsigned short *data) { int retval; unsigned short status; cs5529_command(dev, CSCMD_COMMAND | CSCMD_SINGLE_CONVERSION); retval = cs5529_wait_for_idle(dev); if (retval) { comedi_error(dev, "timeout or signal in cs5529_do_conversion()"); return -ETIME; } status = ni_ao_win_inw(dev, CAL_ADC_Status_67xx); if (status & CSS_OSC_DETECT) { printk ("ni_mio_common: cs5529 conversion error, status CSS_OSC_DETECT\n"); return -EIO; } if (status & CSS_OVERRANGE) { printk ("ni_mio_common: cs5529 conversion error, overrange (ignoring)\n"); } if (data) { *data = ni_ao_win_inw(dev, CAL_ADC_Data_67xx); /* cs5529 returns 16 bit signed data in bipolar mode */ *data ^= (1 << 15); } return 0; } static int cs5529_ai_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int n, retval; unsigned short sample; unsigned int channel_select; const unsigned int INTERNAL_REF = 0x1000; /* Set calibration adc source. Docs lie, reference select bits 8 to 11 * do nothing. bit 12 seems to chooses internal reference voltage, bit * 13 causes the adc input to go overrange (maybe reads external reference?) 
*/ if (insn->chanspec & CR_ALT_SOURCE) channel_select = INTERNAL_REF; else channel_select = CR_CHAN(insn->chanspec); ni_ao_win_outw(dev, channel_select, AO_Calibration_Channel_Select_67xx); for (n = 0; n < insn->n; n++) { retval = cs5529_do_conversion(dev, &sample); if (retval < 0) return retval; data[n] = sample; } return insn->n; } static int init_cs5529(struct comedi_device *dev) { unsigned int config_bits = CSCFG_PORT_MODE | CSCFG_WORD_RATE_2180_CYCLES; #if 1 /* do self-calibration */ cs5529_config_write(dev, config_bits | CSCFG_SELF_CAL_OFFSET_GAIN, CSCMD_CONFIG_REGISTER); /* need to force a conversion for calibration to run */ cs5529_do_conversion(dev, NULL); #else /* force gain calibration to 1 */ cs5529_config_write(dev, 0x400000, CSCMD_GAIN_REGISTER); cs5529_config_write(dev, config_bits | CSCFG_SELF_CAL_OFFSET, CSCMD_CONFIG_REGISTER); if (cs5529_wait_for_idle(dev)) comedi_error(dev, "timeout or signal in init_cs5529()\n"); #endif #ifdef NI_CS5529_DEBUG printk("config: 0x%x\n", cs5529_config_read(dev, CSCMD_CONFIG_REGISTER)); printk("gain: 0x%x\n", cs5529_config_read(dev, CSCMD_GAIN_REGISTER)); printk("offset: 0x%x\n", cs5529_config_read(dev, CSCMD_OFFSET_REGISTER)); #endif return 0; }
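/*
 * The search loop in ni_mseries_get_pll_parameters() above picks the
 * divider/multiplier pair whose output period, reference_picosec * div / mult,
 * lands closest to 12500 ps, i.e. the 80 MHz PLL target.  The stand-alone
 * user-space sketch below reproduces that search for one concrete input, as
 * an illustration only; the 10 MHz reference (the usual PXI backplane clock
 * rate) is an assumed example value, not something this file mandates.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const unsigned reference_picosec = 100000;	/* assumed 10 MHz reference */
	const unsigned target_picosec = 12500;		/* 80 MHz PLL output period */
	unsigned div, mult, best_div = 1, best_mult = 1, best_period = 0;

	for (div = 1; div <= 0x10; ++div) {
		for (mult = 1; mult <= 0x100; ++mult) {
			unsigned period = reference_picosec * div / mult;

			/* keep the candidate closest to the 12500 ps target */
			if (abs((int)period - (int)target_picosec) <
			    abs((int)best_period - (int)target_picosec)) {
				best_period = period;
				best_div = div;
				best_mult = mult;
			}
		}
	}
	/* prints: div=1 mult=8 period=12500 ps */
	printf("div=%u mult=%u period=%u ps\n", best_div, best_mult, best_period);
	return 0;
}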
gpl-2.0
mkaluza/Samsung_STE_Kernel
drivers/media/dvb/frontends/mb86a16.c
3104
46803
/*
	Fujitsu MB86A16 DVB-S/DSS DC Receiver driver

	Copyright (C) Manu Abraham (abraham.manu@gmail.com)

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the Free Software
	Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include "dvb_frontend.h"
#include "mb86a16.h"
#include "mb86a16_priv.h"

unsigned int verbose = 5;
module_param(verbose, int, 0644);

#define ABS(x) ((x) < 0 ? (-(x)) : (x))

struct mb86a16_state {
	struct i2c_adapter *i2c_adap;
	const struct mb86a16_config *config;
	struct dvb_frontend frontend;

	/* tuning parameters */
	int frequency;
	int srate;

	/* Internal stuff */
	int master_clk;
	int deci;
	int csel;
	int rsel;
};

#define MB86A16_ERROR 0
#define MB86A16_NOTICE 1
#define MB86A16_INFO 2
#define MB86A16_DEBUG 3

#define dprintk(x, y, z, format, arg...) do {					\
	if (z) {								\
		if ((x > MB86A16_ERROR) && (x > y))				\
			printk(KERN_ERR "%s: " format "\n", __func__, ##arg);	\
		else if ((x > MB86A16_NOTICE) && (x > y))			\
			printk(KERN_NOTICE "%s: " format "\n", __func__, ##arg); \
		else if ((x > MB86A16_INFO) && (x > y))				\
			printk(KERN_INFO "%s: " format "\n", __func__, ##arg);	\
		else if ((x > MB86A16_DEBUG) && (x > y))			\
			printk(KERN_DEBUG "%s: " format "\n", __func__, ##arg);	\
	} else {								\
		if (x > y)							\
			printk(format, ##arg);					\
	}									\
} while (0)

#define TRACE_IN	dprintk(verbose, MB86A16_DEBUG, 1, "-->()")
#define TRACE_OUT	dprintk(verbose, MB86A16_DEBUG, 1, "()-->")

static int mb86a16_write(struct mb86a16_state *state, u8 reg, u8 val)
{
	int ret;
	u8 buf[] = { reg, val };

	struct i2c_msg msg = {
		.addr = state->config->demod_address,
		.flags = 0,
		.buf = buf,
		.len = 2
	};

	dprintk(verbose, MB86A16_DEBUG, 1,
		"writing to [0x%02x],Reg[0x%02x],Data[0x%02x]",
		state->config->demod_address, buf[0], buf[1]);

	ret = i2c_transfer(state->i2c_adap, &msg, 1);

	return (ret != 1) ?
-EREMOTEIO : 0; } static int mb86a16_read(struct mb86a16_state *state, u8 reg, u8 *val) { int ret; u8 b0[] = { reg }; u8 b1[] = { 0 }; struct i2c_msg msg[] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 1 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } }; ret = i2c_transfer(state->i2c_adap, msg, 2); if (ret != 2) { dprintk(verbose, MB86A16_ERROR, 1, "read error(reg=0x%02x, ret=0x%i)", reg, ret); return -EREMOTEIO; } *val = b1[0]; return ret; } static int CNTM_set(struct mb86a16_state *state, unsigned char timint1, unsigned char timint2, unsigned char cnext) { unsigned char val; val = (timint1 << 4) | (timint2 << 2) | cnext; if (mb86a16_write(state, MB86A16_CNTMR, val) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int smrt_set(struct mb86a16_state *state, int rate) { int tmp ; int m ; unsigned char STOFS0, STOFS1; m = 1 << state->deci; tmp = (8192 * state->master_clk - 2 * m * rate * 8192 + state->master_clk / 2) / state->master_clk; STOFS0 = tmp & 0x0ff; STOFS1 = (tmp & 0xf00) >> 8; if (mb86a16_write(state, MB86A16_SRATE1, (state->deci << 2) | (state->csel << 1) | state->rsel) < 0) goto err; if (mb86a16_write(state, MB86A16_SRATE2, STOFS0) < 0) goto err; if (mb86a16_write(state, MB86A16_SRATE3, STOFS1) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -1; } static int srst(struct mb86a16_state *state) { if (mb86a16_write(state, MB86A16_RESET, 0x04) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int afcex_data_set(struct mb86a16_state *state, unsigned char AFCEX_L, unsigned char AFCEX_H) { if (mb86a16_write(state, MB86A16_AFCEXL, AFCEX_L) < 0) goto err; if (mb86a16_write(state, MB86A16_AFCEXH, AFCEX_H) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -1; } static int afcofs_data_set(struct mb86a16_state *state, unsigned char AFCEX_L, unsigned char AFCEX_H) { if (mb86a16_write(state, 0x58, AFCEX_L) < 0) goto err; if (mb86a16_write(state, 0x59, AFCEX_H) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int stlp_set(struct mb86a16_state *state, unsigned char STRAS, unsigned char STRBS) { if (mb86a16_write(state, MB86A16_STRFILTCOEF1, (STRBS << 3) | (STRAS)) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int Vi_set(struct mb86a16_state *state, unsigned char ETH, unsigned char VIA) { if (mb86a16_write(state, MB86A16_VISET2, 0x04) < 0) goto err; if (mb86a16_write(state, MB86A16_VISET3, 0xf5) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int initial_set(struct mb86a16_state *state) { if (stlp_set(state, 5, 7)) goto err; udelay(100); if (afcex_data_set(state, 0, 0)) goto err; udelay(100); if (afcofs_data_set(state, 0, 0)) goto err; udelay(100); if (mb86a16_write(state, MB86A16_CRLFILTCOEF1, 0x16) < 0) goto err; if (mb86a16_write(state, 0x2f, 0x21) < 0) goto err; if (mb86a16_write(state, MB86A16_VIMAG, 0x38) < 0) goto err; if (mb86a16_write(state, MB86A16_FAGCS1, 0x00) < 0) goto err; if (mb86a16_write(state, MB86A16_FAGCS2, 0x1c) < 0) goto err; if (mb86a16_write(state, MB86A16_FAGCS3, 0x20) < 0) goto err; if (mb86a16_write(state, MB86A16_FAGCS4, 0x1e) < 0) goto err; if (mb86a16_write(state, MB86A16_FAGCS5, 
0x23) < 0) goto err; if (mb86a16_write(state, 0x54, 0xff) < 0) goto err; if (mb86a16_write(state, MB86A16_TSOUT, 0x00) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int S01T_set(struct mb86a16_state *state, unsigned char s1t, unsigned s0t) { if (mb86a16_write(state, 0x33, (s1t << 3) | s0t) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int EN_set(struct mb86a16_state *state, int cren, int afcen) { unsigned char val; val = 0x7a | (cren << 7) | (afcen << 2); if (mb86a16_write(state, 0x49, val) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int AFCEXEN_set(struct mb86a16_state *state, int afcexen, int smrt) { unsigned char AFCA ; if (smrt > 18875) AFCA = 4; else if (smrt > 9375) AFCA = 3; else if (smrt > 2250) AFCA = 2; else AFCA = 1; if (mb86a16_write(state, 0x2a, 0x02 | (afcexen << 5) | (AFCA << 2)) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int DAGC_data_set(struct mb86a16_state *state, unsigned char DAGCA, unsigned char DAGCW) { if (mb86a16_write(state, 0x2d, (DAGCA << 3) | DAGCW) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static void smrt_info_get(struct mb86a16_state *state, int rate) { if (rate >= 37501) { state->deci = 0; state->csel = 0; state->rsel = 0; } else if (rate >= 30001) { state->deci = 0; state->csel = 0; state->rsel = 1; } else if (rate >= 26251) { state->deci = 0; state->csel = 1; state->rsel = 0; } else if (rate >= 22501) { state->deci = 0; state->csel = 1; state->rsel = 1; } else if (rate >= 18751) { state->deci = 1; state->csel = 0; state->rsel = 0; } else if (rate >= 15001) { state->deci = 1; state->csel = 0; state->rsel = 1; } else if (rate >= 13126) { state->deci = 1; state->csel = 1; state->rsel = 0; } else if (rate >= 11251) { state->deci = 1; state->csel = 1; state->rsel = 1; } else if (rate >= 9376) { state->deci = 2; state->csel = 0; state->rsel = 0; } else if (rate >= 7501) { state->deci = 2; state->csel = 0; state->rsel = 1; } else if (rate >= 6563) { state->deci = 2; state->csel = 1; state->rsel = 0; } else if (rate >= 5626) { state->deci = 2; state->csel = 1; state->rsel = 1; } else if (rate >= 4688) { state->deci = 3; state->csel = 0; state->rsel = 0; } else if (rate >= 3751) { state->deci = 3; state->csel = 0; state->rsel = 1; } else if (rate >= 3282) { state->deci = 3; state->csel = 1; state->rsel = 0; } else if (rate >= 2814) { state->deci = 3; state->csel = 1; state->rsel = 1; } else if (rate >= 2344) { state->deci = 4; state->csel = 0; state->rsel = 0; } else if (rate >= 1876) { state->deci = 4; state->csel = 0; state->rsel = 1; } else if (rate >= 1641) { state->deci = 4; state->csel = 1; state->rsel = 0; } else if (rate >= 1407) { state->deci = 4; state->csel = 1; state->rsel = 1; } else if (rate >= 1172) { state->deci = 5; state->csel = 0; state->rsel = 0; } else if (rate >= 939) { state->deci = 5; state->csel = 0; state->rsel = 1; } else if (rate >= 821) { state->deci = 5; state->csel = 1; state->rsel = 0; } else { state->deci = 5; state->csel = 1; state->rsel = 1; } if (state->csel == 0) state->master_clk = 92000; else state->master_clk = 61333; } static int signal_det(struct mb86a16_state *state, int smrt, unsigned char *SIG) { int ret ; int smrtd ; int wait_sym ; u32 wait_t; unsigned char S[3] ; int i ; 
if (*SIG > 45) { if (CNTM_set(state, 2, 1, 2) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error"); return -1; } wait_sym = 40000; } else { if (CNTM_set(state, 3, 1, 2) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error"); return -1; } wait_sym = 80000; } for (i = 0; i < 3; i++) { if (i == 0) smrtd = smrt * 98 / 100; else if (i == 1) smrtd = smrt; else smrtd = smrt * 102 / 100; smrt_info_get(state, smrtd); smrt_set(state, smrtd); srst(state); wait_t = (wait_sym + 99 * smrtd / 100) / smrtd; if (wait_t == 0) wait_t = 1; msleep_interruptible(10); if (mb86a16_read(state, 0x37, &(S[i])) != 2) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } } if ((S[1] > S[0] * 112 / 100) && (S[1] > S[2] * 112 / 100)) { ret = 1; } else { ret = 0; } *SIG = S[1]; if (CNTM_set(state, 0, 1, 2) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error"); return -1; } return ret; } static int rf_val_set(struct mb86a16_state *state, int f, int smrt, unsigned char R) { unsigned char C, F, B; int M; unsigned char rf_val[5]; int ack = -1; if (smrt > 37750) C = 1; else if (smrt > 18875) C = 2; else if (smrt > 5500) C = 3; else C = 4; if (smrt > 30500) F = 3; else if (smrt > 9375) F = 1; else if (smrt > 4625) F = 0; else F = 2; if (f < 1060) B = 0; else if (f < 1175) B = 1; else if (f < 1305) B = 2; else if (f < 1435) B = 3; else if (f < 1570) B = 4; else if (f < 1715) B = 5; else if (f < 1845) B = 6; else if (f < 1980) B = 7; else if (f < 2080) B = 8; else B = 9; M = f * (1 << R) / 2; rf_val[0] = 0x01 | (C << 3) | (F << 1); rf_val[1] = (R << 5) | ((M & 0x1f000) >> 12); rf_val[2] = (M & 0x00ff0) >> 4; rf_val[3] = ((M & 0x0000f) << 4) | B; /* Frequency Set */ if (mb86a16_write(state, 0x21, rf_val[0]) < 0) ack = 0; if (mb86a16_write(state, 0x22, rf_val[1]) < 0) ack = 0; if (mb86a16_write(state, 0x23, rf_val[2]) < 0) ack = 0; if (mb86a16_write(state, 0x24, rf_val[3]) < 0) ack = 0; if (mb86a16_write(state, 0x25, 0x01) < 0) ack = 0; if (ack == 0) { dprintk(verbose, MB86A16_ERROR, 1, "RF Setup - I2C transfer error"); return -EREMOTEIO; } return 0; } static int afcerr_chk(struct mb86a16_state *state) { unsigned char AFCM_L, AFCM_H ; int AFCM ; int afcm, afcerr ; if (mb86a16_read(state, 0x0e, &AFCM_L) != 2) goto err; if (mb86a16_read(state, 0x0f, &AFCM_H) != 2) goto err; AFCM = (AFCM_H << 8) + AFCM_L; if (AFCM > 2048) afcm = AFCM - 4096; else afcm = AFCM; afcerr = afcm * state->master_clk / 8192; return afcerr; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int dagcm_val_get(struct mb86a16_state *state) { int DAGCM; unsigned char DAGCM_H, DAGCM_L; if (mb86a16_read(state, 0x45, &DAGCM_L) != 2) goto err; if (mb86a16_read(state, 0x46, &DAGCM_H) != 2) goto err; DAGCM = (DAGCM_H << 8) + DAGCM_L; return DAGCM; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int mb86a16_read_status(struct dvb_frontend *fe, fe_status_t *status) { u8 stat, stat2; struct mb86a16_state *state = fe->demodulator_priv; *status = 0; if (mb86a16_read(state, MB86A16_SIG1, &stat) != 2) goto err; if (mb86a16_read(state, MB86A16_SIG2, &stat2) != 2) goto err; if ((stat > 25) && (stat2 > 25)) *status |= FE_HAS_SIGNAL; if ((stat > 45) && (stat2 > 45)) *status |= FE_HAS_CARRIER; if (mb86a16_read(state, MB86A16_STATUS, &stat) != 2) goto err; if (stat & 0x01) *status |= FE_HAS_SYNC; if (stat & 0x01) *status |= FE_HAS_VITERBI; if (mb86a16_read(state, MB86A16_FRAMESYNC, &stat) != 2) goto err; if ((stat & 0x0f) && (*status & FE_HAS_VITERBI)) 
*status |= FE_HAS_LOCK; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int sync_chk(struct mb86a16_state *state, unsigned char *VIRM) { unsigned char val; int sync; if (mb86a16_read(state, 0x0d, &val) != 2) goto err; dprintk(verbose, MB86A16_INFO, 1, "Status = %02x,", val); sync = val & 0x01; *VIRM = (val & 0x1c) >> 2; return sync; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int freqerr_chk(struct mb86a16_state *state, int fTP, int smrt, int unit) { unsigned char CRM, AFCML, AFCMH; unsigned char temp1, temp2, temp3; int crm, afcm, AFCM; int crrerr, afcerr; /* kHz */ int frqerr; /* MHz */ int afcen, afcexen = 0; int R, M, fOSC, fOSC_OFS; if (mb86a16_read(state, 0x43, &CRM) != 2) goto err; if (CRM > 127) crm = CRM - 256; else crm = CRM; crrerr = smrt * crm / 256; if (mb86a16_read(state, 0x49, &temp1) != 2) goto err; afcen = (temp1 & 0x04) >> 2; if (afcen == 0) { if (mb86a16_read(state, 0x2a, &temp1) != 2) goto err; afcexen = (temp1 & 0x20) >> 5; } if (afcen == 1) { if (mb86a16_read(state, 0x0e, &AFCML) != 2) goto err; if (mb86a16_read(state, 0x0f, &AFCMH) != 2) goto err; } else if (afcexen == 1) { if (mb86a16_read(state, 0x2b, &AFCML) != 2) goto err; if (mb86a16_read(state, 0x2c, &AFCMH) != 2) goto err; } if ((afcen == 1) || (afcexen == 1)) { smrt_info_get(state, smrt); AFCM = ((AFCMH & 0x01) << 8) + AFCML; if (AFCM > 255) afcm = AFCM - 512; else afcm = AFCM; afcerr = afcm * state->master_clk / 8192; } else afcerr = 0; if (mb86a16_read(state, 0x22, &temp1) != 2) goto err; if (mb86a16_read(state, 0x23, &temp2) != 2) goto err; if (mb86a16_read(state, 0x24, &temp3) != 2) goto err; R = (temp1 & 0xe0) >> 5; M = ((temp1 & 0x1f) << 12) + (temp2 << 4) + (temp3 >> 4); if (R == 0) fOSC = 2 * M; else fOSC = M; fOSC_OFS = fOSC - fTP; if (unit == 0) { /* MHz */ if (crrerr + afcerr + fOSC_OFS * 1000 >= 0) frqerr = (crrerr + afcerr + fOSC_OFS * 1000 + 500) / 1000; else frqerr = (crrerr + afcerr + fOSC_OFS * 1000 - 500) / 1000; } else { /* kHz */ frqerr = crrerr + afcerr + fOSC_OFS * 1000; } return frqerr; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static unsigned char vco_dev_get(struct mb86a16_state *state, int smrt) { unsigned char R; if (smrt > 9375) R = 0; else R = 1; return R; } static void swp_info_get(struct mb86a16_state *state, int fOSC_start, int smrt, int v, int R, int swp_ofs, int *fOSC, int *afcex_freq, unsigned char *AFCEX_L, unsigned char *AFCEX_H) { int AFCEX ; int crnt_swp_freq ; crnt_swp_freq = fOSC_start * 1000 + v * swp_ofs; if (R == 0) *fOSC = (crnt_swp_freq + 1000) / 2000 * 2; else *fOSC = (crnt_swp_freq + 500) / 1000; if (*fOSC >= crnt_swp_freq) *afcex_freq = *fOSC * 1000 - crnt_swp_freq; else *afcex_freq = crnt_swp_freq - *fOSC * 1000; AFCEX = *afcex_freq * 8192 / state->master_clk; *AFCEX_L = AFCEX & 0x00ff; *AFCEX_H = (AFCEX & 0x0f00) >> 8; } static int swp_freq_calcuation(struct mb86a16_state *state, int i, int v, int *V, int vmax, int vmin, int SIGMIN, int fOSC, int afcex_freq, int swp_ofs, unsigned char *SIG1) { int swp_freq ; if ((i % 2 == 1) && (v <= vmax)) { /* positive v (case 1) */ if ((v - 1 == vmin) && (*(V + 30 + v) >= 0) && (*(V + 30 + v - 1) >= 0) && (*(V + 30 + v - 1) > *(V + 30 + v)) && (*(V + 30 + v - 1) > SIGMIN)) { swp_freq = fOSC * 1000 + afcex_freq - swp_ofs; *SIG1 = *(V + 30 + v - 1); } else if ((v == vmax) && (*(V + 30 + v) >= 0) && (*(V + 30 + v - 1) >= 0) && (*(V + 30 + v) > *(V + 30 + v - 1)) && (*(V + 30 + v) > 
SIGMIN)) { /* (case 2) */ swp_freq = fOSC * 1000 + afcex_freq; *SIG1 = *(V + 30 + v); } else if ((*(V + 30 + v) > 0) && (*(V + 30 + v - 1) > 0) && (*(V + 30 + v - 2) > 0) && (*(V + 30 + v - 3) > 0) && (*(V + 30 + v - 1) > *(V + 30 + v)) && (*(V + 30 + v - 2) > *(V + 30 + v - 3)) && ((*(V + 30 + v - 1) > SIGMIN) || (*(V + 30 + v - 2) > SIGMIN))) { /* (case 3) */ if (*(V + 30 + v - 1) >= *(V + 30 + v - 2)) { swp_freq = fOSC * 1000 + afcex_freq - swp_ofs; *SIG1 = *(V + 30 + v - 1); } else { swp_freq = fOSC * 1000 + afcex_freq - swp_ofs * 2; *SIG1 = *(V + 30 + v - 2); } } else if ((v == vmax) && (*(V + 30 + v) >= 0) && (*(V + 30 + v - 1) >= 0) && (*(V + 30 + v - 2) >= 0) && (*(V + 30 + v) > *(V + 30 + v - 2)) && (*(V + 30 + v - 1) > *(V + 30 + v - 2)) && ((*(V + 30 + v) > SIGMIN) || (*(V + 30 + v - 1) > SIGMIN))) { /* (case 4) */ if (*(V + 30 + v) >= *(V + 30 + v - 1)) { swp_freq = fOSC * 1000 + afcex_freq; *SIG1 = *(V + 30 + v); } else { swp_freq = fOSC * 1000 + afcex_freq - swp_ofs; *SIG1 = *(V + 30 + v - 1); } } else { swp_freq = -1 ; } } else if ((i % 2 == 0) && (v >= vmin)) { /* Negative v (case 1) */ if ((*(V + 30 + v) > 0) && (*(V + 30 + v + 1) > 0) && (*(V + 30 + v + 2) > 0) && (*(V + 30 + v + 1) > *(V + 30 + v)) && (*(V + 30 + v + 1) > *(V + 30 + v + 2)) && (*(V + 30 + v + 1) > SIGMIN)) { swp_freq = fOSC * 1000 + afcex_freq + swp_ofs; *SIG1 = *(V + 30 + v + 1); } else if ((v + 1 == vmax) && (*(V + 30 + v) >= 0) && (*(V + 30 + v + 1) >= 0) && (*(V + 30 + v + 1) > *(V + 30 + v)) && (*(V + 30 + v + 1) > SIGMIN)) { /* (case 2) */ swp_freq = fOSC * 1000 + afcex_freq + swp_ofs; *SIG1 = *(V + 30 + v); } else if ((v == vmin) && (*(V + 30 + v) > 0) && (*(V + 30 + v + 1) > 0) && (*(V + 30 + v + 2) > 0) && (*(V + 30 + v) > *(V + 30 + v + 1)) && (*(V + 30 + v) > *(V + 30 + v + 2)) && (*(V + 30 + v) > SIGMIN)) { /* (case 3) */ swp_freq = fOSC * 1000 + afcex_freq; *SIG1 = *(V + 30 + v); } else if ((*(V + 30 + v) >= 0) && (*(V + 30 + v + 1) >= 0) && (*(V + 30 + v + 2) >= 0) && (*(V + 30 + v + 3) >= 0) && (*(V + 30 + v + 1) > *(V + 30 + v)) && (*(V + 30 + v + 2) > *(V + 30 + v + 3)) && ((*(V + 30 + v + 1) > SIGMIN) || (*(V + 30 + v + 2) > SIGMIN))) { /* (case 4) */ if (*(V + 30 + v + 1) >= *(V + 30 + v + 2)) { swp_freq = fOSC * 1000 + afcex_freq + swp_ofs; *SIG1 = *(V + 30 + v + 1); } else { swp_freq = fOSC * 1000 + afcex_freq + swp_ofs * 2; *SIG1 = *(V + 30 + v + 2); } } else if ((*(V + 30 + v) >= 0) && (*(V + 30 + v + 1) >= 0) && (*(V + 30 + v + 2) >= 0) && (*(V + 30 + v + 3) >= 0) && (*(V + 30 + v) > *(V + 30 + v + 2)) && (*(V + 30 + v + 1) > *(V + 30 + v + 2)) && (*(V + 30 + v) > *(V + 30 + v + 3)) && (*(V + 30 + v + 1) > *(V + 30 + v + 3)) && ((*(V + 30 + v) > SIGMIN) || (*(V + 30 + v + 1) > SIGMIN))) { /* (case 5) */ if (*(V + 30 + v) >= *(V + 30 + v + 1)) { swp_freq = fOSC * 1000 + afcex_freq; *SIG1 = *(V + 30 + v); } else { swp_freq = fOSC * 1000 + afcex_freq + swp_ofs; *SIG1 = *(V + 30 + v + 1); } } else if ((v + 2 == vmin) && (*(V + 30 + v) >= 0) && (*(V + 30 + v + 1) >= 0) && (*(V + 30 + v + 2) >= 0) && (*(V + 30 + v + 1) > *(V + 30 + v)) && (*(V + 30 + v + 2) > *(V + 30 + v)) && ((*(V + 30 + v + 1) > SIGMIN) || (*(V + 30 + v + 2) > SIGMIN))) { /* (case 6) */ if (*(V + 30 + v + 1) >= *(V + 30 + v + 2)) { swp_freq = fOSC * 1000 + afcex_freq + swp_ofs; *SIG1 = *(V + 30 + v + 1); } else { swp_freq = fOSC * 1000 + afcex_freq + swp_ofs * 2; *SIG1 = *(V + 30 + v + 2); } } else if ((vmax == 0) && (vmin == 0) && (*(V + 30 + v) > SIGMIN)) { swp_freq = fOSC * 1000; *SIG1 = *(V + 30 + v); } else 
swp_freq = -1; } else swp_freq = -1; return swp_freq; } static void swp_info_get2(struct mb86a16_state *state, int smrt, int R, int swp_freq, int *afcex_freq, int *fOSC, unsigned char *AFCEX_L, unsigned char *AFCEX_H) { int AFCEX ; if (R == 0) *fOSC = (swp_freq + 1000) / 2000 * 2; else *fOSC = (swp_freq + 500) / 1000; if (*fOSC >= swp_freq) *afcex_freq = *fOSC * 1000 - swp_freq; else *afcex_freq = swp_freq - *fOSC * 1000; AFCEX = *afcex_freq * 8192 / state->master_clk; *AFCEX_L = AFCEX & 0x00ff; *AFCEX_H = (AFCEX & 0x0f00) >> 8; } static void afcex_info_get(struct mb86a16_state *state, int afcex_freq, unsigned char *AFCEX_L, unsigned char *AFCEX_H) { int AFCEX ; AFCEX = afcex_freq * 8192 / state->master_clk; *AFCEX_L = AFCEX & 0x00ff; *AFCEX_H = (AFCEX & 0x0f00) >> 8; } static int SEQ_set(struct mb86a16_state *state, unsigned char loop) { /* SLOCK0 = 0 */ if (mb86a16_write(state, 0x32, 0x02 | (loop << 2)) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } return 0; } static int iq_vt_set(struct mb86a16_state *state, unsigned char IQINV) { /* Viterbi Rate, IQ Settings */ if (mb86a16_write(state, 0x06, 0xdf | (IQINV << 5)) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } return 0; } static int FEC_srst(struct mb86a16_state *state) { if (mb86a16_write(state, MB86A16_RESET, 0x02) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } return 0; } static int S2T_set(struct mb86a16_state *state, unsigned char S2T) { if (mb86a16_write(state, 0x34, 0x70 | S2T) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } return 0; } static int S45T_set(struct mb86a16_state *state, unsigned char S4T, unsigned char S5T) { if (mb86a16_write(state, 0x35, 0x00 | (S5T << 4) | S4T) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } return 0; } static int mb86a16_set_fe(struct mb86a16_state *state) { u8 agcval, cnmval; int i, j; int fOSC = 0; int fOSC_start = 0; int wait_t; int fcp; int swp_ofs; int V[60]; u8 SIG1MIN; unsigned char CREN, AFCEN, AFCEXEN; unsigned char SIG1; unsigned char TIMINT1, TIMINT2, TIMEXT; unsigned char S0T, S1T; unsigned char S2T; /* unsigned char S2T, S3T; */ unsigned char S4T, S5T; unsigned char AFCEX_L, AFCEX_H; unsigned char R; unsigned char VIRM; unsigned char ETH, VIA; unsigned char junk; int loop; int ftemp; int v, vmax, vmin; int vmax_his, vmin_his; int swp_freq, prev_swp_freq[20]; int prev_freq_num; int signal_dupl; int afcex_freq; int signal; int afcerr; int temp_freq, delta_freq; int dagcm[4]; int smrt_d; /* int freq_err; */ int n; int ret = -1; int sync; dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate); fcp = 3000; swp_ofs = state->srate / 4; for (i = 0; i < 60; i++) V[i] = -1; for (i = 0; i < 20; i++) prev_swp_freq[i] = 0; SIG1MIN = 25; for (n = 0; ((n < 3) && (ret == -1)); n++) { SEQ_set(state, 0); iq_vt_set(state, 0); CREN = 0; AFCEN = 0; AFCEXEN = 1; TIMINT1 = 0; TIMINT2 = 1; TIMEXT = 2; S1T = 0; S0T = 0; if (initial_set(state) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "initial set failed"); return -1; } if (DAGC_data_set(state, 3, 2) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "DAGC data set error"); return -1; } if (EN_set(state, CREN, AFCEN) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "EN set error"); return -1; /* (0, 0) */ } if (AFCEXEN_set(state, AFCEXEN, state->srate) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error"); return -1; /* (1, smrt) = 
(1, symbolrate) */ } if (CNTM_set(state, TIMINT1, TIMINT2, TIMEXT) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "CNTM set error"); return -1; /* (0, 1, 2) */ } if (S01T_set(state, S1T, S0T) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "S01T set error"); return -1; /* (0, 0) */ } smrt_info_get(state, state->srate); if (smrt_set(state, state->srate) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "smrt info get error"); return -1; } R = vco_dev_get(state, state->srate); if (R == 1) fOSC_start = state->frequency; else if (R == 0) { if (state->frequency % 2 == 0) { fOSC_start = state->frequency; } else { fOSC_start = state->frequency + 1; if (fOSC_start > 2150) fOSC_start = state->frequency - 1; } } loop = 1; ftemp = fOSC_start * 1000; vmax = 0 ; while (loop == 1) { ftemp = ftemp + swp_ofs; vmax++; /* Upper bound */ if (ftemp > 2150000) { loop = 0; vmax--; } else { if ((ftemp == 2150000) || (ftemp - state->frequency * 1000 >= fcp + state->srate / 4)) loop = 0; } } loop = 1; ftemp = fOSC_start * 1000; vmin = 0 ; while (loop == 1) { ftemp = ftemp - swp_ofs; vmin--; /* Lower bound */ if (ftemp < 950000) { loop = 0; vmin++; } else { if ((ftemp == 950000) || (state->frequency * 1000 - ftemp >= fcp + state->srate / 4)) loop = 0; } } wait_t = (8000 + state->srate / 2) / state->srate; if (wait_t == 0) wait_t = 1; i = 0; j = 0; prev_freq_num = 0; loop = 1; signal = 0; vmax_his = 0; vmin_his = 0; v = 0; while (loop == 1) { swp_info_get(state, fOSC_start, state->srate, v, R, swp_ofs, &fOSC, &afcex_freq, &AFCEX_L, &AFCEX_H); udelay(100); if (rf_val_set(state, fOSC, state->srate, R) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "rf val set error"); return -1; } udelay(100); if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error"); return -1; } if (srst(state) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "srst error"); return -1; } msleep_interruptible(wait_t); if (mb86a16_read(state, 0x37, &SIG1) != 2) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -1; } V[30 + v] = SIG1 ; swp_freq = swp_freq_calcuation(state, i, v, V, vmax, vmin, SIG1MIN, fOSC, afcex_freq, swp_ofs, &SIG1); /* changed */ signal_dupl = 0; for (j = 0; j < prev_freq_num; j++) { if ((ABS(prev_swp_freq[j] - swp_freq)) < (swp_ofs * 3 / 2)) { signal_dupl = 1; dprintk(verbose, MB86A16_INFO, 1, "Probably Duplicate Signal, j = %d", j); } } if ((signal_dupl == 0) && (swp_freq > 0) && (ABS(swp_freq - state->frequency * 1000) < fcp + state->srate / 6)) { dprintk(verbose, MB86A16_DEBUG, 1, "------ Signal detect ------ [swp_freq=[%07d, srate=%05d]]", swp_freq, state->srate); prev_swp_freq[prev_freq_num] = swp_freq; prev_freq_num++; swp_info_get2(state, state->srate, R, swp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H); if (rf_val_set(state, fOSC, state->srate, R) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "rf val set error"); return -1; } if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error"); return -1; } signal = signal_det(state, state->srate, &SIG1); if (signal == 1) { dprintk(verbose, MB86A16_ERROR, 1, "***** Signal Found *****"); loop = 0; } else { dprintk(verbose, MB86A16_ERROR, 1, "!!!!! 
No signal !!!!!, try again..."); smrt_info_get(state, state->srate); if (smrt_set(state, state->srate) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "smrt set error"); return -1; } } } if (v > vmax) vmax_his = 1 ; if (v < vmin) vmin_his = 1 ; i++; if ((i % 2 == 1) && (vmax_his == 1)) i++; if ((i % 2 == 0) && (vmin_his == 1)) i++; if (i % 2 == 1) v = (i + 1) / 2; else v = -i / 2; if ((vmax_his == 1) && (vmin_his == 1)) loop = 0 ; } if (signal == 1) { dprintk(verbose, MB86A16_INFO, 1, " Start Freq Error Check"); S1T = 7 ; S0T = 1 ; CREN = 0 ; AFCEN = 1 ; AFCEXEN = 0 ; if (S01T_set(state, S1T, S0T) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "S01T set error"); return -1; } smrt_info_get(state, state->srate); if (smrt_set(state, state->srate) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "smrt set error"); return -1; } if (EN_set(state, CREN, AFCEN) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "EN set error"); return -1; } if (AFCEXEN_set(state, AFCEXEN, state->srate) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error"); return -1; } afcex_info_get(state, afcex_freq, &AFCEX_L, &AFCEX_H); if (afcofs_data_set(state, AFCEX_L, AFCEX_H) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "AFCOFS data set error"); return -1; } if (srst(state) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "srst error"); return -1; } /* delay 4~200 */ wait_t = 200000 / state->master_clk + 200000 / state->srate; msleep(wait_t); afcerr = afcerr_chk(state); if (afcerr == -1) return -1; swp_freq = fOSC * 1000 + afcerr ; AFCEXEN = 1 ; if (state->srate >= 1500) smrt_d = state->srate / 3; else smrt_d = state->srate / 2; smrt_info_get(state, smrt_d); if (smrt_set(state, smrt_d) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "smrt set error"); return -1; } if (AFCEXEN_set(state, AFCEXEN, smrt_d) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error"); return -1; } R = vco_dev_get(state, smrt_d); if (DAGC_data_set(state, 2, 0) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "DAGC data set error"); return -1; } for (i = 0; i < 3; i++) { temp_freq = swp_freq + (i - 1) * state->srate / 8; swp_info_get2(state, smrt_d, R, temp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H); if (rf_val_set(state, fOSC, smrt_d, R) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "rf val set error"); return -1; } if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error"); return -1; } wait_t = 200000 / state->master_clk + 40000 / smrt_d; msleep(wait_t); dagcm[i] = dagcm_val_get(state); } if ((dagcm[0] > dagcm[1]) && (dagcm[0] > dagcm[2]) && (dagcm[0] - dagcm[1] > 2 * (dagcm[2] - dagcm[1]))) { temp_freq = swp_freq - 2 * state->srate / 8; swp_info_get2(state, smrt_d, R, temp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H); if (rf_val_set(state, fOSC, smrt_d, R) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "rf val set error"); return -1; } if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "afcex data set"); return -1; } wait_t = 200000 / state->master_clk + 40000 / smrt_d; msleep(wait_t); dagcm[3] = dagcm_val_get(state); if (dagcm[3] > dagcm[1]) delta_freq = (dagcm[2] - dagcm[0] + dagcm[1] - dagcm[3]) * state->srate / 300; else delta_freq = 0; } else if ((dagcm[2] > dagcm[1]) && (dagcm[2] > dagcm[0]) && (dagcm[2] - dagcm[1] > 2 * (dagcm[0] - dagcm[1]))) { temp_freq = swp_freq + 2 * state->srate / 8; swp_info_get2(state, smrt_d, R, temp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H); if (rf_val_set(state, fOSC, smrt_d, R) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "rf val set"); return -1; } if 
(afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "afcex data set"); return -1; } wait_t = 200000 / state->master_clk + 40000 / smrt_d; msleep(wait_t); dagcm[3] = dagcm_val_get(state); if (dagcm[3] > dagcm[1]) delta_freq = (dagcm[2] - dagcm[0] + dagcm[3] - dagcm[1]) * state->srate / 300; else delta_freq = 0 ; } else { delta_freq = 0 ; } dprintk(verbose, MB86A16_INFO, 1, "SWEEP Frequency = %d", swp_freq); swp_freq += delta_freq; dprintk(verbose, MB86A16_INFO, 1, "Adjusting .., DELTA Freq = %d, SWEEP Freq=%d", delta_freq, swp_freq); if (ABS(state->frequency * 1000 - swp_freq) > 3800) { dprintk(verbose, MB86A16_INFO, 1, "NO -- SIGNAL !"); } else { S1T = 0; S0T = 3; CREN = 1; AFCEN = 0; AFCEXEN = 1; if (S01T_set(state, S1T, S0T) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "S01T set error"); return -1; } if (DAGC_data_set(state, 0, 0) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "DAGC data set error"); return -1; } R = vco_dev_get(state, state->srate); smrt_info_get(state, state->srate); if (smrt_set(state, state->srate) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "smrt set error"); return -1; } if (EN_set(state, CREN, AFCEN) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "EN set error"); return -1; } if (AFCEXEN_set(state, AFCEXEN, state->srate) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error"); return -1; } swp_info_get2(state, state->srate, R, swp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H); if (rf_val_set(state, fOSC, state->srate, R) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "rf val set error"); return -1; } if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error"); return -1; } if (srst(state) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "srst error"); return -1; } wait_t = 7 + (10000 + state->srate / 2) / state->srate; if (wait_t == 0) wait_t = 1; msleep_interruptible(wait_t); if (mb86a16_read(state, 0x37, &SIG1) != 2) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } if (SIG1 > 110) { S2T = 4; S4T = 1; S5T = 6; ETH = 4; VIA = 6; wait_t = 7 + (917504 + state->srate / 2) / state->srate; } else if (SIG1 > 105) { S2T = 4; S4T = 2; S5T = 8; ETH = 7; VIA = 2; wait_t = 7 + (1048576 + state->srate / 2) / state->srate; } else if (SIG1 > 85) { S2T = 5; S4T = 2; S5T = 8; ETH = 7; VIA = 2; wait_t = 7 + (1310720 + state->srate / 2) / state->srate; } else if (SIG1 > 65) { S2T = 6; S4T = 2; S5T = 8; ETH = 7; VIA = 2; wait_t = 7 + (1572864 + state->srate / 2) / state->srate; } else { S2T = 7; S4T = 2; S5T = 8; ETH = 7; VIA = 2; wait_t = 7 + (2097152 + state->srate / 2) / state->srate; } wait_t *= 2; /* FOS */ S2T_set(state, S2T); S45T_set(state, S4T, S5T); Vi_set(state, ETH, VIA); srst(state); msleep_interruptible(wait_t); sync = sync_chk(state, &VIRM); dprintk(verbose, MB86A16_INFO, 1, "-------- Viterbi=[%d] SYNC=[%d] ---------", VIRM, sync); if (VIRM) { if (VIRM == 4) { /* 5/6 */ if (SIG1 > 110) wait_t = (786432 + state->srate / 2) / state->srate; else wait_t = (1572864 + state->srate / 2) / state->srate; if (state->srate < 5000) /* FIXME ! , should be a long wait ! 
*/ msleep_interruptible(wait_t); else msleep_interruptible(wait_t); if (sync_chk(state, &junk) == 0) { iq_vt_set(state, 1); FEC_srst(state); } } /* 1/2, 2/3, 3/4, 7/8 */ if (SIG1 > 110) wait_t = (786432 + state->srate / 2) / state->srate; else wait_t = (1572864 + state->srate / 2) / state->srate; msleep_interruptible(wait_t); SEQ_set(state, 1); } else { dprintk(verbose, MB86A16_INFO, 1, "NO -- SYNC"); SEQ_set(state, 1); ret = -1; } } } else { dprintk(verbose, MB86A16_INFO, 1, "NO -- SIGNAL"); ret = -1; } sync = sync_chk(state, &junk); if (sync) { dprintk(verbose, MB86A16_INFO, 1, "******* SYNC *******"); freqerr_chk(state, state->frequency, state->srate, 1); ret = 0; break; } } mb86a16_read(state, 0x15, &agcval); mb86a16_read(state, 0x26, &cnmval); dprintk(verbose, MB86A16_INFO, 1, "AGC = %02x CNM = %02x", agcval, cnmval); return ret; } static int mb86a16_send_diseqc_msg(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd *cmd) { struct mb86a16_state *state = fe->demodulator_priv; int i; u8 regs; if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA) < 0) goto err; if (mb86a16_write(state, MB86A16_DCCOUT, 0x00) < 0) goto err; if (mb86a16_write(state, MB86A16_TONEOUT2, 0x04) < 0) goto err; regs = 0x18; if (cmd->msg_len > 5 || cmd->msg_len < 4) return -EINVAL; for (i = 0; i < cmd->msg_len; i++) { if (mb86a16_write(state, regs, cmd->msg[i]) < 0) goto err; regs++; } i += 0x90; msleep_interruptible(10); if (mb86a16_write(state, MB86A16_DCC1, i) < 0) goto err; if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int mb86a16_send_diseqc_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t burst) { struct mb86a16_state *state = fe->demodulator_priv; switch (burst) { case SEC_MINI_A: if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA | MB86A16_DCC1_TBEN | MB86A16_DCC1_TBO) < 0) goto err; if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0) goto err; break; case SEC_MINI_B: if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA | MB86A16_DCC1_TBEN) < 0) goto err; if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0) goto err; break; } return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int mb86a16_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone) { struct mb86a16_state *state = fe->demodulator_priv; switch (tone) { case SEC_TONE_ON: if (mb86a16_write(state, MB86A16_TONEOUT2, 0x00) < 0) goto err; if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA | MB86A16_DCC1_CTOE) < 0) goto err; if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0) goto err; break; case SEC_TONE_OFF: if (mb86a16_write(state, MB86A16_TONEOUT2, 0x04) < 0) goto err; if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA) < 0) goto err; if (mb86a16_write(state, MB86A16_DCCOUT, 0x00) < 0) goto err; break; default: return -EINVAL; } return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static enum dvbfe_search mb86a16_search(struct dvb_frontend *fe, struct dvb_frontend_parameters *p) { struct mb86a16_state *state = fe->demodulator_priv; state->frequency = p->frequency / 1000; state->srate = p->u.qpsk.symbol_rate / 1000; if (!mb86a16_set_fe(state)) { dprintk(verbose, MB86A16_ERROR, 1, "Successfully acquired LOCK"); return DVBFE_ALGO_SEARCH_SUCCESS; } dprintk(verbose, MB86A16_ERROR, 1, "Lock acquisition failed!"); return DVBFE_ALGO_SEARCH_FAILED; } static 
void mb86a16_release(struct dvb_frontend *fe)
{
	struct mb86a16_state *state = fe->demodulator_priv;

	kfree(state);
}

static int mb86a16_init(struct dvb_frontend *fe)
{
	return 0;
}

static int mb86a16_sleep(struct dvb_frontend *fe)
{
	return 0;
}

static int mb86a16_read_ber(struct dvb_frontend *fe, u32 *ber)
{
	u8 ber_mon, ber_tab, ber_lsb, ber_mid, ber_msb, ber_tim, ber_rst;
	u32 timer;

	struct mb86a16_state *state = fe->demodulator_priv;

	*ber = 0;
	if (mb86a16_read(state, MB86A16_BERMON, &ber_mon) != 2)
		goto err;
	if (mb86a16_read(state, MB86A16_BERTAB, &ber_tab) != 2)
		goto err;
	if (mb86a16_read(state, MB86A16_BERLSB, &ber_lsb) != 2)
		goto err;
	if (mb86a16_read(state, MB86A16_BERMID, &ber_mid) != 2)
		goto err;
	if (mb86a16_read(state, MB86A16_BERMSB, &ber_msb) != 2)
		goto err;
	/* BER monitor invalid when BER_EN = 0 */
	if (ber_mon & 0x04) {
		/* coarse, fast calculation */
		*ber = ber_tab & 0x1f;
		dprintk(verbose, MB86A16_DEBUG, 1, "BER coarse=[0x%02x]", *ber);
		if (ber_mon & 0x01) {
			/*
			 * BER_SEL = 1, The monitored BER is the estimated
			 * value with a Reed-Solomon decoder error amount at
			 * the deinterleaver output.
			 * monitored BER is expressed as a 20 bit output in total
			 */
			ber_rst = (ber_mon >> 3) & 0x03;
			*ber = (((ber_msb << 8) | ber_mid) << 8) | ber_lsb;
			if (ber_rst == 0)
				timer = 12500000;
			if (ber_rst == 1)
				timer = 25000000;
			if (ber_rst == 2)
				timer = 50000000;
			if (ber_rst == 3)
				timer = 100000000;
			*ber /= timer;
			dprintk(verbose, MB86A16_DEBUG, 1, "BER fine=[0x%02x]", *ber);
		} else {
			/*
			 * BER_SEL = 0, The monitored BER is the estimated
			 * value with a Viterbi decoder error amount at the
			 * QPSK demodulator output.
			 * monitored BER is expressed as a 24 bit output in total
			 */
			ber_tim = (ber_mon >> 1) & 0x01;
			*ber = (((ber_msb << 8) | ber_mid) << 8) | ber_lsb;
			if (ber_tim == 0)
				timer = 16;
			if (ber_tim == 1)
				timer = 24;
			*ber /= 1 << timer;
			dprintk(verbose, MB86A16_DEBUG, 1, "BER fine=[0x%02x]", *ber);
		}
	}
	return 0;
err:
	dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
	return -EREMOTEIO;
}

static int mb86a16_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
	u8 agcm = 0;
	struct mb86a16_state *state = fe->demodulator_priv;

	*strength = 0;
	if (mb86a16_read(state, MB86A16_AGCM, &agcm) != 2) {
		dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
		return -EREMOTEIO;
	}

	*strength = ((0xff - agcm) * 100) / 256;
	dprintk(verbose, MB86A16_DEBUG, 1, "Signal strength=[%d %%]", (u8) *strength);
	*strength = (0xffff - 0xff) + agcm;

	return 0;
}

struct cnr {
	u8 cn_reg;
	u8 cn_val;
};

static const struct cnr cnr_tab[] = {
	{  35,  2 },
	{  40,  3 },
	{  50,  4 },
	{  60,  5 },
	{  70,  6 },
	{  80,  7 },
	{  92,  8 },
	{ 103,  9 },
	{ 115, 10 },
	{ 138, 12 },
	{ 162, 15 },
	{ 180, 18 },
	{ 185, 19 },
	{ 189, 20 },
	{ 195, 22 },
	{ 199, 24 },
	{ 201, 25 },
	{ 202, 26 },
	{ 203, 27 },
	{ 205, 28 },
	{ 208, 30 }
};

static int mb86a16_read_snr(struct dvb_frontend *fe, u16 *snr)
{
	struct mb86a16_state *state = fe->demodulator_priv;
	int i = 0;
	int low_tide = 2, high_tide = 30, q_level;
	u8 cn;

	*snr = 0;
	if (mb86a16_read(state, 0x26, &cn) != 2) {
		dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
		return -EREMOTEIO;
	}

	for (i = 0; i < ARRAY_SIZE(cnr_tab); i++) {
		if (cn < cnr_tab[i].cn_reg) {
			*snr = cnr_tab[i].cn_val;
			break;
		}
	}
	q_level = (*snr * 100) / (high_tide - low_tide);
	dprintk(verbose, MB86A16_ERROR, 1, "SNR (Quality) = [%d dB], Level=%d %%", *snr, q_level);
	*snr = (0xffff - 0xff) + *snr;

	return 0;
}

static int mb86a16_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
	u8 dist;
	struct mb86a16_state *state = fe->demodulator_priv;

	if (mb86a16_read(state,
MB86A16_DISTMON, &dist) != 2) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } *ucblocks = dist; return 0; } static enum dvbfe_algo mb86a16_frontend_algo(struct dvb_frontend *fe) { return DVBFE_ALGO_CUSTOM; } static struct dvb_frontend_ops mb86a16_ops = { .info = { .name = "Fujitsu MB86A16 DVB-S", .type = FE_QPSK, .frequency_min = 950000, .frequency_max = 2150000, .frequency_stepsize = 3000, .frequency_tolerance = 0, .symbol_rate_min = 1000000, .symbol_rate_max = 45000000, .symbol_rate_tolerance = 500, .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_QPSK | FE_CAN_FEC_AUTO }, .release = mb86a16_release, .get_frontend_algo = mb86a16_frontend_algo, .search = mb86a16_search, .init = mb86a16_init, .sleep = mb86a16_sleep, .read_status = mb86a16_read_status, .read_ber = mb86a16_read_ber, .read_signal_strength = mb86a16_read_signal_strength, .read_snr = mb86a16_read_snr, .read_ucblocks = mb86a16_read_ucblocks, .diseqc_send_master_cmd = mb86a16_send_diseqc_msg, .diseqc_send_burst = mb86a16_send_diseqc_burst, .set_tone = mb86a16_set_tone, }; struct dvb_frontend *mb86a16_attach(const struct mb86a16_config *config, struct i2c_adapter *i2c_adap) { u8 dev_id = 0; struct mb86a16_state *state = NULL; state = kmalloc(sizeof(struct mb86a16_state), GFP_KERNEL); if (state == NULL) goto error; state->config = config; state->i2c_adap = i2c_adap; mb86a16_read(state, 0x7f, &dev_id); if (dev_id != 0xfe) goto error; memcpy(&state->frontend.ops, &mb86a16_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; state->frontend.ops.set_voltage = state->config->set_voltage; return &state->frontend; error: kfree(state); return NULL; } EXPORT_SYMBOL(mb86a16_attach); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Manu Abraham");
gpl-2.0
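The BER path in the mb86a16 record above assembles a 24-bit error count from three registers and divides it by a 2^timer measurement window (the `2 ^ timer` XOR slip is corrected in the code above). A self-contained sketch of that conversion, with hypothetical register values; none of this is driver API:

#include <stdio.h>
#include <stdint.h>

/* Assemble BER_MSB/MID/LSB into one counter and scale by the window
 * length 2^timer_shift, mirroring mb86a16_read_ber()'s BER_SEL = 0 path. */
static uint32_t ber_from_regs(uint8_t msb, uint8_t mid, uint8_t lsb,
                              unsigned timer_shift)
{
        uint32_t count = ((uint32_t)msb << 16) | ((uint32_t)mid << 8) | lsb;

        return count / (1u << timer_shift);
}

int main(void)
{
        /* hypothetical readout: 0x000320 errors over a 2^16-bit window */
        printf("ber = %u\n", ber_from_regs(0x00, 0x03, 0x20, 16));
        return 0;
}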
zeroprobe/ZeroMHL-Overclocked-V3
tools/perf/builtin-evlist.c
3104
1141
/* * Builtin evlist command: Show the list of event selectors present * in a perf.data file. */ #include "builtin.h" #include "util/util.h" #include <linux/list.h> #include "perf.h" #include "util/evlist.h" #include "util/evsel.h" #include "util/parse-events.h" #include "util/parse-options.h" #include "util/session.h" static char const *input_name = "perf.data"; static int __cmd_evlist(void) { struct perf_session *session; struct perf_evsel *pos; session = perf_session__new(input_name, O_RDONLY, 0, false, NULL); if (session == NULL) return -ENOMEM; list_for_each_entry(pos, &session->evlist->entries, node) printf("%s\n", event_name(pos)); perf_session__delete(session); return 0; } static const char * const evlist_usage[] = { "perf evlist [<options>]", NULL }; static const struct option options[] = { OPT_STRING('i', "input", &input_name, "file", "input file name"), OPT_END() }; int cmd_evlist(int argc, const char **argv, const char *prefix __used) { argc = parse_options(argc, argv, options, evlist_usage, 0); if (argc) usage_with_options(evlist_usage, options); return __cmd_evlist(); }
gpl-2.0
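__cmd_evlist() above walks session->evlist->entries with list_for_each_entry(), the kernel's intrusive-list idiom built on container_of(). A minimal userspace sketch of the same pattern over a simplified singly-linked list; the names here are illustrative, not perf API:

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct evsel {
        const char *name;
        struct list_head node;  /* embedded link, as in struct perf_evsel */
};

int main(void)
{
        struct evsel a = { "cycles" }, b = { "instructions" };
        struct list_head head = { &a.node };

        a.node.next = &b.node;
        b.node.next = &head;    /* circular, like the kernel's list */

        /* the equivalent of list_for_each_entry(pos, &head, node) */
        for (struct list_head *p = head.next; p != &head; p = p->next)
                printf("%s\n", container_of(p, struct evsel, node)->name);
        return 0;
}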
sub77/kernel_samsung_matissewifi
drivers/regulator/lp3972.c
4896
16679
/* * Regulator driver for National Semiconductors LP3972 PMIC chip * * Based on lp3971.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/bug.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/regulator/driver.h> #include <linux/regulator/lp3972.h> #include <linux/slab.h> struct lp3972 { struct device *dev; struct mutex io_lock; struct i2c_client *i2c; int num_regulators; struct regulator_dev **rdev; }; /* LP3972 Control Registers */ #define LP3972_SCR_REG 0x07 #define LP3972_OVER1_REG 0x10 #define LP3972_OVSR1_REG 0x11 #define LP3972_OVER2_REG 0x12 #define LP3972_OVSR2_REG 0x13 #define LP3972_VCC1_REG 0x20 #define LP3972_ADTV1_REG 0x23 #define LP3972_ADTV2_REG 0x24 #define LP3972_AVRC_REG 0x25 #define LP3972_CDTC1_REG 0x26 #define LP3972_CDTC2_REG 0x27 #define LP3972_SDTV1_REG 0x29 #define LP3972_SDTV2_REG 0x2A #define LP3972_MDTV1_REG 0x32 #define LP3972_MDTV2_REG 0x33 #define LP3972_L2VCR_REG 0x39 #define LP3972_L34VCR_REG 0x3A #define LP3972_SCR1_REG 0x80 #define LP3972_SCR2_REG 0x81 #define LP3972_OEN3_REG 0x82 #define LP3972_OSR3_REG 0x83 #define LP3972_LOER4_REG 0x84 #define LP3972_B2TV_REG 0x85 #define LP3972_B3TV_REG 0x86 #define LP3972_B32RC_REG 0x87 #define LP3972_ISRA_REG 0x88 #define LP3972_BCCR_REG 0x89 #define LP3972_II1RR_REG 0x8E #define LP3972_II2RR_REG 0x8F #define LP3972_SYS_CONTROL1_REG LP3972_SCR1_REG /* System control register 1 initial value, * bits 5, 6 and 7 are EPROM programmable */ #define SYS_CONTROL1_INIT_VAL 0x02 #define SYS_CONTROL1_INIT_MASK 0x1F #define LP3972_VOL_CHANGE_REG LP3972_VCC1_REG #define LP3972_VOL_CHANGE_FLAG_GO 0x01 #define LP3972_VOL_CHANGE_FLAG_MASK 0x03 /* LDO output enable mask */ #define LP3972_OEN3_L1EN BIT(0) #define LP3972_OVER2_LDO2_EN BIT(2) #define LP3972_OVER2_LDO3_EN BIT(3) #define LP3972_OVER2_LDO4_EN BIT(4) #define LP3972_OVER1_S_EN BIT(2) static const int ldo1_voltage_map[] = { 1700, 1725, 1750, 1775, 1800, 1825, 1850, 1875, 1900, 1925, 1950, 1975, 2000, }; static const int ldo23_voltage_map[] = { 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500, 2600, 2700, 2800, 2900, 3000, 3100, 3200, 3300, }; static const int ldo4_voltage_map[] = { 1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350, 1400, 1500, 1800, 1900, 2500, 2800, 3000, 3300, }; static const int ldo5_voltage_map[] = { 0, 0, 0, 0, 0, 850, 875, 900, 925, 950, 975, 1000, 1025, 1050, 1075, 1100, 1125, 1150, 1175, 1200, 1225, 1250, 1275, 1300, 1325, 1350, 1375, 1400, 1425, 1450, 1475, 1500, }; static const int buck1_voltage_map[] = { 725, 750, 775, 800, 825, 850, 875, 900, 925, 950, 975, 1000, 1025, 1050, 1075, 1100, 1125, 1150, 1175, 1200, 1225, 1250, 1275, 1300, 1325, 1350, 1375, 1400, 1425, 1450, 1475, 1500, }; static const int buck23_voltage_map[] = { 0, 800, 850, 900, 950, 1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350, 1400, 1450, 1500, 1550, 1600, 1650, 1700, 1800, 1900, 2500, 2800, 3000, 3300, }; static const int *ldo_voltage_map[] = { ldo1_voltage_map, ldo23_voltage_map, ldo23_voltage_map, ldo4_voltage_map, ldo5_voltage_map, }; static const int *buck_voltage_map[] = { buck1_voltage_map, buck23_voltage_map, buck23_voltage_map, }; static const int ldo_output_enable_mask[] = { LP3972_OEN3_L1EN, LP3972_OVER2_LDO2_EN, LP3972_OVER2_LDO3_EN, LP3972_OVER2_LDO4_EN, LP3972_OVER1_S_EN, }; static const int ldo_output_enable_addr[] = { LP3972_OEN3_REG, 
LP3972_OVER2_REG, LP3972_OVER2_REG, LP3972_OVER2_REG, LP3972_OVER1_REG, }; static const int ldo_vol_ctl_addr[] = { LP3972_MDTV1_REG, LP3972_L2VCR_REG, LP3972_L34VCR_REG, LP3972_L34VCR_REG, LP3972_SDTV1_REG, }; static const int buck_vol_enable_addr[] = { LP3972_OVER1_REG, LP3972_OEN3_REG, LP3972_OEN3_REG, }; static const int buck_base_addr[] = { LP3972_ADTV1_REG, LP3972_B2TV_REG, LP3972_B3TV_REG, }; #define LP3972_LDO_VOL_VALUE_MAP(x) (ldo_voltage_map[x]) #define LP3972_LDO_OUTPUT_ENABLE_MASK(x) (ldo_output_enable_mask[x]) #define LP3972_LDO_OUTPUT_ENABLE_REG(x) (ldo_output_enable_addr[x]) /* LDO voltage control registers shift: LP3972_LDO1 -> 0, LP3972_LDO2 -> 4 LP3972_LDO3 -> 0, LP3972_LDO4 -> 4 LP3972_LDO5 -> 0 */ #define LP3972_LDO_VOL_CONTR_SHIFT(x) (((x) & 1) << 2) #define LP3972_LDO_VOL_CONTR_REG(x) (ldo_vol_ctl_addr[x]) #define LP3972_LDO_VOL_CHANGE_SHIFT(x) ((x) ? 4 : 6) #define LP3972_LDO_VOL_MASK(x) (((x) % 4) ? 0x0f : 0x1f) #define LP3972_LDO_VOL_MIN_IDX(x) (((x) == 4) ? 0x05 : 0x00) #define LP3972_LDO_VOL_MAX_IDX(x) ((x) ? (((x) == 4) ? 0x1f : 0x0f) : 0x0c) #define LP3972_BUCK_VOL_VALUE_MAP(x) (buck_voltage_map[x]) #define LP3972_BUCK_VOL_ENABLE_REG(x) (buck_vol_enable_addr[x]) #define LP3972_BUCK_VOL1_REG(x) (buck_base_addr[x]) #define LP3972_BUCK_VOL_MASK 0x1f #define LP3972_BUCK_VOL_MIN_IDX(x) ((x) ? 0x01 : 0x00) #define LP3972_BUCK_VOL_MAX_IDX(x) ((x) ? 0x19 : 0x1f) static int lp3972_i2c_read(struct i2c_client *i2c, char reg, int count, u16 *dest) { int ret; if (count != 1) return -EIO; ret = i2c_smbus_read_byte_data(i2c, reg); if (ret < 0) return ret; *dest = ret; return 0; } static int lp3972_i2c_write(struct i2c_client *i2c, char reg, int count, const u16 *src) { if (count != 1) return -EIO; return i2c_smbus_write_byte_data(i2c, reg, *src); } static u8 lp3972_reg_read(struct lp3972 *lp3972, u8 reg) { u16 val = 0; mutex_lock(&lp3972->io_lock); lp3972_i2c_read(lp3972->i2c, reg, 1, &val); dev_dbg(lp3972->dev, "reg read 0x%02x -> 0x%02x\n", (int)reg, (unsigned)val & 0xff); mutex_unlock(&lp3972->io_lock); return val & 0xff; } static int lp3972_set_bits(struct lp3972 *lp3972, u8 reg, u16 mask, u16 val) { u16 tmp; int ret; mutex_lock(&lp3972->io_lock); ret = lp3972_i2c_read(lp3972->i2c, reg, 1, &tmp); tmp = (tmp & ~mask) | val; if (ret == 0) { ret = lp3972_i2c_write(lp3972->i2c, reg, 1, &tmp); dev_dbg(lp3972->dev, "reg write 0x%02x -> 0x%02x\n", (int)reg, (unsigned)val & 0xff); } mutex_unlock(&lp3972->io_lock); return ret; } static int lp3972_ldo_list_voltage(struct regulator_dev *dev, unsigned index) { int ldo = rdev_get_id(dev) - LP3972_LDO1; return 1000 * LP3972_LDO_VOL_VALUE_MAP(ldo)[index]; } static int lp3972_ldo_is_enabled(struct regulator_dev *dev) { struct lp3972 *lp3972 = rdev_get_drvdata(dev); int ldo = rdev_get_id(dev) - LP3972_LDO1; u16 mask = LP3972_LDO_OUTPUT_ENABLE_MASK(ldo); u16 val; val = lp3972_reg_read(lp3972, LP3972_LDO_OUTPUT_ENABLE_REG(ldo)); return !!(val & mask); } static int lp3972_ldo_enable(struct regulator_dev *dev) { struct lp3972 *lp3972 = rdev_get_drvdata(dev); int ldo = rdev_get_id(dev) - LP3972_LDO1; u16 mask = LP3972_LDO_OUTPUT_ENABLE_MASK(ldo); return lp3972_set_bits(lp3972, LP3972_LDO_OUTPUT_ENABLE_REG(ldo), mask, mask); } static int lp3972_ldo_disable(struct regulator_dev *dev) { struct lp3972 *lp3972 = rdev_get_drvdata(dev); int ldo = rdev_get_id(dev) - LP3972_LDO1; u16 mask = LP3972_LDO_OUTPUT_ENABLE_MASK(ldo); return lp3972_set_bits(lp3972, LP3972_LDO_OUTPUT_ENABLE_REG(ldo), mask, 0); } static int lp3972_ldo_get_voltage(struct 
regulator_dev *dev) { struct lp3972 *lp3972 = rdev_get_drvdata(dev); int ldo = rdev_get_id(dev) - LP3972_LDO1; u16 mask = LP3972_LDO_VOL_MASK(ldo); u16 val, reg; reg = lp3972_reg_read(lp3972, LP3972_LDO_VOL_CONTR_REG(ldo)); val = (reg >> LP3972_LDO_VOL_CONTR_SHIFT(ldo)) & mask; return 1000 * LP3972_LDO_VOL_VALUE_MAP(ldo)[val]; } static int lp3972_ldo_set_voltage(struct regulator_dev *dev, int min_uV, int max_uV, unsigned int *selector) { struct lp3972 *lp3972 = rdev_get_drvdata(dev); int ldo = rdev_get_id(dev) - LP3972_LDO1; int min_vol = min_uV / 1000, max_vol = max_uV / 1000; const int *vol_map = LP3972_LDO_VOL_VALUE_MAP(ldo); u16 val; int shift, ret; if (min_vol < vol_map[LP3972_LDO_VOL_MIN_IDX(ldo)] || min_vol > vol_map[LP3972_LDO_VOL_MAX_IDX(ldo)]) return -EINVAL; for (val = LP3972_LDO_VOL_MIN_IDX(ldo); val <= LP3972_LDO_VOL_MAX_IDX(ldo); val++) if (vol_map[val] >= min_vol) break; if (val > LP3972_LDO_VOL_MAX_IDX(ldo) || vol_map[val] > max_vol) return -EINVAL; *selector = val; shift = LP3972_LDO_VOL_CONTR_SHIFT(ldo); ret = lp3972_set_bits(lp3972, LP3972_LDO_VOL_CONTR_REG(ldo), LP3972_LDO_VOL_MASK(ldo) << shift, val << shift); if (ret) return ret; /* * LDO1 and LDO5 support voltage control by either target voltage1 * or target voltage2 register. * We use target voltage1 register for LDO1 and LDO5 in this driver. * We need to update voltage change control register(0x20) to enable * LDO1 and LDO5 to change to their programmed target values. */ switch (ldo) { case LP3972_LDO1: case LP3972_LDO5: shift = LP3972_LDO_VOL_CHANGE_SHIFT(ldo); ret = lp3972_set_bits(lp3972, LP3972_VOL_CHANGE_REG, LP3972_VOL_CHANGE_FLAG_MASK << shift, LP3972_VOL_CHANGE_FLAG_GO << shift); if (ret) return ret; ret = lp3972_set_bits(lp3972, LP3972_VOL_CHANGE_REG, LP3972_VOL_CHANGE_FLAG_MASK << shift, 0); break; } return ret; } static struct regulator_ops lp3972_ldo_ops = { .list_voltage = lp3972_ldo_list_voltage, .is_enabled = lp3972_ldo_is_enabled, .enable = lp3972_ldo_enable, .disable = lp3972_ldo_disable, .get_voltage = lp3972_ldo_get_voltage, .set_voltage = lp3972_ldo_set_voltage, }; static int lp3972_dcdc_list_voltage(struct regulator_dev *dev, unsigned index) { int buck = rdev_get_id(dev) - LP3972_DCDC1; return 1000 * buck_voltage_map[buck][index]; } static int lp3972_dcdc_is_enabled(struct regulator_dev *dev) { struct lp3972 *lp3972 = rdev_get_drvdata(dev); int buck = rdev_get_id(dev) - LP3972_DCDC1; u16 mask = 1 << (buck * 2); u16 val; val = lp3972_reg_read(lp3972, LP3972_BUCK_VOL_ENABLE_REG(buck)); return !!(val & mask); } static int lp3972_dcdc_enable(struct regulator_dev *dev) { struct lp3972 *lp3972 = rdev_get_drvdata(dev); int buck = rdev_get_id(dev) - LP3972_DCDC1; u16 mask = 1 << (buck * 2); u16 val; val = lp3972_set_bits(lp3972, LP3972_BUCK_VOL_ENABLE_REG(buck), mask, mask); return val; } static int lp3972_dcdc_disable(struct regulator_dev *dev) { struct lp3972 *lp3972 = rdev_get_drvdata(dev); int buck = rdev_get_id(dev) - LP3972_DCDC1; u16 mask = 1 << (buck * 2); u16 val; val = lp3972_set_bits(lp3972, LP3972_BUCK_VOL_ENABLE_REG(buck), mask, 0); return val; } static int lp3972_dcdc_get_voltage(struct regulator_dev *dev) { struct lp3972 *lp3972 = rdev_get_drvdata(dev); int buck = rdev_get_id(dev) - LP3972_DCDC1; u16 reg; int val; reg = lp3972_reg_read(lp3972, LP3972_BUCK_VOL1_REG(buck)); reg &= LP3972_BUCK_VOL_MASK; if (reg <= LP3972_BUCK_VOL_MAX_IDX(buck)) val = 1000 * buck_voltage_map[buck][reg]; else { val = 0; dev_warn(&dev->dev, "chip reported incorrect voltage value." 
" reg = %d\n", reg); } return val; } static int lp3972_dcdc_set_voltage(struct regulator_dev *dev, int min_uV, int max_uV, unsigned int *selector) { struct lp3972 *lp3972 = rdev_get_drvdata(dev); int buck = rdev_get_id(dev) - LP3972_DCDC1; int min_vol = min_uV / 1000, max_vol = max_uV / 1000; const int *vol_map = buck_voltage_map[buck]; u16 val; int ret; if (min_vol < vol_map[LP3972_BUCK_VOL_MIN_IDX(buck)] || min_vol > vol_map[LP3972_BUCK_VOL_MAX_IDX(buck)]) return -EINVAL; for (val = LP3972_BUCK_VOL_MIN_IDX(buck); val <= LP3972_BUCK_VOL_MAX_IDX(buck); val++) if (vol_map[val] >= min_vol) break; if (val > LP3972_BUCK_VOL_MAX_IDX(buck) || vol_map[val] > max_vol) return -EINVAL; *selector = val; ret = lp3972_set_bits(lp3972, LP3972_BUCK_VOL1_REG(buck), LP3972_BUCK_VOL_MASK, val); if (ret) return ret; if (buck != 0) return ret; ret = lp3972_set_bits(lp3972, LP3972_VOL_CHANGE_REG, LP3972_VOL_CHANGE_FLAG_MASK, LP3972_VOL_CHANGE_FLAG_GO); if (ret) return ret; return lp3972_set_bits(lp3972, LP3972_VOL_CHANGE_REG, LP3972_VOL_CHANGE_FLAG_MASK, 0); } static struct regulator_ops lp3972_dcdc_ops = { .list_voltage = lp3972_dcdc_list_voltage, .is_enabled = lp3972_dcdc_is_enabled, .enable = lp3972_dcdc_enable, .disable = lp3972_dcdc_disable, .get_voltage = lp3972_dcdc_get_voltage, .set_voltage = lp3972_dcdc_set_voltage, }; static struct regulator_desc regulators[] = { { .name = "LDO1", .id = LP3972_LDO1, .ops = &lp3972_ldo_ops, .n_voltages = ARRAY_SIZE(ldo1_voltage_map), .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO2", .id = LP3972_LDO2, .ops = &lp3972_ldo_ops, .n_voltages = ARRAY_SIZE(ldo23_voltage_map), .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO3", .id = LP3972_LDO3, .ops = &lp3972_ldo_ops, .n_voltages = ARRAY_SIZE(ldo23_voltage_map), .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO4", .id = LP3972_LDO4, .ops = &lp3972_ldo_ops, .n_voltages = ARRAY_SIZE(ldo4_voltage_map), .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO5", .id = LP3972_LDO5, .ops = &lp3972_ldo_ops, .n_voltages = ARRAY_SIZE(ldo5_voltage_map), .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "DCDC1", .id = LP3972_DCDC1, .ops = &lp3972_dcdc_ops, .n_voltages = ARRAY_SIZE(buck1_voltage_map), .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "DCDC2", .id = LP3972_DCDC2, .ops = &lp3972_dcdc_ops, .n_voltages = ARRAY_SIZE(buck23_voltage_map), .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "DCDC3", .id = LP3972_DCDC3, .ops = &lp3972_dcdc_ops, .n_voltages = ARRAY_SIZE(buck23_voltage_map), .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, }; static int __devinit setup_regulators(struct lp3972 *lp3972, struct lp3972_platform_data *pdata) { int i, err; lp3972->num_regulators = pdata->num_regulators; lp3972->rdev = kcalloc(pdata->num_regulators, sizeof(struct regulator_dev *), GFP_KERNEL); if (!lp3972->rdev) { err = -ENOMEM; goto err_nomem; } /* Instantiate the regulators */ for (i = 0; i < pdata->num_regulators; i++) { struct lp3972_regulator_subdev *reg = &pdata->regulators[i]; lp3972->rdev[i] = regulator_register(&regulators[reg->id], lp3972->dev, reg->initdata, lp3972, NULL); if (IS_ERR(lp3972->rdev[i])) { err = PTR_ERR(lp3972->rdev[i]); dev_err(lp3972->dev, "regulator init failed: %d\n", err); goto error; } } return 0; error: while (--i >= 0) regulator_unregister(lp3972->rdev[i]); kfree(lp3972->rdev); lp3972->rdev = NULL; err_nomem: return err; } static int __devinit lp3972_i2c_probe(struct i2c_client *i2c, const struct 
i2c_device_id *id) { struct lp3972 *lp3972; struct lp3972_platform_data *pdata = i2c->dev.platform_data; int ret; u16 val; if (!pdata) { dev_dbg(&i2c->dev, "No platform init data supplied\n"); return -ENODEV; } lp3972 = kzalloc(sizeof(struct lp3972), GFP_KERNEL); if (!lp3972) return -ENOMEM; lp3972->i2c = i2c; lp3972->dev = &i2c->dev; mutex_init(&lp3972->io_lock); /* Detect LP3972 */ ret = lp3972_i2c_read(i2c, LP3972_SYS_CONTROL1_REG, 1, &val); if (ret == 0 && (val & SYS_CONTROL1_INIT_MASK) != SYS_CONTROL1_INIT_VAL) { ret = -ENODEV; dev_err(&i2c->dev, "chip reported: val = 0x%x\n", val); } if (ret < 0) { dev_err(&i2c->dev, "failed to detect device. ret = %d\n", ret); goto err_detect; } ret = setup_regulators(lp3972, pdata); if (ret < 0) goto err_detect; i2c_set_clientdata(i2c, lp3972); return 0; err_detect: kfree(lp3972); return ret; } static int __devexit lp3972_i2c_remove(struct i2c_client *i2c) { struct lp3972 *lp3972 = i2c_get_clientdata(i2c); int i; for (i = 0; i < lp3972->num_regulators; i++) regulator_unregister(lp3972->rdev[i]); kfree(lp3972->rdev); kfree(lp3972); return 0; } static const struct i2c_device_id lp3972_i2c_id[] = { { "lp3972", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, lp3972_i2c_id); static struct i2c_driver lp3972_i2c_driver = { .driver = { .name = "lp3972", .owner = THIS_MODULE, }, .probe = lp3972_i2c_probe, .remove = __devexit_p(lp3972_i2c_remove), .id_table = lp3972_i2c_id, }; static int __init lp3972_module_init(void) { return i2c_add_driver(&lp3972_i2c_driver); } subsys_initcall(lp3972_module_init); static void __exit lp3972_module_exit(void) { i2c_del_driver(&lp3972_i2c_driver); } module_exit(lp3972_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Axel Lin <axel.lin@gmail.com>"); MODULE_DESCRIPTION("LP3972 PMIC driver");
gpl-2.0
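The LDO and DCDC set_voltage paths in the lp3972 record above share one selection rule: take the lowest selector whose mapped voltage is >= min_uV, and reject it if it overshoots max_uV. A standalone sketch of that rule reusing the driver's ldo4 map; pick_selector() is an illustrative name, and -1 stands in for -EINVAL:

#include <stdio.h>

/* ldo4_voltage_map from the driver above, in mV */
static const int ldo4_map[] = {
        1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350,
        1400, 1500, 1800, 1900, 2500, 2800, 3000, 3300,
};

static int pick_selector(const int *map, int min_idx, int max_idx,
                         int min_mv, int max_mv)
{
        int sel;

        if (min_mv < map[min_idx] || min_mv > map[max_idx])
                return -1;
        for (sel = min_idx; sel <= max_idx; sel++)
                if (map[sel] >= min_mv)
                        break;
        return (sel > max_idx || map[sel] > max_mv) ? -1 : sel;
}

int main(void)
{
        /* a 1450..1550 mV request resolves to selector 9 (1500 mV) */
        printf("sel = %d\n", pick_selector(ldo4_map, 0, 15, 1450, 1550));
        return 0;
}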
Hashcode/android_kernel_samsung_hlte
drivers/usb/misc/legousbtower.c
4896
29075
/* * LEGO USB Tower driver * * Copyright (C) 2003 David Glance <davidgsf@sourceforge.net> * 2001-2004 Juergen Stuber <starblue@users.sourceforge.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * derived from USB Skeleton driver - 0.5 * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) * * History: * * 2001-10-13 - 0.1 js * - first version * 2001-11-03 - 0.2 js * - simplified buffering, one-shot URBs for writing * 2001-11-10 - 0.3 js * - removed IOCTL (setting power/mode is more complicated, postponed) * 2001-11-28 - 0.4 js * - added vendor commands for mode of operation and power level in open * 2001-12-04 - 0.5 js * - set IR mode by default (by oversight 0.4 set VLL mode) * 2002-01-11 - 0.5? pcchan * - make read buffer reusable and work around bytes_to_write issue between * uhci and legusbtower * 2002-09-23 - 0.52 david (david@csse.uwa.edu.au) * - imported into lejos project * - changed wake_up to wake_up_interruptible * - changed to use lego0 rather than tower0 * - changed dbg() to use __func__ rather than deprecated __func__ * 2003-01-12 - 0.53 david (david@csse.uwa.edu.au) * - changed read and write to write everything or * timeout (from a patch by Chris Riesen and Brett Thaeler driver) * - added ioctl functionality to set timeouts * 2003-07-18 - 0.54 davidgsf (david@csse.uwa.edu.au) * - initial import into LegoUSB project * - merge of existing LegoUSB.c driver * 2003-07-18 - 0.56 davidgsf (david@csse.uwa.edu.au) * - port to 2.6 style driver * 2004-02-29 - 0.6 Juergen Stuber <starblue@users.sourceforge.net> * - fix locking * - unlink read URBs which are no longer needed * - allow increased buffer size, eliminates need for timeout on write * - have read URB running continuously * - added poll * - forbid seeking * - added nonblocking I/O * - changed back __func__ to __func__ * - read and log tower firmware version * - reset tower on probe, avoids failure of first write * 2004-03-09 - 0.7 Juergen Stuber <starblue@users.sourceforge.net> * - timeout read now only after inactivity, shorten default accordingly * 2004-03-11 - 0.8 Juergen Stuber <starblue@users.sourceforge.net> * - log major, minor instead of possibly confusing device filename * - whitespace cleanup * 2004-03-12 - 0.9 Juergen Stuber <starblue@users.sourceforge.net> * - normalize whitespace in debug messages * - take care about endianness in control message responses * 2004-03-13 - 0.91 Juergen Stuber <starblue@users.sourceforge.net> * - make default intervals longer to accommodate current EHCI driver * 2004-03-19 - 0.92 Juergen Stuber <starblue@users.sourceforge.net> * - replaced atomic_t by memory barriers * 2004-04-21 - 0.93 Juergen Stuber <starblue@users.sourceforge.net> * - wait for completion of write urb in release (needed for remotecontrol) * - corrected poll for write direction (missing negation) * 2004-04-22 - 0.94 Juergen Stuber <starblue@users.sourceforge.net> * - make device locking interruptible * 2004-04-30 - 0.95 Juergen Stuber <starblue@users.sourceforge.net> * - check for valid udev on resubmitting and unlinking urbs * 2004-08-03 - 0.96 Juergen Stuber <starblue@users.sourceforge.net> * - move reset into open to clean out spurious data */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/completion.h> #include 
<linux/mutex.h> #include <asm/uaccess.h> #include <linux/usb.h> #include <linux/poll.h> #ifdef CONFIG_USB_DEBUG static int debug = 4; #else static int debug = 0; #endif /* Use our own dbg macro */ #undef dbg #define dbg(lvl, format, arg...) \ do { \ if (debug >= lvl) \ printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \ } while (0) /* Version Information */ #define DRIVER_VERSION "v0.96" #define DRIVER_AUTHOR "Juergen Stuber <starblue@sourceforge.net>" #define DRIVER_DESC "LEGO USB Tower Driver" /* Module parameters */ module_param(debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug enabled or not"); /* The defaults are chosen to work with the latest versions of leJOS and NQC. */ /* Some legacy software likes to receive packets in one piece. * In this case read_buffer_size should exceed the maximal packet length * (417 for datalog uploads), and packet_timeout should be set. */ static int read_buffer_size = 480; module_param(read_buffer_size, int, 0); MODULE_PARM_DESC(read_buffer_size, "Read buffer size"); /* Some legacy software likes to send packets in one piece. * In this case write_buffer_size should exceed the maximal packet length * (417 for firmware and program downloads). * A problem with long writes is that the following read may time out * if the software is not prepared to wait long enough. */ static int write_buffer_size = 480; module_param(write_buffer_size, int, 0); MODULE_PARM_DESC(write_buffer_size, "Write buffer size"); /* Some legacy software expects reads to contain whole LASM packets. * To achieve this, characters which arrive before a packet timeout * occurs will be returned in a single read operation. * A problem with long reads is that the software may time out * if it is not prepared to wait long enough. * The packet timeout should be greater than the time between the * reception of subsequent characters, which should arrive about * every 5ms for the standard 2400 baud. * Set it to 0 to disable. */ static int packet_timeout = 50; module_param(packet_timeout, int, 0); MODULE_PARM_DESC(packet_timeout, "Packet timeout in ms"); /* Some legacy software expects blocking reads to time out. * Timeout occurs after the specified time of read and write inactivity. * Set it to 0 to disable. */ static int read_timeout = 200; module_param(read_timeout, int, 0); MODULE_PARM_DESC(read_timeout, "Read timeout in ms"); /* As of kernel version 2.6.4 ehci-hcd uses an * "only one interrupt transfer per frame" shortcut * to simplify the scheduling of periodic transfers. * This conflicts with our standard 1ms intervals for in and out URBs. * We use default intervals of 2ms for in and 8ms for out transfers, * which is fast enough for 2400 baud and allows a small additional load. * Increase the interval to allow more devices that do interrupt transfers, * or set to 0 to use the standard interval from the endpoint descriptors. 
*/ static int interrupt_in_interval = 2; module_param(interrupt_in_interval, int, 0); MODULE_PARM_DESC(interrupt_in_interval, "Interrupt in interval in ms"); static int interrupt_out_interval = 8; module_param(interrupt_out_interval, int, 0); MODULE_PARM_DESC(interrupt_out_interval, "Interrupt out interval in ms"); /* Define these values to match your device */ #define LEGO_USB_TOWER_VENDOR_ID 0x0694 #define LEGO_USB_TOWER_PRODUCT_ID 0x0001 /* Vendor requests */ #define LEGO_USB_TOWER_REQUEST_RESET 0x04 #define LEGO_USB_TOWER_REQUEST_GET_VERSION 0xFD struct tower_reset_reply { __le16 size; /* little-endian */ __u8 err_code; __u8 spare; } __attribute__ ((packed)); struct tower_get_version_reply { __le16 size; /* little-endian */ __u8 err_code; __u8 spare; __u8 major; __u8 minor; __le16 build_no; /* little-endian */ } __attribute__ ((packed)); /* table of devices that work with this driver */ static const struct usb_device_id tower_table[] = { { USB_DEVICE(LEGO_USB_TOWER_VENDOR_ID, LEGO_USB_TOWER_PRODUCT_ID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, tower_table); static DEFINE_MUTEX(open_disc_mutex); #define LEGO_USB_TOWER_MINOR_BASE 160 /* Structure to hold all of our device specific stuff */ struct lego_usb_tower { struct mutex lock; /* locks this structure */ struct usb_device* udev; /* save off the usb device pointer */ unsigned char minor; /* the starting minor number for this device */ int open_count; /* number of times this port has been opened */ char* read_buffer; size_t read_buffer_length; /* this much came in */ size_t read_packet_length; /* this much will be returned on read */ spinlock_t read_buffer_lock; int packet_timeout_jiffies; unsigned long read_last_arrival; wait_queue_head_t read_wait; wait_queue_head_t write_wait; char* interrupt_in_buffer; struct usb_endpoint_descriptor* interrupt_in_endpoint; struct urb* interrupt_in_urb; int interrupt_in_interval; int interrupt_in_running; int interrupt_in_done; char* interrupt_out_buffer; struct usb_endpoint_descriptor* interrupt_out_endpoint; struct urb* interrupt_out_urb; int interrupt_out_interval; int interrupt_out_busy; }; /* local function prototypes */ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count, loff_t *ppos); static ssize_t tower_write (struct file *file, const char __user *buffer, size_t count, loff_t *ppos); static inline void tower_delete (struct lego_usb_tower *dev); static int tower_open (struct inode *inode, struct file *file); static int tower_release (struct inode *inode, struct file *file); static unsigned int tower_poll (struct file *file, poll_table *wait); static loff_t tower_llseek (struct file *file, loff_t off, int whence); static void tower_abort_transfers (struct lego_usb_tower *dev); static void tower_check_for_read_packet (struct lego_usb_tower *dev); static void tower_interrupt_in_callback (struct urb *urb); static void tower_interrupt_out_callback (struct urb *urb); static int tower_probe (struct usb_interface *interface, const struct usb_device_id *id); static void tower_disconnect (struct usb_interface *interface); /* file operations needed when we register this driver */ static const struct file_operations tower_fops = { .owner = THIS_MODULE, .read = tower_read, .write = tower_write, .open = tower_open, .release = tower_release, .poll = tower_poll, .llseek = tower_llseek, }; static char *legousbtower_devnode(struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev)); } /* * usb class driver info in order to get a 
minor number from the usb core, * and to have the device registered with the driver core */ static struct usb_class_driver tower_class = { .name = "legousbtower%d", .devnode = legousbtower_devnode, .fops = &tower_fops, .minor_base = LEGO_USB_TOWER_MINOR_BASE, }; /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver tower_driver = { .name = "legousbtower", .probe = tower_probe, .disconnect = tower_disconnect, .id_table = tower_table, }; /** * lego_usb_tower_debug_data */ static inline void lego_usb_tower_debug_data (int level, const char *function, int size, const unsigned char *data) { int i; if (debug < level) return; printk (KERN_DEBUG "%s: %s - length = %d, data = ", __FILE__, function, size); for (i = 0; i < size; ++i) { printk ("%.2x ", data[i]); } printk ("\n"); } /** * tower_delete */ static inline void tower_delete (struct lego_usb_tower *dev) { dbg(2, "%s: enter", __func__); tower_abort_transfers (dev); /* free data structures */ usb_free_urb(dev->interrupt_in_urb); usb_free_urb(dev->interrupt_out_urb); kfree (dev->read_buffer); kfree (dev->interrupt_in_buffer); kfree (dev->interrupt_out_buffer); kfree (dev); dbg(2, "%s: leave", __func__); } /** * tower_open */ static int tower_open (struct inode *inode, struct file *file) { struct lego_usb_tower *dev = NULL; int subminor; int retval = 0; struct usb_interface *interface; struct tower_reset_reply reset_reply; int result; dbg(2, "%s: enter", __func__); nonseekable_open(inode, file); subminor = iminor(inode); interface = usb_find_interface (&tower_driver, subminor); if (!interface) { err ("%s - error, can't find device for minor %d", __func__, subminor); retval = -ENODEV; goto exit; } mutex_lock(&open_disc_mutex); dev = usb_get_intfdata(interface); if (!dev) { mutex_unlock(&open_disc_mutex); retval = -ENODEV; goto exit; } /* lock this device */ if (mutex_lock_interruptible(&dev->lock)) { mutex_unlock(&open_disc_mutex); retval = -ERESTARTSYS; goto exit; } /* allow opening only once */ if (dev->open_count) { mutex_unlock(&open_disc_mutex); retval = -EBUSY; goto unlock_exit; } dev->open_count = 1; mutex_unlock(&open_disc_mutex); /* reset the tower */ result = usb_control_msg (dev->udev, usb_rcvctrlpipe(dev->udev, 0), LEGO_USB_TOWER_REQUEST_RESET, USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE, 0, 0, &reset_reply, sizeof(reset_reply), 1000); if (result < 0) { err("LEGO USB Tower reset control request failed"); retval = result; goto unlock_exit; } /* initialize in direction */ dev->read_buffer_length = 0; dev->read_packet_length = 0; usb_fill_int_urb (dev->interrupt_in_urb, dev->udev, usb_rcvintpipe(dev->udev, dev->interrupt_in_endpoint->bEndpointAddress), dev->interrupt_in_buffer, usb_endpoint_maxp(dev->interrupt_in_endpoint), tower_interrupt_in_callback, dev, dev->interrupt_in_interval); dev->interrupt_in_running = 1; dev->interrupt_in_done = 0; mb(); retval = usb_submit_urb (dev->interrupt_in_urb, GFP_KERNEL); if (retval) { err("Couldn't submit interrupt_in_urb %d", retval); dev->interrupt_in_running = 0; dev->open_count = 0; goto unlock_exit; } /* save device in the file's private structure */ file->private_data = dev; unlock_exit: mutex_unlock(&dev->lock); exit: dbg(2, "%s: leave, return value %d ", __func__, retval); return retval; } /** * tower_release */ static int tower_release (struct inode *inode, struct file *file) { struct lego_usb_tower *dev; int retval = 0; dbg(2, "%s: enter", __func__); dev = file->private_data; if (dev == NULL) { dbg(1, "%s: object is NULL", __func__); 
retval = -ENODEV; goto exit_nolock; } mutex_lock(&open_disc_mutex); if (mutex_lock_interruptible(&dev->lock)) { retval = -ERESTARTSYS; goto exit; } if (dev->open_count != 1) { dbg(1, "%s: device not opened exactly once", __func__); retval = -ENODEV; goto unlock_exit; } if (dev->udev == NULL) { /* the device was unplugged before the file was released */ /* unlock here as tower_delete frees dev */ mutex_unlock(&dev->lock); tower_delete (dev); goto exit; } /* wait until write transfer is finished */ if (dev->interrupt_out_busy) { wait_event_interruptible_timeout (dev->write_wait, !dev->interrupt_out_busy, 2 * HZ); } tower_abort_transfers (dev); dev->open_count = 0; unlock_exit: mutex_unlock(&dev->lock); exit: mutex_unlock(&open_disc_mutex); exit_nolock: dbg(2, "%s: leave, return value %d", __func__, retval); return retval; } /** * tower_abort_transfers * aborts transfers and frees associated data structures */ static void tower_abort_transfers (struct lego_usb_tower *dev) { dbg(2, "%s: enter", __func__); if (dev == NULL) { dbg(1, "%s: dev is null", __func__); goto exit; } /* shutdown transfer */ if (dev->interrupt_in_running) { dev->interrupt_in_running = 0; mb(); if (dev->udev) usb_kill_urb (dev->interrupt_in_urb); } if (dev->interrupt_out_busy && dev->udev) usb_kill_urb(dev->interrupt_out_urb); exit: dbg(2, "%s: leave", __func__); } /** * tower_check_for_read_packet * * To get correct semantics for signals and non-blocking I/O * with packetizing we pretend not to see any data in the read buffer * until it has been there unchanged for at least * dev->packet_timeout_jiffies, or until the buffer is full. */ static void tower_check_for_read_packet (struct lego_usb_tower *dev) { spin_lock_irq (&dev->read_buffer_lock); if (!packet_timeout || time_after(jiffies, dev->read_last_arrival + dev->packet_timeout_jiffies) || dev->read_buffer_length == read_buffer_size) { dev->read_packet_length = dev->read_buffer_length; } dev->interrupt_in_done = 0; spin_unlock_irq (&dev->read_buffer_lock); } /** * tower_poll */ static unsigned int tower_poll (struct file *file, poll_table *wait) { struct lego_usb_tower *dev; unsigned int mask = 0; dbg(2, "%s: enter", __func__); dev = file->private_data; if (!dev->udev) return POLLERR | POLLHUP; poll_wait(file, &dev->read_wait, wait); poll_wait(file, &dev->write_wait, wait); tower_check_for_read_packet(dev); if (dev->read_packet_length > 0) { mask |= POLLIN | POLLRDNORM; } if (!dev->interrupt_out_busy) { mask |= POLLOUT | POLLWRNORM; } dbg(2, "%s: leave, mask = %d", __func__, mask); return mask; } /** * tower_llseek */ static loff_t tower_llseek (struct file *file, loff_t off, int whence) { return -ESPIPE; /* unseekable */ } /** * tower_read */ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct lego_usb_tower *dev; size_t bytes_to_read; int i; int retval = 0; unsigned long timeout = 0; dbg(2, "%s: enter, count = %Zd", __func__, count); dev = file->private_data; /* lock this object */ if (mutex_lock_interruptible(&dev->lock)) { retval = -ERESTARTSYS; goto exit; } /* verify that the device wasn't unplugged */ if (dev->udev == NULL) { retval = -ENODEV; err("No device or device unplugged %d", retval); goto unlock_exit; } /* verify that we actually have some data to read */ if (count == 0) { dbg(1, "%s: read request of 0 bytes", __func__); goto unlock_exit; } if (read_timeout) { timeout = jiffies + read_timeout * HZ / 1000; } /* wait for data */ tower_check_for_read_packet (dev); while (dev->read_packet_length == 0) { if 
(file->f_flags & O_NONBLOCK) { retval = -EAGAIN; goto unlock_exit; } retval = wait_event_interruptible_timeout(dev->read_wait, dev->interrupt_in_done, dev->packet_timeout_jiffies); if (retval < 0) { goto unlock_exit; } /* reset read timeout during read or write activity */ if (read_timeout && (dev->read_buffer_length || dev->interrupt_out_busy)) { timeout = jiffies + read_timeout * HZ / 1000; } /* check for read timeout */ if (read_timeout && time_after (jiffies, timeout)) { retval = -ETIMEDOUT; goto unlock_exit; } tower_check_for_read_packet (dev); } /* copy the data from read_buffer into userspace */ bytes_to_read = min(count, dev->read_packet_length); if (copy_to_user (buffer, dev->read_buffer, bytes_to_read)) { retval = -EFAULT; goto unlock_exit; } spin_lock_irq (&dev->read_buffer_lock); dev->read_buffer_length -= bytes_to_read; dev->read_packet_length -= bytes_to_read; for (i=0; i<dev->read_buffer_length; i++) { dev->read_buffer[i] = dev->read_buffer[i+bytes_to_read]; } spin_unlock_irq (&dev->read_buffer_lock); retval = bytes_to_read; unlock_exit: /* unlock the device */ mutex_unlock(&dev->lock); exit: dbg(2, "%s: leave, return value %d", __func__, retval); return retval; } /** * tower_write */ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { struct lego_usb_tower *dev; size_t bytes_to_write; int retval = 0; dbg(2, "%s: enter, count = %Zd", __func__, count); dev = file->private_data; /* lock this object */ if (mutex_lock_interruptible(&dev->lock)) { retval = -ERESTARTSYS; goto exit; } /* verify that the device wasn't unplugged */ if (dev->udev == NULL) { retval = -ENODEV; err("No device or device unplugged %d", retval); goto unlock_exit; } /* verify that we actually have some data to write */ if (count == 0) { dbg(1, "%s: write request of 0 bytes", __func__); goto unlock_exit; } /* wait until previous transfer is finished */ while (dev->interrupt_out_busy) { if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; goto unlock_exit; } retval = wait_event_interruptible (dev->write_wait, !dev->interrupt_out_busy); if (retval) { goto unlock_exit; } } /* write the data into interrupt_out_buffer from userspace */ bytes_to_write = min_t(int, count, write_buffer_size); dbg(4, "%s: count = %Zd, bytes_to_write = %Zd", __func__, count, bytes_to_write); if (copy_from_user (dev->interrupt_out_buffer, buffer, bytes_to_write)) { retval = -EFAULT; goto unlock_exit; } /* send off the urb */ usb_fill_int_urb(dev->interrupt_out_urb, dev->udev, usb_sndintpipe(dev->udev, dev->interrupt_out_endpoint->bEndpointAddress), dev->interrupt_out_buffer, bytes_to_write, tower_interrupt_out_callback, dev, dev->interrupt_out_interval); dev->interrupt_out_busy = 1; wmb(); retval = usb_submit_urb (dev->interrupt_out_urb, GFP_KERNEL); if (retval) { dev->interrupt_out_busy = 0; err("Couldn't submit interrupt_out_urb %d", retval); goto unlock_exit; } retval = bytes_to_write; unlock_exit: /* unlock the device */ mutex_unlock(&dev->lock); exit: dbg(2, "%s: leave, return value %d", __func__, retval); return retval; } /** * tower_interrupt_in_callback */ static void tower_interrupt_in_callback (struct urb *urb) { struct lego_usb_tower *dev = urb->context; int status = urb->status; int retval; dbg(4, "%s: enter, status %d", __func__, status); lego_usb_tower_debug_data(5, __func__, urb->actual_length, urb->transfer_buffer); if (status) { if (status == -ENOENT || status == -ECONNRESET || status == -ESHUTDOWN) { goto exit; } else { dbg(1, "%s: nonzero status received: %d", 
__func__, status); goto resubmit; /* maybe we can recover */ } } if (urb->actual_length > 0) { spin_lock (&dev->read_buffer_lock); if (dev->read_buffer_length + urb->actual_length < read_buffer_size) { memcpy (dev->read_buffer + dev->read_buffer_length, dev->interrupt_in_buffer, urb->actual_length); dev->read_buffer_length += urb->actual_length; dev->read_last_arrival = jiffies; dbg(3, "%s: received %d bytes", __func__, urb->actual_length); } else { printk(KERN_WARNING "%s: read_buffer overflow, %d bytes dropped", __func__, urb->actual_length); } spin_unlock (&dev->read_buffer_lock); } resubmit: /* resubmit if we're still running */ if (dev->interrupt_in_running && dev->udev) { retval = usb_submit_urb (dev->interrupt_in_urb, GFP_ATOMIC); if (retval) { err("%s: usb_submit_urb failed (%d)", __func__, retval); } } exit: dev->interrupt_in_done = 1; wake_up_interruptible (&dev->read_wait); lego_usb_tower_debug_data(5, __func__, urb->actual_length, urb->transfer_buffer); dbg(4, "%s: leave, status %d", __func__, status); } /** * tower_interrupt_out_callback */ static void tower_interrupt_out_callback (struct urb *urb) { struct lego_usb_tower *dev = urb->context; int status = urb->status; dbg(4, "%s: enter, status %d", __func__, status); lego_usb_tower_debug_data(5, __func__, urb->actual_length, urb->transfer_buffer); /* sync/async unlink faults aren't errors */ if (status && !(status == -ENOENT || status == -ECONNRESET || status == -ESHUTDOWN)) { dbg(1, "%s - nonzero write bulk status received: %d", __func__, status); } dev->interrupt_out_busy = 0; wake_up_interruptible(&dev->write_wait); lego_usb_tower_debug_data(5, __func__, urb->actual_length, urb->transfer_buffer); dbg(4, "%s: leave, status %d", __func__, status); } /** * tower_probe * * Called by the usb core when a new device is connected that it thinks * this driver might be interested in. 
*/ static int tower_probe (struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct lego_usb_tower *dev = NULL; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor* endpoint; struct tower_get_version_reply get_version_reply; int i; int retval = -ENOMEM; int result; dbg(2, "%s: enter", __func__); if (udev == NULL) dev_info(&interface->dev, "udev is NULL.\n"); /* allocate memory for our device state and initialize it */ dev = kmalloc (sizeof(struct lego_usb_tower), GFP_KERNEL); if (dev == NULL) { err ("Out of memory"); goto exit; } mutex_init(&dev->lock); dev->udev = udev; dev->open_count = 0; dev->read_buffer = NULL; dev->read_buffer_length = 0; dev->read_packet_length = 0; spin_lock_init (&dev->read_buffer_lock); dev->packet_timeout_jiffies = packet_timeout * HZ / 1000; dev->read_last_arrival = jiffies; init_waitqueue_head (&dev->read_wait); init_waitqueue_head (&dev->write_wait); dev->interrupt_in_buffer = NULL; dev->interrupt_in_endpoint = NULL; dev->interrupt_in_urb = NULL; dev->interrupt_in_running = 0; dev->interrupt_in_done = 0; dev->interrupt_out_buffer = NULL; dev->interrupt_out_endpoint = NULL; dev->interrupt_out_urb = NULL; dev->interrupt_out_busy = 0; iface_desc = interface->cur_altsetting; /* set up the endpoint information */ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (usb_endpoint_xfer_int(endpoint)) { if (usb_endpoint_dir_in(endpoint)) dev->interrupt_in_endpoint = endpoint; else dev->interrupt_out_endpoint = endpoint; } } if(dev->interrupt_in_endpoint == NULL) { err("interrupt in endpoint not found"); goto error; } if (dev->interrupt_out_endpoint == NULL) { err("interrupt out endpoint not found"); goto error; } dev->read_buffer = kmalloc (read_buffer_size, GFP_KERNEL); if (!dev->read_buffer) { err("Couldn't allocate read_buffer"); goto error; } dev->interrupt_in_buffer = kmalloc (usb_endpoint_maxp(dev->interrupt_in_endpoint), GFP_KERNEL); if (!dev->interrupt_in_buffer) { err("Couldn't allocate interrupt_in_buffer"); goto error; } dev->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->interrupt_in_urb) { err("Couldn't allocate interrupt_in_urb"); goto error; } dev->interrupt_out_buffer = kmalloc (write_buffer_size, GFP_KERNEL); if (!dev->interrupt_out_buffer) { err("Couldn't allocate interrupt_out_buffer"); goto error; } dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->interrupt_out_urb) { err("Couldn't allocate interrupt_out_urb"); goto error; } dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval; dev->interrupt_out_interval = interrupt_out_interval ? 
interrupt_out_interval : dev->interrupt_out_endpoint->bInterval; /* we can register the device now, as it is ready */ usb_set_intfdata (interface, dev); retval = usb_register_dev (interface, &tower_class); if (retval) { /* something prevented us from registering this driver */ err ("Not able to get a minor for this device."); usb_set_intfdata (interface, NULL); goto error; } dev->minor = interface->minor; /* let the user know what node this device is now attached to */ dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major " "%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE), USB_MAJOR, dev->minor); /* get the firmware version and log it */ result = usb_control_msg (udev, usb_rcvctrlpipe(udev, 0), LEGO_USB_TOWER_REQUEST_GET_VERSION, USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE, 0, 0, &get_version_reply, sizeof(get_version_reply), 1000); if (result < 0) { err("LEGO USB Tower get version control request failed"); retval = result; goto error; } dev_info(&interface->dev, "LEGO USB Tower firmware version is %d.%d " "build %d\n", get_version_reply.major, get_version_reply.minor, le16_to_cpu(get_version_reply.build_no)); exit: dbg(2, "%s: leave, return value 0x%.8lx (dev)", __func__, (long) dev); return retval; error: tower_delete(dev); return retval; } /** * tower_disconnect * * Called by the usb core when the device is removed from the system. */ static void tower_disconnect (struct usb_interface *interface) { struct lego_usb_tower *dev; int minor; dbg(2, "%s: enter", __func__); dev = usb_get_intfdata (interface); mutex_lock(&open_disc_mutex); usb_set_intfdata (interface, NULL); minor = dev->minor; /* give back our minor */ usb_deregister_dev (interface, &tower_class); mutex_lock(&dev->lock); mutex_unlock(&open_disc_mutex); /* if the device is not opened, then we clean up right now */ if (!dev->open_count) { mutex_unlock(&dev->lock); tower_delete (dev); } else { dev->udev = NULL; /* wake up pollers */ wake_up_interruptible_all(&dev->read_wait); wake_up_interruptible_all(&dev->write_wait); mutex_unlock(&dev->lock); } dev_info(&interface->dev, "LEGO USB Tower #%d now disconnected\n", (minor - LEGO_USB_TOWER_MINOR_BASE)); dbg(2, "%s: leave", __func__); } module_usb_driver(tower_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); #ifdef MODULE_LICENSE MODULE_LICENSE("GPL"); #endif
gpl-2.0
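tower_check_for_read_packet() above hides buffered bytes until they have sat unchanged for packet_timeout_jiffies or the buffer is full, so legacy software receives whole LASM packets per read. A userspace sketch of that gating rule, with a millisecond counter standing in for jiffies/time_after(); all names are illustrative:

#include <stdio.h>

struct tower {
        unsigned buffer_len;    /* bytes received so far */
        unsigned packet_len;    /* bytes a read may return */
        long last_arrival_ms;   /* when the newest byte arrived */
};

/* Expose the buffer as a packet only once idle long enough, or when full. */
static void check_for_packet(struct tower *t, long now_ms,
                             long timeout_ms, unsigned buffer_size)
{
        if (!timeout_ms ||
            now_ms - t->last_arrival_ms > timeout_ms ||
            t->buffer_len == buffer_size)
                t->packet_len = t->buffer_len;
}

int main(void)
{
        struct tower t = { .buffer_len = 5, .packet_len = 0,
                           .last_arrival_ms = 100 };

        check_for_packet(&t, 120, 50, 480);     /* data too fresh: hidden */
        printf("at t=120: packet_len=%u\n", t.packet_len);
        check_for_packet(&t, 200, 50, 480);     /* idle > 50 ms: exposed */
        printf("at t=200: packet_len=%u\n", t.packet_len);
        return 0;
}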
sktjdgns1189/android_kernel_samsung_frescolteskt
drivers/input/keyboard/w90p910_keypad.c
4896
6786
/* * Copyright (c) 2008-2009 Nuvoton technology corporation. * * Wan ZongShun <mcuos.com@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation;version 2 of the License. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/input.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <linux/slab.h> #include <mach/w90p910_keypad.h> /* Keypad Interface Control Registers */ #define KPI_CONF 0x00 #define KPI_3KCONF 0x04 #define KPI_LPCONF 0x08 #define KPI_STATUS 0x0C #define IS1KEY (0x01 << 16) #define INTTR (0x01 << 21) #define KEY0R (0x0f << 3) #define KEY0C 0x07 #define DEBOUNCE_BIT 0x08 #define KSIZE0 (0x01 << 16) #define KSIZE1 (0x01 << 17) #define KPSEL (0x01 << 19) #define ENKP (0x01 << 18) #define KGET_RAW(n) (((n) & KEY0R) >> 3) #define KGET_COLUMN(n) ((n) & KEY0C) #define W90P910_MAX_KEY_NUM (8 * 8) #define W90P910_ROW_SHIFT 3 struct w90p910_keypad { const struct w90p910_keypad_platform_data *pdata; struct clk *clk; struct input_dev *input_dev; void __iomem *mmio_base; int irq; unsigned short keymap[W90P910_MAX_KEY_NUM]; }; static void w90p910_keypad_scan_matrix(struct w90p910_keypad *keypad, unsigned int status) { struct input_dev *input_dev = keypad->input_dev; unsigned int row = KGET_RAW(status); unsigned int col = KGET_COLUMN(status); unsigned int code = MATRIX_SCAN_CODE(row, col, W90P910_ROW_SHIFT); unsigned int key = keypad->keymap[code]; input_event(input_dev, EV_MSC, MSC_SCAN, code); input_report_key(input_dev, key, 1); input_sync(input_dev); input_event(input_dev, EV_MSC, MSC_SCAN, code); input_report_key(input_dev, key, 0); input_sync(input_dev); } static irqreturn_t w90p910_keypad_irq_handler(int irq, void *dev_id) { struct w90p910_keypad *keypad = dev_id; unsigned int kstatus, val; kstatus = __raw_readl(keypad->mmio_base + KPI_STATUS); val = INTTR | IS1KEY; if (kstatus & val) w90p910_keypad_scan_matrix(keypad, kstatus); return IRQ_HANDLED; } static int w90p910_keypad_open(struct input_dev *dev) { struct w90p910_keypad *keypad = input_get_drvdata(dev); const struct w90p910_keypad_platform_data *pdata = keypad->pdata; unsigned int val, config; /* Enable unit clock */ clk_enable(keypad->clk); val = __raw_readl(keypad->mmio_base + KPI_CONF); val |= (KPSEL | ENKP); val &= ~(KSIZE0 | KSIZE1); config = pdata->prescale | (pdata->debounce << DEBOUNCE_BIT); val |= config; __raw_writel(val, keypad->mmio_base + KPI_CONF); return 0; } static void w90p910_keypad_close(struct input_dev *dev) { struct w90p910_keypad *keypad = input_get_drvdata(dev); /* Disable clock unit */ clk_disable(keypad->clk); } static int __devinit w90p910_keypad_probe(struct platform_device *pdev) { const struct w90p910_keypad_platform_data *pdata = pdev->dev.platform_data; const struct matrix_keymap_data *keymap_data; struct w90p910_keypad *keypad; struct input_dev *input_dev; struct resource *res; int irq; int error; if (!pdata) { dev_err(&pdev->dev, "no platform data defined\n"); return -EINVAL; } keymap_data = pdata->keymap_data; irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "failed to get keypad irq\n"); return -ENXIO; } keypad = kzalloc(sizeof(struct w90p910_keypad), GFP_KERNEL); input_dev = input_allocate_device(); if (!keypad || !input_dev) { dev_err(&pdev->dev, "failed to allocate driver data\n"); 
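/* either allocation may have failed; kfree(NULL) and input_free_device(NULL) are safe no-ops, so the shared failed_free path below releases whichever one succeeded */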
error = -ENOMEM; goto failed_free; } keypad->pdata = pdata; keypad->input_dev = input_dev; keypad->irq = irq; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "failed to get I/O memory\n"); error = -ENXIO; goto failed_free; } res = request_mem_region(res->start, resource_size(res), pdev->name); if (res == NULL) { dev_err(&pdev->dev, "failed to request I/O memory\n"); error = -EBUSY; goto failed_free; } keypad->mmio_base = ioremap(res->start, resource_size(res)); if (keypad->mmio_base == NULL) { dev_err(&pdev->dev, "failed to remap I/O memory\n"); error = -ENXIO; goto failed_free_res; } keypad->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(keypad->clk)) { dev_err(&pdev->dev, "failed to get keypad clock\n"); error = PTR_ERR(keypad->clk); goto failed_free_io; } /* set multi-function pin for w90p910 kpi. */ mfp_set_groupi(&pdev->dev); input_dev->name = pdev->name; input_dev->id.bustype = BUS_HOST; input_dev->open = w90p910_keypad_open; input_dev->close = w90p910_keypad_close; input_dev->dev.parent = &pdev->dev; input_dev->keycode = keypad->keymap; input_dev->keycodesize = sizeof(keypad->keymap[0]); input_dev->keycodemax = ARRAY_SIZE(keypad->keymap); input_set_drvdata(input_dev, keypad); input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); input_set_capability(input_dev, EV_MSC, MSC_SCAN); matrix_keypad_build_keymap(keymap_data, W90P910_ROW_SHIFT, input_dev->keycode, input_dev->keybit); error = request_irq(keypad->irq, w90p910_keypad_irq_handler, 0, pdev->name, keypad); if (error) { dev_err(&pdev->dev, "failed to request IRQ\n"); goto failed_put_clk; } /* Register the input device */ error = input_register_device(input_dev); if (error) { dev_err(&pdev->dev, "failed to register input device\n"); goto failed_free_irq; } platform_set_drvdata(pdev, keypad); return 0; failed_free_irq: free_irq(keypad->irq, keypad); /* dev_id must match the cookie passed to request_irq() */ failed_put_clk: clk_put(keypad->clk); failed_free_io: iounmap(keypad->mmio_base); failed_free_res: release_mem_region(res->start, resource_size(res)); failed_free: input_free_device(input_dev); kfree(keypad); return error; } static int __devexit w90p910_keypad_remove(struct platform_device *pdev) { struct w90p910_keypad *keypad = platform_get_drvdata(pdev); struct resource *res; free_irq(keypad->irq, keypad); clk_put(keypad->clk); input_unregister_device(keypad->input_dev); iounmap(keypad->mmio_base); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); platform_set_drvdata(pdev, NULL); kfree(keypad); return 0; } static struct platform_driver w90p910_keypad_driver = { .probe = w90p910_keypad_probe, .remove = __devexit_p(w90p910_keypad_remove), .driver = { .name = "nuc900-kpi", .owner = THIS_MODULE, }, }; module_platform_driver(w90p910_keypad_driver); MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); MODULE_DESCRIPTION("w90p910 keypad driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:nuc900-keypad");
gpl-2.0
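The interrupt path in the keypad record above splits KPI_STATUS into row/column fields and folds them into a scan code via MATRIX_SCAN_CODE, i.e. (row << W90P910_ROW_SHIFT) | col. A standalone sketch of that decode; the status value in main() is hypothetical:

#include <stdio.h>

#define KEY0R           (0x0f << 3)     /* row field, bits 6:3 */
#define KEY0C           0x07            /* column field, bits 2:0 */
#define ROW_SHIFT       3

int main(void)
{
        unsigned status = 0x2d;                   /* hypothetical KPI_STATUS */
        unsigned row = (status & KEY0R) >> 3;     /* KGET_RAW() */
        unsigned col = status & KEY0C;            /* KGET_COLUMN() */
        unsigned code = (row << ROW_SHIFT) | col; /* MATRIX_SCAN_CODE() */

        printf("row=%u col=%u code=%u\n", row, col, code);
        return 0;
}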
schqiushui/Kernel_Lollipop_GPE5.1_M8ACE
drivers/regulator/userspace-consumer.c
4896
4730
/* * userspace-consumer.c * * Copyright 2009 CompuLab, Ltd. * * Author: Mike Rapoport <mike@compulab.co.il> * * Based on virtual consumer driver: * Copyright 2008 Wolfson Microelectronics PLC. * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * */ #include <linux/err.h> #include <linux/mutex.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/regulator/userspace-consumer.h> #include <linux/slab.h> struct userspace_consumer_data { const char *name; struct mutex lock; bool enabled; int num_supplies; struct regulator_bulk_data *supplies; }; static ssize_t reg_show_name(struct device *dev, struct device_attribute *attr, char *buf) { struct userspace_consumer_data *data = dev_get_drvdata(dev); return sprintf(buf, "%s\n", data->name); } static ssize_t reg_show_state(struct device *dev, struct device_attribute *attr, char *buf) { struct userspace_consumer_data *data = dev_get_drvdata(dev); if (data->enabled) return sprintf(buf, "enabled\n"); return sprintf(buf, "disabled\n"); } static ssize_t reg_set_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct userspace_consumer_data *data = dev_get_drvdata(dev); bool enabled; int ret; /* * sysfs_streq() doesn't need the \n's, but we add them so the strings * will be shared with show_state(), above. */ if (sysfs_streq(buf, "enabled\n") || sysfs_streq(buf, "1")) enabled = true; else if (sysfs_streq(buf, "disabled\n") || sysfs_streq(buf, "0")) enabled = false; else { dev_err(dev, "Configuring invalid mode\n"); return -EINVAL; /* reject unrecognised input instead of silently accepting it */ } mutex_lock(&data->lock); if (enabled != data->enabled) { if (enabled) ret = regulator_bulk_enable(data->num_supplies, data->supplies); else ret = regulator_bulk_disable(data->num_supplies, data->supplies); if (ret == 0) data->enabled = enabled; else dev_err(dev, "Failed to configure state: %d\n", ret); } mutex_unlock(&data->lock); return count; } static DEVICE_ATTR(name, 0444, reg_show_name, NULL); static DEVICE_ATTR(state, 0644, reg_show_state, reg_set_state); static struct attribute *attributes[] = { &dev_attr_name.attr, &dev_attr_state.attr, NULL, }; static const struct attribute_group attr_group = { .attrs = attributes, }; static int regulator_userspace_consumer_probe(struct platform_device *pdev) { struct regulator_userspace_consumer_data *pdata; struct userspace_consumer_data *drvdata; int ret; pdata = pdev->dev.platform_data; if (!pdata) return -EINVAL; drvdata = kzalloc(sizeof(struct userspace_consumer_data), GFP_KERNEL); if (drvdata == NULL) return -ENOMEM; drvdata->name = pdata->name; drvdata->num_supplies = pdata->num_supplies; drvdata->supplies = pdata->supplies; mutex_init(&drvdata->lock); ret = regulator_bulk_get(&pdev->dev, drvdata->num_supplies, drvdata->supplies); if (ret) { dev_err(&pdev->dev, "Failed to get supplies: %d\n", ret); goto err_alloc_supplies; } ret = sysfs_create_group(&pdev->dev.kobj, &attr_group); if (ret != 0) goto err_create_attrs; if (pdata->init_on) { ret = regulator_bulk_enable(drvdata->num_supplies, drvdata->supplies); if (ret) { dev_err(&pdev->dev, "Failed to set initial state: %d\n", ret); goto err_enable; } } drvdata->enabled = pdata->init_on; platform_set_drvdata(pdev, drvdata); return 0; err_enable:
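/* unwind in reverse order of setup: drop the sysfs group, release the bulk supplies, then free drvdata */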
sysfs_remove_group(&pdev->dev.kobj, &attr_group); err_create_attrs: regulator_bulk_free(drvdata->num_supplies, drvdata->supplies); err_alloc_supplies: kfree(drvdata); return ret; } static int regulator_userspace_consumer_remove(struct platform_device *pdev) { struct userspace_consumer_data *data = platform_get_drvdata(pdev); sysfs_remove_group(&pdev->dev.kobj, &attr_group); if (data->enabled) regulator_bulk_disable(data->num_supplies, data->supplies); regulator_bulk_free(data->num_supplies, data->supplies); kfree(data); return 0; } static struct platform_driver regulator_userspace_consumer_driver = { .probe = regulator_userspace_consumer_probe, .remove = regulator_userspace_consumer_remove, .driver = { .name = "reg-userspace-consumer", }, }; module_platform_driver(regulator_userspace_consumer_driver); MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>"); MODULE_DESCRIPTION("Userspace consumer for voltage and current regulators"); MODULE_LICENSE("GPL");
gpl-2.0
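A minimal board-file sketch, not part of the file above, showing how a platform might instantiate this driver. The pdata fields (name, num_supplies, supplies, init_on) and the driver name "reg-userspace-consumer" come from the source; the supply name "vcc" and the device name "mydev" are hypothetical.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/userspace-consumer.h>

static struct regulator_bulk_data mydev_supplies[] = {
	{ .supply = "vcc" },	/* hypothetical supply name */
};

static struct regulator_userspace_consumer_data mydev_pdata = {
	.name		= "mydev",	/* reported by the 'name' sysfs attribute */
	.num_supplies	= ARRAY_SIZE(mydev_supplies),
	.supplies	= mydev_supplies,
	.init_on	= false,	/* leave supplies off until userspace enables them */
};

static struct platform_device mydev_consumer_device = {
	.name	= "reg-userspace-consumer",	/* must match the platform driver name */
	.id	= -1,
	.dev	= {
		.platform_data = &mydev_pdata,
	},
};

/* From board init code, e.g.: platform_device_register(&mydev_consumer_device); */

Once registered, userspace toggles the supplies by writing "enabled" or "disabled" to the device's 'state' sysfs attribute (the exact sysfs path depends on the device name and id).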
DirtyUnicorns/android_kernel_motorola_ghost
drivers/staging/comedi/drivers/cb_pcidas.c
4896
57863
/* comedi/drivers/cb_pcidas.c Developed by Ivan Martinez and Frank Mori Hess, with valuable help from David Schleef and the rest of the Comedi developers community. Copyright (C) 2001-2003 Ivan Martinez <imr@oersted.dtu.dk> Copyright (C) 2001,2002 Frank Mori Hess <fmhess@users.sourceforge.net> COMEDI - Linux Control and Measurement Device Interface Copyright (C) 1997-8 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ************************************************************************ */ /* Driver: cb_pcidas Description: Measurement Computing PCI-DAS series with the AMCC S5933 PCI controller Author: Ivan Martinez <imr@oersted.dtu.dk>, Frank Mori Hess <fmhess@users.sourceforge.net> Updated: 2003-3-11 Devices: [Measurement Computing] PCI-DAS1602/16 (cb_pcidas), PCI-DAS1602/16jr, PCI-DAS1602/12, PCI-DAS1200, PCI-DAS1200jr, PCI-DAS1000, PCI-DAS1001, PCI-DAS1002 Status: There are many reports of the driver being used with most of the supported cards. Although no detailed log is maintained, the driver can be considered well tested and stable. The boards may be autocalibrated using the comedi_calibrate utility. Configuration options: [0] - PCI bus of device (optional) [1] - PCI slot of device (optional) If bus/slot is not specified, the first supported PCI device found will be used. For commands, the scanned channels must be consecutive (i.e. 4-5-6-7, 2-3-4,...), and must all have the same range and aref. AI Triggering: For start_src == TRIG_EXT, the A/D EXTERNAL TRIGGER IN (pin 45) is used. 
For 1602 series, the start_arg is interpreted as follows: start_arg == 0 => gated trigger (level high) start_arg == CR_INVERT => gated trigger (level low) start_arg == CR_EDGE => Rising edge start_arg == CR_EDGE | CR_INVERT => Falling edge For the other boards the trigger will be done on rising edge */ /* TODO: analog triggering on 1602 series */ #include "../comedidev.h" #include <linux/delay.h> #include <linux/interrupt.h> #include "8253.h" #include "8255.h" #include "amcc_s5933.h" #include "comedi_pci.h" #include "comedi_fc.h" #undef CB_PCIDAS_DEBUG /* disable debugging code */ /* #define CB_PCIDAS_DEBUG enable debugging code */ /* PCI vendor number of ComputerBoards/MeasurementComputing */ #define PCI_VENDOR_ID_CB 0x1307 #define TIMER_BASE 100 /* 10MHz master clock */ #define AI_BUFFER_SIZE 1024 /* maximum fifo size of any supported board */ #define AO_BUFFER_SIZE 1024 /* maximum fifo size of any supported board */ #define NUM_CHANNELS_8800 8 #define NUM_CHANNELS_7376 1 #define NUM_CHANNELS_8402 2 #define NUM_CHANNELS_DAC08 1 /* PCI-DAS base addresses */ /* indices of base address regions */ #define S5933_BADRINDEX 0 #define CONT_STAT_BADRINDEX 1 #define ADC_FIFO_BADRINDEX 2 #define PACER_BADRINDEX 3 #define AO_BADRINDEX 4 /* sizes of io regions */ #define CONT_STAT_SIZE 10 #define ADC_FIFO_SIZE 4 #define PACER_SIZE 12 #define AO_SIZE 4 /* Control/Status registers */ #define INT_ADCFIFO 0 /* INTERRUPT / ADC FIFO register */ #define INT_EOS 0x1 /* interrupt end of scan */ #define INT_FHF 0x2 /* interrupt fifo half full */ #define INT_FNE 0x3 /* interrupt fifo not empty */ #define INT_MASK 0x3 /* mask of interrupt select bits */ #define INTE 0x4 /* interrupt enable */ #define DAHFIE 0x8 /* dac half full interrupt enable */ #define EOAIE 0x10 /* end of acquisition interrupt enable */ #define DAHFI 0x20 /* dac half full read status / write interrupt clear */ #define EOAI 0x40 /* read end of acq. 
interrupt status / write clear */ #define INT 0x80 /* read interrupt status / write clear */ #define EOBI 0x200 /* read end of burst interrupt status */ #define ADHFI 0x400 /* read half-full interrupt status */ #define ADNEI 0x800 /* read fifo not empty interrupt latch status */ #define ADNE 0x1000 /* read, fifo not empty (realtime, not latched) status */ #define DAEMIE 0x1000 /* write, dac empty interrupt enable */ #define LADFUL 0x2000 /* read fifo overflow / write clear */ #define DAEMI 0x4000 /* dac fifo empty interrupt status / write clear */ #define ADCMUX_CONT 2 /* ADC CHANNEL MUX AND CONTROL register */ #define BEGIN_SCAN(x) ((x) & 0xf) #define END_SCAN(x) (((x) & 0xf) << 4) #define GAIN_BITS(x) (((x) & 0x3) << 8) #define UNIP 0x800 /* Analog front-end unipolar for range */ #define SE 0x400 /* Inputs in single-ended mode */ #define PACER_MASK 0x3000 /* pacer source bits */ #define PACER_INT 0x1000 /* internal pacer */ #define PACER_EXT_FALL 0x2000 /* external falling edge */ #define PACER_EXT_RISE 0x3000 /* external rising edge */ #define EOC 0x4000 /* adc not busy */ #define TRIG_CONTSTAT 4 /* TRIGGER CONTROL/STATUS register */ #define SW_TRIGGER 0x1 /* software start trigger */ #define EXT_TRIGGER 0x2 /* external start trigger */ #define ANALOG_TRIGGER 0x3 /* external analog trigger */ #define TRIGGER_MASK 0x3 /* mask of bits that determine start trigger */ #define TGPOL 0x04 /* invert the edge/level of the external trigger (1602 only) */ #define TGSEL 0x08 /* if set edge triggered, otherwise level triggered (1602 only) */ #define TGEN 0x10 /* enable external start trigger */ #define BURSTE 0x20 /* burst mode enable */ #define XTRCL 0x80 /* clear external trigger */ #define CALIBRATION_REG 6 /* CALIBRATION register */ #define SELECT_8800_BIT 0x100 /* select 8800 caldac */ #define SELECT_TRIMPOT_BIT 0x200 /* select ad7376 trim pot */ #define SELECT_DAC08_BIT 0x400 /* select dac08 caldac */ #define CAL_SRC_BITS(x) (((x) & 0x7) << 11) #define CAL_EN_BIT 0x4000 /* read calibration source instead of analog input channel 0 */ #define SERIAL_DATA_IN_BIT 0x8000 /* serial data stream going to 8800 and 7376 */ #define DAC_CSR 0x8 /* dac control and status register */ enum dac_csr_bits { DACEN = 0x2, /* dac enable */ DAC_MODE_UPDATE_BOTH = 0x80, /* update both dacs when dac0 is written */ }; static inline unsigned int DAC_RANGE(unsigned int channel, unsigned int range) { return (range & 0x3) << (8 + 2 * (channel & 0x1)); } static inline unsigned int DAC_RANGE_MASK(unsigned int channel) { return 0x3 << (8 + 2 * (channel & 0x1)); }; /* bits for 1602 series only */ enum dac_csr_bits_1602 { DAC_EMPTY = 0x1, /* dac fifo empty, read, write clear */ DAC_START = 0x4, /* start/arm dac fifo operations */ DAC_PACER_MASK = 0x18, /* bits that set dac pacer source */ DAC_PACER_INT = 0x8, /* dac internal pacing */ DAC_PACER_EXT_FALL = 0x10, /* dac external pacing, falling edge */ DAC_PACER_EXT_RISE = 0x18, /* dac external pacing, rising edge */ }; static inline unsigned int DAC_CHAN_EN(unsigned int channel) { return 1 << (5 + (channel & 0x1)); /* enable channel 0 or 1 */ }; /* analog input fifo */ #define ADCDATA 0 /* ADC DATA register */ #define ADCFIFOCLR 2 /* ADC FIFO CLEAR */ /* pacer, counter, dio registers */ #define ADC8254 0 #define DIO_8255 4 #define DAC8254 8 /* analog output registers for 100x, 1200 series */ static inline unsigned int DAC_DATA_REG(unsigned int channel) { return 2 * (channel & 0x1); } /* analog output registers for 1602 series */ #define DACDATA 0 /* DAC DATA register */ #define
DACFIFOCLR 2 /* DAC FIFO CLEAR */ /* bit in hexadecimal representation of range index that indicates unipolar input range */ #define IS_UNIPOLAR 0x4 /* analog input ranges for most boards */ static const struct comedi_lrange cb_pcidas_ranges = { 8, { BIP_RANGE(10), BIP_RANGE(5), BIP_RANGE(2.5), BIP_RANGE(1.25), UNI_RANGE(10), UNI_RANGE(5), UNI_RANGE(2.5), UNI_RANGE(1.25) } }; /* pci-das1001 input ranges */ static const struct comedi_lrange cb_pcidas_alt_ranges = { 8, { BIP_RANGE(10), BIP_RANGE(1), BIP_RANGE(0.1), BIP_RANGE(0.01), UNI_RANGE(10), UNI_RANGE(1), UNI_RANGE(0.1), UNI_RANGE(0.01) } }; /* analog output ranges */ static const struct comedi_lrange cb_pcidas_ao_ranges = { 4, { BIP_RANGE(5), BIP_RANGE(10), UNI_RANGE(5), UNI_RANGE(10), } }; enum trimpot_model { AD7376, AD8402, }; struct cb_pcidas_board { const char *name; unsigned short device_id; int ai_se_chans; /* Inputs in single-ended mode */ int ai_diff_chans; /* Inputs in differential mode */ int ai_bits; /* analog input resolution */ int ai_speed; /* fastest conversion period in ns */ int ao_nchan; /* number of analog out channels */ int has_ao_fifo; /* analog output has fifo */ int ao_scan_speed; /* analog output speed for 1602 series (for a scan, not conversion) */ int fifo_size; /* number of samples fifo can hold */ const struct comedi_lrange *ranges; enum trimpot_model trimpot; unsigned has_dac08:1; unsigned has_ai_trig_gated:1; /* Tells if the AI trigger can be gated */ unsigned has_ai_trig_invert:1; /* Tells if the AI trigger can be inverted */ }; static const struct cb_pcidas_board cb_pcidas_boards[] = { { .name = "pci-das1602/16", .device_id = 0x1, .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 16, .ai_speed = 5000, .ao_nchan = 2, .has_ao_fifo = 1, .ao_scan_speed = 10000, .fifo_size = 512, .ranges = &cb_pcidas_ranges, .trimpot = AD8402, .has_dac08 = 1, .has_ai_trig_gated = 1, .has_ai_trig_invert = 1, }, { .name = "pci-das1200", .device_id = 0xF, .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 12, .ai_speed = 3200, .ao_nchan = 2, .has_ao_fifo = 0, .fifo_size = 1024, .ranges = &cb_pcidas_ranges, .trimpot = AD7376, .has_dac08 = 0, .has_ai_trig_gated = 0, .has_ai_trig_invert = 0, }, { .name = "pci-das1602/12", .device_id = 0x10, .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 12, .ai_speed = 3200, .ao_nchan = 2, .has_ao_fifo = 1, .ao_scan_speed = 4000, .fifo_size = 1024, .ranges = &cb_pcidas_ranges, .trimpot = AD7376, .has_dac08 = 0, .has_ai_trig_gated = 1, .has_ai_trig_invert = 1, }, { .name = "pci-das1200/jr", .device_id = 0x19, .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 12, .ai_speed = 3200, .ao_nchan = 0, .has_ao_fifo = 0, .fifo_size = 1024, .ranges = &cb_pcidas_ranges, .trimpot = AD7376, .has_dac08 = 0, .has_ai_trig_gated = 0, .has_ai_trig_invert = 0, }, { .name = "pci-das1602/16/jr", .device_id = 0x1C, .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 16, .ai_speed = 5000, .ao_nchan = 0, .has_ao_fifo = 0, .fifo_size = 512, .ranges = &cb_pcidas_ranges, .trimpot = AD8402, .has_dac08 = 1, .has_ai_trig_gated = 1, .has_ai_trig_invert = 1, }, { .name = "pci-das1000", .device_id = 0x4C, .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 12, .ai_speed = 4000, .ao_nchan = 0, .has_ao_fifo = 0, .fifo_size = 1024, .ranges = &cb_pcidas_ranges, .trimpot = AD7376, .has_dac08 = 0, .has_ai_trig_gated = 0, .has_ai_trig_invert = 0, }, { .name = "pci-das1001", .device_id = 0x1a, .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 12, .ai_speed = 6800, .ao_nchan = 2, .has_ao_fifo = 0, .fifo_size = 1024, .ranges = 
&cb_pcidas_alt_ranges, .trimpot = AD7376, .has_dac08 = 0, .has_ai_trig_gated = 0, .has_ai_trig_invert = 0, }, { .name = "pci-das1002", .device_id = 0x1b, .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 12, .ai_speed = 6800, .ao_nchan = 2, .has_ao_fifo = 0, .fifo_size = 1024, .ranges = &cb_pcidas_ranges, .trimpot = AD7376, .has_dac08 = 0, .has_ai_trig_gated = 0, .has_ai_trig_invert = 0, }, }; static DEFINE_PCI_DEVICE_TABLE(cb_pcidas_pci_table) = { { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0001) }, { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x000f) }, { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0010) }, { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0019) }, { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x001c) }, { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x004c) }, { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x001a) }, { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x001b) }, { 0 } }; MODULE_DEVICE_TABLE(pci, cb_pcidas_pci_table); /* * Useful for shorthand access to the particular board structure */ #define thisboard ((const struct cb_pcidas_board *)dev->board_ptr) /* this structure is for data unique to this hardware driver. If several hardware drivers keep similar information in this structure, feel free to suggest moving the variable to the struct comedi_device struct. */ struct cb_pcidas_private { /* would be useful for a PCI device */ struct pci_dev *pci_dev; /* base addresses */ unsigned long s5933_config; unsigned long control_status; unsigned long adc_fifo; unsigned long pacer_counter_dio; unsigned long ao_registers; /* divisors of master clock for analog input pacing */ unsigned int divisor1; unsigned int divisor2; volatile unsigned int count; /* number of analog input samples remaining */ volatile unsigned int adc_fifo_bits; /* bits to write to interrupt/adcfifo register */ volatile unsigned int s5933_intcsr_bits; /* bits to write to amcc s5933 interrupt control/status register */ volatile unsigned int ao_control_bits; /* bits to write to ao control and status register */ short ai_buffer[AI_BUFFER_SIZE]; short ao_buffer[AO_BUFFER_SIZE]; /* divisors of master clock for analog output pacing */ unsigned int ao_divisor1; unsigned int ao_divisor2; volatile unsigned int ao_count; /* number of analog output samples remaining */ int ao_value[2]; /* remember what the analog outputs are set to, to allow readback */ unsigned int caldac_value[NUM_CHANNELS_8800]; /* for readback of caldac */ unsigned int trimpot_value[NUM_CHANNELS_8402]; /* for readback of trimpot */ unsigned int dac08_value; unsigned int calibration_source; }; /* * most drivers define the following macro to make it easy to * access the private structure. */ #define devpriv ((struct cb_pcidas_private *)dev->private) /* * The struct comedi_driver structure tells the Comedi core module * which functions to call to configure/deconfigure (attach/detach) * the board, and also about the kernel module that contains * the device code. 
*/ static int cb_pcidas_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int cb_pcidas_detach(struct comedi_device *dev); static struct comedi_driver driver_cb_pcidas = { .driver_name = "cb_pcidas", .module = THIS_MODULE, .attach = cb_pcidas_attach, .detach = cb_pcidas_detach, }; static int cb_pcidas_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int ai_config_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int cb_pcidas_ao_nofifo_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int cb_pcidas_ao_fifo_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int cb_pcidas_ao_readback_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int cb_pcidas_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s); static int cb_pcidas_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); static int cb_pcidas_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s); static int cb_pcidas_ao_inttrig(struct comedi_device *dev, struct comedi_subdevice *subdev, unsigned int trig_num); static int cb_pcidas_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); static irqreturn_t cb_pcidas_interrupt(int irq, void *d); static void handle_ao_interrupt(struct comedi_device *dev, unsigned int status); static int cb_pcidas_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static int cb_pcidas_ao_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static void cb_pcidas_load_counters(struct comedi_device *dev, unsigned int *ns, int round_flags); static int eeprom_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int caldac_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int caldac_write_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int trimpot_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int cb_pcidas_trimpot_write(struct comedi_device *dev, unsigned int channel, unsigned int value); static int trimpot_write_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int dac08_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int dac08_write(struct comedi_device *dev, unsigned int value); static int dac08_write_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int caldac_8800_write(struct comedi_device *dev, unsigned int address, uint8_t value); static int trimpot_7376_write(struct comedi_device *dev, uint8_t value); static int trimpot_8402_write(struct comedi_device *dev, unsigned int channel, uint8_t value); static int nvram_read(struct comedi_device *dev, unsigned int address, uint8_t *data); static inline unsigned int cal_enable_bits(struct comedi_device *dev) { return CAL_EN_BIT | CAL_SRC_BITS(devpriv->calibration_source); } /* * Attach is called by the Comedi core to configure the driver * for a particular 
board. */ static int cb_pcidas_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; struct pci_dev *pcidev = NULL; int index; int i; /* * Allocate the private structure area. */ if (alloc_private(dev, sizeof(struct cb_pcidas_private)) < 0) return -ENOMEM; /* * Probe the device to determine what device in the series it is. */ for_each_pci_dev(pcidev) { /* is it not a computer boards card? */ if (pcidev->vendor != PCI_VENDOR_ID_CB) continue; /* loop through cards supported by this driver */ for (index = 0; index < ARRAY_SIZE(cb_pcidas_boards); index++) { if (cb_pcidas_boards[index].device_id != pcidev->device) continue; /* was a particular bus/slot requested? */ if (it->options[0] || it->options[1]) { /* are we on the wrong bus/slot? */ if (pcidev->bus->number != it->options[0] || PCI_SLOT(pcidev->devfn) != it->options[1]) { continue; } } devpriv->pci_dev = pcidev; dev->board_ptr = cb_pcidas_boards + index; goto found; } } dev_err(dev->hw_dev, "No supported ComputerBoards/MeasurementComputing card found on requested position\n"); return -EIO; found: dev_dbg(dev->hw_dev, "Found %s on bus %i, slot %i\n", cb_pcidas_boards[index].name, pcidev->bus->number, PCI_SLOT(pcidev->devfn)); /* * Enable PCI device and reserve I/O ports. */ if (comedi_pci_enable(pcidev, "cb_pcidas")) { dev_err(dev->hw_dev, "Failed to enable PCI device and request regions\n"); return -EIO; } /* * Initialize devpriv->control_status and devpriv->adc_fifo to point to * their base address. */ devpriv->s5933_config = pci_resource_start(devpriv->pci_dev, S5933_BADRINDEX); devpriv->control_status = pci_resource_start(devpriv->pci_dev, CONT_STAT_BADRINDEX); devpriv->adc_fifo = pci_resource_start(devpriv->pci_dev, ADC_FIFO_BADRINDEX); devpriv->pacer_counter_dio = pci_resource_start(devpriv->pci_dev, PACER_BADRINDEX); if (thisboard->ao_nchan) { devpriv->ao_registers = pci_resource_start(devpriv->pci_dev, AO_BADRINDEX); } /* disable and clear interrupts on amcc s5933 */ outl(INTCSR_INBOX_INTR_STATUS, devpriv->s5933_config + AMCC_OP_REG_INTCSR); /* get irq */ if (request_irq(devpriv->pci_dev->irq, cb_pcidas_interrupt, IRQF_SHARED, "cb_pcidas", dev)) { dev_dbg(dev->hw_dev, "unable to allocate irq %d\n", devpriv->pci_dev->irq); return -EINVAL; } dev->irq = devpriv->pci_dev->irq; /* Initialize dev->board_name */ dev->board_name = thisboard->name; /* * Allocate the subdevice structures. 
*/ if (alloc_subdevices(dev, 7) < 0) return -ENOMEM; s = dev->subdevices + 0; /* analog input subdevice */ dev->read_subdev = s; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF | SDF_CMD_READ; /* WARNING: Number of inputs in differential mode is ignored */ s->n_chan = thisboard->ai_se_chans; s->len_chanlist = thisboard->ai_se_chans; s->maxdata = (1 << thisboard->ai_bits) - 1; s->range_table = thisboard->ranges; s->insn_read = cb_pcidas_ai_rinsn; s->insn_config = ai_config_insn; s->do_cmd = cb_pcidas_ai_cmd; s->do_cmdtest = cb_pcidas_ai_cmdtest; s->cancel = cb_pcidas_cancel; /* analog output subdevice */ s = dev->subdevices + 1; if (thisboard->ao_nchan) { s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_GROUND; s->n_chan = thisboard->ao_nchan; /* analog out resolution is the same as analog input resolution, so use ai_bits */ s->maxdata = (1 << thisboard->ai_bits) - 1; s->range_table = &cb_pcidas_ao_ranges; s->insn_read = cb_pcidas_ao_readback_insn; if (thisboard->has_ao_fifo) { dev->write_subdev = s; s->subdev_flags |= SDF_CMD_WRITE; s->insn_write = cb_pcidas_ao_fifo_winsn; s->do_cmdtest = cb_pcidas_ao_cmdtest; s->do_cmd = cb_pcidas_ao_cmd; s->cancel = cb_pcidas_ao_cancel; } else { s->insn_write = cb_pcidas_ao_nofifo_winsn; } } else { s->type = COMEDI_SUBD_UNUSED; } /* 8255 */ s = dev->subdevices + 2; subdev_8255_init(dev, s, NULL, devpriv->pacer_counter_dio + DIO_8255); /* serial EEPROM, */ s = dev->subdevices + 3; s->type = COMEDI_SUBD_MEMORY; s->subdev_flags = SDF_READABLE | SDF_INTERNAL; s->n_chan = 256; s->maxdata = 0xff; s->insn_read = eeprom_read_insn; /* 8800 caldac */ s = dev->subdevices + 4; s->type = COMEDI_SUBD_CALIB; s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL; s->n_chan = NUM_CHANNELS_8800; s->maxdata = 0xff; s->insn_read = caldac_read_insn; s->insn_write = caldac_write_insn; for (i = 0; i < s->n_chan; i++) caldac_8800_write(dev, i, s->maxdata / 2); /* trim potentiometer */ s = dev->subdevices + 5; s->type = COMEDI_SUBD_CALIB; s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL; if (thisboard->trimpot == AD7376) { s->n_chan = NUM_CHANNELS_7376; s->maxdata = 0x7f; } else { s->n_chan = NUM_CHANNELS_8402; s->maxdata = 0xff; } s->insn_read = trimpot_read_insn; s->insn_write = trimpot_write_insn; for (i = 0; i < s->n_chan; i++) cb_pcidas_trimpot_write(dev, i, s->maxdata / 2); /* dac08 caldac */ s = dev->subdevices + 6; if (thisboard->has_dac08) { s->type = COMEDI_SUBD_CALIB; s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL; s->n_chan = NUM_CHANNELS_DAC08; s->insn_read = dac08_read_insn; s->insn_write = dac08_write_insn; s->maxdata = 0xff; dac08_write(dev, s->maxdata / 2); } else s->type = COMEDI_SUBD_UNUSED; /* make sure mailbox 4 is empty */ inl(devpriv->s5933_config + AMCC_OP_REG_IMB4); /* Set bits to enable incoming mailbox interrupts on amcc s5933. */ devpriv->s5933_intcsr_bits = INTCSR_INBOX_BYTE(3) | INTCSR_INBOX_SELECT(3) | INTCSR_INBOX_FULL_INT; /* clear and enable interrupt on amcc s5933 */ outl(devpriv->s5933_intcsr_bits | INTCSR_INBOX_INTR_STATUS, devpriv->s5933_config + AMCC_OP_REG_INTCSR); return 1; } /* * cb_pcidas_detach is called to deconfigure a device. It should deallocate * resources. * This function is also called when _attach() fails, so it should be * careful not to release resources that were not necessarily * allocated by _attach(). dev->private and dev->subdevices are * deallocated automatically by the core. 
*/ static int cb_pcidas_detach(struct comedi_device *dev) { if (devpriv) { if (devpriv->s5933_config) { /* disable and clear interrupts on amcc s5933 */ outl(INTCSR_INBOX_INTR_STATUS, devpriv->s5933_config + AMCC_OP_REG_INTCSR); #ifdef CB_PCIDAS_DEBUG dev_dbg(dev->hw_dev, "detaching, incsr is 0x%x\n", inl(devpriv->s5933_config + AMCC_OP_REG_INTCSR)); #endif } } if (dev->irq) free_irq(dev->irq, dev); if (dev->subdevices) subdev_8255_cleanup(dev, dev->subdevices + 2); if (devpriv && devpriv->pci_dev) { if (devpriv->s5933_config) comedi_pci_disable(devpriv->pci_dev); pci_dev_put(devpriv->pci_dev); } return 0; } /* * "instructions" read/write data in "one-shot" or "software-triggered" * mode. */ static int cb_pcidas_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int n, i; unsigned int bits; static const int timeout = 10000; int channel; /* enable calibration input if appropriate */ if (insn->chanspec & CR_ALT_SOURCE) { outw(cal_enable_bits(dev), devpriv->control_status + CALIBRATION_REG); channel = 0; } else { outw(0, devpriv->control_status + CALIBRATION_REG); channel = CR_CHAN(insn->chanspec); } /* set mux limits and gain */ bits = BEGIN_SCAN(channel) | END_SCAN(channel) | GAIN_BITS(CR_RANGE(insn->chanspec)); /* set unipolar/bipolar */ if (CR_RANGE(insn->chanspec) & IS_UNIPOLAR) bits |= UNIP; /* set singleended/differential */ if (CR_AREF(insn->chanspec) != AREF_DIFF) bits |= SE; outw(bits, devpriv->control_status + ADCMUX_CONT); /* clear fifo */ outw(0, devpriv->adc_fifo + ADCFIFOCLR); /* convert n samples */ for (n = 0; n < insn->n; n++) { /* trigger conversion */ outw(0, devpriv->adc_fifo + ADCDATA); /* wait for conversion to end */ /* return -ETIMEDOUT if there is a timeout */ for (i = 0; i < timeout; i++) { if (inw(devpriv->control_status + ADCMUX_CONT) & EOC) break; } if (i == timeout) return -ETIMEDOUT; /* read data */ data[n] = inw(devpriv->adc_fifo + ADCDATA); } /* return the number of samples read/written */ return n; } static int ai_config_calibration_source(struct comedi_device *dev, unsigned int *data) { static const int num_calibration_sources = 8; unsigned int source = data[1]; if (source >= num_calibration_sources) { dev_err(dev->hw_dev, "invalid calibration source: %i\n", source); return -EINVAL; } devpriv->calibration_source = source; return 2; } static int ai_config_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int id = data[0]; switch (id) { case INSN_CONFIG_ALT_SOURCE: return ai_config_calibration_source(dev, data); break; default: return -EINVAL; break; } return -EINVAL; } /* analog output insn for pcidas-1000 and 1200 series */ static int cb_pcidas_ao_nofifo_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int channel; unsigned long flags; /* set channel and range */ channel = CR_CHAN(insn->chanspec); spin_lock_irqsave(&dev->spinlock, flags); devpriv->ao_control_bits &= ~DAC_MODE_UPDATE_BOTH & ~DAC_RANGE_MASK(channel); devpriv->ao_control_bits |= DACEN | DAC_RANGE(channel, CR_RANGE(insn->chanspec)); outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR); spin_unlock_irqrestore(&dev->spinlock, flags); /* remember value for readback */ devpriv->ao_value[channel] = data[0]; /* send data */ outw(data[0], devpriv->ao_registers + DAC_DATA_REG(channel)); return 1; } /* analog output insn for pcidas-1602 series */ static int cb_pcidas_ao_fifo_winsn(struct comedi_device *dev, struct 
comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int channel; unsigned long flags; /* clear dac fifo */ outw(0, devpriv->ao_registers + DACFIFOCLR); /* set channel and range */ channel = CR_CHAN(insn->chanspec); spin_lock_irqsave(&dev->spinlock, flags); devpriv->ao_control_bits &= ~DAC_CHAN_EN(0) & ~DAC_CHAN_EN(1) & ~DAC_RANGE_MASK(channel) & ~DAC_PACER_MASK; devpriv->ao_control_bits |= DACEN | DAC_RANGE(channel, CR_RANGE(insn-> chanspec)) | DAC_CHAN_EN(channel) | DAC_START; outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR); spin_unlock_irqrestore(&dev->spinlock, flags); /* remember value for readback */ devpriv->ao_value[channel] = data[0]; /* send data */ outw(data[0], devpriv->ao_registers + DACDATA); return 1; } /* analog output readback insn */ /* XXX loses track of the analog output value after an analog output command is executed */ static int cb_pcidas_ao_readback_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[0] = devpriv->ao_value[CR_CHAN(insn->chanspec)]; return 1; } static int eeprom_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { uint8_t nvram_data; int retval; retval = nvram_read(dev, CR_CHAN(insn->chanspec), &nvram_data); if (retval < 0) return retval; data[0] = nvram_data; return 1; } static int caldac_write_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { const unsigned int channel = CR_CHAN(insn->chanspec); return caldac_8800_write(dev, channel, data[0]); } static int caldac_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[0] = devpriv->caldac_value[CR_CHAN(insn->chanspec)]; return 1; } /* 1602/16 pregain offset */ static int dac08_write(struct comedi_device *dev, unsigned int value) { if (devpriv->dac08_value == value) return 1; devpriv->dac08_value = value; outw(cal_enable_bits(dev) | (value & 0xff), devpriv->control_status + CALIBRATION_REG); udelay(1); outw(cal_enable_bits(dev) | SELECT_DAC08_BIT | (value & 0xff), devpriv->control_status + CALIBRATION_REG); udelay(1); outw(cal_enable_bits(dev) | (value & 0xff), devpriv->control_status + CALIBRATION_REG); udelay(1); return 1; } static int dac08_write_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { return dac08_write(dev, data[0]); } static int dac08_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[0] = devpriv->dac08_value; return 1; } static int cb_pcidas_trimpot_write(struct comedi_device *dev, unsigned int channel, unsigned int value) { if (devpriv->trimpot_value[channel] == value) return 1; devpriv->trimpot_value[channel] = value; switch (thisboard->trimpot) { case AD7376: trimpot_7376_write(dev, value); break; case AD8402: trimpot_8402_write(dev, channel, value); break; default: comedi_error(dev, "driver bug?"); return -1; break; } return 1; } static int trimpot_write_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int channel = CR_CHAN(insn->chanspec); return cb_pcidas_trimpot_write(dev, channel, data[0]); } static int trimpot_read_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int channel = CR_CHAN(insn->chanspec); data[0] = devpriv->trimpot_value[channel]; 
return 1; } static int cb_pcidas_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; int tmp; int i, gain, start_chan; /* cmdtest tests a particular command to see if it is valid. * Using the cmdtest ioctl, a user can create a valid cmd * and then have it executed by the cmd ioctl. * * cmdtest returns 1,2,3,4 or 0, depending on which tests * the command passes. */ /* step 1: make sure trigger sources are trivially valid */ tmp = cmd->start_src; cmd->start_src &= TRIG_NOW | TRIG_EXT; if (!cmd->start_src || tmp != cmd->start_src) err++; tmp = cmd->scan_begin_src; cmd->scan_begin_src &= TRIG_FOLLOW | TRIG_TIMER | TRIG_EXT; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; cmd->convert_src &= TRIG_TIMER | TRIG_NOW | TRIG_EXT; if (!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; cmd->stop_src &= TRIG_COUNT | TRIG_NONE; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /* step 2: make sure trigger sources are unique and mutually compatible */ if (cmd->start_src != TRIG_NOW && cmd->start_src != TRIG_EXT) err++; if (cmd->scan_begin_src != TRIG_FOLLOW && cmd->scan_begin_src != TRIG_TIMER && cmd->scan_begin_src != TRIG_EXT) err++; if (cmd->convert_src != TRIG_TIMER && cmd->convert_src != TRIG_EXT && cmd->convert_src != TRIG_NOW) err++; if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE) err++; /* make sure trigger sources are compatible with each other */ if (cmd->scan_begin_src == TRIG_FOLLOW && cmd->convert_src == TRIG_NOW) err++; if (cmd->scan_begin_src != TRIG_FOLLOW && cmd->convert_src != TRIG_NOW) err++; if (cmd->start_src == TRIG_EXT && (cmd->convert_src == TRIG_EXT || cmd->scan_begin_src == TRIG_EXT)) err++; if (err) return 2; /* step 3: make sure arguments are trivially compatible */ switch (cmd->start_src) { case TRIG_EXT: /* External trigger, only CR_EDGE and CR_INVERT flags allowed */ if ((cmd->start_arg & (CR_FLAGS_MASK & ~(CR_EDGE | CR_INVERT))) != 0) { cmd->start_arg &= ~(CR_FLAGS_MASK & ~(CR_EDGE | CR_INVERT)); err++; } if (!thisboard->has_ai_trig_invert && (cmd->start_arg & CR_INVERT)) { cmd->start_arg &= (CR_FLAGS_MASK & ~CR_INVERT); err++; } break; default: if (cmd->start_arg != 0) { cmd->start_arg = 0; err++; } break; } if (cmd->scan_begin_src == TRIG_TIMER) { if (cmd->scan_begin_arg < thisboard->ai_speed * cmd->chanlist_len) { cmd->scan_begin_arg = thisboard->ai_speed * cmd->chanlist_len; err++; } } if (cmd->convert_src == TRIG_TIMER) { if (cmd->convert_arg < thisboard->ai_speed) { cmd->convert_arg = thisboard->ai_speed; err++; } } if (cmd->scan_end_arg != cmd->chanlist_len) { cmd->scan_end_arg = cmd->chanlist_len; err++; } if (cmd->stop_src == TRIG_NONE) { /* TRIG_NONE */ if (cmd->stop_arg != 0) { cmd->stop_arg = 0; err++; } } if (err) return 3; /* step 4: fix up any arguments */ if (cmd->scan_begin_src == TRIG_TIMER) { tmp = cmd->scan_begin_arg; i8253_cascade_ns_to_timer_2div(TIMER_BASE, &(devpriv->divisor1), &(devpriv->divisor2), &(cmd->scan_begin_arg), cmd->flags & TRIG_ROUND_MASK); if (tmp != cmd->scan_begin_arg) err++; } if (cmd->convert_src == TRIG_TIMER) { tmp = cmd->convert_arg; i8253_cascade_ns_to_timer_2div(TIMER_BASE, &(devpriv->divisor1), &(devpriv->divisor2), &(cmd->convert_arg), cmd->flags & TRIG_ROUND_MASK); if (tmp != cmd->convert_arg) err++; } if (err) return 4; /* check channel/gain list against card's 
limitations */ if (cmd->chanlist) { gain = CR_RANGE(cmd->chanlist[0]); start_chan = CR_CHAN(cmd->chanlist[0]); for (i = 1; i < cmd->chanlist_len; i++) { if (CR_CHAN(cmd->chanlist[i]) != (start_chan + i) % s->n_chan) { comedi_error(dev, "entries in chanlist must be consecutive channels, counting upwards\n"); err++; } if (CR_RANGE(cmd->chanlist[i]) != gain) { comedi_error(dev, "entries in chanlist must all have the same gain\n"); err++; } } } if (err) return 5; return 0; } static int cb_pcidas_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; unsigned int bits; unsigned long flags; /* make sure CAL_EN_BIT is disabled */ outw(0, devpriv->control_status + CALIBRATION_REG); /* initialize before setting pacer source and count values */ outw(0, devpriv->control_status + TRIG_CONTSTAT); /* clear fifo */ outw(0, devpriv->adc_fifo + ADCFIFOCLR); /* set mux limits, gain and pacer source */ bits = BEGIN_SCAN(CR_CHAN(cmd->chanlist[0])) | END_SCAN(CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1])) | GAIN_BITS(CR_RANGE(cmd->chanlist[0])); /* set unipolar/bipolar */ if (CR_RANGE(cmd->chanlist[0]) & IS_UNIPOLAR) bits |= UNIP; /* set singleended/differential */ if (CR_AREF(cmd->chanlist[0]) != AREF_DIFF) bits |= SE; /* set pacer source */ if (cmd->convert_src == TRIG_EXT || cmd->scan_begin_src == TRIG_EXT) bits |= PACER_EXT_RISE; else bits |= PACER_INT; outw(bits, devpriv->control_status + ADCMUX_CONT); #ifdef CB_PCIDAS_DEBUG dev_dbg(dev->hw_dev, "comedi: sent 0x%x to adcmux control\n", bits); #endif /* load counters */ if (cmd->convert_src == TRIG_TIMER) cb_pcidas_load_counters(dev, &cmd->convert_arg, cmd->flags & TRIG_ROUND_MASK); else if (cmd->scan_begin_src == TRIG_TIMER) cb_pcidas_load_counters(dev, &cmd->scan_begin_arg, cmd->flags & TRIG_ROUND_MASK); /* set number of conversions */ if (cmd->stop_src == TRIG_COUNT) devpriv->count = cmd->chanlist_len * cmd->stop_arg; /* enable interrupts */ spin_lock_irqsave(&dev->spinlock, flags); devpriv->adc_fifo_bits |= INTE; devpriv->adc_fifo_bits &= ~INT_MASK; if (cmd->flags & TRIG_WAKE_EOS) { if (cmd->convert_src == TRIG_NOW && cmd->chanlist_len > 1) devpriv->adc_fifo_bits |= INT_EOS; /* interrupt end of burst */ else devpriv->adc_fifo_bits |= INT_FNE; /* interrupt fifo not empty */ } else { devpriv->adc_fifo_bits |= INT_FHF; /* interrupt fifo half full */ } #ifdef CB_PCIDAS_DEBUG dev_dbg(dev->hw_dev, "comedi: adc_fifo_bits are 0x%x\n", devpriv->adc_fifo_bits); #endif /* enable (and clear) interrupts */ outw(devpriv->adc_fifo_bits | EOAI | INT | LADFUL, devpriv->control_status + INT_ADCFIFO); spin_unlock_irqrestore(&dev->spinlock, flags); /* set start trigger and burst mode */ bits = 0; if (cmd->start_src == TRIG_NOW) bits |= SW_TRIGGER; else if (cmd->start_src == TRIG_EXT) { bits |= EXT_TRIGGER | TGEN | XTRCL; if (thisboard->has_ai_trig_invert && (cmd->start_arg & CR_INVERT)) bits |= TGPOL; if (thisboard->has_ai_trig_gated && (cmd->start_arg & CR_EDGE)) bits |= TGSEL; } else { comedi_error(dev, "bug!"); return -1; } if (cmd->convert_src == TRIG_NOW && cmd->chanlist_len > 1) bits |= BURSTE; outw(bits, devpriv->control_status + TRIG_CONTSTAT); #ifdef CB_PCIDAS_DEBUG dev_dbg(dev->hw_dev, "comedi: sent 0x%x to trig control\n", bits); #endif return 0; } static int cb_pcidas_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; int tmp; /* cmdtest tests a particular command to see if it is valid. 
* Using the cmdtest ioctl, a user can create a valid cmd * and then have it executed by the cmd ioctl. * * cmdtest returns 1,2,3,4 or 0, depending on which tests * the command passes. */ /* step 1: make sure trigger sources are trivially valid */ tmp = cmd->start_src; cmd->start_src &= TRIG_INT; if (!cmd->start_src || tmp != cmd->start_src) err++; tmp = cmd->scan_begin_src; cmd->scan_begin_src &= TRIG_TIMER | TRIG_EXT; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; cmd->convert_src &= TRIG_NOW; if (!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; cmd->stop_src &= TRIG_COUNT | TRIG_NONE; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /* step 2: make sure trigger sources are unique and mutually compatible */ if (cmd->scan_begin_src != TRIG_TIMER && cmd->scan_begin_src != TRIG_EXT) err++; if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE) err++; if (err) return 2; /* step 3: make sure arguments are trivially compatible */ if (cmd->start_arg != 0) { cmd->start_arg = 0; err++; } if (cmd->scan_begin_src == TRIG_TIMER) { if (cmd->scan_begin_arg < thisboard->ao_scan_speed) { cmd->scan_begin_arg = thisboard->ao_scan_speed; err++; } } if (cmd->scan_end_arg != cmd->chanlist_len) { cmd->scan_end_arg = cmd->chanlist_len; err++; } if (cmd->stop_src == TRIG_NONE) { /* TRIG_NONE */ if (cmd->stop_arg != 0) { cmd->stop_arg = 0; err++; } } if (err) return 3; /* step 4: fix up any arguments */ if (cmd->scan_begin_src == TRIG_TIMER) { tmp = cmd->scan_begin_arg; i8253_cascade_ns_to_timer_2div(TIMER_BASE, &(devpriv->ao_divisor1), &(devpriv->ao_divisor2), &(cmd->scan_begin_arg), cmd->flags & TRIG_ROUND_MASK); if (tmp != cmd->scan_begin_arg) err++; } if (err) return 4; /* check channel/gain list against card's limitations */ if (cmd->chanlist && cmd->chanlist_len > 1) { if (CR_CHAN(cmd->chanlist[0]) != 0 || CR_CHAN(cmd->chanlist[1]) != 1) { comedi_error(dev, "channels must be ordered channel 0, channel 1 in chanlist\n"); err++; } } if (err) return 5; return 0; } static int cb_pcidas_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; unsigned int i; unsigned long flags; /* set channel limits, gain */ spin_lock_irqsave(&dev->spinlock, flags); for (i = 0; i < cmd->chanlist_len; i++) { /* enable channel */ devpriv->ao_control_bits |= DAC_CHAN_EN(CR_CHAN(cmd->chanlist[i])); /* set range */ devpriv->ao_control_bits |= DAC_RANGE(CR_CHAN(cmd->chanlist[i]), CR_RANGE(cmd-> chanlist[i])); } /* disable analog out before setting pacer source and count values */ outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR); spin_unlock_irqrestore(&dev->spinlock, flags); /* clear fifo */ outw(0, devpriv->ao_registers + DACFIFOCLR); /* load counters */ if (cmd->scan_begin_src == TRIG_TIMER) { i8253_cascade_ns_to_timer_2div(TIMER_BASE, &(devpriv->ao_divisor1), &(devpriv->ao_divisor2), &(cmd->scan_begin_arg), cmd->flags); /* Write the values of ctr1 and ctr2 into counters 1 and 2 */ i8254_load(devpriv->pacer_counter_dio + DAC8254, 0, 1, devpriv->ao_divisor1, 2); i8254_load(devpriv->pacer_counter_dio + DAC8254, 0, 2, devpriv->ao_divisor2, 2); } /* set number of conversions */ if (cmd->stop_src == TRIG_COUNT) devpriv->ao_count = cmd->chanlist_len * cmd->stop_arg; /* set pacer source */ spin_lock_irqsave(&dev->spinlock, flags); 
switch (cmd->scan_begin_src) { case TRIG_TIMER: devpriv->ao_control_bits |= DAC_PACER_INT; break; case TRIG_EXT: devpriv->ao_control_bits |= DAC_PACER_EXT_RISE; break; default: spin_unlock_irqrestore(&dev->spinlock, flags); comedi_error(dev, "error setting dac pacer source"); return -1; break; } spin_unlock_irqrestore(&dev->spinlock, flags); async->inttrig = cb_pcidas_ao_inttrig; return 0; } static int cb_pcidas_ao_inttrig(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trig_num) { unsigned int num_bytes, num_points = thisboard->fifo_size; struct comedi_async *async = s->async; struct comedi_cmd *cmd = &s->async->cmd; unsigned long flags; if (trig_num != 0) return -EINVAL; /* load up fifo */ if (cmd->stop_src == TRIG_COUNT && devpriv->ao_count < num_points) num_points = devpriv->ao_count; num_bytes = cfc_read_array_from_buffer(s, devpriv->ao_buffer, num_points * sizeof(short)); num_points = num_bytes / sizeof(short); if (cmd->stop_src == TRIG_COUNT) devpriv->ao_count -= num_points; /* write data to board's fifo */ outsw(devpriv->ao_registers + DACDATA, devpriv->ao_buffer, num_bytes); /* enable dac half-full and empty interrupts */ spin_lock_irqsave(&dev->spinlock, flags); devpriv->adc_fifo_bits |= DAEMIE | DAHFIE; #ifdef CB_PCIDAS_DEBUG dev_dbg(dev->hw_dev, "comedi: adc_fifo_bits are 0x%x\n", devpriv->adc_fifo_bits); #endif /* enable and clear interrupts */ outw(devpriv->adc_fifo_bits | DAEMI | DAHFI, devpriv->control_status + INT_ADCFIFO); /* start dac */ devpriv->ao_control_bits |= DAC_START | DACEN | DAC_EMPTY; outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR); #ifdef CB_PCIDAS_DEBUG dev_dbg(dev->hw_dev, "comedi: sent 0x%x to dac control\n", devpriv->ao_control_bits); #endif spin_unlock_irqrestore(&dev->spinlock, flags); async->inttrig = NULL; return 0; } static irqreturn_t cb_pcidas_interrupt(int irq, void *d) { struct comedi_device *dev = (struct comedi_device *)d; struct comedi_subdevice *s = dev->read_subdev; struct comedi_async *async; int status, s5933_status; int half_fifo = thisboard->fifo_size / 2; unsigned int num_samples, i; static const int timeout = 10000; unsigned long flags; if (dev->attached == 0) return IRQ_NONE; async = s->async; async->events = 0; s5933_status = inl(devpriv->s5933_config + AMCC_OP_REG_INTCSR); #ifdef CB_PCIDAS_DEBUG dev_dbg(dev->hw_dev, "intcsr 0x%x\n", s5933_status); dev_dbg(dev->hw_dev, "mbef 0x%x\n", inl(devpriv->s5933_config + AMCC_OP_REG_MBEF)); #endif if ((INTCSR_INTR_ASSERTED & s5933_status) == 0) return IRQ_NONE; /* make sure mailbox 4 is empty */ inl_p(devpriv->s5933_config + AMCC_OP_REG_IMB4); /* clear interrupt on amcc s5933 */ outl(devpriv->s5933_intcsr_bits | INTCSR_INBOX_INTR_STATUS, devpriv->s5933_config + AMCC_OP_REG_INTCSR); status = inw(devpriv->control_status + INT_ADCFIFO); #ifdef CB_PCIDAS_DEBUG if ((status & (INT | EOAI | LADFUL | DAHFI | DAEMI)) == 0) comedi_error(dev, "spurious interrupt"); #endif /* check for analog output interrupt */ if (status & (DAHFI | DAEMI)) handle_ao_interrupt(dev, status); /* check for analog input interrupts */ /* if fifo half-full */ if (status & ADHFI) { /* read data */ num_samples = half_fifo; if (async->cmd.stop_src == TRIG_COUNT && num_samples > devpriv->count) { num_samples = devpriv->count; } insw(devpriv->adc_fifo + ADCDATA, devpriv->ai_buffer, num_samples); cfc_write_array_to_buffer(s, devpriv->ai_buffer, num_samples * sizeof(short)); devpriv->count -= num_samples; if (async->cmd.stop_src == TRIG_COUNT && devpriv->count == 0) { async->events |= 
COMEDI_CB_EOA; cb_pcidas_cancel(dev, s); } /* clear half-full interrupt latch */ spin_lock_irqsave(&dev->spinlock, flags); outw(devpriv->adc_fifo_bits | INT, devpriv->control_status + INT_ADCFIFO); spin_unlock_irqrestore(&dev->spinlock, flags); /* else if fifo not empty */ } else if (status & (ADNEI | EOBI)) { for (i = 0; i < timeout; i++) { /* break if fifo is empty */ if ((ADNE & inw(devpriv->control_status + INT_ADCFIFO)) == 0) break; cfc_write_to_buffer(s, inw(devpriv->adc_fifo)); if (async->cmd.stop_src == TRIG_COUNT && --devpriv->count == 0) { /* end of acquisition */ cb_pcidas_cancel(dev, s); async->events |= COMEDI_CB_EOA; break; } } /* clear not-empty interrupt latch */ spin_lock_irqsave(&dev->spinlock, flags); outw(devpriv->adc_fifo_bits | INT, devpriv->control_status + INT_ADCFIFO); spin_unlock_irqrestore(&dev->spinlock, flags); } else if (status & EOAI) { comedi_error(dev, "bug! encountered end of acquisition interrupt?"); /* clear EOA interrupt latch */ spin_lock_irqsave(&dev->spinlock, flags); outw(devpriv->adc_fifo_bits | EOAI, devpriv->control_status + INT_ADCFIFO); spin_unlock_irqrestore(&dev->spinlock, flags); } /* check for fifo overflow */ if (status & LADFUL) { comedi_error(dev, "fifo overflow"); /* clear overflow interrupt latch */ spin_lock_irqsave(&dev->spinlock, flags); outw(devpriv->adc_fifo_bits | LADFUL, devpriv->control_status + INT_ADCFIFO); spin_unlock_irqrestore(&dev->spinlock, flags); cb_pcidas_cancel(dev, s); async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR; } comedi_event(dev, s); return IRQ_HANDLED; } static void handle_ao_interrupt(struct comedi_device *dev, unsigned int status) { struct comedi_subdevice *s = dev->write_subdev; struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; unsigned int half_fifo = thisboard->fifo_size / 2; unsigned int num_points; unsigned long flags; async->events = 0; if (status & DAEMI) { /* clear dac empty interrupt latch */ spin_lock_irqsave(&dev->spinlock, flags); outw(devpriv->adc_fifo_bits | DAEMI, devpriv->control_status + INT_ADCFIFO); spin_unlock_irqrestore(&dev->spinlock, flags); if (inw(devpriv->ao_registers + DAC_CSR) & DAC_EMPTY) { if (cmd->stop_src == TRIG_NONE || (cmd->stop_src == TRIG_COUNT && devpriv->ao_count)) { comedi_error(dev, "dac fifo underflow"); cb_pcidas_ao_cancel(dev, s); async->events |= COMEDI_CB_ERROR; } async->events |= COMEDI_CB_EOA; } } else if (status & DAHFI) { unsigned int num_bytes; /* figure out how many points we are writing to fifo */ num_points = half_fifo; if (cmd->stop_src == TRIG_COUNT && devpriv->ao_count < num_points) num_points = devpriv->ao_count; num_bytes = cfc_read_array_from_buffer(s, devpriv->ao_buffer, num_points * sizeof(short)); num_points = num_bytes / sizeof(short); if (async->cmd.stop_src == TRIG_COUNT) devpriv->ao_count -= num_points; /* write data to board's fifo */ outsw(devpriv->ao_registers + DACDATA, devpriv->ao_buffer, num_points); /* clear half-full interrupt latch */ spin_lock_irqsave(&dev->spinlock, flags); outw(devpriv->adc_fifo_bits | DAHFI, devpriv->control_status + INT_ADCFIFO); spin_unlock_irqrestore(&dev->spinlock, flags); } comedi_event(dev, s); } /* cancel analog input command */ static int cb_pcidas_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { unsigned long flags; spin_lock_irqsave(&dev->spinlock, flags); /* disable interrupts */ devpriv->adc_fifo_bits &= ~INTE & ~EOAIE; outw(devpriv->adc_fifo_bits, devpriv->control_status + INT_ADCFIFO); spin_unlock_irqrestore(&dev->spinlock, flags); /* disable start 
trigger source and burst mode */ outw(0, devpriv->control_status + TRIG_CONTSTAT); /* software pacer source */ outw(0, devpriv->control_status + ADCMUX_CONT); return 0; } /* cancel analog output command */ static int cb_pcidas_ao_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { unsigned long flags; spin_lock_irqsave(&dev->spinlock, flags); /* disable interrupts */ devpriv->adc_fifo_bits &= ~DAHFIE & ~DAEMIE; outw(devpriv->adc_fifo_bits, devpriv->control_status + INT_ADCFIFO); /* disable output */ devpriv->ao_control_bits &= ~DACEN & ~DAC_PACER_MASK; outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR); spin_unlock_irqrestore(&dev->spinlock, flags); return 0; } static void cb_pcidas_load_counters(struct comedi_device *dev, unsigned int *ns, int rounding_flags) { i8253_cascade_ns_to_timer_2div(TIMER_BASE, &(devpriv->divisor1), &(devpriv->divisor2), ns, rounding_flags & TRIG_ROUND_MASK); /* Write the values of ctr1 and ctr2 into counters 1 and 2 */ i8254_load(devpriv->pacer_counter_dio + ADC8254, 0, 1, devpriv->divisor1, 2); i8254_load(devpriv->pacer_counter_dio + ADC8254, 0, 2, devpriv->divisor2, 2); } static void write_calibration_bitstream(struct comedi_device *dev, unsigned int register_bits, unsigned int bitstream, unsigned int bitstream_length) { static const int write_delay = 1; unsigned int bit; for (bit = 1 << (bitstream_length - 1); bit; bit >>= 1) { if (bitstream & bit) register_bits |= SERIAL_DATA_IN_BIT; else register_bits &= ~SERIAL_DATA_IN_BIT; udelay(write_delay); outw(register_bits, devpriv->control_status + CALIBRATION_REG); } } static int caldac_8800_write(struct comedi_device *dev, unsigned int address, uint8_t value) { static const int num_caldac_channels = 8; static const int bitstream_length = 11; unsigned int bitstream = ((address & 0x7) << 8) | value; static const int caldac_8800_udelay = 1; if (address >= num_caldac_channels) { comedi_error(dev, "illegal caldac channel"); return -1; } if (value == devpriv->caldac_value[address]) return 1; devpriv->caldac_value[address] = value; write_calibration_bitstream(dev, cal_enable_bits(dev), bitstream, bitstream_length); udelay(caldac_8800_udelay); outw(cal_enable_bits(dev) | SELECT_8800_BIT, devpriv->control_status + CALIBRATION_REG); udelay(caldac_8800_udelay); outw(cal_enable_bits(dev), devpriv->control_status + CALIBRATION_REG); return 1; } static int trimpot_7376_write(struct comedi_device *dev, uint8_t value) { static const int bitstream_length = 7; unsigned int bitstream = value & 0x7f; unsigned int register_bits; static const int ad7376_udelay = 1; register_bits = cal_enable_bits(dev) | SELECT_TRIMPOT_BIT; udelay(ad7376_udelay); outw(register_bits, devpriv->control_status + CALIBRATION_REG); write_calibration_bitstream(dev, register_bits, bitstream, bitstream_length); udelay(ad7376_udelay); outw(cal_enable_bits(dev), devpriv->control_status + CALIBRATION_REG); return 0; } /* For 1602/16 only * ch 0 : adc gain * ch 1 : adc postgain offset */ static int trimpot_8402_write(struct comedi_device *dev, unsigned int channel, uint8_t value) { static const int bitstream_length = 10; unsigned int bitstream = ((channel & 0x3) << 8) | (value & 0xff); unsigned int register_bits; static const int ad8402_udelay = 1; register_bits = cal_enable_bits(dev) | SELECT_TRIMPOT_BIT; udelay(ad8402_udelay); outw(register_bits, devpriv->control_status + CALIBRATION_REG); write_calibration_bitstream(dev, register_bits, bitstream, bitstream_length); udelay(ad8402_udelay); outw(cal_enable_bits(dev), 
devpriv->control_status + CALIBRATION_REG); return 0; } static int wait_for_nvram_ready(unsigned long s5933_base_addr) { static const int timeout = 1000; unsigned int i; for (i = 0; i < timeout; i++) { if ((inb(s5933_base_addr + AMCC_OP_REG_MCSR_NVCMD) & MCSR_NV_BUSY) == 0) return 0; udelay(1); } return -1; } static int nvram_read(struct comedi_device *dev, unsigned int address, uint8_t *data) { unsigned long iobase = devpriv->s5933_config; if (wait_for_nvram_ready(iobase) < 0) return -ETIMEDOUT; outb(MCSR_NV_ENABLE | MCSR_NV_LOAD_LOW_ADDR, iobase + AMCC_OP_REG_MCSR_NVCMD); outb(address & 0xff, iobase + AMCC_OP_REG_MCSR_NVDATA); outb(MCSR_NV_ENABLE | MCSR_NV_LOAD_HIGH_ADDR, iobase + AMCC_OP_REG_MCSR_NVCMD); outb((address >> 8) & 0xff, iobase + AMCC_OP_REG_MCSR_NVDATA); outb(MCSR_NV_ENABLE | MCSR_NV_READ, iobase + AMCC_OP_REG_MCSR_NVCMD); if (wait_for_nvram_ready(iobase) < 0) return -ETIMEDOUT; *data = inb(iobase + AMCC_OP_REG_MCSR_NVDATA); return 0; } /* * A convenient macro that defines init_module() and cleanup_module(), * as necessary. */ static int __devinit driver_cb_pcidas_pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { return comedi_pci_auto_config(dev, driver_cb_pcidas.driver_name); } static void __devexit driver_cb_pcidas_pci_remove(struct pci_dev *dev) { comedi_pci_auto_unconfig(dev); } static struct pci_driver driver_cb_pcidas_pci_driver = { .id_table = cb_pcidas_pci_table, .probe = &driver_cb_pcidas_pci_probe, .remove = __devexit_p(&driver_cb_pcidas_pci_remove) }; static int __init driver_cb_pcidas_init_module(void) { int retval; retval = comedi_driver_register(&driver_cb_pcidas); if (retval < 0) return retval; driver_cb_pcidas_pci_driver.name = (char *)driver_cb_pcidas.driver_name; return pci_register_driver(&driver_cb_pcidas_pci_driver); } static void __exit driver_cb_pcidas_cleanup_module(void) { pci_unregister_driver(&driver_cb_pcidas_pci_driver); comedi_driver_unregister(&driver_cb_pcidas); } module_init(driver_cb_pcidas_init_module); module_exit(driver_cb_pcidas_cleanup_module); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
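A small user-space sketch, not part of the file above, of the one-shot ("software-triggered") read path that cb_pcidas_ai_rinsn() services, written against comedilib. The device node /dev/comedi0 and the channel/range choices are illustrative assumptions; subdevice 0 being analog input follows from the attach routine.

#include <stdio.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev;
	lsampl_t sample;

	dev = comedi_open("/dev/comedi0");	/* assumed device node */
	if (!dev) {
		comedi_perror("comedi_open");
		return 1;
	}

	/* Subdevice 0 is the analog input subdevice (see the attach routine
	 * above); read channel 0 on range index 0, ground referenced. */
	if (comedi_data_read(dev, 0, 0, 0, AREF_GROUND, &sample) < 0) {
		comedi_perror("comedi_data_read");
		comedi_close(dev);
		return 1;
	}

	printf("raw sample: %u\n", sample);
	comedi_close(dev);
	return 0;
}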
leehz/android_kernel_samsung_ms013g
arch/hexagon/kernel/vdso.c
7200
2541
/*
 * vDSO implementation for Hexagon
 *
 * Copyright (c) 2011, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/binfmts.h>

#include <asm/vdso.h>

static struct page *vdso_page;

/* Create a vDSO page holding the signal trampoline.
 * We want this for a non-executable stack.
 */
static int __init vdso_init(void)
{
	struct hexagon_vdso *vdso;

	vdso_page = alloc_page(GFP_KERNEL);
	if (!vdso_page)
		panic("Cannot allocate vdso");

	vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);
	if (!vdso)
		panic("Cannot map vdso");
	clear_page(vdso);

	/* Install the signal trampoline; currently looks like this:
	 *	r6 = #__NR_rt_sigreturn;
	 *	trap0(#1);
	 */
	vdso->rt_signal_trampoline[0] = __rt_sigtramp_template[0];
	vdso->rt_signal_trampoline[1] = __rt_sigtramp_template[1];

	vunmap(vdso);

	return 0;
}
arch_initcall(vdso_init);

/*
 * Called from binfmt_elf. Create a VMA for the vDSO page.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	int ret;
	unsigned long vdso_base;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);

	/* Try to get it loaded right near ld.so/glibc. */
	vdso_base = STACK_TOP;

	vdso_base = get_unmapped_area(NULL, vdso_base, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = vdso_base;
		goto up_fail;
	}

	/* MAYWRITE to allow gdb to COW and set breakpoints. */
	ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				      &vdso_page);
	if (ret)
		goto up_fail;

	mm->context.vdso = (void *)vdso_base;

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
		return "[vdso]";

	return NULL;
}
gpl-2.0
ztemt/Z5S_NX503A_KitKat_kernel
arch/powerpc/sysdev/dcr.c
7456
6158
/* * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp. * <benh@kernel.crashing.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #undef DEBUG #include <linux/kernel.h> #include <linux/export.h> #include <asm/prom.h> #include <asm/dcr.h> #ifdef CONFIG_PPC_DCR_MMIO static struct device_node *find_dcr_parent(struct device_node *node) { struct device_node *par, *tmp; const u32 *p; for (par = of_node_get(node); par;) { if (of_get_property(par, "dcr-controller", NULL)) break; p = of_get_property(par, "dcr-parent", NULL); tmp = par; if (p == NULL) par = of_get_parent(par); else par = of_find_node_by_phandle(*p); of_node_put(tmp); } return par; } #endif #if defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) bool dcr_map_ok_generic(dcr_host_t host) { if (host.type == DCR_HOST_NATIVE) return dcr_map_ok_native(host.host.native); else if (host.type == DCR_HOST_MMIO) return dcr_map_ok_mmio(host.host.mmio); else return 0; } EXPORT_SYMBOL_GPL(dcr_map_ok_generic); dcr_host_t dcr_map_generic(struct device_node *dev, unsigned int dcr_n, unsigned int dcr_c) { dcr_host_t host; struct device_node *dp; const char *prop; host.type = DCR_HOST_INVALID; dp = find_dcr_parent(dev); if (dp == NULL) return host; prop = of_get_property(dp, "dcr-access-method", NULL); pr_debug("dcr_map_generic(dcr-access-method = %s)\n", prop); if (!strcmp(prop, "native")) { host.type = DCR_HOST_NATIVE; host.host.native = dcr_map_native(dev, dcr_n, dcr_c); } else if (!strcmp(prop, "mmio")) { host.type = DCR_HOST_MMIO; host.host.mmio = dcr_map_mmio(dev, dcr_n, dcr_c); } of_node_put(dp); return host; } EXPORT_SYMBOL_GPL(dcr_map_generic); void dcr_unmap_generic(dcr_host_t host, unsigned int dcr_c) { if (host.type == DCR_HOST_NATIVE) dcr_unmap_native(host.host.native, dcr_c); else if (host.type == DCR_HOST_MMIO) dcr_unmap_mmio(host.host.mmio, dcr_c); else /* host.type == DCR_HOST_INVALID */ WARN_ON(true); } EXPORT_SYMBOL_GPL(dcr_unmap_generic); u32 dcr_read_generic(dcr_host_t host, unsigned int dcr_n) { if (host.type == DCR_HOST_NATIVE) return dcr_read_native(host.host.native, dcr_n); else if (host.type == DCR_HOST_MMIO) return dcr_read_mmio(host.host.mmio, dcr_n); else /* host.type == DCR_HOST_INVALID */ WARN_ON(true); return 0; } EXPORT_SYMBOL_GPL(dcr_read_generic); void dcr_write_generic(dcr_host_t host, unsigned int dcr_n, u32 value) { if (host.type == DCR_HOST_NATIVE) dcr_write_native(host.host.native, dcr_n, value); else if (host.type == DCR_HOST_MMIO) dcr_write_mmio(host.host.mmio, dcr_n, value); else /* host.type == DCR_HOST_INVALID */ WARN_ON(true); } EXPORT_SYMBOL_GPL(dcr_write_generic); #endif /* defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) */ unsigned int dcr_resource_start(const struct device_node *np, unsigned int index) { unsigned int ds; const u32 *dr = of_get_property(np, "dcr-reg", &ds); if (dr == NULL || ds & 1 || index >= (ds / 8)) return 0; 
return dr[index * 2]; } EXPORT_SYMBOL_GPL(dcr_resource_start); unsigned int dcr_resource_len(const struct device_node *np, unsigned int index) { unsigned int ds; const u32 *dr = of_get_property(np, "dcr-reg", &ds); if (dr == NULL || ds & 1 || index >= (ds / 8)) return 0; return dr[index * 2 + 1]; } EXPORT_SYMBOL_GPL(dcr_resource_len); #ifdef CONFIG_PPC_DCR_MMIO u64 of_translate_dcr_address(struct device_node *dev, unsigned int dcr_n, unsigned int *out_stride) { struct device_node *dp; const u32 *p; unsigned int stride; u64 ret = OF_BAD_ADDR; dp = find_dcr_parent(dev); if (dp == NULL) return OF_BAD_ADDR; /* Stride is not properly defined yet, default to 0x10 for Axon */ p = of_get_property(dp, "dcr-mmio-stride", NULL); stride = (p == NULL) ? 0x10 : *p; /* XXX FIXME: Which property name is to use of the 2 following ? */ p = of_get_property(dp, "dcr-mmio-range", NULL); if (p == NULL) p = of_get_property(dp, "dcr-mmio-space", NULL); if (p == NULL) goto done; /* Maybe could do some better range checking here */ ret = of_translate_address(dp, p); if (ret != OF_BAD_ADDR) ret += (u64)(stride) * (u64)dcr_n; if (out_stride) *out_stride = stride; done: of_node_put(dp); return ret; } dcr_host_mmio_t dcr_map_mmio(struct device_node *dev, unsigned int dcr_n, unsigned int dcr_c) { dcr_host_mmio_t ret = { .token = NULL, .stride = 0, .base = dcr_n }; u64 addr; pr_debug("dcr_map(%s, 0x%x, 0x%x)\n", dev->full_name, dcr_n, dcr_c); addr = of_translate_dcr_address(dev, dcr_n, &ret.stride); pr_debug("translates to addr: 0x%llx, stride: 0x%x\n", (unsigned long long) addr, ret.stride); if (addr == OF_BAD_ADDR) return ret; pr_debug("mapping 0x%x bytes\n", dcr_c * ret.stride); ret.token = ioremap(addr, dcr_c * ret.stride); if (ret.token == NULL) return ret; pr_debug("mapped at 0x%p -> base is 0x%p\n", ret.token, ret.token - dcr_n * ret.stride); ret.token -= dcr_n * ret.stride; return ret; } EXPORT_SYMBOL_GPL(dcr_map_mmio); void dcr_unmap_mmio(dcr_host_mmio_t host, unsigned int dcr_c) { dcr_host_mmio_t h = host; if (h.token == NULL) return; h.token += host.base * h.stride; iounmap(h.token); h.token = NULL; } EXPORT_SYMBOL_GPL(dcr_unmap_mmio); #endif /* defined(CONFIG_PPC_DCR_MMIO) */ #ifdef CONFIG_PPC_DCR_NATIVE DEFINE_SPINLOCK(dcr_ind_lock); #endif /* defined(CONFIG_PPC_DCR_NATIVE) */
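/*
 * Illustrative sketch (not part of this file): how a driver might
 * consume the generic DCR API above for a device node carrying a
 * "dcr-reg" property. This shape only applies when both
 * CONFIG_PPC_DCR_NATIVE and CONFIG_PPC_DCR_MMIO are set (otherwise
 * asm/dcr.h maps dcr_map() and friends straight to the one configured
 * backend). The function name and the use of resource index 0 are
 * placeholders; error handling is abbreviated.
 */
static int example_dcr_probe(struct device_node *np)
{
	unsigned int base = dcr_resource_start(np, 0);
	unsigned int len = dcr_resource_len(np, 0);
	dcr_host_t host;
	u32 val;

	if (!len)
		return -ENODEV;		/* missing or malformed "dcr-reg" */

	host = dcr_map_generic(np, base, len);
	if (!dcr_map_ok_generic(host))
		return -ENOMEM;

	val = dcr_read_generic(host, 0);	/* first DCR of the mapped range */
	dcr_write_generic(host, 0, val);

	dcr_unmap_generic(host, len);
	return 0;
}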
gpl-2.0
santod/android_kernel_htc_m7vzw
drivers/s390/char/tape_char.c
7968
11925
/* * drivers/s390/char/tape_char.c * character device frontend for tape device driver * * S390 and zSeries version * Copyright IBM Corp. 2001,2006 * Author(s): Carsten Otte <cotte@de.ibm.com> * Michael Holzheu <holzheu@de.ibm.com> * Tuan Ngo-Anh <ngoanh@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> */ #define KMSG_COMPONENT "tape" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/types.h> #include <linux/proc_fs.h> #include <linux/mtio.h> #include <linux/compat.h> #include <asm/uaccess.h> #define TAPE_DBF_AREA tape_core_dbf #include "tape.h" #include "tape_std.h" #include "tape_class.h" #define TAPECHAR_MAJOR 0 /* get dynamic major */ /* * file operation structure for tape character frontend */ static ssize_t tapechar_read(struct file *, char __user *, size_t, loff_t *); static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t *); static int tapechar_open(struct inode *,struct file *); static int tapechar_release(struct inode *,struct file *); static long tapechar_ioctl(struct file *, unsigned int, unsigned long); #ifdef CONFIG_COMPAT static long tapechar_compat_ioctl(struct file *, unsigned int, unsigned long); #endif static const struct file_operations tape_fops = { .owner = THIS_MODULE, .read = tapechar_read, .write = tapechar_write, .unlocked_ioctl = tapechar_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = tapechar_compat_ioctl, #endif .open = tapechar_open, .release = tapechar_release, .llseek = no_llseek, }; static int tapechar_major = TAPECHAR_MAJOR; /* * This function is called for every new tapedevice */ int tapechar_setup_device(struct tape_device * device) { char device_name[20]; sprintf(device_name, "ntibm%i", device->first_minor / 2); device->nt = register_tape_dev( &device->cdev->dev, MKDEV(tapechar_major, device->first_minor), &tape_fops, device_name, "non-rewinding" ); device_name[0] = 'r'; device->rt = register_tape_dev( &device->cdev->dev, MKDEV(tapechar_major, device->first_minor + 1), &tape_fops, device_name, "rewinding" ); return 0; } void tapechar_cleanup_device(struct tape_device *device) { unregister_tape_dev(&device->cdev->dev, device->rt); device->rt = NULL; unregister_tape_dev(&device->cdev->dev, device->nt); device->nt = NULL; } static int tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) { struct idal_buffer *new; if (device->char_data.idal_buf != NULL && device->char_data.idal_buf->size == block_size) return 0; if (block_size > MAX_BLOCKSIZE) { DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n", block_size, MAX_BLOCKSIZE); return -EINVAL; } /* The current idal buffer is not correct. Allocate a new one. */ new = idal_buffer_alloc(block_size, 0); if (IS_ERR(new)) return -ENOMEM; if (device->char_data.idal_buf != NULL) idal_buffer_free(device->char_data.idal_buf); device->char_data.idal_buf = new; return 0; } /* * Tape device read function */ static ssize_t tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) { struct tape_device *device; struct tape_request *request; size_t block_size; int rc; DBF_EVENT(6, "TCHAR:read\n"); device = (struct tape_device *) filp->private_data; /* * If the tape isn't terminated yet, do it now. And since we then * are at the end of the tape there wouldn't be anything to read * anyways. So we return immediately. 
*/ if(device->required_tapemarks) { return tape_std_terminate_write(device); } /* Find out block size to use */ if (device->char_data.block_size != 0) { if (count < device->char_data.block_size) { DBF_EVENT(3, "TCHAR:read smaller than block " "size was requested\n"); return -EINVAL; } block_size = device->char_data.block_size; } else { block_size = count; } rc = tapechar_check_idalbuffer(device, block_size); if (rc) return rc; #ifdef CONFIG_S390_TAPE_BLOCK /* Changes position. */ device->blk_data.medium_changed = 1; #endif DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size); /* Let the discipline build the ccw chain. */ request = device->discipline->read_block(device, block_size); if (IS_ERR(request)) return PTR_ERR(request); /* Execute it. */ rc = tape_do_io(device, request); if (rc == 0) { rc = block_size - request->rescnt; DBF_EVENT(6, "TCHAR:rbytes: %x\n", rc); /* Copy data from idal buffer to user space. */ if (idal_buffer_to_user(device->char_data.idal_buf, data, rc) != 0) rc = -EFAULT; } tape_free_request(request); return rc; } /* * Tape device write function */ static ssize_t tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos) { struct tape_device *device; struct tape_request *request; size_t block_size; size_t written; int nblocks; int i, rc; DBF_EVENT(6, "TCHAR:write\n"); device = (struct tape_device *) filp->private_data; /* Find out block size and number of blocks */ if (device->char_data.block_size != 0) { if (count < device->char_data.block_size) { DBF_EVENT(3, "TCHAR:write smaller than block " "size was requested\n"); return -EINVAL; } block_size = device->char_data.block_size; nblocks = count / block_size; } else { block_size = count; nblocks = 1; } rc = tapechar_check_idalbuffer(device, block_size); if (rc) return rc; #ifdef CONFIG_S390_TAPE_BLOCK /* Changes position. */ device->blk_data.medium_changed = 1; #endif DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size); DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks); /* Let the discipline build the ccw chain. */ request = device->discipline->write_block(device, block_size); if (IS_ERR(request)) return PTR_ERR(request); rc = 0; written = 0; for (i = 0; i < nblocks; i++) { /* Copy data from user space to idal buffer. */ if (idal_buffer_from_user(device->char_data.idal_buf, data, block_size)) { rc = -EFAULT; break; } rc = tape_do_io(device, request); if (rc) break; DBF_EVENT(6, "TCHAR:wbytes: %lx\n", block_size - request->rescnt); written += block_size - request->rescnt; if (request->rescnt != 0) break; data += block_size; } tape_free_request(request); if (rc == -ENOSPC) { /* * Ok, the device has no more space. It has NOT written * the block. */ if (device->discipline->process_eov) device->discipline->process_eov(device); if (written > 0) rc = 0; } /* * After doing a write we always need two tapemarks to correctly * terminate the tape (one to terminate the file, the second to * flag the end of recorded data. * Since process_eov positions the tape in front of the written * tapemark it doesn't hurt to write two marks again. */ if (!rc) device->required_tapemarks = 2; return rc ? rc : written; } /* * Character frontend tape device open function. 
*/ static int tapechar_open (struct inode *inode, struct file *filp) { struct tape_device *device; int minor, rc; DBF_EVENT(6, "TCHAR:open: %i:%i\n", imajor(filp->f_path.dentry->d_inode), iminor(filp->f_path.dentry->d_inode)); if (imajor(filp->f_path.dentry->d_inode) != tapechar_major) return -ENODEV; minor = iminor(filp->f_path.dentry->d_inode); device = tape_find_device(minor / TAPE_MINORS_PER_DEV); if (IS_ERR(device)) { DBF_EVENT(3, "TCHAR:open: tape_find_device() failed\n"); return PTR_ERR(device); } rc = tape_open(device); if (rc == 0) { filp->private_data = device; nonseekable_open(inode, filp); } else tape_put_device(device); return rc; } /* * Character frontend tape device release function. */ static int tapechar_release(struct inode *inode, struct file *filp) { struct tape_device *device; DBF_EVENT(6, "TCHAR:release: %x\n", iminor(inode)); device = (struct tape_device *) filp->private_data; /* * If this is the rewinding tape minor then rewind. In that case we * write all required tapemarks. Otherwise only one to terminate the * file. */ if ((iminor(inode) & 1) != 0) { if (device->required_tapemarks) tape_std_terminate_write(device); tape_mtop(device, MTREW, 1); } else { if (device->required_tapemarks > 1) { if (tape_mtop(device, MTWEOF, 1) == 0) device->required_tapemarks--; } } if (device->char_data.idal_buf != NULL) { idal_buffer_free(device->char_data.idal_buf); device->char_data.idal_buf = NULL; } tape_release(device); filp->private_data = NULL; tape_put_device(device); return 0; } /* * Tape device io controls. */ static int __tapechar_ioctl(struct tape_device *device, unsigned int no, unsigned long data) { int rc; if (no == MTIOCTOP) { struct mtop op; if (copy_from_user(&op, (char __user *) data, sizeof(op)) != 0) return -EFAULT; if (op.mt_count < 0) return -EINVAL; /* * Operations that change tape position should write final * tapemarks. */ switch (op.mt_op) { case MTFSF: case MTBSF: case MTFSR: case MTBSR: case MTREW: case MTOFFL: case MTEOM: case MTRETEN: case MTBSFM: case MTFSFM: case MTSEEK: #ifdef CONFIG_S390_TAPE_BLOCK device->blk_data.medium_changed = 1; #endif if (device->required_tapemarks) tape_std_terminate_write(device); default: ; } rc = tape_mtop(device, op.mt_op, op.mt_count); if (op.mt_op == MTWEOF && rc == 0) { if (op.mt_count > device->required_tapemarks) device->required_tapemarks = 0; else device->required_tapemarks -= op.mt_count; } return rc; } if (no == MTIOCPOS) { /* MTIOCPOS: query the tape position. */ struct mtpos pos; rc = tape_mtop(device, MTTELL, 1); if (rc < 0) return rc; pos.mt_blkno = rc; if (copy_to_user((char __user *) data, &pos, sizeof(pos)) != 0) return -EFAULT; return 0; } if (no == MTIOCGET) { /* MTIOCGET: query the tape drive status. */ struct mtget get; memset(&get, 0, sizeof(get)); get.mt_type = MT_ISUNKNOWN; get.mt_resid = 0 /* device->devstat.rescnt */; get.mt_dsreg = device->tape_state; /* FIXME: mt_gstat, mt_erreg, mt_fileno */ get.mt_gstat = 0; get.mt_erreg = 0; get.mt_fileno = 0; get.mt_gstat = device->tape_generic_status; if (device->medium_state == MS_LOADED) { rc = tape_mtop(device, MTTELL, 1); if (rc < 0) return rc; if (rc == 0) get.mt_gstat |= GMT_BOT(~0); get.mt_blkno = rc; } if (copy_to_user((char __user *) data, &get, sizeof(get)) != 0) return -EFAULT; return 0; } /* Try the discipline ioctl function. 
*/ if (device->discipline->ioctl_fn == NULL) return -EINVAL; return device->discipline->ioctl_fn(device, no, data); } static long tapechar_ioctl(struct file *filp, unsigned int no, unsigned long data) { struct tape_device *device; long rc; DBF_EVENT(6, "TCHAR:ioct\n"); device = (struct tape_device *) filp->private_data; mutex_lock(&device->mutex); rc = __tapechar_ioctl(device, no, data); mutex_unlock(&device->mutex); return rc; } #ifdef CONFIG_COMPAT static long tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data) { struct tape_device *device = filp->private_data; int rval = -ENOIOCTLCMD; unsigned long argp; /* The 'arg' argument of any ioctl function may only be used for * pointers because of the compat pointer conversion. * Consider this when adding new ioctls. */ argp = (unsigned long) compat_ptr(data); if (device->discipline->ioctl_fn) { mutex_lock(&device->mutex); rval = device->discipline->ioctl_fn(device, no, argp); mutex_unlock(&device->mutex); if (rval == -EINVAL) rval = -ENOIOCTLCMD; } return rval; } #endif /* CONFIG_COMPAT */ /* * Initialize character device frontend. */ int tapechar_init (void) { dev_t dev; if (alloc_chrdev_region(&dev, 0, 256, "tape") != 0) return -1; tapechar_major = MAJOR(dev); return 0; } /* * cleanup */ void tapechar_exit(void) { unregister_chrdev_region(MKDEV(tapechar_major, 0), 256); }
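/*
 * Illustrative user-space sketch (not part of the driver): exercising
 * the character nodes registered by tapechar_setup_device() with the
 * standard mt ioctls that __tapechar_ioctl() handles. The device path
 * is an example and error handling is abbreviated.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mtio.h>
#include <unistd.h>

int main(void)
{
	struct mtop op = { .mt_op = MTREW, .mt_count = 1 };
	struct mtget status;
	int fd = open("/dev/ntibm0", O_RDONLY);	/* non-rewinding node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, MTIOCTOP, &op) == 0 &&	/* rewinds via tape_mtop() */
	    ioctl(fd, MTIOCGET, &status) == 0)
		printf("gstat 0x%lx, blkno %ld\n",
		       status.mt_gstat, (long)status.mt_blkno);
	close(fd);
	return 0;
}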
gpl-2.0
staceyson/qemu-bsd-user-old
target-xtensa/translate.c
33
101671
/* * Xtensa ISA: * http://www.tensilica.com/products/literature-docs/documentation/xtensa-isa-databook.htm * * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the Open Source and Linux Lab nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include "cpu.h" #include "exec/exec-all.h" #include "disas/disas.h" #include "tcg-op.h" #include "qemu/log.h" #include "sysemu/sysemu.h" #include "helper.h" #define GEN_HELPER 1 #include "helper.h" typedef struct DisasContext { const XtensaConfig *config; TranslationBlock *tb; uint32_t pc; uint32_t next_pc; int cring; int ring; uint32_t lbeg; uint32_t lend; TCGv_i32 litbase; int is_jmp; int singlestep_enabled; bool sar_5bit; bool sar_m32_5bit; bool sar_m32_allocated; TCGv_i32 sar_m32; uint32_t ccount_delta; unsigned used_window; bool debug; bool icount; TCGv_i32 next_icount; unsigned cpenable; } DisasContext; static TCGv_ptr cpu_env; static TCGv_i32 cpu_pc; static TCGv_i32 cpu_R[16]; static TCGv_i32 cpu_FR[16]; static TCGv_i32 cpu_SR[256]; static TCGv_i32 cpu_UR[256]; #include "exec/gen-icount.h" typedef struct XtensaReg { const char *name; uint64_t opt_bits; enum { SR_R = 1, SR_W = 2, SR_X = 4, SR_RW = 3, SR_RWX = 7, } access; } XtensaReg; #define XTENSA_REG_ACCESS(regname, opt, acc) { \ .name = (regname), \ .opt_bits = XTENSA_OPTION_BIT(opt), \ .access = (acc), \ } #define XTENSA_REG(regname, opt) XTENSA_REG_ACCESS(regname, opt, SR_RWX) #define XTENSA_REG_BITS(regname, opt) { \ .name = (regname), \ .opt_bits = (opt), \ .access = SR_RWX, \ } static const XtensaReg sregnames[256] = { [LBEG] = XTENSA_REG("LBEG", XTENSA_OPTION_LOOP), [LEND] = XTENSA_REG("LEND", XTENSA_OPTION_LOOP), [LCOUNT] = XTENSA_REG("LCOUNT", XTENSA_OPTION_LOOP), [SAR] = XTENSA_REG_BITS("SAR", XTENSA_OPTION_ALL), [BR] = XTENSA_REG("BR", XTENSA_OPTION_BOOLEAN), [LITBASE] = XTENSA_REG("LITBASE", XTENSA_OPTION_EXTENDED_L32R), [SCOMPARE1] = XTENSA_REG("SCOMPARE1", XTENSA_OPTION_CONDITIONAL_STORE), [ACCLO] = XTENSA_REG("ACCLO", XTENSA_OPTION_MAC16), [ACCHI] = XTENSA_REG("ACCHI", XTENSA_OPTION_MAC16), [MR] = XTENSA_REG("MR0", XTENSA_OPTION_MAC16), [MR + 1] = XTENSA_REG("MR1", 
XTENSA_OPTION_MAC16), [MR + 2] = XTENSA_REG("MR2", XTENSA_OPTION_MAC16), [MR + 3] = XTENSA_REG("MR3", XTENSA_OPTION_MAC16), [WINDOW_BASE] = XTENSA_REG("WINDOW_BASE", XTENSA_OPTION_WINDOWED_REGISTER), [WINDOW_START] = XTENSA_REG("WINDOW_START", XTENSA_OPTION_WINDOWED_REGISTER), [PTEVADDR] = XTENSA_REG("PTEVADDR", XTENSA_OPTION_MMU), [RASID] = XTENSA_REG("RASID", XTENSA_OPTION_MMU), [ITLBCFG] = XTENSA_REG("ITLBCFG", XTENSA_OPTION_MMU), [DTLBCFG] = XTENSA_REG("DTLBCFG", XTENSA_OPTION_MMU), [IBREAKENABLE] = XTENSA_REG("IBREAKENABLE", XTENSA_OPTION_DEBUG), [CACHEATTR] = XTENSA_REG("CACHEATTR", XTENSA_OPTION_CACHEATTR), [ATOMCTL] = XTENSA_REG("ATOMCTL", XTENSA_OPTION_ATOMCTL), [IBREAKA] = XTENSA_REG("IBREAKA0", XTENSA_OPTION_DEBUG), [IBREAKA + 1] = XTENSA_REG("IBREAKA1", XTENSA_OPTION_DEBUG), [DBREAKA] = XTENSA_REG("DBREAKA0", XTENSA_OPTION_DEBUG), [DBREAKA + 1] = XTENSA_REG("DBREAKA1", XTENSA_OPTION_DEBUG), [DBREAKC] = XTENSA_REG("DBREAKC0", XTENSA_OPTION_DEBUG), [DBREAKC + 1] = XTENSA_REG("DBREAKC1", XTENSA_OPTION_DEBUG), [EPC1] = XTENSA_REG("EPC1", XTENSA_OPTION_EXCEPTION), [EPC1 + 1] = XTENSA_REG("EPC2", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EPC1 + 2] = XTENSA_REG("EPC3", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EPC1 + 3] = XTENSA_REG("EPC4", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EPC1 + 4] = XTENSA_REG("EPC5", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EPC1 + 5] = XTENSA_REG("EPC6", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EPC1 + 6] = XTENSA_REG("EPC7", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [DEPC] = XTENSA_REG("DEPC", XTENSA_OPTION_EXCEPTION), [EPS2] = XTENSA_REG("EPS2", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EPS2 + 1] = XTENSA_REG("EPS3", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EPS2 + 2] = XTENSA_REG("EPS4", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EPS2 + 3] = XTENSA_REG("EPS5", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EPS2 + 4] = XTENSA_REG("EPS6", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EPS2 + 5] = XTENSA_REG("EPS7", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EXCSAVE1] = XTENSA_REG("EXCSAVE1", XTENSA_OPTION_EXCEPTION), [EXCSAVE1 + 1] = XTENSA_REG("EXCSAVE2", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EXCSAVE1 + 2] = XTENSA_REG("EXCSAVE3", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EXCSAVE1 + 3] = XTENSA_REG("EXCSAVE4", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EXCSAVE1 + 4] = XTENSA_REG("EXCSAVE5", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EXCSAVE1 + 5] = XTENSA_REG("EXCSAVE6", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EXCSAVE1 + 6] = XTENSA_REG("EXCSAVE7", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [CPENABLE] = XTENSA_REG("CPENABLE", XTENSA_OPTION_COPROCESSOR), [INTSET] = XTENSA_REG_ACCESS("INTSET", XTENSA_OPTION_INTERRUPT, SR_RW), [INTCLEAR] = XTENSA_REG_ACCESS("INTCLEAR", XTENSA_OPTION_INTERRUPT, SR_W), [INTENABLE] = XTENSA_REG("INTENABLE", XTENSA_OPTION_INTERRUPT), [PS] = XTENSA_REG_BITS("PS", XTENSA_OPTION_ALL), [VECBASE] = XTENSA_REG("VECBASE", XTENSA_OPTION_RELOCATABLE_VECTOR), [EXCCAUSE] = XTENSA_REG("EXCCAUSE", XTENSA_OPTION_EXCEPTION), [DEBUGCAUSE] = XTENSA_REG_ACCESS("DEBUGCAUSE", XTENSA_OPTION_DEBUG, SR_R), [CCOUNT] = XTENSA_REG("CCOUNT", XTENSA_OPTION_TIMER_INTERRUPT), [PRID] = XTENSA_REG_ACCESS("PRID", XTENSA_OPTION_PROCESSOR_ID, SR_R), [ICOUNT] = XTENSA_REG("ICOUNT", XTENSA_OPTION_DEBUG), [ICOUNTLEVEL] = XTENSA_REG("ICOUNTLEVEL", XTENSA_OPTION_DEBUG), [EXCVADDR] = XTENSA_REG("EXCVADDR", XTENSA_OPTION_EXCEPTION), [CCOMPARE] = XTENSA_REG("CCOMPARE0", XTENSA_OPTION_TIMER_INTERRUPT), [CCOMPARE + 1] = XTENSA_REG("CCOMPARE1", XTENSA_OPTION_TIMER_INTERRUPT), [CCOMPARE + 2] = 
XTENSA_REG("CCOMPARE2", XTENSA_OPTION_TIMER_INTERRUPT), [MISC] = XTENSA_REG("MISC0", XTENSA_OPTION_MISC_SR), [MISC + 1] = XTENSA_REG("MISC1", XTENSA_OPTION_MISC_SR), [MISC + 2] = XTENSA_REG("MISC2", XTENSA_OPTION_MISC_SR), [MISC + 3] = XTENSA_REG("MISC3", XTENSA_OPTION_MISC_SR), }; static const XtensaReg uregnames[256] = { [THREADPTR] = XTENSA_REG("THREADPTR", XTENSA_OPTION_THREAD_POINTER), [FCR] = XTENSA_REG("FCR", XTENSA_OPTION_FP_COPROCESSOR), [FSR] = XTENSA_REG("FSR", XTENSA_OPTION_FP_COPROCESSOR), }; void xtensa_translate_init(void) { static const char * const regnames[] = { "ar0", "ar1", "ar2", "ar3", "ar4", "ar5", "ar6", "ar7", "ar8", "ar9", "ar10", "ar11", "ar12", "ar13", "ar14", "ar15", }; static const char * const fregnames[] = { "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", }; int i; cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env"); cpu_pc = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUXtensaState, pc), "pc"); for (i = 0; i < 16; i++) { cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUXtensaState, regs[i]), regnames[i]); } for (i = 0; i < 16; i++) { cpu_FR[i] = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUXtensaState, fregs[i]), fregnames[i]); } for (i = 0; i < 256; ++i) { if (sregnames[i].name) { cpu_SR[i] = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUXtensaState, sregs[i]), sregnames[i].name); } } for (i = 0; i < 256; ++i) { if (uregnames[i].name) { cpu_UR[i] = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUXtensaState, uregs[i]), uregnames[i].name); } } } static inline bool option_bits_enabled(DisasContext *dc, uint64_t opt) { return xtensa_option_bits_enabled(dc->config, opt); } static inline bool option_enabled(DisasContext *dc, int opt) { return xtensa_option_enabled(dc->config, opt); } static void init_litbase(DisasContext *dc) { if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) { dc->litbase = tcg_temp_local_new_i32(); tcg_gen_andi_i32(dc->litbase, cpu_SR[LITBASE], 0xfffff000); } } static void reset_litbase(DisasContext *dc) { if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) { tcg_temp_free(dc->litbase); } } static void init_sar_tracker(DisasContext *dc) { dc->sar_5bit = false; dc->sar_m32_5bit = false; dc->sar_m32_allocated = false; } static void reset_sar_tracker(DisasContext *dc) { if (dc->sar_m32_allocated) { tcg_temp_free(dc->sar_m32); } } static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa) { tcg_gen_andi_i32(cpu_SR[SAR], sa, 0x1f); if (dc->sar_m32_5bit) { tcg_gen_discard_i32(dc->sar_m32); } dc->sar_5bit = true; dc->sar_m32_5bit = false; } static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa) { TCGv_i32 tmp = tcg_const_i32(32); if (!dc->sar_m32_allocated) { dc->sar_m32 = tcg_temp_local_new_i32(); dc->sar_m32_allocated = true; } tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f); tcg_gen_sub_i32(cpu_SR[SAR], tmp, dc->sar_m32); dc->sar_5bit = false; dc->sar_m32_5bit = true; tcg_temp_free(tmp); } static void gen_advance_ccount_cond(DisasContext *dc) { if (dc->ccount_delta > 0) { TCGv_i32 tmp = tcg_const_i32(dc->ccount_delta); gen_helper_advance_ccount(cpu_env, tmp); tcg_temp_free(tmp); } } static void gen_advance_ccount(DisasContext *dc) { gen_advance_ccount_cond(dc); dc->ccount_delta = 0; } static void reset_used_window(DisasContext *dc) { dc->used_window = 0; } static void gen_exception(DisasContext *dc, int excp) { TCGv_i32 tmp = tcg_const_i32(excp); gen_advance_ccount(dc); gen_helper_exception(cpu_env, tmp); tcg_temp_free(tmp); } static void gen_exception_cause(DisasContext *dc, uint32_t cause) { 
TCGv_i32 tpc = tcg_const_i32(dc->pc); TCGv_i32 tcause = tcg_const_i32(cause); gen_advance_ccount(dc); gen_helper_exception_cause(cpu_env, tpc, tcause); tcg_temp_free(tpc); tcg_temp_free(tcause); if (cause == ILLEGAL_INSTRUCTION_CAUSE || cause == SYSCALL_CAUSE) { dc->is_jmp = DISAS_UPDATE; } } static void gen_exception_cause_vaddr(DisasContext *dc, uint32_t cause, TCGv_i32 vaddr) { TCGv_i32 tpc = tcg_const_i32(dc->pc); TCGv_i32 tcause = tcg_const_i32(cause); gen_advance_ccount(dc); gen_helper_exception_cause_vaddr(cpu_env, tpc, tcause, vaddr); tcg_temp_free(tpc); tcg_temp_free(tcause); } static void gen_debug_exception(DisasContext *dc, uint32_t cause) { TCGv_i32 tpc = tcg_const_i32(dc->pc); TCGv_i32 tcause = tcg_const_i32(cause); gen_advance_ccount(dc); gen_helper_debug_exception(cpu_env, tpc, tcause); tcg_temp_free(tpc); tcg_temp_free(tcause); if (cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BI | DEBUGCAUSE_BN)) { dc->is_jmp = DISAS_UPDATE; } } static void gen_check_privilege(DisasContext *dc) { if (dc->cring) { gen_exception_cause(dc, PRIVILEGED_CAUSE); dc->is_jmp = DISAS_UPDATE; } } static void gen_check_cpenable(DisasContext *dc, unsigned cp) { if (option_enabled(dc, XTENSA_OPTION_COPROCESSOR) && !(dc->cpenable & (1 << cp))) { gen_exception_cause(dc, COPROCESSOR0_DISABLED + cp); dc->is_jmp = DISAS_UPDATE; } } static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot) { tcg_gen_mov_i32(cpu_pc, dest); gen_advance_ccount(dc); if (dc->icount) { tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount); } if (dc->singlestep_enabled) { gen_exception(dc, EXCP_DEBUG); } else { if (slot >= 0) { tcg_gen_goto_tb(slot); tcg_gen_exit_tb((uintptr_t)dc->tb + slot); } else { tcg_gen_exit_tb(0); } } dc->is_jmp = DISAS_UPDATE; } static void gen_jump(DisasContext *dc, TCGv dest) { gen_jump_slot(dc, dest, -1); } static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot) { TCGv_i32 tmp = tcg_const_i32(dest); if (((dc->pc ^ dest) & TARGET_PAGE_MASK) != 0) { slot = -1; } gen_jump_slot(dc, tmp, slot); tcg_temp_free(tmp); } static void gen_callw_slot(DisasContext *dc, int callinc, TCGv_i32 dest, int slot) { TCGv_i32 tcallinc = tcg_const_i32(callinc); tcg_gen_deposit_i32(cpu_SR[PS], cpu_SR[PS], tcallinc, PS_CALLINC_SHIFT, PS_CALLINC_LEN); tcg_temp_free(tcallinc); tcg_gen_movi_i32(cpu_R[callinc << 2], (callinc << 30) | (dc->next_pc & 0x3fffffff)); gen_jump_slot(dc, dest, slot); } static void gen_callw(DisasContext *dc, int callinc, TCGv_i32 dest) { gen_callw_slot(dc, callinc, dest, -1); } static void gen_callwi(DisasContext *dc, int callinc, uint32_t dest, int slot) { TCGv_i32 tmp = tcg_const_i32(dest); if (((dc->pc ^ dest) & TARGET_PAGE_MASK) != 0) { slot = -1; } gen_callw_slot(dc, callinc, tmp, slot); tcg_temp_free(tmp); } static bool gen_check_loop_end(DisasContext *dc, int slot) { if (option_enabled(dc, XTENSA_OPTION_LOOP) && !(dc->tb->flags & XTENSA_TBFLAG_EXCM) && dc->next_pc == dc->lend) { int label = gen_new_label(); gen_advance_ccount(dc); tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_SR[LCOUNT], 0, label); tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_SR[LCOUNT], 1); gen_jumpi(dc, dc->lbeg, slot); gen_set_label(label); gen_jumpi(dc, dc->next_pc, -1); return true; } return false; } static void gen_jumpi_check_loop_end(DisasContext *dc, int slot) { if (!gen_check_loop_end(dc, slot)) { gen_jumpi(dc, dc->next_pc, slot); } } static void gen_brcond(DisasContext *dc, TCGCond cond, TCGv_i32 t0, TCGv_i32 t1, uint32_t offset) { int label = gen_new_label(); gen_advance_ccount(dc); tcg_gen_brcond_i32(cond, t0, t1, label); 
gen_jumpi_check_loop_end(dc, 0); gen_set_label(label); gen_jumpi(dc, dc->pc + offset, 1); } static void gen_brcondi(DisasContext *dc, TCGCond cond, TCGv_i32 t0, uint32_t t1, uint32_t offset) { TCGv_i32 tmp = tcg_const_i32(t1); gen_brcond(dc, cond, t0, tmp, offset); tcg_temp_free(tmp); } static bool gen_check_sr(DisasContext *dc, uint32_t sr, unsigned access) { if (!xtensa_option_bits_enabled(dc->config, sregnames[sr].opt_bits)) { if (sregnames[sr].name) { qemu_log("SR %s is not configured\n", sregnames[sr].name); } else { qemu_log("SR %d is not implemented\n", sr); } gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); return false; } else if (!(sregnames[sr].access & access)) { static const char * const access_text[] = { [SR_R] = "rsr", [SR_W] = "wsr", [SR_X] = "xsr", }; assert(access < ARRAY_SIZE(access_text) && access_text[access]); qemu_log("SR %s is not available for %s\n", sregnames[sr].name, access_text[access]); gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); return false; } return true; } static void gen_rsr_ccount(DisasContext *dc, TCGv_i32 d, uint32_t sr) { gen_advance_ccount(dc); tcg_gen_mov_i32(d, cpu_SR[sr]); } static void gen_rsr_ptevaddr(DisasContext *dc, TCGv_i32 d, uint32_t sr) { tcg_gen_shri_i32(d, cpu_SR[EXCVADDR], 10); tcg_gen_or_i32(d, d, cpu_SR[sr]); tcg_gen_andi_i32(d, d, 0xfffffffc); } static void gen_rsr(DisasContext *dc, TCGv_i32 d, uint32_t sr) { static void (* const rsr_handler[256])(DisasContext *dc, TCGv_i32 d, uint32_t sr) = { [CCOUNT] = gen_rsr_ccount, [PTEVADDR] = gen_rsr_ptevaddr, }; if (rsr_handler[sr]) { rsr_handler[sr](dc, d, sr); } else { tcg_gen_mov_i32(d, cpu_SR[sr]); } } static void gen_wsr_lbeg(DisasContext *dc, uint32_t sr, TCGv_i32 s) { gen_helper_wsr_lbeg(cpu_env, s); gen_jumpi_check_loop_end(dc, 0); } static void gen_wsr_lend(DisasContext *dc, uint32_t sr, TCGv_i32 s) { gen_helper_wsr_lend(cpu_env, s); gen_jumpi_check_loop_end(dc, 0); } static void gen_wsr_sar(DisasContext *dc, uint32_t sr, TCGv_i32 s) { tcg_gen_andi_i32(cpu_SR[sr], s, 0x3f); if (dc->sar_m32_5bit) { tcg_gen_discard_i32(dc->sar_m32); } dc->sar_5bit = false; dc->sar_m32_5bit = false; } static void gen_wsr_br(DisasContext *dc, uint32_t sr, TCGv_i32 s) { tcg_gen_andi_i32(cpu_SR[sr], s, 0xffff); } static void gen_wsr_litbase(DisasContext *dc, uint32_t sr, TCGv_i32 s) { tcg_gen_andi_i32(cpu_SR[sr], s, 0xfffff001); /* This can change tb->flags, so exit tb */ gen_jumpi_check_loop_end(dc, -1); } static void gen_wsr_acchi(DisasContext *dc, uint32_t sr, TCGv_i32 s) { tcg_gen_ext8s_i32(cpu_SR[sr], s); } static void gen_wsr_windowbase(DisasContext *dc, uint32_t sr, TCGv_i32 v) { gen_helper_wsr_windowbase(cpu_env, v); reset_used_window(dc); } static void gen_wsr_windowstart(DisasContext *dc, uint32_t sr, TCGv_i32 v) { tcg_gen_andi_i32(cpu_SR[sr], v, (1 << dc->config->nareg / 4) - 1); reset_used_window(dc); } static void gen_wsr_ptevaddr(DisasContext *dc, uint32_t sr, TCGv_i32 v) { tcg_gen_andi_i32(cpu_SR[sr], v, 0xffc00000); } static void gen_wsr_rasid(DisasContext *dc, uint32_t sr, TCGv_i32 v) { gen_helper_wsr_rasid(cpu_env, v); /* This can change tb->flags, so exit tb */ gen_jumpi_check_loop_end(dc, -1); } static void gen_wsr_tlbcfg(DisasContext *dc, uint32_t sr, TCGv_i32 v) { tcg_gen_andi_i32(cpu_SR[sr], v, 0x01130000); } static void gen_wsr_ibreakenable(DisasContext *dc, uint32_t sr, TCGv_i32 v) { gen_helper_wsr_ibreakenable(cpu_env, v); gen_jumpi_check_loop_end(dc, 0); } static void gen_wsr_atomctl(DisasContext *dc, uint32_t sr, TCGv_i32 v) { tcg_gen_andi_i32(cpu_SR[sr], v, 0x3f); } 
static void gen_wsr_ibreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v) { unsigned id = sr - IBREAKA; if (id < dc->config->nibreak) { TCGv_i32 tmp = tcg_const_i32(id); gen_helper_wsr_ibreaka(cpu_env, tmp, v); tcg_temp_free(tmp); gen_jumpi_check_loop_end(dc, 0); } } static void gen_wsr_dbreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v) { unsigned id = sr - DBREAKA; if (id < dc->config->ndbreak) { TCGv_i32 tmp = tcg_const_i32(id); gen_helper_wsr_dbreaka(cpu_env, tmp, v); tcg_temp_free(tmp); } } static void gen_wsr_dbreakc(DisasContext *dc, uint32_t sr, TCGv_i32 v) { unsigned id = sr - DBREAKC; if (id < dc->config->ndbreak) { TCGv_i32 tmp = tcg_const_i32(id); gen_helper_wsr_dbreakc(cpu_env, tmp, v); tcg_temp_free(tmp); } } static void gen_wsr_cpenable(DisasContext *dc, uint32_t sr, TCGv_i32 v) { tcg_gen_andi_i32(cpu_SR[sr], v, 0xff); /* This can change tb->flags, so exit tb */ gen_jumpi_check_loop_end(dc, -1); } static void gen_wsr_intset(DisasContext *dc, uint32_t sr, TCGv_i32 v) { tcg_gen_andi_i32(cpu_SR[sr], v, dc->config->inttype_mask[INTTYPE_SOFTWARE]); gen_helper_check_interrupts(cpu_env); gen_jumpi_check_loop_end(dc, 0); } static void gen_wsr_intclear(DisasContext *dc, uint32_t sr, TCGv_i32 v) { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, v, dc->config->inttype_mask[INTTYPE_EDGE] | dc->config->inttype_mask[INTTYPE_NMI] | dc->config->inttype_mask[INTTYPE_SOFTWARE]); tcg_gen_andc_i32(cpu_SR[INTSET], cpu_SR[INTSET], tmp); tcg_temp_free(tmp); gen_helper_check_interrupts(cpu_env); } static void gen_wsr_intenable(DisasContext *dc, uint32_t sr, TCGv_i32 v) { tcg_gen_mov_i32(cpu_SR[sr], v); gen_helper_check_interrupts(cpu_env); gen_jumpi_check_loop_end(dc, 0); } static void gen_wsr_ps(DisasContext *dc, uint32_t sr, TCGv_i32 v) { uint32_t mask = PS_WOE | PS_CALLINC | PS_OWB | PS_UM | PS_EXCM | PS_INTLEVEL; if (option_enabled(dc, XTENSA_OPTION_MMU)) { mask |= PS_RING; } tcg_gen_andi_i32(cpu_SR[sr], v, mask); reset_used_window(dc); gen_helper_check_interrupts(cpu_env); /* This can change mmu index and tb->flags, so exit tb */ gen_jumpi_check_loop_end(dc, -1); } static void gen_wsr_icount(DisasContext *dc, uint32_t sr, TCGv_i32 v) { if (dc->icount) { tcg_gen_mov_i32(dc->next_icount, v); } else { tcg_gen_mov_i32(cpu_SR[sr], v); } } static void gen_wsr_icountlevel(DisasContext *dc, uint32_t sr, TCGv_i32 v) { tcg_gen_andi_i32(cpu_SR[sr], v, 0xf); /* This can change tb->flags, so exit tb */ gen_jumpi_check_loop_end(dc, -1); } static void gen_wsr_ccompare(DisasContext *dc, uint32_t sr, TCGv_i32 v) { uint32_t id = sr - CCOMPARE; if (id < dc->config->nccompare) { uint32_t int_bit = 1 << dc->config->timerint[id]; gen_advance_ccount(dc); tcg_gen_mov_i32(cpu_SR[sr], v); tcg_gen_andi_i32(cpu_SR[INTSET], cpu_SR[INTSET], ~int_bit); gen_helper_check_interrupts(cpu_env); } } static void gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s) { static void (* const wsr_handler[256])(DisasContext *dc, uint32_t sr, TCGv_i32 v) = { [LBEG] = gen_wsr_lbeg, [LEND] = gen_wsr_lend, [SAR] = gen_wsr_sar, [BR] = gen_wsr_br, [LITBASE] = gen_wsr_litbase, [ACCHI] = gen_wsr_acchi, [WINDOW_BASE] = gen_wsr_windowbase, [WINDOW_START] = gen_wsr_windowstart, [PTEVADDR] = gen_wsr_ptevaddr, [RASID] = gen_wsr_rasid, [ITLBCFG] = gen_wsr_tlbcfg, [DTLBCFG] = gen_wsr_tlbcfg, [IBREAKENABLE] = gen_wsr_ibreakenable, [ATOMCTL] = gen_wsr_atomctl, [IBREAKA] = gen_wsr_ibreaka, [IBREAKA + 1] = gen_wsr_ibreaka, [DBREAKA] = gen_wsr_dbreaka, [DBREAKA + 1] = gen_wsr_dbreaka, [DBREAKC] = gen_wsr_dbreakc, [DBREAKC + 1] = gen_wsr_dbreakc, 
[CPENABLE] = gen_wsr_cpenable, [INTSET] = gen_wsr_intset, [INTCLEAR] = gen_wsr_intclear, [INTENABLE] = gen_wsr_intenable, [PS] = gen_wsr_ps, [ICOUNT] = gen_wsr_icount, [ICOUNTLEVEL] = gen_wsr_icountlevel, [CCOMPARE] = gen_wsr_ccompare, [CCOMPARE + 1] = gen_wsr_ccompare, [CCOMPARE + 2] = gen_wsr_ccompare, }; if (wsr_handler[sr]) { wsr_handler[sr](dc, sr, s); } else { tcg_gen_mov_i32(cpu_SR[sr], s); } } static void gen_wur(uint32_t ur, TCGv_i32 s) { switch (ur) { case FCR: gen_helper_wur_fcr(cpu_env, s); break; case FSR: tcg_gen_andi_i32(cpu_UR[ur], s, 0xffffff80); break; default: tcg_gen_mov_i32(cpu_UR[ur], s); break; } } static void gen_load_store_alignment(DisasContext *dc, int shift, TCGv_i32 addr, bool no_hw_alignment) { if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) { tcg_gen_andi_i32(addr, addr, ~0 << shift); } else if (option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT) && no_hw_alignment) { int label = gen_new_label(); TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, addr, ~(~0 << shift)); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); gen_exception_cause_vaddr(dc, LOAD_STORE_ALIGNMENT_CAUSE, addr); gen_set_label(label); tcg_temp_free(tmp); } } static void gen_waiti(DisasContext *dc, uint32_t imm4) { TCGv_i32 pc = tcg_const_i32(dc->next_pc); TCGv_i32 intlevel = tcg_const_i32(imm4); gen_advance_ccount(dc); gen_helper_waiti(cpu_env, pc, intlevel); tcg_temp_free(pc); tcg_temp_free(intlevel); } static void gen_window_check1(DisasContext *dc, unsigned r1) { if (dc->tb->flags & XTENSA_TBFLAG_EXCM) { return; } if (option_enabled(dc, XTENSA_OPTION_WINDOWED_REGISTER) && r1 / 4 > dc->used_window) { int label = gen_new_label(); TCGv_i32 ws = tcg_temp_new_i32(); dc->used_window = r1 / 4; tcg_gen_deposit_i32(ws, cpu_SR[WINDOW_START], cpu_SR[WINDOW_START], dc->config->nareg / 4, dc->config->nareg / 4); tcg_gen_shr_i32(ws, ws, cpu_SR[WINDOW_BASE]); tcg_gen_andi_i32(ws, ws, (2 << (r1 / 4)) - 2); tcg_gen_brcondi_i32(TCG_COND_EQ, ws, 0, label); { TCGv_i32 pc = tcg_const_i32(dc->pc); TCGv_i32 w = tcg_const_i32(r1 / 4); gen_advance_ccount_cond(dc); gen_helper_window_check(cpu_env, pc, w); tcg_temp_free(w); tcg_temp_free(pc); } gen_set_label(label); tcg_temp_free(ws); } } static void gen_window_check2(DisasContext *dc, unsigned r1, unsigned r2) { gen_window_check1(dc, r1 > r2 ? r1 : r2); } static void gen_window_check3(DisasContext *dc, unsigned r1, unsigned r2, unsigned r3) { gen_window_check2(dc, r1, r2 > r3 ? r2 : r3); } static TCGv_i32 gen_mac16_m(TCGv_i32 v, bool hi, bool is_unsigned) { TCGv_i32 m = tcg_temp_new_i32(); if (hi) { (is_unsigned ? tcg_gen_shri_i32 : tcg_gen_sari_i32)(m, v, 16); } else { (is_unsigned ? 
tcg_gen_ext16u_i32 : tcg_gen_ext16s_i32)(m, v); } return m; } static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) { #define HAS_OPTION_BITS(opt) do { \ if (!option_bits_enabled(dc, opt)) { \ qemu_log("Option is not enabled %s:%d\n", \ __FILE__, __LINE__); \ goto invalid_opcode; \ } \ } while (0) #define HAS_OPTION(opt) HAS_OPTION_BITS(XTENSA_OPTION_BIT(opt)) #define TBD() qemu_log("TBD(pc = %08x): %s:%d\n", dc->pc, __FILE__, __LINE__) #define RESERVED() do { \ qemu_log("RESERVED(pc = %08x, %02x%02x%02x): %s:%d\n", \ dc->pc, b0, b1, b2, __FILE__, __LINE__); \ goto invalid_opcode; \ } while (0) #ifdef TARGET_WORDS_BIGENDIAN #define OP0 (((b0) & 0xf0) >> 4) #define OP1 (((b2) & 0xf0) >> 4) #define OP2 ((b2) & 0xf) #define RRR_R ((b1) & 0xf) #define RRR_S (((b1) & 0xf0) >> 4) #define RRR_T ((b0) & 0xf) #else #define OP0 (((b0) & 0xf)) #define OP1 (((b2) & 0xf)) #define OP2 (((b2) & 0xf0) >> 4) #define RRR_R (((b1) & 0xf0) >> 4) #define RRR_S (((b1) & 0xf)) #define RRR_T (((b0) & 0xf0) >> 4) #endif #define RRR_X ((RRR_R & 0x4) >> 2) #define RRR_Y ((RRR_T & 0x4) >> 2) #define RRR_W (RRR_R & 0x3) #define RRRN_R RRR_R #define RRRN_S RRR_S #define RRRN_T RRR_T #define RRI8_R RRR_R #define RRI8_S RRR_S #define RRI8_T RRR_T #define RRI8_IMM8 (b2) #define RRI8_IMM8_SE ((((b2) & 0x80) ? 0xffffff00 : 0) | RRI8_IMM8) #ifdef TARGET_WORDS_BIGENDIAN #define RI16_IMM16 (((b1) << 8) | (b2)) #else #define RI16_IMM16 (((b2) << 8) | (b1)) #endif #ifdef TARGET_WORDS_BIGENDIAN #define CALL_N (((b0) & 0xc) >> 2) #define CALL_OFFSET ((((b0) & 0x3) << 16) | ((b1) << 8) | (b2)) #else #define CALL_N (((b0) & 0x30) >> 4) #define CALL_OFFSET ((((b0) & 0xc0) >> 6) | ((b1) << 2) | ((b2) << 10)) #endif #define CALL_OFFSET_SE \ (((CALL_OFFSET & 0x20000) ? 0xfffc0000 : 0) | CALL_OFFSET) #define CALLX_N CALL_N #ifdef TARGET_WORDS_BIGENDIAN #define CALLX_M ((b0) & 0x3) #else #define CALLX_M (((b0) & 0xc0) >> 6) #endif #define CALLX_S RRR_S #define BRI12_M CALLX_M #define BRI12_S RRR_S #ifdef TARGET_WORDS_BIGENDIAN #define BRI12_IMM12 ((((b1) & 0xf) << 8) | (b2)) #else #define BRI12_IMM12 ((((b1) & 0xf0) >> 4) | ((b2) << 4)) #endif #define BRI12_IMM12_SE (((BRI12_IMM12 & 0x800) ? 
0xfffff000 : 0) | BRI12_IMM12) #define BRI8_M BRI12_M #define BRI8_R RRI8_R #define BRI8_S RRI8_S #define BRI8_IMM8 RRI8_IMM8 #define BRI8_IMM8_SE RRI8_IMM8_SE #define RSR_SR (b1) uint8_t b0 = cpu_ldub_code(env, dc->pc); uint8_t b1 = cpu_ldub_code(env, dc->pc + 1); uint8_t b2 = 0; static const uint32_t B4CONST[] = { 0xffffffff, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256 }; static const uint32_t B4CONSTU[] = { 32768, 65536, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256 }; if (OP0 >= 8) { dc->next_pc = dc->pc + 2; HAS_OPTION(XTENSA_OPTION_CODE_DENSITY); } else { dc->next_pc = dc->pc + 3; b2 = cpu_ldub_code(env, dc->pc + 2); } switch (OP0) { case 0: /*QRST*/ switch (OP1) { case 0: /*RST0*/ switch (OP2) { case 0: /*ST0*/ if ((RRR_R & 0xc) == 0x8) { HAS_OPTION(XTENSA_OPTION_BOOLEAN); } switch (RRR_R) { case 0: /*SNM0*/ switch (CALLX_M) { case 0: /*ILL*/ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); break; case 1: /*reserved*/ RESERVED(); break; case 2: /*JR*/ switch (CALLX_N) { case 0: /*RET*/ case 2: /*JX*/ gen_window_check1(dc, CALLX_S); gen_jump(dc, cpu_R[CALLX_S]); break; case 1: /*RETWw*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); { TCGv_i32 tmp = tcg_const_i32(dc->pc); gen_advance_ccount(dc); gen_helper_retw(tmp, cpu_env, tmp); gen_jump(dc, tmp); tcg_temp_free(tmp); } break; case 3: /*reserved*/ RESERVED(); break; } break; case 3: /*CALLX*/ gen_window_check2(dc, CALLX_S, CALLX_N << 2); switch (CALLX_N) { case 0: /*CALLX0*/ { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]); tcg_gen_movi_i32(cpu_R[0], dc->next_pc); gen_jump(dc, tmp); tcg_temp_free(tmp); } break; case 1: /*CALLX4w*/ case 2: /*CALLX8w*/ case 3: /*CALLX12w*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]); gen_callw(dc, CALLX_N, tmp); tcg_temp_free(tmp); } break; } break; } break; case 1: /*MOVSPw*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); gen_window_check2(dc, RRR_T, RRR_S); { TCGv_i32 pc = tcg_const_i32(dc->pc); gen_advance_ccount(dc); gen_helper_movsp(cpu_env, pc); tcg_gen_mov_i32(cpu_R[RRR_T], cpu_R[RRR_S]); tcg_temp_free(pc); } break; case 2: /*SYNC*/ switch (RRR_T) { case 0: /*ISYNC*/ break; case 1: /*RSYNC*/ break; case 2: /*ESYNC*/ break; case 3: /*DSYNC*/ break; case 8: /*EXCW*/ HAS_OPTION(XTENSA_OPTION_EXCEPTION); break; case 12: /*MEMW*/ break; case 13: /*EXTW*/ break; case 15: /*NOP*/ break; default: /*reserved*/ RESERVED(); break; } break; case 3: /*RFEIx*/ switch (RRR_T) { case 0: /*RFETx*/ HAS_OPTION(XTENSA_OPTION_EXCEPTION); switch (RRR_S) { case 0: /*RFEx*/ gen_check_privilege(dc); tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM); gen_helper_check_interrupts(cpu_env); gen_jump(dc, cpu_SR[EPC1]); break; case 1: /*RFUEx*/ RESERVED(); break; case 2: /*RFDEx*/ gen_check_privilege(dc); gen_jump(dc, cpu_SR[ dc->config->ndepc ? 
DEPC : EPC1]); break; case 4: /*RFWOw*/ case 5: /*RFWUw*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); gen_check_privilege(dc); { TCGv_i32 tmp = tcg_const_i32(1); tcg_gen_andi_i32( cpu_SR[PS], cpu_SR[PS], ~PS_EXCM); tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]); if (RRR_S == 4) { tcg_gen_andc_i32(cpu_SR[WINDOW_START], cpu_SR[WINDOW_START], tmp); } else { tcg_gen_or_i32(cpu_SR[WINDOW_START], cpu_SR[WINDOW_START], tmp); } gen_helper_restore_owb(cpu_env); gen_helper_check_interrupts(cpu_env); gen_jump(dc, cpu_SR[EPC1]); tcg_temp_free(tmp); } break; default: /*reserved*/ RESERVED(); break; } break; case 1: /*RFIx*/ HAS_OPTION(XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT); if (RRR_S >= 2 && RRR_S <= dc->config->nlevel) { gen_check_privilege(dc); tcg_gen_mov_i32(cpu_SR[PS], cpu_SR[EPS2 + RRR_S - 2]); gen_helper_check_interrupts(cpu_env); gen_jump(dc, cpu_SR[EPC1 + RRR_S - 1]); } else { qemu_log("RFI %d is illegal\n", RRR_S); gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); } break; case 2: /*RFME*/ TBD(); break; default: /*reserved*/ RESERVED(); break; } break; case 4: /*BREAKx*/ HAS_OPTION(XTENSA_OPTION_DEBUG); if (dc->debug) { gen_debug_exception(dc, DEBUGCAUSE_BI); } break; case 5: /*SYSCALLx*/ HAS_OPTION(XTENSA_OPTION_EXCEPTION); switch (RRR_S) { case 0: /*SYSCALLx*/ gen_exception_cause(dc, SYSCALL_CAUSE); break; case 1: /*SIMCALL*/ if (semihosting_enabled) { gen_check_privilege(dc); gen_helper_simcall(cpu_env); } else { qemu_log("SIMCALL but semihosting is disabled\n"); gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); } break; default: RESERVED(); break; } break; case 6: /*RSILx*/ HAS_OPTION(XTENSA_OPTION_INTERRUPT); gen_check_privilege(dc); gen_window_check1(dc, RRR_T); tcg_gen_mov_i32(cpu_R[RRR_T], cpu_SR[PS]); tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_INTLEVEL); tcg_gen_ori_i32(cpu_SR[PS], cpu_SR[PS], RRR_S); gen_helper_check_interrupts(cpu_env); gen_jumpi_check_loop_end(dc, 0); break; case 7: /*WAITIx*/ HAS_OPTION(XTENSA_OPTION_INTERRUPT); gen_check_privilege(dc); gen_waiti(dc, RRR_S); break; case 8: /*ANY4p*/ case 9: /*ALL4p*/ case 10: /*ANY8p*/ case 11: /*ALL8p*/ HAS_OPTION(XTENSA_OPTION_BOOLEAN); { const unsigned shift = (RRR_R & 2) ? 
8 : 4; TCGv_i32 mask = tcg_const_i32( ((1 << shift) - 1) << RRR_S); TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_and_i32(tmp, cpu_SR[BR], mask); if (RRR_R & 1) { /*ALL*/ tcg_gen_addi_i32(tmp, tmp, 1 << RRR_S); } else { /*ANY*/ tcg_gen_add_i32(tmp, tmp, mask); } tcg_gen_shri_i32(tmp, tmp, RRR_S + shift); tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR], tmp, RRR_T, 1); tcg_temp_free(mask); tcg_temp_free(tmp); } break; default: /*reserved*/ RESERVED(); break; } break; case 1: /*AND*/ gen_window_check3(dc, RRR_R, RRR_S, RRR_T); tcg_gen_and_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 2: /*OR*/ gen_window_check3(dc, RRR_R, RRR_S, RRR_T); tcg_gen_or_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 3: /*XOR*/ gen_window_check3(dc, RRR_R, RRR_S, RRR_T); tcg_gen_xor_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 4: /*ST1*/ switch (RRR_R) { case 0: /*SSR*/ gen_window_check1(dc, RRR_S); gen_right_shift_sar(dc, cpu_R[RRR_S]); break; case 1: /*SSL*/ gen_window_check1(dc, RRR_S); gen_left_shift_sar(dc, cpu_R[RRR_S]); break; case 2: /*SSA8L*/ gen_window_check1(dc, RRR_S); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3); gen_right_shift_sar(dc, tmp); tcg_temp_free(tmp); } break; case 3: /*SSA8B*/ gen_window_check1(dc, RRR_S); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3); gen_left_shift_sar(dc, tmp); tcg_temp_free(tmp); } break; case 4: /*SSAI*/ { TCGv_i32 tmp = tcg_const_i32( RRR_S | ((RRR_T & 1) << 4)); gen_right_shift_sar(dc, tmp); tcg_temp_free(tmp); } break; case 6: /*RER*/ TBD(); break; case 7: /*WER*/ TBD(); break; case 8: /*ROTWw*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); gen_check_privilege(dc); { TCGv_i32 tmp = tcg_const_i32( RRR_T | ((RRR_T & 8) ? 0xfffffff0 : 0)); gen_helper_rotw(cpu_env, tmp); tcg_temp_free(tmp); reset_used_window(dc); } break; case 14: /*NSAu*/ HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA); gen_window_check2(dc, RRR_S, RRR_T); gen_helper_nsa(cpu_R[RRR_T], cpu_R[RRR_S]); break; case 15: /*NSAUu*/ HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA); gen_window_check2(dc, RRR_S, RRR_T); gen_helper_nsau(cpu_R[RRR_T], cpu_R[RRR_S]); break; default: /*reserved*/ RESERVED(); break; } break; case 5: /*TLB*/ HAS_OPTION_BITS( XTENSA_OPTION_BIT(XTENSA_OPTION_MMU) | XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) | XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION)); gen_check_privilege(dc); gen_window_check2(dc, RRR_S, RRR_T); { TCGv_i32 dtlb = tcg_const_i32((RRR_R & 8) != 0); switch (RRR_R & 7) { case 3: /*RITLB0*/ /*RDTLB0*/ gen_helper_rtlb0(cpu_R[RRR_T], cpu_env, cpu_R[RRR_S], dtlb); break; case 4: /*IITLB*/ /*IDTLB*/ gen_helper_itlb(cpu_env, cpu_R[RRR_S], dtlb); /* This could change memory mapping, so exit tb */ gen_jumpi_check_loop_end(dc, -1); break; case 5: /*PITLB*/ /*PDTLB*/ tcg_gen_movi_i32(cpu_pc, dc->pc); gen_helper_ptlb(cpu_R[RRR_T], cpu_env, cpu_R[RRR_S], dtlb); break; case 6: /*WITLB*/ /*WDTLB*/ gen_helper_wtlb( cpu_env, cpu_R[RRR_T], cpu_R[RRR_S], dtlb); /* This could change memory mapping, so exit tb */ gen_jumpi_check_loop_end(dc, -1); break; case 7: /*RITLB1*/ /*RDTLB1*/ gen_helper_rtlb1(cpu_R[RRR_T], cpu_env, cpu_R[RRR_S], dtlb); break; default: tcg_temp_free(dtlb); RESERVED(); break; } tcg_temp_free(dtlb); } break; case 6: /*RT0*/ gen_window_check2(dc, RRR_R, RRR_T); switch (RRR_S) { case 0: /*NEG*/ tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]); break; case 1: /*ABS*/ { TCGv_i32 zero = tcg_const_i32(0); TCGv_i32 neg = tcg_temp_new_i32(); tcg_gen_neg_i32(neg, cpu_R[RRR_T]); 
tcg_gen_movcond_i32(TCG_COND_GE, cpu_R[RRR_R], cpu_R[RRR_T], zero, cpu_R[RRR_T], neg); tcg_temp_free(neg); tcg_temp_free(zero); } break; default: /*reserved*/ RESERVED(); break; } break; case 7: /*reserved*/ RESERVED(); break; case 8: /*ADD*/ gen_window_check3(dc, RRR_R, RRR_S, RRR_T); tcg_gen_add_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 9: /*ADD**/ case 10: case 11: gen_window_check3(dc, RRR_R, RRR_S, RRR_T); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 8); tcg_gen_add_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]); tcg_temp_free(tmp); } break; case 12: /*SUB*/ gen_window_check3(dc, RRR_R, RRR_S, RRR_T); tcg_gen_sub_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 13: /*SUB**/ case 14: case 15: gen_window_check3(dc, RRR_R, RRR_S, RRR_T); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 12); tcg_gen_sub_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]); tcg_temp_free(tmp); } break; } break; case 1: /*RST1*/ switch (OP2) { case 0: /*SLLI*/ case 1: gen_window_check2(dc, RRR_R, RRR_S); tcg_gen_shli_i32(cpu_R[RRR_R], cpu_R[RRR_S], 32 - (RRR_T | ((OP2 & 1) << 4))); break; case 2: /*SRAI*/ case 3: gen_window_check2(dc, RRR_R, RRR_T); tcg_gen_sari_i32(cpu_R[RRR_R], cpu_R[RRR_T], RRR_S | ((OP2 & 1) << 4)); break; case 4: /*SRLI*/ gen_window_check2(dc, RRR_R, RRR_T); tcg_gen_shri_i32(cpu_R[RRR_R], cpu_R[RRR_T], RRR_S); break; case 6: /*XSR*/ if (gen_check_sr(dc, RSR_SR, SR_X)) { TCGv_i32 tmp = tcg_temp_new_i32(); if (RSR_SR >= 64) { gen_check_privilege(dc); } gen_window_check1(dc, RRR_T); tcg_gen_mov_i32(tmp, cpu_R[RRR_T]); gen_rsr(dc, cpu_R[RRR_T], RSR_SR); gen_wsr(dc, RSR_SR, tmp); tcg_temp_free(tmp); } break; /* * Note: 64 bit ops are used here solely because SAR values * have range 0..63 */ #define gen_shift_reg(cmd, reg) do { \ TCGv_i64 tmp = tcg_temp_new_i64(); \ tcg_gen_extu_i32_i64(tmp, reg); \ tcg_gen_##cmd##_i64(v, v, tmp); \ tcg_gen_trunc_i64_i32(cpu_R[RRR_R], v); \ tcg_temp_free_i64(v); \ tcg_temp_free_i64(tmp); \ } while (0) #define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR]) case 8: /*SRC*/ gen_window_check3(dc, RRR_R, RRR_S, RRR_T); { TCGv_i64 v = tcg_temp_new_i64(); tcg_gen_concat_i32_i64(v, cpu_R[RRR_T], cpu_R[RRR_S]); gen_shift(shr); } break; case 9: /*SRL*/ gen_window_check2(dc, RRR_R, RRR_T); if (dc->sar_5bit) { tcg_gen_shr_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]); } else { TCGv_i64 v = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(v, cpu_R[RRR_T]); gen_shift(shr); } break; case 10: /*SLL*/ gen_window_check2(dc, RRR_R, RRR_S); if (dc->sar_m32_5bit) { tcg_gen_shl_i32(cpu_R[RRR_R], cpu_R[RRR_S], dc->sar_m32); } else { TCGv_i64 v = tcg_temp_new_i64(); TCGv_i32 s = tcg_const_i32(32); tcg_gen_sub_i32(s, s, cpu_SR[SAR]); tcg_gen_andi_i32(s, s, 0x3f); tcg_gen_extu_i32_i64(v, cpu_R[RRR_S]); gen_shift_reg(shl, s); tcg_temp_free(s); } break; case 11: /*SRA*/ gen_window_check2(dc, RRR_R, RRR_T); if (dc->sar_5bit) { tcg_gen_sar_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]); } else { TCGv_i64 v = tcg_temp_new_i64(); tcg_gen_ext_i32_i64(v, cpu_R[RRR_T]); gen_shift(sar); } break; #undef gen_shift #undef gen_shift_reg case 12: /*MUL16U*/ HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL); gen_window_check3(dc, RRR_R, RRR_S, RRR_T); { TCGv_i32 v1 = tcg_temp_new_i32(); TCGv_i32 v2 = tcg_temp_new_i32(); tcg_gen_ext16u_i32(v1, cpu_R[RRR_S]); tcg_gen_ext16u_i32(v2, cpu_R[RRR_T]); tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2); tcg_temp_free(v2); tcg_temp_free(v1); } break; case 13: /*MUL16S*/ HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL); gen_window_check3(dc, 
RRR_R, RRR_S, RRR_T); { TCGv_i32 v1 = tcg_temp_new_i32(); TCGv_i32 v2 = tcg_temp_new_i32(); tcg_gen_ext16s_i32(v1, cpu_R[RRR_S]); tcg_gen_ext16s_i32(v2, cpu_R[RRR_T]); tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2); tcg_temp_free(v2); tcg_temp_free(v1); } break; default: /*reserved*/ RESERVED(); break; } break; case 2: /*RST2*/ if (OP2 >= 8) { gen_window_check3(dc, RRR_R, RRR_S, RRR_T); } if (OP2 >= 12) { HAS_OPTION(XTENSA_OPTION_32_BIT_IDIV); int label = gen_new_label(); tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0, label); gen_exception_cause(dc, INTEGER_DIVIDE_BY_ZERO_CAUSE); gen_set_label(label); } switch (OP2) { #define BOOLEAN_LOGIC(fn, r, s, t) \ do { \ HAS_OPTION(XTENSA_OPTION_BOOLEAN); \ TCGv_i32 tmp1 = tcg_temp_new_i32(); \ TCGv_i32 tmp2 = tcg_temp_new_i32(); \ \ tcg_gen_shri_i32(tmp1, cpu_SR[BR], s); \ tcg_gen_shri_i32(tmp2, cpu_SR[BR], t); \ tcg_gen_##fn##_i32(tmp1, tmp1, tmp2); \ tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR], tmp1, r, 1); \ tcg_temp_free(tmp1); \ tcg_temp_free(tmp2); \ } while (0) case 0: /*ANDBp*/ BOOLEAN_LOGIC(and, RRR_R, RRR_S, RRR_T); break; case 1: /*ANDBCp*/ BOOLEAN_LOGIC(andc, RRR_R, RRR_S, RRR_T); break; case 2: /*ORBp*/ BOOLEAN_LOGIC(or, RRR_R, RRR_S, RRR_T); break; case 3: /*ORBCp*/ BOOLEAN_LOGIC(orc, RRR_R, RRR_S, RRR_T); break; case 4: /*XORBp*/ BOOLEAN_LOGIC(xor, RRR_R, RRR_S, RRR_T); break; #undef BOOLEAN_LOGIC case 8: /*MULLi*/ HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL); tcg_gen_mul_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 10: /*MULUHi*/ case 11: /*MULSHi*/ HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL_HIGH); { TCGv lo = tcg_temp_new(); if (OP2 == 10) { tcg_gen_mulu2_i32(lo, cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); } else { tcg_gen_muls2_i32(lo, cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); } tcg_temp_free(lo); } break; case 12: /*QUOUi*/ tcg_gen_divu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 13: /*QUOSi*/ case 15: /*REMSi*/ { int label1 = gen_new_label(); int label2 = gen_new_label(); tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_S], 0x80000000, label1); tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0xffffffff, label1); tcg_gen_movi_i32(cpu_R[RRR_R], OP2 == 13 ? 
0x80000000 : 0); tcg_gen_br(label2); gen_set_label(label1); if (OP2 == 13) { tcg_gen_div_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); } else { tcg_gen_rem_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); } gen_set_label(label2); } break; case 14: /*REMUi*/ tcg_gen_remu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; default: /*reserved*/ RESERVED(); break; } break; case 3: /*RST3*/ switch (OP2) { case 0: /*RSR*/ if (gen_check_sr(dc, RSR_SR, SR_R)) { if (RSR_SR >= 64) { gen_check_privilege(dc); } gen_window_check1(dc, RRR_T); gen_rsr(dc, cpu_R[RRR_T], RSR_SR); } break; case 1: /*WSR*/ if (gen_check_sr(dc, RSR_SR, SR_W)) { if (RSR_SR >= 64) { gen_check_privilege(dc); } gen_window_check1(dc, RRR_T); gen_wsr(dc, RSR_SR, cpu_R[RRR_T]); } break; case 2: /*SEXTu*/ HAS_OPTION(XTENSA_OPTION_MISC_OP_SEXT); gen_window_check2(dc, RRR_R, RRR_S); { int shift = 24 - RRR_T; if (shift == 24) { tcg_gen_ext8s_i32(cpu_R[RRR_R], cpu_R[RRR_S]); } else if (shift == 16) { tcg_gen_ext16s_i32(cpu_R[RRR_R], cpu_R[RRR_S]); } else { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, cpu_R[RRR_S], shift); tcg_gen_sari_i32(cpu_R[RRR_R], tmp, shift); tcg_temp_free(tmp); } } break; case 3: /*CLAMPSu*/ HAS_OPTION(XTENSA_OPTION_MISC_OP_CLAMPS); gen_window_check2(dc, RRR_R, RRR_S); { TCGv_i32 tmp1 = tcg_temp_new_i32(); TCGv_i32 tmp2 = tcg_temp_new_i32(); TCGv_i32 zero = tcg_const_i32(0); tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 24 - RRR_T); tcg_gen_xor_i32(tmp2, tmp1, cpu_R[RRR_S]); tcg_gen_andi_i32(tmp2, tmp2, 0xffffffff << (RRR_T + 7)); tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 31); tcg_gen_xori_i32(tmp1, tmp1, 0xffffffff >> (25 - RRR_T)); tcg_gen_movcond_i32(TCG_COND_EQ, cpu_R[RRR_R], tmp2, zero, cpu_R[RRR_S], tmp1); tcg_temp_free(tmp1); tcg_temp_free(tmp2); tcg_temp_free(zero); } break; case 4: /*MINu*/ case 5: /*MAXu*/ case 6: /*MINUu*/ case 7: /*MAXUu*/ HAS_OPTION(XTENSA_OPTION_MISC_OP_MINMAX); gen_window_check3(dc, RRR_R, RRR_S, RRR_T); { static const TCGCond cond[] = { TCG_COND_LE, TCG_COND_GE, TCG_COND_LEU, TCG_COND_GEU }; tcg_gen_movcond_i32(cond[OP2 - 4], cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T], cpu_R[RRR_S], cpu_R[RRR_T]); } break; case 8: /*MOVEQZ*/ case 9: /*MOVNEZ*/ case 10: /*MOVLTZ*/ case 11: /*MOVGEZ*/ gen_window_check3(dc, RRR_R, RRR_S, RRR_T); { static const TCGCond cond[] = { TCG_COND_EQ, TCG_COND_NE, TCG_COND_LT, TCG_COND_GE, }; TCGv_i32 zero = tcg_const_i32(0); tcg_gen_movcond_i32(cond[OP2 - 8], cpu_R[RRR_R], cpu_R[RRR_T], zero, cpu_R[RRR_S], cpu_R[RRR_R]); tcg_temp_free(zero); } break; case 12: /*MOVFp*/ case 13: /*MOVTp*/ HAS_OPTION(XTENSA_OPTION_BOOLEAN); gen_window_check2(dc, RRR_R, RRR_S); { TCGv_i32 zero = tcg_const_i32(0); TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T); tcg_gen_movcond_i32(OP2 & 1 ? 
TCG_COND_NE : TCG_COND_EQ, cpu_R[RRR_R], tmp, zero, cpu_R[RRR_S], cpu_R[RRR_R]); tcg_temp_free(tmp); tcg_temp_free(zero); } break; case 14: /*RUR*/ gen_window_check1(dc, RRR_R); { int st = (RRR_S << 4) + RRR_T; if (uregnames[st].name) { tcg_gen_mov_i32(cpu_R[RRR_R], cpu_UR[st]); } else { qemu_log("RUR %d not implemented, ", st); TBD(); } } break; case 15: /*WUR*/ gen_window_check1(dc, RRR_T); if (uregnames[RSR_SR].name) { gen_wur(RSR_SR, cpu_R[RRR_T]); } else { qemu_log("WUR %d not implemented, ", RSR_SR); TBD(); } break; } break; case 4: /*EXTUI*/ case 5: gen_window_check2(dc, RRR_R, RRR_T); { int shiftimm = RRR_S | ((OP1 & 1) << 4); int maskimm = (1 << (OP2 + 1)) - 1; TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shri_i32(tmp, cpu_R[RRR_T], shiftimm); tcg_gen_andi_i32(cpu_R[RRR_R], tmp, maskimm); tcg_temp_free(tmp); } break; case 6: /*CUST0*/ RESERVED(); break; case 7: /*CUST1*/ RESERVED(); break; case 8: /*LSCXp*/ switch (OP2) { case 0: /*LSXf*/ case 1: /*LSXUf*/ case 4: /*SSXf*/ case 5: /*SSXUf*/ HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR); gen_window_check2(dc, RRR_S, RRR_T); gen_check_cpenable(dc, 0); { TCGv_i32 addr = tcg_temp_new_i32(); tcg_gen_add_i32(addr, cpu_R[RRR_S], cpu_R[RRR_T]); gen_load_store_alignment(dc, 2, addr, false); if (OP2 & 0x4) { tcg_gen_qemu_st32(cpu_FR[RRR_R], addr, dc->cring); } else { tcg_gen_qemu_ld32u(cpu_FR[RRR_R], addr, dc->cring); } if (OP2 & 0x1) { tcg_gen_mov_i32(cpu_R[RRR_S], addr); } tcg_temp_free(addr); } break; default: /*reserved*/ RESERVED(); break; } break; case 9: /*LSC4*/ gen_window_check2(dc, RRR_S, RRR_T); switch (OP2) { case 0: /*L32E*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); gen_check_privilege(dc); { TCGv_i32 addr = tcg_temp_new_i32(); tcg_gen_addi_i32(addr, cpu_R[RRR_S], (0xffffffc0 | (RRR_R << 2))); tcg_gen_qemu_ld32u(cpu_R[RRR_T], addr, dc->ring); tcg_temp_free(addr); } break; case 4: /*S32E*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); gen_check_privilege(dc); { TCGv_i32 addr = tcg_temp_new_i32(); tcg_gen_addi_i32(addr, cpu_R[RRR_S], (0xffffffc0 | (RRR_R << 2))); tcg_gen_qemu_st32(cpu_R[RRR_T], addr, dc->ring); tcg_temp_free(addr); } break; default: RESERVED(); break; } break; case 10: /*FP0*/ HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR); switch (OP2) { case 0: /*ADD.Sf*/ gen_check_cpenable(dc, 0); gen_helper_add_s(cpu_FR[RRR_R], cpu_env, cpu_FR[RRR_S], cpu_FR[RRR_T]); break; case 1: /*SUB.Sf*/ gen_check_cpenable(dc, 0); gen_helper_sub_s(cpu_FR[RRR_R], cpu_env, cpu_FR[RRR_S], cpu_FR[RRR_T]); break; case 2: /*MUL.Sf*/ gen_check_cpenable(dc, 0); gen_helper_mul_s(cpu_FR[RRR_R], cpu_env, cpu_FR[RRR_S], cpu_FR[RRR_T]); break; case 4: /*MADD.Sf*/ gen_check_cpenable(dc, 0); gen_helper_madd_s(cpu_FR[RRR_R], cpu_env, cpu_FR[RRR_R], cpu_FR[RRR_S], cpu_FR[RRR_T]); break; case 5: /*MSUB.Sf*/ gen_check_cpenable(dc, 0); gen_helper_msub_s(cpu_FR[RRR_R], cpu_env, cpu_FR[RRR_R], cpu_FR[RRR_S], cpu_FR[RRR_T]); break; case 8: /*ROUND.Sf*/ case 9: /*TRUNC.Sf*/ case 10: /*FLOOR.Sf*/ case 11: /*CEIL.Sf*/ case 14: /*UTRUNC.Sf*/ gen_window_check1(dc, RRR_R); gen_check_cpenable(dc, 0); { static const unsigned rounding_mode_const[] = { float_round_nearest_even, float_round_to_zero, float_round_down, float_round_up, [6] = float_round_to_zero, }; TCGv_i32 rounding_mode = tcg_const_i32( rounding_mode_const[OP2 & 7]); TCGv_i32 scale = tcg_const_i32(RRR_T); if (OP2 == 14) { gen_helper_ftoui(cpu_R[RRR_R], cpu_FR[RRR_S], rounding_mode, scale); } else { gen_helper_ftoi(cpu_R[RRR_R], cpu_FR[RRR_S], rounding_mode, scale); } tcg_temp_free(rounding_mode); 
tcg_temp_free(scale); } break; case 12: /*FLOAT.Sf*/ case 13: /*UFLOAT.Sf*/ gen_window_check1(dc, RRR_S); gen_check_cpenable(dc, 0); { TCGv_i32 scale = tcg_const_i32(-RRR_T); if (OP2 == 13) { gen_helper_uitof(cpu_FR[RRR_R], cpu_env, cpu_R[RRR_S], scale); } else { gen_helper_itof(cpu_FR[RRR_R], cpu_env, cpu_R[RRR_S], scale); } tcg_temp_free(scale); } break; case 15: /*FP1OP*/ switch (RRR_T) { case 0: /*MOV.Sf*/ gen_check_cpenable(dc, 0); tcg_gen_mov_i32(cpu_FR[RRR_R], cpu_FR[RRR_S]); break; case 1: /*ABS.Sf*/ gen_check_cpenable(dc, 0); gen_helper_abs_s(cpu_FR[RRR_R], cpu_FR[RRR_S]); break; case 4: /*RFRf*/ gen_window_check1(dc, RRR_R); gen_check_cpenable(dc, 0); tcg_gen_mov_i32(cpu_R[RRR_R], cpu_FR[RRR_S]); break; case 5: /*WFRf*/ gen_window_check1(dc, RRR_S); gen_check_cpenable(dc, 0); tcg_gen_mov_i32(cpu_FR[RRR_R], cpu_R[RRR_S]); break; case 6: /*NEG.Sf*/ gen_check_cpenable(dc, 0); gen_helper_neg_s(cpu_FR[RRR_R], cpu_FR[RRR_S]); break; default: /*reserved*/ RESERVED(); break; } break; default: /*reserved*/ RESERVED(); break; } break; case 11: /*FP1*/ HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR); #define gen_compare(rel, br, a, b) \ do { \ TCGv_i32 bit = tcg_const_i32(1 << br); \ \ gen_check_cpenable(dc, 0); \ gen_helper_##rel(cpu_env, bit, cpu_FR[a], cpu_FR[b]); \ tcg_temp_free(bit); \ } while (0) switch (OP2) { case 1: /*UN.Sf*/ gen_compare(un_s, RRR_R, RRR_S, RRR_T); break; case 2: /*OEQ.Sf*/ gen_compare(oeq_s, RRR_R, RRR_S, RRR_T); break; case 3: /*UEQ.Sf*/ gen_compare(ueq_s, RRR_R, RRR_S, RRR_T); break; case 4: /*OLT.Sf*/ gen_compare(olt_s, RRR_R, RRR_S, RRR_T); break; case 5: /*ULT.Sf*/ gen_compare(ult_s, RRR_R, RRR_S, RRR_T); break; case 6: /*OLE.Sf*/ gen_compare(ole_s, RRR_R, RRR_S, RRR_T); break; case 7: /*ULE.Sf*/ gen_compare(ule_s, RRR_R, RRR_S, RRR_T); break; #undef gen_compare case 8: /*MOVEQZ.Sf*/ case 9: /*MOVNEZ.Sf*/ case 10: /*MOVLTZ.Sf*/ case 11: /*MOVGEZ.Sf*/ gen_window_check1(dc, RRR_T); gen_check_cpenable(dc, 0); { static const TCGCond cond[] = { TCG_COND_EQ, TCG_COND_NE, TCG_COND_LT, TCG_COND_GE, }; TCGv_i32 zero = tcg_const_i32(0); tcg_gen_movcond_i32(cond[OP2 - 8], cpu_FR[RRR_R], cpu_R[RRR_T], zero, cpu_FR[RRR_S], cpu_FR[RRR_R]); tcg_temp_free(zero); } break; case 12: /*MOVF.Sf*/ case 13: /*MOVT.Sf*/ HAS_OPTION(XTENSA_OPTION_BOOLEAN); gen_check_cpenable(dc, 0); { TCGv_i32 zero = tcg_const_i32(0); TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T); tcg_gen_movcond_i32(OP2 & 1 ? TCG_COND_NE : TCG_COND_EQ, cpu_FR[RRR_R], tmp, zero, cpu_FR[RRR_S], cpu_FR[RRR_R]); tcg_temp_free(tmp); tcg_temp_free(zero); } break; default: /*reserved*/ RESERVED(); break; } break; default: /*reserved*/ RESERVED(); break; } break; case 1: /*L32R*/ gen_window_check1(dc, RRR_T); { TCGv_i32 tmp = tcg_const_i32( ((dc->tb->flags & XTENSA_TBFLAG_LITBASE) ? 
0 : ((dc->pc + 3) & ~3)) + (0xfffc0000 | (RI16_IMM16 << 2))); if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) { tcg_gen_add_i32(tmp, tmp, dc->litbase); } tcg_gen_qemu_ld32u(cpu_R[RRR_T], tmp, dc->cring); tcg_temp_free(tmp); } break; case 2: /*LSAI*/ #define gen_load_store(type, shift) do { \ TCGv_i32 addr = tcg_temp_new_i32(); \ gen_window_check2(dc, RRI8_S, RRI8_T); \ tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << shift); \ if (shift) { \ gen_load_store_alignment(dc, shift, addr, false); \ } \ tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \ tcg_temp_free(addr); \ } while (0) switch (RRI8_R) { case 0: /*L8UI*/ gen_load_store(ld8u, 0); break; case 1: /*L16UI*/ gen_load_store(ld16u, 1); break; case 2: /*L32I*/ gen_load_store(ld32u, 2); break; case 4: /*S8I*/ gen_load_store(st8, 0); break; case 5: /*S16I*/ gen_load_store(st16, 1); break; case 6: /*S32I*/ gen_load_store(st32, 2); break; case 7: /*CACHEc*/ if (RRI8_T < 8) { HAS_OPTION(XTENSA_OPTION_DCACHE); } switch (RRI8_T) { case 0: /*DPFRc*/ break; case 1: /*DPFWc*/ break; case 2: /*DPFROc*/ break; case 3: /*DPFWOc*/ break; case 4: /*DHWBc*/ break; case 5: /*DHWBIc*/ break; case 6: /*DHIc*/ break; case 7: /*DIIc*/ break; case 8: /*DCEc*/ switch (OP1) { case 0: /*DPFLl*/ HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK); break; case 2: /*DHUl*/ HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK); break; case 3: /*DIUl*/ HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK); break; case 4: /*DIWBc*/ HAS_OPTION(XTENSA_OPTION_DCACHE); break; case 5: /*DIWBIc*/ HAS_OPTION(XTENSA_OPTION_DCACHE); break; default: /*reserved*/ RESERVED(); break; } break; case 12: /*IPFc*/ HAS_OPTION(XTENSA_OPTION_ICACHE); break; case 13: /*ICEc*/ switch (OP1) { case 0: /*IPFLl*/ HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK); break; case 2: /*IHUl*/ HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK); break; case 3: /*IIUl*/ HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK); break; default: /*reserved*/ RESERVED(); break; } break; case 14: /*IHIc*/ HAS_OPTION(XTENSA_OPTION_ICACHE); break; case 15: /*IIIc*/ HAS_OPTION(XTENSA_OPTION_ICACHE); break; default: /*reserved*/ RESERVED(); break; } break; case 9: /*L16SI*/ gen_load_store(ld16s, 1); break; #undef gen_load_store case 10: /*MOVI*/ gen_window_check1(dc, RRI8_T); tcg_gen_movi_i32(cpu_R[RRI8_T], RRI8_IMM8 | (RRI8_S << 8) | ((RRI8_S & 0x8) ? 
0xfffff000 : 0)); break; #define gen_load_store_no_hw_align(type) do { \ TCGv_i32 addr = tcg_temp_local_new_i32(); \ gen_window_check2(dc, RRI8_S, RRI8_T); \ tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); \ gen_load_store_alignment(dc, 2, addr, true); \ tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \ tcg_temp_free(addr); \ } while (0) case 11: /*L32AIy*/ HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO); gen_load_store_no_hw_align(ld32u); /*TODO acquire?*/ break; case 12: /*ADDI*/ gen_window_check2(dc, RRI8_S, RRI8_T); tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE); break; case 13: /*ADDMI*/ gen_window_check2(dc, RRI8_S, RRI8_T); tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE << 8); break; case 14: /*S32C1Iy*/ HAS_OPTION(XTENSA_OPTION_CONDITIONAL_STORE); gen_window_check2(dc, RRI8_S, RRI8_T); { int label = gen_new_label(); TCGv_i32 tmp = tcg_temp_local_new_i32(); TCGv_i32 addr = tcg_temp_local_new_i32(); TCGv_i32 tpc; tcg_gen_mov_i32(tmp, cpu_R[RRI8_T]); tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); gen_load_store_alignment(dc, 2, addr, true); gen_advance_ccount(dc); tpc = tcg_const_i32(dc->pc); gen_helper_check_atomctl(cpu_env, tpc, addr); tcg_gen_qemu_ld32u(cpu_R[RRI8_T], addr, dc->cring); tcg_gen_brcond_i32(TCG_COND_NE, cpu_R[RRI8_T], cpu_SR[SCOMPARE1], label); tcg_gen_qemu_st32(tmp, addr, dc->cring); gen_set_label(label); tcg_temp_free(tpc); tcg_temp_free(addr); tcg_temp_free(tmp); } break; case 15: /*S32RIy*/ HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO); gen_load_store_no_hw_align(st32); /*TODO release?*/ break; #undef gen_load_store_no_hw_align default: /*reserved*/ RESERVED(); break; } break; case 3: /*LSCIp*/ switch (RRI8_R) { case 0: /*LSIf*/ case 4: /*SSIf*/ case 8: /*LSIUf*/ case 12: /*SSIUf*/ HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR); gen_window_check1(dc, RRI8_S); gen_check_cpenable(dc, 0); { TCGv_i32 addr = tcg_temp_new_i32(); tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); gen_load_store_alignment(dc, 2, addr, false); if (RRI8_R & 0x4) { tcg_gen_qemu_st32(cpu_FR[RRI8_T], addr, dc->cring); } else { tcg_gen_qemu_ld32u(cpu_FR[RRI8_T], addr, dc->cring); } if (RRI8_R & 0x8) { tcg_gen_mov_i32(cpu_R[RRI8_S], addr); } tcg_temp_free(addr); } break; default: /*reserved*/ RESERVED(); break; } break; case 4: /*MAC16d*/ HAS_OPTION(XTENSA_OPTION_MAC16); { enum { MAC16_UMUL = 0x0, MAC16_MUL = 0x4, MAC16_MULA = 0x8, MAC16_MULS = 0xc, MAC16_NONE = 0xf, } op = OP1 & 0xc; bool is_m1_sr = (OP2 & 0x3) == 2; bool is_m2_sr = (OP2 & 0xc) == 0; uint32_t ld_offset = 0; if (OP2 > 9) { RESERVED(); } switch (OP2 & 2) { case 0: /*MACI?/MACC?*/ is_m1_sr = true; ld_offset = (OP2 & 1) ? -4 : 4; if (OP2 >= 8) { /*MACI/MACC*/ if (OP1 == 0) { /*LDINC/LDDEC*/ op = MAC16_NONE; } else { RESERVED(); } } else if (op != MAC16_MULA) { /*MULA.*.*.LDINC/LDDEC*/ RESERVED(); } break; case 2: /*MACD?/MACA?*/ if (op == MAC16_UMUL && OP2 != 7) { /*UMUL only in MACAA*/ RESERVED(); } break; } if (op != MAC16_NONE) { if (!is_m1_sr) { gen_window_check1(dc, RRR_S); } if (!is_m2_sr) { gen_window_check1(dc, RRR_T); } } { TCGv_i32 vaddr = tcg_temp_new_i32(); TCGv_i32 mem32 = tcg_temp_new_i32(); if (ld_offset) { gen_window_check1(dc, RRR_S); tcg_gen_addi_i32(vaddr, cpu_R[RRR_S], ld_offset); gen_load_store_alignment(dc, 2, vaddr, false); tcg_gen_qemu_ld32u(mem32, vaddr, dc->cring); } if (op != MAC16_NONE) { TCGv_i32 m1 = gen_mac16_m( is_m1_sr ? cpu_SR[MR + RRR_X] : cpu_R[RRR_S], OP1 & 1, op == MAC16_UMUL); TCGv_i32 m2 = gen_mac16_m( is_m2_sr ? 
cpu_SR[MR + 2 + RRR_Y] : cpu_R[RRR_T], OP1 & 2, op == MAC16_UMUL); if (op == MAC16_MUL || op == MAC16_UMUL) { tcg_gen_mul_i32(cpu_SR[ACCLO], m1, m2); if (op == MAC16_UMUL) { tcg_gen_movi_i32(cpu_SR[ACCHI], 0); } else { tcg_gen_sari_i32(cpu_SR[ACCHI], cpu_SR[ACCLO], 31); } } else { TCGv_i32 lo = tcg_temp_new_i32(); TCGv_i32 hi = tcg_temp_new_i32(); tcg_gen_mul_i32(lo, m1, m2); tcg_gen_sari_i32(hi, lo, 31); if (op == MAC16_MULA) { tcg_gen_add2_i32(cpu_SR[ACCLO], cpu_SR[ACCHI], cpu_SR[ACCLO], cpu_SR[ACCHI], lo, hi); } else { tcg_gen_sub2_i32(cpu_SR[ACCLO], cpu_SR[ACCHI], cpu_SR[ACCLO], cpu_SR[ACCHI], lo, hi); } tcg_gen_ext8s_i32(cpu_SR[ACCHI], cpu_SR[ACCHI]); tcg_temp_free_i32(lo); tcg_temp_free_i32(hi); } tcg_temp_free(m1); tcg_temp_free(m2); } if (ld_offset) { tcg_gen_mov_i32(cpu_R[RRR_S], vaddr); tcg_gen_mov_i32(cpu_SR[MR + RRR_W], mem32); } tcg_temp_free(vaddr); tcg_temp_free(mem32); } } break; case 5: /*CALLN*/ switch (CALL_N) { case 0: /*CALL0*/ tcg_gen_movi_i32(cpu_R[0], dc->next_pc); gen_jumpi(dc, (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0); break; case 1: /*CALL4w*/ case 2: /*CALL8w*/ case 3: /*CALL12w*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); gen_window_check1(dc, CALL_N << 2); gen_callwi(dc, CALL_N, (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0); break; } break; case 6: /*SI*/ switch (CALL_N) { case 0: /*J*/ gen_jumpi(dc, dc->pc + 4 + CALL_OFFSET_SE, 0); break; case 1: /*BZ*/ gen_window_check1(dc, BRI12_S); { static const TCGCond cond[] = { TCG_COND_EQ, /*BEQZ*/ TCG_COND_NE, /*BNEZ*/ TCG_COND_LT, /*BLTZ*/ TCG_COND_GE, /*BGEZ*/ }; gen_brcondi(dc, cond[BRI12_M & 3], cpu_R[BRI12_S], 0, 4 + BRI12_IMM12_SE); } break; case 2: /*BI0*/ gen_window_check1(dc, BRI8_S); { static const TCGCond cond[] = { TCG_COND_EQ, /*BEQI*/ TCG_COND_NE, /*BNEI*/ TCG_COND_LT, /*BLTI*/ TCG_COND_GE, /*BGEI*/ }; gen_brcondi(dc, cond[BRI8_M & 3], cpu_R[BRI8_S], B4CONST[BRI8_R], 4 + BRI8_IMM8_SE); } break; case 3: /*BI1*/ switch (BRI8_M) { case 0: /*ENTRYw*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); { TCGv_i32 pc = tcg_const_i32(dc->pc); TCGv_i32 s = tcg_const_i32(BRI12_S); TCGv_i32 imm = tcg_const_i32(BRI12_IMM12); gen_advance_ccount(dc); gen_helper_entry(cpu_env, pc, s, imm); tcg_temp_free(imm); tcg_temp_free(s); tcg_temp_free(pc); reset_used_window(dc); } break; case 1: /*B1*/ switch (BRI8_R) { case 0: /*BFp*/ case 1: /*BTp*/ HAS_OPTION(XTENSA_OPTION_BOOLEAN); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRI8_S); gen_brcondi(dc, BRI8_R == 1 ? TCG_COND_NE : TCG_COND_EQ, tmp, 0, 4 + RRI8_IMM8_SE); tcg_temp_free(tmp); } break; case 8: /*LOOP*/ case 9: /*LOOPNEZ*/ case 10: /*LOOPGTZ*/ HAS_OPTION(XTENSA_OPTION_LOOP); gen_window_check1(dc, RRI8_S); { uint32_t lend = dc->pc + RRI8_IMM8 + 4; TCGv_i32 tmp = tcg_const_i32(lend); tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_R[RRI8_S], 1); tcg_gen_movi_i32(cpu_SR[LBEG], dc->next_pc); gen_helper_wsr_lend(cpu_env, tmp); tcg_temp_free(tmp); if (BRI8_R > 8) { int label = gen_new_label(); tcg_gen_brcondi_i32( BRI8_R == 9 ? TCG_COND_NE : TCG_COND_GT, cpu_R[RRI8_S], 0, label); gen_jumpi(dc, lend, 1); gen_set_label(label); } gen_jumpi(dc, dc->next_pc, 0); } break; default: /*reserved*/ RESERVED(); break; } break; case 2: /*BLTUI*/ case 3: /*BGEUI*/ gen_window_check1(dc, BRI8_S); gen_brcondi(dc, BRI8_M == 2 ? TCG_COND_LTU : TCG_COND_GEU, cpu_R[BRI8_S], B4CONSTU[BRI8_R], 4 + BRI8_IMM8_SE); break; } break; } break; case 7: /*B*/ { TCGCond eq_ne = (RRI8_R & 8) ? 
TCG_COND_NE : TCG_COND_EQ; switch (RRI8_R & 7) { case 0: /*BNONE*/ /*BANY*/ gen_window_check2(dc, RRI8_S, RRI8_T); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]); gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE); tcg_temp_free(tmp); } break; case 1: /*BEQ*/ /*BNE*/ case 2: /*BLT*/ /*BGE*/ case 3: /*BLTU*/ /*BGEU*/ gen_window_check2(dc, RRI8_S, RRI8_T); { static const TCGCond cond[] = { [1] = TCG_COND_EQ, [2] = TCG_COND_LT, [3] = TCG_COND_LTU, [9] = TCG_COND_NE, [10] = TCG_COND_GE, [11] = TCG_COND_GEU, }; gen_brcond(dc, cond[RRI8_R], cpu_R[RRI8_S], cpu_R[RRI8_T], 4 + RRI8_IMM8_SE); } break; case 4: /*BALL*/ /*BNALL*/ gen_window_check2(dc, RRI8_S, RRI8_T); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]); gen_brcond(dc, eq_ne, tmp, cpu_R[RRI8_T], 4 + RRI8_IMM8_SE); tcg_temp_free(tmp); } break; case 5: /*BBC*/ /*BBS*/ gen_window_check2(dc, RRI8_S, RRI8_T); { #ifdef TARGET_WORDS_BIGENDIAN TCGv_i32 bit = tcg_const_i32(0x80000000); #else TCGv_i32 bit = tcg_const_i32(0x00000001); #endif TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, cpu_R[RRI8_T], 0x1f); #ifdef TARGET_WORDS_BIGENDIAN tcg_gen_shr_i32(bit, bit, tmp); #else tcg_gen_shl_i32(bit, bit, tmp); #endif tcg_gen_and_i32(tmp, cpu_R[RRI8_S], bit); gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE); tcg_temp_free(tmp); tcg_temp_free(bit); } break; case 6: /*BBCI*/ /*BBSI*/ case 7: gen_window_check1(dc, RRI8_S); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, cpu_R[RRI8_S], #ifdef TARGET_WORDS_BIGENDIAN 0x80000000 >> (((RRI8_R & 1) << 4) | RRI8_T)); #else 0x00000001 << (((RRI8_R & 1) << 4) | RRI8_T)); #endif gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE); tcg_temp_free(tmp); } break; } } break; #define gen_narrow_load_store(type) do { \ TCGv_i32 addr = tcg_temp_new_i32(); \ gen_window_check2(dc, RRRN_S, RRRN_T); \ tcg_gen_addi_i32(addr, cpu_R[RRRN_S], RRRN_R << 2); \ gen_load_store_alignment(dc, 2, addr, false); \ tcg_gen_qemu_##type(cpu_R[RRRN_T], addr, dc->cring); \ tcg_temp_free(addr); \ } while (0) case 8: /*L32I.Nn*/ gen_narrow_load_store(ld32u); break; case 9: /*S32I.Nn*/ gen_narrow_load_store(st32); break; #undef gen_narrow_load_store case 10: /*ADD.Nn*/ gen_window_check3(dc, RRRN_R, RRRN_S, RRRN_T); tcg_gen_add_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], cpu_R[RRRN_T]); break; case 11: /*ADDI.Nn*/ gen_window_check2(dc, RRRN_R, RRRN_S); tcg_gen_addi_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], RRRN_T ? RRRN_T : -1); break; case 12: /*ST2n*/ gen_window_check1(dc, RRRN_S); if (RRRN_T < 8) { /*MOVI.Nn*/ tcg_gen_movi_i32(cpu_R[RRRN_S], RRRN_R | (RRRN_T << 4) | ((RRRN_T & 6) == 6 ? 0xffffff80 : 0)); } else { /*BEQZ.Nn*/ /*BNEZ.Nn*/ TCGCond eq_ne = (RRRN_T & 4) ? 
TCG_COND_NE : TCG_COND_EQ; gen_brcondi(dc, eq_ne, cpu_R[RRRN_S], 0, 4 + (RRRN_R | ((RRRN_T & 3) << 4))); } break; case 13: /*ST3n*/ switch (RRRN_R) { case 0: /*MOV.Nn*/ gen_window_check2(dc, RRRN_S, RRRN_T); tcg_gen_mov_i32(cpu_R[RRRN_T], cpu_R[RRRN_S]); break; case 15: /*S3*/ switch (RRRN_T) { case 0: /*RET.Nn*/ gen_jump(dc, cpu_R[0]); break; case 1: /*RETW.Nn*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); { TCGv_i32 tmp = tcg_const_i32(dc->pc); gen_advance_ccount(dc); gen_helper_retw(tmp, cpu_env, tmp); gen_jump(dc, tmp); tcg_temp_free(tmp); } break; case 2: /*BREAK.Nn*/ HAS_OPTION(XTENSA_OPTION_DEBUG); if (dc->debug) { gen_debug_exception(dc, DEBUGCAUSE_BN); } break; case 3: /*NOP.Nn*/ break; case 6: /*ILL.Nn*/ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); break; default: /*reserved*/ RESERVED(); break; } break; default: /*reserved*/ RESERVED(); break; } break; default: /*reserved*/ RESERVED(); break; } if (dc->is_jmp == DISAS_NEXT) { gen_check_loop_end(dc, 0); } dc->pc = dc->next_pc; return; invalid_opcode: qemu_log("INVALID(pc = %08x)\n", dc->pc); gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); #undef HAS_OPTION } static void check_breakpoint(CPUXtensaState *env, DisasContext *dc) { CPUBreakpoint *bp; if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) { QTAILQ_FOREACH(bp, &env->breakpoints, entry) { if (bp->pc == dc->pc) { tcg_gen_movi_i32(cpu_pc, dc->pc); gen_exception(dc, EXCP_DEBUG); dc->is_jmp = DISAS_UPDATE; } } } } static void gen_ibreak_check(CPUXtensaState *env, DisasContext *dc) { unsigned i; for (i = 0; i < dc->config->nibreak; ++i) { if ((env->sregs[IBREAKENABLE] & (1 << i)) && env->sregs[IBREAKA + i] == dc->pc) { gen_debug_exception(dc, DEBUGCAUSE_IB); break; } } } static inline void gen_intermediate_code_internal(XtensaCPU *cpu, TranslationBlock *tb, bool search_pc) { CPUState *cs = CPU(cpu); CPUXtensaState *env = &cpu->env; DisasContext dc; int insn_count = 0; int j, lj = -1; uint16_t *gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE; int max_insns = tb->cflags & CF_COUNT_MASK; uint32_t pc_start = tb->pc; uint32_t next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; if (max_insns == 0) { max_insns = CF_COUNT_MASK; } dc.config = env->config; dc.singlestep_enabled = cs->singlestep_enabled; dc.tb = tb; dc.pc = pc_start; dc.ring = tb->flags & XTENSA_TBFLAG_RING_MASK; dc.cring = (tb->flags & XTENSA_TBFLAG_EXCM) ? 
0 : dc.ring; dc.lbeg = env->sregs[LBEG]; dc.lend = env->sregs[LEND]; dc.is_jmp = DISAS_NEXT; dc.ccount_delta = 0; dc.debug = tb->flags & XTENSA_TBFLAG_DEBUG; dc.icount = tb->flags & XTENSA_TBFLAG_ICOUNT; dc.cpenable = (tb->flags & XTENSA_TBFLAG_CPENABLE_MASK) >> XTENSA_TBFLAG_CPENABLE_SHIFT; init_litbase(&dc); init_sar_tracker(&dc); reset_used_window(&dc); if (dc.icount) { dc.next_icount = tcg_temp_local_new_i32(); } gen_tb_start(); if (tb->flags & XTENSA_TBFLAG_EXCEPTION) { tcg_gen_movi_i32(cpu_pc, dc.pc); gen_exception(&dc, EXCP_DEBUG); } do { check_breakpoint(env, &dc); if (search_pc) { j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf; if (lj < j) { lj++; while (lj < j) { tcg_ctx.gen_opc_instr_start[lj++] = 0; } } tcg_ctx.gen_opc_pc[lj] = dc.pc; tcg_ctx.gen_opc_instr_start[lj] = 1; tcg_ctx.gen_opc_icount[lj] = insn_count; } if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) { tcg_gen_debug_insn_start(dc.pc); } ++dc.ccount_delta; if (insn_count + 1 == max_insns && (tb->cflags & CF_LAST_IO)) { gen_io_start(); } if (dc.icount) { int label = gen_new_label(); tcg_gen_addi_i32(dc.next_icount, cpu_SR[ICOUNT], 1); tcg_gen_brcondi_i32(TCG_COND_NE, dc.next_icount, 0, label); tcg_gen_mov_i32(dc.next_icount, cpu_SR[ICOUNT]); if (dc.debug) { gen_debug_exception(&dc, DEBUGCAUSE_IC); } gen_set_label(label); } if (dc.debug) { gen_ibreak_check(env, &dc); } disas_xtensa_insn(env, &dc); ++insn_count; if (dc.icount) { tcg_gen_mov_i32(cpu_SR[ICOUNT], dc.next_icount); } if (cs->singlestep_enabled) { tcg_gen_movi_i32(cpu_pc, dc.pc); gen_exception(&dc, EXCP_DEBUG); break; } } while (dc.is_jmp == DISAS_NEXT && insn_count < max_insns && dc.pc < next_page_start && tcg_ctx.gen_opc_ptr < gen_opc_end); reset_litbase(&dc); reset_sar_tracker(&dc); if (dc.icount) { tcg_temp_free(dc.next_icount); } if (tb->cflags & CF_LAST_IO) { gen_io_end(); } if (dc.is_jmp == DISAS_NEXT) { gen_jumpi(&dc, dc.pc, 0); } gen_tb_end(tb, insn_count); *tcg_ctx.gen_opc_ptr = INDEX_op_end; #ifdef DEBUG_DISAS if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { qemu_log("----------------\n"); qemu_log("IN: %s\n", lookup_symbol(pc_start)); log_target_disas(env, pc_start, dc.pc - pc_start, 0); qemu_log("\n"); } #endif if (search_pc) { j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf; memset(tcg_ctx.gen_opc_instr_start + lj + 1, 0, (j - lj) * sizeof(tcg_ctx.gen_opc_instr_start[0])); } else { tb->size = dc.pc - pc_start; tb->icount = insn_count; } } void gen_intermediate_code(CPUXtensaState *env, TranslationBlock *tb) { gen_intermediate_code_internal(xtensa_env_get_cpu(env), tb, false); } void gen_intermediate_code_pc(CPUXtensaState *env, TranslationBlock *tb) { gen_intermediate_code_internal(xtensa_env_get_cpu(env), tb, true); } void xtensa_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, int flags) { XtensaCPU *cpu = XTENSA_CPU(cs); CPUXtensaState *env = &cpu->env; int i, j; cpu_fprintf(f, "PC=%08x\n\n", env->pc); for (i = j = 0; i < 256; ++i) { if (xtensa_option_bits_enabled(env->config, sregnames[i].opt_bits)) { cpu_fprintf(f, "%12s=%08x%c", sregnames[i].name, env->sregs[i], (j++ % 4) == 3 ? '\n' : ' '); } } cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n"); for (i = j = 0; i < 256; ++i) { if (xtensa_option_bits_enabled(env->config, uregnames[i].opt_bits)) { cpu_fprintf(f, "%s=%08x%c", uregnames[i].name, env->uregs[i], (j++ % 4) == 3 ? '\n' : ' '); } } cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n"); for (i = 0; i < 16; ++i) { cpu_fprintf(f, " A%02d=%08x%c", i, env->regs[i], (i % 4) == 3 ? 
'\n' : ' '); } cpu_fprintf(f, "\n"); for (i = 0; i < env->config->nareg; ++i) { cpu_fprintf(f, "AR%02d=%08x%c", i, env->phys_regs[i], (i % 4) == 3 ? '\n' : ' '); } if (xtensa_option_enabled(env->config, XTENSA_OPTION_FP_COPROCESSOR)) { cpu_fprintf(f, "\n"); for (i = 0; i < 16; ++i) { cpu_fprintf(f, "F%02d=%08x (%+10.8e)%c", i, float32_val(env->fregs[i]), *(float *)&env->fregs[i], (i % 2) == 1 ? '\n' : ' '); } } } void restore_state_to_opc(CPUXtensaState *env, TranslationBlock *tb, int pc_pos) { env->pc = tcg_ctx.gen_opc_pc[pc_pos]; }
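/*
 * Illustrative sketch (not part of QEMU's sources): a host-side reference
 * model for the EXTUI case translated above, where the guest field is
 * extracted with shiftimm = RRR_S | ((OP1 & 1) << 4) and a mask of
 * (1 << (OP2 + 1)) - 1, exactly the tcg_gen_shri/tcg_gen_andi pair emitted
 * by the translator. The XTENSA_EXTUI_DEMO guard is invented for this
 * sketch; it only confirms the bit arithmetic, nothing more.
 */
#ifdef XTENSA_EXTUI_DEMO
#include <stdio.h>
#include <stdint.h>

static uint32_t extui_ref(uint32_t at, unsigned shiftimm, unsigned op2)
{
    uint32_t maskimm = (1u << (op2 + 1)) - 1;   /* op2 + 1 field bits */

    return (at >> shiftimm) & maskimm;
}

int main(void)
{
    /* Extract an 8-bit field (op2 = 7) starting at bit 4 of 0xABCD1234 */
    printf("0x%02x\n", extui_ref(0xABCD1234, 4, 7));    /* prints 0x23 */
    return 0;
}
#endif /* XTENSA_EXTUI_DEMO */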
gpl-2.0
pengdonglin137/linux-2.6.11
drivers/media/video/cpia_pp.c
33
22794
/* * cpia_pp CPiA Parallel Port driver * * Supports CPiA based parallel port Video Camera's. * * (C) Copyright 1999 Bas Huisman <bhuism@cs.utwente.nl> * (C) Copyright 1999-2000 Scott J. Bertin <sbertin@securenym.net>, * (C) Copyright 1999-2000 Peter Pregler <Peter_Pregler@email.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* define _CPIA_DEBUG_ for verbose debug output (see cpia.h) */ /* #define _CPIA_DEBUG_ 1 */ #include <linux/config.h> #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/parport.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/workqueue.h> #include <linux/smp_lock.h> #include <linux/sched.h> #include <linux/kmod.h> /* #define _CPIA_DEBUG_ define for verbose debug output */ #include "cpia.h" static int cpia_pp_open(void *privdata); static int cpia_pp_registerCallback(void *privdata, void (*cb) (void *cbdata), void *cbdata); static int cpia_pp_transferCmd(void *privdata, u8 *command, u8 *data); static int cpia_pp_streamStart(void *privdata); static int cpia_pp_streamStop(void *privdata); static int cpia_pp_streamRead(void *privdata, u8 *buffer, int noblock); static int cpia_pp_close(void *privdata); #define ABOUT "Parallel port driver for Vision CPiA based cameras" #define PACKET_LENGTH 8 /* Magic numbers for defining port-device mappings */ #define PPCPIA_PARPORT_UNSPEC -4 #define PPCPIA_PARPORT_AUTO -3 #define PPCPIA_PARPORT_OFF -2 #define PPCPIA_PARPORT_NONE -1 #ifdef MODULE static int parport_nr[PARPORT_MAX] = {[0 ... PARPORT_MAX - 1] = PPCPIA_PARPORT_UNSPEC}; static char *parport[PARPORT_MAX] = {NULL,}; MODULE_AUTHOR("B. Huisman <bhuism@cs.utwente.nl> & Peter Pregler <Peter_Pregler@email.com>"); MODULE_DESCRIPTION("Parallel port driver for Vision CPiA based cameras"); MODULE_LICENSE("GPL"); module_param_array(parport, charp, NULL, 0); MODULE_PARM_DESC(parport, "'auto' or a list of parallel port numbers. Just like lp."); #else static int parport_nr[PARPORT_MAX] __initdata = {[0 ... 
PARPORT_MAX - 1] = PPCPIA_PARPORT_UNSPEC}; static int parport_ptr = 0; #endif struct pp_cam_entry { struct pardevice *pdev; struct parport *port; struct work_struct cb_task; int open_count; wait_queue_head_t wq_stream; /* image state flags */ int image_ready; /* we got an interrupt */ int image_complete; /* we have seen 4 EOI */ int streaming; /* we are in streaming mode */ int stream_irq; }; static struct cpia_camera_ops cpia_pp_ops = { cpia_pp_open, cpia_pp_registerCallback, cpia_pp_transferCmd, cpia_pp_streamStart, cpia_pp_streamStop, cpia_pp_streamRead, cpia_pp_close, 1, THIS_MODULE }; static LIST_HEAD(cam_list); static spinlock_t cam_list_lock_pp; /* FIXME */ static void cpia_parport_enable_irq( struct parport *port ) { parport_enable_irq(port); mdelay(10); return; } static void cpia_parport_disable_irq( struct parport *port ) { parport_disable_irq(port); mdelay(10); return; } /* Special CPiA PPC modes: These are invoked by using the 1284 Extensibility * Link Flag during negotiation */ #define UPLOAD_FLAG 0x08 #define NIBBLE_TRANSFER 0x01 #define ECP_TRANSFER 0x03 #define PARPORT_CHUNK_SIZE PAGE_SIZE /**************************************************************************** * * CPiA-specific low-level parport functions for nibble uploads * ***************************************************************************/ /* CPiA nonstandard "Nibble" mode (no nDataAvail signal after each byte). */ /* The standard kernel parport_ieee1284_read_nibble() fails with the CPiA... */ static size_t cpia_read_nibble (struct parport *port, void *buffer, size_t len, int flags) { /* adapted verbatim, with one change, from parport_ieee1284_read_nibble() in drivers/parport/ieee1284-ops.c */ unsigned char *buf = buffer; int i; unsigned char byte = 0; len *= 2; /* in nibbles */ for (i=0; i < len; i++) { unsigned char nibble; /* The CPiA firmware suppresses the use of nDataAvail (nFault LO) * after every second nibble to signal that more * data is available. (the total number of Bytes that * should be sent is known; if too few are received, an error * will be recorded after a timeout). * This is incompatible with parport_ieee1284_read_nibble(), * which expects to find nFault LO after every second nibble. */ /* Solution: modify cpia_read_nibble to only check for * nDataAvail before the first nibble is sent. */ /* Does the error line indicate end of data? */ if (((i /*& 1*/) == 0) && (parport_read_status(port) & PARPORT_STATUS_ERROR)) { port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; DBG("%s: No more nibble data (%d bytes)\n", port->name, i/2); /* Go to reverse idle phase. */ parport_frob_control (port, PARPORT_CONTROL_AUTOFD, PARPORT_CONTROL_AUTOFD); port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE; break; } /* Event 7: Set nAutoFd low. */ parport_frob_control (port, PARPORT_CONTROL_AUTOFD, PARPORT_CONTROL_AUTOFD); /* Event 9: nAck goes low. */ port->ieee1284.phase = IEEE1284_PH_REV_DATA; if (parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0)) { /* Timeout -- no more data? */ DBG("%s: Nibble timeout at event 9 (%d bytes)\n", port->name, i/2); parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0); break; } /* Read a nibble. */ nibble = parport_read_status (port) >> 3; nibble &= ~8; if ((nibble & 0x10) == 0) nibble |= 8; nibble &= 0xf; /* Event 10: Set nAutoFd high. */ parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0); /* Event 11: nAck goes high. */ if (parport_wait_peripheral (port, PARPORT_STATUS_ACK, PARPORT_STATUS_ACK)) { /* Timeout -- no more data? 
*/ DBG("%s: Nibble timeout at event 11\n", port->name); break; } if (i & 1) { /* Second nibble */ byte |= nibble << 4; *buf++ = byte; } else byte = nibble; } i /= 2; /* i is now in bytes */ if (i == len) { /* Read the last nibble without checking data avail. */ port = port->physport; if (parport_read_status (port) & PARPORT_STATUS_ERROR) port->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; else port->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL; } return i; } /* CPiA nonstandard "Nibble Stream" mode (2 nibbles per cycle, instead of 1) * (See CPiA Data sheet p. 31) * * "Nibble Stream" mode used by CPiA for uploads to non-ECP ports is a * nonstandard variant of nibble mode which allows the same (mediocre) * data flow of 8 bits per cycle as software-enabled ECP by TRISTATE-capable * parallel ports, but works also for non-TRISTATE-capable ports. * (Standard nibble mode only send 4 bits per cycle) * */ static size_t cpia_read_nibble_stream(struct parport *port, void *buffer, size_t len, int flags) { int i; unsigned char *buf = buffer; int endseen = 0; for (i=0; i < len; i++) { unsigned char nibble[2], byte = 0; int j; /* Image Data is complete when 4 consecutive EOI bytes (0xff) are seen */ if (endseen > 3 ) break; /* Event 7: Set nAutoFd low. */ parport_frob_control (port, PARPORT_CONTROL_AUTOFD, PARPORT_CONTROL_AUTOFD); /* Event 9: nAck goes low. */ port->ieee1284.phase = IEEE1284_PH_REV_DATA; if (parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0)) { /* Timeout -- no more data? */ DBG("%s: Nibble timeout at event 9 (%d bytes)\n", port->name, i/2); parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0); break; } /* Read lower nibble */ nibble[0] = parport_read_status (port) >>3; /* Event 10: Set nAutoFd high. */ parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0); /* Event 11: nAck goes high. */ if (parport_wait_peripheral (port, PARPORT_STATUS_ACK, PARPORT_STATUS_ACK)) { /* Timeout -- no more data? */ DBG("%s: Nibble timeout at event 11\n", port->name); break; } /* Read upper nibble */ nibble[1] = parport_read_status (port) >>3; /* reassemble the byte */ for (j = 0; j < 2 ; j++ ) { nibble[j] &= ~8; if ((nibble[j] & 0x10) == 0) nibble[j] |= 8; nibble[j] &= 0xf; } byte = (nibble[0] |(nibble[1] << 4)); *buf++ = byte; if(byte == EOI) endseen++; else endseen = 0; } return i; } /**************************************************************************** * * EndTransferMode * ***************************************************************************/ static void EndTransferMode(struct pp_cam_entry *cam) { parport_negotiate(cam->port, IEEE1284_MODE_COMPAT); } /**************************************************************************** * * ForwardSetup * ***************************************************************************/ static int ForwardSetup(struct pp_cam_entry *cam) { int retry; /* The CPiA uses ECP protocol for Downloads from the Host to the camera. * This will be software-emulated if ECP hardware is not present */ /* the usual camera maximum response time is 10ms, but after receiving * some commands, it needs up to 40ms. (Data Sheet p. 
32)*/ for(retry = 0; retry < 4; ++retry) { if(!parport_negotiate(cam->port, IEEE1284_MODE_ECP)) { break; } mdelay(10); } if(retry == 4) { DBG("Unable to negotiate IEEE1284 ECP Download mode\n"); return -1; } return 0; } /**************************************************************************** * * ReverseSetup * ***************************************************************************/ static int ReverseSetup(struct pp_cam_entry *cam, int extensibility) { int retry; int upload_mode, mode = IEEE1284_MODE_ECP; int transfer_mode = ECP_TRANSFER; if (!(cam->port->modes & PARPORT_MODE_ECP) && !(cam->port->modes & PARPORT_MODE_TRISTATE)) { mode = IEEE1284_MODE_NIBBLE; transfer_mode = NIBBLE_TRANSFER; } upload_mode = mode; if(extensibility) mode = UPLOAD_FLAG|transfer_mode|IEEE1284_EXT_LINK; /* the usual camera maximum response time is 10ms, but after * receiving some commands, it needs up to 40ms. */ for(retry = 0; retry < 4; ++retry) { if(!parport_negotiate(cam->port, mode)) { break; } mdelay(10); } if(retry == 4) { if(extensibility) DBG("Unable to negotiate upload extensibility mode\n"); else DBG("Unable to negotiate upload mode\n"); return -1; } if(extensibility) cam->port->ieee1284.mode = upload_mode; return 0; } /**************************************************************************** * * WritePacket * ***************************************************************************/ static int WritePacket(struct pp_cam_entry *cam, const u8 *packet, size_t size) { int retval=0; int size_written; if (packet == NULL) { return -EINVAL; } if (ForwardSetup(cam)) { DBG("Write failed in setup\n"); return -EIO; } size_written = parport_write(cam->port, packet, size); if(size_written != size) { DBG("Write failed, wrote %d/%d\n", size_written, size); retval = -EIO; } EndTransferMode(cam); return retval; } /**************************************************************************** * * ReadPacket * ***************************************************************************/ static int ReadPacket(struct pp_cam_entry *cam, u8 *packet, size_t size) { int retval=0; if (packet == NULL) { return -EINVAL; } if (ReverseSetup(cam, 0)) { return -EIO; } /* support for CPiA variant nibble reads */ if(cam->port->ieee1284.mode == IEEE1284_MODE_NIBBLE) { if(cpia_read_nibble(cam->port, packet, size, 0) != size) retval = -EIO; } else { if(parport_read(cam->port, packet, size) != size) retval = -EIO; } EndTransferMode(cam); return retval; } /**************************************************************************** * * cpia_pp_streamStart * ***************************************************************************/ static int cpia_pp_streamStart(void *privdata) { struct pp_cam_entry *cam = privdata; DBG("\n"); cam->streaming=1; cam->image_ready=0; //if (ReverseSetup(cam,1)) return -EIO; if(cam->stream_irq) cpia_parport_enable_irq(cam->port); return 0; } /**************************************************************************** * * cpia_pp_streamStop * ***************************************************************************/ static int cpia_pp_streamStop(void *privdata) { struct pp_cam_entry *cam = privdata; DBG("\n"); cam->streaming=0; cpia_parport_disable_irq(cam->port); //EndTransferMode(cam); return 0; } /**************************************************************************** * * cpia_pp_streamRead * ***************************************************************************/ static int cpia_pp_read(struct parport *port, u8 *buffer, int len) { int bytes_read; /* support for CPiA variant "nibble 
stream" reads */ if(port->ieee1284.mode == IEEE1284_MODE_NIBBLE) bytes_read = cpia_read_nibble_stream(port,buffer,len,0); else { int new_bytes; for(bytes_read=0; bytes_read<len; bytes_read += new_bytes) { new_bytes = parport_read(port, buffer+bytes_read, len-bytes_read); if(new_bytes < 0) break; } } return bytes_read; } static int cpia_pp_streamRead(void *privdata, u8 *buffer, int noblock) { struct pp_cam_entry *cam = privdata; int read_bytes = 0; int i, endseen, block_size, new_bytes; if(cam == NULL) { DBG("Internal driver error: cam is NULL\n"); return -EINVAL; } if(buffer == NULL) { DBG("Internal driver error: buffer is NULL\n"); return -EINVAL; } //if(cam->streaming) DBG("%d / %d\n", cam->image_ready, noblock); if( cam->stream_irq ) { DBG("%d\n", cam->image_ready); cam->image_ready--; } cam->image_complete=0; if (0/*cam->streaming*/) { if(!cam->image_ready) { if(noblock) return -EWOULDBLOCK; interruptible_sleep_on(&cam->wq_stream); if( signal_pending(current) ) return -EINTR; DBG("%d\n", cam->image_ready); } } else { if (ReverseSetup(cam, 1)) { DBG("unable to ReverseSetup\n"); return -EIO; } } endseen = 0; block_size = PARPORT_CHUNK_SIZE; while( !cam->image_complete ) { cond_resched(); new_bytes = cpia_pp_read(cam->port, buffer, block_size ); if( new_bytes <= 0 ) { break; } i=-1; while(++i<new_bytes && endseen<4) { if(*buffer==EOI) { endseen++; } else { endseen=0; } buffer++; } read_bytes += i; if( endseen==4 ) { cam->image_complete=1; break; } if( CPIA_MAX_IMAGE_SIZE-read_bytes <= PARPORT_CHUNK_SIZE ) { block_size=CPIA_MAX_IMAGE_SIZE-read_bytes; } } EndTransferMode(cam); return cam->image_complete ? read_bytes : -EIO; } /**************************************************************************** * * cpia_pp_transferCmd * ***************************************************************************/ static int cpia_pp_transferCmd(void *privdata, u8 *command, u8 *data) { int err; int retval=0; int databytes; struct pp_cam_entry *cam = privdata; if(cam == NULL) { DBG("Internal driver error: cam is NULL\n"); return -EINVAL; } if(command == NULL) { DBG("Internal driver error: command is NULL\n"); return -EINVAL; } databytes = (((int)command[7])<<8) | command[6]; if ((err = WritePacket(cam, command, PACKET_LENGTH)) < 0) { DBG("Error writing command\n"); return err; } if(command[0] == DATA_IN) { u8 buffer[8]; if(data == NULL) { DBG("Internal driver error: data is NULL\n"); return -EINVAL; } if((err = ReadPacket(cam, buffer, 8)) < 0) { DBG("Error reading command result\n"); return err; } memcpy(data, buffer, databytes); } else if(command[0] == DATA_OUT) { if(databytes > 0) { if(data == NULL) { DBG("Internal driver error: data is NULL\n"); retval = -EINVAL; } else { if((err=WritePacket(cam, data, databytes)) < 0){ DBG("Error writing command data\n"); return err; } } } } else { DBG("Unexpected first byte of command: %x\n", command[0]); retval = -EINVAL; } return retval; } /**************************************************************************** * * cpia_pp_open * ***************************************************************************/ static int cpia_pp_open(void *privdata) { struct pp_cam_entry *cam = (struct pp_cam_entry *)privdata; if (cam == NULL) return -EINVAL; if(cam->open_count == 0) { if (parport_claim(cam->pdev)) { DBG("failed to claim the port\n"); return -EBUSY; } parport_negotiate(cam->port, IEEE1284_MODE_COMPAT); parport_data_forward(cam->port); parport_write_control(cam->port, PARPORT_CONTROL_SELECT); udelay(50); parport_write_control(cam->port, PARPORT_CONTROL_SELECT | 
PARPORT_CONTROL_INIT); } ++cam->open_count; return 0; } /**************************************************************************** * * cpia_pp_registerCallback * ***************************************************************************/ static int cpia_pp_registerCallback(void *privdata, void (*cb)(void *cbdata), void *cbdata) { struct pp_cam_entry *cam = privdata; int retval = 0; if(cam->port->irq != PARPORT_IRQ_NONE) { INIT_WORK(&cam->cb_task, cb, cbdata); } else { retval = -1; } return retval; } /**************************************************************************** * * cpia_pp_close * ***************************************************************************/ static int cpia_pp_close(void *privdata) { struct pp_cam_entry *cam = privdata; if (--cam->open_count == 0) { parport_release(cam->pdev); } return 0; } /**************************************************************************** * * cpia_pp_register * ***************************************************************************/ static int cpia_pp_register(struct parport *port) { struct pardevice *pdev = NULL; struct pp_cam_entry *cam; struct cam_data *cpia; if (!(port->modes & PARPORT_MODE_PCSPP)) { LOG("port is not supported by CPiA driver\n"); return -ENXIO; } cam = kmalloc(sizeof(struct pp_cam_entry), GFP_KERNEL); if (cam == NULL) { LOG("failed to allocate camera structure\n"); return -ENOMEM; } memset(cam,0,sizeof(struct pp_cam_entry)); pdev = parport_register_device(port, "cpia_pp", NULL, NULL, NULL, 0, cam); if (!pdev) { LOG("failed to parport_register_device\n"); kfree(cam); return -ENXIO; } cam->pdev = pdev; cam->port = port; init_waitqueue_head(&cam->wq_stream); cam->streaming = 0; cam->stream_irq = 0; if((cpia = cpia_register_camera(&cpia_pp_ops, cam)) == NULL) { LOG("failed to cpia_register_camera\n"); parport_unregister_device(pdev); kfree(cam); return -ENXIO; } spin_lock( &cam_list_lock_pp ); list_add( &cpia->cam_data_list, &cam_list ); spin_unlock( &cam_list_lock_pp ); return 0; } static void cpia_pp_detach (struct parport *port) { struct list_head *tmp; struct cam_data *cpia = NULL; struct pp_cam_entry *cam; spin_lock( &cam_list_lock_pp ); list_for_each (tmp, &cam_list) { cpia = list_entry(tmp, struct cam_data, cam_data_list); cam = (struct pp_cam_entry *) cpia->lowlevel_data; if (cam && cam->port->number == port->number) { list_del(&cpia->cam_data_list); break; } cpia = NULL; } spin_unlock( &cam_list_lock_pp ); if (!cpia) { DBG("cpia_pp_detach failed to find cam_data in cam_list\n"); return; } cam = (struct pp_cam_entry *) cpia->lowlevel_data; cpia_unregister_camera(cpia); if(cam->open_count > 0) cpia_pp_close(cam); parport_unregister_device(cam->pdev); cpia->lowlevel_data = NULL; kfree(cam); } static void cpia_pp_attach (struct parport *port) { unsigned int i; switch (parport_nr[0]) { case PPCPIA_PARPORT_UNSPEC: case PPCPIA_PARPORT_AUTO: if (port->probe_info[0].class != PARPORT_CLASS_MEDIA || port->probe_info[0].cmdset == NULL || strncmp(port->probe_info[0].cmdset, "CPIA_1", 6) != 0) return; cpia_pp_register(port); break; default: for (i = 0; i < PARPORT_MAX; ++i) { if (port->number == parport_nr[i]) { cpia_pp_register(port); break; } } break; } } static struct parport_driver cpia_pp_driver = { .name = "cpia_pp", .attach = cpia_pp_attach, .detach = cpia_pp_detach, }; int cpia_pp_init(void) { printk(KERN_INFO "%s v%d.%d.%d\n",ABOUT, CPIA_PP_MAJ_VER,CPIA_PP_MIN_VER,CPIA_PP_PATCH_VER); if(parport_nr[0] == PPCPIA_PARPORT_OFF) { printk(" disabled\n"); return 0; } spin_lock_init( &cam_list_lock_pp ); if 
(parport_register_driver (&cpia_pp_driver)) { LOG ("unable to register with parport\n"); return -EIO; } return 0; } #ifdef MODULE int init_module(void) { if (parport[0]) { /* The user gave some parameters. Let's see what they were. */ if (!strncmp(parport[0], "auto", 4)) { parport_nr[0] = PPCPIA_PARPORT_AUTO; } else { int n; for (n = 0; n < PARPORT_MAX && parport[n]; n++) { if (!strncmp(parport[n], "none", 4)) { parport_nr[n] = PPCPIA_PARPORT_NONE; } else { char *ep; unsigned long r = simple_strtoul(parport[n], &ep, 0); if (ep != parport[n]) { parport_nr[n] = r; } else { LOG("bad port specifier `%s'\n", parport[n]); return -ENODEV; } } } } } return cpia_pp_init(); } void cleanup_module(void) { parport_unregister_driver (&cpia_pp_driver); return; } #else /* !MODULE */ static int __init cpia_pp_setup(char *str) { if (!strncmp(str, "parport", 7)) { int n = simple_strtoul(str + 7, NULL, 10); if (parport_ptr < PARPORT_MAX) { parport_nr[parport_ptr++] = n; } else { LOG("too many ports, %s ignored.\n", str); } } else if (!strcmp(str, "auto")) { parport_nr[0] = PPCPIA_PARPORT_AUTO; } else if (!strcmp(str, "none")) { parport_nr[parport_ptr++] = PPCPIA_PARPORT_NONE; } return 0; } __setup("cpia_pp=", cpia_pp_setup); #endif /* !MODULE */
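/*
 * Illustrative sketch (not part of the driver above): a standalone helper
 * spelling out the nibble reassembly used by cpia_read_nibble() and
 * cpia_read_nibble_stream(). After the status register is shifted right
 * by 3, data bits 0-2 arrive on nFault/Select/PaperOut, and data bit 3
 * arrives on Busy, which the port register reports inverted - hence the
 * clear-bit-3 / test-bit-4 dance. The CPIA_NIBBLE_DEMO guard is invented
 * for this sketch.
 */
#ifdef CPIA_NIBBLE_DEMO
#include <stdio.h>

static unsigned char decode_nibble(unsigned char status)
{
    unsigned char nibble = status >> 3;

    nibble &= ~8;               /* drop nAck (status bit 6), it is handshake */
    if ((nibble & 0x10) == 0)   /* Busy register bit low => data bit 3 set */
        nibble |= 8;
    return nibble & 0xf;
}

int main(void)
{
    /* Status 0x58: nFault and Select high, PaperOut low, Busy bit low */
    printf("0x%x\n", decode_nibble(0x58));  /* prints 0xb */
    return 0;
}
#endif /* CPIA_NIBBLE_DEMO */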
gpl-2.0
aviralg/R-dyntrace
src/extra/trio/compat.c
33
2349
/* Compatibility wrapper for R */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>

int trio_printf(const char *format, ...);
int trio_vprintf(const char *format, va_list args);
int trio_fprintf(FILE *file, const char *format, ...);
int trio_sprintf(char *buffer, const char *format, ...);
int trio_vsprintf(char *buffer, const char *format, va_list args);
int trio_vfprintf(FILE *file, const char *format, va_list args);
int trio_snprintf(char *buffer, size_t max, const char *format, ...);
int trio_vsnprintf(char *buffer, size_t bufferSize, const char *format,
		   va_list args);
int trio_vasprintf(char **ret, const char *format, va_list args);

int printf(const char *format, ...)
{
    int res;
    va_list ap;

    va_start(ap, format);
    res = trio_vprintf(format, ap);
    va_end(ap);
    return res;
}

int fprintf(FILE *file, const char *format, ...)
{
    int res;
    va_list ap;

    va_start(ap, format);
    res = trio_vfprintf(file, format, ap);
    va_end(ap);
    return res;
}

int sprintf(char *buffer, const char *format, ...)
{
    int res;
    va_list ap;

    va_start(ap, format);
    res = trio_vsprintf(buffer, format, ap);
    va_end(ap);
    return res;
}

int vprintf(const char *format, va_list args)
{
    return trio_vprintf(format, args);
}

int vsprintf(char *buffer, const char *format, va_list args)
{
    return trio_vsprintf(buffer, format, args);
}

int vfprintf(FILE *file, const char *format, va_list args)
{
    return trio_vfprintf(file, format, args);
}

/* The test below excludes both 32- and 64-bit Windows */
#ifndef _WIN32
/* These are needed as MinGW's stdio.h has inline snprintf and vsnprintf.
   Include the trioremap.h header file to get the replacements. */
int snprintf(char *buffer, size_t max, const char *format, ...)
{
    int res;
    va_list ap;

    va_start(ap, format);
    res = trio_vsnprintf(buffer, max, format, ap);
    va_end(ap);
    return res;
}

int vsnprintf(char *buffer, size_t bufferSize, const char *format,
	      va_list args)
{
    return trio_vsnprintf(buffer, bufferSize, format, args);
}

int _vsnprintf(char *buffer, size_t bufferSize, const char *format,
	       va_list args)
{
    return trio_vsnprintf(buffer, bufferSize, format, args);
}
#endif

int vasprintf(char **ret, const char *format, va_list args)
{
    return trio_vasprintf(ret, format, args);
}
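/*
 * Illustrative sketch (not part of the original R sources): once this
 * translation unit is linked in, ordinary stdio calls resolve to the
 * wrappers above and are routed through trio's formatter, so the
 * snprintf() below actually runs trio_vsnprintf() on non-Windows builds.
 * The TRIO_COMPAT_DEMO guard is invented for this sketch, and it assumes
 * the trio library is linked.
 */
#ifdef TRIO_COMPAT_DEMO
#include <stdio.h>

int main(void)
{
    char buf[32];

    /* Resolves to the wrapper above, which forwards to trio_vsnprintf() */
    int n = snprintf(buf, sizeof buf, "pi ~ %.3f", 3.14159);

    printf("wrote %d bytes: %s\n", n, buf);     /* also trio-backed */
    return 0;
}
#endif /* TRIO_COMPAT_DEMO */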
gpl-2.0
cleech/linux
drivers/input/touchscreen/lpc32xx_ts.c
33
10026
// SPDX-License-Identifier: GPL-2.0-or-later /* * LPC32xx built-in touchscreen driver * * Copyright (C) 2010 NXP Semiconductors */ #include <linux/platform_device.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/of.h> /* * Touchscreen controller register offsets */ #define LPC32XX_TSC_STAT 0x00 #define LPC32XX_TSC_SEL 0x04 #define LPC32XX_TSC_CON 0x08 #define LPC32XX_TSC_FIFO 0x0C #define LPC32XX_TSC_DTR 0x10 #define LPC32XX_TSC_RTR 0x14 #define LPC32XX_TSC_UTR 0x18 #define LPC32XX_TSC_TTR 0x1C #define LPC32XX_TSC_DXP 0x20 #define LPC32XX_TSC_MIN_X 0x24 #define LPC32XX_TSC_MAX_X 0x28 #define LPC32XX_TSC_MIN_Y 0x2C #define LPC32XX_TSC_MAX_Y 0x30 #define LPC32XX_TSC_AUX_UTR 0x34 #define LPC32XX_TSC_AUX_MIN 0x38 #define LPC32XX_TSC_AUX_MAX 0x3C #define LPC32XX_TSC_STAT_FIFO_OVRRN (1 << 8) #define LPC32XX_TSC_STAT_FIFO_EMPTY (1 << 7) #define LPC32XX_TSC_SEL_DEFVAL 0x0284 #define LPC32XX_TSC_ADCCON_IRQ_TO_FIFO_4 (0x1 << 11) #define LPC32XX_TSC_ADCCON_X_SAMPLE_SIZE(s) ((10 - (s)) << 7) #define LPC32XX_TSC_ADCCON_Y_SAMPLE_SIZE(s) ((10 - (s)) << 4) #define LPC32XX_TSC_ADCCON_POWER_UP (1 << 2) #define LPC32XX_TSC_ADCCON_AUTO_EN (1 << 0) #define LPC32XX_TSC_FIFO_TS_P_LEVEL (1 << 31) #define LPC32XX_TSC_FIFO_NORMALIZE_X_VAL(x) (((x) & 0x03FF0000) >> 16) #define LPC32XX_TSC_FIFO_NORMALIZE_Y_VAL(y) ((y) & 0x000003FF) #define LPC32XX_TSC_ADCDAT_VALUE_MASK 0x000003FF #define LPC32XX_TSC_MIN_XY_VAL 0x0 #define LPC32XX_TSC_MAX_XY_VAL 0x3FF #define MOD_NAME "ts-lpc32xx" #define tsc_readl(dev, reg) \ __raw_readl((dev)->tsc_base + (reg)) #define tsc_writel(dev, reg, val) \ __raw_writel((val), (dev)->tsc_base + (reg)) struct lpc32xx_tsc { struct input_dev *dev; void __iomem *tsc_base; int irq; struct clk *clk; }; static void lpc32xx_fifo_clear(struct lpc32xx_tsc *tsc) { while (!(tsc_readl(tsc, LPC32XX_TSC_STAT) & LPC32XX_TSC_STAT_FIFO_EMPTY)) tsc_readl(tsc, LPC32XX_TSC_FIFO); } static irqreturn_t lpc32xx_ts_interrupt(int irq, void *dev_id) { u32 tmp, rv[4], xs[4], ys[4]; int idx; struct lpc32xx_tsc *tsc = dev_id; struct input_dev *input = tsc->dev; tmp = tsc_readl(tsc, LPC32XX_TSC_STAT); if (tmp & LPC32XX_TSC_STAT_FIFO_OVRRN) { /* FIFO overflow - throw away samples */ lpc32xx_fifo_clear(tsc); return IRQ_HANDLED; } /* * Gather and normalize 4 samples. Pen-up events may have less * than 4 samples, but its ok to pop 4 and let the last sample * pen status check drop the samples. 
*/ idx = 0; while (idx < 4 && !(tsc_readl(tsc, LPC32XX_TSC_STAT) & LPC32XX_TSC_STAT_FIFO_EMPTY)) { tmp = tsc_readl(tsc, LPC32XX_TSC_FIFO); xs[idx] = LPC32XX_TSC_ADCDAT_VALUE_MASK - LPC32XX_TSC_FIFO_NORMALIZE_X_VAL(tmp); ys[idx] = LPC32XX_TSC_ADCDAT_VALUE_MASK - LPC32XX_TSC_FIFO_NORMALIZE_Y_VAL(tmp); rv[idx] = tmp; idx++; } /* Data is only valid if pen is still down in last sample */ if (!(rv[3] & LPC32XX_TSC_FIFO_TS_P_LEVEL) && idx == 4) { /* Use average of 2nd and 3rd sample for position */ input_report_abs(input, ABS_X, (xs[1] + xs[2]) / 2); input_report_abs(input, ABS_Y, (ys[1] + ys[2]) / 2); input_report_key(input, BTN_TOUCH, 1); } else { input_report_key(input, BTN_TOUCH, 0); } input_sync(input); return IRQ_HANDLED; } static void lpc32xx_stop_tsc(struct lpc32xx_tsc *tsc) { /* Disable auto mode */ tsc_writel(tsc, LPC32XX_TSC_CON, tsc_readl(tsc, LPC32XX_TSC_CON) & ~LPC32XX_TSC_ADCCON_AUTO_EN); clk_disable_unprepare(tsc->clk); } static int lpc32xx_setup_tsc(struct lpc32xx_tsc *tsc) { u32 tmp; int err; err = clk_prepare_enable(tsc->clk); if (err) return err; tmp = tsc_readl(tsc, LPC32XX_TSC_CON) & ~LPC32XX_TSC_ADCCON_POWER_UP; /* Set the TSC FIFO depth to 4 samples @ 10-bits per sample (max) */ tmp = LPC32XX_TSC_ADCCON_IRQ_TO_FIFO_4 | LPC32XX_TSC_ADCCON_X_SAMPLE_SIZE(10) | LPC32XX_TSC_ADCCON_Y_SAMPLE_SIZE(10); tsc_writel(tsc, LPC32XX_TSC_CON, tmp); /* These values are all preset */ tsc_writel(tsc, LPC32XX_TSC_SEL, LPC32XX_TSC_SEL_DEFVAL); tsc_writel(tsc, LPC32XX_TSC_MIN_X, LPC32XX_TSC_MIN_XY_VAL); tsc_writel(tsc, LPC32XX_TSC_MAX_X, LPC32XX_TSC_MAX_XY_VAL); tsc_writel(tsc, LPC32XX_TSC_MIN_Y, LPC32XX_TSC_MIN_XY_VAL); tsc_writel(tsc, LPC32XX_TSC_MAX_Y, LPC32XX_TSC_MAX_XY_VAL); /* Aux support is not used */ tsc_writel(tsc, LPC32XX_TSC_AUX_UTR, 0); tsc_writel(tsc, LPC32XX_TSC_AUX_MIN, 0); tsc_writel(tsc, LPC32XX_TSC_AUX_MAX, 0); /* * Set sample rate to about 240Hz per X/Y pair. A single measurement * consists of 4 pairs which gives about a 60Hz sample rate based on * a stable 32768Hz clock source. Values are in clocks. 
* Rate is (32768 / (RTR + XCONV + RTR + YCONV + DXP + TTR + UTR) / 4 */ tsc_writel(tsc, LPC32XX_TSC_RTR, 0x2); tsc_writel(tsc, LPC32XX_TSC_DTR, 0x2); tsc_writel(tsc, LPC32XX_TSC_TTR, 0x10); tsc_writel(tsc, LPC32XX_TSC_DXP, 0x4); tsc_writel(tsc, LPC32XX_TSC_UTR, 88); lpc32xx_fifo_clear(tsc); /* Enable automatic ts event capture */ tsc_writel(tsc, LPC32XX_TSC_CON, tmp | LPC32XX_TSC_ADCCON_AUTO_EN); return 0; } static int lpc32xx_ts_open(struct input_dev *dev) { struct lpc32xx_tsc *tsc = input_get_drvdata(dev); return lpc32xx_setup_tsc(tsc); } static void lpc32xx_ts_close(struct input_dev *dev) { struct lpc32xx_tsc *tsc = input_get_drvdata(dev); lpc32xx_stop_tsc(tsc); } static int lpc32xx_ts_probe(struct platform_device *pdev) { struct lpc32xx_tsc *tsc; struct input_dev *input; struct resource *res; resource_size_t size; int irq; int error; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "Can't get memory resource\n"); return -ENOENT; } irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; tsc = kzalloc(sizeof(*tsc), GFP_KERNEL); input = input_allocate_device(); if (!tsc || !input) { dev_err(&pdev->dev, "failed allocating memory\n"); error = -ENOMEM; goto err_free_mem; } tsc->dev = input; tsc->irq = irq; size = resource_size(res); if (!request_mem_region(res->start, size, pdev->name)) { dev_err(&pdev->dev, "TSC registers are not free\n"); error = -EBUSY; goto err_free_mem; } tsc->tsc_base = ioremap(res->start, size); if (!tsc->tsc_base) { dev_err(&pdev->dev, "Can't map memory\n"); error = -ENOMEM; goto err_release_mem; } tsc->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(tsc->clk)) { dev_err(&pdev->dev, "failed getting clock\n"); error = PTR_ERR(tsc->clk); goto err_unmap; } input->name = MOD_NAME; input->phys = "lpc32xx/input0"; input->id.bustype = BUS_HOST; input->id.vendor = 0x0001; input->id.product = 0x0002; input->id.version = 0x0100; input->dev.parent = &pdev->dev; input->open = lpc32xx_ts_open; input->close = lpc32xx_ts_close; input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(input, ABS_X, LPC32XX_TSC_MIN_XY_VAL, LPC32XX_TSC_MAX_XY_VAL, 0, 0); input_set_abs_params(input, ABS_Y, LPC32XX_TSC_MIN_XY_VAL, LPC32XX_TSC_MAX_XY_VAL, 0, 0); input_set_drvdata(input, tsc); error = request_irq(tsc->irq, lpc32xx_ts_interrupt, 0, pdev->name, tsc); if (error) { dev_err(&pdev->dev, "failed requesting interrupt\n"); goto err_put_clock; } error = input_register_device(input); if (error) { dev_err(&pdev->dev, "failed registering input device\n"); goto err_free_irq; } platform_set_drvdata(pdev, tsc); device_init_wakeup(&pdev->dev, 1); return 0; err_free_irq: free_irq(tsc->irq, tsc); err_put_clock: clk_put(tsc->clk); err_unmap: iounmap(tsc->tsc_base); err_release_mem: release_mem_region(res->start, size); err_free_mem: input_free_device(input); kfree(tsc); return error; } static int lpc32xx_ts_remove(struct platform_device *pdev) { struct lpc32xx_tsc *tsc = platform_get_drvdata(pdev); struct resource *res; free_irq(tsc->irq, tsc); input_unregister_device(tsc->dev); clk_put(tsc->clk); iounmap(tsc->tsc_base); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); kfree(tsc); return 0; } #ifdef CONFIG_PM static int lpc32xx_ts_suspend(struct device *dev) { struct lpc32xx_tsc *tsc = dev_get_drvdata(dev); struct input_dev *input = tsc->dev; /* * Suspend and resume can be called when the device hasn't been * enabled. 
If there are no users that have the device open, then * avoid calling the TSC stop and start functions as the TSC * isn't yet clocked. */ mutex_lock(&input->mutex); if (input_device_enabled(input)) { if (device_may_wakeup(dev)) enable_irq_wake(tsc->irq); else lpc32xx_stop_tsc(tsc); } mutex_unlock(&input->mutex); return 0; } static int lpc32xx_ts_resume(struct device *dev) { struct lpc32xx_tsc *tsc = dev_get_drvdata(dev); struct input_dev *input = tsc->dev; mutex_lock(&input->mutex); if (input_device_enabled(input)) { if (device_may_wakeup(dev)) disable_irq_wake(tsc->irq); else lpc32xx_setup_tsc(tsc); } mutex_unlock(&input->mutex); return 0; } static const struct dev_pm_ops lpc32xx_ts_pm_ops = { .suspend = lpc32xx_ts_suspend, .resume = lpc32xx_ts_resume, }; #define LPC32XX_TS_PM_OPS (&lpc32xx_ts_pm_ops) #else #define LPC32XX_TS_PM_OPS NULL #endif #ifdef CONFIG_OF static const struct of_device_id lpc32xx_tsc_of_match[] = { { .compatible = "nxp,lpc3220-tsc", }, { }, }; MODULE_DEVICE_TABLE(of, lpc32xx_tsc_of_match); #endif static struct platform_driver lpc32xx_ts_driver = { .probe = lpc32xx_ts_probe, .remove = lpc32xx_ts_remove, .driver = { .name = MOD_NAME, .pm = LPC32XX_TS_PM_OPS, .of_match_table = of_match_ptr(lpc32xx_tsc_of_match), }, }; module_platform_driver(lpc32xx_ts_driver); MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>"); MODULE_DESCRIPTION("LPC32XX TSC Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:lpc32xx_ts");
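For contrast with the goto-based unwind in lpc32xx_ts_probe() above, here is a minimal sketch, not part of the driver, of the same acquisition sequence written against the managed devm_* helpers found in later kernels. The lpc32xx_ts_probe_devm name is invented for illustration, and the input_dev id/open/close and wakeup setup is elided:

/* Illustrative only: managed resources make the error labels unnecessary. */
static int lpc32xx_ts_probe_devm(struct platform_device *pdev)
{
	struct lpc32xx_tsc *tsc;
	struct input_dev *input;
	struct resource *res;
	int irq, error;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* Freed automatically when the device is unbound */
	tsc = devm_kzalloc(&pdev->dev, sizeof(*tsc), GFP_KERNEL);
	input = devm_input_allocate_device(&pdev->dev);
	if (!tsc || !input)
		return -ENOMEM;

	/* Claims the region and maps it; also checks res for us */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tsc->tsc_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(tsc->tsc_base))
		return PTR_ERR(tsc->tsc_base);

	tsc->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tsc->clk))
		return PTR_ERR(tsc->clk);

	tsc->dev = input;
	tsc->irq = irq;
	input_set_drvdata(input, tsc);

	error = devm_request_irq(&pdev->dev, irq, lpc32xx_ts_interrupt,
				 0, pdev->name, tsc);
	if (error)
		return error;

	/* devm-allocated input devices are unregistered automatically too */
	return input_register_device(input);
}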
gpl-2.0
omor1/linux-430
arch/arm/mach-omap2/omap_hwmod.c
33
113469
/* * omap_hwmod implementation for OMAP2/3/4 * * Copyright (C) 2009-2011 Nokia Corporation * Copyright (C) 2011-2012 Texas Instruments, Inc. * * Paul Walmsley, Benoît Cousson, Kevin Hilman * * Created in collaboration with (alphabetical order): Thara Gopinath, * Tony Lindgren, Rajendra Nayak, Vikram Pandita, Sakari Poussa, Anand * Sawant, Santosh Shilimkar, Richard Woodruff * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Introduction * ------------ * One way to view an OMAP SoC is as a collection of largely unrelated * IP blocks connected by interconnects. The IP blocks include * devices such as ARM processors, audio serial interfaces, UARTs, * etc. Some of these devices, like the DSP, are created by TI; * others, like the SGX, largely originate from external vendors. In * TI's documentation, on-chip devices are referred to as "OMAP * modules." Some of these IP blocks are identical across several * OMAP versions. Others are revised frequently. * * These OMAP modules are tied together by various interconnects. * Most of the address and data flow between modules is via OCP-based * interconnects such as the L3 and L4 buses; but there are other * interconnects that distribute the hardware clock tree, handle idle * and reset signaling, supply power, and connect the modules to * various pads or balls on the OMAP package. * * OMAP hwmod provides a consistent way to describe the on-chip * hardware blocks and their integration into the rest of the chip. * This description can be automatically generated from the TI * hardware database. OMAP hwmod provides a standard, consistent API * to reset, enable, idle, and disable these hardware blocks. And * hwmod provides a way for other core code, such as the Linux device * code or the OMAP power management and address space mapping code, * to query the hardware database. * * Using hwmod * ----------- * Drivers won't call hwmod functions directly. That is done by the * omap_device code, and in rare occasions, by custom integration code * in arch/arm/ *omap*. The omap_device code includes functions to * build a struct platform_device using omap_hwmod data, and that is * currently how hwmod data is communicated to drivers and to the * Linux driver model. Most drivers will call omap_hwmod functions only * indirectly, via pm_runtime*() functions. * * From a layering perspective, here is where the OMAP hwmod code * fits into the kernel software stack: * * +-------------------------------+ * | Device driver code | * | (e.g., drivers/) | * +-------------------------------+ * | Linux driver model | * | (platform_device / | * | platform_driver data/code) | * +-------------------------------+ * | OMAP core-driver integration | * |(arch/arm/mach-omap2/devices.c)| * +-------------------------------+ * | omap_device code | * | (../plat-omap/omap_device.c) | * +-------------------------------+ * ----> | omap_hwmod code/data | <----- * | (../mach-omap2/omap_hwmod*) | * +-------------------------------+ * | OMAP clock/PRCM/register fns | * | ({read,write}l_relaxed, clk*) | * +-------------------------------+ * * Device drivers should not contain any OMAP-specific code or data in * them. They should only contain code to operate the IP block that * the driver is responsible for. 
This is because these IP blocks can * also appear in other SoCs, either from TI (such as DaVinci) or from * other manufacturers; and drivers should be reusable across other * platforms. * * The OMAP hwmod code also will attempt to reset and idle all on-chip * devices upon boot. The goal here is for the kernel to be * completely self-reliant and independent from bootloaders. This is * to ensure a repeatable configuration, both to ensure consistent * runtime behavior, and to make it easier for others to reproduce * bugs. * * OMAP module activity states * --------------------------- * The hwmod code considers modules to be in one of several activity * states. IP blocks start out in an UNKNOWN state, then once they * are registered via the hwmod code, proceed to the REGISTERED state. * Once their clock names are resolved to clock pointers, the module * enters the CLKS_INITED state; and finally, once the module has been * reset and the integration registers programmed, the INITIALIZED state * is entered. The hwmod code will then place the module into either * the IDLE state to save power, or in the case of a critical system * module, the ENABLED state. * * OMAP core integration code can then call omap_hwmod*() functions * directly to move the module between the IDLE, ENABLED, and DISABLED * states, as needed. This is done during both the PM idle loop, and * in the OMAP core integration code's implementation of the PM runtime * functions. * * References * ---------- * This is a partial list. * - OMAP2420 Multimedia Processor Silicon Revision 2.1.1, 2.2 (SWPU064) * - OMAP2430 Multimedia Device POP Silicon Revision 2.1 (SWPU090) * - OMAP34xx Multimedia Device Silicon Revision 3.1 (SWPU108) * - OMAP4430 Multimedia Device Silicon Revision 1.0 (SWPU140) * - Open Core Protocol Specification 2.2 * * To do: * - handle IO mapping * - bus throughput & module latency measurement code * * XXX add tests at the beginning of each function to ensure the hwmod is * in the appropriate state * XXX error return values should be checked to ensure that they are * appropriate */ #undef DEBUG #include <linux/kernel.h> #include <linux/errno.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/bootmem.h> #include <linux/cpu.h> #include <linux/of.h> #include <linux/of_address.h> #include <asm/system_misc.h> #include "clock.h" #include "omap_hwmod.h" #include "soc.h" #include "common.h" #include "clockdomain.h" #include "powerdomain.h" #include "cm2xxx.h" #include "cm3xxx.h" #include "cm33xx.h" #include "prm.h" #include "prm3xxx.h" #include "prm44xx.h" #include "prm33xx.h" #include "prminst44xx.h" #include "mux.h" #include "pm.h" /* Name of the OMAP hwmod for the MPU */ #define MPU_INITIATOR_NAME "mpu" /* * Number of struct omap_hwmod_link records per struct * omap_hwmod_ocp_if record (master->slave and slave->master) */ #define LINKS_PER_OCP_IF 2 /* * Address offset (in bytes) between the reset control and the reset * status registers: 4 bytes on OMAP4 */ #define OMAP4_RST_CTRL_ST_OFFSET 4 /** * struct omap_hwmod_soc_ops - fn ptrs for some SoC-specific operations * @enable_module: function to enable a module (via MODULEMODE) * @disable_module: function to disable a module (via MODULEMODE) * * XXX Eventually this functionality will be hidden inside the PRM/CM * device drivers. 
Until then, this should avoid huge blocks of cpu_is_*() * conditionals in this code. */ struct omap_hwmod_soc_ops { void (*enable_module)(struct omap_hwmod *oh); int (*disable_module)(struct omap_hwmod *oh); int (*wait_target_ready)(struct omap_hwmod *oh); int (*assert_hardreset)(struct omap_hwmod *oh, struct omap_hwmod_rst_info *ohri); int (*deassert_hardreset)(struct omap_hwmod *oh, struct omap_hwmod_rst_info *ohri); int (*is_hardreset_asserted)(struct omap_hwmod *oh, struct omap_hwmod_rst_info *ohri); int (*init_clkdm)(struct omap_hwmod *oh); void (*update_context_lost)(struct omap_hwmod *oh); int (*get_context_lost)(struct omap_hwmod *oh); }; /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */ static struct omap_hwmod_soc_ops soc_ops; /* omap_hwmod_list contains all registered struct omap_hwmods */ static LIST_HEAD(omap_hwmod_list); /* mpu_oh: used to add/remove MPU initiator from sleepdep list */ static struct omap_hwmod *mpu_oh; /* io_chain_lock: used to serialize reconfigurations of the I/O chain */ static DEFINE_SPINLOCK(io_chain_lock); /* * linkspace: ptr to a buffer that struct omap_hwmod_link records are * allocated from - used to reduce the number of small memory * allocations, which has a significant impact on performance */ static struct omap_hwmod_link *linkspace; /* * free_ls, max_ls: array indexes into linkspace; representing the * next free struct omap_hwmod_link index, and the maximum number of * struct omap_hwmod_link records allocated (respectively) */ static unsigned short free_ls, max_ls, ls_supp; /* inited: set to true once the hwmod code is initialized */ static bool inited; /* Private functions */ /** * _fetch_next_ocp_if - return the next OCP interface in a list * @p: ptr to a ptr to the list_head inside the ocp_if to return * @i: pointer to the index of the element pointed to by @p in the list * * Return a pointer to the struct omap_hwmod_ocp_if record * containing the struct list_head pointed to by @p, and increment * @p such that a future call to this routine will return the next * record. */ static struct omap_hwmod_ocp_if *_fetch_next_ocp_if(struct list_head **p, int *i) { struct omap_hwmod_ocp_if *oi; oi = list_entry(*p, struct omap_hwmod_link, node)->ocp_if; *p = (*p)->next; *i = *i + 1; return oi; } /** * _update_sysc_cache - return the module OCP_SYSCONFIG register, keep copy * @oh: struct omap_hwmod * * * Load the current value of the hwmod OCP_SYSCONFIG register into the * struct omap_hwmod for later use. Returns -EINVAL if the hwmod has no * OCP_SYSCONFIG register or 0 upon success. */ static int _update_sysc_cache(struct omap_hwmod *oh) { if (!oh->class->sysc) { WARN(1, "omap_hwmod: %s: cannot read OCP_SYSCONFIG: not defined on hwmod's class\n", oh->name); return -EINVAL; } /* XXX ensure module interface clock is up */ oh->_sysc_cache = omap_hwmod_read(oh, oh->class->sysc->sysc_offs); if (!(oh->class->sysc->sysc_flags & SYSC_NO_CACHE)) oh->_int_flags |= _HWMOD_SYSCONFIG_LOADED; return 0; } /** * _write_sysconfig - write a value to the module's OCP_SYSCONFIG register * @v: OCP_SYSCONFIG value to write * @oh: struct omap_hwmod * * * Write @v into the module class' OCP_SYSCONFIG register, if it has * one. No return value. 
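* A typical caller, following the pattern used throughout this file, edits the cached value first and only then writes it back: * v = oh->_sysc_cache; _set_slave_idlemode(oh, HWMOD_IDLEMODE_SMART, &v); _write_sysconfig(v, oh); * i.e. the bitfield helpers below only edit the in-memory copy.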
*/ static void _write_sysconfig(u32 v, struct omap_hwmod *oh) { if (!oh->class->sysc) { WARN(1, "omap_hwmod: %s: cannot write OCP_SYSCONFIG: not defined on hwmod's class\n", oh->name); return; } /* XXX ensure module interface clock is up */ /* Module might have lost context, always update cache and register */ oh->_sysc_cache = v; /* * Some IP blocks (such as RTC) require unlocking of IP before * accessing its registers. If a function pointer is present * to unlock, then call it before accessing sysconfig and * call lock after writing sysconfig. */ if (oh->class->unlock) oh->class->unlock(oh); omap_hwmod_write(v, oh, oh->class->sysc->sysc_offs); if (oh->class->lock) oh->class->lock(oh); } /** * _set_master_standbymode: set the OCP_SYSCONFIG MIDLEMODE field in @v * @oh: struct omap_hwmod * * @standbymode: MIDLEMODE field bits * @v: pointer to register contents to modify * * Update the master standby mode bits in @v to be @standbymode for * the @oh hwmod. Does not write to the hardware. Returns -EINVAL * upon error or 0 upon success. */ static int _set_master_standbymode(struct omap_hwmod *oh, u8 standbymode, u32 *v) { u32 mstandby_mask; u8 mstandby_shift; if (!oh->class->sysc || !(oh->class->sysc->sysc_flags & SYSC_HAS_MIDLEMODE)) return -EINVAL; if (!oh->class->sysc->sysc_fields) { WARN(1, "omap_hwmod: %s: offset struct for sysconfig not provided in class\n", oh->name); return -EINVAL; } mstandby_shift = oh->class->sysc->sysc_fields->midle_shift; mstandby_mask = (0x3 << mstandby_shift); *v &= ~mstandby_mask; *v |= __ffs(standbymode) << mstandby_shift; return 0; } /** * _set_slave_idlemode: set the OCP_SYSCONFIG SIDLEMODE field in @v * @oh: struct omap_hwmod * * @idlemode: SIDLEMODE field bits * @v: pointer to register contents to modify * * Update the slave idle mode bits in @v to be @idlemode for the @oh * hwmod. Does not write to the hardware. Returns -EINVAL upon error * or 0 upon success. */ static int _set_slave_idlemode(struct omap_hwmod *oh, u8 idlemode, u32 *v) { u32 sidle_mask; u8 sidle_shift; if (!oh->class->sysc || !(oh->class->sysc->sysc_flags & SYSC_HAS_SIDLEMODE)) return -EINVAL; if (!oh->class->sysc->sysc_fields) { WARN(1, "omap_hwmod: %s: offset struct for sysconfig not provided in class\n", oh->name); return -EINVAL; } sidle_shift = oh->class->sysc->sysc_fields->sidle_shift; sidle_mask = (0x3 << sidle_shift); *v &= ~sidle_mask; *v |= __ffs(idlemode) << sidle_shift; return 0; } /** * _set_clockactivity: set OCP_SYSCONFIG.CLOCKACTIVITY bits in @v * @oh: struct omap_hwmod * * @clockact: CLOCKACTIVITY field bits * @v: pointer to register contents to modify * * Update the clockactivity mode bits in @v to be @clockact for the * @oh hwmod. Used for additional powersaving on some modules. Does * not write to the hardware. Returns -EINVAL upon error or 0 upon * success. */ static int _set_clockactivity(struct omap_hwmod *oh, u8 clockact, u32 *v) { u32 clkact_mask; u8 clkact_shift; if (!oh->class->sysc || !(oh->class->sysc->sysc_flags & SYSC_HAS_CLOCKACTIVITY)) return -EINVAL; if (!oh->class->sysc->sysc_fields) { WARN(1, "omap_hwmod: %s: offset struct for sysconfig not provided in class\n", oh->name); return -EINVAL; } clkact_shift = oh->class->sysc->sysc_fields->clkact_shift; clkact_mask = (0x3 << clkact_shift); *v &= ~clkact_mask; *v |= clockact << clkact_shift; return 0; } /** * _set_softreset: set OCP_SYSCONFIG.SOFTRESET bit in @v * @oh: struct omap_hwmod * * @v: pointer to register contents to modify * * Set the SOFTRESET bit in @v for hwmod @oh. 
Returns -EINVAL upon * error or 0 upon success. */ static int _set_softreset(struct omap_hwmod *oh, u32 *v) { u32 softrst_mask; if (!oh->class->sysc || !(oh->class->sysc->sysc_flags & SYSC_HAS_SOFTRESET)) return -EINVAL; if (!oh->class->sysc->sysc_fields) { WARN(1, "omap_hwmod: %s: offset struct for sysconfig not provided in class\n", oh->name); return -EINVAL; } softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift); *v |= softrst_mask; return 0; } /** * _clear_softreset: clear OCP_SYSCONFIG.SOFTRESET bit in @v * @oh: struct omap_hwmod * * @v: pointer to register contents to modify * * Clear the SOFTRESET bit in @v for hwmod @oh. Returns -EINVAL upon * error or 0 upon success. */ static int _clear_softreset(struct omap_hwmod *oh, u32 *v) { u32 softrst_mask; if (!oh->class->sysc || !(oh->class->sysc->sysc_flags & SYSC_HAS_SOFTRESET)) return -EINVAL; if (!oh->class->sysc->sysc_fields) { WARN(1, "omap_hwmod: %s: sysc_fields absent for sysconfig class\n", oh->name); return -EINVAL; } softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift); *v &= ~softrst_mask; return 0; } /** * _wait_softreset_complete - wait for an OCP softreset to complete * @oh: struct omap_hwmod * to wait on * * Wait until the IP block represented by @oh reports that its OCP * softreset is complete. This can be triggered by software (see * _ocp_softreset()) or by hardware upon returning from off-mode (one * example is HSMMC). Waits for up to MAX_MODULE_SOFTRESET_WAIT * microseconds. Returns the number of microseconds waited. */ static int _wait_softreset_complete(struct omap_hwmod *oh) { struct omap_hwmod_class_sysconfig *sysc; u32 softrst_mask; int c = 0; sysc = oh->class->sysc; if (sysc->sysc_flags & SYSS_HAS_RESET_STATUS) omap_test_timeout((omap_hwmod_read(oh, sysc->syss_offs) & SYSS_RESETDONE_MASK), MAX_MODULE_SOFTRESET_WAIT, c); else if (sysc->sysc_flags & SYSC_HAS_RESET_STATUS) { softrst_mask = (0x1 << sysc->sysc_fields->srst_shift); omap_test_timeout(!(omap_hwmod_read(oh, sysc->sysc_offs) & softrst_mask), MAX_MODULE_SOFTRESET_WAIT, c); } return c; } /** * _set_dmadisable: set OCP_SYSCONFIG.DMADISABLE bit in @v * @oh: struct omap_hwmod * * * The DMADISABLE bit is a semi-automatic bit present in sysconfig register * of some modules. When the DMA must perform read/write accesses, the * DMADISABLE bit is cleared by the hardware. But when the DMA must stop * for power management, software must set the DMADISABLE bit back to 1. * * Set the DMADISABLE bit in @v for hwmod @oh. Returns -EINVAL upon * error or 0 upon success. */ static int _set_dmadisable(struct omap_hwmod *oh) { u32 v; u32 dmadisable_mask; if (!oh->class->sysc || !(oh->class->sysc->sysc_flags & SYSC_HAS_DMADISABLE)) return -EINVAL; if (!oh->class->sysc->sysc_fields) { WARN(1, "omap_hwmod: %s: offset struct for sysconfig not provided in class\n", oh->name); return -EINVAL; } /* clocks must be on for this operation */ if (oh->_state != _HWMOD_STATE_ENABLED) { pr_warn("omap_hwmod: %s: dma can be disabled only from enabled state\n", oh->name); return -EINVAL; } pr_debug("omap_hwmod: %s: setting DMADISABLE\n", oh->name); v = oh->_sysc_cache; dmadisable_mask = (0x1 << oh->class->sysc->sysc_fields->dmadisable_shift); v |= dmadisable_mask; _write_sysconfig(v, oh); return 0; } /** * _set_module_autoidle: set the OCP_SYSCONFIG AUTOIDLE field in @v * @oh: struct omap_hwmod * * @autoidle: desired AUTOIDLE bitfield value (0 or 1) * @v: pointer to register contents to modify * * Update the module autoidle bit in @v to be @autoidle for the @oh * hwmod. 
The autoidle bit controls whether the module can gate * internal clocks automatically when it isn't doing anything; the * exact function of this bit varies on a per-module basis. This * function does not write to the hardware. Returns -EINVAL upon * error or 0 upon success. */ static int _set_module_autoidle(struct omap_hwmod *oh, u8 autoidle, u32 *v) { u32 autoidle_mask; u8 autoidle_shift; if (!oh->class->sysc || !(oh->class->sysc->sysc_flags & SYSC_HAS_AUTOIDLE)) return -EINVAL; if (!oh->class->sysc->sysc_fields) { WARN(1, "omap_hwmod: %s: offset struct for sysconfig not provided in class\n", oh->name); return -EINVAL; } autoidle_shift = oh->class->sysc->sysc_fields->autoidle_shift; autoidle_mask = (0x1 << autoidle_shift); *v &= ~autoidle_mask; *v |= autoidle << autoidle_shift; return 0; } /** * _set_idle_ioring_wakeup - enable/disable IO pad wakeup on hwmod idle for mux * @oh: struct omap_hwmod * * @set_wake: bool value indicating to set (true) or clear (false) wakeup enable * * Set or clear the I/O pad wakeup flag in the mux entries for the * hwmod @oh. This function changes the @oh->mux->pads_dynamic array * in memory. If the hwmod is currently idled, and the new idle * values don't match the previous ones, this function will also * update the SCM PADCTRL registers. Otherwise, if the hwmod is not * currently idled, this function won't touch the hardware: the new * mux settings are written to the SCM PADCTRL registers when the * hwmod is idled. No return value. */ static void _set_idle_ioring_wakeup(struct omap_hwmod *oh, bool set_wake) { struct omap_device_pad *pad; bool change = false; u16 prev_idle; int j; if (!oh->mux || !oh->mux->enabled) return; for (j = 0; j < oh->mux->nr_pads_dynamic; j++) { pad = oh->mux->pads_dynamic[j]; if (!(pad->flags & OMAP_DEVICE_PAD_WAKEUP)) continue; prev_idle = pad->idle; if (set_wake) pad->idle |= OMAP_WAKEUP_EN; else pad->idle &= ~OMAP_WAKEUP_EN; if (prev_idle != pad->idle) change = true; } if (change && oh->_state == _HWMOD_STATE_IDLE) omap_hwmod_mux(oh->mux, _HWMOD_STATE_IDLE); } /** * _enable_wakeup: set OCP_SYSCONFIG.ENAWAKEUP bit in the hardware * @oh: struct omap_hwmod * * * Allow the hardware module @oh to send wakeups. Returns -EINVAL * upon error or 0 upon success. */ static int _enable_wakeup(struct omap_hwmod *oh, u32 *v) { if (!oh->class->sysc || !((oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP) || (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) || (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP))) return -EINVAL; if (!oh->class->sysc->sysc_fields) { WARN(1, "omap_hwmod: %s: offset struct for sysconfig not provided in class\n", oh->name); return -EINVAL; } if (oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP) *v |= 0x1 << oh->class->sysc->sysc_fields->enwkup_shift; if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) _set_slave_idlemode(oh, HWMOD_IDLEMODE_SMART_WKUP, v); if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP) _set_master_standbymode(oh, HWMOD_IDLEMODE_SMART_WKUP, v); /* XXX test pwrdm_get_wken for this hwmod's subsystem */ return 0; } /** * _disable_wakeup: clear OCP_SYSCONFIG.ENAWAKEUP bit in the hardware * @oh: struct omap_hwmod * * * Prevent the hardware module @oh to send wakeups. Returns -EINVAL * upon error or 0 upon success. 
*/ static int _disable_wakeup(struct omap_hwmod *oh, u32 *v) { if (!oh->class->sysc || !((oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP) || (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) || (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP))) return -EINVAL; if (!oh->class->sysc->sysc_fields) { WARN(1, "omap_hwmod: %s: offset struct for sysconfig not provided in class\n", oh->name); return -EINVAL; } if (oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP) *v &= ~(0x1 << oh->class->sysc->sysc_fields->enwkup_shift); if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) _set_slave_idlemode(oh, HWMOD_IDLEMODE_SMART, v); if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP) _set_master_standbymode(oh, HWMOD_IDLEMODE_SMART, v); /* XXX test pwrdm_get_wken for this hwmod's subsystem */ return 0; } static struct clockdomain *_get_clkdm(struct omap_hwmod *oh) { struct clk_hw_omap *clk; if (oh->clkdm) { return oh->clkdm; } else if (oh->_clk) { if (__clk_get_flags(oh->_clk) & CLK_IS_BASIC) return NULL; clk = to_clk_hw_omap(__clk_get_hw(oh->_clk)); return clk->clkdm; } return NULL; } /** * _add_initiator_dep: prevent @oh from smart-idling while @init_oh is active * @oh: struct omap_hwmod * * * Prevent the hardware module @oh from entering idle while the * hardware module initiator @init_oh is active. Useful when a module * will be accessed by a particular initiator (e.g., if a module will * be accessed by the IVA, there should be a sleepdep between the IVA * initiator and the module). Only applies to modules in smart-idle * mode. If the clockdomain is marked as not needing autodeps, return * 0 without doing anything. Otherwise, returns -EINVAL upon error or * passes along clkdm_add_sleepdep() value upon success. */ static int _add_initiator_dep(struct omap_hwmod *oh, struct omap_hwmod *init_oh) { struct clockdomain *clkdm, *init_clkdm; clkdm = _get_clkdm(oh); init_clkdm = _get_clkdm(init_oh); if (!clkdm || !init_clkdm) return -EINVAL; if (clkdm && clkdm->flags & CLKDM_NO_AUTODEPS) return 0; return clkdm_add_sleepdep(clkdm, init_clkdm); } /** * _del_initiator_dep: allow @oh to smart-idle even if @init_oh is active * @oh: struct omap_hwmod * * * Allow the hardware module @oh to enter idle while the hardware * module initiator @init_oh is active. Useful when a module will not * be accessed by a particular initiator (e.g., if a module will not * be accessed by the IVA, there should be no sleepdep between the IVA * initiator and the module). Only applies to modules in smart-idle * mode. If the clockdomain is marked as not needing autodeps, return * 0 without doing anything. Returns -EINVAL upon error or passes * along clkdm_del_sleepdep() value upon success. */ static int _del_initiator_dep(struct omap_hwmod *oh, struct omap_hwmod *init_oh) { struct clockdomain *clkdm, *init_clkdm; clkdm = _get_clkdm(oh); init_clkdm = _get_clkdm(init_oh); if (!clkdm || !init_clkdm) return -EINVAL; if (clkdm && clkdm->flags & CLKDM_NO_AUTODEPS) return 0; return clkdm_del_sleepdep(clkdm, init_clkdm); } /** * _init_main_clk - get a struct clk * for the hwmod's main functional clk * @oh: struct omap_hwmod * * * Called from _init_clocks(). Populates the @oh _clk (main * functional clock pointer) if a main_clk is present. Returns 0 on * success or -EINVAL on error.
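* For example, a hwmod whose data sets .main_clk = "gpt1_fck" has that * name resolved here via clk_get(NULL, "gpt1_fck").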
*/ static int _init_main_clk(struct omap_hwmod *oh) { int ret = 0; if (!oh->main_clk) return 0; oh->_clk = clk_get(NULL, oh->main_clk); if (IS_ERR(oh->_clk)) { pr_warn("omap_hwmod: %s: cannot clk_get main_clk %s\n", oh->name, oh->main_clk); return -EINVAL; } /* * HACK: This needs a re-visit once clk_prepare() is implemented * to do something meaningful. Today it's just a no-op. * If clk_prepare() is used at some point to do things like * voltage scaling etc, then this would have to be moved to * some point where subsystems like i2c and pmic become * available. */ clk_prepare(oh->_clk); if (!_get_clkdm(oh)) pr_debug("omap_hwmod: %s: missing clockdomain for %s.\n", oh->name, oh->main_clk); return ret; } /** * _init_interface_clks - get a struct clk * for the hwmod's interface clks * @oh: struct omap_hwmod * * * Called from _init_clocks(). Populates the @oh OCP slave interface * clock pointers. Returns 0 on success or -EINVAL on error. */ static int _init_interface_clks(struct omap_hwmod *oh) { struct omap_hwmod_ocp_if *os; struct list_head *p; struct clk *c; int i = 0; int ret = 0; p = oh->slave_ports.next; while (i < oh->slaves_cnt) { os = _fetch_next_ocp_if(&p, &i); if (!os->clk) continue; c = clk_get(NULL, os->clk); if (IS_ERR(c)) { pr_warn("omap_hwmod: %s: cannot clk_get interface_clk %s\n", oh->name, os->clk); ret = -EINVAL; continue; } os->_clk = c; /* * HACK: This needs a re-visit once clk_prepare() is implemented * to do something meaningful. Today it's just a no-op. * If clk_prepare() is used at some point to do things like * voltage scaling etc, then this would have to be moved to * some point where subsystems like i2c and pmic become * available. */ clk_prepare(os->_clk); } return ret; } /** * _init_opt_clks - get a struct clk * for the hwmod's optional clocks * @oh: struct omap_hwmod * * * Called from _init_clocks(). Populates the @oh omap_hwmod_opt_clk * clock pointers. Returns 0 on success or -EINVAL on error. */ static int _init_opt_clks(struct omap_hwmod *oh) { struct omap_hwmod_opt_clk *oc; struct clk *c; int i; int ret = 0; for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) { c = clk_get(NULL, oc->clk); if (IS_ERR(c)) { pr_warn("omap_hwmod: %s: cannot clk_get opt_clk %s\n", oh->name, oc->clk); ret = -EINVAL; continue; } oc->_clk = c; /* * HACK: This needs a re-visit once clk_prepare() is implemented * to do something meaningful. Today it's just a no-op. * If clk_prepare() is used at some point to do things like * voltage scaling etc, then this would have to be moved to * some point where subsystems like i2c and pmic become * available. */ clk_prepare(oc->_clk); } return ret; } static void _enable_optional_clocks(struct omap_hwmod *oh) { struct omap_hwmod_opt_clk *oc; int i; pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name); for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) if (oc->_clk) { pr_debug("omap_hwmod: enable %s:%s\n", oc->role, __clk_get_name(oc->_clk)); clk_enable(oc->_clk); } } static void _disable_optional_clocks(struct omap_hwmod *oh) { struct omap_hwmod_opt_clk *oc; int i; pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name); for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) if (oc->_clk) { pr_debug("omap_hwmod: disable %s:%s\n", oc->role, __clk_get_name(oc->_clk)); clk_disable(oc->_clk); } } /** * _enable_clocks - enable hwmod main clock and interface clocks * @oh: struct omap_hwmod * * * Enables all clocks necessary for register reads and writes to succeed * on the hwmod @oh. Returns 0.
*/ static int _enable_clocks(struct omap_hwmod *oh) { struct omap_hwmod_ocp_if *os; struct list_head *p; int i = 0; pr_debug("omap_hwmod: %s: enabling clocks\n", oh->name); if (oh->_clk) clk_enable(oh->_clk); p = oh->slave_ports.next; while (i < oh->slaves_cnt) { os = _fetch_next_ocp_if(&p, &i); if (os->_clk && (os->flags & OCPIF_SWSUP_IDLE)) clk_enable(os->_clk); } if (oh->flags & HWMOD_OPT_CLKS_NEEDED) _enable_optional_clocks(oh); /* The opt clocks are controlled by the device driver. */ return 0; } /** * _disable_clocks - disable hwmod main clock and interface clocks * @oh: struct omap_hwmod * * * Disables the hwmod @oh main functional and interface clocks. Returns 0. */ static int _disable_clocks(struct omap_hwmod *oh) { struct omap_hwmod_ocp_if *os; struct list_head *p; int i = 0; pr_debug("omap_hwmod: %s: disabling clocks\n", oh->name); if (oh->_clk) clk_disable(oh->_clk); p = oh->slave_ports.next; while (i < oh->slaves_cnt) { os = _fetch_next_ocp_if(&p, &i); if (os->_clk && (os->flags & OCPIF_SWSUP_IDLE)) clk_disable(os->_clk); } if (oh->flags & HWMOD_OPT_CLKS_NEEDED) _disable_optional_clocks(oh); /* The opt clocks are controlled by the device driver. */ return 0; } /** * _omap4_enable_module - enable CLKCTRL modulemode on OMAP4 * @oh: struct omap_hwmod * * * Enables the PRCM module mode related to the hwmod @oh. * No return value. */ static void _omap4_enable_module(struct omap_hwmod *oh) { if (!oh->clkdm || !oh->prcm.omap4.modulemode) return; pr_debug("omap_hwmod: %s: %s: %d\n", oh->name, __func__, oh->prcm.omap4.modulemode); omap_cm_module_enable(oh->prcm.omap4.modulemode, oh->clkdm->prcm_partition, oh->clkdm->cm_inst, oh->prcm.omap4.clkctrl_offs); } /** * _omap4_wait_target_disable - wait for a module to be disabled on OMAP4 * @oh: struct omap_hwmod * * * Wait for a module @oh to enter slave idle. Returns 0 if the module * does not have an IDLEST bit or if the module successfully enters * slave idle; otherwise, pass along the return value of the * appropriate *_cm*_wait_module_idle() function. */ static int _omap4_wait_target_disable(struct omap_hwmod *oh) { if (!oh) return -EINVAL; if (oh->_int_flags & _HWMOD_NO_MPU_PORT || !oh->clkdm) return 0; if (oh->flags & HWMOD_NO_IDLEST) return 0; return omap_cm_wait_module_idle(oh->clkdm->prcm_partition, oh->clkdm->cm_inst, oh->prcm.omap4.clkctrl_offs, 0); } /** * _count_mpu_irqs - count the number of MPU IRQ lines associated with @oh * @oh: struct omap_hwmod *oh * * Count and return the number of MPU IRQs associated with the hwmod * @oh. Used to allocate struct resource data. Returns 0 if @oh is * NULL. */ static int _count_mpu_irqs(struct omap_hwmod *oh) { struct omap_hwmod_irq_info *ohii; int i = 0; if (!oh || !oh->mpu_irqs) return 0; do { ohii = &oh->mpu_irqs[i++]; } while (ohii->irq != -1); return i-1; } /** * _count_sdma_reqs - count the number of SDMA request lines associated with @oh * @oh: struct omap_hwmod *oh * * Count and return the number of SDMA request lines associated with * the hwmod @oh. Used to allocate struct resource data. Returns 0 * if @oh is NULL. */ static int _count_sdma_reqs(struct omap_hwmod *oh) { struct omap_hwmod_dma_info *ohdi; int i = 0; if (!oh || !oh->sdma_reqs) return 0; do { ohdi = &oh->sdma_reqs[i++]; } while (ohdi->dma_req != -1); return i-1; } /** * _count_ocp_if_addr_spaces - count the number of address space entries for @oh * @oh: struct omap_hwmod *oh * * Count and return the number of address space ranges associated with * the hwmod @oh. Used to allocate struct resource data. 
Returns 0 * if @oh is NULL. */ static int _count_ocp_if_addr_spaces(struct omap_hwmod_ocp_if *os) { struct omap_hwmod_addr_space *mem; int i = 0; if (!os || !os->addr) return 0; do { mem = &os->addr[i++]; } while (mem->pa_start != mem->pa_end); return i-1; } /** * _get_mpu_irq_by_name - fetch MPU interrupt line number by name * @oh: struct omap_hwmod * to operate on * @name: pointer to the name of the MPU interrupt number to fetch (optional) * @irq: pointer to an unsigned int to store the MPU IRQ number to * * Retrieve an MPU hardware IRQ line number named by @name associated * with the IP block pointed to by @oh. The IRQ number will be filled * into the address pointed to by @irq. When @name is non-null, the * IRQ line number associated with the named entry will be returned. * If @name is null, the first matching entry will be returned. Data * order is not meaningful in hwmod data, so callers are strongly * encouraged to use a non-null @name whenever possible to avoid * unpredictable effects if hwmod data is later added that causes data * ordering to change. Returns 0 upon success or a negative error * code upon error. */ static int _get_mpu_irq_by_name(struct omap_hwmod *oh, const char *name, unsigned int *irq) { int i; bool found = false; if (!oh->mpu_irqs) return -ENOENT; i = 0; while (oh->mpu_irqs[i].irq != -1) { if (name == oh->mpu_irqs[i].name || !strcmp(name, oh->mpu_irqs[i].name)) { found = true; break; } i++; } if (!found) return -ENOENT; *irq = oh->mpu_irqs[i].irq; return 0; } /** * _get_sdma_req_by_name - fetch SDMA request line ID by name * @oh: struct omap_hwmod * to operate on * @name: pointer to the name of the SDMA request line to fetch (optional) * @dma: pointer to an unsigned int to store the request line ID to * * Retrieve an SDMA request line ID named by @name on the IP block * pointed to by @oh. The ID will be filled into the address pointed * to by @dma. When @name is non-null, the request line ID associated * with the named entry will be returned. If @name is null, the first * matching entry will be returned. Data order is not meaningful in * hwmod data, so callers are strongly encouraged to use a non-null * @name whenever possible to avoid unpredictable effects if hwmod * data is later added that causes data ordering to change. Returns 0 * upon success or a negative error code upon error. */ static int _get_sdma_req_by_name(struct omap_hwmod *oh, const char *name, unsigned int *dma) { int i; bool found = false; if (!oh->sdma_reqs) return -ENOENT; i = 0; while (oh->sdma_reqs[i].dma_req != -1) { if (name == oh->sdma_reqs[i].name || !strcmp(name, oh->sdma_reqs[i].name)) { found = true; break; } i++; } if (!found) return -ENOENT; *dma = oh->sdma_reqs[i].dma_req; return 0; } /** * _get_addr_space_by_name - fetch address space start & end by name * @oh: struct omap_hwmod * to operate on * @name: pointer to the name of the address space to fetch (optional) * @pa_start: pointer to a u32 to store the starting address to * @pa_end: pointer to a u32 to store the ending address to * * Retrieve address space start and end addresses for the IP block * pointed to by @oh. The data will be filled into the addresses * pointed to by @pa_start and @pa_end. When @name is non-null, the * address space data associated with the named entry will be * returned. If @name is null, the first matching entry will be * returned.
Data order is not meaningful in hwmod data, so callers * are strongly encouraged to use a non-null @name whenever possible * to avoid unpredictable effects if hwmod data is later added that * causes data ordering to change. Returns 0 upon success or a * negative error code upon error. */ static int _get_addr_space_by_name(struct omap_hwmod *oh, const char *name, u32 *pa_start, u32 *pa_end) { int i, j; struct omap_hwmod_ocp_if *os; struct list_head *p = NULL; bool found = false; p = oh->slave_ports.next; i = 0; while (i < oh->slaves_cnt) { os = _fetch_next_ocp_if(&p, &i); if (!os->addr) return -ENOENT; j = 0; while (os->addr[j].pa_start != os->addr[j].pa_end) { if (name == os->addr[j].name || !strcmp(name, os->addr[j].name)) { found = true; break; } j++; } if (found) break; } if (!found) return -ENOENT; *pa_start = os->addr[j].pa_start; *pa_end = os->addr[j].pa_end; return 0; } /** * _save_mpu_port_index - find and save the index to @oh's MPU port * @oh: struct omap_hwmod * * * Determines the array index of the OCP slave port that the MPU uses * to address the device, and saves it into the struct omap_hwmod. * Intended to be called during hwmod registration only. No return * value. */ static void __init _save_mpu_port_index(struct omap_hwmod *oh) { struct omap_hwmod_ocp_if *os = NULL; struct list_head *p; int i = 0; if (!oh) return; oh->_int_flags |= _HWMOD_NO_MPU_PORT; p = oh->slave_ports.next; while (i < oh->slaves_cnt) { os = _fetch_next_ocp_if(&p, &i); if (os->user & OCP_USER_MPU) { oh->_mpu_port = os; oh->_int_flags &= ~_HWMOD_NO_MPU_PORT; break; } } return; } /** * _find_mpu_rt_port - return omap_hwmod_ocp_if accessible by the MPU * @oh: struct omap_hwmod * * * Given a pointer to a struct omap_hwmod record @oh, return a pointer * to the struct omap_hwmod_ocp_if record that is used by the MPU to * communicate with the IP block. This interface need not be directly * connected to the MPU (and almost certainly is not), but is directly * connected to the IP block represented by @oh. Returns a pointer * to the struct omap_hwmod_ocp_if * upon success, or returns NULL upon * error or if there does not appear to be a path from the MPU to this * IP block. */ static struct omap_hwmod_ocp_if *_find_mpu_rt_port(struct omap_hwmod *oh) { if (!oh || oh->_int_flags & _HWMOD_NO_MPU_PORT || oh->slaves_cnt == 0) return NULL; return oh->_mpu_port; }; /** * _find_mpu_rt_addr_space - return MPU register target address space for @oh * @oh: struct omap_hwmod * * * Returns a pointer to the struct omap_hwmod_addr_space record representing * the register target MPU address space; or returns NULL upon error. */ static struct omap_hwmod_addr_space * __init _find_mpu_rt_addr_space(struct omap_hwmod *oh) { struct omap_hwmod_ocp_if *os; struct omap_hwmod_addr_space *mem; int found = 0, i = 0; os = _find_mpu_rt_port(oh); if (!os || !os->addr) return NULL; do { mem = &os->addr[i++]; if (mem->flags & ADDR_TYPE_RT) found = 1; } while (!found && mem->pa_start != mem->pa_end); return (found) ? mem : NULL; } /** * _enable_sysc - try to bring a module out of idle via OCP_SYSCONFIG * @oh: struct omap_hwmod * * * Ensure that the OCP_SYSCONFIG register for the IP block represented * by @oh is set to indicate to the PRCM that the IP block is active. * Usually this means placing the module into smart-idle mode and * smart-standby, but if there is a bug in the automatic idle handling * for the IP block, it may need to be placed into the force-idle or * no-idle variants of these modes. No return value. 
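* For example, an IP with broken smart-idle support would set * HWMOD_SWSUP_SIDLE in its hwmod flags; the code below then programs * no-idle while enabled, and _idle_sysc() programs force-idle, instead * of the smart-idle variants.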
*/ static void _enable_sysc(struct omap_hwmod *oh) { u8 idlemode, sf; u32 v; bool clkdm_act; struct clockdomain *clkdm; if (!oh->class->sysc) return; /* * Wait until reset has completed, this is needed as the IP * block is reset automatically by hardware in some cases * (off-mode for example), and the drivers require the * IP to be ready when they access it */ if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET) _enable_optional_clocks(oh); _wait_softreset_complete(oh); if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET) _disable_optional_clocks(oh); v = oh->_sysc_cache; sf = oh->class->sysc->sysc_flags; clkdm = _get_clkdm(oh); if (sf & SYSC_HAS_SIDLEMODE) { if (oh->flags & HWMOD_SWSUP_SIDLE || oh->flags & HWMOD_SWSUP_SIDLE_ACT) { idlemode = HWMOD_IDLEMODE_NO; } else { if (sf & SYSC_HAS_ENAWAKEUP) _enable_wakeup(oh, &v); if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) idlemode = HWMOD_IDLEMODE_SMART_WKUP; else idlemode = HWMOD_IDLEMODE_SMART; } /* * This is special handling for some IPs like * 32k sync timer. Force them to idle! */ clkdm_act = (clkdm && clkdm->flags & CLKDM_ACTIVE_WITH_MPU); if (clkdm_act && !(oh->class->sysc->idlemodes & (SIDLE_SMART | SIDLE_SMART_WKUP))) idlemode = HWMOD_IDLEMODE_FORCE; _set_slave_idlemode(oh, idlemode, &v); } if (sf & SYSC_HAS_MIDLEMODE) { if (oh->flags & HWMOD_FORCE_MSTANDBY) { idlemode = HWMOD_IDLEMODE_FORCE; } else if (oh->flags & HWMOD_SWSUP_MSTANDBY) { idlemode = HWMOD_IDLEMODE_NO; } else { if (sf & SYSC_HAS_ENAWAKEUP) _enable_wakeup(oh, &v); if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP) idlemode = HWMOD_IDLEMODE_SMART_WKUP; else idlemode = HWMOD_IDLEMODE_SMART; } _set_master_standbymode(oh, idlemode, &v); } /* * XXX The clock framework should handle this, by * calling into this code. But this must wait until the * clock structures are tagged with omap_hwmod entries */ if ((oh->flags & HWMOD_SET_DEFAULT_CLOCKACT) && (sf & SYSC_HAS_CLOCKACTIVITY)) _set_clockactivity(oh, oh->class->sysc->clockact, &v); /* If the cached value is the same as the new value, skip the write */ if (oh->_sysc_cache != v) _write_sysconfig(v, oh); /* * Set the autoidle bit only after setting the smartidle bit * Setting this will not have any impact on the other modules. */ if (sf & SYSC_HAS_AUTOIDLE) { idlemode = (oh->flags & HWMOD_NO_OCP_AUTOIDLE) ? 0 : 1; _set_module_autoidle(oh, idlemode, &v); _write_sysconfig(v, oh); } } /** * _idle_sysc - try to put a module into idle via OCP_SYSCONFIG * @oh: struct omap_hwmod * * * If module is marked as SWSUP_SIDLE, force the module into slave * idle; otherwise, configure it for smart-idle. If module is marked * as SWSUP_MSUSPEND, force the module into master standby; otherwise, * configure it for smart-standby. No return value. 
*/ static void _idle_sysc(struct omap_hwmod *oh) { u8 idlemode, sf; u32 v; if (!oh->class->sysc) return; v = oh->_sysc_cache; sf = oh->class->sysc->sysc_flags; if (sf & SYSC_HAS_SIDLEMODE) { if (oh->flags & HWMOD_SWSUP_SIDLE) { idlemode = HWMOD_IDLEMODE_FORCE; } else { if (sf & SYSC_HAS_ENAWAKEUP) _enable_wakeup(oh, &v); if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) idlemode = HWMOD_IDLEMODE_SMART_WKUP; else idlemode = HWMOD_IDLEMODE_SMART; } _set_slave_idlemode(oh, idlemode, &v); } if (sf & SYSC_HAS_MIDLEMODE) { if ((oh->flags & HWMOD_SWSUP_MSTANDBY) || (oh->flags & HWMOD_FORCE_MSTANDBY)) { idlemode = HWMOD_IDLEMODE_FORCE; } else { if (sf & SYSC_HAS_ENAWAKEUP) _enable_wakeup(oh, &v); if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP) idlemode = HWMOD_IDLEMODE_SMART_WKUP; else idlemode = HWMOD_IDLEMODE_SMART; } _set_master_standbymode(oh, idlemode, &v); } _write_sysconfig(v, oh); } /** * _shutdown_sysc - force a module into idle via OCP_SYSCONFIG * @oh: struct omap_hwmod * * * Force the module into slave idle and master suspend. No return * value. */ static void _shutdown_sysc(struct omap_hwmod *oh) { u32 v; u8 sf; if (!oh->class->sysc) return; v = oh->_sysc_cache; sf = oh->class->sysc->sysc_flags; if (sf & SYSC_HAS_SIDLEMODE) _set_slave_idlemode(oh, HWMOD_IDLEMODE_FORCE, &v); if (sf & SYSC_HAS_MIDLEMODE) _set_master_standbymode(oh, HWMOD_IDLEMODE_FORCE, &v); if (sf & SYSC_HAS_AUTOIDLE) _set_module_autoidle(oh, 1, &v); _write_sysconfig(v, oh); } /** * _lookup - find an omap_hwmod by name * @name: find an omap_hwmod by name * * Return a pointer to an omap_hwmod by name, or NULL if not found. */ static struct omap_hwmod *_lookup(const char *name) { struct omap_hwmod *oh, *temp_oh; oh = NULL; list_for_each_entry(temp_oh, &omap_hwmod_list, node) { if (!strcmp(name, temp_oh->name)) { oh = temp_oh; break; } } return oh; } /** * _init_clkdm - look up a clockdomain name, store pointer in omap_hwmod * @oh: struct omap_hwmod * * * Convert a clockdomain name stored in a struct omap_hwmod into a * clockdomain pointer, and save it into the struct omap_hwmod. * Return -EINVAL if the clkdm_name lookup failed. */ static int _init_clkdm(struct omap_hwmod *oh) { if (!oh->clkdm_name) { pr_debug("omap_hwmod: %s: missing clockdomain\n", oh->name); return 0; } oh->clkdm = clkdm_lookup(oh->clkdm_name); if (!oh->clkdm) { pr_warn("omap_hwmod: %s: could not associate to clkdm %s\n", oh->name, oh->clkdm_name); return 0; } pr_debug("omap_hwmod: %s: associated to clkdm %s\n", oh->name, oh->clkdm_name); return 0; } /** * _init_clocks - clk_get() all clocks associated with this hwmod. Retrieve as * well the clockdomain. * @oh: struct omap_hwmod * * @data: not used; pass NULL * * Called by omap_hwmod_setup_*() (after omap2_clk_init()). * Resolves all clock names embedded in the hwmod. Returns 0 on * success, or a negative error code on failure. 
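* The names resolved by this pass come straight from the hwmod data, * roughly of the form (values illustrative): * .main_clk = "gpt1_fck", .clkdm_name = "core_l4_clkdm",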
*/ static int _init_clocks(struct omap_hwmod *oh, void *data) { int ret = 0; if (oh->_state != _HWMOD_STATE_REGISTERED) return 0; pr_debug("omap_hwmod: %s: looking up clocks\n", oh->name); if (soc_ops.init_clkdm) ret |= soc_ops.init_clkdm(oh); ret |= _init_main_clk(oh); ret |= _init_interface_clks(oh); ret |= _init_opt_clks(oh); if (!ret) oh->_state = _HWMOD_STATE_CLKS_INITED; else pr_warn("omap_hwmod: %s: cannot _init_clocks\n", oh->name); return ret; } /** * _lookup_hardreset - fill register bit info for this hwmod/reset line * @oh: struct omap_hwmod * * @name: name of the reset line in the context of this hwmod * @ohri: struct omap_hwmod_rst_info * that this function will fill in * * Return the bit position of the reset line that matches the * input name. Return -ENOENT if not found. */ static int _lookup_hardreset(struct omap_hwmod *oh, const char *name, struct omap_hwmod_rst_info *ohri) { int i; for (i = 0; i < oh->rst_lines_cnt; i++) { const char *rst_line = oh->rst_lines[i].name; if (!strcmp(rst_line, name)) { ohri->rst_shift = oh->rst_lines[i].rst_shift; ohri->st_shift = oh->rst_lines[i].st_shift; pr_debug("omap_hwmod: %s: %s: %s: rst %d st %d\n", oh->name, __func__, rst_line, ohri->rst_shift, ohri->st_shift); return 0; } } return -ENOENT; } /** * _assert_hardreset - assert the HW reset line of submodules * contained in the hwmod module. * @oh: struct omap_hwmod * * @name: name of the reset line to look up and assert * * Some IPs like the dsp, ipu or iva contain processors that require * an HW reset line to be asserted / deasserted in order to fully * enable the IP. Returns -EINVAL if @oh is null, -ENOSYS if we have * no way of asserting the hardreset line on the currently-booted SoC, * or passes along the return value from _lookup_hardreset() or the * SoC's assert_hardreset code. */ static int _assert_hardreset(struct omap_hwmod *oh, const char *name) { struct omap_hwmod_rst_info ohri; int ret = -EINVAL; if (!oh) return -EINVAL; if (!soc_ops.assert_hardreset) return -ENOSYS; ret = _lookup_hardreset(oh, name, &ohri); if (ret < 0) return ret; ret = soc_ops.assert_hardreset(oh, &ohri); return ret; } /** * _deassert_hardreset - deassert the HW reset line of submodules contained * in the hwmod module. * @oh: struct omap_hwmod * * @name: name of the reset line to look up and deassert * * Some IPs like the dsp, ipu or iva contain processors that require * an HW reset line to be asserted / deasserted in order to fully * enable the IP. Returns -EINVAL if @oh is null, -ENOSYS if we have * no way of deasserting the hardreset line on the currently-booted SoC, * or passes along the return value from _lookup_hardreset() or the * SoC's deassert_hardreset code. */ static int _deassert_hardreset(struct omap_hwmod *oh, const char *name) { struct omap_hwmod_rst_info ohri; int ret = -EINVAL; int hwsup = 0; if (!oh) return -EINVAL; if (!soc_ops.deassert_hardreset) return -ENOSYS; ret = _lookup_hardreset(oh, name, &ohri); if (ret < 0) return ret; if (oh->clkdm) { /* * A clockdomain must be in SW_SUP otherwise reset * might not be completed. The clockdomain can be set * in HW_AUTO only when the module becomes ready.
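* (Hence the sequence below: force the clockdomain on, enable the * clocks and the module, deassert the line, then restore HW_AUTO only * if that was the previous state.)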
*/ hwsup = clkdm_in_hwsup(oh->clkdm); ret = clkdm_hwmod_enable(oh->clkdm, oh); if (ret) { WARN(1, "omap_hwmod: %s: could not enable clockdomain %s: %d\n", oh->name, oh->clkdm->name, ret); return ret; } } _enable_clocks(oh); if (soc_ops.enable_module) soc_ops.enable_module(oh); ret = soc_ops.deassert_hardreset(oh, &ohri); if (soc_ops.disable_module) soc_ops.disable_module(oh); _disable_clocks(oh); if (ret == -EBUSY) pr_warn("omap_hwmod: %s: failed to hardreset\n", oh->name); if (oh->clkdm) { /* * Set the clockdomain to HW_AUTO, assuming that the * previous state was HW_AUTO. */ if (hwsup) clkdm_allow_idle(oh->clkdm); clkdm_hwmod_disable(oh->clkdm, oh); } return ret; } /** * _read_hardreset - read the HW reset line state of submodules * contained in the hwmod module * @oh: struct omap_hwmod * * @name: name of the reset line to look up and read * * Return the state of the reset line. Returns -EINVAL if @oh is * null, -ENOSYS if we have no way of reading the hardreset line * status on the currently-booted SoC, or passes along the return * value from _lookup_hardreset() or the SoC's is_hardreset_asserted * code. */ static int _read_hardreset(struct omap_hwmod *oh, const char *name) { struct omap_hwmod_rst_info ohri; int ret = -EINVAL; if (!oh) return -EINVAL; if (!soc_ops.is_hardreset_asserted) return -ENOSYS; ret = _lookup_hardreset(oh, name, &ohri); if (ret < 0) return ret; return soc_ops.is_hardreset_asserted(oh, &ohri); } /** * _are_all_hardreset_lines_asserted - return true if the @oh is hard-reset * @oh: struct omap_hwmod * * * If all hardreset lines associated with @oh are asserted, then return true. * Otherwise, if part of @oh is out of hardreset or if no hardreset lines * associated with @oh are asserted, then return false. * This function is used to avoid executing some parts of the IP block * enable/disable sequence if its hardreset line is set. */ static bool _are_all_hardreset_lines_asserted(struct omap_hwmod *oh) { int i, rst_cnt = 0; if (oh->rst_lines_cnt == 0) return false; for (i = 0; i < oh->rst_lines_cnt; i++) if (_read_hardreset(oh, oh->rst_lines[i].name) > 0) rst_cnt++; if (oh->rst_lines_cnt == rst_cnt) return true; return false; } /** * _are_any_hardreset_lines_asserted - return true if any part of @oh is * hard-reset * @oh: struct omap_hwmod * * * If any hardreset lines associated with @oh are asserted, then * return true. Otherwise, if no hardreset lines associated with @oh * are asserted, or if @oh has no hardreset lines, then return false. * This function is used to avoid executing some parts of the IP block * enable/disable sequence if any hardreset line is set. */ static bool _are_any_hardreset_lines_asserted(struct omap_hwmod *oh) { int rst_cnt = 0; int i; for (i = 0; i < oh->rst_lines_cnt && rst_cnt == 0; i++) if (_read_hardreset(oh, oh->rst_lines[i].name) > 0) rst_cnt++; return (rst_cnt) ? true : false; } /** * _omap4_disable_module - disable CLKCTRL modulemode on OMAP4 * @oh: struct omap_hwmod * * * Disable the PRCM module mode related to the hwmod @oh. * Return -EINVAL if the modulemode is not supported and 0 in case of success. */ static int _omap4_disable_module(struct omap_hwmod *oh) { int v; if (!oh->clkdm || !oh->prcm.omap4.modulemode) return -EINVAL; /* * Since integration code might still be doing something, only * disable if all lines are under hardreset.
*/ if (_are_any_hardreset_lines_asserted(oh)) return 0; pr_debug("omap_hwmod: %s: %s\n", oh->name, __func__); omap_cm_module_disable(oh->clkdm->prcm_partition, oh->clkdm->cm_inst, oh->prcm.omap4.clkctrl_offs); v = _omap4_wait_target_disable(oh); if (v) pr_warn("omap_hwmod: %s: _wait_target_disable failed\n", oh->name); return 0; } /** * _ocp_softreset - reset an omap_hwmod via the OCP_SYSCONFIG bit * @oh: struct omap_hwmod * * * Resets an omap_hwmod @oh via the OCP_SYSCONFIG bit. hwmod must be * enabled for this to work. Returns -ENOENT if the hwmod cannot be * reset this way, -EINVAL if the hwmod is in the wrong state, * -ETIMEDOUT if the module did not reset in time, or 0 upon success. * * In OMAP3 a specific SYSSTATUS register is used to get the reset status. * Starting in OMAP4, some IPs do not have SYSSTATUS registers and instead * use the SYSCONFIG softreset bit to provide the status. * * Note that some IPs like McBSP do have reset control but don't have * reset status. */ static int _ocp_softreset(struct omap_hwmod *oh) { u32 v; int c = 0; int ret = 0; if (!oh->class->sysc || !(oh->class->sysc->sysc_flags & SYSC_HAS_SOFTRESET)) return -ENOENT; /* clocks must be on for this operation */ if (oh->_state != _HWMOD_STATE_ENABLED) { pr_warn("omap_hwmod: %s: reset can only be entered from enabled state\n", oh->name); return -EINVAL; } /* For some modules, all optional clocks need to be enabled as well */ if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET) _enable_optional_clocks(oh); pr_debug("omap_hwmod: %s: resetting via OCP SOFTRESET\n", oh->name); v = oh->_sysc_cache; ret = _set_softreset(oh, &v); if (ret) goto dis_opt_clks; _write_sysconfig(v, oh); if (oh->class->sysc->srst_udelay) udelay(oh->class->sysc->srst_udelay); c = _wait_softreset_complete(oh); if (c == MAX_MODULE_SOFTRESET_WAIT) { pr_warn("omap_hwmod: %s: softreset failed (waited %d usec)\n", oh->name, MAX_MODULE_SOFTRESET_WAIT); ret = -ETIMEDOUT; goto dis_opt_clks; } else { pr_debug("omap_hwmod: %s: softreset in %d usec\n", oh->name, c); } ret = _clear_softreset(oh, &v); if (ret) goto dis_opt_clks; _write_sysconfig(v, oh); /* * XXX add _HWMOD_STATE_WEDGED for modules that don't come back from * _wait_target_ready() or _reset() */ dis_opt_clks: if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET) _disable_optional_clocks(oh); return ret; } /** * _reset - reset an omap_hwmod * @oh: struct omap_hwmod * * * Resets an omap_hwmod @oh. If the module has a custom reset * function pointer defined, then call it to reset the IP block, and * pass along its return value to the caller. Otherwise, if the IP * block has an OCP_SYSCONFIG register with a SOFTRESET bitfield * associated with it, call a function to reset the IP block via that * method, and pass along the return value to the caller. Finally, if * the IP block has some hardreset lines associated with it, assert * all of those, but do _not_ deassert them. (This is because driver * authors have expressed an apparent requirement to control the * deassertion of the hardreset lines themselves.) * * The default software reset mechanism for most OMAP IP blocks is * triggered via the OCP_SYSCONFIG.SOFTRESET bit. However, some * hwmods cannot be reset via this method. Some are not targets and * therefore have no OCP header registers to access. Others (like the * IVA) have idiosyncratic reset sequences. So for these relatively * rare cases, custom reset code can be supplied in the struct * omap_hwmod_class .reset function pointer.
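* For instance, the DSS hwmod class hooks its custom sequence up as * .reset = omap_dss_reset in its struct omap_hwmod_class record.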
* * _set_dmadisable() is called to set the DMADISABLE bit so that it * does not prevent idling of the system. This is necessary for cases * where ROMCODE/BOOTLOADER uses dma and transfers control to the * kernel without disabling dma. * * Passes along the return value from either _ocp_softreset() or the * custom reset function - these must return -EINVAL if the hwmod * cannot be reset this way or if the hwmod is in the wrong state, * -ETIMEDOUT if the module did not reset in time, or 0 upon success. */ static int _reset(struct omap_hwmod *oh) { int i, r; pr_debug("omap_hwmod: %s: resetting\n", oh->name); if (oh->class->reset) { r = oh->class->reset(oh); } else { if (oh->rst_lines_cnt > 0) { for (i = 0; i < oh->rst_lines_cnt; i++) _assert_hardreset(oh, oh->rst_lines[i].name); return 0; } else { r = _ocp_softreset(oh); if (r == -ENOENT) r = 0; } } _set_dmadisable(oh); /* * OCP_SYSCONFIG bits need to be reprogrammed after a * softreset. The _enable() function should be split to avoid * the rewrite of the OCP_SYSCONFIG register. */ if (oh->class->sysc) { _update_sysc_cache(oh); _enable_sysc(oh); } return r; } /** * _reconfigure_io_chain - clear any I/O chain wakeups and reconfigure chain * * Call the appropriate PRM function to clear any logged I/O chain * wakeups and to reconfigure the chain. This apparently needs to be * done upon every mux change. Since hwmods can be concurrently * enabled and idled, hold a spinlock around the I/O chain * reconfiguration sequence. No return value. * * XXX When the PRM code is moved to drivers, this function can be removed, * as the PRM infrastructure should abstract this. */ static void _reconfigure_io_chain(void) { unsigned long flags; spin_lock_irqsave(&io_chain_lock, flags); omap_prm_reconfigure_io_chain(); spin_unlock_irqrestore(&io_chain_lock, flags); } /** * _omap4_update_context_lost - increment hwmod context loss counter if * hwmod context was lost, and clear hardware context loss reg * @oh: hwmod to check for context loss * * If the PRCM indicates that the hwmod @oh lost context, increment * our in-memory context loss counter, and clear the RM_*_CONTEXT * bits. No return value. */ static void _omap4_update_context_lost(struct omap_hwmod *oh) { if (oh->prcm.omap4.flags & HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT) return; if (!prm_was_any_context_lost_old(oh->clkdm->pwrdm.ptr->prcm_partition, oh->clkdm->pwrdm.ptr->prcm_offs, oh->prcm.omap4.context_offs)) return; oh->prcm.omap4.context_lost_counter++; prm_clear_context_loss_flags_old(oh->clkdm->pwrdm.ptr->prcm_partition, oh->clkdm->pwrdm.ptr->prcm_offs, oh->prcm.omap4.context_offs); } /** * _omap4_get_context_lost - get context loss counter for a hwmod * @oh: hwmod to get context loss counter for * * Returns the in-memory context loss counter for a hwmod. */ static int _omap4_get_context_lost(struct omap_hwmod *oh) { return oh->prcm.omap4.context_lost_counter; } /** * _enable_preprogram - Pre-program an IP block during the _enable() process * @oh: struct omap_hwmod * * * Some IP blocks (such as AESS) require some additional programming * after enable before they can enter idle. If a function pointer to * do so is present in the hwmod data, then call it and pass along the * return value; otherwise, return 0. */ static int _enable_preprogram(struct omap_hwmod *oh) { if (!oh->class->enable_preprogram) return 0; return oh->class->enable_preprogram(oh); } /** * _enable - enable an omap_hwmod * @oh: struct omap_hwmod * * * Enables an omap_hwmod @oh such that the MPU can access the hwmod's * register target. 
Returns -EINVAL if the hwmod is in the wrong * state or passes along the return value of _wait_target_ready(). */ static int _enable(struct omap_hwmod *oh) { int r; int hwsup = 0; pr_debug("omap_hwmod: %s: enabling\n", oh->name); /* * hwmods with HWMOD_INIT_NO_IDLE flag set are left in enabled * state at init. Now that someone is really trying to enable * them, just ensure that the hwmod mux is set. */ if (oh->_int_flags & _HWMOD_SKIP_ENABLE) { /* * If the caller has mux data populated, do the mux'ing * which wouldn't have been done as part of the _enable() * done during setup. */ if (oh->mux) omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED); oh->_int_flags &= ~_HWMOD_SKIP_ENABLE; return 0; } if (oh->_state != _HWMOD_STATE_INITIALIZED && oh->_state != _HWMOD_STATE_IDLE && oh->_state != _HWMOD_STATE_DISABLED) { WARN(1, "omap_hwmod: %s: enabled state can only be entered from initialized, idle, or disabled state\n", oh->name); return -EINVAL; } /* * If an IP block contains HW reset lines and all of them are * asserted, we let integration code associated with that * block handle the enable. We've received very little * information on what those driver authors need, and until * detailed information is provided and the driver code is * posted to the public lists, this is probably the best we * can do. */ if (_are_all_hardreset_lines_asserted(oh)) return 0; /* Mux pins for device runtime if populated */ if (oh->mux && (!oh->mux->enabled || ((oh->_state == _HWMOD_STATE_IDLE) && oh->mux->pads_dynamic))) { omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED); _reconfigure_io_chain(); } else if (oh->flags & HWMOD_RECONFIG_IO_CHAIN) { _reconfigure_io_chain(); } _add_initiator_dep(oh, mpu_oh); if (oh->clkdm) { /* * A clockdomain must be in SW_SUP before the module is * completely enabled. The clockdomain can be set * in HW_AUTO only once the module becomes ready. */ hwsup = clkdm_in_hwsup(oh->clkdm) && !clkdm_missing_idle_reporting(oh->clkdm); r = clkdm_hwmod_enable(oh->clkdm, oh); if (r) { WARN(1, "omap_hwmod: %s: could not enable clockdomain %s: %d\n", oh->name, oh->clkdm->name, r); return r; } } _enable_clocks(oh); if (soc_ops.enable_module) soc_ops.enable_module(oh); if (oh->flags & HWMOD_BLOCK_WFI) cpu_idle_poll_ctrl(true); if (soc_ops.update_context_lost) soc_ops.update_context_lost(oh); r = (soc_ops.wait_target_ready) ? soc_ops.wait_target_ready(oh) : -EINVAL; if (!r) { /* * Set the clockdomain to HW_AUTO only if the target is ready, * assuming that the previous state was HW_AUTO */ if (oh->clkdm && hwsup) clkdm_allow_idle(oh->clkdm); oh->_state = _HWMOD_STATE_ENABLED; /* Access the sysconfig only if the target is ready */ if (oh->class->sysc) { if (!(oh->_int_flags & _HWMOD_SYSCONFIG_LOADED)) _update_sysc_cache(oh); _enable_sysc(oh); } r = _enable_preprogram(oh); } else { if (soc_ops.disable_module) soc_ops.disable_module(oh); _disable_clocks(oh); pr_err("omap_hwmod: %s: _wait_target_ready failed: %d\n", oh->name, r); if (oh->clkdm) clkdm_hwmod_disable(oh->clkdm, oh); } return r; } /** * _idle - idle an omap_hwmod * @oh: struct omap_hwmod * * * Idles an omap_hwmod @oh. This should be called once the hwmod has * no further work. Returns -EINVAL if the hwmod is in the wrong * state; otherwise returns 0.
*/ static int _idle(struct omap_hwmod *oh) { pr_debug("omap_hwmod: %s: idling\n", oh->name); if (oh->_state != _HWMOD_STATE_ENABLED) { WARN(1, "omap_hwmod: %s: idle state can only be entered from enabled state\n", oh->name); return -EINVAL; } if (_are_all_hardreset_lines_asserted(oh)) return 0; if (oh->class->sysc) _idle_sysc(oh); _del_initiator_dep(oh, mpu_oh); if (oh->flags & HWMOD_BLOCK_WFI) cpu_idle_poll_ctrl(false); if (soc_ops.disable_module) soc_ops.disable_module(oh); /* * The module must be in idle mode before disabling any parent * clocks. Otherwise, a parent clock might be disabled before * the module transition is done, which would prevent the * transition from completing properly. */ _disable_clocks(oh); if (oh->clkdm) clkdm_hwmod_disable(oh->clkdm, oh); /* Mux pins for device idle if populated */ if (oh->mux && oh->mux->pads_dynamic) { omap_hwmod_mux(oh->mux, _HWMOD_STATE_IDLE); _reconfigure_io_chain(); } else if (oh->flags & HWMOD_RECONFIG_IO_CHAIN) { _reconfigure_io_chain(); } oh->_state = _HWMOD_STATE_IDLE; return 0; } /** * _shutdown - shutdown an omap_hwmod * @oh: struct omap_hwmod * * * Shut down an omap_hwmod @oh. This should be called when the driver * used for the hwmod is removed or unloaded or if the driver is not * used by the system. Returns -EINVAL if the hwmod is in the wrong * state; otherwise returns 0. */ static int _shutdown(struct omap_hwmod *oh) { int ret, i; u8 prev_state; if (oh->_state != _HWMOD_STATE_IDLE && oh->_state != _HWMOD_STATE_ENABLED) { WARN(1, "omap_hwmod: %s: disabled state can only be entered from idle, or enabled state\n", oh->name); return -EINVAL; } if (_are_all_hardreset_lines_asserted(oh)) return 0; pr_debug("omap_hwmod: %s: disabling\n", oh->name); if (oh->class->pre_shutdown) { prev_state = oh->_state; if (oh->_state == _HWMOD_STATE_IDLE) _enable(oh); ret = oh->class->pre_shutdown(oh); if (ret) { if (prev_state == _HWMOD_STATE_IDLE) _idle(oh); return ret; } } if (oh->class->sysc) { if (oh->_state == _HWMOD_STATE_IDLE) _enable(oh); _shutdown_sysc(oh); } /* clocks and deps are already disabled in idle */ if (oh->_state == _HWMOD_STATE_ENABLED) { _del_initiator_dep(oh, mpu_oh); /* XXX what about the other system initiators here? dma, dsp */ if (oh->flags & HWMOD_BLOCK_WFI) cpu_idle_poll_ctrl(false); if (soc_ops.disable_module) soc_ops.disable_module(oh); _disable_clocks(oh); if (oh->clkdm) clkdm_hwmod_disable(oh->clkdm, oh); } /* XXX Should this code also force-disable the optional clocks? */ for (i = 0; i < oh->rst_lines_cnt; i++) _assert_hardreset(oh, oh->rst_lines[i].name); /* Mux pins to safe mode or use populated off mode values */ if (oh->mux) omap_hwmod_mux(oh->mux, _HWMOD_STATE_DISABLED); oh->_state = _HWMOD_STATE_DISABLED; return 0; } static int of_dev_find_hwmod(struct device_node *np, struct omap_hwmod *oh) { int count, i, res; const char *p; count = of_property_count_strings(np, "ti,hwmods"); if (count < 1) return -ENODEV; for (i = 0; i < count; i++) { res = of_property_read_string_index(np, "ti,hwmods", i, &p); if (res) continue; if (!strcmp(p, oh->name)) { pr_debug("omap_hwmod: dt %s[%i] uses hwmod %s\n", np->name, i, oh->name); return i; } } return -ENODEV; } /** * of_dev_hwmod_lookup - look up needed hwmod from dt blob * @np: struct device_node * * @oh: struct omap_hwmod * * @index: index of the entry found * @found: struct device_node * found or NULL * * Parse the dt blob and find the needed hwmod. A recursive * implementation takes care of hierarchical dt blob parsing.
* Return: Returns 0 on success, -ENODEV when not found. */ static int of_dev_hwmod_lookup(struct device_node *np, struct omap_hwmod *oh, int *index, struct device_node **found) { struct device_node *np0 = NULL; int res; res = of_dev_find_hwmod(np, oh); if (res >= 0) { *found = np; *index = res; return 0; } for_each_child_of_node(np, np0) { struct device_node *fc; int i; res = of_dev_hwmod_lookup(np0, oh, &i, &fc); if (res == 0) { *found = fc; *index = i; return 0; } } *found = NULL; *index = 0; return -ENODEV; } /** * _init_mpu_rt_base - populate the virtual address for a hwmod * @oh: struct omap_hwmod * to locate the virtual address * @data: (unused, caller should pass NULL) * @index: index of the reg entry iospace in device tree * @np: struct device_node * of the IP block's device node in the DT data * * Cache the virtual address used by the MPU to access this IP block's * registers. This address is needed early so the OCP registers that * are part of the device's address space can be ioremapped properly. * * If SYSC access is not needed, the registers will not be remapped * and non-availability of MPU access is not treated as an error. * * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and * -ENXIO on absent or invalid register target address space. */ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data, int index, struct device_node *np) { struct omap_hwmod_addr_space *mem; void __iomem *va_start = NULL; if (!oh) return -EINVAL; _save_mpu_port_index(oh); /* if we don't need sysc access we don't need to ioremap */ if (!oh->class->sysc) return 0; /* we can't continue without MPU PORT if we need sysc access */ if (oh->_int_flags & _HWMOD_NO_MPU_PORT) return -ENXIO; mem = _find_mpu_rt_addr_space(oh); if (!mem) { pr_debug("omap_hwmod: %s: no MPU register target found\n", oh->name); /* Extract the IO space from device tree blob */ if (!np) { pr_err("omap_hwmod: %s: no dt node\n", oh->name); return -ENXIO; } va_start = of_iomap(np, index + oh->mpu_rt_idx); } else { va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start); } if (!va_start) { if (mem) pr_err("omap_hwmod: %s: Could not ioremap\n", oh->name); else pr_err("omap_hwmod: %s: Missing dt reg%i for %s\n", oh->name, index, np->full_name); return -ENXIO; } pr_debug("omap_hwmod: %s: MPU register target at va %p\n", oh->name, va_start); oh->_mpu_rt_va = va_start; return 0; } /** * _init - initialize internal data for the hwmod @oh * @oh: struct omap_hwmod * * @n: (unused) * * Look up the clocks and the address space used by the MPU to access * registers belonging to the hwmod @oh. @oh must already be * registered at this point. This is the first of two phases for * hwmod initialization. Code called here does not touch any hardware * registers, it simply prepares internal data structures. Returns 0 * upon success or if the hwmod isn't registered or if the hwmod's * address space is not defined, or -EINVAL upon failure. 
*/ static int __init _init(struct omap_hwmod *oh, void *data) { int r, index = 0; /* keep index initialized for the non-DT path */ struct device_node *np = NULL; if (oh->_state != _HWMOD_STATE_REGISTERED) return 0; if (of_have_populated_dt()) { struct device_node *bus; bus = of_find_node_by_name(NULL, "ocp"); if (!bus) return -ENODEV; r = of_dev_hwmod_lookup(bus, oh, &index, &np); if (r) pr_debug("omap_hwmod: %s missing dt data\n", oh->name); else if (np && index) pr_warn("omap_hwmod: %s using broken dt data from %s\n", oh->name, np->name); } r = _init_mpu_rt_base(oh, NULL, index, np); if (r < 0) { WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n", oh->name); return 0; } r = _init_clocks(oh, NULL); if (r < 0) { WARN(1, "omap_hwmod: %s: couldn't init clocks\n", oh->name); return -EINVAL; } if (np) { if (of_find_property(np, "ti,no-reset-on-init", NULL)) oh->flags |= HWMOD_INIT_NO_RESET; if (of_find_property(np, "ti,no-idle-on-init", NULL)) oh->flags |= HWMOD_INIT_NO_IDLE; } oh->_state = _HWMOD_STATE_INITIALIZED; return 0; } /** * _setup_iclk_autoidle - configure an IP block's interface clocks * @oh: struct omap_hwmod * * * Set up the module's interface clocks. XXX This function is still mostly * a stub; implementing this properly requires iclk autoidle usecounting in * the clock code. No return value. */ static void __init _setup_iclk_autoidle(struct omap_hwmod *oh) { struct omap_hwmod_ocp_if *os; struct list_head *p; int i = 0; if (oh->_state != _HWMOD_STATE_INITIALIZED) return; p = oh->slave_ports.next; while (i < oh->slaves_cnt) { os = _fetch_next_ocp_if(&p, &i); if (!os->_clk) continue; if (os->flags & OCPIF_SWSUP_IDLE) { /* XXX omap_iclk_deny_idle(c); */ } else { /* XXX omap_iclk_allow_idle(c); */ clk_enable(os->_clk); } } return; } /** * _setup_reset - reset an IP block during the setup process * @oh: struct omap_hwmod * * * Reset the IP block corresponding to the hwmod @oh during the setup * process. The IP block is first enabled so it can be successfully * reset. Returns 0 upon success or a negative error code upon * failure. */ static int __init _setup_reset(struct omap_hwmod *oh) { int r = 0; /* default when no reset is performed */ if (oh->_state != _HWMOD_STATE_INITIALIZED) return -EINVAL; if (oh->flags & HWMOD_EXT_OPT_MAIN_CLK) return -EPERM; if (oh->rst_lines_cnt == 0) { r = _enable(oh); if (r) { pr_warn("omap_hwmod: %s: cannot be enabled for reset (%d)\n", oh->name, oh->_state); return -EINVAL; } } if (!(oh->flags & HWMOD_INIT_NO_RESET)) r = _reset(oh); return r; } /** * _setup_postsetup - transition to the appropriate state after _setup * @oh: struct omap_hwmod * * * Place an IP block represented by @oh into a "post-setup" state -- * either IDLE, ENABLED, or DISABLED. ("post-setup" simply means that * this function is called at the end of _setup().) The postsetup * state for an IP block can be changed by calling * omap_hwmod_set_postsetup_state() early in the boot process, * before one of the omap_hwmod_setup*() functions is called for the * IP block. * * The IP block stays in this state until a PM runtime-based driver is * loaded for that IP block. A post-setup state of IDLE is * appropriate for almost all IP blocks with runtime PM-enabled * drivers, since those drivers are able to enable the IP block. A * post-setup state of ENABLED is appropriate for kernels with PM * runtime disabled. The DISABLED state is appropriate for unusual IP * blocks such as the MPU WDTIMER on kernels without WDTIMER drivers * included, since the WDTIMER starts running on reset and will reset * the MPU if left active. * * This post-setup mechanism is deprecated.
Once all of the OMAP * drivers have been converted to use PM runtime, and all of the IP * block data and interconnect data is available to the hwmod code, it * should be possible to replace this mechanism with a "lazy reset" * arrangement. In a "lazy reset" setup, each IP block is enabled * when the driver first probes, then all remaining IP blocks without * drivers are either shut down or enabled after the drivers have * loaded. However, this cannot take place until the above * preconditions have been met, since otherwise the late reset code * has no way of knowing which IP blocks are in use by drivers, and * which ones are unused. * * No return value. */ static void __init _setup_postsetup(struct omap_hwmod *oh) { u8 postsetup_state; if (oh->rst_lines_cnt > 0) return; postsetup_state = oh->_postsetup_state; if (postsetup_state == _HWMOD_STATE_UNKNOWN) postsetup_state = _HWMOD_STATE_ENABLED; /* * XXX HWMOD_INIT_NO_IDLE does not belong in hwmod data - * it should be set by the core code as a runtime flag during startup */ if ((oh->flags & HWMOD_INIT_NO_IDLE) && (postsetup_state == _HWMOD_STATE_IDLE)) { oh->_int_flags |= _HWMOD_SKIP_ENABLE; postsetup_state = _HWMOD_STATE_ENABLED; } if (postsetup_state == _HWMOD_STATE_IDLE) _idle(oh); else if (postsetup_state == _HWMOD_STATE_DISABLED) _shutdown(oh); else if (postsetup_state != _HWMOD_STATE_ENABLED) WARN(1, "hwmod: %s: unknown postsetup state %d! defaulting to enabled\n", oh->name, postsetup_state); return; } /** * _setup - prepare IP block hardware for use * @oh: struct omap_hwmod * * @n: (unused, pass NULL) * * Configure the IP block represented by @oh. This may include * enabling the IP block, resetting it, and placing it into a * post-setup state, depending on the type of IP block and applicable * flags. IP blocks are reset to prevent any previous configuration * by the bootloader or previous operating system from interfering * with power management or other parts of the system. The reset can * be avoided; see omap_hwmod_no_setup_reset(). This is the second of * two phases for hwmod initialization. Code called here generally * affects the IP block hardware, or system integration hardware * associated with the IP block. Returns 0. */ static int __init _setup(struct omap_hwmod *oh, void *data) { if (oh->_state != _HWMOD_STATE_INITIALIZED) return 0; if (oh->parent_hwmod) { int r; r = _enable(oh->parent_hwmod); WARN(r, "hwmod: %s: setup: failed to enable parent hwmod %s\n", oh->name, oh->parent_hwmod->name); } _setup_iclk_autoidle(oh); if (!_setup_reset(oh)) _setup_postsetup(oh); if (oh->parent_hwmod) { u8 postsetup_state; postsetup_state = oh->parent_hwmod->_postsetup_state; if (postsetup_state == _HWMOD_STATE_IDLE) _idle(oh->parent_hwmod); else if (postsetup_state == _HWMOD_STATE_DISABLED) _shutdown(oh->parent_hwmod); else if (postsetup_state != _HWMOD_STATE_ENABLED) WARN(1, "hwmod: %s: unknown postsetup state %d! defaulting to enabled\n", oh->parent_hwmod->name, postsetup_state); } return 0; } /** * _register - register a struct omap_hwmod * @oh: struct omap_hwmod * * * Registers the omap_hwmod @oh. Returns -EEXIST if an omap_hwmod * already has been registered by the same name; -EINVAL if the * omap_hwmod is in the wrong state, if @oh is NULL, if the * omap_hwmod's class field is NULL; if the omap_hwmod is missing a * name, or if the omap_hwmod's class is missing a name; or 0 upon * success. * * XXX The data should be copied into bootmem, so the original data * should be marked __initdata and freed after init. 
This would allow * unneeded omap_hwmods to be freed on multi-OMAP configurations. Note * that the copy process would be relatively complex due to the large number * of substructures. */ static int __init _register(struct omap_hwmod *oh) { if (!oh || !oh->name || !oh->class || !oh->class->name || (oh->_state != _HWMOD_STATE_UNKNOWN)) return -EINVAL; pr_debug("omap_hwmod: %s: registering\n", oh->name); if (_lookup(oh->name)) return -EEXIST; list_add_tail(&oh->node, &omap_hwmod_list); INIT_LIST_HEAD(&oh->master_ports); INIT_LIST_HEAD(&oh->slave_ports); spin_lock_init(&oh->_lock); lockdep_set_class(&oh->_lock, &oh->hwmod_key); oh->_state = _HWMOD_STATE_REGISTERED; /* * XXX Rather than doing a strcmp(), this should test a flag * set in the hwmod data, inserted by the autogenerator code. */ if (!strcmp(oh->name, MPU_INITIATOR_NAME)) mpu_oh = oh; return 0; } /** * _alloc_links - return allocated memory for hwmod links * @ml: pointer to a struct omap_hwmod_link * for the master link * @sl: pointer to a struct omap_hwmod_link * for the slave link * * Return pointers to two struct omap_hwmod_link records, via the * addresses pointed to by @ml and @sl. Will first attempt to return * memory allocated as part of a large initial block, but if that has * been exhausted, will allocate memory itself. Since ideally this * second allocation path will never occur, the number of these * 'supplemental' allocations will be logged when debugging is * enabled. Returns 0. */ static int __init _alloc_links(struct omap_hwmod_link **ml, struct omap_hwmod_link **sl) { unsigned int sz; if ((free_ls + LINKS_PER_OCP_IF) <= max_ls) { *ml = &linkspace[free_ls++]; *sl = &linkspace[free_ls++]; return 0; } sz = sizeof(struct omap_hwmod_link) * LINKS_PER_OCP_IF; *sl = NULL; *ml = memblock_virt_alloc(sz, 0); *sl = (void *)(*ml) + sizeof(struct omap_hwmod_link); ls_supp++; pr_debug("omap_hwmod: supplemental link allocations needed: %d\n", ls_supp * LINKS_PER_OCP_IF); return 0; }; /** * _add_link - add an interconnect between two IP blocks * @oi: pointer to a struct omap_hwmod_ocp_if record * * Add struct omap_hwmod_link records connecting the master IP block * specified in @oi->master to @oi, and connecting the slave IP block * specified in @oi->slave to @oi. This code is assumed to run before * preemption or SMP has been enabled, thus avoiding the need for * locking in this code. Changes to this assumption will require * additional locking. Returns 0. */ static int __init _add_link(struct omap_hwmod_ocp_if *oi) { struct omap_hwmod_link *ml, *sl; pr_debug("omap_hwmod: %s -> %s: adding link\n", oi->master->name, oi->slave->name); _alloc_links(&ml, &sl); ml->ocp_if = oi; list_add(&ml->node, &oi->master->master_ports); oi->master->masters_cnt++; sl->ocp_if = oi; list_add(&sl->node, &oi->slave->slave_ports); oi->slave->slaves_cnt++; return 0; } /** * _register_link - register a struct omap_hwmod_ocp_if * @oi: struct omap_hwmod_ocp_if * * * Registers the omap_hwmod_ocp_if record @oi. Returns -EEXIST if it * has already been registered; -EINVAL if @oi is NULL or if the * record pointed to by @oi is missing required fields; or 0 upon * success. * * XXX The data should be copied into bootmem, so the original data * should be marked __initdata and freed after init. This would allow * unneeded omap_hwmods to be freed on multi-OMAP configurations. 
*/ static int __init _register_link(struct omap_hwmod_ocp_if *oi) { if (!oi || !oi->master || !oi->slave || !oi->user) return -EINVAL; if (oi->_int_flags & _OCPIF_INT_FLAGS_REGISTERED) return -EEXIST; pr_debug("omap_hwmod: registering link from %s to %s\n", oi->master->name, oi->slave->name); /* * Register the connected hwmods, if they haven't been * registered already */ if (oi->master->_state != _HWMOD_STATE_REGISTERED) _register(oi->master); if (oi->slave->_state != _HWMOD_STATE_REGISTERED) _register(oi->slave); _add_link(oi); oi->_int_flags |= _OCPIF_INT_FLAGS_REGISTERED; return 0; } /** * _alloc_linkspace - allocate large block of hwmod links * @ois: pointer to an array of struct omap_hwmod_ocp_if records to count * * Allocate a large block of struct omap_hwmod_link records. This * improves boot time significantly by avoiding the need to allocate * individual records one by one. If the number of records to * allocate in the block hasn't been manually specified, this function * will count the number of struct omap_hwmod_ocp_if records in @ois * and use that to determine the allocation size. For SoC families * that require multiple list registrations, such as OMAP3xxx, this * estimation process isn't optimal, so manual estimation is advised * in those cases. Returns -EEXIST if the allocation has already occurred * or 0 upon success. */ static int __init _alloc_linkspace(struct omap_hwmod_ocp_if **ois) { unsigned int i = 0; unsigned int sz; if (linkspace) { WARN(1, "linkspace already allocated\n"); return -EEXIST; } if (max_ls == 0) while (ois[i++]) max_ls += LINKS_PER_OCP_IF; sz = sizeof(struct omap_hwmod_link) * max_ls; pr_debug("omap_hwmod: %s: allocating %d byte linkspace (%d links)\n", __func__, sz, max_ls); linkspace = memblock_virt_alloc(sz, 0); return 0; } /* Static functions intended only for use in soc_ops field function pointers */ /** * _omap2xxx_3xxx_wait_target_ready - wait for a module to leave slave idle * @oh: struct omap_hwmod * * * Wait for a module @oh to leave slave idle. Returns 0 if the module * does not have an IDLEST bit or if the module successfully leaves * slave idle; otherwise, pass along the return value of the * appropriate *_cm*_wait_module_ready() function. */ static int _omap2xxx_3xxx_wait_target_ready(struct omap_hwmod *oh) { if (!oh) return -EINVAL; if (oh->flags & HWMOD_NO_IDLEST) return 0; if (!_find_mpu_rt_port(oh)) return 0; /* XXX check module SIDLEMODE, hardreset status, enabled clocks */ return omap_cm_wait_module_ready(0, oh->prcm.omap2.module_offs, oh->prcm.omap2.idlest_reg_id, oh->prcm.omap2.idlest_idle_bit); } /** * _omap4_wait_target_ready - wait for a module to leave slave idle * @oh: struct omap_hwmod * * * Wait for a module @oh to leave slave idle. Returns 0 if the module * does not have an IDLEST bit or if the module successfully leaves * slave idle; otherwise, pass along the return value of the * appropriate *_cm*_wait_module_ready() function. 
*/ static int _omap4_wait_target_ready(struct omap_hwmod *oh) { if (!oh) return -EINVAL; if (oh->flags & HWMOD_NO_IDLEST || !oh->clkdm) return 0; if (!_find_mpu_rt_port(oh)) return 0; /* XXX check module SIDLEMODE, hardreset status */ return omap_cm_wait_module_ready(oh->clkdm->prcm_partition, oh->clkdm->cm_inst, oh->prcm.omap4.clkctrl_offs, 0); } /** * _omap2_assert_hardreset - call OMAP2 PRM hardreset fn with hwmod args * @oh: struct omap_hwmod * to assert hardreset * @ohri: hardreset line data * * Call omap2_prm_assert_hardreset() with parameters extracted from * the hwmod @oh and the hardreset line data @ohri. Only intended for * use as an soc_ops function pointer. Passes along the return value * from omap2_prm_assert_hardreset(). XXX This function is scheduled * for removal when the PRM code is moved into drivers/. */ static int _omap2_assert_hardreset(struct omap_hwmod *oh, struct omap_hwmod_rst_info *ohri) { return omap_prm_assert_hardreset(ohri->rst_shift, 0, oh->prcm.omap2.module_offs, 0); } /** * _omap2_deassert_hardreset - call OMAP2 PRM hardreset fn with hwmod args * @oh: struct omap_hwmod * to deassert hardreset * @ohri: hardreset line data * * Call omap2_prm_deassert_hardreset() with parameters extracted from * the hwmod @oh and the hardreset line data @ohri. Only intended for * use as an soc_ops function pointer. Passes along the return value * from omap2_prm_deassert_hardreset(). XXX This function is * scheduled for removal when the PRM code is moved into drivers/. */ static int _omap2_deassert_hardreset(struct omap_hwmod *oh, struct omap_hwmod_rst_info *ohri) { return omap_prm_deassert_hardreset(ohri->rst_shift, ohri->st_shift, 0, oh->prcm.omap2.module_offs, 0, 0); } /** * _omap2_is_hardreset_asserted - call OMAP2 PRM hardreset fn with hwmod args * @oh: struct omap_hwmod * to test hardreset * @ohri: hardreset line data * * Call omap2_prm_is_hardreset_asserted() with parameters extracted * from the hwmod @oh and the hardreset line data @ohri. Only * intended for use as an soc_ops function pointer. Passes along the * return value from omap2_prm_is_hardreset_asserted(). XXX This * function is scheduled for removal when the PRM code is moved into * drivers/. */ static int _omap2_is_hardreset_asserted(struct omap_hwmod *oh, struct omap_hwmod_rst_info *ohri) { return omap_prm_is_hardreset_asserted(ohri->st_shift, 0, oh->prcm.omap2.module_offs, 0); } /** * _omap4_assert_hardreset - call OMAP4 PRM hardreset fn with hwmod args * @oh: struct omap_hwmod * to assert hardreset * @ohri: hardreset line data * * Call omap4_prminst_assert_hardreset() with parameters extracted * from the hwmod @oh and the hardreset line data @ohri. Only * intended for use as an soc_ops function pointer. Passes along the * return value from omap4_prminst_assert_hardreset(). XXX This * function is scheduled for removal when the PRM code is moved into * drivers/. */ static int _omap4_assert_hardreset(struct omap_hwmod *oh, struct omap_hwmod_rst_info *ohri) { if (!oh->clkdm) return -EINVAL; return omap_prm_assert_hardreset(ohri->rst_shift, oh->clkdm->pwrdm.ptr->prcm_partition, oh->clkdm->pwrdm.ptr->prcm_offs, oh->prcm.omap4.rstctrl_offs); } /** * _omap4_deassert_hardreset - call OMAP4 PRM hardreset fn with hwmod args * @oh: struct omap_hwmod * to deassert hardreset * @ohri: hardreset line data * * Call omap4_prminst_deassert_hardreset() with parameters extracted * from the hwmod @oh and the hardreset line data @ohri. Only * intended for use as an soc_ops function pointer. 
Passes along the * return value from omap4_prminst_deassert_hardreset(). XXX This * function is scheduled for removal when the PRM code is moved into * drivers/. */ static int _omap4_deassert_hardreset(struct omap_hwmod *oh, struct omap_hwmod_rst_info *ohri) { if (!oh->clkdm) return -EINVAL; if (ohri->st_shift) pr_err("omap_hwmod: %s: %s: hwmod data error: OMAP4 does not support st_shift\n", oh->name, ohri->name); return omap_prm_deassert_hardreset(ohri->rst_shift, ohri->rst_shift, oh->clkdm->pwrdm.ptr->prcm_partition, oh->clkdm->pwrdm.ptr->prcm_offs, oh->prcm.omap4.rstctrl_offs, oh->prcm.omap4.rstctrl_offs + OMAP4_RST_CTRL_ST_OFFSET); } /** * _omap4_is_hardreset_asserted - call OMAP4 PRM hardreset fn with hwmod args * @oh: struct omap_hwmod * to test hardreset * @ohri: hardreset line data * * Call omap4_prminst_is_hardreset_asserted() with parameters * extracted from the hwmod @oh and the hardreset line data @ohri. * Only intended for use as an soc_ops function pointer. Passes along * the return value from omap4_prminst_is_hardreset_asserted(). XXX * This function is scheduled for removal when the PRM code is moved * into drivers/. */ static int _omap4_is_hardreset_asserted(struct omap_hwmod *oh, struct omap_hwmod_rst_info *ohri) { if (!oh->clkdm) return -EINVAL; return omap_prm_is_hardreset_asserted(ohri->rst_shift, oh->clkdm->pwrdm.ptr-> prcm_partition, oh->clkdm->pwrdm.ptr->prcm_offs, oh->prcm.omap4.rstctrl_offs); } /** * _am33xx_deassert_hardreset - call AM33XX PRM hardreset fn with hwmod args * @oh: struct omap_hwmod * to deassert hardreset * @ohri: hardreset line data * * Call am33xx_prminst_deassert_hardreset() with parameters extracted * from the hwmod @oh and the hardreset line data @ohri. Only * intended for use as an soc_ops function pointer. Passes along the * return value from am33xx_prminst_deassert_hardreset(). XXX This * function is scheduled for removal when the PRM code is moved into * drivers/. */ static int _am33xx_deassert_hardreset(struct omap_hwmod *oh, struct omap_hwmod_rst_info *ohri) { return omap_prm_deassert_hardreset(ohri->rst_shift, ohri->st_shift, oh->clkdm->pwrdm.ptr->prcm_partition, oh->clkdm->pwrdm.ptr->prcm_offs, oh->prcm.omap4.rstctrl_offs, oh->prcm.omap4.rstst_offs); } /* Public functions */ u32 omap_hwmod_read(struct omap_hwmod *oh, u16 reg_offs) { if (oh->flags & HWMOD_16BIT_REG) return readw_relaxed(oh->_mpu_rt_va + reg_offs); else return readl_relaxed(oh->_mpu_rt_va + reg_offs); } void omap_hwmod_write(u32 v, struct omap_hwmod *oh, u16 reg_offs) { if (oh->flags & HWMOD_16BIT_REG) writew_relaxed(v, oh->_mpu_rt_va + reg_offs); else writel_relaxed(v, oh->_mpu_rt_va + reg_offs); } /** * omap_hwmod_softreset - reset a module via SYSCONFIG.SOFTRESET bit * @oh: struct omap_hwmod * * * This is a public function exposed to drivers. Some drivers may need to do * some settings before and after resetting the device. Those drivers after * doing the necessary settings could use this function to start a reset by * setting the SYSCONFIG.SOFTRESET bit. 
*/ int omap_hwmod_softreset(struct omap_hwmod *oh) { u32 v; int ret; if (!oh || !(oh->_sysc_cache)) return -EINVAL; v = oh->_sysc_cache; ret = _set_softreset(oh, &v); if (ret) goto error; _write_sysconfig(v, oh); ret = _clear_softreset(oh, &v); if (ret) goto error; _write_sysconfig(v, oh); error: return ret; } /** * omap_hwmod_lookup - look up a registered omap_hwmod by name * @name: name of the omap_hwmod to look up * * Given a @name of an omap_hwmod, return a pointer to the registered * struct omap_hwmod *, or NULL upon error. */ struct omap_hwmod *omap_hwmod_lookup(const char *name) { struct omap_hwmod *oh; if (!name) return NULL; oh = _lookup(name); return oh; } /** * omap_hwmod_for_each - call function for each registered omap_hwmod * @fn: pointer to a callback function * @data: void * data to pass to callback function * * Call @fn for each registered omap_hwmod, passing @data to each * function. @fn must return 0 for success or any other value for * failure. If @fn returns non-zero, the iteration across omap_hwmods * will stop and the non-zero return value will be passed to the * caller of omap_hwmod_for_each(). @fn is called while the hwmod * list is being iterated, with no locks held, so it must not register * or unregister hwmods. */ int omap_hwmod_for_each(int (*fn)(struct omap_hwmod *oh, void *data), void *data) { struct omap_hwmod *temp_oh; int ret = 0; if (!fn) return -EINVAL; list_for_each_entry(temp_oh, &omap_hwmod_list, node) { ret = (*fn)(temp_oh, data); if (ret) break; } return ret; } /** * omap_hwmod_register_links - register an array of hwmod links * @ois: pointer to an array of omap_hwmod_ocp_if to register * * Intended to be called early in boot before the clock framework is * initialized. If @ois is not null, will register all omap_hwmods * listed in @ois that are valid for this chip. Returns -EINVAL if * omap_hwmod_init() hasn't been called before calling this function, * -ENOMEM if the link memory area can't be allocated, or 0 upon * success. */ int __init omap_hwmod_register_links(struct omap_hwmod_ocp_if **ois) { int r, i; if (!inited) return -EINVAL; if (!ois) return 0; if (ois[0] == NULL) /* Empty list */ return 0; if (!linkspace) { if (_alloc_linkspace(ois)) { pr_err("omap_hwmod: could not allocate link space\n"); return -ENOMEM; } } i = 0; do { r = _register_link(ois[i]); WARN(r && r != -EEXIST, "omap_hwmod: _register_link(%s -> %s) returned %d\n", ois[i]->master->name, ois[i]->slave->name, r); } while (ois[++i]); return 0; } /** * _ensure_mpu_hwmod_is_setup - ensure the MPU SS hwmod is init'ed and set up * @oh: pointer to the hwmod currently being set up (usually not the MPU) * * If the hwmod data corresponding to the MPU subsystem IP block * hasn't been initialized and set up yet, do so now. This must be * done first since sleep dependencies may be added from other hwmods * to the MPU. Intended to be called only by omap_hwmod_setup*(). No * return value. */ static void __init _ensure_mpu_hwmod_is_setup(struct omap_hwmod *oh) { if (!mpu_oh || mpu_oh->_state == _HWMOD_STATE_UNKNOWN) pr_err("omap_hwmod: %s: MPU initiator hwmod %s not yet registered\n", __func__, MPU_INITIATOR_NAME); else if (mpu_oh->_state == _HWMOD_STATE_REGISTERED && oh != mpu_oh) omap_hwmod_setup_one(MPU_INITIATOR_NAME); } /** * omap_hwmod_setup_one - set up a single hwmod * @oh_name: const char * name of the already-registered hwmod to set up * * Initialize and set up a single hwmod. Intended to be used for a * small number of early devices, such as the timer IP blocks used for * the scheduler clock. Must be called after omap2_clk_init().
* Resolves the struct clk names to struct clk pointers for each * registered omap_hwmod. Also calls _setup() on each hwmod. Returns * -EINVAL upon error or 0 upon success. */ int __init omap_hwmod_setup_one(const char *oh_name) { struct omap_hwmod *oh; pr_debug("omap_hwmod: %s: %s\n", oh_name, __func__); oh = _lookup(oh_name); if (!oh) { WARN(1, "omap_hwmod: %s: hwmod not yet registered\n", oh_name); return -EINVAL; } _ensure_mpu_hwmod_is_setup(oh); _init(oh, NULL); _setup(oh, NULL); return 0; } /** * omap_hwmod_setup_all - set up all registered IP blocks * * Initialize and set up all IP blocks registered with the hwmod code. * Must be called after omap2_clk_init(). Resolves the struct clk * names to struct clk pointers for each registered omap_hwmod. Also * calls _setup() on each hwmod. Returns 0 upon success. */ static int __init omap_hwmod_setup_all(void) { _ensure_mpu_hwmod_is_setup(NULL); omap_hwmod_for_each(_init, NULL); omap_hwmod_for_each(_setup, NULL); return 0; } omap_postcore_initcall(omap_hwmod_setup_all); /** * omap_hwmod_enable - enable an omap_hwmod * @oh: struct omap_hwmod * * * Enable an omap_hwmod @oh. Intended to be called by omap_device_enable(). * Returns -EINVAL on error or passes along the return value from _enable(). */ int omap_hwmod_enable(struct omap_hwmod *oh) { int r; unsigned long flags; if (!oh) return -EINVAL; spin_lock_irqsave(&oh->_lock, flags); r = _enable(oh); spin_unlock_irqrestore(&oh->_lock, flags); return r; } /** * omap_hwmod_idle - idle an omap_hwmod * @oh: struct omap_hwmod * * * Idle an omap_hwmod @oh. Intended to be called by omap_device_idle(). * Returns -EINVAL on error or passes along the return value from _idle(). */ int omap_hwmod_idle(struct omap_hwmod *oh) { int r; unsigned long flags; if (!oh) return -EINVAL; spin_lock_irqsave(&oh->_lock, flags); r = _idle(oh); spin_unlock_irqrestore(&oh->_lock, flags); return r; } /** * omap_hwmod_shutdown - shutdown an omap_hwmod * @oh: struct omap_hwmod * * * Shutdown an omap_hwmod @oh. Intended to be called by * omap_device_shutdown(). Returns -EINVAL on error or passes along * the return value from _shutdown(). */ int omap_hwmod_shutdown(struct omap_hwmod *oh) { int r; unsigned long flags; if (!oh) return -EINVAL; spin_lock_irqsave(&oh->_lock, flags); r = _shutdown(oh); spin_unlock_irqrestore(&oh->_lock, flags); return r; } /* * IP block data retrieval functions */ /** * omap_hwmod_count_resources - count number of struct resources needed by hwmod * @oh: struct omap_hwmod * * @flags: Type of resources to include when counting (IRQ/DMA/MEM) * * Count the number of struct resource array elements necessary to * contain omap_hwmod @oh resources. Intended to be called by code * that registers omap_devices. Intended to be used to determine the * size of a dynamically-allocated struct resource array, before * calling omap_hwmod_fill_resources(). Returns the number of struct * resource array elements needed. * * XXX This code is not optimized. It could attempt to merge adjacent * resource IDs. 
* */ int omap_hwmod_count_resources(struct omap_hwmod *oh, unsigned long flags) { int ret = 0; if (flags & IORESOURCE_IRQ) ret += _count_mpu_irqs(oh); if (flags & IORESOURCE_DMA) ret += _count_sdma_reqs(oh); if (flags & IORESOURCE_MEM) { int i = 0; struct omap_hwmod_ocp_if *os; struct list_head *p = oh->slave_ports.next; while (i < oh->slaves_cnt) { os = _fetch_next_ocp_if(&p, &i); ret += _count_ocp_if_addr_spaces(os); } } return ret; } /** * omap_hwmod_fill_resources - fill struct resource array with hwmod data * @oh: struct omap_hwmod * * @res: pointer to the first element of an array of struct resource to fill * * Fill the struct resource array @res with resource data from the * omap_hwmod @oh. Intended to be called by code that registers * omap_devices. See also omap_hwmod_count_resources(). Returns the * number of array elements filled. */ int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res) { struct omap_hwmod_ocp_if *os; struct list_head *p; int i, j, mpu_irqs_cnt, sdma_reqs_cnt, addr_cnt; int r = 0; /* For each IRQ, DMA, memory area, fill in array.*/ mpu_irqs_cnt = _count_mpu_irqs(oh); for (i = 0; i < mpu_irqs_cnt; i++) { unsigned int irq; if (oh->xlate_irq) irq = oh->xlate_irq((oh->mpu_irqs + i)->irq); else irq = (oh->mpu_irqs + i)->irq; (res + r)->name = (oh->mpu_irqs + i)->name; (res + r)->start = irq; (res + r)->end = irq; (res + r)->flags = IORESOURCE_IRQ; r++; } sdma_reqs_cnt = _count_sdma_reqs(oh); for (i = 0; i < sdma_reqs_cnt; i++) { (res + r)->name = (oh->sdma_reqs + i)->name; (res + r)->start = (oh->sdma_reqs + i)->dma_req; (res + r)->end = (oh->sdma_reqs + i)->dma_req; (res + r)->flags = IORESOURCE_DMA; r++; } p = oh->slave_ports.next; i = 0; while (i < oh->slaves_cnt) { os = _fetch_next_ocp_if(&p, &i); addr_cnt = _count_ocp_if_addr_spaces(os); for (j = 0; j < addr_cnt; j++) { (res + r)->name = (os->addr + j)->name; (res + r)->start = (os->addr + j)->pa_start; (res + r)->end = (os->addr + j)->pa_end; (res + r)->flags = IORESOURCE_MEM; r++; } } return r; } /** * omap_hwmod_fill_dma_resources - fill struct resource array with dma data * @oh: struct omap_hwmod * * @res: pointer to the array of struct resource to fill * * Fill the struct resource array @res with dma resource data from the * omap_hwmod @oh. Intended to be called by code that registers * omap_devices. See also omap_hwmod_count_resources(). Returns the * number of array elements filled. */ int omap_hwmod_fill_dma_resources(struct omap_hwmod *oh, struct resource *res) { int i, sdma_reqs_cnt; int r = 0; sdma_reqs_cnt = _count_sdma_reqs(oh); for (i = 0; i < sdma_reqs_cnt; i++) { (res + r)->name = (oh->sdma_reqs + i)->name; (res + r)->start = (oh->sdma_reqs + i)->dma_req; (res + r)->end = (oh->sdma_reqs + i)->dma_req; (res + r)->flags = IORESOURCE_DMA; r++; } return r; } /** * omap_hwmod_get_resource_byname - fetch IP block integration data by name * @oh: struct omap_hwmod * to operate on * @type: one of the IORESOURCE_* constants from include/linux/ioport.h * @name: pointer to the name of the data to fetch (optional) * @rsrc: pointer to a struct resource, allocated by the caller * * Retrieve MPU IRQ, SDMA request line, or address space start/end * data for the IP block pointed to by @oh. The data will be filled * into a struct resource record pointed to by @rsrc. The struct * resource must be allocated by the caller. When @name is non-null, * the data associated with the matching entry in the IRQ/SDMA/address * space hwmod data arrays will be returned. 
If @name is null, the * first array entry will be returned. Data order is not meaningful * in hwmod data, so callers are strongly encouraged to use a non-null * @name whenever possible to avoid unpredictable effects if hwmod * data is later added that causes data ordering to change. This * function is only intended for use by OMAP core code. Device * drivers should not call this function - the appropriate bus-related * data accessor functions should be used instead. Returns 0 upon * success or a negative error code upon error. */ int omap_hwmod_get_resource_byname(struct omap_hwmod *oh, unsigned int type, const char *name, struct resource *rsrc) { int r; unsigned int irq, dma; u32 pa_start, pa_end; if (!oh || !rsrc) return -EINVAL; if (type == IORESOURCE_IRQ) { r = _get_mpu_irq_by_name(oh, name, &irq); if (r) return r; rsrc->start = irq; rsrc->end = irq; } else if (type == IORESOURCE_DMA) { r = _get_sdma_req_by_name(oh, name, &dma); if (r) return r; rsrc->start = dma; rsrc->end = dma; } else if (type == IORESOURCE_MEM) { r = _get_addr_space_by_name(oh, name, &pa_start, &pa_end); if (r) return r; rsrc->start = pa_start; rsrc->end = pa_end; } else { return -EINVAL; } rsrc->flags = type; rsrc->name = name; return 0; } /** * omap_hwmod_get_pwrdm - return pointer to this module's main powerdomain * @oh: struct omap_hwmod * * * Return the powerdomain pointer associated with the OMAP module * @oh's main clock. If @oh does not have a main clk, return the * powerdomain associated with the interface clock associated with the * module's MPU port. (XXX Perhaps this should use the SDMA port * instead?) Returns NULL on error, or a struct powerdomain * on * success. */ struct powerdomain *omap_hwmod_get_pwrdm(struct omap_hwmod *oh) { struct clk *c; struct omap_hwmod_ocp_if *oi; struct clockdomain *clkdm; struct clk_hw_omap *clk; if (!oh) return NULL; if (oh->clkdm) return oh->clkdm->pwrdm.ptr; if (oh->_clk) { c = oh->_clk; } else { oi = _find_mpu_rt_port(oh); if (!oi) return NULL; c = oi->_clk; } clk = to_clk_hw_omap(__clk_get_hw(c)); clkdm = clk->clkdm; if (!clkdm) return NULL; return clkdm->pwrdm.ptr; } /** * omap_hwmod_get_mpu_rt_va - return the module's base address (for the MPU) * @oh: struct omap_hwmod * * * Returns the virtual address corresponding to the beginning of the * module's register target, in the address range that is intended to * be used by the MPU. Returns the virtual address upon success or NULL * upon error. */ void __iomem *omap_hwmod_get_mpu_rt_va(struct omap_hwmod *oh) { if (!oh) return NULL; if (oh->_int_flags & _HWMOD_NO_MPU_PORT) return NULL; if (oh->_state == _HWMOD_STATE_UNKNOWN) return NULL; return oh->_mpu_rt_va; } /* * XXX what about functions for drivers to save/restore ocp_sysconfig * for context save/restore operations? */ /** * omap_hwmod_enable_wakeup - allow device to wake up the system * @oh: struct omap_hwmod * * * Sets the module OCP socket ENAWAKEUP bit to allow the module to * send wakeups to the PRCM, and enable I/O ring wakeup events for * this IP block if it has dynamic mux entries. Eventually this * should set PRCM wakeup registers to cause the PRCM to receive * wakeup events from the module. Does not set any wakeup routing * registers beyond this point - if the module is to wake up any other * module or subsystem, that must be set separately. Called by * omap_device code. Returns -EINVAL on error or 0 upon success. 
*/ int omap_hwmod_enable_wakeup(struct omap_hwmod *oh) { unsigned long flags; u32 v; spin_lock_irqsave(&oh->_lock, flags); if (oh->class->sysc && (oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP)) { v = oh->_sysc_cache; _enable_wakeup(oh, &v); _write_sysconfig(v, oh); } _set_idle_ioring_wakeup(oh, true); spin_unlock_irqrestore(&oh->_lock, flags); return 0; } /** * omap_hwmod_disable_wakeup - prevent device from waking the system * @oh: struct omap_hwmod * * * Clears the module OCP socket ENAWAKEUP bit to prevent the module * from sending wakeups to the PRCM, and disable I/O ring wakeup * events for this IP block if it has dynamic mux entries. Eventually * this should clear PRCM wakeup registers to cause the PRCM to ignore * wakeup events from the module. Does not set any wakeup routing * registers beyond this point - if the module is to wake up any other * module or subsystem, that must be set separately. Called by * omap_device code. Returns -EINVAL on error or 0 upon success. */ int omap_hwmod_disable_wakeup(struct omap_hwmod *oh) { unsigned long flags; u32 v; spin_lock_irqsave(&oh->_lock, flags); if (oh->class->sysc && (oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP)) { v = oh->_sysc_cache; _disable_wakeup(oh, &v); _write_sysconfig(v, oh); } _set_idle_ioring_wakeup(oh, false); spin_unlock_irqrestore(&oh->_lock, flags); return 0; } /** * omap_hwmod_assert_hardreset - assert the HW reset line of submodules * contained in the hwmod module. * @oh: struct omap_hwmod * * @name: name of the reset line to look up and assert * * Some IP blocks, such as dsp, ipu or iva, contain a processor that * requires an HW reset line to be asserted / deasserted in order to * fully enable the IP. Returns -EINVAL if @oh is null or if the operation is not * yet supported on this OMAP; otherwise, passes along the return value * from _assert_hardreset(). */ int omap_hwmod_assert_hardreset(struct omap_hwmod *oh, const char *name) { int ret; unsigned long flags; if (!oh) return -EINVAL; spin_lock_irqsave(&oh->_lock, flags); ret = _assert_hardreset(oh, name); spin_unlock_irqrestore(&oh->_lock, flags); return ret; } /** * omap_hwmod_deassert_hardreset - deassert the HW reset line of submodules * contained in the hwmod module. * @oh: struct omap_hwmod * * @name: name of the reset line to look up and deassert * * Some IP blocks, such as dsp, ipu or iva, contain a processor that * requires an HW reset line to be asserted / deasserted in order to * fully enable the IP. Returns -EINVAL if @oh is null or if the operation is not * yet supported on this OMAP; otherwise, passes along the return value * from _deassert_hardreset(). */ int omap_hwmod_deassert_hardreset(struct omap_hwmod *oh, const char *name) { int ret; unsigned long flags; if (!oh) return -EINVAL; spin_lock_irqsave(&oh->_lock, flags); ret = _deassert_hardreset(oh, name); spin_unlock_irqrestore(&oh->_lock, flags); return ret; } /** * omap_hwmod_for_each_by_class - call @fn for each hwmod of class @classname * @classname: struct omap_hwmod_class name to search for * @fn: callback function pointer to call for each hwmod in class @classname * @user: arbitrary context data to pass to the callback function * * For each omap_hwmod of class @classname, call @fn. * If the callback function returns something other than * zero, the iterator is terminated, and the callback function's return * value is passed back to the caller. Returns 0 upon success, -EINVAL * if @classname or @fn are NULL, or passes back the error code from @fn.
*/ int omap_hwmod_for_each_by_class(const char *classname, int (*fn)(struct omap_hwmod *oh, void *user), void *user) { struct omap_hwmod *temp_oh; int ret = 0; if (!classname || !fn) return -EINVAL; pr_debug("omap_hwmod: %s: looking for modules of class %s\n", __func__, classname); list_for_each_entry(temp_oh, &omap_hwmod_list, node) { if (!strcmp(temp_oh->class->name, classname)) { pr_debug("omap_hwmod: %s: %s: calling callback fn\n", __func__, temp_oh->name); ret = (*fn)(temp_oh, user); if (ret) break; } } if (ret) pr_debug("omap_hwmod: %s: iterator terminated early: %d\n", __func__, ret); return ret; } /** * omap_hwmod_set_postsetup_state - set the post-_setup() state for this hwmod * @oh: struct omap_hwmod * * @state: state that _setup() should leave the hwmod in * * Sets the hwmod state that @oh will enter at the end of _setup() * (called by omap_hwmod_setup_*()). See also the documentation * for _setup_postsetup(), above. Returns 0 upon success or * -EINVAL if there is a problem with the arguments or if the hwmod is * in the wrong state. */ int omap_hwmod_set_postsetup_state(struct omap_hwmod *oh, u8 state) { int ret; unsigned long flags; if (!oh) return -EINVAL; if (state != _HWMOD_STATE_DISABLED && state != _HWMOD_STATE_ENABLED && state != _HWMOD_STATE_IDLE) return -EINVAL; spin_lock_irqsave(&oh->_lock, flags); if (oh->_state != _HWMOD_STATE_REGISTERED) { ret = -EINVAL; goto ohsps_unlock; } oh->_postsetup_state = state; ret = 0; ohsps_unlock: spin_unlock_irqrestore(&oh->_lock, flags); return ret; } /** * omap_hwmod_get_context_loss_count - get lost context count * @oh: struct omap_hwmod * * * Returns the context loss count of associated @oh * upon success, or zero if no context loss data is available. * * On OMAP4, this queries the per-hwmod context loss register, * assuming one exists. If not, or on OMAP2/3, this queries the * enclosing powerdomain context loss count. */ int omap_hwmod_get_context_loss_count(struct omap_hwmod *oh) { struct powerdomain *pwrdm; int ret = 0; if (soc_ops.get_context_lost) return soc_ops.get_context_lost(oh); pwrdm = omap_hwmod_get_pwrdm(oh); if (pwrdm) ret = pwrdm_get_context_loss_count(pwrdm); return ret; } /** * omap_hwmod_init - initialize the hwmod code * * Sets up some function pointers needed by the hwmod code to operate on the * currently-booted SoC. Intended to be called once during kernel init * before any hwmods are registered. No return value. 
*/ void __init omap_hwmod_init(void) { if (cpu_is_omap24xx()) { soc_ops.wait_target_ready = _omap2xxx_3xxx_wait_target_ready; soc_ops.assert_hardreset = _omap2_assert_hardreset; soc_ops.deassert_hardreset = _omap2_deassert_hardreset; soc_ops.is_hardreset_asserted = _omap2_is_hardreset_asserted; } else if (cpu_is_omap34xx()) { soc_ops.wait_target_ready = _omap2xxx_3xxx_wait_target_ready; soc_ops.assert_hardreset = _omap2_assert_hardreset; soc_ops.deassert_hardreset = _omap2_deassert_hardreset; soc_ops.is_hardreset_asserted = _omap2_is_hardreset_asserted; soc_ops.init_clkdm = _init_clkdm; } else if (cpu_is_omap44xx() || soc_is_omap54xx() || soc_is_dra7xx()) { soc_ops.enable_module = _omap4_enable_module; soc_ops.disable_module = _omap4_disable_module; soc_ops.wait_target_ready = _omap4_wait_target_ready; soc_ops.assert_hardreset = _omap4_assert_hardreset; soc_ops.deassert_hardreset = _omap4_deassert_hardreset; soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted; soc_ops.init_clkdm = _init_clkdm; soc_ops.update_context_lost = _omap4_update_context_lost; soc_ops.get_context_lost = _omap4_get_context_lost; } else if (cpu_is_ti814x() || cpu_is_ti816x() || soc_is_am33xx() || soc_is_am43xx()) { soc_ops.enable_module = _omap4_enable_module; soc_ops.disable_module = _omap4_disable_module; soc_ops.wait_target_ready = _omap4_wait_target_ready; soc_ops.assert_hardreset = _omap4_assert_hardreset; soc_ops.deassert_hardreset = _am33xx_deassert_hardreset; soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted; soc_ops.init_clkdm = _init_clkdm; } else { WARN(1, "omap_hwmod: unknown SoC type\n"); } inited = true; } /** * omap_hwmod_get_main_clk - get pointer to main clock name * @oh: struct omap_hwmod * * * Returns the main clock name associated with @oh upon success, * or NULL if @oh is NULL. */ const char *omap_hwmod_get_main_clk(struct omap_hwmod *oh) { if (!oh) return NULL; return oh->main_clk; }
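/*
 * Editor's sketch (not part of the original file): a minimal, hypothetical
 * example of driving the public hwmod API above from init-time integration
 * code.  The hwmod name "mmc1" and the register offset 0x0 are assumptions
 * for illustration; only omap_hwmod_* functions defined in this file are
 * used.
 */
static int __maybe_unused example_hwmod_usage(void)
{
	struct omap_hwmod *oh;
	u32 rev;

	oh = omap_hwmod_lookup("mmc1");	/* hypothetical hwmod name */
	if (!oh)
		return -ENODEV;

	if (omap_hwmod_enable(oh))	/* enable clocks and the module */
		return -EIO;

	/* read through the MPU register target, e.g. an IP revision register */
	rev = omap_hwmod_read(oh, 0x0);
	pr_info("example: %s revision 0x%08x\n", oh->name, rev);

	return omap_hwmod_idle(oh);	/* put the block back into idle */
}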
gpl-2.0
MichaelQQ/linux-2.6.35-vpls
fs/reiserfs/ioctl.c
289
5667
/* * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README */ #include <linux/capability.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/reiserfs_fs.h> #include <linux/time.h> #include <asm/uaccess.h> #include <linux/pagemap.h> #include <linux/smp_lock.h> #include <linux/compat.h> /* * reiserfs_ioctl - handler for ioctl for inode * supported commands: * 1) REISERFS_IOC_UNPACK - try to unpack tail from direct item into indirect * and prevent packing file (argument arg has to be non-zero) * 2) REISERFS_IOC_[GS]ETFLAGS, REISERFS_IOC_[GS]ETVERSION * 3) That's all for a while ... */ long reiserfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = filp->f_path.dentry->d_inode; unsigned int flags; int err = 0; reiserfs_write_lock(inode->i_sb); switch (cmd) { case REISERFS_IOC_UNPACK: if (S_ISREG(inode->i_mode)) { if (arg) err = reiserfs_unpack(inode, filp); } else err = -ENOTTY; break; /* * following two cases are taken from fs/ext2/ioctl.c by Remy * Card (card@masi.ibp.fr) */ case REISERFS_IOC_GETFLAGS: if (!reiserfs_attrs(inode->i_sb)) { err = -ENOTTY; break; } flags = REISERFS_I(inode)->i_attrs; i_attrs_to_sd_attrs(inode, (__u16 *) & flags); err = put_user(flags, (int __user *)arg); break; case REISERFS_IOC_SETFLAGS:{ if (!reiserfs_attrs(inode->i_sb)) { err = -ENOTTY; break; } err = mnt_want_write(filp->f_path.mnt); if (err) break; if (!is_owner_or_cap(inode)) { err = -EPERM; goto setflags_out; } if (get_user(flags, (int __user *)arg)) { err = -EFAULT; goto setflags_out; } /* * Is it quota file? Do not allow user to mess with it */ if (IS_NOQUOTA(inode)) { err = -EPERM; goto setflags_out; } if (((flags ^ REISERFS_I(inode)-> i_attrs) & (REISERFS_IMMUTABLE_FL | REISERFS_APPEND_FL)) && !capable(CAP_LINUX_IMMUTABLE)) { err = -EPERM; goto setflags_out; } if ((flags & REISERFS_NOTAIL_FL) && S_ISREG(inode->i_mode)) { int result; result = reiserfs_unpack(inode, filp); if (result) { err = result; goto setflags_out; } } sd_attrs_to_i_attrs(flags, inode); REISERFS_I(inode)->i_attrs = flags; inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(inode); setflags_out: mnt_drop_write(filp->f_path.mnt); break; } case REISERFS_IOC_GETVERSION: err = put_user(inode->i_generation, (int __user *)arg); break; case REISERFS_IOC_SETVERSION: if (!is_owner_or_cap(inode)) { err = -EPERM; break; } err = mnt_want_write(filp->f_path.mnt); if (err) break; if (get_user(inode->i_generation, (int __user *)arg)) { err = -EFAULT; goto setversion_out; } inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(inode); setversion_out: mnt_drop_write(filp->f_path.mnt); break; default: err = -ENOTTY; } reiserfs_write_unlock(inode->i_sb); return err; } #ifdef CONFIG_COMPAT long reiserfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { /* These are just misnamed, they actually get/put from/to user an int */ switch (cmd) { case REISERFS_IOC32_UNPACK: cmd = REISERFS_IOC_UNPACK; break; case REISERFS_IOC32_GETFLAGS: cmd = REISERFS_IOC_GETFLAGS; break; case REISERFS_IOC32_SETFLAGS: cmd = REISERFS_IOC_SETFLAGS; break; case REISERFS_IOC32_GETVERSION: cmd = REISERFS_IOC_GETVERSION; break; case REISERFS_IOC32_SETVERSION: cmd = REISERFS_IOC_SETVERSION; break; default: return -ENOIOCTLCMD; } return reiserfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); } #endif int reiserfs_commit_write(struct file *f, struct page *page, unsigned from, unsigned to); int reiserfs_prepare_write(struct file *f, struct page *page, unsigned from, unsigned to); /* ** reiserfs_unpack 
** This function tries to convert the tail from a direct item into an ** indirect one. It also sets the nopack bit (i_nopack_mask) in ** REISERFS_I(inode)->i_flags. */ int reiserfs_unpack(struct inode *inode, struct file *filp) { int retval = 0; int depth; int index; struct page *page; struct address_space *mapping; unsigned long write_from; unsigned long blocksize = inode->i_sb->s_blocksize; if (inode->i_size == 0) { REISERFS_I(inode)->i_flags |= i_nopack_mask; return 0; } /* ioctl already done */ if (REISERFS_I(inode)->i_flags & i_nopack_mask) { return 0; } depth = reiserfs_write_lock_once(inode->i_sb); /* we need to make sure nobody is changing the file size beneath us */ reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb); write_from = inode->i_size & (blocksize - 1); /* if we are on a block boundary, we are already unpacked. */ if (write_from == 0) { REISERFS_I(inode)->i_flags |= i_nopack_mask; goto out; } /* we unpack by finding the page with the tail, and calling ** reiserfs_prepare_write on that page. This will force a ** reiserfs_get_block to unpack the tail for us. */ index = inode->i_size >> PAGE_CACHE_SHIFT; mapping = inode->i_mapping; page = grab_cache_page(mapping, index); retval = -ENOMEM; if (!page) { goto out; } retval = reiserfs_prepare_write(NULL, page, write_from, write_from); if (retval) goto out_unlock; /* conversion can change page contents, must flush */ flush_dcache_page(page); retval = reiserfs_commit_write(NULL, page, write_from, write_from); REISERFS_I(inode)->i_flags |= i_nopack_mask; out_unlock: unlock_page(page); page_cache_release(page); out: mutex_unlock(&inode->i_mutex); reiserfs_write_unlock_once(inode->i_sb, depth); return retval; }
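/*
 * Editor's sketch (not part of the original file): a minimal, hypothetical
 * userspace example of triggering the unpack path above, assuming the
 * exported <linux/reiserfs_fs.h> header provides REISERFS_IOC_UNPACK and
 * that the path names a regular file on a reiserfs mount.  Per the ioctl
 * handler, the argument must be non-zero for the unpack to run.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/reiserfs_fs.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/reiserfs/somefile", O_RDONLY);
 *		if (fd < 0) {
 *			perror("open");
 *			return 1;
 *		}
 *		if (ioctl(fd, REISERFS_IOC_UNPACK, 1) < 0)
 *			perror("REISERFS_IOC_UNPACK");
 *		close(fd);
 *		return 0;
 *	}
 */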
gpl-2.0
PerLycke/android_kernel_moto_shamu
security/selinux/netport.c
289
6475
/*
 * Network port table
 *
 * SELinux must keep a mapping of network ports to labels/SIDs.  This
 * mapping is maintained as part of the normal policy but a fast cache is
 * needed to reduce the lookup overhead.
 *
 * Author: Paul Moore <paul@paul-moore.com>
 *
 * This code is heavily based on the "netif" concept originally developed by
 * James Morris <jmorris@redhat.com>
 *   (see security/selinux/netif.c for more information)
 *
 */

/*
 * (c) Copyright Hewlett-Packard Development Company, L.P., 2008
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "netport.h"
#include "objsec.h"

#define SEL_NETPORT_HASH_SIZE       256
#define SEL_NETPORT_HASH_BKT_LIMIT   16

struct sel_netport_bkt {
	int size;
	struct list_head list;
};

struct sel_netport {
	struct netport_security_struct psec;

	struct list_head list;
	struct rcu_head rcu;
};

/* NOTE: we are using a combined hash table for both IPv4 and IPv6, the reason
 * for this is that I suspect most users will not make heavy use of both
 * address families at the same time so one table will usually end up wasted,
 * if this becomes a problem we can always add a hash table for each address
 * family later */

static LIST_HEAD(sel_netport_list);
static DEFINE_SPINLOCK(sel_netport_lock);
static struct sel_netport_bkt sel_netport_hash[SEL_NETPORT_HASH_SIZE];

/**
 * sel_netport_hashfn - Hashing function for the port table
 * @pnum: port number
 *
 * Description:
 * This is the hashing function for the port table, it returns the bucket
 * number for the given port.
 *
 */
static unsigned int sel_netport_hashfn(u16 pnum)
{
	return (pnum & (SEL_NETPORT_HASH_SIZE - 1));
}

/**
 * sel_netport_find - Search for a port record
 * @protocol: protocol
 * @pnum: port number
 *
 * Description:
 * Search the network port table and return the matching record.  If an entry
 * can not be found in the table return NULL.
 *
 */
static struct sel_netport *sel_netport_find(u8 protocol, u16 pnum)
{
	unsigned int idx;
	struct sel_netport *port;

	idx = sel_netport_hashfn(pnum);
	list_for_each_entry_rcu(port, &sel_netport_hash[idx].list, list)
		if (port->psec.port == pnum && port->psec.protocol == protocol)
			return port;

	return NULL;
}

/**
 * sel_netport_insert - Insert a new port into the table
 * @port: the new port record
 *
 * Description:
 * Add a new port record to the network address hash table.
 *
 */
static void sel_netport_insert(struct sel_netport *port)
{
	unsigned int idx;

	/* we need to impose a limit on the growth of the hash table so check
	 * this bucket to make sure it is within the specified bounds */
	idx = sel_netport_hashfn(port->psec.port);
	list_add_rcu(&port->list, &sel_netport_hash[idx].list);
	if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) {
		struct sel_netport *tail;
		tail = list_entry(
			rcu_dereference_protected(
				sel_netport_hash[idx].list.prev,
				lockdep_is_held(&sel_netport_lock)),
			struct sel_netport, list);
		list_del_rcu(&tail->list);
		kfree_rcu(tail, rcu);
	} else
		sel_netport_hash[idx].size++;
}

/**
 * sel_netport_sid_slow - Lookup the SID of a network address using the policy
 * @protocol: protocol
 * @pnum: port
 * @sid: port SID
 *
 * Description:
 * This function determines the SID of a network port by querying the security
 * policy.  The result is added to the network port table to speed up future
 * queries.  Returns zero on success, negative values on failure.
 *
 */
static int sel_netport_sid_slow(u8 protocol, u16 pnum, u32 *sid)
{
	int ret = -ENOMEM;
	struct sel_netport *port;
	struct sel_netport *new = NULL;

	spin_lock_bh(&sel_netport_lock);
	port = sel_netport_find(protocol, pnum);
	if (port != NULL) {
		*sid = port->psec.sid;
		spin_unlock_bh(&sel_netport_lock);
		return 0;
	}
	new = kzalloc(sizeof(*new), GFP_ATOMIC);
	if (new == NULL)
		goto out;
	ret = security_port_sid(protocol, pnum, sid);
	if (ret != 0)
		goto out;

	new->psec.port = pnum;
	new->psec.protocol = protocol;
	new->psec.sid = *sid;
	sel_netport_insert(new);

out:
	spin_unlock_bh(&sel_netport_lock);
	if (unlikely(ret)) {
		printk(KERN_WARNING
		       "SELinux: failure in sel_netport_sid_slow(),"
		       " unable to determine network port label\n");
		kfree(new);
	}
	return ret;
}

/**
 * sel_netport_sid - Lookup the SID of a network port
 * @protocol: protocol
 * @pnum: port
 * @sid: port SID
 *
 * Description:
 * This function determines the SID of a network port using the fastest method
 * possible.  First the port table is queried, but if an entry can't be found
 * then the policy is queried and the result is added to the table to speed up
 * future queries.  Returns zero on success, negative values on failure.
 *
 */
int sel_netport_sid(u8 protocol, u16 pnum, u32 *sid)
{
	struct sel_netport *port;

	rcu_read_lock();
	port = sel_netport_find(protocol, pnum);
	if (port != NULL) {
		*sid = port->psec.sid;
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	return sel_netport_sid_slow(protocol, pnum, sid);
}

/**
 * sel_netport_flush - Flush the entire network port table
 *
 * Description:
 * Remove all entries from the network address table.
 *
 */
void sel_netport_flush(void)
{
	unsigned int idx;
	struct sel_netport *port, *port_tmp;

	spin_lock_bh(&sel_netport_lock);
	for (idx = 0; idx < SEL_NETPORT_HASH_SIZE; idx++) {
		list_for_each_entry_safe(port, port_tmp,
					 &sel_netport_hash[idx].list, list) {
			list_del_rcu(&port->list);
			kfree_rcu(port, rcu);
		}
		sel_netport_hash[idx].size = 0;
	}
	spin_unlock_bh(&sel_netport_lock);
}

static __init int sel_netport_init(void)
{
	int iter;

	if (!selinux_enabled)
		return 0;

	for (iter = 0; iter < SEL_NETPORT_HASH_SIZE; iter++) {
		INIT_LIST_HEAD(&sel_netport_hash[iter].list);
		sel_netport_hash[iter].size = 0;
	}

	return 0;
}

__initcall(sel_netport_init);
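/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * in-kernel caller resolving the label of TCP port 80.  On a cache miss
 * sel_netport_sid() falls back to sel_netport_sid_slow(), which queries
 * the policy and caches the result for later lookups.
 */
static int __maybe_unused example_port_lookup(void)
{
	u32 sid;
	int err;

	err = sel_netport_sid(IPPROTO_TCP, 80, &sid);
	if (err)
		return err;	/* policy lookup or allocation failed */

	/* sid now labels TCP/80; the mapping is cached in sel_netport_hash */
	return 0;
}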
gpl-2.0
JoeyJiao/kernel-2.6.32-V858
drivers/mmc/core/bus.c
545
6006
/*
 *  linux/drivers/mmc/core/bus.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright (C) 2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  MMC card bus driver model
 */

#include <linux/device.h>
#include <linux/err.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "core.h"
#include "sdio_cis.h"
#include "bus.h"

#define dev_to_mmc_card(d)	container_of(d, struct mmc_card, dev)
#define to_mmc_driver(d)	container_of(d, struct mmc_driver, drv)

static ssize_t mmc_type_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct mmc_card *card = dev_to_mmc_card(dev);

	switch (card->type) {
	case MMC_TYPE_MMC:
		return sprintf(buf, "MMC\n");
	case MMC_TYPE_SD:
		return sprintf(buf, "SD\n");
	case MMC_TYPE_SDIO:
		return sprintf(buf, "SDIO\n");
	default:
		return -EFAULT;
	}
}

static struct device_attribute mmc_dev_attrs[] = {
	__ATTR(type, S_IRUGO, mmc_type_show, NULL),
	__ATTR_NULL,
};

/*
 * This currently matches any MMC driver to any MMC card - drivers
 * themselves make the decision whether to drive this card in their
 * probe method.
 */
static int mmc_bus_match(struct device *dev, struct device_driver *drv)
{
	return 1;
}

static int
mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct mmc_card *card = dev_to_mmc_card(dev);
	const char *type;
	int retval = 0;

	switch (card->type) {
	case MMC_TYPE_MMC:
		type = "MMC";
		break;
	case MMC_TYPE_SD:
		type = "SD";
		break;
	case MMC_TYPE_SDIO:
		type = "SDIO";
		break;
	default:
		type = NULL;
	}

	if (type) {
		retval = add_uevent_var(env, "MMC_TYPE=%s", type);
		if (retval)
			return retval;
	}

	retval = add_uevent_var(env, "MMC_NAME=%s", mmc_card_name(card));
	if (retval)
		return retval;

	/*
	 * Request the mmc_block device.  Note that this is a direct request
	 * for the module; it carries no information as to what is inserted.
	 */
	retval = add_uevent_var(env, "MODALIAS=mmc:block");

	return retval;
}

static int mmc_bus_probe(struct device *dev)
{
	struct mmc_driver *drv = to_mmc_driver(dev->driver);
	struct mmc_card *card = dev_to_mmc_card(dev);

	return drv->probe(card);
}

static int mmc_bus_remove(struct device *dev)
{
	struct mmc_driver *drv = to_mmc_driver(dev->driver);
	struct mmc_card *card = dev_to_mmc_card(dev);

	drv->remove(card);

	return 0;
}

static int mmc_bus_suspend(struct device *dev, pm_message_t state)
{
	struct mmc_driver *drv = to_mmc_driver(dev->driver);
	struct mmc_card *card = dev_to_mmc_card(dev);
	int ret = 0;

	if (dev->driver && drv->suspend)
		ret = drv->suspend(card, state);
	return ret;
}

static int mmc_bus_resume(struct device *dev)
{
	struct mmc_driver *drv = to_mmc_driver(dev->driver);
	struct mmc_card *card = dev_to_mmc_card(dev);
	int ret = 0;

	if (dev->driver && drv->resume)
		ret = drv->resume(card);
	return ret;
}

static struct bus_type mmc_bus_type = {
	.name		= "mmc",
	.dev_attrs	= mmc_dev_attrs,
	.match		= mmc_bus_match,
	.uevent		= mmc_bus_uevent,
	.probe		= mmc_bus_probe,
	.remove		= mmc_bus_remove,
	.suspend	= mmc_bus_suspend,
	.resume		= mmc_bus_resume,
};

int mmc_register_bus(void)
{
	return bus_register(&mmc_bus_type);
}

void mmc_unregister_bus(void)
{
	bus_unregister(&mmc_bus_type);
}

/**
 *	mmc_register_driver - register a media driver
 *	@drv: MMC media driver
 */
int mmc_register_driver(struct mmc_driver *drv)
{
	drv->drv.bus = &mmc_bus_type;
	return driver_register(&drv->drv);
}

EXPORT_SYMBOL(mmc_register_driver);

/**
 *	mmc_unregister_driver - unregister a media driver
 *	@drv: MMC media driver
 */
void mmc_unregister_driver(struct mmc_driver *drv)
{
	drv->drv.bus = &mmc_bus_type;
	driver_unregister(&drv->drv);
}

EXPORT_SYMBOL(mmc_unregister_driver);

static void mmc_release_card(struct device *dev)
{
	struct mmc_card *card = dev_to_mmc_card(dev);

	sdio_free_common_cis(card);

	if (card->info)
		kfree(card->info);

	kfree(card);
}

/*
 * Allocate and initialise a new MMC card structure.
 */
struct mmc_card *mmc_alloc_card(struct mmc_host *host, struct device_type *type)
{
	struct mmc_card *card;

	card = kzalloc(sizeof(struct mmc_card), GFP_KERNEL);
	if (!card)
		return ERR_PTR(-ENOMEM);

	card->host = host;

	device_initialize(&card->dev);

	card->dev.parent = mmc_classdev(host);
	card->dev.bus = &mmc_bus_type;
	card->dev.release = mmc_release_card;
	card->dev.type = type;

	return card;
}

/*
 * Register a new MMC card with the driver model.
 */
int mmc_add_card(struct mmc_card *card)
{
	int ret;
	const char *type;

	dev_set_name(&card->dev, "%s:%04x", mmc_hostname(card->host), card->rca);

	switch (card->type) {
	case MMC_TYPE_MMC:
		type = "MMC";
		break;
	case MMC_TYPE_SD:
		type = "SD";
		if (mmc_card_blockaddr(card))
			type = "SDHC";
		break;
	case MMC_TYPE_SDIO:
		type = "SDIO";
		break;
	default:
		type = "?";
		break;
	}

	if (mmc_host_is_spi(card->host)) {
		printk(KERN_INFO "%s: new %s%s card on SPI\n",
			mmc_hostname(card->host),
			mmc_card_highspeed(card) ? "high speed " : "",
			type);
	} else {
		printk(KERN_INFO "%s: new %s%s card at address %04x\n",
			mmc_hostname(card->host),
			mmc_card_highspeed(card) ? "high speed " : "",
			type, card->rca);
	}

	ret = device_add(&card->dev);
	if (ret)
		return ret;

#ifdef CONFIG_DEBUG_FS
	mmc_add_card_debugfs(card);
#endif

	mmc_card_set_present(card);

	return 0;
}

/*
 * Unregister a new MMC card with the driver model, and
 * (eventually) free it.
 */
void mmc_remove_card(struct mmc_card *card)
{
#ifdef CONFIG_DEBUG_FS
	mmc_remove_card_debugfs(card);
#endif

	if (mmc_card_present(card)) {
		if (mmc_host_is_spi(card->host)) {
			printk(KERN_INFO "%s: SPI card removed\n",
				mmc_hostname(card->host));
		} else {
			printk(KERN_INFO "%s: card %04x removed\n",
				mmc_hostname(card->host), card->rca);
		}
		device_del(&card->dev);
	}

	put_device(&card->dev);
}
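/*
 * Illustrative sketch (not part of the original file): a minimal,
 * hypothetical media driver sitting on this bus.  Because mmc_bus_match()
 * accepts every card, the probe routine itself decides whether to claim a
 * card, returning -ENODEV to pass it over.  The names (example_probe,
 * example_mmc_driver, ...) are invented for illustration, and a real module
 * would also include <linux/module.h> for module_init()/module_exit().
 */
static int example_probe(struct mmc_card *card)
{
	if (card->type != MMC_TYPE_SD)
		return -ENODEV;	/* not ours; let another driver bind */
	return 0;
}

static void example_remove(struct mmc_card *card)
{
	/* release any per-card state acquired in probe */
}

static struct mmc_driver example_mmc_driver = {
	.drv	= {
		.name	= "example_mmc",
	},
	.probe	= example_probe,
	.remove	= example_remove,
};

static int __init example_mmc_init(void)
{
	return mmc_register_driver(&example_mmc_driver);
}

static void __exit example_mmc_exit(void)
{
	mmc_unregister_driver(&example_mmc_driver);
}

module_init(example_mmc_init);
module_exit(example_mmc_exit);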
gpl-2.0