text
stringlengths
2
100k
meta
dict
/*
 * Copyright (C) 2012 Gabor Juhos <juhosg@openwrt.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>     /* for unlink() */
#include <libgen.h>
#include <getopt.h>     /* for getopt() */
#include <stdarg.h>
#include <errno.h>
#include <sys/stat.h>
#include <arpa/inet.h>
#include <netinet/in.h>

/* Sizes of the img_header string fields, including the terminating NUL. */
#define MAX_MODEL_LEN		20
#define MAX_SIGNATURE_LEN	30
#define MAX_REGION_LEN		4
#define MAX_VERSION_LEN		12

#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))

/* One input file plus the amount of space it occupies in the output image. */
struct file_info {
	char		*file_name;	/* name of the file */
	uint32_t	file_size;	/* length of the file */
	uint32_t	write_size;	/* space reserved for it in the image */
};

/* Firmware image header as written to the output; multi-byte integer
 * fields are stored big-endian (converted with htonl() below). */
struct img_header {
	uint32_t	checksum;	/* byte-sum of the payload */
	uint32_t	image_size;	/* payload length in bytes */
	uint32_t	kernel_size;
	char		model[MAX_MODEL_LEN];
	char		signature[MAX_SIGNATURE_LEN];
	char		region[MAX_REGION_LEN];
	char		version[MAX_VERSION_LEN];
	unsigned char	header_len;
	unsigned char	is_tgz;
	unsigned char	pad[4];
} __attribute__ ((packed));

/*
 * Globals
 */
static char *ofname;
static char *progname;
static char *model;
static char *signature;
static char *region = "DEF";
static char *version;

static struct file_info kernel_info;
static struct file_info rootfs_info;
static uint32_t kernel_size;
static uint32_t image_size;
static int combined;

/*
 * Message macros
 */
#define ERR(fmt, ...) do { \
	fflush(0); \
	fprintf(stderr, "[%s] *** error: " fmt "\n", \
			progname, ## __VA_ARGS__ ); \
} while (0)

#define ERRS(fmt, ...) do { \
	int save = errno; \
	fflush(0); \
	fprintf(stderr, "[%s] *** error: " fmt " (%s)\n", \
			progname, ## __VA_ARGS__, strerror(save)); \
} while (0)

#define DBG(fmt, ...) do { \
	fprintf(stderr, "[%s] " fmt "\n", progname, ## __VA_ARGS__ ); \
} while (0)

/* Print usage to stdout (success) or stderr (failure) and exit. */
static void usage(int status)
{
	FILE *stream = (status != EXIT_SUCCESS) ? stderr : stdout;

	fprintf(stream, "Usage: %s [OPTIONS...]\n", progname);
	fprintf(stream,
"\n"
"Options:\n"
" -k <file> read kernel image from the file <file>\n"
" -c use the kernel image as a combined image\n"
" -M <model> set model to <model>\n"
" -o <file> write output to the file <file>\n"
" -r <file> read rootfs image from the file <file>\n"
" -S <signature> set image signature to <signature>\n"
" -R <region> set image region to <region>\n"
" -V <version> set image version to <version>\n"
" -I <size> set image size to <size>\n"
" -K <size> set kernel size to <size>\n"
" -h show this screen\n"
	);

	exit(status);
}

/* Parse <arg> as an unsigned 32-bit integer (base auto-detected by
 * strtoul). Returns 0 and stores the value in *val on success, -1 if the
 * string is empty, has trailing junk, or overflows. */
int str2u32(char *arg, uint32_t *val)
{
	char *err = NULL;
	uint32_t t;

	errno = 0;
	t = strtoul(arg, &err, 0);
	if (errno || (err == arg) || ((err != NULL) && *err)) {
		return -1;
	}

	*val = t;
	return 0;
}

/* Fill in file_size/write_size of <fdata> from stat(2).
 * A NULL file name is not an error (the file is simply absent). */
static int get_file_stat(struct file_info *fdata)
{
	struct stat st;
	int res;

	if (fdata->file_name == NULL)
		return 0;

	res = stat(fdata->file_name, &st);
	if (res) {
		ERRS("stat failed on %s", fdata->file_name);
		return res;
	}

	fdata->file_size = st.st_size;
	fdata->write_size = fdata->file_size;
	return 0;
}

/* Read the whole file described by <fdata> into <buf>.
 * <buf> must be at least fdata->file_size bytes. */
static int read_to_buf(struct file_info *fdata, char *buf)
{
	FILE *f;
	int ret = EXIT_FAILURE;

	/* "rb": the payload is binary data */
	f = fopen(fdata->file_name, "rb");
	if (f == NULL) {
		ERRS("could not open \"%s\" for reading", fdata->file_name);
		goto out;
	}

	/* Check the fread() return value; errno is not reliably set on a
	 * short read, so the original errno-based test could miss failures. */
	if (fdata->file_size != 0 &&
	    fread(buf, fdata->file_size, 1, f) != 1) {
		ERR("unable to read from file \"%s\"", fdata->file_name);
		goto out_close;
	}

	ret = EXIT_SUCCESS;

out_close:
	fclose(f);
out:
	return ret;
}

/* Validate the command-line options and compute the effective
 * kernel/rootfs write sizes. Returns 0 when the image can be built. */
static int check_options(void)
{
	int ret;

#define CHKSTR(_name, _msg)				\
	do {						\
		if (_name == NULL) {			\
			ERR("no %s specified", _msg);	\
			return -1;			\
		}					\
	} while (0)

/* field_len is size_t so the strlen() comparison is unsigned/unsigned,
 * avoiding a signed/unsigned mismatch. */
#define CHKSTRLEN(_name, _msg)						\
	do {								\
		size_t field_len;					\
		CHKSTR(_name, _msg);					\
		field_len = FIELD_SIZEOF(struct img_header, _name) - 1;	\
		if (strlen(_name) > field_len) {			\
			ERR("%s is too long, max length is %zu",	\
			    _msg, field_len);				\
			return -1;					\
		}							\
	} while (0)

	CHKSTRLEN(model, "model");
	CHKSTRLEN(signature, "signature");
	CHKSTRLEN(region, "region");
	CHKSTRLEN(version, "version");
	CHKSTR(ofname, "output file");
	CHKSTR(kernel_info.file_name, "kernel image");

	ret = get_file_stat(&kernel_info);
	if (ret)
		return ret;

	if (combined) {
		if (!kernel_size) {
			ERR("kernel size must be specified for combined images");
			return -1;	/* stray '\' after this stmt removed */
		}

		if (!image_size)
			image_size = kernel_info.file_size;

		if (kernel_info.file_size > image_size) {
			ERR("kernel image is too big");
			return -1;
		}

		/* the combined image occupies the whole payload */
		kernel_info.write_size = image_size;
	} else {
		CHKSTR(rootfs_info.file_name, "rootfs image");

		ret = get_file_stat(&rootfs_info);
		if (ret)
			return ret;

		if (kernel_size) {
			/* override kernel size */
			kernel_info.write_size = kernel_size;
		}

		if (image_size) {
			if (image_size < kernel_info.write_size)
				kernel_info.write_size = image_size;

			/* override rootfs size */
			rootfs_info.write_size =
				image_size - kernel_info.write_size;
		}

		if (kernel_info.file_size > kernel_info.write_size) {
			ERR("kernel image is too big");
			return -1;
		}

		if (rootfs_info.file_size > rootfs_info.write_size) {
			ERR("rootfs image is too big");
			return -1;
		}
	}

	return 0;
}

/* Write <len> bytes of <data> to the output file; the partial output is
 * unlinked on any failure. */
static int write_fw(char *data, int len)
{
	FILE *f;
	int ret = EXIT_FAILURE;

	/* "wb": the image is binary data */
	f = fopen(ofname, "wb");
	if (f == NULL) {
		ERRS("could not open \"%s\" for writing", ofname);
		goto out;
	}

	/* Check the fwrite() return value; errno is not reliably set on a
	 * short write. */
	if (len != 0 && fwrite(data, len, 1, f) != 1) {
		ERR("unable to write output file");
		goto out_close;
	}

	DBG("firmware file \"%s\" completed", ofname);

	ret = EXIT_SUCCESS;

out_close:
	/* fclose() flushes; a failure here means data never hit the disk */
	if (fclose(f) != 0 && ret == EXIT_SUCCESS) {
		ERRS("unable to close output file");
		ret = EXIT_FAILURE;
	}
	if (ret != EXIT_SUCCESS) {
		unlink(ofname);
	}
out:
	return ret;
}

/* Simple additive checksum over <len> bytes at <p>. */
static uint32_t get_csum(unsigned char *p, uint32_t len)
{
	uint32_t csum = 0;

	while (len--)
		csum += *p++;

	return csum;
}

/* Assemble header + kernel (+ rootfs) in memory and write the image. */
static int build_fw(void)
{
	int buflen;
	char *buf;
	char *p;
	uint32_t csum;
	struct img_header *hdr;
	int ret = EXIT_FAILURE;

	buflen = sizeof(struct img_header) +
		 kernel_info.write_size + rootfs_info.write_size;

	buf = malloc(buflen);
	if (!buf) {
		/* no trailing '\n' here; the ERR macro appends one */
		ERR("no memory for buffer");
		goto out;
	}
	/* zero-fill so padding between/after the payloads is deterministic */
	memset(buf, 0, buflen);

	p = buf + sizeof(struct img_header);

	/* read kernel data */
	ret = read_to_buf(&kernel_info, p);
	if (ret)
		goto out_free_buf;

	if (!combined) {
		p += kernel_info.write_size;

		/* read rootfs data */
		ret = read_to_buf(&rootfs_info, p);
		if (ret)
			goto out_free_buf;
	}

	csum = get_csum((unsigned char *)(buf + sizeof(struct img_header)),
			buflen - sizeof(struct img_header));

	/* fill firmware header */
	hdr = (struct img_header *) buf;
	hdr->checksum = htonl(csum);
	hdr->image_size = htonl(buflen - sizeof(struct img_header));
	if (!combined)
		hdr->kernel_size = htonl(kernel_info.write_size);
	else
		hdr->kernel_size = htonl(kernel_size);
	hdr->header_len = sizeof(struct img_header);
	/* check_options() guarantees each source string fits with a NUL,
	 * so these strncpy() calls always NUL-terminate */
	strncpy(hdr->model, model, sizeof(hdr->model));
	strncpy(hdr->signature, signature, sizeof(hdr->signature));
	strncpy(hdr->version, version, sizeof(hdr->version));
	strncpy(hdr->region, region, sizeof(hdr->region));

	ret = write_fw(buf, buflen);
	if (ret)
		goto out_free_buf;

	ret = EXIT_SUCCESS;

out_free_buf:
	free(buf);
out:
	return ret;
}

int main(int argc, char *argv[])
{
	int ret = EXIT_FAILURE;

	progname = basename(argv[0]);

	while (1) {
		int c;

		c = getopt(argc, argv, "M:S:V:R:k:K:I:r:o:hc");
		if (c == -1)
			break;

		switch (c) {
		case 'M':
			model = optarg;
			break;
		case 'S':
			signature = optarg;
			break;
		case 'V':
			version = optarg;
			break;
		case 'R':
			region = optarg;
			break;
		case 'k':
			kernel_info.file_name = optarg;
			break;
		case 'K':
			if (str2u32(optarg, &kernel_size)) {
				ERR("%s is invalid '%s'",
				    "kernel size", optarg);
				goto out;
			}
			break;
		case 'I':
			if (str2u32(optarg, &image_size)) {
				ERR("%s is invalid '%s'",
				    "image size", optarg);
				goto out;
			}
			break;
		case 'r':
			rootfs_info.file_name = optarg;
			break;
		case 'c':
			combined = 1;
			break;
		case 'o':
			ofname = optarg;
			break;
		case 'h':
			usage(EXIT_SUCCESS);
			break;
		default:
			usage(EXIT_FAILURE);
			break;
		}
	}

	ret = check_options();
	if (ret)
		goto out;

	ret = build_fw();

out:
	return ret;
}
{ "language": "C" }
/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>

#include "qib.h"
#include "qib_mad.h"

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
const u32 ib_qib_rnr_table[32] = {
	655360,	/* 00: 655.36 */
	10,	/* 01:    .01 */
	20,	/* 02     .02 */
	30,	/* 03:    .03 */
	40,	/* 04:    .04 */
	60,	/* 05:    .06 */
	80,	/* 06:    .08 */
	120,	/* 07:    .12 */
	160,	/* 08:    .16 */
	240,	/* 09:    .24 */
	320,	/* 0A:    .32 */
	480,	/* 0B:    .48 */
	640,	/* 0C:    .64 */
	960,	/* 0D:    .96 */
	1280,	/* 0E:   1.28 */
	1920,	/* 0F:   1.92 */
	2560,	/* 10:   2.56 */
	3840,	/* 11:   3.84 */
	5120,	/* 12:   5.12 */
	7680,	/* 13:   7.68 */
	10240,	/* 14:  10.24 */
	15360,	/* 15:  15.36 */
	20480,	/* 16:  20.48 */
	30720,	/* 17:  30.72 */
	40960,	/* 18:  40.96 */
	61440,	/* 19:  61.44 */
	81920,	/* 1A:  81.92 */
	122880,	/* 1B: 122.88 */
	163840,	/* 1C: 163.84 */
	245760,	/* 1D: 245.76 */
	327680,	/* 1E: 327.68 */
	491520	/* 1F: 491.52 */
};

/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK, 0 if an LKEY check failed (in which case a
 * IB_WC_LOC_PROT_ERR completion has already been queued).
 */
static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct qib_lkey_table *rkt;
	struct qib_pd *pd;
	struct qib_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->lk_table;
	/* for an SRQ, the PD comes from the SRQ, not the QP */
	pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	/* i walks the WQE's SGEs, j counts the ones actually kept
	 * (zero-length SGEs are skipped) */
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

bad_lkey:
	/* drop the MR references taken by the successful qib_lkey_ok()
	 * calls above */
	while (j) {
		struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		atomic_dec(&sge->mr->refcount);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}

/**
 * qib_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct qib_rq *rq;
	struct qib_rwq *wq;
	struct qib_srq *srq;
	struct qib_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	/* take the RWQEs from the SRQ if the QP is attached to one */
	if (qp->ibqp.srq) {
		srq = to_isrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	/* tail == head means the queue is empty */
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
	smp_rmb();
	wqe = get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !qib_init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		/* fire the SRQ limit event once, then disarm the limit */
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void qib_migrate_qp(struct qib_qp *qp)
{
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;

	/* notify the consumer that the path migration completed */
	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

/*
 * Return the source GUID for the given index: index 0 is the per-port
 * GUID, indices >= 1 come from the port's GUID table.
 */
static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
{
	if (!index) {
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		return ppd->guid;
	} else
		return ibp->guids[index - 1];
}

/*
 * GID matches when the interface ID is equal and the subnet prefix is
 * either the expected one or the default prefix.
 */
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}

/*
 *
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the qib_migrate_qp() call.
 */
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		      int has_grh, struct qib_qp *qp, u32 bth0)
{
	__be64 guid;
	unsigned long flags;

	/* A migration request against an armed alternate path is checked
	 * against the alternate path attributes; otherwise the primary
	 * (remote) path attributes are used. */
	if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
		if (!has_grh) {
			if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
			    qp->alt_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
				      (u16)bth0,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      0, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
			goto err;
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		if (!has_grh) {
			if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp,
					 qp->remote_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
			    qp->remote_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_pkey_index))) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
				      (u16)bth0,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      0, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			goto err;
		/* a non-migration packet on a rearmed path re-arms it */
		if (qp->s_mig_state == IB_MIG_REARM &&
		    !(bth0 & IB_BTH_MIG_REQ))
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;

err:
	return 1;
}

/**
 * qib_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from qib_do_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void qib_ruc_loopback(struct qib_qp *sqp)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_qp *qp;
	struct qib_swqe *wqe;
	struct qib_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	int release;
	int ret;

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */
	qp = qib_lookup_qpn(ibp, sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) ||
	    !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= QIB_S_BUSY;

again:
	if (sqp->s_last == sqp->s_head)
		goto clr_busy;
	wqe = get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		ibp->n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof wc);
	send_status = IB_WC_SUCCESS;

	/* release == 1: qib_copy_sge drops the responder MR references */
	release = 1;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		ret = qib_get_rwqe(qp, 0);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = qib_get_rwqe(qp, 1);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		if (wqe->length == 0)
			break;
		if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->wr.wr.rdma.remote_addr,
					  wqe->wr.wr.rdma.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->wr.wr.rdma.remote_addr,
					  wqe->wr.wr.rdma.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		/* for a READ the data flows responder -> requester, so the
		 * requester's SGEs are released by qib_send_complete() */
		release = 0;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->wr.wr.atomic.remote_addr,
					  wqe->wr.wr.atomic.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = wqe->wr.wr.atomic.compare_add;
		*(u64 *) sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      sdata, wqe->wr.wr.atomic.swap);
		atomic_dec(&qp->r_sge.sge.mr->refcount);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

	/* copy loop: move min(remaining, current SGE, current segment)
	 * bytes at a time from the sender's SGE list to the receiver's */
	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				atomic_dec(&sge->mr->refcount);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			/* advance to the next MR segment */
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
	if (release)
		while (qp->r_sge.num_sge) {
			atomic_dec(&qp->r_sge.sge.mr->refcount);
			if (--qp->r_sge.num_sge)
				qp->r_sge.sge = *qp->r_sge.sg_list++;
		}

	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ibp->n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	qib_send_complete(sqp, wqe, send_status);
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	ibp->n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	/* retry count 7 means "retry forever"; don't decrement it */
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK))
		goto clr_busy;
	sqp->s_flags |= QIB_S_WAIT_RNR;
	sqp->s_timer.function = qib_rc_rnr_retry;
	sqp->s_timer.expires = jiffies +
		usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
	add_timer(&sqp->s_timer);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	qib_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	qib_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~QIB_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~QIB_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	/* drop the reference taken by qib_lookup_qpn() */
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/**
 * qib_make_grh - construct a GRH header
 * @ibp: a
 pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
	/* sgid_index 0 selects the port GUID, otherwise the GUID table */
	hdr->sgid.global.interface_id = grh->sgid_index ?
		ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}

/*
 * Build the LRH and BTH portions of qp->s_hdr for a UC/RC packet.
 * NOTE(review): appears to assume the caller holds the QP s_lock while
 * s_hdr/s_hdrwords are being filled in -- confirm against callers.
 */
void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
			 u32 bth0, u32 bth2)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;

	/* Construct the header. */
	/* extra_bytes pads the payload up to a 4-byte boundary */
	extra_bytes = -qp->s_cur_size & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = QIB_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
					       &qp->remote_ah_attr.grh,
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
	}
	lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
		qp->remote_ah_attr.sl << 4;
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	qp->s_hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
				       qp->remote_ah_attr.src_path_bits);
	bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);
}

/**
 * qib_do_send - perform a send on a QP
 * @work: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void qib_do_send(struct work_struct *work)
{
	struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int (*make_req)(struct qib_qp *qp);
	unsigned long flags;

	/* a DLID on our own port (ignoring path bits under the LMC mask)
	 * means the destination is this HCA: short-circuit via loopback */
	if ((qp->ibqp.qp_type == IB_QPT_RC ||
	     qp->ibqp.qp_type == IB_QPT_UC) &&
	    (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
		qib_ruc_loopback(qp);
		return;
	}

	if (qp->ibqp.qp_type == IB_QPT_RC)
		make_req = qib_make_rc_req;
	else if (qp->ibqp.qp_type == IB_QPT_UC)
		make_req = qib_make_uc_req;
	else
		make_req = qib_make_ud_req;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if (!qib_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	qp->s_flags |= QIB_S_BUSY;

	spin_unlock_irqrestore(&qp->s_lock, flags);

	do {
		/* Check for a constructed packet to be sent. */
		if (qp->s_hdrwords != 0) {
			/*
			 * If the packet cannot be sent now, return and
			 * the send tasklet will be woken up later.
			 */
			if (qib_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
					   qp->s_cur_sge, qp->s_cur_size))
				break;
			/* Record that s_hdr is empty. */
			qp->s_hdrwords = 0;
		}
	} while (make_req(qp));
}

/*
 * This should be called with s_lock held.
 */
void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
		       enum ib_wc_status status)
{
	u32 old_last, last;
	unsigned i;

	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
		return;

	/* release the MR references held by the WQE's SGEs */
	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct qib_sge *sge = &wqe->sg_list[i];

		atomic_dec(&sge->mr->refcount);
	}
	/* datagram QPs also hold a reference on the address handle */
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);

	/* See ch. 11.2.4.1 and 10.7.3.1 */
	if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    status != IB_WC_SUCCESS) {
		struct ib_wc wc;

		memset(&wc, 0, sizeof wc);
		wc.wr_id = wqe->wr.wr_id;
		wc.status = status;
		wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
		wc.qp = &qp->ibqp;
		if (status == IB_WC_SUCCESS)
			wc.byte_len = wqe->length;
		qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
			     status != IB_WC_SUCCESS);
	}

	/* advance s_last and drag the other ring indices along if they
	 * were pointing at the completed entry */
	last = qp->s_last;
	old_last = last;
	if (++last >= qp->s_size)
		last = 0;
	qp->s_last = last;
	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}
{ "language": "C" }
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2014 Freescale Semiconductor, Inc. */ #include <linux/clk-provider.h> #include <linux/err.h> #include <linux/io.h> #include <linux/slab.h> #include "clk.h" /** * struct clk_gate_exclusive - i.MX specific gate clock which is mutually * exclusive with other gate clocks * * @gate: the parent class * @exclusive_mask: mask of gate bits which are mutually exclusive to this * gate clock * * The imx exclusive gate clock is a subclass of basic clk_gate * with an addtional mask to indicate which other gate bits in the same * register is mutually exclusive to this gate clock. */ struct clk_gate_exclusive { struct clk_gate gate; u32 exclusive_mask; }; static int clk_gate_exclusive_enable(struct clk_hw *hw) { struct clk_gate *gate = to_clk_gate(hw); struct clk_gate_exclusive *exgate = container_of(gate, struct clk_gate_exclusive, gate); u32 val = readl(gate->reg); if (val & exgate->exclusive_mask) return -EBUSY; return clk_gate_ops.enable(hw); } static void clk_gate_exclusive_disable(struct clk_hw *hw) { clk_gate_ops.disable(hw); } static int clk_gate_exclusive_is_enabled(struct clk_hw *hw) { return clk_gate_ops.is_enabled(hw); } static const struct clk_ops clk_gate_exclusive_ops = { .enable = clk_gate_exclusive_enable, .disable = clk_gate_exclusive_disable, .is_enabled = clk_gate_exclusive_is_enabled, }; struct clk_hw *imx_clk_hw_gate_exclusive(const char *name, const char *parent, void __iomem *reg, u8 shift, u32 exclusive_mask) { struct clk_gate_exclusive *exgate; struct clk_gate *gate; struct clk_hw *hw; struct clk_init_data init; int ret; if (exclusive_mask == 0) return ERR_PTR(-EINVAL); exgate = kzalloc(sizeof(*exgate), GFP_KERNEL); if (!exgate) return ERR_PTR(-ENOMEM); gate = &exgate->gate; init.name = name; init.ops = &clk_gate_exclusive_ops; init.flags = CLK_SET_RATE_PARENT; init.parent_names = parent ? &parent : NULL; init.num_parents = parent ? 
1 : 0; gate->reg = reg; gate->bit_idx = shift; gate->lock = &imx_ccm_lock; gate->hw.init = &init; exgate->exclusive_mask = exclusive_mask; hw = &gate->hw; ret = clk_hw_register(NULL, hw); if (ret) { kfree(gate); return ERR_PTR(ret); } return hw; }
{ "language": "C" }
/****************************************************************************** Copyright (C) 2017 by Hugh Bailey <jim@obsproject.com> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ #include "obs-scripting-lua.h" #include "cstrcache.h" #include <obs-module.h> /* ========================================================================= */ static inline const char *get_table_string_(lua_State *script, int idx, const char *name, const char *func) { const char *str = ""; lua_pushstring(script, name); lua_gettable(script, idx - 1); if (!lua_isstring(script, -1)) warn("%s: no item '%s' of type %s", func, name, "string"); else str = cstrcache_get(lua_tostring(script, -1)); lua_pop(script, 1); return str; } static inline int get_table_int_(lua_State *script, int idx, const char *name, const char *func) { int val = 0; lua_pushstring(script, name); lua_gettable(script, idx - 1); val = (int)lua_tointeger(script, -1); lua_pop(script, 1); UNUSED_PARAMETER(func); return val; } static inline void get_callback_from_table_(lua_State *script, int idx, const char *name, int *p_reg_idx, const char *func) { *p_reg_idx = LUA_REFNIL; lua_pushstring(script, name); lua_gettable(script, idx - 1); if (!lua_isfunction(script, -1)) { if (!lua_isnil(script, -1)) { warn("%s: item '%s' is not a function", func, name); } lua_pop(script, 1); } else { *p_reg_idx = 
luaL_ref(script, LUA_REGISTRYINDEX); } } #define get_table_string(script, idx, name) \ get_table_string_(script, idx, name, __FUNCTION__) #define get_table_int(script, idx, name) \ get_table_int_(script, idx, name, __FUNCTION__) #define get_callback_from_table(script, idx, name, p_reg_idx) \ get_callback_from_table_(script, idx, name, p_reg_idx, __FUNCTION__) bool ls_get_libobs_obj_(lua_State *script, const char *type, int lua_idx, void *libobs_out, const char *id, const char *func, int line) { swig_type_info *info = SWIG_TypeQuery(script, type); if (info == NULL) { warn("%s:%d: SWIG could not find type: %s%s%s", func, line, id ? id : "", id ? "::" : "", type); return false; } int ret = SWIG_ConvertPtr(script, lua_idx, libobs_out, info, 0); if (!SWIG_IsOK(ret)) { warn("%s:%d: SWIG failed to convert lua object to obs " "object: %s%s%s", func, line, id ? id : "", id ? "::" : "", type); return false; } return true; } #define ls_get_libobs_obj(type, lua_index, obs_obj) \ ls_get_libobs_obj_(ls->script, #type " *", lua_index, obs_obj, ls->id, \ __FUNCTION__, __LINE__) bool ls_push_libobs_obj_(lua_State *script, const char *type, void *libobs_in, bool ownership, const char *id, const char *func, int line) { swig_type_info *info = SWIG_TypeQuery(script, type); if (info == NULL) { warn("%s:%d: SWIG could not find type: %s%s%s", func, line, id ? id : "", id ? 
"::" : "", type); return false; } SWIG_NewPointerObj(script, libobs_in, info, (int)ownership); return true; } #define ls_push_libobs_obj(type, obs_obj, ownership) \ ls_push_libobs_obj_(ls->script, #type " *", obs_obj, ownership, \ ls->id, __FUNCTION__, __LINE__) /* ========================================================================= */ struct obs_lua_data; struct obs_lua_source { struct obs_lua_script *data; lua_State *script; const char *id; const char *display_name; int func_create; int func_destroy; int func_get_width; int func_get_height; int func_get_defaults; int func_get_properties; int func_update; int func_activate; int func_deactivate; int func_show; int func_hide; int func_video_tick; int func_video_render; int func_save; int func_load; pthread_mutex_t definition_mutex; struct obs_lua_data *first_source; struct obs_lua_source *next; struct obs_lua_source **p_prev_next; }; extern pthread_mutex_t lua_source_def_mutex; struct obs_lua_source *first_source_def = NULL; struct obs_lua_data { obs_source_t *source; struct obs_lua_source *ls; int lua_data_ref; struct obs_lua_data *next; struct obs_lua_data **p_prev_next; }; #define call_func(name, args, rets) \ call_func_(ls->script, ls->func_##name, args, rets, #name, \ ls->display_name) #define have_func(name) (ls->func_##name != LUA_REFNIL) #define ls_push_data() \ lua_rawgeti(ls->script, LUA_REGISTRYINDEX, ld->lua_data_ref) #define ls_pop(count) lua_pop(ls->script, count) #define lock_script() \ struct obs_lua_script *__data = ls->data; \ struct obs_lua_script *__prev_script = current_lua_script; \ current_lua_script = __data; \ pthread_mutex_lock(&__data->mutex); #define unlock_script() \ pthread_mutex_unlock(&__data->mutex); \ current_lua_script = __prev_script; static const char *obs_lua_source_get_name(void *type_data) { struct obs_lua_source *ls = type_data; return ls->display_name; } static void *obs_lua_source_create(obs_data_t *settings, obs_source_t *source) { struct obs_lua_source *ls = 
obs_source_get_type_data(source); struct obs_lua_data *data = NULL; pthread_mutex_lock(&ls->definition_mutex); if (!ls->script) goto fail; if (!have_func(create)) goto fail; lock_script(); ls_push_libobs_obj(obs_data_t, settings, false); ls_push_libobs_obj(obs_source_t, source, false); call_func(create, 2, 1); int lua_data_ref = luaL_ref(ls->script, LUA_REGISTRYINDEX); if (lua_data_ref != LUA_REFNIL) { data = bmalloc(sizeof(*data)); data->source = source; data->ls = ls; data->lua_data_ref = lua_data_ref; } unlock_script(); if (data) { struct obs_lua_data *next = ls->first_source; data->next = next; data->p_prev_next = &ls->first_source; if (next) next->p_prev_next = &data->next; ls->first_source = data; } fail: pthread_mutex_unlock(&ls->definition_mutex); return data; } static void call_destroy(struct obs_lua_data *ld) { struct obs_lua_source *ls = ld->ls; ls_push_data(); call_func(destroy, 1, 0); luaL_unref(ls->script, LUA_REGISTRYINDEX, ld->lua_data_ref); ld->lua_data_ref = LUA_REFNIL; } static void obs_lua_source_destroy(void *data) { struct obs_lua_data *ld = data; struct obs_lua_source *ls = ld->ls; struct obs_lua_data *next; pthread_mutex_lock(&ls->definition_mutex); if (!ls->script) goto fail; if (!have_func(destroy)) goto fail; lock_script(); call_destroy(ld); unlock_script(); fail: next = ld->next; *ld->p_prev_next = next; if (next) next->p_prev_next = ld->p_prev_next; bfree(data); pthread_mutex_unlock(&ls->definition_mutex); } static uint32_t obs_lua_source_get_width(void *data) { struct obs_lua_data *ld = data; struct obs_lua_source *ls = ld->ls; uint32_t width = 0; pthread_mutex_lock(&ls->definition_mutex); if (!ls->script) goto fail; if (!have_func(get_width)) goto fail; lock_script(); ls_push_data(); if (call_func(get_width, 1, 1)) { width = (uint32_t)lua_tointeger(ls->script, -1); ls_pop(1); } unlock_script(); fail: pthread_mutex_unlock(&ls->definition_mutex); return width; } static uint32_t obs_lua_source_get_height(void *data) { struct obs_lua_data 
*ld = data; struct obs_lua_source *ls = ld->ls; uint32_t height = 0; pthread_mutex_lock(&ls->definition_mutex); if (!ls->script) goto fail; if (!have_func(get_height)) goto fail; lock_script(); ls_push_data(); if (call_func(get_height, 1, 1)) { height = (uint32_t)lua_tointeger(ls->script, -1); ls_pop(1); } unlock_script(); fail: pthread_mutex_unlock(&ls->definition_mutex); return height; } static void obs_lua_source_get_defaults(void *type_data, obs_data_t *settings) { struct obs_lua_source *ls = type_data; pthread_mutex_lock(&ls->definition_mutex); if (!ls->script) goto fail; if (!have_func(get_defaults)) goto fail; lock_script(); ls_push_libobs_obj(obs_data_t, settings, false); call_func(get_defaults, 1, 0); unlock_script(); fail: pthread_mutex_unlock(&ls->definition_mutex); } static obs_properties_t *obs_lua_source_get_properties(void *data) { struct obs_lua_data *ld = data; struct obs_lua_source *ls = ld->ls; obs_properties_t *props = NULL; pthread_mutex_lock(&ls->definition_mutex); if (!ls->script) goto fail; if (!have_func(get_properties)) goto fail; lock_script(); ls_push_data(); if (call_func(get_properties, 1, 1)) { ls_get_libobs_obj(obs_properties_t, -1, &props); ls_pop(1); } unlock_script(); fail: pthread_mutex_unlock(&ls->definition_mutex); return props; } static void obs_lua_source_update(void *data, obs_data_t *settings) { struct obs_lua_data *ld = data; struct obs_lua_source *ls = ld->ls; pthread_mutex_lock(&ls->definition_mutex); if (!ls->script) goto fail; if (!have_func(update)) goto fail; lock_script(); ls_push_data(); ls_push_libobs_obj(obs_data_t, settings, false); call_func(update, 2, 0); unlock_script(); fail: pthread_mutex_unlock(&ls->definition_mutex); } #define DEFINE_VOID_DATA_CALLBACK(name) \ static void obs_lua_source_##name(void *data) \ { \ struct obs_lua_data *ld = data; \ struct obs_lua_source *ls = ld->ls; \ if (!have_func(name)) \ return; \ lock_script(); \ ls_push_data(); \ call_func(name, 1, 0); \ unlock_script(); \ } 
DEFINE_VOID_DATA_CALLBACK(activate) DEFINE_VOID_DATA_CALLBACK(deactivate) DEFINE_VOID_DATA_CALLBACK(show) DEFINE_VOID_DATA_CALLBACK(hide) #undef DEFINE_VOID_DATA_CALLBACK static void obs_lua_source_video_tick(void *data, float seconds) { struct obs_lua_data *ld = data; struct obs_lua_source *ls = ld->ls; pthread_mutex_lock(&ls->definition_mutex); if (!ls->script) goto fail; if (!have_func(video_tick)) goto fail; lock_script(); ls_push_data(); lua_pushnumber(ls->script, (double)seconds); call_func(video_tick, 2, 0); unlock_script(); fail: pthread_mutex_unlock(&ls->definition_mutex); } static void obs_lua_source_video_render(void *data, gs_effect_t *effect) { struct obs_lua_data *ld = data; struct obs_lua_source *ls = ld->ls; pthread_mutex_lock(&ls->definition_mutex); if (!ls->script) goto fail; if (!have_func(video_render)) goto fail; lock_script(); ls_push_data(); ls_push_libobs_obj(gs_effect_t, effect, false); call_func(video_render, 2, 0); unlock_script(); fail: pthread_mutex_unlock(&ls->definition_mutex); } static void obs_lua_source_save(void *data, obs_data_t *settings) { struct obs_lua_data *ld = data; struct obs_lua_source *ls = ld->ls; pthread_mutex_lock(&ls->definition_mutex); if (!ls->script) goto fail; if (!have_func(save)) goto fail; lock_script(); ls_push_data(); ls_push_libobs_obj(obs_data_t, settings, false); call_func(save, 2, 0); unlock_script(); fail: pthread_mutex_unlock(&ls->definition_mutex); } static void obs_lua_source_load(void *data, obs_data_t *settings) { struct obs_lua_data *ld = data; struct obs_lua_source *ls = ld->ls; pthread_mutex_lock(&ls->definition_mutex); if (!ls->script) goto fail; if (!have_func(load)) goto fail; lock_script(); ls_push_data(); ls_push_libobs_obj(obs_data_t, settings, false); call_func(load, 2, 0); unlock_script(); fail: pthread_mutex_unlock(&ls->definition_mutex); } static void source_type_unload(struct obs_lua_source *ls) { #define unref(name) \ luaL_unref(ls->script, LUA_REGISTRYINDEX, name); \ name = 
LUA_REFNIL unref(ls->func_create); unref(ls->func_destroy); unref(ls->func_get_width); unref(ls->func_get_height); unref(ls->func_get_defaults); unref(ls->func_get_properties); unref(ls->func_update); unref(ls->func_activate); unref(ls->func_deactivate); unref(ls->func_show); unref(ls->func_hide); unref(ls->func_video_tick); unref(ls->func_video_render); unref(ls->func_save); unref(ls->func_load); #undef unref } static void obs_lua_source_free_type_data(void *type_data) { struct obs_lua_source *ls = type_data; pthread_mutex_lock(&ls->definition_mutex); if (ls->script) { lock_script(); source_type_unload(ls); unlock_script(); ls->script = NULL; } pthread_mutex_unlock(&ls->definition_mutex); pthread_mutex_destroy(&ls->definition_mutex); bfree(ls); } EXPORT void obs_enable_source_type(const char *name, bool enable); static inline struct obs_lua_source *find_existing(const char *id) { struct obs_lua_source *existing = NULL; pthread_mutex_lock(&lua_source_def_mutex); struct obs_lua_source *ls = first_source_def; while (ls) { /* can compare pointers here due to string table */ if (ls->id == id) { existing = ls; break; } ls = ls->next; } pthread_mutex_unlock(&lua_source_def_mutex); return existing; } static int obs_lua_register_source(lua_State *script) { struct obs_lua_source ls = {0}; struct obs_lua_source *existing = NULL; struct obs_lua_source *v = NULL; struct obs_source_info info = {0}; const char *id; if (!verify_args1(script, is_table)) goto fail; id = get_table_string(script, -1, "id"); if (!id || !*id) goto fail; /* redefinition */ existing = find_existing(id); if (existing) { if (existing->script) { existing = NULL; goto fail; } pthread_mutex_lock(&existing->definition_mutex); } v = existing ? 
existing : &ls; v->script = script; v->id = id; info.id = v->id; info.type = (enum obs_source_type)get_table_int(script, -1, "type"); info.output_flags = get_table_int(script, -1, "output_flags"); lua_pushstring(script, "get_name"); lua_gettable(script, -2); if (lua_pcall(script, 0, 1, 0) == 0) { v->display_name = cstrcache_get(lua_tostring(script, -1)); lua_pop(script, 1); } if (!v->display_name || !*v->display_name || !*info.id || !info.output_flags) goto fail; #define get_callback(val) \ do { \ get_callback_from_table(script, -1, #val, &v->func_##val); \ info.val = obs_lua_source_##val; \ } while (false) get_callback(create); get_callback(destroy); get_callback(get_width); get_callback(get_height); get_callback(get_properties); get_callback(update); get_callback(activate); get_callback(deactivate); get_callback(show); get_callback(hide); get_callback(video_tick); get_callback(video_render); get_callback(save); get_callback(load); #undef get_callback get_callback_from_table(script, -1, "get_defaults", &v->func_get_defaults); if (!existing) { ls.data = current_lua_script; pthread_mutexattr_t mutexattr; pthread_mutexattr_init(&mutexattr); pthread_mutexattr_settype(&mutexattr, PTHREAD_MUTEX_RECURSIVE); pthread_mutex_init(&ls.definition_mutex, &mutexattr); pthread_mutexattr_destroy(&mutexattr); info.type_data = bmemdup(&ls, sizeof(ls)); info.free_type_data = obs_lua_source_free_type_data; info.get_name = obs_lua_source_get_name; info.get_defaults2 = obs_lua_source_get_defaults; obs_register_source(&info); pthread_mutex_lock(&lua_source_def_mutex); v = info.type_data; struct obs_lua_source *next = first_source_def; v->next = next; if (next) next->p_prev_next = &v->next; v->p_prev_next = &first_source_def; first_source_def = v; pthread_mutex_unlock(&lua_source_def_mutex); } else { existing->script = script; existing->data = current_lua_script; obs_enable_source_type(id, true); struct obs_lua_data *ld = v->first_source; while (ld) { struct obs_lua_source *ls = v; if 
(have_func(create)) { obs_source_t *source = ld->source; obs_data_t *settings = obs_source_get_settings(source); ls_push_libobs_obj(obs_data_t, settings, false); ls_push_libobs_obj(obs_source_t, source, false); call_func(create, 2, 1); ld->lua_data_ref = luaL_ref(ls->script, LUA_REGISTRYINDEX); obs_data_release(settings); } ld = ld->next; } } fail: if (existing) { pthread_mutex_unlock(&existing->definition_mutex); } return 0; } /* ========================================================================= */ void add_lua_source_functions(lua_State *script) { lua_getglobal(script, "obslua"); lua_pushstring(script, "obs_register_source"); lua_pushcfunction(script, obs_lua_register_source); lua_rawset(script, -3); lua_pop(script, 1); } static inline void undef_source_type(struct obs_lua_script *data, struct obs_lua_source *ls) { pthread_mutex_lock(&ls->definition_mutex); pthread_mutex_lock(&data->mutex); obs_enable_source_type(ls->id, false); struct obs_lua_data *ld = ls->first_source; while (ld) { call_destroy(ld); ld = ld->next; } source_type_unload(ls); ls->script = NULL; pthread_mutex_unlock(&data->mutex); pthread_mutex_unlock(&ls->definition_mutex); } void undef_lua_script_sources(struct obs_lua_script *data) { pthread_mutex_lock(&lua_source_def_mutex); struct obs_lua_source *def = first_source_def; while (def) { if (def->script == data->script) undef_source_type(data, def); def = def->next; } pthread_mutex_unlock(&lua_source_def_mutex); }
{ "language": "C" }
/* A testing tool that tries to emit the message given as argument
 * to the journal (success judged, at least, per the journald retcode).
 * Even if the whole operation is successful there is still a need to
 * actually check that the message is present in the journal (also
 * verifying journal read perms).
 *
 * Retcodes: 0 - success
 *           1 - wrong arguments (expects exactly one)
 *           2 - failed to open journal for writing
 *           3 - failed to actually write message to journal
 *
 * Part of the testbench for rsyslog.
 *
 * Copyright 2018 Red Hat Inc.
 *
 * This file is part of rsyslog.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 * -or-
 * see COPYING.ASL20 in the source distribution
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>	/* for strerror() -- was missing, implicit decl */
#include <syslog.h>
#include <unistd.h>
#include <systemd/sd-journal.h>
#include <systemd/sd-daemon.h>

int main(int argc, char *argv[])
{
	int fd;
	FILE *log;

	if (argc != 2) {
		fprintf(stderr, "usage: journal_print \"message\"\n");
		exit(1);
	}

	/* First, we need to determine whether journal is running at all */
	fd = sd_journal_stream_fd("imjournal_test", LOG_WARNING, 0);
	if (fd < 0) {
		fprintf(stderr, "Failed to create journal fd: %s\n", strerror(-fd));
		exit(2);
	}
	log = fdopen(fd, "w");
	if (!log) {
		fprintf(stderr, "Failed to create file object: %m\n");
		close(fd);
		exit(2);
	}

	/* Now we can try inserting something.  stdio output is buffered,
	 * so a write error may only surface at flush time -- check both
	 * the write and an explicit flush. */
	if (fprintf(log, "%s", argv[1]) < 0 || fflush(log) != 0) {
		fprintf(stderr, "Failed to write to journal log: %m\n");
		fclose(log);	/* fclose() also closes the underlying fd */
		exit(3);
	}

	/* fclose() flushes and closes fd; a failure here still means the
	 * message may not have reached journald. */
	if (fclose(log) != 0) {
		fprintf(stderr, "Failed to close journal log: %m\n");
		exit(3);
	}
	return 0;
}
{ "language": "C" }
/* * Copyright (C) 2009 by Matthias Ringwald * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the copyright holders nor the names of * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY MATTHIAS RINGWALD AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL MATTHIAS * RINGWALD OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * linked_list.h * * Created by Matthias Ringwald on 7/13/09. 
 */

#pragma once

#if defined __cplusplus
extern "C" {
#endif

/* A minimal intrusive singly linked list: items embed the link pointer
 * and carry an opaque user-data pointer back to their owning struct. */
typedef struct linked_item {
    struct linked_item *next; // next element in list, or NULL
    void *user_data;          // pointer to struct base
} linked_item_t;

/* A list is represented by a pointer to its first item (NULL when empty). */
typedef linked_item_t * linked_list_t;

/* Set/get an item's opaque user-data pointer. */
void linked_item_set_user(linked_item_t *item, void *user_data);
void * linked_item_get_user(linked_item_t *item);

/* Returns true (non-zero) when the list contains no items. */
int linked_list_empty(linked_list_t * list);

/* Add item to list as first element. */
void linked_list_add(linked_list_t * list, linked_item_t *item);

/* Add item to list as last element. */
void linked_list_add_tail(linked_list_t * list, linked_item_t *item);

/* Remove item from list.  Returns int -- presumably an error/success
 * indicator; confirm the convention against linked_list.c. */
int linked_list_remove(linked_list_t * list, linked_item_t *item);

/* Find the last item in the list. */
linked_item_t * linked_list_get_last_item(linked_list_t * list);

/* Self-test entry point. */
void test_linked_list(void);

#if defined __cplusplus
}
#endif
{ "language": "C" }
/* SPDX-License-Identifier: GPL-2.0-only */
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#ifndef __UTIL_DOT_H__
#define __UTIL_DOT_H__

/*
 * In-place conversion helpers for DLM wire structures: "out" before a
 * struct is sent, "in" after it is received -- presumably host/wire
 * byte-order swapping; confirm against util.c.
 */
void dlm_message_out(struct dlm_message *ms);
void dlm_message_in(struct dlm_message *ms);
void dlm_rcom_out(struct dlm_rcom *rc);
void dlm_rcom_in(struct dlm_rcom *rc);

#endif
{ "language": "C" }
/* * Copyright (C) 2008 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ANDROID_FB_INTERFACE_H #define ANDROID_FB_INTERFACE_H #include <stdint.h> #include <sys/cdefs.h> #include <sys/types.h> #include <cutils/native_handle.h> #include <hardware/hardware.h> __BEGIN_DECLS #define GRALLOC_HARDWARE_FB0 "fb0" /*****************************************************************************/ /*****************************************************************************/ typedef struct framebuffer_device_t { struct hw_device_t common; /* flags describing some attributes of the framebuffer */ const uint32_t flags; /* dimensions of the framebuffer in pixels */ const uint32_t width; const uint32_t height; /* frambuffer stride in pixels */ const int stride; /* framebuffer pixel format */ const int format; /* resolution of the framebuffer's display panel in pixel per inch*/ const float xdpi; const float ydpi; /* framebuffer's display panel refresh rate in frames per second */ const float fps; /* min swap interval supported by this framebuffer */ const int minSwapInterval; /* max swap interval supported by this framebuffer */ const int maxSwapInterval; /* Number of framebuffers supported*/ const int numFramebuffers; int reserved[7]; /* * requests a specific swap-interval (same definition than EGL) * * Returns 0 on success or -errno on error. */ int (*setSwapInterval)(struct framebuffer_device_t* window, int interval); /* * This hook is OPTIONAL. 
* * It is non NULL If the framebuffer driver supports "update-on-demand" * and the given rectangle is the area of the screen that gets * updated during (*post)(). * * This is useful on devices that are able to DMA only a portion of * the screen to the display panel, upon demand -- as opposed to * constantly refreshing the panel 60 times per second, for instance. * * Only the area defined by this rectangle is guaranteed to be valid, that * is, the driver is not allowed to post anything outside of this * rectangle. * * The rectangle evaluated during (*post)() and specifies which area * of the buffer passed in (*post)() shall to be posted. * * return -EINVAL if width or height <=0, or if left or top < 0 */ int (*setUpdateRect)(struct framebuffer_device_t* window, int left, int top, int width, int height); /* * Post <buffer> to the display (display it on the screen) * The buffer must have been allocated with the * GRALLOC_USAGE_HW_FB usage flag. * buffer must be the same width and height as the display and must NOT * be locked. * * The buffer is shown during the next VSYNC. * * If the same buffer is posted again (possibly after some other buffer), * post() will block until the the first post is completed. * * Internally, post() is expected to lock the buffer so that a * subsequent call to gralloc_module_t::(*lock)() with USAGE_RENDER or * USAGE_*_WRITE will block until it is safe; that is typically once this * buffer is shown and another buffer has been posted. * * Returns 0 on success or -errno on error. */ int (*post)(struct framebuffer_device_t* dev, buffer_handle_t buffer); /* * The (*compositionComplete)() method must be called after the * compositor has finished issuing GL commands for client buffers. */ int (*compositionComplete)(struct framebuffer_device_t* dev); /* * This hook is OPTIONAL. 
 *
 * If non NULL it will be called by SurfaceFlinger on dumpsys
 */
void (*dump)(struct framebuffer_device_t* dev, char *buff, int buff_len);

/*
 * (*enableScreen)() is used to either blank (enable=0) or
 * unblank (enable=1) the screen this framebuffer is attached to.
 *
 * Returns 0 on success or -errno on error.
 */
int (*enableScreen)(struct framebuffer_device_t* dev, int enable);

/* presumably reserved for future function-pointer additions without
 * breaking the HAL ABI -- confirm against hardware.h conventions */
void* reserved_proc[6];

} framebuffer_device_t;

/** convenience API for opening and closing a supported device */

/* Opens the "fb0" framebuffer device exposed by the given HAL module. */
static inline int framebuffer_open(const struct hw_module_t* module,
        struct framebuffer_device_t** device) {
    return module->methods->open(module,
            GRALLOC_HARDWARE_FB0, (struct hw_device_t**)device);
}

/* Closes a device previously opened with framebuffer_open(). */
static inline int framebuffer_close(struct framebuffer_device_t* device) {
    return device->common.close(&device->common);
}

__END_DECLS

#endif // ANDROID_FB_INTERFACE_H
{ "language": "C" }
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 1.1/GPL 2.0/LGPL 2.1 * * The contents of this file are subject to the Mozilla Public License Version * 1.1 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Netscape Portable Runtime (NSPR). * * The Initial Developer of the Original Code is * Netscape Communications Corporation. * Portions created by the Initial Developer are Copyright (C) 1998-2000 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Alternatively, the contents of this file may be used under the terms of * either the GNU General Public License Version 2 or later (the "GPL"), or * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), * in which case the provisions of the GPL or the LGPL are applicable instead * of those above. If you wish to allow use of your version of this file only * under the terms of either the GPL or the LGPL, and not to allow others to * use your version of this file under the terms of the MPL, indicate your * decision by deleting the provisions above and replace them with the notice * and other provisions required by the GPL or the LGPL. If you do not delete * the provisions above, a recipient may use your version of this file under * the terms of any one of the MPL, the GPL or the LGPL. * * ***** END LICENSE BLOCK ***** */ /* ** File: prtypes.h ** Description: Definitions of NSPR's basic types ** ** Prototypes and macros used to make up for deficiencies that we have found ** in ANSI environments. 
** ** Since we do not wrap <stdlib.h> and all the other standard headers, authors ** of portable code will not know in general that they need these definitions. ** Instead of requiring these authors to find the dependent uses in their code ** and take the following steps only in those C files, we take steps once here ** for all C files. **/ #ifndef prtypes_h___ #define prtypes_h___ #ifdef MDCPUCFG #include MDCPUCFG #else #include "prcpucfg.h" #endif #include <stddef.h> /*********************************************************************** ** MACROS: PR_EXTERN ** PR_IMPLEMENT ** DESCRIPTION: ** These are only for externally visible routines and globals. For ** internal routines, just use "extern" for type checking and that ** will not export internal cross-file or forward-declared symbols. ** Define a macro for declaring procedures return types. We use this to ** deal with windoze specific type hackery for DLL definitions. Use ** PR_EXTERN when the prototype for the method is declared. Use ** PR_IMPLEMENT for the implementation of the method. 
** ** Example: ** in dowhim.h ** PR_EXTERN( void ) DoWhatIMean( void ); ** in dowhim.c ** PR_IMPLEMENT( void ) DoWhatIMean( void ) { return; } ** ** ***********************************************************************/ #if defined(WIN32) #define PR_EXPORT(__type) extern __declspec(dllexport) __type #define PR_EXPORT_DATA(__type) extern __declspec(dllexport) __type #define PR_IMPORT(__type) __declspec(dllimport) __type #define PR_IMPORT_DATA(__type) __declspec(dllimport) __type #define PR_EXTERN(__type) extern __declspec(dllexport) __type #define PR_IMPLEMENT(__type) __declspec(dllexport) __type #define PR_EXTERN_DATA(__type) extern __declspec(dllexport) __type #define PR_IMPLEMENT_DATA(__type) __declspec(dllexport) __type #define PR_CALLBACK #define PR_CALLBACK_DECL #define PR_STATIC_CALLBACK(__x) static __x #elif defined(XP_BEOS) #define PR_EXPORT(__type) extern __declspec(dllexport) __type #define PR_EXPORT_DATA(__type) extern __declspec(dllexport) __type #define PR_IMPORT(__type) extern __declspec(dllexport) __type #define PR_IMPORT_DATA(__type) extern __declspec(dllexport) __type #define PR_EXTERN(__type) extern __declspec(dllexport) __type #define PR_IMPLEMENT(__type) __declspec(dllexport) __type #define PR_EXTERN_DATA(__type) extern __declspec(dllexport) __type #define PR_IMPLEMENT_DATA(__type) __declspec(dllexport) __type #define PR_CALLBACK #define PR_CALLBACK_DECL #define PR_STATIC_CALLBACK(__x) static __x #elif defined(WIN16) #define PR_CALLBACK_DECL __cdecl #if defined(_WINDLL) #define PR_EXPORT(__type) extern __type _cdecl _export _loadds #define PR_IMPORT(__type) extern __type _cdecl _export _loadds #define PR_EXPORT_DATA(__type) extern __type _export #define PR_IMPORT_DATA(__type) extern __type _export #define PR_EXTERN(__type) extern __type _cdecl _export _loadds #define PR_IMPLEMENT(__type) __type _cdecl _export _loadds #define PR_EXTERN_DATA(__type) extern __type _export #define PR_IMPLEMENT_DATA(__type) __type _export #define PR_CALLBACK 
__cdecl __loadds #define PR_STATIC_CALLBACK(__x) static __x PR_CALLBACK #else /* this must be .EXE */ #define PR_EXPORT(__type) extern __type _cdecl _export #define PR_IMPORT(__type) extern __type _cdecl _export #define PR_EXPORT_DATA(__type) extern __type _export #define PR_IMPORT_DATA(__type) extern __type _export #define PR_EXTERN(__type) extern __type _cdecl _export #define PR_IMPLEMENT(__type) __type _cdecl _export #define PR_EXTERN_DATA(__type) extern __type _export #define PR_IMPLEMENT_DATA(__type) __type _export #define PR_CALLBACK __cdecl __loadds #define PR_STATIC_CALLBACK(__x) __x PR_CALLBACK #endif /* _WINDLL */ #elif defined(XP_MAC) #define PR_EXPORT(__type) extern __declspec(export) __type #define PR_EXPORT_DATA(__type) extern __declspec(export) __type #define PR_IMPORT(__type) extern __declspec(export) __type #define PR_IMPORT_DATA(__type) extern __declspec(export) __type #define PR_EXTERN(__type) extern __declspec(export) __type #define PR_IMPLEMENT(__type) __declspec(export) __type #define PR_EXTERN_DATA(__type) extern __declspec(export) __type #define PR_IMPLEMENT_DATA(__type) __declspec(export) __type #define PR_CALLBACK #define PR_CALLBACK_DECL #define PR_STATIC_CALLBACK(__x) static __x #elif defined(XP_OS2) && defined(__declspec) #define PR_EXPORT(__type) extern __declspec(dllexport) __type #define PR_EXPORT_DATA(__type) extern __declspec(dllexport) __type #define PR_IMPORT(__type) extern __declspec(dllimport) __type #define PR_IMPORT_DATA(__type) extern __declspec(dllimport) __type #define PR_EXTERN(__type) extern __declspec(dllexport) __type #define PR_IMPLEMENT(__type) __declspec(dllexport) __type #define PR_EXTERN_DATA(__type) extern __declspec(dllexport) __type #define PR_IMPLEMENT_DATA(__type) __declspec(dllexport) __type #define PR_CALLBACK #define PR_CALLBACK_DECL #define PR_STATIC_CALLBACK(__x) static __x #elif defined(XP_OS2_VACPP) #define PR_EXPORT(__type) extern __type #define PR_EXPORT_DATA(__type) extern __type #define 
PR_IMPORT(__type) extern __type #define PR_IMPORT_DATA(__type) extern __type #define PR_EXTERN(__type) extern __type #define PR_IMPLEMENT(__type) __type #define PR_EXTERN_DATA(__type) extern __type #define PR_IMPLEMENT_DATA(__type) __type #define PR_CALLBACK _Optlink #define PR_CALLBACK_DECL #define PR_STATIC_CALLBACK(__x) static __x PR_CALLBACK #else /* Unix */ /* GCC 3.3 and later support the visibility attribute. */ #if (__GNUC__ >= 4) || \ (__GNUC__ == 3 && __GNUC_MINOR__ >= 3) #define PR_VISIBILITY_DEFAULT __attribute__((visibility("default"))) #else #define PR_VISIBILITY_DEFAULT #endif #define PR_EXPORT(__type) extern PR_VISIBILITY_DEFAULT __type #define PR_EXPORT_DATA(__type) extern PR_VISIBILITY_DEFAULT __type #define PR_IMPORT(__type) extern PR_VISIBILITY_DEFAULT __type #define PR_IMPORT_DATA(__type) extern PR_VISIBILITY_DEFAULT __type #define PR_EXTERN(__type) extern PR_VISIBILITY_DEFAULT __type #define PR_IMPLEMENT(__type) PR_VISIBILITY_DEFAULT __type #define PR_EXTERN_DATA(__type) extern PR_VISIBILITY_DEFAULT __type #define PR_IMPLEMENT_DATA(__type) PR_VISIBILITY_DEFAULT __type #define PR_CALLBACK #define PR_CALLBACK_DECL #define PR_STATIC_CALLBACK(__x) static __x #endif #if defined(_NSPR_BUILD_) #define NSPR_API(__type) PR_EXPORT(__type) #define NSPR_DATA_API(__type) PR_EXPORT_DATA(__type) #else #define NSPR_API(__type) PR_IMPORT(__type) #define NSPR_DATA_API(__type) PR_IMPORT_DATA(__type) #endif /*********************************************************************** ** MACROS: PR_BEGIN_MACRO ** PR_END_MACRO ** DESCRIPTION: ** Macro body brackets so that macros with compound statement definitions ** behave syntactically more like functions when called. 
***********************************************************************/ #define PR_BEGIN_MACRO do { #define PR_END_MACRO } while (0) /*********************************************************************** ** MACROS: PR_BEGIN_EXTERN_C ** PR_END_EXTERN_C ** DESCRIPTION: ** Macro shorthands for conditional C++ extern block delimiters. ***********************************************************************/ #ifdef __cplusplus #define PR_BEGIN_EXTERN_C extern "C" { #define PR_END_EXTERN_C } #else #define PR_BEGIN_EXTERN_C #define PR_END_EXTERN_C #endif /*********************************************************************** ** MACROS: PR_BIT ** PR_BITMASK ** DESCRIPTION: ** Bit masking macros. XXX n must be <= 31 to be portable ***********************************************************************/ #define PR_BIT(n) ((PRUint32)1 << (n)) #define PR_BITMASK(n) (PR_BIT(n) - 1) /*********************************************************************** ** MACROS: PR_ROUNDUP ** PR_MIN ** PR_MAX ** PR_ABS ** DESCRIPTION: ** Commonly used macros for operations on compatible types. ***********************************************************************/ #define PR_ROUNDUP(x,y) ((((x)+((y)-1))/(y))*(y)) #define PR_MIN(x,y) ((x)<(y)?(x):(y)) #define PR_MAX(x,y) ((x)>(y)?(x):(y)) #define PR_ABS(x) ((x)<0?-(x):(x)) PR_BEGIN_EXTERN_C /************************************************************************ ** TYPES: PRUint8 ** PRInt8 ** DESCRIPTION: ** The int8 types are known to be 8 bits each. There is no type that ** is equivalent to a plain "char". ************************************************************************/ #if PR_BYTES_PER_BYTE == 1 typedef unsigned char PRUint8; /* ** Some cfront-based C++ compilers do not like 'signed char' and ** issue the warning message: ** warning: "signed" not implemented (ignored) ** For these compilers, we have to define PRInt8 as plain 'char'. ** Make sure that plain 'char' is indeed signed under these compilers. 
*/ #if (defined(HPUX) && defined(__cplusplus) \ && !defined(__GNUC__) && __cplusplus < 199707L) \ || (defined(SCO) && defined(__cplusplus) \ && !defined(__GNUC__) && __cplusplus == 1L) typedef char PRInt8; #else typedef signed char PRInt8; #endif #else #error No suitable type for PRInt8/PRUint8 #endif /************************************************************************ * MACROS: PR_INT8_MAX * PR_INT8_MIN * PR_UINT8_MAX * DESCRIPTION: * The maximum and minimum values of a PRInt8 or PRUint8. ************************************************************************/ #define PR_INT8_MAX 127 #define PR_INT8_MIN (-128) #define PR_UINT8_MAX 255U /************************************************************************ ** TYPES: PRUint16 ** PRInt16 ** DESCRIPTION: ** The int16 types are known to be 16 bits each. ************************************************************************/ #if PR_BYTES_PER_SHORT == 2 typedef unsigned short PRUint16; typedef short PRInt16; #else #error No suitable type for PRInt16/PRUint16 #endif /************************************************************************ * MACROS: PR_INT16_MAX * PR_INT16_MIN * PR_UINT16_MAX * DESCRIPTION: * The maximum and minimum values of a PRInt16 or PRUint16. ************************************************************************/ #define PR_INT16_MAX 32767 #define PR_INT16_MIN (-32768) #define PR_UINT16_MAX 65535U /************************************************************************ ** TYPES: PRUint32 ** PRInt32 ** DESCRIPTION: ** The int32 types are known to be 32 bits each. 
************************************************************************/ #if PR_BYTES_PER_INT == 4 typedef unsigned int PRUint32; typedef int PRInt32; #define PR_INT32(x) x #define PR_UINT32(x) x ## U #elif PR_BYTES_PER_LONG == 4 typedef unsigned long PRUint32; typedef long PRInt32; #define PR_INT32(x) x ## L #define PR_UINT32(x) x ## UL #else #error No suitable type for PRInt32/PRUint32 #endif /************************************************************************ * MACROS: PR_INT32_MAX * PR_INT32_MIN * PR_UINT32_MAX * DESCRIPTION: * The maximum and minimum values of a PRInt32 or PRUint32. ************************************************************************/ #define PR_INT32_MAX PR_INT32(2147483647) #define PR_INT32_MIN (-PR_INT32_MAX - 1) #define PR_UINT32_MAX PR_UINT32(4294967295) /************************************************************************ ** TYPES: PRUint64 ** PRInt64 ** DESCRIPTION: ** The int64 types are known to be 64 bits each. Care must be used when ** declaring variables of type PRUint64 or PRInt64. Different hardware ** architectures and even different compilers have varying support for ** 64 bit values. The only guaranteed portability requires the use of ** the LL_ macros (see prlong.h). 
************************************************************************/ #ifdef HAVE_LONG_LONG #if PR_BYTES_PER_LONG == 8 typedef long PRInt64; typedef unsigned long PRUint64; #elif defined(WIN16) typedef __int64 PRInt64; typedef unsigned __int64 PRUint64; #elif defined(WIN32) && !defined(__GNUC__) typedef __int64 PRInt64; typedef unsigned __int64 PRUint64; #else typedef long long PRInt64; typedef unsigned long long PRUint64; #endif /* PR_BYTES_PER_LONG == 8 */ #else /* !HAVE_LONG_LONG */ typedef struct { #ifdef IS_LITTLE_ENDIAN PRUint32 lo, hi; #else PRUint32 hi, lo; #endif } PRInt64; typedef PRInt64 PRUint64; #endif /* !HAVE_LONG_LONG */ /************************************************************************ ** TYPES: PRUintn ** PRIntn ** DESCRIPTION: ** The PRIntn types are most appropriate for automatic variables. They are ** guaranteed to be at least 16 bits, though various architectures may ** define them to be wider (e.g., 32 or even 64 bits). These types are ** never valid for fields of a structure. ************************************************************************/ #if PR_BYTES_PER_INT >= 2 typedef int PRIntn; typedef unsigned int PRUintn; #else #error 'sizeof(int)' not sufficient for platform use #endif /************************************************************************ ** TYPES: PRFloat64 ** DESCRIPTION: ** NSPR's floating point type is always 64 bits. ************************************************************************/ typedef double PRFloat64; /************************************************************************ ** TYPES: PRSize ** DESCRIPTION: ** A type for representing the size of objects. ************************************************************************/ typedef size_t PRSize; /************************************************************************ ** TYPES: PROffset32, PROffset64 ** DESCRIPTION: ** A type for representing byte offsets from some location. 
************************************************************************/ typedef PRInt32 PROffset32; typedef PRInt64 PROffset64; /************************************************************************ ** TYPES: PRPtrDiff ** DESCRIPTION: ** A type for pointer difference. Variables of this type are suitable ** for storing a pointer or pointer subtraction. ************************************************************************/ typedef ptrdiff_t PRPtrdiff; /************************************************************************ ** TYPES: PRUptrdiff ** DESCRIPTION: ** A type for pointer difference. Variables of this type are suitable ** for storing a pointer or pointer sutraction. ************************************************************************/ #ifdef _WIN64 typedef unsigned __int64 PRUptrdiff; #else typedef unsigned long PRUptrdiff; #endif /************************************************************************ ** TYPES: PRBool ** DESCRIPTION: ** Use PRBool for variables and parameter types. Use PR_FALSE and PR_TRUE ** for clarity of target type in assignments and actual arguments. Use ** 'if (bool)', 'while (!bool)', '(bool) ? x : y' etc., to test booleans ** just as you would C int-valued conditions. ************************************************************************/ typedef PRIntn PRBool; #define PR_TRUE 1 #define PR_FALSE 0 /************************************************************************ ** TYPES: PRPackedBool ** DESCRIPTION: ** Use PRPackedBool within structs where bitfields are not desirable ** but minimum and consistant overhead matters. ************************************************************************/ typedef PRUint8 PRPackedBool; /* ** Status code used by some routines that have a single point of failure or ** special status return. 
*/ typedef enum { PR_FAILURE = -1, PR_SUCCESS = 0 } PRStatus; #ifndef __PRUNICHAR__ #define __PRUNICHAR__ #if defined(WIN32) || defined(XP_MAC) typedef wchar_t PRUnichar; #else typedef PRUint16 PRUnichar; #endif #endif /* ** WARNING: The undocumented data types PRWord and PRUword are ** only used in the garbage collection and arena code. Do not ** use PRWord and PRUword in new code. ** ** A PRWord is an integer that is the same size as a void*. ** It implements the notion of a "word" in the Java Virtual ** Machine. (See Sec. 3.4 "Words", The Java Virtual Machine ** Specification, Addison-Wesley, September 1996. ** http://java.sun.com/docs/books/vmspec/index.html.) */ #ifdef _WIN64 typedef __int64 PRWord; typedef unsigned __int64 PRUword; #else typedef long PRWord; typedef unsigned long PRUword; #endif #if defined(NO_NSPR_10_SUPPORT) #else /********* ???????????????? FIX ME ??????????????????????????? *****/ /********************** Some old definitions until pr=>ds transition is done ***/ /********************** Also, we are still using NSPR 1.0. GC ******************/ /* ** Fundamental NSPR macros, used nearly everywhere. */ #define PR_PUBLIC_API PR_IMPLEMENT /* ** Macro body brackets so that macros with compound statement definitions ** behave syntactically more like functions when called. */ #define NSPR_BEGIN_MACRO do { #define NSPR_END_MACRO } while (0) /* ** Macro shorthands for conditional C++ extern block delimiters. */ #ifdef NSPR_BEGIN_EXTERN_C #undef NSPR_BEGIN_EXTERN_C #endif #ifdef NSPR_END_EXTERN_C #undef NSPR_END_EXTERN_C #endif #ifdef __cplusplus #define NSPR_BEGIN_EXTERN_C extern "C" { #define NSPR_END_EXTERN_C } #else #define NSPR_BEGIN_EXTERN_C #define NSPR_END_EXTERN_C #endif #ifdef XP_MAC #include "obsolete/protypes.h" #else #include "obsolete/protypes.h" #endif /********* ????????????? End Fix me ?????????????????????????????? *****/ #endif /* NO_NSPR_10_SUPPORT */ PR_END_EXTERN_C #endif /* prtypes_h___ */
{ "language": "C" }
#include <unicorn/unicorn.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

/* Regression test: emulate 8 bytes of 0xff at UC_BUG_WRITE_ADDR and check
 * that the invalid-opcode interrupt (#UD, vector 6) is delivered to the
 * UC_HOOK_INTR callback instead of crashing the host. */
#define UC_BUG_WRITE_SIZE 128
#define UC_BUG_WRITE_ADDR 0x1000 // fix this by change this to 0x2000

/* Set to 1 by the interrupt hook once the guest raises #UD. */
static int got_sigill = 0;

/*
 * UC_HOOK_INTR callback: stop emulation when interrupt vector 6 (invalid
 * opcode) fires.  Renamed from "_interrupt": file-scope identifiers with a
 * leading underscore are reserved for the implementation (C11 7.1.3).
 */
static void hook_intr(uc_engine *uc, uint32_t intno, void *user_data)
{
	(void)user_data;
	if (intno == 6) {
		uc_emu_stop(uc);
		got_sigill = 1;
	}
}

int main(void)
{
	int size;
	uint8_t *buf;
	uc_engine *uc;
	uc_hook uh_trap;

	uc_err err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc);
	if (err) {
		fprintf(stderr, "Cannot initialize unicorn\n");
		return 1;
	}

	size = UC_BUG_WRITE_SIZE;
	buf = malloc(size);
	if (!buf) {
		fprintf(stderr, "Cannot allocate\n");
		uc_close(uc);	/* fix: engine was leaked on this path */
		return 1;
	}
	memset(buf, 0, size);

	/* NOTE(review): uc_mem_map() requires the size to be a multiple of
	 * 4 KiB; with size == 128 the map call presumably fails and the 0xff
	 * bytes are never written, so emulation starts on unmapped memory.
	 * This appears intentional for the bug reproduction -- confirm before
	 * "fixing".  uc_mem_map() returns UC_ERR_OK (0) on success, so the
	 * write only happens when mapping succeeded. */
	if (!uc_mem_map(uc, UC_BUG_WRITE_ADDR, size, UC_PROT_ALL)) {
		uc_mem_write(uc, UC_BUG_WRITE_ADDR,
			     (const uint8_t *)"\xff\xff\xff\xff\xff\xff\xff\xff",
			     8);
	}
	uc_hook_add(uc, &uh_trap, UC_HOOK_INTR, hook_intr, NULL, 1, 0);
	uc_emu_start(uc, UC_BUG_WRITE_ADDR, UC_BUG_WRITE_ADDR + 8, 0, 1);
	uc_close(uc);
	free(buf);	/* fix: buffer was leaked before */

	printf("Correct: %s\n", got_sigill ? "YES" : "NO");
	return got_sigill ? 0 : 1;
}
{ "language": "C" }
/*
 * Copyright (c) 2006 Darren Tucker. All rights reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * sshd platform glue: OS-specific hooks invoked at fixed points in the
 * server's process/privilege lifecycle.  Each body compiles to nothing
 * unless the matching feature macro was detected at configure time.
 */

#include "includes.h"

#include <stdarg.h>
#include <stdio.h>
#include <unistd.h>

#include "log.h"
#include "misc.h"
#include "servconf.h"
#include "sshkey.h"
#include "hostfile.h"
#include "auth.h"
#include "auth-pam.h"
#include "platform.h"

#include "openbsd-compat/openbsd-compat.h"

extern int use_privsep;		/* privilege separation enabled (set elsewhere) */
extern ServerOptions options;	/* parsed sshd_config */

/* Called once before the listen socket is set up. */
void
platform_pre_listen(void)
{
#ifdef LINUX_OOM_ADJUST
	/* Adjust out-of-memory killer so listening process is not killed */
	oom_adjust_setup();
#endif
}

/* Called in the parent immediately before forking a connection child. */
void
platform_pre_fork(void)
{
#ifdef USE_SOLARIS_PROCESS_CONTRACTS
	solaris_contract_pre_fork();
#endif
}

/* Called before sshd re-executes itself; undo the OOM-score tweak first. */
void
platform_pre_restart(void)
{
#ifdef LINUX_OOM_ADJUST
	oom_adjust_restore();
#endif
}

/* Called in the parent after a successful fork of a connection child. */
void
platform_post_fork_parent(pid_t child_pid)
{
#ifdef USE_SOLARIS_PROCESS_CONTRACTS
	solaris_contract_post_fork_parent(child_pid);
#endif
}

/* Called in the child after fork; restores the default OOM adjustment so
 * per-connection children are not OOM-exempt. */
void
platform_post_fork_child(void)
{
#ifdef USE_SOLARIS_PROCESS_CONTRACTS
	solaris_contract_post_fork_child();
#endif
#ifdef LINUX_OOM_ADJUST
	oom_adjust_restore();
#endif
}

/* return 1 if we are running with privilege to swap UIDs, 0 otherwise */
int
platform_privileged_uidswap(void)
{
#ifdef HAVE_CYGWIN
	/* uid 0 is not special on Cygwin so always try */
	return 1;
#else
	return (getuid() == 0 || geteuid() == 0);
#endif
}

/*
 * This gets called before switching UIDs, and is called even when sshd is
 * not running as root.
 */
void
platform_setusercontext(struct passwd *pw)
{
#ifdef WITH_SELINUX
	/* Cache selinux status for later use */
	(void)ssh_selinux_enabled();
#endif

#ifdef USE_SOLARIS_PROJECTS
	/*
	 * If solaris projects were detected, set the default now, unless
	 * we are using PAM in which case it is the responsibility of the
	 * PAM stack.
	 */
	if (!options.use_pam && (getuid() == 0 || geteuid() == 0))
		solaris_set_default_project(pw);
#endif

#if defined(HAVE_LOGIN_CAP) && defined (__bsdi__)
	/* BSDI: detach from the parent's process group before login.conf
	 * processing (root only). */
	if (getuid() == 0 || geteuid() == 0)
		setpgid(0, 0);
# endif

#if defined(HAVE_LOGIN_CAP) && defined(USE_PAM)
	/*
	 * If we have both LOGIN_CAP and PAM, we want to establish creds
	 * before calling setusercontext (in session.c:do_setusercontext).
	 */
	if (getuid() == 0 || geteuid() == 0) {
		if (options.use_pam) {
			do_pam_setcred(use_privsep);
		}
	}
# endif /* USE_PAM */

#if !defined(HAVE_LOGIN_CAP) && defined(HAVE_GETLUID) && defined(HAVE_SETLUID)
	if (getuid() == 0 || geteuid() == 0) {
		/* Sets login uid for accounting */
		if (getluid() == -1 && setluid(pw->pw_uid) == -1)
			error("setluid: %s", strerror(errno));
	}
#endif
}

/*
 * This gets called after we've established the user's groups, and is only
 * called if sshd is running as root.
 */
void
platform_setusercontext_post_groups(struct passwd *pw)
{
#if !defined(HAVE_LOGIN_CAP) && defined(USE_PAM)
	/*
	 * PAM credentials may take the form of supplementary groups.
	 * These will have been wiped by the above initgroups() call.
	 * Reestablish them here.
	 */
	if (options.use_pam) {
		do_pam_setcred(use_privsep);
	}
#endif /* USE_PAM */

#if !defined(HAVE_LOGIN_CAP) && (defined(WITH_IRIX_PROJECT) || \
    defined(WITH_IRIX_JOBS) || defined(WITH_IRIX_ARRAY))
	irix_setusercontext(pw);
#endif /* defined(WITH_IRIX_PROJECT) || defined(WITH_IRIX_JOBS) || defined(WITH_IRIX_ARRAY) */

#ifdef _AIX
	aix_usrinfo(pw);
#endif /* _AIX */

#ifdef HAVE_SETPCRED
	/*
	 * If we have a chroot directory, we set all creds except real
	 * uid which we will need for chroot. If we don't have a
	 * chroot directory, we don't override anything.
	 */
	{
		char **creds = NULL, *chroot_creds[] =
		    { "REAL_USER=root", NULL };

		if (options.chroot_directory != NULL &&
		    strcasecmp(options.chroot_directory, "none") != 0)
			creds = chroot_creds;

		if (setpcred(pw->pw_name, creds) == -1)
			fatal("Failed to set process credentials");
	}
#endif /* HAVE_SETPCRED */

#ifdef WITH_SELINUX
	ssh_selinux_setup_exec_context(pw->pw_name);
#endif
}

/*
 * Map an account name to a Kerberos principal where the platform needs a
 * non-default mapping; returns NULL when no platform mapping applies.
 * NOTE(review): ownership of a non-NULL return follows the AIX helper's
 * contract (caller presumably frees) -- confirm against aix_krb5_get_principal_name.
 */
char *
platform_krb5_get_principal_name(const char *pw_name)
{
#ifdef USE_AIX_KRB_NAME
	return aix_krb5_get_principal_name(pw_name);
#else
	return NULL;
#endif
}
{ "language": "C" }
/* Scicos * * Copyright (C) INRIA - METALAU Project <scicos@inria.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * See the file ./license.txt */ /*--------------------------------------------------------------------------*/ #include <math.h> #include <stdio.h> #include "scicos_block4.h" #include "dynlib_scicos_blocks.h" /*--------------------------------------------------------------------------*/ SCICOS_BLOCKS_IMPEXP void summation_i16n(scicos_block *block, int flag) { if ((flag == 1) | (flag == 6)) { int j = 0, k = 0; short *y = Getint16OutPortPtrs(block, 1); int nu = GetInPortRows(block, 1); int mu = GetInPortCols(block, 1); int *ipar = GetIparPtrs(block); int nin = GetNin(block); if (nin == 1) { short *u = Getint16InPortPtrs(block, 1); y[0] = 0; for (j = 0; j < nu * mu; j++) { y[0] = y[0] + u[j]; } } else { for (j = 0; j < nu * mu; j++) { y[j] = 0; for (k = 0; k < nin; k++) { short *u = Getint16InPortPtrs(block, k + 1); if (ipar[k] > 0) { y[j] = y[j] + u[j]; } else { y[j] = y[j] - u[j]; } } } } } } /*--------------------------------------------------------------------------*/
{ "language": "C" }
/*
 * cht_bsw_rt5672.c - ASoc Machine driver for Intel Cherryview-based platforms
 * Cherrytrail and Braswell, with RT5672 codec.
 *
 * Copyright (C) 2014 Intel Corp
 * Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
 * Mengdong Lin <mengdong.lin@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/jack.h>
#include "../../codecs/rt5670.h"
#include "../atom/sst-atom-controls.h"
#include "../common/sst-acpi.h"

/* The platform clock #3 outputs 19.2Mhz clock to codec as I2S MCLK */
#define CHT_PLAT_CLK_3_HZ	19200000
#define CHT_CODEC_DAI	"rt5670-aif1"

static struct snd_soc_jack cht_bsw_headset;
/* Actual codec device name ("i2c-<HID>:00"), fixed up from ACPI in probe. */
static char cht_bsw_codec_name[16];

/* Headset jack detection DAPM pins */
static struct snd_soc_jack_pin cht_bsw_headset_pins[] = {
	{
		.pin = "Headset Mic",
		.mask = SND_JACK_MICROPHONE,
	},
	{
		.pin = "Headphone",
		.mask = SND_JACK_HEADPHONE,
	},
};

/* Find the codec DAI on the card by name prefix; NULL if not registered. */
static inline struct snd_soc_dai *cht_get_codec_dai(struct snd_soc_card *card)
{
	struct snd_soc_pcm_runtime *rtd;

	list_for_each_entry(rtd, &card->rtd_list, list) {
		if (!strncmp(rtd->codec_dai->name, CHT_CODEC_DAI,
			     strlen(CHT_CODEC_DAI)))
			return rtd->codec_dai;
	}
	return NULL;
}

/*
 * DAPM supply widget handler for "Platform Clock": on power-up, route the
 * 19.2 MHz MCLK through the codec PLL; on power-down, fall back to the
 * codec's internal clock so jack detection keeps working with MCLK off.
 */
static int platform_clock_control(struct snd_soc_dapm_widget *w,
		struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct snd_soc_card *card = dapm->card;
	struct snd_soc_dai *codec_dai;
	int ret;

	codec_dai = cht_get_codec_dai(card);
	if (!codec_dai) {
		dev_err(card->dev,
			"Codec dai not found; Unable to set platform clock\n");
		return -EIO;
	}

	if (SND_SOC_DAPM_EVENT_ON(event)) {
		/* set codec PLL source to the 19.2MHz platform clock (MCLK) */
		ret = snd_soc_dai_set_pll(codec_dai, 0, RT5670_PLL1_S_MCLK,
				CHT_PLAT_CLK_3_HZ, 48000 * 512);
		if (ret < 0) {
			dev_err(card->dev, "can't set codec pll: %d\n", ret);
			return ret;
		}

		/* set codec sysclk source to PLL */
		ret = snd_soc_dai_set_sysclk(codec_dai, RT5670_SCLK_S_PLL1,
			48000 * 512, SND_SOC_CLOCK_IN);
		if (ret < 0) {
			dev_err(card->dev, "can't set codec sysclk: %d\n", ret);
			return ret;
		}
	} else {
		/* Set codec sysclk source to its internal clock because codec
		 * PLL will be off when idle and MCLK will also be off by ACPI
		 * when codec is runtime suspended. Codec needs clock for jack
		 * detection and button press.
		 */
		snd_soc_dai_set_sysclk(codec_dai, RT5670_SCLK_S_RCCLK,
				       48000 * 512, SND_SOC_CLOCK_IN);
	}
	return 0;
}

static const struct snd_soc_dapm_widget cht_dapm_widgets[] = {
	SND_SOC_DAPM_HP("Headphone", NULL),
	SND_SOC_DAPM_MIC("Headset Mic", NULL),
	SND_SOC_DAPM_MIC("Int Mic", NULL),
	SND_SOC_DAPM_SPK("Ext Spk", NULL),
	SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
			platform_clock_control, SND_SOC_DAPM_PRE_PMU |
			SND_SOC_DAPM_POST_PMD),
};

/* Audio routes: codec pins <-> board widgets, plus SSP2 FE/BE wiring; every
 * analog endpoint also depends on the "Platform Clock" supply above. */
static const struct snd_soc_dapm_route cht_audio_map[] = {
	{"IN1P", NULL, "Headset Mic"},
	{"IN1N", NULL, "Headset Mic"},
	{"DMIC L1", NULL, "Int Mic"},
	{"DMIC R1", NULL, "Int Mic"},
	{"Headphone", NULL, "HPOL"},
	{"Headphone", NULL, "HPOR"},
	{"Ext Spk", NULL, "SPOLP"},
	{"Ext Spk", NULL, "SPOLN"},
	{"Ext Spk", NULL, "SPORP"},
	{"Ext Spk", NULL, "SPORN"},
	{"AIF1 Playback", NULL, "ssp2 Tx"},
	{"ssp2 Tx", NULL, "codec_out0"},
	{"ssp2 Tx", NULL, "codec_out1"},
	{"codec_in0", NULL, "ssp2 Rx"},
	{"codec_in1", NULL, "ssp2 Rx"},
	{"ssp2 Rx", NULL, "AIF1 Capture"},
	{"Headphone", NULL, "Platform Clock"},
	{"Headset Mic", NULL, "Platform Clock"},
	{"Int Mic", NULL, "Platform Clock"},
	{"Ext Spk", NULL, "Platform Clock"},
};

static const struct snd_kcontrol_new cht_mc_controls[] = {
	SOC_DAPM_PIN_SWITCH("Headphone"),
	SOC_DAPM_PIN_SWITCH("Headset Mic"),
	SOC_DAPM_PIN_SWITCH("Int Mic"),
	SOC_DAPM_PIN_SWITCH("Ext Spk"),
};

/* hw_params for the SSP2 back end: clock the codec PLL/sysclk from MCLK at
 * the stream rate (rate * 512). */
static int cht_aif1_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	int ret;

	/* set codec PLL source to the 19.2MHz platform clock (MCLK) */
	ret = snd_soc_dai_set_pll(codec_dai, 0, RT5670_PLL1_S_MCLK,
				  CHT_PLAT_CLK_3_HZ, params_rate(params) * 512);
	if (ret < 0) {
		dev_err(rtd->dev, "can't set codec pll: %d\n", ret);
		return ret;
	}

	/* set codec sysclk source to PLL */
	ret = snd_soc_dai_set_sysclk(codec_dai, RT5670_SCLK_S_PLL1,
				     params_rate(params) * 512,
				     SND_SOC_CLOCK_IN);
	if (ret < 0) {
		dev_err(rtd->dev, "can't set codec sysclk: %d\n", ret);
		return ret;
	}
	return 0;
}

/* One-time codec setup: TDM slots, ASRC clock source, headset jack. */
static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
{
	int ret;
	struct snd_soc_dai *codec_dai = runtime->codec_dai;
	struct snd_soc_codec *codec = codec_dai->codec;

	/* TDM 4 slots 24 bit, set Rx & Tx bitmask to 4 active slots */
	ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xF, 0xF, 4, 24);
	if (ret < 0) {
		dev_err(runtime->dev, "can't set codec TDM slot %d\n", ret);
		return ret;
	}

	/* Select codec ASRC clock source to track I2S1 clock, because codec
	 * is in slave mode and 100fs I2S format (BCLK = 100 * LRCLK) cannot
	 * be supported by RT5672. Otherwise, ASRC will be disabled and cause
	 * noise.
	 */
	rt5670_sel_asrc_clk_src(codec,
				RT5670_DA_STEREO_FILTER
				| RT5670_DA_MONO_L_FILTER
				| RT5670_DA_MONO_R_FILTER
				| RT5670_AD_STEREO_FILTER
				| RT5670_AD_MONO_L_FILTER
				| RT5670_AD_MONO_R_FILTER,
				RT5670_CLK_SEL_I2S1_ASRC);

	ret = snd_soc_card_jack_new(runtime->card, "Headset",
				    SND_JACK_HEADSET | SND_JACK_BTN_0 |
				    SND_JACK_BTN_1 | SND_JACK_BTN_2,
				    &cht_bsw_headset,
				    cht_bsw_headset_pins,
				    ARRAY_SIZE(cht_bsw_headset_pins));
	if (ret)
		return ret;

	rt5670_set_jack_detect(codec, &cht_bsw_headset);
	return 0;
}

/* Force the SSP2 back end to 48 kHz / stereo / 24-bit regardless of the
 * front-end stream parameters. */
static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
			    struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate = hw_param_interval(params,
			SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval *channels = hw_param_interval(params,
						SNDRV_PCM_HW_PARAM_CHANNELS);

	/* The DSP will convert the FE rate to 48k, stereo, 24bits */
	rate->min = rate->max = 48000;
	channels->min = channels->max = 2;

	/* set SSP2 to 24-bit */
	params_set_format(params, SNDRV_PCM_FORMAT_S24_LE);
	return 0;
}

/* Front ends only support 48 kHz (the DSP runs everything at 48k). */
static int cht_aif1_startup(struct snd_pcm_substream *substream)
{
	return snd_pcm_hw_constraint_single(substream->runtime,
			SNDRV_PCM_HW_PARAM_RATE, 48000);
}

static const struct snd_soc_ops cht_aif1_ops = {
	.startup = cht_aif1_startup,
};

static const struct snd_soc_ops cht_be_ssp2_ops = {
	.hw_params = cht_aif1_hw_params,
};

static struct snd_soc_dai_link cht_dailink[] = {
	/* Front End DAI links */
	[MERR_DPCM_AUDIO] = {
		.name = "Audio Port",
		.stream_name = "Audio",
		.cpu_dai_name = "media-cpu-dai",
		.codec_dai_name = "snd-soc-dummy-dai",
		.codec_name = "snd-soc-dummy",
		.platform_name = "sst-mfld-platform",
		.nonatomic = true,
		.dynamic = 1,
		.dpcm_playback = 1,
		.dpcm_capture = 1,
		.ops = &cht_aif1_ops,
	},
	[MERR_DPCM_DEEP_BUFFER] = {
		.name = "Deep-Buffer Audio Port",
		.stream_name = "Deep-Buffer Audio",
		.cpu_dai_name = "deepbuffer-cpu-dai",
		.codec_dai_name = "snd-soc-dummy-dai",
		.codec_name = "snd-soc-dummy",
		.platform_name = "sst-mfld-platform",
		.nonatomic = true,
		.dynamic = 1,
		.dpcm_playback = 1,
		.ops = &cht_aif1_ops,
	},
	[MERR_DPCM_COMPR] = {
		.name = "Compressed Port",
		.stream_name = "Compress",
		.cpu_dai_name = "compress-cpu-dai",
		.codec_dai_name = "snd-soc-dummy-dai",
		.codec_name = "snd-soc-dummy",
		.platform_name = "sst-mfld-platform",
	},
	/* Back End DAI links */
	{
		/* SSP2 - Codec */
		.name = "SSP2-Codec",
		.id = 1,
		.cpu_dai_name = "ssp2-port",
		.platform_name = "sst-mfld-platform",
		.no_pcm = 1,
		.nonatomic = true,
		.codec_dai_name = "rt5670-aif1",
		.codec_name = "i2c-10EC5670:00",
		.dai_fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF
					| SND_SOC_DAIFMT_CBS_CFS,
		.init = cht_codec_init,
		.be_hw_params_fixup = cht_codec_fixup,
		.dpcm_playback = 1,
		.dpcm_capture = 1,
		.ops = &cht_be_ssp2_ops,
	},
};

/* Disable codec jack detection before suspend (it needs a live clock). */
static int cht_suspend_pre(struct snd_soc_card *card)
{
	struct snd_soc_component *component;

	list_for_each_entry(component, &card->component_dev_list, card_list) {
		if (!strcmp(component->name, cht_bsw_codec_name)) {
			struct snd_soc_codec *codec =
				snd_soc_component_to_codec(component);
			dev_dbg(codec->dev,
				"disabling jack detect before going to suspend.\n");
			rt5670_jack_suspend(codec);
			break;
		}
	}
	return 0;
}

/* Re-enable codec jack detection after resume. */
static int cht_resume_post(struct snd_soc_card *card)
{
	struct snd_soc_component *component;

	list_for_each_entry(component, &card->component_dev_list, card_list) {
		if (!strcmp(component->name, cht_bsw_codec_name)) {
			struct snd_soc_codec *codec =
				snd_soc_component_to_codec(component);
			dev_dbg(codec->dev,
				"enabling jack detect for resume.\n");
			rt5670_jack_resume(codec);
			break;
		}
	}
	return 0;
}

/* SoC card */
static struct snd_soc_card snd_soc_card_cht = {
	.name = "cherrytrailcraudio",
	.owner = THIS_MODULE,
	.dai_link = cht_dailink,
	.num_links = ARRAY_SIZE(cht_dailink),
	.dapm_widgets = cht_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(cht_dapm_widgets),
	.dapm_routes = cht_audio_map,
	.num_dapm_routes = ARRAY_SIZE(cht_audio_map),
	.controls = cht_mc_controls,
	.num_controls = ARRAY_SIZE(cht_mc_controls),
	.suspend_pre = cht_suspend_pre,
	.resume_post = cht_resume_post,
};

#define RT5672_I2C_DEFAULT	"i2c-10EC5670:00"

/* Probe: resolve the real codec I2C name from the ACPI HID, patch it into
 * the back-end dai_link, and register the card. */
static int snd_cht_mc_probe(struct platform_device *pdev)
{
	int ret_val = 0;
	struct sst_acpi_mach *mach = pdev->dev.platform_data;
	const char *i2c_name;
	int i;

	strcpy(cht_bsw_codec_name, RT5672_I2C_DEFAULT);

	/* fixup codec name based on HID */
	if (mach) {
		i2c_name = sst_acpi_find_name_from_hid(mach->id);
		if (i2c_name) {
			snprintf(cht_bsw_codec_name, sizeof(cht_bsw_codec_name),
				 "i2c-%s", i2c_name);
			for (i = 0; i < ARRAY_SIZE(cht_dailink); i++) {
				if (!strcmp(cht_dailink[i].codec_name,
					    RT5672_I2C_DEFAULT)) {
					cht_dailink[i].codec_name =
						cht_bsw_codec_name;
					break;
				}
			}
		}
	}

	/* register the soc card */
	snd_soc_card_cht.dev = &pdev->dev;
	ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_cht);
	if (ret_val) {
		dev_err(&pdev->dev,
			"snd_soc_register_card failed %d\n", ret_val);
		return ret_val;
	}
	platform_set_drvdata(pdev, &snd_soc_card_cht);
	return ret_val;
}

static struct platform_driver snd_cht_mc_driver = {
	.driver = {
		.name = "cht-bsw-rt5672",
	},
	.probe = snd_cht_mc_probe,
};

module_platform_driver(snd_cht_mc_driver);

/* NOTE(review): description says "Baytrail CR" although the file header
 * targets Cherrytrail/Braswell -- likely copy-paste; string left unchanged
 * here, confirm upstream before editing. */
MODULE_DESCRIPTION("ASoC Intel(R) Baytrail CR Machine driver");
MODULE_AUTHOR("Subhransu S. Prusty, Mengdong Lin");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:cht-bsw-rt5672");
{ "language": "C" }
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/export.h>

#include <sound/core.h>
#include <sound/emu10k1.h>

/* page arguments of these two macros are Emu page (4096 bytes), not like
 * aligned pages in others.
 * The entry stores the DMA address (shifted by one in high-address mode)
 * OR'ed with the page index, as the hardware expects in the PTB.
 */
#define __set_ptb_entry(emu,page,addr) \
	(((__le32 *)(emu)->ptb_pages.area)[page] = \
	 cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
#define __get_ptb_entry(emu, page) \
	(le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))

/* number of Emu (4k) pages per one kernel page */
#define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES0		(MAXPAGES0 / UNIT_PAGES)
#define MAX_ALIGN_PAGES1		(MAXPAGES1 / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)

#if PAGE_SIZE == EMUPAGESIZE && !IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
/* fill PTB entrie(s) corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
/* fill PTB entrie(s) corresponding to page with silence pointer */
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries per aligned page */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
	int i;
	/* convert aligned (kernel) page index to Emu page index */
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		dev_dbg(emu->card->dev, "mapped page %d to entry %.8x\n", page,
			(unsigned int)__get_ptb_entry(emu, page));
		addr += EMUPAGESIZE;
	}
}
static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		/* do not increment ptr */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
		dev_dbg(emu->card->dev, "mapped silent page %d to entry %.8x\n",
			page, (unsigned int)__get_ptb_entry(emu, page));
	}
}
#endif /* PAGE_SIZE */


/*
 */
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)	list_entry(l, struct snd_emu10k1_memblk, member)


/* initialize emu10k1 part of the memblk: mark as unmapped and derive the
 * aligned page range from the generic memblk offset/size
 */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
	blk->mapped_page = -1;
	INIT_LIST_HEAD(&blk->mapped_link);
	INIT_LIST_HEAD(&blk->mapped_order_link);
	blk->map_locked = 0;

	blk->first_page = get_aligned_page(blk->mem.offset);
	blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
	blk->pages = blk->last_page - blk->first_page + 1;
}

/*
 * search empty region on PTB with the given size
 *
 * if an empty region is found, return the page and store the next mapped block
 * in nextp
 * if not found, return a negative error code.
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
	int page = 1, found_page = -ENOMEM;
	int max_size = npages;
	int size;
	struct list_head *candidate = &emu->mapped_link_head;
	struct list_head *pos;

	/* walk the mapped list (sorted by mapped_page) looking at the gaps */
	list_for_each (pos, &emu->mapped_link_head) {
		struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
		if (blk->mapped_page < 0)
			continue;
		size = blk->mapped_page - page;
		if (size == npages) {
			/* exact fit - take it immediately */
			*nextp = pos;
			return page;
		}
		else if (size > max_size) {
			/* we look for the maximum empty hole */
			max_size = size;
			candidate = pos;
			found_page = page;
		}
		page = blk->mapped_page + blk->pages;
	}
	/* check the tail region after the last mapped block */
	size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
	if (size >= max_size) {
		*nextp = pos;
		return page;
	}
	*nextp = candidate;
	return found_page;
}

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, pg;
	struct list_head *next;

	page = search_empty_map_area(emu, blk->pages, &next);
	if (page < 0) /* not found */
		return page;
	if (page == 0) {
		dev_err(emu->card->dev, "trying to map zero (reserved) page\n");
		return -EINVAL;
	}
	/* insert this block in the proper position of mapped list */
	list_add_tail(&blk->mapped_link, next);
	/* append this as a newest block in order list */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = page;
	/* fill PTB */
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
		page++;
	}
	return 0;
}

/*
 * unmap the block
 * return the size of resultant empty pages
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int start_page, end_page, mpage, pg;
	struct list_head *p;
	struct snd_emu10k1_memblk *q;

	/* calculate the expected size of empty region:
	 * the hole spans from the end of the previous mapped block
	 * (or page 1, since page 0 is reserved) to the start of the next one
	 */
	if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		start_page = q->mapped_page + q->pages;
	} else
		start_page = 1;
	if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		end_page = q->mapped_page;
	} else
		end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);

	/* remove links */
	list_del(&blk->mapped_link);
	list_del(&blk->mapped_order_link);
	/* clear PTB (point the freed entries at the silent page) */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_silent_ptb(emu, mpage);
		mpage++;
	}
	blk->mapped_page = -1;
	return end_page - start_page; /* return the new empty size */
}

/*
 * search empty pages with the given size, and create a memory block
 *
 * unlike synth_alloc the memory block is aligned to the page start
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *blk;
	int page, psize;

	/* round up to whole aligned pages */
	psize = get_aligned_page(size + PAGE_SIZE -1);
	page = 0;
	list_for_each(p, &emu->memhdr->block) {
		blk = get_emu10k1_memblk(p, mem.list);
		if (page + psize <= blk->first_page)
			goto __found_pages;
		page = blk->last_page + 1;
	}
	if (page + psize > emu->max_cache_pages)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
	if (blk == NULL)
		return NULL;
	blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
	emu10k1_memblk_init(blk);
	return blk;
}


/*
 * check if the given pointer is valid for pages:
 * it must lie within the DMA mask and be Emu-page aligned
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		dev_err_ratelimited(emu->card->dev,
			"max memory size is 0x%lx (addr = 0x%lx)!!\n",
			emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE-1)) {
		dev_err_ratelimited(emu->card->dev, "page is not aligned\n");
		return 0;
	}
	return 1;
}

/*
 * map the given memory block on PTB.
 * if the block is already mapped, update the link order.
* if no empty pages are found, tries to release unused memory blocks * and retry the mapping. */ int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk) { int err; int size; struct list_head *p, *nextp; struct snd_emu10k1_memblk *deleted; unsigned long flags; spin_lock_irqsave(&emu->memblk_lock, flags); if (blk->mapped_page >= 0) { /* update order link */ list_move_tail(&blk->mapped_order_link, &emu->mapped_order_link_head); spin_unlock_irqrestore(&emu->memblk_lock, flags); return 0; } if ((err = map_memblk(emu, blk)) < 0) { /* no enough page - try to unmap some blocks */ /* starting from the oldest block */ p = emu->mapped_order_link_head.next; for (; p != &emu->mapped_order_link_head; p = nextp) { nextp = p->next; deleted = get_emu10k1_memblk(p, mapped_order_link); if (deleted->map_locked) continue; size = unmap_memblk(emu, deleted); if (size >= blk->pages) { /* ok the empty region is enough large */ err = map_memblk(emu, blk); break; } } } spin_unlock_irqrestore(&emu->memblk_lock, flags); return err; } EXPORT_SYMBOL(snd_emu10k1_memblk_map); /* * page allocation for DMA */ struct snd_util_memblk * snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_util_memhdr *hdr; struct snd_emu10k1_memblk *blk; int page, err, idx; if (snd_BUG_ON(!emu)) return NULL; if (snd_BUG_ON(runtime->dma_bytes <= 0 || runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE)) return NULL; hdr = emu->memhdr; if (snd_BUG_ON(!hdr)) return NULL; idx = runtime->period_size >= runtime->buffer_size ? 
(emu->delay_pcm_irq * 2) : 0; mutex_lock(&hdr->block_mutex); blk = search_empty(emu, runtime->dma_bytes + idx); if (blk == NULL) { mutex_unlock(&hdr->block_mutex); return NULL; } /* fill buffer addresses but pointers are not stored so that * snd_free_pci_page() is not called in in synth_free() */ idx = 0; for (page = blk->first_page; page <= blk->last_page; page++, idx++) { unsigned long ofs = idx << PAGE_SHIFT; dma_addr_t addr; if (ofs >= runtime->dma_bytes) addr = emu->silent_page.addr; else addr = snd_pcm_sgbuf_get_addr(substream, ofs); if (! is_valid_page(emu, addr)) { dev_err_ratelimited(emu->card->dev, "emu: failure page = %d\n", idx); mutex_unlock(&hdr->block_mutex); return NULL; } emu->page_addr_table[page] = addr; emu->page_ptr_table[page] = NULL; } /* set PTB entries */ blk->map_locked = 1; /* do not unmap this block! */ err = snd_emu10k1_memblk_map(emu, blk); if (err < 0) { __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk); mutex_unlock(&hdr->block_mutex); return NULL; } mutex_unlock(&hdr->block_mutex); return (struct snd_util_memblk *)blk; } /* * release DMA buffer from page table */ int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk) { if (snd_BUG_ON(!emu || !blk)) return -EINVAL; return snd_emu10k1_synth_free(emu, blk); } /* * allocate DMA pages, widening the allocation if necessary * * See the comment above snd_emu10k1_detect_iommu() in emu10k1_main.c why * this might be needed. * * If you modify this function check whether __synth_free_pages() also needs * changes. */ int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size, struct snd_dma_buffer *dmab) { if (emu->iommu_workaround) { size_t npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; size_t size_real = npages * PAGE_SIZE; /* * The device has been observed to accesses up to 256 extra * bytes, but use 1k to be safe. 
*/ if (size_real < size + 1024) size += PAGE_SIZE; } return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci), size, dmab); } /* * memory allocation using multiple pages (for synth) * Unlike the DMA allocation above, non-contiguous pages are assined. */ /* * allocate a synth sample area */ struct snd_util_memblk * snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size) { struct snd_emu10k1_memblk *blk; struct snd_util_memhdr *hdr = hw->memhdr; mutex_lock(&hdr->block_mutex); blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size); if (blk == NULL) { mutex_unlock(&hdr->block_mutex); return NULL; } if (synth_alloc_pages(hw, blk)) { __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk); mutex_unlock(&hdr->block_mutex); return NULL; } snd_emu10k1_memblk_map(hw, blk); mutex_unlock(&hdr->block_mutex); return (struct snd_util_memblk *)blk; } EXPORT_SYMBOL(snd_emu10k1_synth_alloc); /* * free a synth sample area */ int snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk) { struct snd_util_memhdr *hdr = emu->memhdr; struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk; unsigned long flags; mutex_lock(&hdr->block_mutex); spin_lock_irqsave(&emu->memblk_lock, flags); if (blk->mapped_page >= 0) unmap_memblk(emu, blk); spin_unlock_irqrestore(&emu->memblk_lock, flags); synth_free_pages(emu, blk); __snd_util_mem_free(hdr, memblk); mutex_unlock(&hdr->block_mutex); return 0; } EXPORT_SYMBOL(snd_emu10k1_synth_free); /* check new allocation range */ static void get_single_page_range(struct snd_util_memhdr *hdr, struct snd_emu10k1_memblk *blk, int *first_page_ret, int *last_page_ret) { struct list_head *p; struct snd_emu10k1_memblk *q; int first_page, last_page; first_page = blk->first_page; if ((p = blk->mem.list.prev) != &hdr->block) { q = get_emu10k1_memblk(p, mem.list); if (q->last_page == first_page) first_page++; /* first page was already allocated */ } last_page = blk->last_page; if ((p = 
blk->mem.list.next) != &hdr->block) { q = get_emu10k1_memblk(p, mem.list); if (q->first_page == last_page) last_page--; /* last page was already allocated */ } *first_page_ret = first_page; *last_page_ret = last_page; } /* release allocated pages */ static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page, int last_page) { struct snd_dma_buffer dmab; int page; dmab.dev.type = SNDRV_DMA_TYPE_DEV; dmab.dev.dev = snd_dma_pci_data(emu->pci); for (page = first_page; page <= last_page; page++) { if (emu->page_ptr_table[page] == NULL) continue; dmab.area = emu->page_ptr_table[page]; dmab.addr = emu->page_addr_table[page]; /* * please keep me in sync with logic in * snd_emu10k1_alloc_pages_maybe_wider() */ dmab.bytes = PAGE_SIZE; if (emu->iommu_workaround) dmab.bytes *= 2; snd_dma_free_pages(&dmab); emu->page_addr_table[page] = 0; emu->page_ptr_table[page] = NULL; } } /* * allocate kernel pages */ static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk) { int page, first_page, last_page; struct snd_dma_buffer dmab; emu10k1_memblk_init(blk); get_single_page_range(emu->memhdr, blk, &first_page, &last_page); /* allocate kernel pages */ for (page = first_page; page <= last_page; page++) { if (snd_emu10k1_alloc_pages_maybe_wider(emu, PAGE_SIZE, &dmab) < 0) goto __fail; if (!is_valid_page(emu, dmab.addr)) { snd_dma_free_pages(&dmab); goto __fail; } emu->page_addr_table[page] = dmab.addr; emu->page_ptr_table[page] = dmab.area; } return 0; __fail: /* release allocated pages */ last_page = page - 1; __synth_free_pages(emu, first_page, last_page); return -ENOMEM; } /* * free pages */ static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk) { int first_page, last_page; get_single_page_range(emu->memhdr, blk, &first_page, &last_page); __synth_free_pages(emu, first_page, last_page); return 0; } /* calculate buffer pointer from offset address */ static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int 
offset) { char *ptr; if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages)) return NULL; ptr = emu->page_ptr_table[page]; if (! ptr) { dev_err(emu->card->dev, "access to NULL ptr: page = %d\n", page); return NULL; } ptr += offset & (PAGE_SIZE - 1); return (void*)ptr; } /* * bzero(blk + offset, size) */ int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk, int offset, int size) { int page, nextofs, end_offset, temp, temp1; void *ptr; struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk; offset += blk->offset & (PAGE_SIZE - 1); end_offset = offset + size; page = get_aligned_page(offset); do { nextofs = aligned_page_offset(page + 1); temp = nextofs - offset; temp1 = end_offset - offset; if (temp1 < temp) temp = temp1; ptr = offset_ptr(emu, page + p->first_page, offset); if (ptr) memset(ptr, 0, temp); offset = nextofs; page++; } while (offset < end_offset); return 0; } EXPORT_SYMBOL(snd_emu10k1_synth_bzero); /* * copy_from_user(blk + offset, data, size) */ int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk, int offset, const char __user *data, int size) { int page, nextofs, end_offset, temp, temp1; void *ptr; struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk; offset += blk->offset & (PAGE_SIZE - 1); end_offset = offset + size; page = get_aligned_page(offset); do { nextofs = aligned_page_offset(page + 1); temp = nextofs - offset; temp1 = end_offset - offset; if (temp1 < temp) temp = temp1; ptr = offset_ptr(emu, page + p->first_page, offset); if (ptr && copy_from_user(ptr, data, temp)) return -EFAULT; offset = nextofs; data += temp; page++; } while (offset < end_offset); return 0; } EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);
{ "language": "C" }
/******************************************************************************
 * Copyright (c) 2013-2016 Realtek Semiconductor Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/

/* SHA-1 / HMAC-SHA1 ROM interface (streaming init/update/finish API). */

#ifndef _ROM_SHA1_
#define _ROM_SHA1_

#ifndef _SHA_enum_
#define _SHA_enum_
/* Status codes returned by the rt_sha1_* functions. */
enum {
	shaSuccess = 0,
	shaNull,		/* Null pointer parameter */
	shaInputTooLong,	/* input data too long */
	shaStateError		/* called Input after Result */
};
#endif
/* Size of the SHA-1 digest in bytes (160 bits). */
#define SHA1HashSize 20

/*
 *  This structure will hold context information for the SHA-1
 *  hashing operation
 */
typedef struct SHA1Context {
	u32 Intermediate_Hash[SHA1HashSize / 4];	/* Message Digest */

	u32 Length_Low;		/* Message length in bits */
	u32 Length_High;	/* Message length in bits */

	/* Index into message block array */
	u16 Message_Block_Index;
	u8 Message_Block[64];	/* 512-bit message blocks */

	int Computed;		/* Is the digest computed? */
	int Corrupted;		/* Is the message digest corrupted? */
} SHA1Context;

/*
 *  Function Prototypes
 *
 *  Typical usage: rt_sha1_init(), any number of rt_sha1_update() calls,
 *  then rt_sha1_finish() to retrieve the digest.
 */
int rt_sha1_init(SHA1Context *);
int rt_sha1_update(SHA1Context *, const u8 *, unsigned int);
int rt_sha1_finish(SHA1Context *, u8 Message_Digest[SHA1HashSize]);
/* One-shot HMAC-SHA1 over text with the given key; digest is
 * SHA1HashSize bytes. */
void rt_hmac_sha1(unsigned char *text, int text_len, unsigned char *key,
		  int key_len, unsigned char *digest);

#endif //_ROM_SHA1_
{ "language": "C" }
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // Intel License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000, Intel Corporation, all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of Intel Corporation may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #ifndef _CV_LIST_H_ #define _CV_LIST_H_ #include <stdlib.h> #include <assert.h> #define CV_FORCE_INLINE CV_INLINE #if !defined(_LIST_INLINE) #define _LIST_INLINE CV_FORCE_INLINE #endif /*_LIST_INLINE*/ #if defined DECLARE_LIST #if defined _MSC_VER && _MSC_VER >= 1200 #pragma warning("DECLARE_LIST macro is already defined!") #endif #endif /*DECLARE_LIST*/ static const long default_size = 10; static const long default_inc_size = 10; struct _pos { void* m_pos; #ifdef _DEBUG struct _list* m_list; #endif /*_DEBUG*/ }; typedef struct _pos CVPOS; struct _list { void* m_buffer; void* m_first_buffer; long m_buf_size; /* The size of the buffer */ long m_size; /* The number of elements */ CVPOS m_head; CVPOS m_tail; CVPOS m_head_free; }; typedef struct _list _CVLIST; #define DECLARE_LIST(type, prefix)\ /* Basic element of a list*/\ struct prefix##element_##type\ {\ struct prefix##element_##type* m_prev;\ struct prefix##element_##type* m_next;\ type m_data;\ };\ typedef struct prefix##element_##type ELEMENT_##type;\ /* Initialization and destruction*/\ _LIST_INLINE _CVLIST* prefix##create_list_##type(long);\ _LIST_INLINE void prefix##destroy_list_##type(_CVLIST*);\ /* Access functions*/\ _LIST_INLINE CVPOS prefix##get_head_pos_##type(_CVLIST*);\ _LIST_INLINE CVPOS prefix##get_tail_pos_##type(_CVLIST*);\ _LIST_INLINE type* prefix##get_next_##type(CVPOS*);\ _LIST_INLINE type* prefix##get_prev_##type(CVPOS*);\ _LIST_INLINE int 
prefix##is_pos_##type(CVPOS pos);\ /* Modification functions*/\ _LIST_INLINE void prefix##clear_list_##type(_CVLIST*);\ _LIST_INLINE CVPOS prefix##add_head_##type(_CVLIST*, type*);\ _LIST_INLINE CVPOS prefix##add_tail_##type(_CVLIST*, type*);\ _LIST_INLINE void prefix##remove_head_##type(_CVLIST*);\ _LIST_INLINE void prefix##remove_tail_##type(_CVLIST*);\ _LIST_INLINE CVPOS prefix##insert_before_##type(_CVLIST*, CVPOS, type*);\ _LIST_INLINE CVPOS prefix##insert_after_##type(_CVLIST*, CVPOS, type*);\ _LIST_INLINE void prefix##remove_at_##type(_CVLIST*, CVPOS);\ _LIST_INLINE void prefix##set_##type(CVPOS, type*);\ _LIST_INLINE type* prefix##get_##type(CVPOS);\ /* Statistics functions*/\ _LIST_INLINE int prefix##get_count_##type(_CVLIST*); /* This macro finds a space for a new element and puts in into 'element' pointer */ #define INSERT_NEW(element_type, l, element)\ l->m_size++;\ if(l->m_head_free.m_pos != NULL)\ {\ element = (element_type*)(l->m_head_free.m_pos);\ if(element->m_next != NULL)\ {\ element->m_next->m_prev = NULL;\ l->m_head_free.m_pos = element->m_next;\ }\ else\ {\ l->m_head_free.m_pos = NULL;\ }\ }\ else\ {\ if(l->m_buf_size < l->m_size && l->m_head_free.m_pos == NULL)\ {\ *(void**)l->m_buffer = cvAlloc(l->m_buf_size*sizeof(element_type) + sizeof(void*));\ l->m_buffer = *(void**)l->m_buffer;\ *(void**)l->m_buffer = NULL;\ element = (element_type*)((char*)l->m_buffer + sizeof(void*));\ }\ else\ {\ element = (element_type*)((char*)l->m_buffer + sizeof(void*)) + l->m_size - 1;\ }\ } /* This macro adds 'element' to the list of free elements*/ #define INSERT_FREE(element_type, l, element)\ if(l->m_head_free.m_pos != NULL)\ {\ ((element_type*)l->m_head_free.m_pos)->m_prev = element;\ }\ element->m_next = ((element_type*)l->m_head_free.m_pos);\ l->m_head_free.m_pos = element; /*#define GET_FIRST_FREE(l) ((ELEMENT_##type*)(l->m_head_free.m_pos))*/ #define IMPLEMENT_LIST(type, prefix)\ _CVLIST* prefix##create_list_##type(long size)\ {\ _CVLIST* pl = 
(_CVLIST*)cvAlloc(sizeof(_CVLIST));\ pl->m_buf_size = size > 0 ? size : default_size;\ pl->m_first_buffer = cvAlloc(pl->m_buf_size*sizeof(ELEMENT_##type) + sizeof(void*));\ pl->m_buffer = pl->m_first_buffer;\ *(void**)pl->m_buffer = NULL;\ pl->m_size = 0;\ pl->m_head.m_pos = NULL;\ pl->m_tail.m_pos = NULL;\ pl->m_head_free.m_pos = NULL;\ return pl;\ }\ void prefix##destroy_list_##type(_CVLIST* l)\ {\ void* cur = l->m_first_buffer;\ void* next;\ while(cur)\ {\ next = *(void**)cur;\ cvFree(&cur);\ cur = next;\ }\ cvFree(&l);\ }\ CVPOS prefix##get_head_pos_##type(_CVLIST* l)\ {\ return l->m_head;\ }\ CVPOS prefix##get_tail_pos_##type(_CVLIST* l)\ {\ return l->m_tail;\ }\ type* prefix##get_next_##type(CVPOS* pos)\ {\ if(pos->m_pos)\ {\ ELEMENT_##type* element = (ELEMENT_##type*)(pos->m_pos);\ pos->m_pos = element->m_next;\ return &element->m_data;\ }\ else\ {\ return NULL;\ }\ }\ type* prefix##get_prev_##type(CVPOS* pos)\ {\ if(pos->m_pos)\ {\ ELEMENT_##type* element = (ELEMENT_##type*)(pos->m_pos);\ pos->m_pos = element->m_prev;\ return &element->m_data;\ }\ else\ {\ return NULL;\ }\ }\ int prefix##is_pos_##type(CVPOS pos)\ {\ return !!pos.m_pos;\ }\ void prefix##clear_list_##type(_CVLIST* l)\ {\ l->m_head.m_pos = NULL;\ l->m_tail.m_pos = NULL;\ l->m_size = 0;\ l->m_head_free.m_pos = NULL;\ }\ CVPOS prefix##add_head_##type(_CVLIST* l, type* data)\ {\ ELEMENT_##type* element;\ INSERT_NEW(ELEMENT_##type, l, element);\ element->m_prev = NULL;\ element->m_next = (ELEMENT_##type*)(l->m_head.m_pos);\ memcpy(&(element->m_data), data, sizeof(*data));\ if(element->m_next)\ {\ element->m_next->m_prev = element;\ }\ else\ {\ l->m_tail.m_pos = element;\ }\ l->m_head.m_pos = element;\ return l->m_head;\ }\ CVPOS prefix##add_tail_##type(_CVLIST* l, type* data)\ {\ ELEMENT_##type* element;\ INSERT_NEW(ELEMENT_##type, l, element);\ element->m_next = NULL;\ element->m_prev = (ELEMENT_##type*)(l->m_tail.m_pos);\ memcpy(&(element->m_data), data, sizeof(*data));\ if(element->m_prev)\ {\ 
element->m_prev->m_next = element;\ }\ else\ {\ l->m_head.m_pos = element;\ }\ l->m_tail.m_pos = element;\ return l->m_tail;\ }\ void prefix##remove_head_##type(_CVLIST* l)\ {\ ELEMENT_##type* element = ((ELEMENT_##type*)(l->m_head.m_pos));\ if(element->m_next != NULL)\ {\ element->m_next->m_prev = NULL;\ }\ l->m_head.m_pos = element->m_next;\ INSERT_FREE(ELEMENT_##type, l, element);\ l->m_size--;\ }\ void prefix##remove_tail_##type(_CVLIST* l)\ {\ ELEMENT_##type* element = ((ELEMENT_##type*)(l->m_tail.m_pos));\ if(element->m_prev != NULL)\ {\ element->m_prev->m_next = NULL;\ }\ l->m_tail.m_pos = element->m_prev;\ INSERT_FREE(ELEMENT_##type, l, element);\ l->m_size--;\ }\ CVPOS prefix##insert_after_##type(_CVLIST* l, CVPOS pos, type* data)\ {\ ELEMENT_##type* element;\ ELEMENT_##type* before;\ CVPOS newpos;\ INSERT_NEW(ELEMENT_##type, l, element);\ memcpy(&(element->m_data), data, sizeof(*data));\ before = (ELEMENT_##type*)pos.m_pos;\ element->m_prev = before;\ element->m_next = before->m_next;\ before->m_next = element;\ if(element->m_next != NULL)\ element->m_next->m_prev = element;\ else\ l->m_tail.m_pos = element;\ newpos.m_pos = element;\ return newpos;\ }\ CVPOS prefix##insert_before_##type(_CVLIST* l, CVPOS pos, type* data)\ {\ ELEMENT_##type* element;\ ELEMENT_##type* after;\ CVPOS newpos;\ INSERT_NEW(ELEMENT_##type, l, element);\ memcpy(&(element->m_data), data, sizeof(*data));\ after = (ELEMENT_##type*)pos.m_pos;\ element->m_prev = after->m_prev;\ element->m_next = after;\ after->m_prev = element;\ if(element->m_prev != NULL)\ element->m_prev->m_next = element;\ else\ l->m_head.m_pos = element;\ newpos.m_pos = element;\ return newpos;\ }\ void prefix##remove_at_##type(_CVLIST* l, CVPOS pos)\ {\ ELEMENT_##type* element = ((ELEMENT_##type*)pos.m_pos);\ if(element->m_prev != NULL)\ {\ element->m_prev->m_next = element->m_next;\ }\ else\ {\ l->m_head.m_pos = element->m_next;\ }\ if(element->m_next != NULL)\ {\ element->m_next->m_prev = element->m_prev;\ }\ 
else\ {\ l->m_tail.m_pos = element->m_prev;\ }\ INSERT_FREE(ELEMENT_##type, l, element);\ l->m_size--;\ }\ void prefix##set_##type(CVPOS pos, type* data)\ {\ ELEMENT_##type* element = ((ELEMENT_##type*)(pos.m_pos));\ memcpy(&(element->m_data), data, sizeof(*data));\ }\ type* prefix##get_##type(CVPOS pos)\ {\ ELEMENT_##type* element = ((ELEMENT_##type*)(pos.m_pos));\ return &(element->m_data);\ }\ int prefix##get_count_##type(_CVLIST* list)\ {\ return list->m_size;\ } #define DECLARE_AND_IMPLEMENT_LIST(type, prefix)\ DECLARE_LIST(type, prefix)\ IMPLEMENT_LIST(type, prefix) typedef struct __index { int value; float rho, theta; } _index; DECLARE_LIST( _index, h_ ) #endif/*_CV_LIST_H_*/
{ "language": "C" }
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991, 1992 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1999-2001 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

/* Routines for maintaining maps describing heap block
 * layouts for various object sizes.  Allows fast pointer validity checks
 * and fast location of object start locations on machines (such as SPARC)
 * with slow division.
 */

/* Consider pointers that are offset bytes displaced from the beginning */
/* of an object to be valid.                                            */

/* Public, locked wrapper around GC_register_displacement_inner(). */
GC_API void GC_CALL GC_register_displacement(size_t offset)
{
    DCL_LOCK_STATE;

    LOCK();
    GC_register_displacement_inner(offset);
    UNLOCK();
}

/* Mark offset (and its value mod sizeof(word)) as a valid interior
 * displacement.  Caller must hold the allocator lock.
 */
GC_INNER void GC_register_displacement_inner(size_t offset)
{
    GC_ASSERT(I_HOLD_LOCK());
    if (offset >= VALID_OFFSET_SZ) {
        ABORT("Bad argument to GC_register_displacement");
    }
    if (!GC_valid_offsets[offset]) {
      GC_valid_offsets[offset] = TRUE;
      GC_modws_valid_offsets[offset % sizeof(word)] = TRUE;
    }
}

#ifdef MARK_BIT_PER_GRANULE
  /* Add a heap block map for objects of size granules to obj_map.      */
  /* Return FALSE on failure.                                           */
  /* A size of 0 granules is used for large objects.                    */
  GC_INNER GC_bool GC_add_map_entry(size_t granules)
  {
    unsigned displ;
    unsigned short * new_map;

    if (granules > BYTES_TO_GRANULES(MAXOBJBYTES)) granules = 0;
    if (GC_obj_map[granules] != 0) {
        /* map already exists for this size class */
        return(TRUE);
    }
    new_map = (unsigned short *)GC_scratch_alloc(MAP_LEN * sizeof(short));
    if (new_map == 0) return(FALSE);
    GC_COND_LOG_PRINTF(
                "Adding block map for size of %u granules (%u bytes)\n",
                (unsigned)granules, (unsigned)GRANULES_TO_BYTES(granules));
    if (granules == 0) {
      for (displ = 0; displ < BYTES_TO_GRANULES(HBLKSIZE); displ++) {
        new_map[displ] = 1;  /* Nonzero to get us out of marker fast path. */
      }
    } else {
      /* map each granule displacement to its offset within the object */
      for (displ = 0; displ < BYTES_TO_GRANULES(HBLKSIZE); displ++) {
        new_map[displ] = (unsigned short)(displ % granules);
      }
    }
    GC_obj_map[granules] = new_map;
    return(TRUE);
  }
#endif /* MARK_BIT_PER_GRANULE */

/* Initialize the valid-offset tables; with GC_all_interior_pointers
 * every displacement is accepted, otherwise only registered ones.
 */
GC_INNER void GC_initialize_offsets(void)
{
  unsigned i;
  if (GC_all_interior_pointers) {
    for (i = 0; i < VALID_OFFSET_SZ; ++i)
      GC_valid_offsets[i] = TRUE;
  } else {
    BZERO(GC_valid_offsets, sizeof(GC_valid_offsets));
    for (i = 0; i < sizeof(word); ++i)
      GC_modws_valid_offsets[i] = FALSE;
  }
}
{ "language": "C" }
/* crypto/asn1/t_req.c */ /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) * All rights reserved. * * This package is an SSL implementation written * by Eric Young (eay@cryptsoft.com). * The implementation was written so as to conform with Netscapes SSL. * * This library is free for commercial and non-commercial use as long as * the following conditions are aheared to. The following conditions * apply to all code found in this distribution, be it the RC4, RSA, * lhash, DES, etc., code; not just the SSL code. The SSL documentation * included with this distribution is covered by the same copyright terms * except that the holder is Tim Hudson (tjh@cryptsoft.com). * * Copyright remains Eric Young's, and as such any Copyright notices in * the code are not to be removed. * If this package is used in a product, Eric Young should be given attribution * as the author of the parts of the library used. * This can be in the form of a textual message at program startup or * in documentation (online or textual) provided with the package. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * "This product includes cryptographic software written by * Eric Young (eay@cryptsoft.com)" * The word 'cryptographic' can be left out if the rouines from the library * being used are not cryptographic related :-). * 4. 
If you include any Windows specific code (or a derivative thereof) from * the apps directory (application code) you must include an acknowledgement: * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" * * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The licence and distribution terms for any publically available version or * derivative of this code cannot be changed. i.e. this code cannot simply be * copied and put under another distribution licence * [including the GNU Public Licence.] 
*/
#include <stdio.h>
#include "cryptlib.h"
#include <openssl/buffer.h>
#include <openssl/bn.h>
#include <openssl/objects.h>
#include <openssl/x509.h>
#include <openssl/x509v3.h>
#ifndef OPENSSL_NO_RSA
#include <openssl/rsa.h>
#endif
#ifndef OPENSSL_NO_DSA
#include <openssl/dsa.h>
#endif

#ifndef OPENSSL_NO_FP_API
/* Print a certificate request to a stdio FILE by wrapping it in a
 * non-closing file BIO and delegating to X509_REQ_print.
 * Returns 1 on success, 0 on failure. */
int X509_REQ_print_fp(FILE *fp, X509_REQ *x)
	{
	BIO *b;
	int ret;

	if ((b=BIO_new(BIO_s_file())) == NULL)
		{
		X509err(X509_F_X509_REQ_PRINT_FP,ERR_R_BUF_LIB);
		return(0);
		}
	BIO_set_fp(b,fp,BIO_NOCLOSE);
	ret=X509_REQ_print(b, x);
	BIO_free(b);
	return(ret);
	}
#endif

/* Pretty-print a PKCS#10 certificate request to 'bp'.
 * nmflags controls X509_NAME formatting; cflag is a bitmask of
 * X509_FLAG_NO_* values selecting which sections to suppress.
 * Returns 1 on success, 0 on failure (with X509err set). */
int X509_REQ_print_ex(BIO *bp, X509_REQ *x, unsigned long nmflags, unsigned long cflag)
	{
	unsigned long l;
	int i;
	const char *neg;
	X509_REQ_INFO *ri;
	EVP_PKEY *pkey;
	STACK_OF(X509_ATTRIBUTE) *sk;
	STACK_OF(X509_EXTENSION) *exts;
	char mlch = ' ';
	int nmindent = 0;

	if((nmflags & XN_FLAG_SEP_MASK) == XN_FLAG_SEP_MULTILINE) {
			mlch = '\n';
			nmindent = 12;
	}

	if(nmflags == X509_FLAG_COMPAT)
		nmindent = 16;

	ri=x->req_info;
	if(!(cflag & X509_FLAG_NO_HEADER))
		{
		if (BIO_write(bp,"Certificate Request:\n",21) <= 0) goto err;
		/* NOTE(review): the explicit length constants (10 here, 33
		 * below) must match the literal byte-for-byte, including any
		 * leading spaces — confirm the literals were not altered by
		 * whitespace normalization, or BIO_write reads past the
		 * string. */
		if (BIO_write(bp," Data:\n",10) <= 0) goto err;
		}
	if(!(cflag & X509_FLAG_NO_VERSION))
		{
		/* Decode the raw ASN.1 INTEGER bytes into an unsigned long. */
		neg=(ri->version->type == V_ASN1_NEG_INTEGER)?"-":"";
		l=0;
		for (i=0; i<ri->version->length; i++)
			{
			l<<=8;
			l+=ri->version->data[i];
			}
		if(BIO_printf(bp,"%8sVersion: %s%lu (%s0x%lx)\n","",neg,l,neg, l) <= 0)
			goto err;
		}
	if(!(cflag & X509_FLAG_NO_SUBJECT))
		{
		if (BIO_printf(bp," Subject:%c",mlch) <= 0) goto err;
		if (X509_NAME_print_ex(bp,ri->subject,nmindent, nmflags) < 0) goto err;
		if (BIO_write(bp,"\n",1) <= 0) goto err;
		}
	if(!(cflag & X509_FLAG_NO_PUBKEY))
		{
		if (BIO_write(bp," Subject Public Key Info:\n",33) <= 0) goto err;
		if (BIO_printf(bp,"%12sPublic Key Algorithm: ","") <= 0) goto err;
		if (i2a_ASN1_OBJECT(bp, ri->pubkey->algor->algorithm) <= 0) goto err;
		if (BIO_puts(bp, "\n") <= 0) goto err;
		pkey=X509_REQ_get_pubkey(x);
		if (pkey == NULL)
			{
			/* Non-fatal: report and keep printing the rest. */
			BIO_printf(bp,"%12sUnable to load Public Key\n","");
			ERR_print_errors(bp);
			}
		else
			{
			EVP_PKEY_print_public(bp, pkey, 16, NULL);
			EVP_PKEY_free(pkey);
			}
		}
	if(!(cflag & X509_FLAG_NO_ATTRIBUTES))
		{
		/* may not be */
		if(BIO_printf(bp,"%8sAttributes:\n","") <= 0) goto err;

		sk=x->req_info->attributes;
		if (sk_X509_ATTRIBUTE_num(sk) == 0)
			{
			if(BIO_printf(bp,"%12sa0:00\n","") <= 0) goto err;
			}
		else
			{
			for (i=0; i<sk_X509_ATTRIBUTE_num(sk); i++)
				{
				ASN1_TYPE *at;
				X509_ATTRIBUTE *a;
				ASN1_BIT_STRING *bs=NULL;
				ASN1_TYPE *t;
				int j,type=0,count=1,ii=0;

				a=sk_X509_ATTRIBUTE_value(sk,i);
				/* Extension-request attributes are printed in
				 * the dedicated extensions section below. */
				if(X509_REQ_extension_nid(OBJ_obj2nid(a->object)))
					continue;
				if(BIO_printf(bp,"%12s","") <= 0) goto err;
				if ((j=i2a_ASN1_OBJECT(bp,a->object)) > 0)
					{
					if (a->single)
						{
						t=a->value.single;
						type=t->type;
						bs=t->value.bit_string;
						}
					else
						{
						/* SET OF values: iterate via the
						 * get_next label until ii == count. */
						ii=0;
						count=sk_ASN1_TYPE_num(a->value.set);
get_next:
						at=sk_ASN1_TYPE_value(a->value.set,ii);
						type=at->type;
						bs=at->value.asn1_string;
						}
					}
				/* Pad the attribute name out to column 25. */
				for (j=25-j; j>0; j--)
					if (BIO_write(bp," ",1) != 1) goto err;
				if (BIO_puts(bp,":") <= 0) goto err;
				if ( (type == V_ASN1_PRINTABLESTRING) ||
					(type == V_ASN1_T61STRING) ||
					(type == V_ASN1_IA5STRING))
					{
					if (BIO_write(bp,(char *)bs->data,bs->length) != bs->length)
						goto err;
					BIO_puts(bp,"\n");
					}
				else
					{
					BIO_puts(bp,"unable to print attribute\n");
					}
				if (++ii < count) goto get_next;
				}
			}
		}
	if(!(cflag & X509_FLAG_NO_EXTENSIONS))
		{
		exts = X509_REQ_get_extensions(x);
		if(exts)
			{
			BIO_printf(bp,"%8sRequested Extensions:\n","");
			for (i=0; i<sk_X509_EXTENSION_num(exts); i++)
				{
				ASN1_OBJECT *obj;
				X509_EXTENSION *ex;
				int j;
				ex=sk_X509_EXTENSION_value(exts, i);
				if (BIO_printf(bp,"%12s","") <= 0) goto err;
				obj=X509_EXTENSION_get_object(ex);
				i2a_ASN1_OBJECT(bp,obj);
				j=X509_EXTENSION_get_critical(ex);
				if (BIO_printf(bp,": %s\n",j?"critical":"") <= 0)
					goto err;
				/* Fall back to a raw octet dump when no
				 * extension-specific printer exists. */
				if(!X509V3_EXT_print(bp, ex, cflag, 16))
					{
					BIO_printf(bp, "%16s", "");
					M_ASN1_OCTET_STRING_print(bp,ex->value);
					}
				if (BIO_write(bp,"\n",1) <= 0) goto err;
				}
			sk_X509_EXTENSION_pop_free(exts, X509_EXTENSION_free);
			}
		}

	if(!(cflag & X509_FLAG_NO_SIGDUMP))
		{
		if(!X509_signature_print(bp, x->sig_alg, x->signature)) goto err;
		}

	return(1);
err:
	X509err(X509_F_X509_REQ_PRINT_EX,ERR_R_BUF_LIB);
	return(0);
	}

/* Compatibility wrapper: print with the legacy default flags. */
int X509_REQ_print(BIO *bp, X509_REQ *x)
	{
	return X509_REQ_print_ex(bp, x, XN_FLAG_COMPAT, X509_FLAG_COMPAT);
	}
{ "language": "C" }
/* -*- C -*- */
/*
 * block_template.c : Generic framework for block encryption algorithms
 *
 * Distribute and use freely; there are no restrictions on further
 * dissemination and usage except those imposed by the laws of your
 * country of residence. This software is provided "as is" without
 * warranty of fitness for use or suitability for any purpose, express
 * or implied. Use at your own risk or not at all.
 *
 */
/* This template is compiled once per cipher; the including build defines
 * MODULE_NAME, BLOCK_SIZE, KEY_SIZE, block_state, block_init,
 * block_encrypt and block_decrypt.  Written against the Python 2 C API
 * (PyString, Py_InitModule, staticforward). */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef _HAVE_STDC_HEADERS
#include <string.h>
#endif
#include "Python.h"
#include "modsupport.h"

/* Cipher operation modes */
#define MODE_ECB 1
#define MODE_CBC 2
#define MODE_CFB 3
#define MODE_PGP 4
#define MODE_OFB 5
#define MODE_CTR 6

/* Token-pasting helpers: build initMODULE_NAME and "MODULE_NAME". */
#define _STR(x) #x
#define _XSTR(x) _STR(x)
#define _PASTE(x,y) x##y
#define _PASTE2(x,y) _PASTE(x,y)
#define _MODULE_NAME _PASTE2(init,MODULE_NAME)
#define _MODULE_STRING _XSTR(MODULE_NAME)

/* Per-cipher-object state.
 * mode         - one of the MODE_* constants
 * count        - PGP-mode position within the current IV block (8 = synced)
 * segment_size - CFB segment size in bits
 * IV/oldCipher - current and previous feedback blocks
 * counter      - Python callable supplying counter blocks (CTR mode only)
 * st           - cipher-specific key schedule */
typedef struct
{
	PyObject_HEAD
	int mode, count, segment_size;
	unsigned char IV[BLOCK_SIZE], oldCipher[BLOCK_SIZE];
	PyObject *counter;
	block_state st;
} ALGobject;

staticforward PyTypeObject ALGtype;

#define is_ALGobject(v) ((v)->ob_type == &ALGtype)

/* Allocate a new cipher object with default mode and no counter.
 * NOTE(review): PyObject_New can return NULL on out-of-memory; the
 * result is dereferenced without a check — callers inherit the risk. */
static ALGobject *
newALGobject(void)
{
	ALGobject * new;
	new = PyObject_New(ALGobject, &ALGtype);
	new->mode = MODE_ECB;
	new->counter = NULL;
	return new;
}

/* Deallocator: scrub key material and IVs before freeing the object. */
static void
ALGdealloc(PyObject *ptr)
{
	ALGobject *self = (ALGobject *)ptr;
	/* Overwrite the contents of the object.
	 * NOTE(review): plain memset before free may be elided by the
	 * optimizer; a secure-wipe primitive would be stronger. */
	Py_XDECREF(self->counter);
	self->counter = NULL;
	memset(self->IV, 0, BLOCK_SIZE);
	memset(self->oldCipher, 0, BLOCK_SIZE);
	memset((char*)&(self->st), 0, sizeof(block_state));
	self->mode = self->count = self->segment_size = 0;
	PyObject_Del(ptr);
}

static char ALGnew__doc__[] =
"new(key, [mode], [IV]): Return a new " _MODULE_STRING
" encryption object.";

static char *kwlist[] = {"key", "mode", "IV", "counter", "segment_size",
#ifdef PCT_RC5_MODULE
			 "version", "word_size", "rounds",
#endif
			 NULL};

/* Module-level factory: parse arguments, validate them, build and key a
 * cipher object.  Returns NULL with a Python exception set on error. */
static ALGobject *
ALGnew(PyObject *self, PyObject *args, PyObject *kwdict)
{
	unsigned char *key, *IV;
	ALGobject * new=NULL;
	int keylen, IVlen=0, mode=MODE_ECB, segment_size=0;
	PyObject *counter = NULL;
#ifdef PCT_RC5_MODULE
	int version = 0x10, word_size = 32, rounds = 16; /*XXX default rounds? */
#endif
	/* Set default values */
	if (!PyArg_ParseTupleAndKeywords(args, kwdict, "s#|is#Oi"
#ifdef PCT_RC5_MODULE
					 "iii"
#endif
					 , kwlist,
					 &key, &keylen, &mode, &IV, &IVlen,
					 &counter, &segment_size
#ifdef PCT_RC5_MODULE
					 , &version, &word_size, &rounds
#endif
		))
	{
		return NULL;
	}

	if (KEY_SIZE!=0 && keylen!=KEY_SIZE)
	{
		PyErr_Format(PyExc_ValueError,
			     "Key must be %i bytes long, not %i",
			     KEY_SIZE, keylen);
		return NULL;
	}
	if (KEY_SIZE==0 && keylen==0)
	{
		PyErr_SetString(PyExc_ValueError,
				"Key cannot be the null string");
		return NULL;
	}
	if (IVlen != BLOCK_SIZE && IVlen != 0)
	{
		PyErr_Format(PyExc_ValueError,
			     "IV must be %i bytes long", BLOCK_SIZE);
		return NULL;
	}
	if (mode<MODE_ECB || mode>MODE_CTR)
	{
		PyErr_Format(PyExc_ValueError,
			     "Unknown cipher feedback mode %i",
			     mode);
		return NULL;
	}

	/* Mode-specific checks */
	if (mode == MODE_CFB)
	{
		if (segment_size == 0) segment_size = 8;
		/* NOTE(review): the message promises "multiple of 8" but the
		 * code only range-checks; and there is no `return NULL` here —
		 * the error is only surfaced by the PyErr_Occurred() check
		 * after block_init below.  Confirm intent before changing. */
		if (segment_size < 1 || segment_size > BLOCK_SIZE*8)
		{
			PyErr_Format(PyExc_ValueError,
				     "segment_size must be multiple of 8 "
				     "between 1 and %i", BLOCK_SIZE);
		}
	}

	if (mode == MODE_CTR)
	{
		/* NOTE(review): same missing early return as above — the
		 * exception is raised, but keying still proceeds until the
		 * PyErr_Occurred() check. */
		if (!PyCallable_Check(counter))
		{
			PyErr_SetString(PyExc_ValueError,
					"'counter' parameter must be a callable object");
		}
	}
	else
	{
		if (counter != NULL)
		{
			PyErr_SetString(PyExc_ValueError,
					"'counter' parameter only useful with CTR mode");
		}
	}

	/* Cipher-specific checks */
#ifdef PCT_RC5_MODULE
	if (version!=0x10)
	{
		PyErr_Format(PyExc_ValueError,
			     "RC5: Bad RC5 algorithm version: %i",
			     version);
		return NULL;
	}
	if (word_size!=16 && word_size!=32)
	{
		PyErr_Format(PyExc_ValueError,
			     "RC5: Unsupported word size: %i",
			     word_size);
		return NULL;
	}
	if (rounds<0 || 255<rounds)
	{
		PyErr_Format(PyExc_ValueError,
			     "RC5: rounds must be between 0 and 255, not %i",
			     rounds);
		return NULL;
	}
#endif

	/* Copy parameters into object */
	new = newALGobject();
	new->segment_size = segment_size;
	new->counter = counter;
	Py_XINCREF(counter);
#ifdef PCT_RC5_MODULE
	new->st.version = version;
	new->st.word_size = word_size;
	new->st.rounds = rounds;
#endif
	/* Run the cipher's key schedule; any pending exception (including
	 * ones raised by the unchecked validations above) aborts here. */
	block_init(&(new->st), key, keylen);
	if (PyErr_Occurred())
	{
		Py_DECREF(new);
		return NULL;
	}
	memset(new->IV, 0, BLOCK_SIZE);
	memset(new->oldCipher, 0, BLOCK_SIZE);
	memcpy(new->IV, IV, IVlen);
	new->mode = mode;
	new->count=8;
	return new;
}

static char ALG_Encrypt__doc__[] =
"Encrypt the provided string of binary data.";

/* encrypt(data) -> ciphertext string.
 * Length must be a multiple of BLOCK_SIZE except in CFB (segment
 * multiple) and PGP modes.  Updates the object's feedback state. */
static PyObject *
ALG_Encrypt(ALGobject *self, PyObject *args)
{
	unsigned char *buffer, *str;
	unsigned char temp[BLOCK_SIZE];
	int i, j, len;
	PyObject *result;

	if (!PyArg_Parse(args, "s#", &str, &len))
		return NULL;
	if (len==0)			/* Handle empty string */
	{
		return PyString_FromStringAndSize(NULL, 0);
	}
	if ( (len % BLOCK_SIZE) !=0 &&
	     (self->mode!=MODE_CFB) && (self->mode!=MODE_PGP))
	{
		PyErr_Format(PyExc_ValueError,
			     "Input strings must be "
			     "a multiple of %i in length",
			     BLOCK_SIZE);
		return NULL;
	}
	if (self->mode == MODE_CFB &&
	    (len % (self->segment_size/8) !=0)) {
		PyErr_Format(PyExc_ValueError,
			     "Input strings must be a multiple of "
			     "the segment size %i in length",
			     self->segment_size/8);
		return NULL;
	}
	buffer=malloc(len);
	if (buffer==NULL)
	{
		PyErr_SetString(PyExc_MemoryError,
				"No memory available in "
				_MODULE_STRING " encrypt");
		return NULL;
	}
	switch(self->mode)
	{
	case(MODE_ECB):
		/* Each block encrypted independently. */
		for(i=0; i<len; i+=BLOCK_SIZE)
		{
			block_encrypt(&(self->st), str+i, buffer+i);
		}
		break;
	case(MODE_CBC):
		/* XOR plaintext with previous ciphertext (IV), then encrypt. */
		for(i=0; i<len; i+=BLOCK_SIZE)
		{
			for(j=0; j<BLOCK_SIZE; j++)
			{
				temp[j]=str[i+j]^self->IV[j];
			}
			block_encrypt(&(self->st), temp, buffer+i);
			memcpy(self->IV, buffer+i, BLOCK_SIZE);
		}
		break;
	case(MODE_CFB):
		/* Encrypt the IV, XOR a segment of keystream, then shift the
		 * new ciphertext segment into the IV. */
		for(i=0; i<len; i+=self->segment_size/8)
		{
			block_encrypt(&(self->st), self->IV, temp);
			for (j=0; j<self->segment_size/8; j++) {
				buffer[i+j] = str[i+j] ^ temp[j];
			}
			if (self->segment_size == BLOCK_SIZE * 8) {
				/* s == b: segment size is identical to
				   the algorithm block size */
				memcpy(self->IV, buffer + i, BLOCK_SIZE);
			}
			else if ((self->segment_size % 8) == 0) {
				int sz = self->segment_size/8;
				memmove(self->IV, self->IV + sz,
					BLOCK_SIZE-sz);
				memcpy(self->IV + BLOCK_SIZE - sz, buffer + i,
				       sz);
			}
			else {
				/* segment_size is not a multiple of 8;
				   currently this can't happen */
			}
		}
		break;
	case(MODE_PGP):
		/* OpenPGP-style CFB with self->count tracking the offset
		 * into the current IV block. */
		if (len<=BLOCK_SIZE-self->count)
		{
			/* If less than one block, XOR it in */
			for(i=0; i<len; i++)
				buffer[i] = self->IV[self->count+i] ^= str[i];
			self->count += len;
		}
		else
		{
			int j;
			for(i=0; i<BLOCK_SIZE-self->count; i++)
				buffer[i] = self->IV[self->count+i] ^= str[i];
			self->count=0;
			for(; i<len-BLOCK_SIZE; i+=BLOCK_SIZE)
			{
				block_encrypt(&(self->st), self->oldCipher,
					      self->IV);
				for(j=0; j<BLOCK_SIZE; j++)
					buffer[i+j] = self->IV[j] ^= str[i+j];
			}
			/* Do the remaining 1 to BLOCK_SIZE bytes */
			block_encrypt(&(self->st), self->oldCipher, self->IV);
			self->count=len-i;
			for(j=0; j<len-i; j++)
			{
				buffer[i+j] = self->IV[j] ^= str[i+j];
			}
		}
		break;
	case(MODE_OFB):
		/* Keystream = repeated encryption of the IV; XOR with input. */
		for(i=0; i<len; i+=BLOCK_SIZE)
		{
			block_encrypt(&(self->st), self->IV, temp);
			memcpy(self->IV, temp, BLOCK_SIZE);
			for(j=0; j<BLOCK_SIZE; j++)
			{
				buffer[i+j] = str[i+j] ^ temp[j];
			}
		}
		break;
	case(MODE_CTR):
		/* Call the user-supplied counter for each block and encrypt
		 * its return value to produce keystream. */
		for(i=0; i<len; i+=BLOCK_SIZE)
		{
			PyObject *ctr = PyObject_CallObject(self->counter, NULL);
			if (ctr == NULL) {
				free(buffer);
				return NULL;
			}
			if (!PyString_Check(ctr))
			{
				PyErr_SetString(PyExc_TypeError,
						"CTR counter function didn't return a string");
				Py_DECREF(ctr);
				free(buffer);
				return NULL;
			}
			if (PyString_Size(ctr) != BLOCK_SIZE) {
				PyErr_Format(PyExc_TypeError,
					     "CTR counter function returned "
					     "string not of length %i",
					     BLOCK_SIZE);
				Py_DECREF(ctr);
				free(buffer);
				return NULL;
			}
			block_encrypt(&(self->st), PyString_AsString(ctr),
				      temp);
			Py_DECREF(ctr);
			for(j=0; j<BLOCK_SIZE; j++)
			{
				buffer[i+j] = str[i+j]^temp[j];
			}
		}
		break;
	default:
		PyErr_Format(PyExc_SystemError,
			     "Unknown ciphertext feedback mode %i; "
			     "this shouldn't happen",
			     self->mode);
		free(buffer);
		return NULL;
	}
	result=PyString_FromStringAndSize(buffer, len);
	free(buffer);
	return(result);
}

static char ALG_Decrypt__doc__[] =
"decrypt(string): Decrypt the provided string of binary data.";

/* decrypt(data) -> plaintext string.  Mirror of ALG_Encrypt; note that
 * CFB/OFB/CTR/PGP decryption also uses block_encrypt (stream modes). */
static PyObject *
ALG_Decrypt(ALGobject *self, PyObject *args)
{
	unsigned char *buffer, *str;
	unsigned char temp[BLOCK_SIZE];
	int i, j, len;
	PyObject *result;

	if (!PyArg_Parse(args, "s#", &str, &len))
		return NULL;
	if (len==0)			/* Handle empty string */
	{
		return PyString_FromStringAndSize(NULL, 0);
	}
	if ( (len % BLOCK_SIZE) !=0 &&
	     (self->mode!=MODE_CFB && self->mode!=MODE_PGP))
	{
		PyErr_Format(PyExc_ValueError,
			     "Input strings must be "
			     "a multiple of %i in length",
			     BLOCK_SIZE);
		return NULL;
	}
	if (self->mode == MODE_CFB &&
	    (len % (self->segment_size/8) !=0)) {
		PyErr_Format(PyExc_ValueError,
			     "Input strings must be a multiple of "
			     "the segment size %i in length",
			     self->segment_size/8);
		return NULL;
	}
	buffer=malloc(len);
	if (buffer==NULL)
	{
		PyErr_SetString(PyExc_MemoryError,
				"No memory available in "
				_MODULE_STRING " decrypt");
		return NULL;
	}
	switch(self->mode)
	{
	case(MODE_ECB):
		for(i=0; i<len; i+=BLOCK_SIZE)
		{
			block_decrypt(&(self->st), str+i, buffer+i);
		}
		break;
	case(MODE_CBC):
		/* Decrypt, XOR with previous ciphertext (IV), then advance IV. */
		for(i=0; i<len; i+=BLOCK_SIZE)
		{
			memcpy(self->oldCipher, self->IV, BLOCK_SIZE);
			block_decrypt(&(self->st), str+i, temp);
			for(j=0; j<BLOCK_SIZE; j++)
			{
				buffer[i+j]=temp[j]^self->IV[j];
				self->IV[j]=str[i+j];
			}
		}
		break;
	case(MODE_CFB):
		/* Same keystream generation as encryption, but the IV is
		 * refilled from the incoming ciphertext. */
		for(i=0; i<len; i+=self->segment_size/8)
		{
			block_encrypt(&(self->st), self->IV, temp);
			for (j=0; j<self->segment_size/8; j++) {
				buffer[i+j] = str[i+j]^temp[j];
			}
			if (self->segment_size == BLOCK_SIZE * 8) {
				/* s == b: segment size is identical to
				   the algorithm block size */
				memcpy(self->IV, str + i, BLOCK_SIZE);
			}
			else if ((self->segment_size % 8) == 0) {
				int sz = self->segment_size/8;
				memmove(self->IV, self->IV + sz,
					BLOCK_SIZE-sz);
				memcpy(self->IV + BLOCK_SIZE - sz, str + i,
				       sz);
			}
			else {
				/* segment_size is not a multiple of 8;
				   currently this can't happen */
			}
		}
		break;
	case(MODE_PGP):
		/* PGP CFB decryption: feed ciphertext back into the IV while
		 * recovering plaintext ('t' holds the pre-overwrite byte). */
		if (len<=BLOCK_SIZE-self->count)
		{
			/* If less than one block, XOR it in */
			unsigned char t;
			for(i=0; i<len; i++)
			{
				t=self->IV[self->count+i];
				buffer[i] = t ^ (self->IV[self->count+i] = str[i]);
			}
			self->count += len;
		}
		else
		{
			int j;
			unsigned char t;
			for(i=0; i<BLOCK_SIZE-self->count; i++)
			{
				t=self->IV[self->count+i];
				buffer[i] = t ^ (self->IV[self->count+i] = str[i]);
			}
			self->count=0;
			for(; i<len-BLOCK_SIZE; i+=BLOCK_SIZE)
			{
				block_encrypt(&(self->st), self->oldCipher, self->IV);
				for(j=0; j<BLOCK_SIZE; j++)
				{
					t=self->IV[j];
					buffer[i+j] = t ^ (self->IV[j] = str[i+j]);
				}
			}
			/* Do the remaining 1 to BLOCK_SIZE bytes */
			block_encrypt(&(self->st), self->oldCipher, self->IV);
			self->count=len-i;
			for(j=0; j<len-i; j++)
			{
				t=self->IV[j];
				buffer[i+j] = t ^ (self->IV[j] = str[i+j]);
			}
		}
		break;
	case (MODE_OFB):
		for(i=0; i<len; i+=BLOCK_SIZE)
		{
			block_encrypt(&(self->st), self->IV, temp);
			memcpy(self->IV, temp, BLOCK_SIZE);
			for(j=0; j<BLOCK_SIZE; j++)
			{
				buffer[i+j] = str[i+j] ^ self->IV[j];
			}
		}
		break;
	case (MODE_CTR):
		for(i=0; i<len; i+=BLOCK_SIZE)
		{
			PyObject *ctr = PyObject_CallObject(self->counter, NULL);
			if (ctr == NULL) {
				free(buffer);
				return NULL;
			}
			if (!PyString_Check(ctr))
			{
				PyErr_SetString(PyExc_TypeError,
						"CTR counter function didn't return a string");
				Py_DECREF(ctr);
				free(buffer);
				return NULL;
			}
			if (PyString_Size(ctr) != BLOCK_SIZE) {
				PyErr_SetString(PyExc_TypeError,
						"CTR counter function returned string of incorrect length");
				Py_DECREF(ctr);
				free(buffer);
				return NULL;
			}
			block_encrypt(&(self->st), PyString_AsString(ctr), temp);
			Py_DECREF(ctr);
			for(j=0; j<BLOCK_SIZE; j++)
			{
				buffer[i+j] = str[i+j]^temp[j];
			}
		}
		break;
	default:
		PyErr_Format(PyExc_SystemError,
			     "Unknown ciphertext feedback mode %i; "
			     "this shouldn't happen", self->mode);
		free(buffer);
		return NULL;
	}
	result=PyString_FromStringAndSize(buffer, len);
	free(buffer);
	return(result);
}

static char ALG_Sync__doc__[] =
"sync(): For objects using the PGP feedback mode, this method modifies "
"the IV, synchronizing it with the preceding ciphertext.";

/* sync(): realign the PGP-mode IV with the preceding ciphertext.
 * Only meaningful when mode == MODE_PGP; count == 8 means already synced. */
static PyObject *
ALG_Sync(ALGobject *self, PyObject *args)
{
	if (!PyArg_ParseTuple(args, "")) {
		return NULL;
	}

	if (self->mode!=MODE_PGP)
	{
		PyErr_SetString(PyExc_SystemError,
				"sync() operation not defined for "
				"this feedback mode");
		return NULL;
	}

	if (self->count!=8)
	{
		memmove(self->IV+BLOCK_SIZE-self->count, self->IV,
			self->count);
		memcpy(self->IV, self->oldCipher+self->count,
		       BLOCK_SIZE-self->count);
		self->count=8;
	}
	Py_INCREF(Py_None);
	return Py_None;
}

#if 0
/* Debug helper (disabled): dump count, IV and oldCipher to stdout. */
void PrintState(self, msg)
     ALGobject *self;
     char * msg;
{
	int count;

	printf("%sing: %i IV ", msg, (int)self->count);
	for(count=0; count<8; count++) printf("%i ", self->IV[count]);
	printf("\noldCipher:");
	for(count=0; count<8; count++) printf("%i ", self->oldCipher[count]);
	printf("\n");
}
#endif

/* ALG object methods */
static PyMethodDef ALGmethods[] =
{
	{"encrypt", (PyCFunction) ALG_Encrypt, 0, ALG_Encrypt__doc__},
	{"decrypt", (PyCFunction) ALG_Decrypt, 0, ALG_Decrypt__doc__},
	{"sync", (PyCFunction) ALG_Sync, METH_VARARGS, ALG_Sync__doc__},
	{NULL, NULL}			/* sentinel */
};

/* Attribute setter: only the "IV" attribute is writable, and it must be
 * a string of exactly BLOCK_SIZE bytes. */
static int
ALGsetattr(PyObject *ptr, char *name, PyObject *v)
{
	ALGobject *self=(ALGobject *)ptr;
	if (strcmp(name, "IV") != 0)
	{
		PyErr_Format(PyExc_AttributeError,
			     "non-existent block cipher object attribute '%s'",
			     name);
		return -1;
	}
	if (v==NULL)
	{
		PyErr_SetString(PyExc_AttributeError,
				"Can't delete IV attribute of block cipher object");
		return -1;
	}
	if (!PyString_Check(v))
	{
		PyErr_SetString(PyExc_TypeError,
				"IV attribute of block cipher object must be string");
		return -1;
	}
	if (PyString_Size(v)!=BLOCK_SIZE)
	{
		PyErr_Format(PyExc_ValueError,
			     _MODULE_STRING " IV must be %i bytes long",
			     BLOCK_SIZE);
		return -1;
	}
	memcpy(self->IV, PyString_AsString(v), BLOCK_SIZE);
	return 0;
}

/* Attribute getter: exposes IV, mode, block_size and key_size, then
 * falls back to the method table. */
static PyObject *
ALGgetattr(PyObject *s, char *name)
{
	ALGobject *self = (ALGobject*)s;
	if (strcmp(name, "IV") == 0)
	{
		return(PyString_FromStringAndSize(self->IV, BLOCK_SIZE));
	}
	if (strcmp(name, "mode") == 0)
	{
		return(PyInt_FromLong((long)(self->mode)));
	}
	if (strcmp(name, "block_size") == 0)
	{
		return PyInt_FromLong(BLOCK_SIZE);
	}
	if (strcmp(name, "key_size") == 0)
	{
		return PyInt_FromLong(KEY_SIZE);
	}
	return Py_FindMethod(ALGmethods, (PyObject *) self, name);
}

/* List of functions defined in the module */
static struct PyMethodDef modulemethods[] =
{
	{"new", (PyCFunction) ALGnew,
	 METH_VARARGS|METH_KEYWORDS, ALGnew__doc__},
	{NULL, NULL}			/* sentinel */
};

static PyTypeObject ALGtype =
{
	PyObject_HEAD_INIT(NULL)
	0,				/*ob_size*/
	_MODULE_STRING,			/*tp_name*/
	sizeof(ALGobject),		/*tp_size*/
	0,				/*tp_itemsize*/
	/* methods */
	ALGdealloc,			/*tp_dealloc*/
	0,				/*tp_print*/
	ALGgetattr,			/*tp_getattr*/
	ALGsetattr,			/*tp_setattr*/
	0,				/*tp_compare*/
	(reprfunc) 0,			/*tp_repr*/
	0,				/*tp_as_number*/
};

/* Initialization function for the module */
#if PYTHON_API_VERSION < 1011
#define PyModule_AddIntConstant(m,n,v) {PyObject *o=PyInt_FromLong(v); \
	   if (o!=NULL) \
	     {PyDict_SetItemString(PyModule_GetDict(m),n,o); Py_DECREF(o);}}
#endif

/* Module entry point (expands to initMODULE_NAME): registers the type,
 * the "new" factory, and the MODE_*/size constants. */
void
_MODULE_NAME (void)
{
	PyObject *m;

	ALGtype.ob_type = &PyType_Type;
	/* Create the module and add the functions */
	m = Py_InitModule(_MODULE_STRING, modulemethods);

	PyModule_AddIntConstant(m, "MODE_ECB", MODE_ECB);
	PyModule_AddIntConstant(m, "MODE_CBC", MODE_CBC);
	PyModule_AddIntConstant(m, "MODE_CFB", MODE_CFB);
	PyModule_AddIntConstant(m, "MODE_PGP", MODE_PGP);
	PyModule_AddIntConstant(m, "MODE_OFB", MODE_OFB);
	PyModule_AddIntConstant(m, "MODE_CTR", MODE_CTR);
	PyModule_AddIntConstant(m, "block_size", BLOCK_SIZE);
	PyModule_AddIntConstant(m, "key_size", KEY_SIZE);

	/* Check for errors */
	if (PyErr_Occurred())
		Py_FatalError("can't initialize module " _MODULE_STRING);
}
{ "language": "C" }
/*
 * Copyright (C) 2010 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_environ.h"
#include <stddef.h>

/* Cached pointer to the process environment block; set by linker_env_init. */
static char** _envp;

/* Returns 1 if 'str' points to a valid environment variable definition.
 * For now, we check that:
 * - It is smaller than MAX_ENV_LEN (to detect non-zero terminated strings)
 * - It contains at least one equal sign that is not the first character
 */
static int _is_valid_definition(const char* str)
{
    int pos = 0;
    int first_equal_pos = -1;

    /* According to its sources, the kernel uses 32*PAGE_SIZE by default
     * as the maximum size for an env. variable definition.
     */
    const int MAX_ENV_LEN = 32*4096;

    if (str == NULL)
        return 0;

    /* Parse the string, looking for the first '=' there, and its size */
    do {
        if (str[pos] == '\0')
            break;
        if (str[pos] == '=' && first_equal_pos < 0)
            first_equal_pos = pos;
        pos++;
    } while (pos < MAX_ENV_LEN);

    if (pos >= MAX_ENV_LEN)  /* Too large */
        return 0;

    if (first_equal_pos < 1)  /* No equal sign, or it is the first character */
        return 0;

    return 1;
}

/* Initialize the environment: record the envp array found at 'vecs',
 * compact it in place so it keeps only valid definitions, and return
 * the address of the aux vectors table that follows it. */
unsigned* linker_env_init(unsigned* vecs)
{
    /* Store environment pointer - can't be NULL */
    _envp = (char**) vecs;

    /* Skip over all definitions */
    while (vecs[0] != 0)
        vecs++;
    /* The end of the environment block is marked by two NULL pointers */
    vecs++;

    /* As a sanity check, we're going to remove all invalid variable
     * definitions from the environment array.
     */
    {
        char** readp = _envp;
        char** writep = _envp;
        for ( ; readp[0] != NULL; readp++ ) {
            /* Skip entries that are not valid definitions. */
            if (!_is_valid_definition(readp[0]))
                continue;
            /* Keep only the valid environment entries. */
            writep[0] = readp[0];
            writep++;
        }
        writep[0] = NULL;
    }

    /* Return the address of the aux vectors table */
    return vecs;
}

/* Check if the environment variable definition at 'envstr'
 * starts with '<name>=', and if so return the address of the
 * first character after the equal sign. Otherwise return NULL.
 */
static char* env_match(char* envstr, const char* name)
{
    size_t cnt = 0;

    while (envstr[cnt] == name[cnt] && name[cnt] != '\0')
        cnt++;

    if (name[cnt] == '\0' && envstr[cnt] == '=')
        return envstr + cnt + 1;  /* pointer to the variable's value */

    return NULL;
}

/* NOTE(review): this macro appears unused in this part of the file and
 * disagrees with the 32*4096 limit used inside _is_valid_definition —
 * confirm whether anything else references it before removing. */
#define MAX_ENV_LEN (16*4096)

/* Look up the value of the environment variable 'name'.
 * Returns NULL when the variable is absent or its value is empty. */
const char* linker_env_get(const char* name)
{
    char** readp = _envp;

    if (name == NULL || name[0] == '\0')
        return NULL;

    for ( ; readp[0] != NULL; readp++ ) {
        char* val = env_match(readp[0], name);

        if (val != NULL) {
            /* Return NULL for empty strings, or if it is too large */
            if (val[0] == '\0')
                val = NULL;
            return val;
        }
    }
    return NULL;
}

/* Remove every entry named 'name' from the cached environment array. */
void linker_env_unset(const char* name)
{
    char** readp = _envp;
    char** writep = readp;

    if (name == NULL || name[0] == '\0')
        return;

    for ( ; readp[0] != NULL; readp++ ) {
        /* Drop entries whose name matches. */
        if (env_match(readp[0], name))
            continue;
        writep[0] = readp[0];
        writep++;
    }
    /* end list with a NULL */
    writep[0] = NULL;
}

/* Remove unsafe environment variables. This should be used when
 * running setuid programs. */
void linker_env_secure(void)
{
    /* The same list than GLibc at this point */
    static const char* const unsec_vars[] = {
        "GCONV_PATH",
        "GETCONF_DIR",
        "HOSTALIASES",
        "LD_AUDIT",
        "LD_DEBUG",
        "LD_DEBUG_OUTPUT",
        "LD_DYNAMIC_WEAK",
        "LD_LIBRARY_PATH",
        "LD_ORIGIN_PATH",
        "LD_PRELOAD",
        "LD_PROFILE",
        "LD_SHOW_AUXV",
        "LD_USE_LOAD_BIAS",
        "LOCALDOMAIN",
        "LOCPATH",
        "MALLOC_TRACE",
        "MALLOC_CHECK_",
        "NIS_PATH",
        "NLSPATH",
        "RESOLV_HOST_CONF",
        "RES_OPTIONS",
        "TMPDIR",
        "TZDIR",
        "LD_AOUT_LIBRARY_PATH",
        "LD_AOUT_PRELOAD",
        NULL
    };

    int count;
    for (count = 0; unsec_vars[count] != NULL; count++) {
        linker_env_unset(unsec_vars[count]);
    }
}
{ "language": "C" }
#include "StdAfx.h" #include "../res/resource.h" #include "about.h" #pragma comment(lib,"WinInet") const char* Common::c_about_dlg::about_str = "软件说明:\r\n" " *. 软件用C语言/C++/SDK方式完成, 绿色小巧, 不会给系统带来任何的垃圾文件\r\n" " *. 能实现对串口通过的读取与写入,16进制与字符方式两种\r\n" " *. 支持文件发送,保存到文本\r\n" " *. 支持自动发送,自定义发送周期\r\n" " *. 自动识别已有串口,检测串口插入与移除\r\n" " *. 支持串口超时设置\r\n" " *. 支持DTR/RTS引脚电平控制\r\n" " *. 提供对驱动设置的支持\r\n" " *. 提供256个ASCII码表供代码转换时参考\r\n" " *. 提供小工具:字符串转16进制数组\r\n" " *. 提供表达式求值计算器,包括基本算术/逻辑运算\r\n" " *. 接收字符数据时, 对'\\b'控制字符支持\r\n" "-----------------------------------------------------\r\n" "更新历史:\r\n" "2012-12-24 1.0.0.0:\r\n" " 发布第1个版本\r\n" "2012-12-26:\r\n" " 自动识别当前存在,插入,移除的串口号\r\n" "2013-01-11 1.0.0.1:\r\n" " 增加保存接收区数据到文件(16进制/文本形式)\r\n" " 增加从文件读数据到发送区(16进制/文本形式)\r\n" " 增加暂停显示功能\r\n" " 增加复制发送/接收区数据到剪贴板\r\n" "2013-01-18 1.0.0.2:\r\n" " 修复:文本文件,16二进制文件读取错误\r\n" " 修复:程序内部缓冲区满后使程序进入死循环\r\n" " 修复:文本字符方式显示接收的数据时产生不正确的换行符的错误,若要产生换行符, 请使用\"\\n\"\r\n" "2013-02-08 1.0.0.3:\r\n" " 内部程序作了许多的优化工作,包含数据的发送方式等\r\n" " 修复接收数据时鼠标在接收区的文本选择造成的干扰\r\n" "2013-02-14 1.0.0.4:\r\n" " 增加显示出0~127号ASCII对应8,10,16进制功能\r\n" "2013-02-24 1.0.0.5,今天元宵节:\r\n" " 更改原来的1~64串口列表到自动检测计算机上的可用串口\r\n" "2013-02-27 1.0.0.6:\r\n" " 若发送文本,则自动发送被自动取消(若自动发送选项已打开)\r\n" " 在显示模式下不允许对接收区数据进行选择操作\r\n" " 提供硬件支持的串口设备设置\r\n" " 为用户提供串口超时设置\r\n" " 提供手动设置DTR/RTS引脚电平\r\n" "2013-03-01 1.0.0.7:\r\n" " 修改原计算器(系统)为表达式求值计算器(简单版本)\r\n" "2013-03-03:\r\n" " 添加:<其它>菜单添加<设备管理器>\r\n" " 修改:在关闭串口后自动发送前面的钩不再自动取消(如果已经选中)\r\n" " 修改:串口被关闭/移除后串口列表回到第一个串口设备的BUG\r\n" "2013-03-04:\r\n" " 修改:现在在串口列表中可以显示串口在设备管理器中的名字了\r\n" " 修正:无法显示 MSP430-FETUIF Debugger 的串口号(现在调用SetupApi更新列表)\r\n" "2013-03-05:\r\n" " 为了方便数据的统计与显示,16进制内容与字符内容被显示到不同的编辑框中\r\n" "2013-03-09 1.0.0.8:\r\n" " 修正在使用SetupApi枚举串口设备时未检测并口设备而造成的内存异常访问错误\r\n" " 减少在某些波特率(如:19200bps)下丢包严重的情况(如:MSP430串口),有时候还是会发生,等待修复.某些软件(如:SComAssistant" "采用每次只读一个字节的办法效果还行, 就是速度有点慢. 
我改成了WaitCommEvent函数调用了(原来是Pending ReadFile),减少了CPU占用(有些串口驱动并不总是支持同步操作).\r\n" " 以前只管ReadFile+输出nRead字节,这里错误,ReadFile并不保证读取到要求的数据量后才返回,这里会导致严重丢包,WriteFile亦然.\r\n" " 速度减慢,但数据更完整\r\n" "2013-03-10 1.0.0.9:\r\n" " 修正:因为在格式化字符串的最后少写了一句 *pb = \'\\0\',导致接收区数据显示错误!" " 修复:对utils.hex2chs和add_text作了大量修改,大大减少数据丢包,貌似没有丢包?,细节处理参见源程序\r\n" " 1.0.0.8版本因为内部原因速度严重减慢, 1.0.0.9回到原来的快速!\r\n" "2013-03-18:\r\n" " 更正:若为字符显示方式,16进制方式保存不被允许,因为格式基本上不满足!\r\n" "2013-03-23 1.10:\r\n" " 添加:工作模式中,右键点击接收区字符文本框可以使能中文显示模式(不推荐),由于中文字符由两个字节构成,所以:一旦在某一次接收过程中只" "接收到了中文字符的一个字节,那么数据就会显示出错, 这个无法避免, 所以建议尽量不使能中文显示模式.\r\n" " 修正:用C语言的人们都习惯使用'\\n'作为换行符,我也这样使用," "但偏偏Windows的编辑框以'\\r\\n'作为换行符,没有办法,我不得" "不把所有的'\\n'换成'\\r\\n',效率必然会下降,而且我不得不计算出" "\\n的个数先 --> 为了计算所需缓冲区的大小.\r\n" " 添加:现在可以显示出还未被发送出去的数据计数.\r\n" " 添加:新增计时器,打开串口后开始计时,关闭后停止计时.\r\n" "2013-03-25:\r\n" " 修正:大大减少中文乱码的问题.细节处理见代码.现在应该可以放心地使能中文显示模式了.\r\n" " 增加:字符串转16进制数组功能,工具菜单里面.\r\n" "2013-04-04:\r\n" " 修正:无法复制接收区字符文本的BUG.\r\n" " 小提示:在选择串口时,如果没有任何可用的串口,则进行提示更新.\r\n" "2013-04-07:\r\n" " 修改:完全修改了utils.str2hex的实现,大大增加了16进制输入的灵活性.\r\n" "现在的要求:每个16进制值由两个相邻字符组成,无其它限制.(以前是2个相邻字符+一个空格)\r\n" "2013-04-11:\r\n" " 发送字符数据时,对于换行,只发送\'\\n\',不再发送\'\\r\\n\',注意:结尾的\'\\0\\\'不被发送!\r\n" "2013-04-13:\r\n" " 修正:更改发送与接收方式为异步方式.\r\n" " 添加:简单波形显示.\r\n" "2013-04-23:\r\n" " 修正:发送与接收方式改回同步方式!坑~\r\n" " 修正:当发送操作达到100次时无法继续发送的BUG!\r\n" " 修改:优化内部线程同步机制,避免程序停止工作(失去响应)!\r\n" " 优化:优化自动发送数据的方式,提高精度,减小内存/CPU占用!\r\n" " 小提示:在加载/保存文件时,若不清楚打开/保存方式,可以查看简单的帮助信息!\r\n" "2013-05-11:明天母亲节\r\n" " 修正:终于找到一个比较好的办法来处理自动发送用到的重复数据了,呵呵,时间下限减少到10ms\r\n" "2013-07-05:\r\n" " 临时修正:选择从文件加载并取消后, 串口号选择的ComboBox会消失不见,不知道原因,临时已解决\r\n" "2013-07-14:\r\n" " 改进:程序内部改进内存分配算法,避免因程序错误造成内存泄漏\r\n" "2013-07-20 1.12:\r\n" " (过渡版本,以后更新)\r\n" "2013-07-27:\r\n" " 细节:主窗口最小化后其它子窗口不自动最小化的问题\r\n" "2013-08-29:\r\n" " 修正:在设备管理器中更改串口的端口号后, 自动刷新串口列表\r\n" " 细节:修正在串口打开且允许显示接收到的数据时无法使用鼠标滚轮的小问题\r\n" "2013-08-30:\r\n" " 细节:根据用户要求,窗口大小现在可以变化; 如果不满意于接收/发送区的文本框过小, 可以左右拖动窗口以改变窗口大小\r\n" " 细节:由于原来接收区没有水平滚动条,所以数据可能自动被换行, 现在已纠正,数据不再自动换行, 要换行, 请使用 \'\\n\'\r\n" 
"2013-09-10 1.13:\r\n" " 增加:现在可以手动编写待发送的命令文件,并发送命令了 - 在发送文件时选择 命令文件, 格式见博客后面的介绍\r\n" " 增加:字符发送模式下,可以选择取消回车换行符的发送,可以选择插入转义字符\r\n" " 1.支持的字符型转义字符:\r\n" " \\r,\\n,\\t,\\v,\\a,\\b,\\\\\r\n" " 2.支持的16进制转义字符格式:\r\n" " \\x?? - 其中一个问号代表一个16进制字符, 不可省略其一,\r\n" " 必需保证4个字符的格式\r\n" " 3.\'\?\',\'\'\',\'\"\', 等print-able字符不需要转义\r\n" "2013-11-02 1.14:\r\n" " 修改:完全修改了命令发送的界面,比原来方便了很多~\r\n" "2013-11-06:\r\n" " 修正:如果接收缓冲区有未显示的数据,则会在按下继续显示时进行提示,而不是原来的在接收到下一次的数据时进行提示;感谢网友lin0119的反馈\r\n" "2014-03-03: 1.15\r\n" " 增加:支持输入非标准的波特率, 但是驱动是否能够支持, 要看具体的驱动了\r\n" " 改进:加入了一些快捷键, 比如Alt+S为发送...\r\n" "2014-07-05: 1.16\r\n" " ①字符接收数据时,增加对控制字符Backspace的支持(即'\\b'),效果就是向前删除一个字符\r\n" " ②修复一处中文检测错误(原来是对的, 不知道什么时候改错了\r\n" " ③删除了窗体大小调整(下个版本即将使用自动布局)\r\n" "2014-07-06: 1.17\r\n" " ①增加:允许从接收区输入字符并发送(更友好的类交互模式)\r\n" " ②更改:更改了字符接收区/发送区的字体为Consolas等宽字体,不再使用原来的Courier字体\r\n" " ③增加:简洁模式 - 此模式下大部分界面元素会被隐藏, 有时候这样更舒服\r\n" " ④增加:主窗口的自动界面布局(允许拖动改变窗口大小,控件坐标自动调整)\r\n" " ⑤更改:默认使用的模式改为:字符接收+字符发送\r\n" "2014-08-09: 1.18 Beta\r\n" " 优化:简化对数据中包含'\\0'的数据的处理\r\n" " 增加:简洁界面模式时把工具栏放到左边\r\n" " 增加:接收区增加一个\"清空数据\"菜单\r\n" " 更改:不限制接收数据的多少\r\n" " 优化:完美实现4种换行符的统一, 就算'\\r\\n'分两次发送也会正确地产生仅一个换行符!\r\n" " 优化:主窗口应用新的布局方案\r\n" " 修复:修复错误解析16进制转义字符问题\r\n" " 修复:解决一个中文字符分两次发送的乱码问题\r\n" "2015-08-02: 1.19 久违了\r\n" " 修复了“保存到文件”功能\r\n" " 解决无法识别虚拟串口的问题 && 解决某些不支持的事件导致 SetCommMask 失败问题\r\n" " 编辑框增加常用功能:鼠标中键删除,计算器\r\n" " 去掉了一些不需要的功能 libtccw32, str2hex, pinctrl\r\n" "2015-09-13: 1.20 没有什么修改,只是整理了文档\r\n" " 没做什么实质性的修改,增加了文档,集成了idxml和tinyxml工具库\r\n" "2015-10-14:\r\n" " 写了一个批处理脚本用于把 main_orig.xml 转换成 main.xml\r\n" "2015-12-28:\r\n" " fix issue #4,增加DTR/RTS引脚电平控制\r\n" "" ; namespace Common{ LRESULT c_about_dlg::handle_message(UINT uMsg, WPARAM wParam, LPARAM lParam, bool& bHandled) { switch (uMsg) { case WM_INITDIALOG: { SetWindowText(m_hWnd, "关于 " COMMON_NAME_AND_VERSION); SetWindowText(*_layout.FindControl("stk_name"), COMMON_NAME_AND_VERSION " 编译时间:" __DATE__ " - " __TIME__); SetWindowText(*_layout.FindControl("edit_help"), about_str); 
SetFocus(*_layout.FindControl("btn_ok")); CenterWindow(); return 0; } case WM_PAINT: { PAINTSTRUCT ps; HICON hIcon; HDC hDC = BeginPaint(m_hWnd, &ps); hIcon = LoadIcon(theApp.instance(), MAKEINTRESOURCE(IDI_ICON1)); DrawIcon(hDC, 10, 10, hIcon); EndPaint(m_hWnd, &ps); DestroyIcon(hIcon); return 0; } case WM_CLOSE: break; } return __super::handle_message(uMsg, wParam, lParam, bHandled); } LPCTSTR c_about_dlg::get_skin_xml() const { return R"feifei( <Window size="420,450"> <Font name = "微软雅黑" size = "12" default = "true" /> <Vertical> <Vertical inset = "5,5,5,5"> <Horizontal height="50" inset="0,0,0,0"> <Control width="45" /> <Vertical inset="0,0,0,5"> <Static name="stk_name" height="20"/> <Static text="女孩不哭(QQ:191035066) 开始于 2012-12-24 平安夜" height="20"/> </Vertical> </Horizontal> <Edit style="readonly,multiline,hscroll,vscroll" exstyle="clientedge" name="edit_help" inset="0,5,0,5" minheight="300"/> <Horizontal height="30" inset="0,5,0,0"> <Control /> <Button name="btn_website" text="官方网址" width="100" /> <Control width="10" /> <Button name="btn_ok" text="确定" width="100" /> <Control /> </Horizontal> </Vertical> </Vertical> </Window> )feifei"; } LRESULT c_about_dlg::on_command_ctrl(HWND hwnd, SdkLayout::CControlUI* ctrl, int code) { auto& name = ctrl->GetName(); if (name == "btn_website"){ if (code == BN_CLICKED){ char* web = "http://blog.twofei.com/566/"; ShellExecute(NULL, "open", web, NULL, NULL, SW_SHOWNORMAL); return 0; } } else if (name == "btn_ok"){ if (code == BN_CLICKED){ Close(); return 0; } } return 0; } LPCTSTR c_about_dlg::get_window_name() const { return "关于" " " COMMON_NAME_AND_VERSION; } const char* c_about_dlg::soft_name = COMMON_NAME_AND_VERSION ; }
{ "language": "C" }
/*!\file netinet6/ipsec6.h * */ /* $FreeBSD: src/sys/netinet6/ipsec6.h,v 1.5 2001/06/11 12:39:06 ume Exp $ */ /* $KAME: ipsec.h,v 1.44 2001/03/23 08:08:47 itojun Exp $ */ /* * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * IPsec controller part. 
 */

#ifndef _NETINET6_IPSEC6_H_
#define _NETINET6_IPSEC6_H_

#include <net/pfkeyv2.h>
#include <netkey/keydb.h>

#ifdef _KERNEL
/* IPv6 IPsec statistics and the system default security policy. */
extern struct ipsecstat ipsec6stat;
extern struct secpolicy ip6_def_policy;
/* Default security levels (transport/tunnel mode, ESP/AH) and related
 * tunables; set via sysctl. */
extern int ip6_esp_trans_deflev;
extern int ip6_esp_net_deflev;
extern int ip6_ah_trans_deflev;
extern int ip6_ah_net_deflev;
extern int ip6_ipsec_ecn;
extern int ip6_esp_randpad;

/* Security-policy lookup: by owning socket, or by packet addresses. */
extern struct secpolicy *ipsec6_getpolicybysock
	__P((struct mbuf *, u_int, struct socket *, int *));
extern struct secpolicy *ipsec6_getpolicybyaddr
	__P((struct mbuf *, u_int, int, int *));

struct inpcb;

/* Inbound policy checks and per-PCB policy management. */
extern int ipsec6_in_reject_so __P((struct mbuf *, struct socket *));
extern int ipsec6_delete_pcbpolicy __P((struct inpcb *));
extern int ipsec6_set_policy __P((struct inpcb *inp, int optname,
	caddr_t request, size_t len, int priv));
extern int ipsec6_get_policy
	__P((struct inpcb *inp, caddr_t request, size_t len, struct mbuf **mp));
extern int ipsec6_in_reject __P((struct mbuf *, struct inpcb *));

struct tcp6cb;

/* Size of the IPsec headers that will be prepended on output. */
extern size_t ipsec6_hdrsiz __P((struct mbuf *, u_int, struct inpcb *));

struct ip6_hdr;
extern const char *ipsec6_logpacketstr __P((struct ip6_hdr *, u_int32_t));

/* Output-side transforms (transport and tunnel mode) and validation of
 * an inbound tunnel-mode packet against its SA. */
extern int ipsec6_output_trans __P((struct ipsec_output_state *, u_char *,
	struct mbuf *, struct secpolicy *, int, int *));
extern int ipsec6_output_tunnel __P((struct ipsec_output_state *,
	struct secpolicy *, int));
extern int ipsec6_tunnel_validate __P((struct mbuf *, int, u_int,
	struct secasvar *));
#endif /*_KERNEL*/

#endif /*_NETINET6_IPSEC6_H_*/
{ "language": "C" }
/*
 * Keyed 32-bit hash function using TEA in a Davis-Meyer function
 *   H0 = Key
 *   Hi = E Mi(Hi-1) + Hi-1
 *
 * (see Applied Cryptography, 2nd edition, p448).
 *
 * Jeremy Fitzhardinge <jeremy@zip.com.au> 1998
 *
 * Jeremy has agreed to the contents of reiserfs/README. -Hans
 * Yura's function is added (04/07/2000)
 */

/*
 * The three directory-entry hash functions selectable at mkfs time:
 *   keyed_hash
 *   yura_hash
 *   r5_hash
 *
 * NOTE(review): these hashes determine the on-disk placement of
 * directory entries; their exact output (including the signed-char
 * quirks noted below) must never change, or existing filesystems
 * become unreadable.
 */

#include <linux/kernel.h>
#include "reiserfs.h"
#include <asm/types.h>

#define DELTA 0x9E3779B9
#define FULLROUNDS 10		/* 32 is overkill, 16 is strong crypto */
#define PARTROUNDS 6		/* 6 gets complete mixing */

/* a, b, c, d - data; h0, h1 - accumulated hash
 * Runs 'rounds' iterations of the TEA block cipher over (a,b,c,d) keyed
 * by the running hash, then adds the result back into h0/h1
 * (Davies-Meyer construction). Expands in the caller's scope and reads
 * and updates the caller's a, b, c, d, h0, h1 variables directly. */
#define TEACORE(rounds)							\
	do {								\
		u32 sum = 0;						\
		int n = rounds;						\
		u32 b0, b1;						\
									\
		b0 = h0;						\
		b1 = h1;						\
									\
		do							\
		{							\
			sum += DELTA;					\
			b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);	\
			b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);	\
		} while(--n);						\
									\
		h0 += b0;						\
		h1 += b1;						\
	} while(0)

/*
 * TEA-based keyed hash of the first 'len' bytes of 'msg'.
 * Full 16-byte blocks get PARTROUNDS mixing; the final partial block is
 * padded with a length-derived pattern and finished with FULLROUNDS.
 *
 * NOTE(review): 'msg' is signed char, so (u32) msg[i] sign-extends for
 * bytes >= 0x80 before the shift/OR; this is part of the historical
 * on-disk hash and must be preserved as-is.
 */
u32 keyed_hash(const signed char *msg, int len)
{
	u32 k[] = { 0x9464a485, 0x542e1a94, 0x3e846bff, 0xb75bcfc3 };

	u32 h0 = k[0], h1 = k[1];
	u32 a, b, c, d;
	u32 pad;
	int i;

	//      assert(len >= 0 && len < 256);

	/* pad = len repeated in all four byte positions */
	pad = (u32) len | ((u32) len << 8);
	pad |= pad << 16;

	while (len >= 16) {
		a = (u32) msg[0] |
		    (u32) msg[1] << 8 | (u32) msg[2] << 16 | (u32) msg[3] << 24;
		b = (u32) msg[4] |
		    (u32) msg[5] << 8 | (u32) msg[6] << 16 | (u32) msg[7] << 24;
		c = (u32) msg[8] |
		    (u32) msg[9] << 8 |
		    (u32) msg[10] << 16 | (u32) msg[11] << 24;
		d = (u32) msg[12] |
		    (u32) msg[13] << 8 |
		    (u32) msg[14] << 16 | (u32) msg[15] << 24;

		TEACORE(PARTROUNDS);

		len -= 16;
		msg += 16;
	}

	/* trailing 0..15 bytes: fill a..d with pad, then shift the
	 * remaining message bytes into the last partially-used word */
	if (len >= 12) {
		a = (u32) msg[0] |
		    (u32) msg[1] << 8 | (u32) msg[2] << 16 | (u32) msg[3] << 24;
		b = (u32) msg[4] |
		    (u32) msg[5] << 8 | (u32) msg[6] << 16 | (u32) msg[7] << 24;
		c = (u32) msg[8] |
		    (u32) msg[9] << 8 |
		    (u32) msg[10] << 16 | (u32) msg[11] << 24;

		d = pad;
		for (i = 12; i < len; i++) {
			d <<= 8;
			d |= msg[i];
		}
	} else if (len >= 8) {
		a = (u32) msg[0] |
		    (u32) msg[1] << 8 | (u32) msg[2] << 16 | (u32) msg[3] << 24;
		b = (u32) msg[4] |
		    (u32) msg[5] << 8 | (u32) msg[6] << 16 | (u32) msg[7] << 24;

		c = d = pad;
		for (i = 8; i < len; i++) {
			c <<= 8;
			c |= msg[i];
		}
	} else if (len >= 4) {
		a = (u32) msg[0] |
		    (u32) msg[1] << 8 | (u32) msg[2] << 16 | (u32) msg[3] << 24;

		b = c = d = pad;
		for (i = 4; i < len; i++) {
			b <<= 8;
			b |= msg[i];
		}
	} else {
		a = b = c = d = pad;
		for (i = 0; i < len; i++) {
			a <<= 8;
			a |= msg[i];
		}
	}

	TEACORE(FULLROUNDS);

/*	return 0;*/
	return h0 ^ h1;
}

/* What follows in this file is copyright 2000 by Hans Reiser, and the
 * licensing of what follows is governed by reiserfs/README */

/*
 * Yura Rupasov's hash: treats the name as a decimal-digit string
 * (each byte offset by '0' == 48) and accumulates positionally-weighted
 * sums, padding short names out to 256 positions, then shifts left 7.
 */
u32 yura_hash(const signed char *msg, int len)
{
	int j, pow;
	u32 a, c;
	int i;

	for (pow = 1, i = 1; i < len; i++)
		pow = pow * 10;

	if (len == 1)
		a = msg[0] - 48;
	else
		a = (msg[0] - 48) * pow;

	for (i = 1; i < len; i++) {
		c = msg[i] - 48;
		for (pow = 1, j = i; j < len - 1; j++)
			pow = pow * 10;
		a = a + c * pow;
	}

	for (; i < 40; i++) {
		c = '0' - 48;
		for (pow = 1, j = i; j < len - 1; j++)
			pow = pow * 10;
		a = a + c * pow;
	}

	for (; i < 256; i++) {
		c = i;
		for (pow = 1, j = i; j < len - 1; j++)
			pow = pow * 10;
		a = a + c * pow;
	}

	a = a << 7;
	return a;
}

/*
 * The default "r5" hash: simple multiplicative rolling hash.
 * NOTE(review): iterates until the first NUL byte and ignores 'len'
 * entirely, so it must only be called with NUL-terminated names;
 * preserved as-is for on-disk compatibility.
 */
u32 r5_hash(const signed char *msg, int len)
{
	u32 a = 0;
	while (*msg) {
		a += *msg << 4;
		a += *msg >> 4;
		a *= 11;
		msg++;
	}
	return a;
}
{ "language": "C" }
// Win32 VERSIONINFO resource for the Embree DLL.
// The __EMBREE_VERSION_* macros are supplied by the generated version.h.
#include "version.h"

1 VERSIONINFO
 FILEVERSION __EMBREE_VERSION_MAJOR__,__EMBREE_VERSION_MINOR__,__EMBREE_VERSION_PATCH__,0
 PRODUCTVERSION __EMBREE_VERSION_MAJOR__,__EMBREE_VERSION_MINOR__,__EMBREE_VERSION_PATCH__,0
 FILEFLAGSMASK 0x3fL
// Mark debug builds in the file flags (VS_FF_DEBUG).
#ifdef _DEBUG
 FILEFLAGS 0x1L
#else
 FILEFLAGS 0x0L
#endif
 FILEOS 0x40004L
 FILETYPE 0x2L
 FILESUBTYPE 0x0L
BEGIN
    BLOCK "StringFileInfo"
    BEGIN
        // 0409 = U.S. English, 04b0 = Unicode code page.
        BLOCK "040904b0"
        BEGIN
            VALUE "CompanyName", "Intel"
            VALUE "FileDescription", "Embree Ray Tracing Kernels"
            VALUE "FileVersion", __EMBREE_VERSION__
            VALUE "ProductVersion", __EMBREE_VERSION__
            VALUE "LegalCopyright", "www.apache.org/licenses/LICENSE-2.0"
            VALUE "OriginalFilename", "embree.dll"
            VALUE "InternalName", "Embree"
            VALUE "ProductName", "Embree Ray Tracing Kernels"
        END
    END
    BLOCK "VarFileInfo"
    BEGIN
        VALUE "Translation", 0x409, 1200
    END
END
{ "language": "C" }
// Copyright (c) 2019-present, iQIYI, Inc. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. // // Created by caikelun on 2019-03-07. 
#include <unistd.h> #include <errno.h> #include <signal.h> #include <ctype.h> #include <stdarg.h> #include <stdlib.h> #include <string.h> #include <fcntl.h> #include <time.h> #include <sys/time.h> #include <sys/ptrace.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/utsname.h> #include <sys/system_properties.h> #include "xcc_util.h" #include "xcc_errno.h" #include "xcc_fmt.h" #include "xcc_version.h" #include "xcc_libc_support.h" #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wgnu-statement-expression" #pragma clang diagnostic ignored "-Wcast-align" #pragma clang diagnostic ignored "-Wformat-nonliteral" #define XCC_UTIL_TIME_FORMAT "%04d-%02d-%02dT%02d:%02d:%02d.%03ld%c%02ld%02ld" const char* xcc_util_get_signame(const siginfo_t* si) { switch (si->si_signo) { case SIGABRT: return "SIGABRT"; case SIGBUS: return "SIGBUS"; case SIGFPE: return "SIGFPE"; case SIGILL: return "SIGILL"; case SIGSEGV: return "SIGSEGV"; case SIGTRAP: return "SIGTRAP"; case SIGSYS: return "SIGSYS"; case SIGSTKFLT: return "SIGSTKFLT"; default: return "?"; } } const char* xcc_util_get_sigcodename(const siginfo_t* si) { // Try the signal-specific codes... 
switch (si->si_signo) { case SIGBUS: switch(si->si_code) { case BUS_ADRALN: return "BUS_ADRALN"; case BUS_ADRERR: return "BUS_ADRERR"; case BUS_OBJERR: return "BUS_OBJERR"; case BUS_MCEERR_AR: return "BUS_MCEERR_AR"; case BUS_MCEERR_AO: return "BUS_MCEERR_AO"; default: break; } break; case SIGFPE: switch(si->si_code) { case FPE_INTDIV: return "FPE_INTDIV"; case FPE_INTOVF: return "FPE_INTOVF"; case FPE_FLTDIV: return "FPE_FLTDIV"; case FPE_FLTOVF: return "FPE_FLTOVF"; case FPE_FLTUND: return "FPE_FLTUND"; case FPE_FLTRES: return "FPE_FLTRES"; case FPE_FLTINV: return "FPE_FLTINV"; case FPE_FLTSUB: return "FPE_FLTSUB"; default: break; } break; case SIGILL: switch(si->si_code) { case ILL_ILLOPC: return "ILL_ILLOPC"; case ILL_ILLOPN: return "ILL_ILLOPN"; case ILL_ILLADR: return "ILL_ILLADR"; case ILL_ILLTRP: return "ILL_ILLTRP"; case ILL_PRVOPC: return "ILL_PRVOPC"; case ILL_PRVREG: return "ILL_PRVREG"; case ILL_COPROC: return "ILL_COPROC"; case ILL_BADSTK: return "ILL_BADSTK"; default: break; } break; case SIGSEGV: switch(si->si_code) { case SEGV_MAPERR: return "SEGV_MAPERR"; case SEGV_ACCERR: return "SEGV_ACCERR"; case SEGV_BNDERR: return "SEGV_BNDERR"; case SEGV_PKUERR: return "SEGV_PKUERR"; default: break; } break; case SIGTRAP: switch(si->si_code) { case TRAP_BRKPT: return "TRAP_BRKPT"; case TRAP_TRACE: return "TRAP_TRACE"; case TRAP_BRANCH: return "TRAP_BRANCH"; case TRAP_HWBKPT: return "TRAP_HWBKPT"; default: break; } if((si->si_code & 0xff) == SIGTRAP) { switch((si->si_code >> 8) & 0xff) { case PTRACE_EVENT_FORK: return "PTRACE_EVENT_FORK"; case PTRACE_EVENT_VFORK: return "PTRACE_EVENT_VFORK"; case PTRACE_EVENT_CLONE: return "PTRACE_EVENT_CLONE"; case PTRACE_EVENT_EXEC: return "PTRACE_EVENT_EXEC"; case PTRACE_EVENT_VFORK_DONE: return "PTRACE_EVENT_VFORK_DONE"; case PTRACE_EVENT_EXIT: return "PTRACE_EVENT_EXIT"; case PTRACE_EVENT_SECCOMP: return "PTRACE_EVENT_SECCOMP"; case PTRACE_EVENT_STOP: return "PTRACE_EVENT_STOP"; default: break; } } break; case SIGSYS: 
switch(si->si_code) { case SYS_SECCOMP: return "SYS_SECCOMP"; default: break; } break; default: break; } // Then the other codes... switch (si->si_code) { case SI_USER: return "SI_USER"; case SI_KERNEL: return "SI_KERNEL"; case SI_QUEUE: return "SI_QUEUE"; case SI_TIMER: return "SI_TIMER"; case SI_MESGQ: return "SI_MESGQ"; case SI_ASYNCIO: return "SI_ASYNCIO"; case SI_SIGIO: return "SI_SIGIO"; case SI_TKILL: return "SI_TKILL"; case SI_DETHREAD: return "SI_DETHREAD"; } // Then give up... return "?"; } int xcc_util_signal_has_si_addr(const siginfo_t* si) { //manually sent signals won't have si_addr if(si->si_code == SI_USER || si->si_code == SI_QUEUE || si->si_code == SI_TKILL) return 0; switch (si->si_signo) { case SIGBUS: case SIGFPE: case SIGILL: case SIGSEGV: case SIGTRAP: return 1; default: return 0; } } int xcc_util_signal_has_sender(const siginfo_t* si, pid_t caller_pid) { return (SI_FROMUSER(si) && (si->si_pid != 0) && (si->si_pid != caller_pid)) ? 1 : 0; } int xcc_util_atoi(const char *str, int *i) { //We have to do this job very carefully for some unusual version of stdlib. 
long val = 0; char *endptr = NULL; const char *p = str; //check if(NULL == str || NULL == i) return XCC_ERRNO_INVAL; if((*p < '0' || *p > '9') && *p != '-') return XCC_ERRNO_INVAL; p++; while(*p) { if(*p < '0' || *p > '9') return XCC_ERRNO_INVAL; p++; } //convert errno = 0; val = strtol(str, &endptr, 10); //check if((errno == ERANGE && (val == LONG_MAX || val == LONG_MIN)) || (errno != 0 && val == 0)) return XCC_ERRNO_INVAL; if(endptr == str) return XCC_ERRNO_INVAL; if(val > INT_MAX || val < INT_MIN) return XCC_ERRNO_INVAL; //OK *i = (int)val; return 0; } char *xcc_util_trim(char *start) { char *end; if(NULL == start) return NULL; end = start + strlen(start); if(start == end) return start; while(start < end && isspace((int)(*start))) start++; if(start == end) return start; while(start < end && isspace((int)(*(end - 1)))) end--; *end = '\0'; return start; } int xcc_util_write(int fd, const char *buf, size_t len) { size_t nleft; ssize_t nwritten; const char *ptr; if(fd < 0) return XCC_ERRNO_INVAL; ptr = buf; nleft = len; while(nleft > 0) { errno = 0; if((nwritten = write(fd, ptr, nleft)) <= 0) { if(nwritten < 0 && errno == EINTR) nwritten = 0; /* call write() again */ else return XCC_ERRNO_SYS; /* error */ } nleft -= (size_t)nwritten; ptr += nwritten; } return 0; } int xcc_util_write_str(int fd, const char *str) { const char *tmp = str; size_t len = 0; if(fd < 0) return XCC_ERRNO_INVAL; while(*tmp) tmp++; len = (size_t)(tmp - str); if(0 == len) return 0; return xcc_util_write(fd, str, len); } int xcc_util_write_format(int fd, const char *format, ...) { va_list ap; char buf[1024]; ssize_t len; if(fd < 0) return XCC_ERRNO_INVAL; va_start(ap, format); len = vsnprintf(buf, sizeof(buf), format, ap); va_end(ap); if(len <= 0) return 0; return xcc_util_write(fd, buf, (size_t)len); } int xcc_util_write_format_safe(int fd, const char *format, ...) 
{ va_list ap; char buf[1024]; size_t len; if(fd < 0) return XCC_ERRNO_INVAL; va_start(ap, format); len = xcc_fmt_vsnprintf(buf, sizeof(buf), format, ap); va_end(ap); if(0 == len) return 0; return xcc_util_write(fd, buf, len); } char *xcc_util_gets(char *s, size_t size, int fd) { ssize_t i, nread; char c, *p; if(fd < 0 || NULL == s || size < 2) return NULL; s[0] = '\0'; p = s; for(i = 0; i < (ssize_t)(size - 1); i++) { if(1 == (nread = read(fd, &c, 1))) { *p++ = c; if('\n' == c) break; } else if(0 == nread) //EOF { break; } else { if (errno != EINTR) return NULL; } } *p = '\0'; return ('\0' == s[0] ? NULL : s); } int xcc_util_read_file_line(const char *path, char *buf, size_t len) { int fd = -1; int r = 0; if(0 > (fd = XCC_UTIL_TEMP_FAILURE_RETRY(open(path, O_RDONLY | O_CLOEXEC)))) { r = XCC_ERRNO_SYS; goto end; } if(NULL == xcc_util_gets(buf, len, fd)) { r = XCC_ERRNO_SYS; goto end; } end: if(fd >= 0) close(fd); return r; } static int xcc_util_get_process_thread_name(const char *path, char *buf, size_t len) { char tmp[256], *data; size_t data_len, cpy_len; int r; //read a line if(0 != (r = xcc_util_read_file_line(path, tmp, sizeof(tmp)))) return r; //trim data = xcc_util_trim(tmp); //return data if(0 == (data_len = strlen(data))) return XCC_ERRNO_MISSING; cpy_len = XCC_UTIL_MIN(len - 1, data_len); memcpy(buf, data, cpy_len); buf[cpy_len] = '\0'; return 0; } void xcc_util_get_process_name(pid_t pid, char *buf, size_t len) { char path[128]; xcc_fmt_snprintf(path, sizeof(path), "/proc/%d/cmdline", pid); if(0 != xcc_util_get_process_thread_name(path, buf, len)) strncpy(buf, "unknown", len); } void xcc_util_get_thread_name(pid_t tid, char *buf, size_t len) { char path[128]; xcc_fmt_snprintf(path, sizeof(path), "/proc/%d/comm", tid); if(0 != xcc_util_get_process_thread_name(path, buf, len)) strncpy(buf, "unknown", len); } int xcc_util_record_sub_section_from(int fd, const char *path, const char *title, size_t limit) { FILE *fp = NULL; char line[512]; char *p; int r = 0; 
size_t n = 0; if(NULL == (fp = fopen(path, "r"))) goto end; if(0 != (r = xcc_util_write_str(fd, title))) goto end; while(NULL != fgets(line, sizeof(line), fp)) { p = xcc_util_trim(line); if(strlen(p) > 0) { n++; if(0 == limit || n <= limit) if(0 != (r = xcc_util_write_format_safe(fd, " %s\n", p))) goto end; } } if(limit > 0 && n > limit) { if(0 != (r = xcc_util_write_str(fd, " ......\n"))) goto end; if(0 != (r = xcc_util_write_format_safe(fd, " (number of records: %zu)\n", n))) goto end; } if(0 != (r = xcc_util_write_str(fd, "-\n"))) goto end; end: if(NULL != fp) fclose(fp); return r; } static const char *xcc_util_su_pathnames[] = { "/data/local/su", "/data/local/bin/su", "/data/local/xbin/su", "/system/xbin/su", "/system/bin/su", "/system/bin/.ext/su", "/system/bin/failsafe/su", "/system/sd/xbin/su", "/system/usr/we-need-root/su", "/sbin/su", "/su/bin/su" }; static int xcc_util_is_root_saved = -1; int xcc_util_is_root(void) { size_t i; if(xcc_util_is_root_saved >= 0) return xcc_util_is_root_saved; for(i = 0; i < sizeof(xcc_util_su_pathnames) / sizeof(xcc_util_su_pathnames[0]); i++) { if(0 == access(xcc_util_su_pathnames[i], F_OK)) { xcc_util_is_root_saved = 1; return 1; } } xcc_util_is_root_saved = 0; return 0; } size_t xcc_util_get_dump_header(char *buf, size_t buf_len, const char *crash_type, long time_zone, uint64_t start_time, uint64_t crash_time, const char *app_id, const char *app_version, int api_level, const char *os_version, const char *kernel_version, const char *abi_list, const char *manufacturer, const char *brand, const char *model, const char *build_fingerprint) { time_t start_sec = (time_t)(start_time / 1000000); suseconds_t start_usec = (time_t)(start_time % 1000000); struct tm start_tm; time_t crash_sec = (time_t)(crash_time / 1000000); suseconds_t crash_usec = (time_t)(crash_time % 1000000); struct tm crash_tm; //convert times xcc_libc_support_memset(&start_tm, 0, sizeof(start_tm)); xcc_libc_support_memset(&crash_tm, 0, sizeof(crash_tm)); 
xcc_libc_support_localtime_r(&start_sec, time_zone, &start_tm); xcc_libc_support_localtime_r(&crash_sec, time_zone, &crash_tm); return xcc_fmt_snprintf(buf, buf_len, XCC_UTIL_TOMB_HEAD "Tombstone maker: '"XCC_VERSION_STR"'\n" "Crash type: '%s'\n" "Start time: '"XCC_UTIL_TIME_FORMAT"'\n" "Crash time: '"XCC_UTIL_TIME_FORMAT"'\n" "App ID: '%s'\n" "App version: '%s'\n" "Rooted: '%s'\n" "API level: '%d'\n" "OS version: '%s'\n" "Kernel version: '%s'\n" "ABI list: '%s'\n" "Manufacturer: '%s'\n" "Brand: '%s'\n" "Model: '%s'\n" "Build fingerprint: '%s'\n" "ABI: '"XCC_UTIL_ABI_STRING"'\n", crash_type, start_tm.tm_year + 1900, start_tm.tm_mon + 1, start_tm.tm_mday, start_tm.tm_hour, start_tm.tm_min, start_tm.tm_sec, start_usec / 1000, time_zone < 0 ? '-' : '+', labs(time_zone / 3600), labs(time_zone % 3600), crash_tm.tm_year + 1900, crash_tm.tm_mon + 1, crash_tm.tm_mday, crash_tm.tm_hour, crash_tm.tm_min, crash_tm.tm_sec, crash_usec / 1000, time_zone < 0 ? '-' : '+', labs(time_zone / 3600), labs(time_zone % 3600), app_id, app_version, xcc_util_is_root() ? "Yes" : "No", api_level, os_version, kernel_version, abi_list, manufacturer, brand, model, build_fingerprint); } static int xcc_util_record_logcat_buffer(int fd, pid_t pid, int api_level, const char *buffer, unsigned int lines, char priority) { FILE *fp; char cmd[128]; char buf[1025]; int with_pid; char pid_filter[64] = ""; char pid_label[32] = ""; int r = 0; //Since Android 7.0 Nougat (API level 24), logcat has --pid filter option. with_pid = (api_level >= 24 ? 
1 : 0); if(with_pid) { //API level >= 24, filtered by --pid option xcc_fmt_snprintf(pid_filter, sizeof(pid_filter), "--pid %d ", pid); } else { //API level < 24, filtered by ourself, so we need to read more lines lines = (unsigned int)(lines * 1.2); xcc_fmt_snprintf(pid_label, sizeof(pid_label), " %d ", pid); } xcc_fmt_snprintf(cmd, sizeof(cmd), "/system/bin/logcat -b %s -d -v threadtime -t %u %s*:%c", buffer, lines, pid_filter, priority); if(0 != (r = xcc_util_write_format_safe(fd, "--------- tail end of log %s (%s)\n", buffer, cmd))) return r; if(NULL != (fp = popen(cmd, "r"))) { buf[sizeof(buf) - 1] = '\0'; while(NULL != fgets(buf, sizeof(buf) - 1, fp)) if(with_pid || NULL != strstr(buf, pid_label)) if(0 != (r = xcc_util_write_str(fd, buf))) break; pclose(fp); } return r; } int xcc_util_record_logcat(int fd, pid_t pid, int api_level, unsigned int logcat_system_lines, unsigned int logcat_events_lines, unsigned int logcat_main_lines) { int r; if(0 == logcat_system_lines && 0 == logcat_events_lines && 0 == logcat_main_lines) return 0; if(0 != (r = xcc_util_write_str(fd, "logcat:\n"))) return r; if(logcat_main_lines > 0) if(0 != (r = xcc_util_record_logcat_buffer(fd, pid, api_level, "main", logcat_main_lines, 'D'))) return r; if(logcat_system_lines > 0) if(0 != (r = xcc_util_record_logcat_buffer(fd, pid, api_level, "system", logcat_system_lines, 'W'))) return r; if(logcat_events_lines > 0) if(0 != (r = xcc_util_record_logcat_buffer(fd, pid, api_level, "events", logcat_events_lines, 'I'))) return r; if(0 != (r = xcc_util_write_str(fd, "\n"))) return r; return 0; } int xcc_util_record_fds(int fd, pid_t pid) { int fd2 = -1; char path[128]; char fd_path[512]; char buf[512]; long n, i; int fd_num; size_t total = 0; xcc_util_dirent_t *ent; ssize_t len; int r = 0; if(0 != (r = xcc_util_write_str(fd, "open files:\n"))) return r; xcc_fmt_snprintf(path, sizeof(path), "/proc/%d/fd", pid); if((fd2 = XCC_UTIL_TEMP_FAILURE_RETRY(open(path, O_RDONLY | O_DIRECTORY | O_CLOEXEC))) < 
0) goto end; while((n = syscall(XCC_UTIL_SYSCALL_GETDENTS, fd2, buf, sizeof(buf))) > 0) { for(i = 0; i < n;) { ent = (xcc_util_dirent_t *)(buf + i); //get the fd if('\0' == ent->d_name[0]) goto next; if(0 == memcmp(ent->d_name, ".", 1)) goto next; if(0 == memcmp(ent->d_name, "..", 2)) goto next; if(0 != xcc_util_atoi(ent->d_name, &fd_num)) goto next; if(fd_num < 0) goto next; //count total++; if(total > 1024) goto next; //read link of the path xcc_fmt_snprintf(path, sizeof(path), "/proc/%d/fd/%d", pid, fd_num); len = readlink(path, fd_path, sizeof(fd_path) - 1); if(len <= 0 || len > (ssize_t)(sizeof(fd_path) - 1)) strncpy(path, "???", sizeof(path)); else fd_path[len] = '\0'; //dump if(0 != (r = xcc_util_write_format_safe(fd, " fd %d: %s\n", fd_num, fd_path))) goto clean; next: i += ent->d_reclen; } } end: if(total > 1024) if(0 != (r = xcc_util_write_str(fd, " ......\n"))) goto clean; if(0 != (r = xcc_util_write_format_safe(fd, " (number of FDs: %zu)\n", total))) goto clean; r = xcc_util_write_str(fd, "\n"); clean: if(fd2 >= 0) close(fd2); return r; } int xcc_util_record_network_info(int fd, pid_t pid, int api_level) { int r; char path[128]; if(0 != (r = xcc_util_write_str(fd, "network info:\n"))) return r; if(api_level >= 29) { if(0 != (r = xcc_util_write_str(fd, "Not supported on Android Q (API level 29) and later.\n"))) return r; } else { xcc_fmt_snprintf(path, sizeof(path), "/proc/%d/net/tcp", pid); if(0 != (r = xcc_util_record_sub_section_from(fd, path, " TCP over IPv4 (From: /proc/PID/net/tcp)\n", 1024))) return r; xcc_fmt_snprintf(path, sizeof(path), "/proc/%d/net/tcp6", pid); if(0 != (r = xcc_util_record_sub_section_from(fd, path, " TCP over IPv6 (From: /proc/PID/net/tcp6)\n", 1024))) return r; xcc_fmt_snprintf(path, sizeof(path), "/proc/%d/net/udp", pid); if(0 != (r = xcc_util_record_sub_section_from(fd, path, " UDP over IPv4 (From: /proc/PID/net/udp)\n", 1024))) return r; xcc_fmt_snprintf(path, sizeof(path), "/proc/%d/net/udp6", pid); if(0 != (r = 
xcc_util_record_sub_section_from(fd, path, " UDP over IPv6 (From: /proc/PID/net/udp6)\n", 1024))) return r; xcc_fmt_snprintf(path, sizeof(path), "/proc/%d/net/icmp", pid); if(0 != (r = xcc_util_record_sub_section_from(fd, path, " ICMP in IPv4 (From: /proc/PID/net/icmp)\n", 256))) return r; xcc_fmt_snprintf(path, sizeof(path), "/proc/%d/net/icmp6", pid); if(0 != (r = xcc_util_record_sub_section_from(fd, path, " ICMP in IPv6 (From: /proc/PID/net/icmp6)\n", 256))) return r; xcc_fmt_snprintf(path, sizeof(path), "/proc/%d/net/unix", pid); if(0 != (r = xcc_util_record_sub_section_from(fd, path, " UNIX domain (From: /proc/PID/net/unix)\n", 256))) return r; } if(0 != (r = xcc_util_write_str(fd, "\n"))) return r; return 0; } #pragma clang diagnostic pop
{ "language": "C" }
/* USB Driver for Sierra Wireless Copyright (C) 2006, 2007, 2008 Kevin Lloyd <klloyd@sierrawireless.com>, Copyright (C) 2008, 2009 Elina Pasheva, Matthew Safar, Rory Filer <linux@sierrawireless.com> IMPORTANT DISCLAIMER: This driver is not commercially supported by Sierra Wireless. Use at your own risk. This driver is free software; you can redistribute it and/or modify it under the terms of Version 2 of the GNU General Public License as published by the Free Software Foundation. Portions based on the option driver by Matthias Urlichs <smurf@smurf.noris.de> Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org> */ #define DRIVER_VERSION "v.1.3.8" #define DRIVER_AUTHOR "Kevin Lloyd, Elina Pasheva, Matthew Safar, Rory Filer" #define DRIVER_DESC "USB Driver for Sierra Wireless USB modems" #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/usb/serial.h> #define SWIMS_USB_REQUEST_SetPower 0x00 #define SWIMS_USB_REQUEST_SetNmea 0x07 #define N_IN_URB 8 #define N_OUT_URB 64 #define IN_BUFLEN 4096 #define MAX_TRANSFER (PAGE_SIZE - 512) /* MAX_TRANSFER is chosen so that the VM is not stressed by allocations > PAGE_SIZE and the number of packets in a page is an integer 512 is the largest possible packet on EHCI */ static int debug; static int nmea; /* Used in interface blacklisting */ struct sierra_iface_info { const u32 infolen; /* number of interface numbers on blacklist */ const u8 *ifaceinfo; /* pointer to the array holding the numbers */ }; struct sierra_intf_private { spinlock_t susp_lock; unsigned int suspended:1; int in_flight; }; static int sierra_set_power_state(struct usb_device *udev, __u16 swiState) { int result; dev_dbg(&udev->dev, "%s\n", __func__); result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), SWIMS_USB_REQUEST_SetPower, /* __u8 request */ USB_TYPE_VENDOR, /* __u8 request type */ swiState, 
/* __u16 value */ 0, /* __u16 index */ NULL, /* void *data */ 0, /* __u16 size */ USB_CTRL_SET_TIMEOUT); /* int timeout */ return result; } static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable) { int result; dev_dbg(&udev->dev, "%s\n", __func__); result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), SWIMS_USB_REQUEST_SetNmea, /* __u8 request */ USB_TYPE_VENDOR, /* __u8 request type */ enable, /* __u16 value */ 0x0000, /* __u16 index */ NULL, /* void *data */ 0, /* __u16 size */ USB_CTRL_SET_TIMEOUT); /* int timeout */ return result; } static int sierra_calc_num_ports(struct usb_serial *serial) { int num_ports = 0; u8 ifnum, numendpoints; dev_dbg(&serial->dev->dev, "%s\n", __func__); ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber; numendpoints = serial->interface->cur_altsetting->desc.bNumEndpoints; /* Dummy interface present on some SKUs should be ignored */ if (ifnum == 0x99) num_ports = 0; else if (numendpoints <= 3) num_ports = 1; else num_ports = (numendpoints-1)/2; return num_ports; } static int is_blacklisted(const u8 ifnum, const struct sierra_iface_info *blacklist) { const u8 *info; int i; if (blacklist) { info = blacklist->ifaceinfo; for (i = 0; i < blacklist->infolen; i++) { if (info[i] == ifnum) return 1; } } return 0; } static int sierra_calc_interface(struct usb_serial *serial) { int interface; struct usb_interface *p_interface; struct usb_host_interface *p_host_interface; dev_dbg(&serial->dev->dev, "%s\n", __func__); /* Get the interface structure pointer from the serial struct */ p_interface = serial->interface; /* Get a pointer to the host interface structure */ p_host_interface = p_interface->cur_altsetting; /* read the interface descriptor for this active altsetting * to find out the interface number we are on */ interface = p_host_interface->desc.bInterfaceNumber; return interface; } static int sierra_probe(struct usb_serial *serial, const struct usb_device_id *id) { int result = 0; struct usb_device *udev; 
struct sierra_intf_private *data; u8 ifnum; udev = serial->dev; dev_dbg(&udev->dev, "%s\n", __func__); ifnum = sierra_calc_interface(serial); /* * If this interface supports more than 1 alternate * select the 2nd one */ if (serial->interface->num_altsetting == 2) { dev_dbg(&udev->dev, "Selecting alt setting for interface %d\n", ifnum); /* We know the alternate setting is 1 for the MC8785 */ usb_set_interface(udev, ifnum, 1); } /* ifnum could have changed - by calling usb_set_interface */ ifnum = sierra_calc_interface(serial); if (is_blacklisted(ifnum, (struct sierra_iface_info *)id->driver_info)) { dev_dbg(&serial->dev->dev, "Ignoring blacklisted interface #%d\n", ifnum); return -ENODEV; } data = serial->private = kzalloc(sizeof(struct sierra_intf_private), GFP_KERNEL); if (!data) return -ENOMEM; spin_lock_init(&data->susp_lock); return result; } static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11 }; static const struct sierra_iface_info direct_ip_interface_blacklist = { .infolen = ARRAY_SIZE(direct_ip_non_serial_ifaces), .ifaceinfo = direct_ip_non_serial_ifaces, }; static struct usb_device_id id_table [] = { { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */ { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */ { USB_DEVICE(0x03F0, 0x211D) }, /* HP ev2210 a.k.a MC5725 */ { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */ { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */ { USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */ { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ { USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */ { USB_DEVICE(0x1199, 0x0022) }, /* Sierra Wireless EM5725 */ { USB_DEVICE(0x1199, 0x0024) }, /* Sierra Wireless MC5727 */ { USB_DEVICE(0x1199, 0x0224) }, /* Sierra Wireless MC5727 */ { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */ { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 
597E */ { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */ { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */ /* Sierra Wireless C597 */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) }, /* Sierra Wireless T598 */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0025, 0xFF, 0xFF, 0xFF) }, { USB_DEVICE(0x1199, 0x0026) }, /* Sierra Wireless T11 */ { USB_DEVICE(0x1199, 0x0027) }, /* Sierra Wireless AC402 */ { USB_DEVICE(0x1199, 0x0028) }, /* Sierra Wireless MC5728 */ { USB_DEVICE(0x1199, 0x0029) }, /* Sierra Wireless Device */ { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */ { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */ { USB_DEVICE(0x1199, 0x6805) }, /* Sierra Wireless MC8765 */ { USB_DEVICE(0x1199, 0x6808) }, /* Sierra Wireless MC8755 */ { USB_DEVICE(0x1199, 0x6809) }, /* Sierra Wireless MC8765 */ { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */ { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 */ { USB_DEVICE(0x1199, 0x6815) }, /* Sierra Wireless MC8775 */ { USB_DEVICE(0x1199, 0x6816) }, /* Sierra Wireless MC8775 */ { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */ { USB_DEVICE(0x1199, 0x6821) }, /* Sierra Wireless AirCard 875U */ { USB_DEVICE(0x1199, 0x6822) }, /* Sierra Wireless AirCard 875E */ { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780 */ { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */ { USB_DEVICE(0x1199, 0x6834) }, /* Sierra Wireless MC8780 */ { USB_DEVICE(0x1199, 0x6835) }, /* Sierra Wireless MC8781 */ { USB_DEVICE(0x1199, 0x6838) }, /* Sierra Wireless MC8780 */ { USB_DEVICE(0x1199, 0x6839) }, /* Sierra Wireless MC8781 */ { USB_DEVICE(0x1199, 0x683A) }, /* Sierra Wireless MC8785 */ { USB_DEVICE(0x1199, 0x683B) }, /* Sierra Wireless MC8785 Composite */ /* Sierra Wireless MC8790, MC8791, MC8792 Composite */ { USB_DEVICE(0x1199, 0x683C) }, { 
USB_DEVICE(0x1199, 0x683D) }, /* Sierra Wireless MC8791 Composite */ /* Sierra Wireless MC8790, MC8791, MC8792 */ { USB_DEVICE(0x1199, 0x683E) }, { USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */ { USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */ { USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880 E */ { USB_DEVICE(0x1199, 0x6853) }, /* Sierra Wireless AirCard 881 E */ { USB_DEVICE(0x1199, 0x6855) }, /* Sierra Wireless AirCard 880 U */ { USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */ { USB_DEVICE(0x1199, 0x6859) }, /* Sierra Wireless AirCard 885 E */ { USB_DEVICE(0x1199, 0x685A) }, /* Sierra Wireless AirCard 885 E */ /* Sierra Wireless C885 */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)}, /* Sierra Wireless C888, Air Card 501, USB 303, USB 304 */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6890, 0xFF, 0xFF, 0xFF)}, /* Sierra Wireless C22/C33 */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6891, 0xFF, 0xFF, 0xFF)}, /* Sierra Wireless HSPA Non-Composite Device */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)}, { USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */ { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist }, { } }; MODULE_DEVICE_TABLE(usb, id_table); static struct usb_driver sierra_driver = { .name = "sierra", .probe = usb_serial_probe, .disconnect = usb_serial_disconnect, .suspend = usb_serial_suspend, .resume = usb_serial_resume, .id_table = id_table, .no_dynamic_id = 1, .supports_autosuspend = 1, }; struct sierra_port_private { spinlock_t lock; /* lock the structure */ int outstanding_urbs; /* number of out urbs in flight */ struct usb_anchor active; struct usb_anchor delayed; /* Input endpoints and buffers for this port */ struct urb *in_urbs[N_IN_URB]; /* Settings for the port */ int rts_state; /* Handshaking pins (outputs) */ int dtr_state; int cts_state; /* Handshaking 
pins (inputs) */ int dsr_state; int dcd_state; int ri_state; unsigned int opened:1; }; static int sierra_send_setup(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct sierra_port_private *portdata; __u16 interface = 0; int val = 0; int do_send = 0; int retval; dev_dbg(&port->dev, "%s\n", __func__); portdata = usb_get_serial_port_data(port); if (portdata->dtr_state) val |= 0x01; if (portdata->rts_state) val |= 0x02; /* If composite device then properly report interface */ if (serial->num_ports == 1) { interface = sierra_calc_interface(serial); /* Control message is sent only to interfaces with * interrupt_in endpoints */ if (port->interrupt_in_urb) { /* send control message */ do_send = 1; } } /* Otherwise the need to do non-composite mapping */ else { if (port->bulk_out_endpointAddress == 2) interface = 0; else if (port->bulk_out_endpointAddress == 4) interface = 1; else if (port->bulk_out_endpointAddress == 5) interface = 2; do_send = 1; } if (!do_send) return 0; usb_autopm_get_interface(serial->interface); retval = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 0x22, 0x21, val, interface, NULL, 0, USB_CTRL_SET_TIMEOUT); usb_autopm_put_interface(serial->interface); return retval; } static void sierra_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios) { dev_dbg(&port->dev, "%s\n", __func__); tty_termios_copy_hw(tty->termios, old_termios); sierra_send_setup(port); } static int sierra_tiocmget(struct tty_struct *tty, struct file *file) { struct usb_serial_port *port = tty->driver_data; unsigned int value; struct sierra_port_private *portdata; dev_dbg(&port->dev, "%s\n", __func__); portdata = usb_get_serial_port_data(port); value = ((portdata->rts_state) ? TIOCM_RTS : 0) | ((portdata->dtr_state) ? TIOCM_DTR : 0) | ((portdata->cts_state) ? TIOCM_CTS : 0) | ((portdata->dsr_state) ? TIOCM_DSR : 0) | ((portdata->dcd_state) ? TIOCM_CAR : 0) | ((portdata->ri_state) ? 
TIOCM_RNG : 0); return value; } static int sierra_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct sierra_port_private *portdata; portdata = usb_get_serial_port_data(port); if (set & TIOCM_RTS) portdata->rts_state = 1; if (set & TIOCM_DTR) portdata->dtr_state = 1; if (clear & TIOCM_RTS) portdata->rts_state = 0; if (clear & TIOCM_DTR) portdata->dtr_state = 0; return sierra_send_setup(port); } static void sierra_release_urb(struct urb *urb) { struct usb_serial_port *port; if (urb) { port = urb->context; dev_dbg(&port->dev, "%s: %p\n", __func__, urb); kfree(urb->transfer_buffer); usb_free_urb(urb); } } static void sierra_outdat_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; struct sierra_port_private *portdata = usb_get_serial_port_data(port); struct sierra_intf_private *intfdata; int status = urb->status; dev_dbg(&port->dev, "%s - port %d\n", __func__, port->number); intfdata = port->serial->private; /* free up the transfer buffer, as usb_free_urb() does not do this */ kfree(urb->transfer_buffer); usb_autopm_put_interface_async(port->serial->interface); if (status) dev_dbg(&port->dev, "%s - nonzero write bulk status " "received: %d\n", __func__, status); spin_lock(&portdata->lock); --portdata->outstanding_urbs; spin_unlock(&portdata->lock); spin_lock(&intfdata->susp_lock); --intfdata->in_flight; spin_unlock(&intfdata->susp_lock); usb_serial_port_softint(port); } /* Write */ static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count) { struct sierra_port_private *portdata = usb_get_serial_port_data(port); struct sierra_intf_private *intfdata; struct usb_serial *serial = port->serial; unsigned long flags; unsigned char *buffer; struct urb *urb; size_t writesize = min((size_t)count, (size_t)MAX_TRANSFER); int retval = 0; /* verify that we actually have some data to write */ if (count == 0) 
return 0; portdata = usb_get_serial_port_data(port); intfdata = serial->private; dev_dbg(&port->dev, "%s: write (%zd bytes)\n", __func__, writesize); spin_lock_irqsave(&portdata->lock, flags); dev_dbg(&port->dev, "%s - outstanding_urbs: %d\n", __func__, portdata->outstanding_urbs); if (portdata->outstanding_urbs > N_OUT_URB) { spin_unlock_irqrestore(&portdata->lock, flags); dev_dbg(&port->dev, "%s - write limit hit\n", __func__); return 0; } portdata->outstanding_urbs++; dev_dbg(&port->dev, "%s - 1, outstanding_urbs: %d\n", __func__, portdata->outstanding_urbs); spin_unlock_irqrestore(&portdata->lock, flags); retval = usb_autopm_get_interface_async(serial->interface); if (retval < 0) { spin_lock_irqsave(&portdata->lock, flags); portdata->outstanding_urbs--; spin_unlock_irqrestore(&portdata->lock, flags); goto error_simple; } buffer = kmalloc(writesize, GFP_ATOMIC); if (!buffer) { dev_err(&port->dev, "out of memory\n"); retval = -ENOMEM; goto error_no_buffer; } urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { dev_err(&port->dev, "no more free urbs\n"); retval = -ENOMEM; goto error_no_urb; } memcpy(buffer, buf, writesize); usb_serial_debug_data(debug, &port->dev, __func__, writesize, buffer); usb_fill_bulk_urb(urb, serial->dev, usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress), buffer, writesize, sierra_outdat_callback, port); /* Handle the need to send a zero length packet */ urb->transfer_flags |= URB_ZERO_PACKET; spin_lock_irqsave(&intfdata->susp_lock, flags); if (intfdata->suspended) { usb_anchor_urb(urb, &portdata->delayed); spin_unlock_irqrestore(&intfdata->susp_lock, flags); goto skip_power; } else { usb_anchor_urb(urb, &portdata->active); } /* send it down the pipe */ retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) { usb_unanchor_urb(urb); spin_unlock_irqrestore(&intfdata->susp_lock, flags); dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed " "with status = %d\n", __func__, retval); goto error; } else { intfdata->in_flight++; 
spin_unlock_irqrestore(&intfdata->susp_lock, flags); } skip_power: /* we are done with this urb, so let the host driver * really free it when it is finished with it */ usb_free_urb(urb); return writesize; error: usb_free_urb(urb); error_no_urb: kfree(buffer); error_no_buffer: spin_lock_irqsave(&portdata->lock, flags); --portdata->outstanding_urbs; dev_dbg(&port->dev, "%s - 2. outstanding_urbs: %d\n", __func__, portdata->outstanding_urbs); spin_unlock_irqrestore(&portdata->lock, flags); usb_autopm_put_interface_async(serial->interface); error_simple: return retval; } static void sierra_indat_callback(struct urb *urb) { int err; int endpoint; struct usb_serial_port *port; struct tty_struct *tty; unsigned char *data = urb->transfer_buffer; int status = urb->status; endpoint = usb_pipeendpoint(urb->pipe); port = urb->context; dev_dbg(&port->dev, "%s: %p\n", __func__, urb); if (status) { dev_dbg(&port->dev, "%s: nonzero status: %d on" " endpoint %02x\n", __func__, status, endpoint); } else { if (urb->actual_length) { tty = tty_port_tty_get(&port->port); if (tty) { tty_buffer_request_room(tty, urb->actual_length); tty_insert_flip_string(tty, data, urb->actual_length); tty_flip_buffer_push(tty); tty_kref_put(tty); usb_serial_debug_data(debug, &port->dev, __func__, urb->actual_length, data); } } else { dev_dbg(&port->dev, "%s: empty read urb" " received\n", __func__); } } /* Resubmit urb so we continue receiving */ if (port->port.count && status != -ESHUTDOWN && status != -EPERM) { usb_mark_last_busy(port->serial->dev); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) dev_err(&port->dev, "resubmit read urb failed." 
"(%d)\n", err); } return; } static void sierra_instat_callback(struct urb *urb) { int err; int status = urb->status; struct usb_serial_port *port = urb->context; struct sierra_port_private *portdata = usb_get_serial_port_data(port); struct usb_serial *serial = port->serial; dev_dbg(&port->dev, "%s: urb %p port %p has data %p\n", __func__, urb, port, portdata); if (status == 0) { struct usb_ctrlrequest *req_pkt = (struct usb_ctrlrequest *)urb->transfer_buffer; if (!req_pkt) { dev_dbg(&port->dev, "%s: NULL req_pkt\n", __func__); return; } if ((req_pkt->bRequestType == 0xA1) && (req_pkt->bRequest == 0x20)) { int old_dcd_state; unsigned char signals = *((unsigned char *) urb->transfer_buffer + sizeof(struct usb_ctrlrequest)); struct tty_struct *tty; dev_dbg(&port->dev, "%s: signal x%x\n", __func__, signals); old_dcd_state = portdata->dcd_state; portdata->cts_state = 1; portdata->dcd_state = ((signals & 0x01) ? 1 : 0); portdata->dsr_state = ((signals & 0x02) ? 1 : 0); portdata->ri_state = ((signals & 0x08) ? 1 : 0); tty = tty_port_tty_get(&port->port); if (tty && !C_CLOCAL(tty) && old_dcd_state && !portdata->dcd_state) tty_hangup(tty); tty_kref_put(tty); } else { dev_dbg(&port->dev, "%s: type %x req %x\n", __func__, req_pkt->bRequestType, req_pkt->bRequest); } } else dev_dbg(&port->dev, "%s: error %d\n", __func__, status); /* Resubmit urb so we continue receiving IRQ data */ if (port->port.count && status != -ESHUTDOWN && status != -ENOENT) { usb_mark_last_busy(serial->dev); urb->dev = serial->dev; err = usb_submit_urb(urb, GFP_ATOMIC); if (err) dev_err(&port->dev, "%s: resubmit intr urb " "failed. 
(%d)\n", __func__, err); } } static int sierra_write_room(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct sierra_port_private *portdata = usb_get_serial_port_data(port); unsigned long flags; dev_dbg(&port->dev, "%s - port %d\n", __func__, port->number); /* try to give a good number back based on if we have any free urbs at * this point in time */ spin_lock_irqsave(&portdata->lock, flags); if (portdata->outstanding_urbs > N_OUT_URB * 2 / 3) { spin_unlock_irqrestore(&portdata->lock, flags); dev_dbg(&port->dev, "%s - write limit hit\n", __func__); return 0; } spin_unlock_irqrestore(&portdata->lock, flags); return 2048; } static void sierra_stop_rx_urbs(struct usb_serial_port *port) { int i; struct sierra_port_private *portdata = usb_get_serial_port_data(port); for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++) usb_kill_urb(portdata->in_urbs[i]); usb_kill_urb(port->interrupt_in_urb); } static int sierra_submit_rx_urbs(struct usb_serial_port *port, gfp_t mem_flags) { int ok_cnt; int err = -EINVAL; int i; struct urb *urb; struct sierra_port_private *portdata = usb_get_serial_port_data(port); ok_cnt = 0; for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++) { urb = portdata->in_urbs[i]; if (!urb) continue; err = usb_submit_urb(urb, mem_flags); if (err) { dev_err(&port->dev, "%s: submit urb failed: %d\n", __func__, err); } else { ok_cnt++; } } if (ok_cnt && port->interrupt_in_urb) { err = usb_submit_urb(port->interrupt_in_urb, mem_flags); if (err) { dev_err(&port->dev, "%s: submit intr urb failed: %d\n", __func__, err); } } if (ok_cnt > 0) /* at least one rx urb submitted */ return 0; else return err; } static struct urb *sierra_setup_urb(struct usb_serial *serial, int endpoint, int dir, void *ctx, int len, gfp_t mem_flags, usb_complete_t callback) { struct urb *urb; u8 *buf; if (endpoint == -1) return NULL; urb = usb_alloc_urb(0, mem_flags); if (urb == NULL) { dev_dbg(&serial->dev->dev, "%s: alloc for endpoint %d failed\n", __func__, 
endpoint); return NULL; } buf = kmalloc(len, mem_flags); if (buf) { /* Fill URB using supplied data */ usb_fill_bulk_urb(urb, serial->dev, usb_sndbulkpipe(serial->dev, endpoint) | dir, buf, len, callback, ctx); /* debug */ dev_dbg(&serial->dev->dev, "%s %c u : %p d:%p\n", __func__, dir == USB_DIR_IN ? 'i' : 'o', urb, buf); } else { dev_dbg(&serial->dev->dev, "%s %c u:%p d:%p\n", __func__, dir == USB_DIR_IN ? 'i' : 'o', urb, buf); sierra_release_urb(urb); urb = NULL; } return urb; } static void sierra_close(struct usb_serial_port *port) { int i; struct usb_serial *serial = port->serial; struct sierra_port_private *portdata; struct sierra_intf_private *intfdata = port->serial->private; dev_dbg(&port->dev, "%s\n", __func__); portdata = usb_get_serial_port_data(port); portdata->rts_state = 0; portdata->dtr_state = 0; if (serial->dev) { mutex_lock(&serial->disc_mutex); if (!serial->disconnected) { serial->interface->needs_remote_wakeup = 0; usb_autopm_get_interface(serial->interface); sierra_send_setup(port); } mutex_unlock(&serial->disc_mutex); spin_lock_irq(&intfdata->susp_lock); portdata->opened = 0; spin_unlock_irq(&intfdata->susp_lock); /* Stop reading urbs */ sierra_stop_rx_urbs(port); /* .. 
and release them */ for (i = 0; i < N_IN_URB; i++) { sierra_release_urb(portdata->in_urbs[i]); portdata->in_urbs[i] = NULL; } } } static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port) { struct sierra_port_private *portdata; struct usb_serial *serial = port->serial; struct sierra_intf_private *intfdata = serial->private; int i; int err; int endpoint; struct urb *urb; portdata = usb_get_serial_port_data(port); dev_dbg(&port->dev, "%s\n", __func__); /* Set some sane defaults */ portdata->rts_state = 1; portdata->dtr_state = 1; endpoint = port->bulk_in_endpointAddress; for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++) { urb = sierra_setup_urb(serial, endpoint, USB_DIR_IN, port, IN_BUFLEN, GFP_KERNEL, sierra_indat_callback); portdata->in_urbs[i] = urb; } /* clear halt condition */ usb_clear_halt(serial->dev, usb_sndbulkpipe(serial->dev, endpoint) | USB_DIR_IN); err = sierra_submit_rx_urbs(port, GFP_KERNEL); if (err) { /* get rid of everything as in close */ sierra_close(port); /* restore balance for autopm */ usb_autopm_put_interface(serial->interface); return err; } sierra_send_setup(port); serial->interface->needs_remote_wakeup = 1; spin_lock_irq(&intfdata->susp_lock); portdata->opened = 1; spin_unlock_irq(&intfdata->susp_lock); usb_autopm_put_interface(serial->interface); return 0; } static void sierra_dtr_rts(struct usb_serial_port *port, int on) { struct usb_serial *serial = port->serial; struct sierra_port_private *portdata; portdata = usb_get_serial_port_data(port); portdata->rts_state = on; portdata->dtr_state = on; if (serial->dev) { mutex_lock(&serial->disc_mutex); if (!serial->disconnected) sierra_send_setup(port); mutex_unlock(&serial->disc_mutex); } } static int sierra_startup(struct usb_serial *serial) { struct usb_serial_port *port; struct sierra_port_private *portdata; int i; dev_dbg(&serial->dev->dev, "%s\n", __func__); /* Set Device mode to D0 */ sierra_set_power_state(serial->dev, 0x0000); /* Check NMEA and set */ if (nmea) 
sierra_vsc_set_nmea(serial->dev, 1); /* Now setup per port private data */ for (i = 0; i < serial->num_ports; i++) { port = serial->port[i]; portdata = kzalloc(sizeof(*portdata), GFP_KERNEL); if (!portdata) { dev_dbg(&port->dev, "%s: kmalloc for " "sierra_port_private (%d) failed!.\n", __func__, i); return -ENOMEM; } spin_lock_init(&portdata->lock); init_usb_anchor(&portdata->active); init_usb_anchor(&portdata->delayed); /* Set the port private data pointer */ usb_set_serial_port_data(port, portdata); } return 0; } static void sierra_release(struct usb_serial *serial) { int i; struct usb_serial_port *port; struct sierra_port_private *portdata; dev_dbg(&serial->dev->dev, "%s\n", __func__); for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; if (!port) continue; portdata = usb_get_serial_port_data(port); if (!portdata) continue; kfree(portdata); } } #ifdef CONFIG_PM static void stop_read_write_urbs(struct usb_serial *serial) { int i; struct usb_serial_port *port; struct sierra_port_private *portdata; /* Stop reading/writing urbs */ for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; portdata = usb_get_serial_port_data(port); sierra_stop_rx_urbs(port); usb_kill_anchored_urbs(&portdata->active); } } static int sierra_suspend(struct usb_serial *serial, pm_message_t message) { struct sierra_intf_private *intfdata; int b; if (serial->dev->auto_pm) { intfdata = serial->private; spin_lock_irq(&intfdata->susp_lock); b = intfdata->in_flight; if (b) { spin_unlock_irq(&intfdata->susp_lock); return -EBUSY; } else { intfdata->suspended = 1; spin_unlock_irq(&intfdata->susp_lock); } } stop_read_write_urbs(serial); return 0; } static int sierra_resume(struct usb_serial *serial) { struct usb_serial_port *port; struct sierra_intf_private *intfdata = serial->private; struct sierra_port_private *portdata; struct urb *urb; int ec = 0; int i, err; spin_lock_irq(&intfdata->susp_lock); for (i = 0; i < serial->num_ports; i++) { port = serial->port[i]; portdata = 
usb_get_serial_port_data(port); while ((urb = usb_get_from_anchor(&portdata->delayed))) { usb_anchor_urb(urb, &portdata->active); intfdata->in_flight++; err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { intfdata->in_flight--; usb_unanchor_urb(urb); usb_scuttle_anchored_urbs(&portdata->delayed); break; } } if (portdata->opened) { err = sierra_submit_rx_urbs(port, GFP_ATOMIC); if (err) ec++; } } intfdata->suspended = 0; spin_unlock_irq(&intfdata->susp_lock); return ec ? -EIO : 0; } #else #define sierra_suspend NULL #define sierra_resume NULL #endif static struct usb_serial_driver sierra_device = { .driver = { .owner = THIS_MODULE, .name = "sierra", }, .description = "Sierra USB modem", .id_table = id_table, .usb_driver = &sierra_driver, .calc_num_ports = sierra_calc_num_ports, .probe = sierra_probe, .open = sierra_open, .close = sierra_close, .dtr_rts = sierra_dtr_rts, .write = sierra_write, .write_room = sierra_write_room, .set_termios = sierra_set_termios, .tiocmget = sierra_tiocmget, .tiocmset = sierra_tiocmset, .attach = sierra_startup, .release = sierra_release, .suspend = sierra_suspend, .resume = sierra_resume, .read_int_callback = sierra_instat_callback, }; /* Functions used by new usb-serial code. 
*/ static int __init sierra_init(void) { int retval; retval = usb_serial_register(&sierra_device); if (retval) goto failed_device_register; retval = usb_register(&sierra_driver); if (retval) goto failed_driver_register; printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":" DRIVER_DESC "\n"); return 0; failed_driver_register: usb_serial_deregister(&sierra_device); failed_device_register: return retval; } static void __exit sierra_exit(void) { usb_deregister(&sierra_driver); usb_serial_deregister(&sierra_device); } module_init(sierra_init); module_exit(sierra_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); module_param(nmea, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(nmea, "NMEA streaming"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug messages");
{ "language": "C" }
// SPDX-License-Identifier: GPL-2.0 /* * perf.c * * Performance analysis utility. * * This is the main hub from which the sub-commands (perf stat, * perf top, perf record, perf report, etc.) are started. */ #include "builtin.h" #include "util/env.h" #include <subcmd/exec-cmd.h> #include "util/config.h" #include <subcmd/run-command.h> #include "util/parse-events.h" #include <subcmd/parse-options.h> #include "util/bpf-loader.h" #include "util/debug.h" #include "util/event.h" #include <api/fs/fs.h> #include <api/fs/tracing_path.h> #include <errno.h> #include <pthread.h> #include <signal.h> #include <stdlib.h> #include <time.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <linux/kernel.h> const char perf_usage_string[] = "perf [--version] [--help] [OPTIONS] COMMAND [ARGS]"; const char perf_more_info_string[] = "See 'perf help COMMAND' for more information on a specific command."; static int use_pager = -1; const char *input_name; struct cmd_struct { const char *cmd; int (*fn)(int, const char **); int option; }; static struct cmd_struct commands[] = { { "buildid-cache", cmd_buildid_cache, 0 }, { "buildid-list", cmd_buildid_list, 0 }, { "config", cmd_config, 0 }, { "c2c", cmd_c2c, 0 }, { "diff", cmd_diff, 0 }, { "evlist", cmd_evlist, 0 }, { "help", cmd_help, 0 }, { "kallsyms", cmd_kallsyms, 0 }, { "list", cmd_list, 0 }, { "record", cmd_record, 0 }, { "report", cmd_report, 0 }, { "bench", cmd_bench, 0 }, { "stat", cmd_stat, 0 }, { "timechart", cmd_timechart, 0 }, { "top", cmd_top, 0 }, { "annotate", cmd_annotate, 0 }, { "version", cmd_version, 0 }, { "script", cmd_script, 0 }, { "sched", cmd_sched, 0 }, #ifdef HAVE_LIBELF_SUPPORT { "probe", cmd_probe, 0 }, #endif { "kmem", cmd_kmem, 0 }, { "lock", cmd_lock, 0 }, { "kvm", cmd_kvm, 0 }, { "test", cmd_test, 0 }, #if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT) { "trace", cmd_trace, 0 }, #endif { "inject", cmd_inject, 0 }, { "mem", cmd_mem, 0 }, { "data", cmd_data, 0 }, { 
"ftrace", cmd_ftrace, 0 }, }; struct pager_config { const char *cmd; int val; }; static int pager_command_config(const char *var, const char *value, void *data) { struct pager_config *c = data; if (strstarts(var, "pager.") && !strcmp(var + 6, c->cmd)) c->val = perf_config_bool(var, value); return 0; } /* returns 0 for "no pager", 1 for "use pager", and -1 for "not specified" */ static int check_pager_config(const char *cmd) { int err; struct pager_config c; c.cmd = cmd; c.val = -1; err = perf_config(pager_command_config, &c); return err ?: c.val; } static int browser_command_config(const char *var, const char *value, void *data) { struct pager_config *c = data; if (strstarts(var, "tui.") && !strcmp(var + 4, c->cmd)) c->val = perf_config_bool(var, value); if (strstarts(var, "gtk.") && !strcmp(var + 4, c->cmd)) c->val = perf_config_bool(var, value) ? 2 : 0; return 0; } /* * returns 0 for "no tui", 1 for "use tui", 2 for "use gtk", * and -1 for "not specified" */ static int check_browser_config(const char *cmd) { int err; struct pager_config c; c.cmd = cmd; c.val = -1; err = perf_config(browser_command_config, &c); return err ?: c.val; } static void commit_pager_choice(void) { switch (use_pager) { case 0: setenv(PERF_PAGER_ENVIRONMENT, "cat", 1); break; case 1: /* setup_pager(); */ break; default: break; } } struct option options[] = { OPT_ARGUMENT("help", "help"), OPT_ARGUMENT("version", "version"), OPT_ARGUMENT("exec-path", "exec-path"), OPT_ARGUMENT("html-path", "html-path"), OPT_ARGUMENT("paginate", "paginate"), OPT_ARGUMENT("no-pager", "no-pager"), OPT_ARGUMENT("debugfs-dir", "debugfs-dir"), OPT_ARGUMENT("buildid-dir", "buildid-dir"), OPT_ARGUMENT("list-cmds", "list-cmds"), OPT_ARGUMENT("list-opts", "list-opts"), OPT_ARGUMENT("debug", "debug"), OPT_END() }; static int handle_options(const char ***argv, int *argc, int *envchanged) { int handled = 0; while (*argc > 0) { const char *cmd = (*argv)[0]; if (cmd[0] != '-') break; /* * For legacy reasons, the "version" 
and "help" * commands can be written with "--" prepended * to make them look like flags. */ if (!strcmp(cmd, "--help") || !strcmp(cmd, "--version")) break; /* * Shortcut for '-h' and '-v' options to invoke help * and version command. */ if (!strcmp(cmd, "-h")) { (*argv)[0] = "--help"; break; } if (!strcmp(cmd, "-v")) { (*argv)[0] = "--version"; break; } if (!strcmp(cmd, "-vv")) { (*argv)[0] = "version"; version_verbose = 1; break; } /* * Check remaining flags. */ if (strstarts(cmd, CMD_EXEC_PATH)) { cmd += strlen(CMD_EXEC_PATH); if (*cmd == '=') set_argv_exec_path(cmd + 1); else { puts(get_argv_exec_path()); exit(0); } } else if (!strcmp(cmd, "--html-path")) { puts(system_path(PERF_HTML_PATH)); exit(0); } else if (!strcmp(cmd, "-p") || !strcmp(cmd, "--paginate")) { use_pager = 1; } else if (!strcmp(cmd, "--no-pager")) { use_pager = 0; if (envchanged) *envchanged = 1; } else if (!strcmp(cmd, "--debugfs-dir")) { if (*argc < 2) { fprintf(stderr, "No directory given for --debugfs-dir.\n"); usage(perf_usage_string); } tracing_path_set((*argv)[1]); if (envchanged) *envchanged = 1; (*argv)++; (*argc)--; } else if (!strcmp(cmd, "--buildid-dir")) { if (*argc < 2) { fprintf(stderr, "No directory given for --buildid-dir.\n"); usage(perf_usage_string); } set_buildid_dir((*argv)[1]); if (envchanged) *envchanged = 1; (*argv)++; (*argc)--; } else if (strstarts(cmd, CMD_DEBUGFS_DIR)) { tracing_path_set(cmd + strlen(CMD_DEBUGFS_DIR)); fprintf(stderr, "dir: %s\n", tracing_path_mount()); if (envchanged) *envchanged = 1; } else if (!strcmp(cmd, "--list-cmds")) { unsigned int i; for (i = 0; i < ARRAY_SIZE(commands); i++) { struct cmd_struct *p = commands+i; printf("%s ", p->cmd); } putchar('\n'); exit(0); } else if (!strcmp(cmd, "--list-opts")) { unsigned int i; for (i = 0; i < ARRAY_SIZE(options)-1; i++) { struct option *p = options+i; printf("--%s ", p->long_name); } putchar('\n'); exit(0); } else if (!strcmp(cmd, "--debug")) { if (*argc < 2) { fprintf(stderr, "No variable specified 
for --debug.\n"); usage(perf_usage_string); } if (perf_debug_option((*argv)[1])) usage(perf_usage_string); (*argv)++; (*argc)--; } else { fprintf(stderr, "Unknown option: %s\n", cmd); usage(perf_usage_string); } (*argv)++; (*argc)--; handled++; } return handled; } #define RUN_SETUP (1<<0) #define USE_PAGER (1<<1) static int run_builtin(struct cmd_struct *p, int argc, const char **argv) { int status; struct stat st; char sbuf[STRERR_BUFSIZE]; if (use_browser == -1) use_browser = check_browser_config(p->cmd); if (use_pager == -1 && p->option & RUN_SETUP) use_pager = check_pager_config(p->cmd); if (use_pager == -1 && p->option & USE_PAGER) use_pager = 1; commit_pager_choice(); perf_env__set_cmdline(&perf_env, argc, argv); status = p->fn(argc, argv); perf_config__exit(); exit_browser(status); perf_env__exit(&perf_env); bpf__clear(); if (status) return status & 0xff; /* Somebody closed stdout? */ if (fstat(fileno(stdout), &st)) return 0; /* Ignore write errors for pipes and sockets.. */ if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) return 0; status = 1; /* Check for ENOSPC and EIO errors.. 
*/ if (fflush(stdout)) { fprintf(stderr, "write failure on standard output: %s", str_error_r(errno, sbuf, sizeof(sbuf))); goto out; } if (ferror(stdout)) { fprintf(stderr, "unknown write failure on standard output"); goto out; } if (fclose(stdout)) { fprintf(stderr, "close failed on standard output: %s", str_error_r(errno, sbuf, sizeof(sbuf))); goto out; } status = 0; out: return status; } static void handle_internal_command(int argc, const char **argv) { const char *cmd = argv[0]; unsigned int i; /* Turn "perf cmd --help" into "perf help cmd" */ if (argc > 1 && !strcmp(argv[1], "--help")) { argv[1] = argv[0]; argv[0] = cmd = "help"; } for (i = 0; i < ARRAY_SIZE(commands); i++) { struct cmd_struct *p = commands+i; if (strcmp(p->cmd, cmd)) continue; exit(run_builtin(p, argc, argv)); } } static void execv_dashed_external(const char **argv) { char *cmd; const char *tmp; int status; if (asprintf(&cmd, "perf-%s", argv[0]) < 0) goto do_die; /* * argv[0] must be the perf command, but the argv array * belongs to the caller, and may be reused in * subsequent loop iterations. Save argv[0] and * restore it on error. */ tmp = argv[0]; argv[0] = cmd; /* * if we fail because the command is not found, it is * OK to return. Otherwise, we just pass along the status code. */ status = run_command_v_opt(argv, 0); if (status != -ERR_RUN_COMMAND_EXEC) { if (IS_RUN_COMMAND_ERR(status)) { do_die: pr_err("FATAL: unable to run '%s'", argv[0]); status = -128; } exit(-status); } errno = ENOENT; /* as if we called execvp */ argv[0] = tmp; zfree(&cmd); } static int run_argv(int *argcp, const char ***argv) { /* See if it's an internal command */ handle_internal_command(*argcp, *argv); /* .. 
then try the external ones */ execv_dashed_external(*argv); return 0; } static void pthread__block_sigwinch(void) { sigset_t set; sigemptyset(&set); sigaddset(&set, SIGWINCH); pthread_sigmask(SIG_BLOCK, &set, NULL); } void pthread__unblock_sigwinch(void) { sigset_t set; sigemptyset(&set); sigaddset(&set, SIGWINCH); pthread_sigmask(SIG_UNBLOCK, &set, NULL); } int main(int argc, const char **argv) { int err; const char *cmd; char sbuf[STRERR_BUFSIZE]; /* libsubcmd init */ exec_cmd_init("perf", PREFIX, PERF_EXEC_PATH, EXEC_PATH_ENVIRONMENT); pager_init(PERF_PAGER_ENVIRONMENT); /* The page_size is placed in util object. */ page_size = sysconf(_SC_PAGE_SIZE); cmd = extract_argv0_path(argv[0]); if (!cmd) cmd = "perf-help"; srandom(time(NULL)); /* Setting $PERF_CONFIG makes perf read _only_ the given config file. */ config_exclusive_filename = getenv("PERF_CONFIG"); err = perf_config(perf_default_config, NULL); if (err) return err; set_buildid_dir(NULL); /* * "perf-xxxx" is the same as "perf xxxx", but we obviously: * * - cannot take flags in between the "perf" and the "xxxx". * - cannot execute it externally (since it would just do * the same thing over again) * * So we just directly call the internal command handler. If that one * fails to handle this, then maybe we just run a renamed perf binary * that contains a dash in its name. To handle this scenario, we just * fall through and ignore the "xxxx" part of the command string. */ if (strstarts(cmd, "perf-")) { cmd += 5; argv[0] = cmd; handle_internal_command(argc, argv); /* * If the command is handled, the above function does not * return undo changes and fall through in such a case. */ cmd -= 5; argv[0] = cmd; } if (strstarts(cmd, "trace")) { #if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT) setup_path(); argv[0] = "trace"; return cmd_trace(argc, argv); #else fprintf(stderr, "trace command not available: missing audit-libs devel package at build time.\n"); goto out; #endif } /* Look for flags.. 
*/ argv++; argc--; handle_options(&argv, &argc, NULL); commit_pager_choice(); if (argc > 0) { if (strstarts(argv[0], "--")) argv[0] += 2; } else { /* The user didn't specify a command; give them help */ printf("\n usage: %s\n\n", perf_usage_string); list_common_cmds_help(); printf("\n %s\n\n", perf_more_info_string); goto out; } cmd = argv[0]; test_attr__init(); /* * We use PATH to find perf commands, but we prepend some higher * precedence paths: the "--exec-path" option, the PERF_EXEC_PATH * environment, and the $(perfexecdir) from the Makefile at build * time. */ setup_path(); /* * Block SIGWINCH notifications so that the thread that wants it can * unblock and get syscalls like select interrupted instead of waiting * forever while the signal goes to some other non interested thread. */ pthread__block_sigwinch(); perf_debug_setup(); while (1) { static int done_help; run_argv(&argc, &argv); if (errno != ENOENT) break; if (!done_help) { cmd = argv[0] = help_unknown_cmd(cmd); done_help = 1; } else break; } fprintf(stderr, "Failed to run command '%s': %s\n", cmd, str_error_r(errno, sbuf, sizeof(sbuf))); out: return 1; }
{ "language": "C" }
//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

// Conformance check: including <cstddef> must make the NULL macro visible.
// The test "passes" simply by compiling; the preprocessor check below is
// the whole assertion.

#include <cstddef>

#ifndef NULL
#error NULL not defined
#endif

int main()
{
    return 0;
}
{ "language": "C" }
/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) Nginx, Inc.
 */


#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_http.h>


/*
 * Postpone body filter: output produced by a request that is not the
 * connection's active request (r != c->data) is buffered on the request's
 * "postponed" list instead of being sent; when the request becomes active,
 * the buffered data and pending subrequests are replayed in order.
 */

static ngx_int_t ngx_http_postpone_filter_add(ngx_http_request_t *r,
    ngx_chain_t *in);
static ngx_int_t ngx_http_postpone_filter_init(ngx_conf_t *cf);


static ngx_http_module_t  ngx_http_postpone_filter_module_ctx = {
    NULL,                                  /* preconfiguration */
    ngx_http_postpone_filter_init,         /* postconfiguration */

    NULL,                                  /* create main configuration */
    NULL,                                  /* init main configuration */

    NULL,                                  /* create server configuration */
    NULL,                                  /* merge server configuration */

    NULL,                                  /* create location configuration */
    NULL                                   /* merge location configuration */
};


ngx_module_t  ngx_http_postpone_filter_module = {
    NGX_MODULE_V1,
    &ngx_http_postpone_filter_module_ctx,  /* module context */
    NULL,                                  /* module directives */
    NGX_HTTP_MODULE,                       /* module type */
    NULL,                                  /* init master */
    NULL,                                  /* init module */
    NULL,                                  /* init process */
    NULL,                                  /* init thread */
    NULL,                                  /* exit thread */
    NULL,                                  /* exit process */
    NULL,                                  /* exit master */
    NGX_MODULE_V1_PADDING
};


/* next filter in the body filter chain, saved at configuration time */
static ngx_http_output_body_filter_pt    ngx_http_next_body_filter;


static ngx_int_t
ngx_http_postpone_filter(ngx_http_request_t *r, ngx_chain_t *in)
{
    ngx_connection_t              *c;
    ngx_http_postponed_request_t  *pr;

    c = r->connection;

    ngx_log_debug3(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http postpone filter \"%V?%V\" %p", &r->uri, &r->args, in);

    if (r != c->data) {

        /* r is not the active request: buffer its output for later */

        if (in) {
            /*
             * FIX(review): the return value of ngx_http_postpone_filter_add()
             * was previously ignored; it fails on allocation errors
             * (ngx_palloc / ngx_chain_add_copy), which would silently drop
             * the output.  Propagate the error instead.
             */
            if (ngx_http_postpone_filter_add(r, in) != NGX_OK) {
                return NGX_ERROR;
            }

            return NGX_OK;
        }

#if 0
        /* TODO: SSI may pass NULL */
        ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                      "http postpone filter NULL inactive request");
#endif

        return NGX_OK;
    }

    if (r->postponed == NULL) {

        /* nothing postponed: pass straight through to the next filter */

        if (in || c->buffered) {
            return ngx_http_next_body_filter(r->main, in);
        }

        return NGX_OK;
    }

    if (in) {
        /* keep new output ordered behind the already-postponed data */
        if (ngx_http_postpone_filter_add(r, in) != NGX_OK) {
            return NGX_ERROR;
        }
    }

    do {
        pr = r->postponed;

        if (pr->request) {

            /* a pending subrequest: make it the active request and wake it */

            ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http postpone filter wake \"%V?%V\"",
                           &pr->request->uri, &pr->request->args);

            r->postponed = pr->next;

            c->data = pr->request;

            return ngx_http_post_request(pr->request, NULL);
        }

        if (pr->out == NULL) {
            ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                          "http postpone filter NULL output");

        } else {
            ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http postpone filter output \"%V?%V\"",
                           &r->uri, &r->args);

            if (ngx_http_next_body_filter(r->main, pr->out) == NGX_ERROR) {
                return NGX_ERROR;
            }
        }

        r->postponed = pr->next;

    } while (r->postponed);

    return NGX_OK;
}


/*
 * Append the chain "in" to r's pending output, reusing a trailing
 * data-only node (request == NULL) if one exists, otherwise allocating
 * a new postponed node at the end of the list.
 *
 * Returns NGX_OK on success, NGX_ERROR on allocation failure.
 */
static ngx_int_t
ngx_http_postpone_filter_add(ngx_http_request_t *r, ngx_chain_t *in)
{
    ngx_http_postponed_request_t  *pr, **ppr;

    if (r->postponed) {
        for (pr = r->postponed; pr->next; pr = pr->next) { /* void */ }

        if (pr->request == NULL) {
            goto found;
        }

        ppr = &pr->next;

    } else {
        ppr = &r->postponed;
    }

    pr = ngx_palloc(r->pool, sizeof(ngx_http_postponed_request_t));
    if (pr == NULL) {
        return NGX_ERROR;
    }

    *ppr = pr;

    pr->request = NULL;
    pr->out = NULL;
    pr->next = NULL;

found:

    if (ngx_chain_add_copy(r->pool, &pr->out, in) == NGX_OK) {
        return NGX_OK;
    }

    return NGX_ERROR;
}


/* install this filter at the head of the body filter chain */
static ngx_int_t
ngx_http_postpone_filter_init(ngx_conf_t *cf)
{
    ngx_http_next_body_filter = ngx_http_top_body_filter;
    ngx_http_top_body_filter = ngx_http_postpone_filter;

    return NGX_OK;
}
{ "language": "C" }
/**
 * Copyright 2019-present, GraphQL Foundation
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#pragma once

#ifdef __cplusplus
extern "C" {
#endif

/* Opaque AST node handle; the definition lives in the parser library. */
struct GraphQLAstNode;

/**
 * Serialize the given AST to JSON. The returned C string must be
 * freed with free().
 *
 * @param node root of the AST to serialize (opaque handle)
 * @return newly allocated NUL-terminated JSON string; caller owns it
 */
const char *graphql_ast_to_json(const struct GraphQLAstNode *node);

#ifdef __cplusplus
}
#endif
{ "language": "C" }
/* * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Sun designates this * particular file as subject to the "Classpath" exception as provided * by Sun in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. 
*/ #ifndef SPLASHSCREEN_GFX_IMPL_H #define SPLASHSCREEN_GFX_IMPL_H #include "splashscreen_gfx.h" /* here come some very simple macros */ /* advance a pointer p by sizeof(type)*n bytes */ #define INCPN(type,p,n) ((p) = (type*)(p)+(n)) /* advance a pointer by sizeof(type) */ #define INCP(type,p) INCPN(type,(p),1) /* store a typed value to pointed location */ #define PUT(type,p,v) (*(type*)(p) = (type)(v)) /* load a typed value from pointed location */ #define GET(type,p) (*(type*)p) /* same as cond<0?-1:0 */ enum { IFNEG_SHIFT_BITS = sizeof(int) * 8 - 1 }; #define IFNEG(cond) ((int)(cond)>>IFNEG_SHIFT_BITS) /* same as cond<0?n1:n2 */ #define IFNEGPOS(cond,n1,n2) ((IFNEG(cond)&(n1))|((~IFNEG(cond))&(n2))) /* value shifted left by n bits, negative n is allowed */ #define LSHIFT(value,n) IFNEGPOS((n),(value)>>-(n),(value)<<(n)) /* value shifted right by n bits, negative n is allowed */ #define RSHIFT(value,n) IFNEGPOS(n,(value)<<-(n),(value)>>(n)) /* converts a single i'th component to the specific format defined by format->shift[i] and format->mask[i] */ #define CONVCOMP(quad,format,i) \ (LSHIFT((quad),(format)->shift[i])&(format)->mask[i]) /* extracts the component defined by format->shift[i] and format->mask[i] from a specific-format value */ #define UNCONVCOMP(value,format,i) \ (RSHIFT((value)&(format)->mask[i],(format)->shift[i])) /* dithers the color using the dither matrices and colormap from format indices to dither matrices are passed as arguments */ INLINE unsigned ditherColor(rgbquad_t value, ImageFormat * format, int row, int col) { int blue = QUAD_BLUE(value); int green = QUAD_GREEN(value); int red = QUAD_RED(value); blue = format->dithers[0].colorTable[blue + format->dithers[0].matrix[col & DITHER_MASK][row & DITHER_MASK]]; green = format->dithers[1].colorTable[green + format->dithers[1].matrix[col & DITHER_MASK][row & DITHER_MASK]]; red = format->dithers[2].colorTable[red + format->dithers[2].matrix[col & DITHER_MASK][row & DITHER_MASK]]; return red + 
green + blue; } /* blend (lerp between) two rgb quads src and dst alpha is ignored the algorithm: src*alpha+dst*(1-alpha)=(src-dst)*alpha+dst, rb and g are done separately it's possible to verify that it's almost accurate indeed */ INLINE rgbquad_t blendRGB(rgbquad_t dst, rgbquad_t src, rgbquad_t alpha) { const rgbquad_t dstrb = dst & 0xFF00FF; const rgbquad_t dstg = dst & 0xFF00; const rgbquad_t srcrb = src & 0xFF00FF; const rgbquad_t srcg = src & 0xFF00; rgbquad_t drb = srcrb - dstrb; rgbquad_t dg = srcg - dstg; alpha += 1; drb *= alpha; dg *= alpha; drb >>= 8; dg >>= 8; return ((drb + dstrb) & 0xFF00FF) | ((dg + dstg) & 0xFF00); } /* scales rgb quad by alpha. basically similar to what's above. src alpha is retained. used for premultiplying alpha btw: braindead MSVC6 generates _three_ mul instructions for this function */ INLINE rgbquad_t premultiplyRGBA(rgbquad_t src) { rgbquad_t srb = src & 0xFF00FF; rgbquad_t sg = src & 0xFF00; rgbquad_t alpha = src >> QUAD_ALPHA_SHIFT; alpha += 1; srb *= alpha; sg *= alpha; srb >>= 8; sg >>= 8; return (src & 0xFF000000) | (srb & 0xFF00FF) | (sg & 0xFF00); } /* The functions below are inherently ineffective, but the performance seems to be more or less adequate for the case of splash screens. They can be optimized later if needed. The idea of optimization is to provide inlineable form of putRGBADither and getRGBA at least for certain most frequently used visuals. Something like this is done in Java 2D ("loops"). This would be possible with C++ templates, but making it clean for C would require ugly preprocessor tricks. Leaving it out for later. 
*/ /* convert a single pixel color value from rgbquad according to visual format and place it to pointed location ordered dithering used when necessary */ INLINE void putRGBADither(rgbquad_t value, void *ptr, ImageFormat * format, int row, int col) { if (format->premultiplied) { value = premultiplyRGBA(value); } if (format->dithers) { value = format->colorIndex[ditherColor(value, format, row, col)]; } else { value = CONVCOMP(value, format, 0) | CONVCOMP(value, format, 1) | CONVCOMP(value, format, 2) | CONVCOMP(value, format, 3); } switch (format->byteOrder) { case BYTE_ORDER_LSBFIRST: switch (format->depthBytes) { /* lack of *break*'s is intentional */ case 4: PUT(byte_t, ptr, value & 0xff); value >>= 8; INCP(byte_t, ptr); case 3: PUT(byte_t, ptr, value & 0xff); value >>= 8; INCP(byte_t, ptr); case 2: PUT(byte_t, ptr, value & 0xff); value >>= 8; INCP(byte_t, ptr); case 1: PUT(byte_t, ptr, value & 0xff); } break; case BYTE_ORDER_MSBFIRST: switch (format->depthBytes) { /* lack of *break*'s is intentional */ case 4: PUT(byte_t, ptr, (value >> 24) & 0xff); INCP(byte_t, ptr); case 3: PUT(byte_t, ptr, (value >> 16) & 0xff); INCP(byte_t, ptr); case 2: PUT(byte_t, ptr, (value >> 8) & 0xff); INCP(byte_t, ptr); case 1: PUT(byte_t, ptr, value & 0xff); } break; case BYTE_ORDER_NATIVE: switch (format->depthBytes) { case 4: PUT(rgbquad_t, ptr, value); break; case 3: /* not supported, LSB or MSB should always be specified */ *(int *) 0 = 0; /* crash */ break; case 2: PUT(word_t, ptr, value); break; case 1: PUT(byte_t, ptr, value); break; } } } /* load a single pixel color value and un-convert it to rgbquad according to visual format */ INLINE rgbquad_t getRGBA(void *ptr, ImageFormat * format) { /* FIXME: color is not un-alpha-premultiplied on get this is not required by current code, but it makes the implementation inconsistent i.e. 
put(get) will not work right for alpha-premultiplied images */ /* get the value basing on depth and byte order */ rgbquad_t value = 0; switch (format->byteOrder) { case BYTE_ORDER_LSBFIRST: switch (format->depthBytes) { case 4: value |= GET(byte_t, ptr); value <<= 8; INCP(byte_t, ptr); case 3: value |= GET(byte_t, ptr); value <<= 8; INCP(byte_t, ptr); case 2: value |= GET(byte_t, ptr); value <<= 8; INCP(byte_t, ptr); case 1: value |= GET(byte_t, ptr); } break; case BYTE_ORDER_MSBFIRST: switch (format->depthBytes) { /* lack of *break*'s is intentional */ case 4: value |= (GET(byte_t, ptr) << 24); INCP(byte_t, ptr); case 3: value |= (GET(byte_t, ptr) << 16); INCP(byte_t, ptr); case 2: value |= (GET(byte_t, ptr) << 8); INCP(byte_t, ptr); case 1: value |= GET(byte_t, ptr); } break; case BYTE_ORDER_NATIVE: switch (format->depthBytes) { case 4: value = GET(rgbquad_t, ptr); break; case 3: /* not supported, LSB or MSB should always be specified */ *(int *) 0 = 0; break; case 2: value = (rgbquad_t) GET(word_t, ptr); break; case 1: value = (rgbquad_t) GET(byte_t, ptr); break; } break; } /* now un-convert the value */ if (format->colorMap) { if (value == format->transparentColor) return 0; else return format->colorMap[value]; } else { return UNCONVCOMP(value, format, 0) | UNCONVCOMP(value, format, 1) | UNCONVCOMP(value, format, 2) | UNCONVCOMP(value, format, 3) | format->fixedBits; } } /* fill the line with the specified color according to visual format */ INLINE void fillLine(rgbquad_t color, void *pDst, int incDst, int n, ImageFormat * dstFormat, int row, int col) { int i; for (i = 0; i < n; ++i) { putRGBADither(color, pDst, dstFormat, row, col++); INCPN(byte_t, pDst, incDst); } } /* find the shift for specified mask, also verify the mask is valid */ INLINE int getMaskShift(rgbquad_t mask, int *pShift, int *pnumBits) { int shift = 0, numBits = 0; /* check the mask is not empty */ if (!mask) return 0; /* calculate the shift */ while ((mask & 1) == 0) { ++shift; mask >>= 1; } 
/* check the mask is contigious */ if ((mask & (mask + 1)) != 0) return 0; /* calculate the number of bits */ do { ++numBits; mask >>= 1; } while ((mask & 1) != 0); *pShift = shift; *pnumBits = numBits; return 1; } #endif
{ "language": "C" }
/*!
 * \file eth.h
 * \brief Definitions for Ethernet Physical Layer Interface
 * \version $Revision: 1.3 $
 * \author Michael Norman
 */
/* NOTE(review): the \file tag says eth.h but the include guard below is
 * _ETH_PHY_H - confirm the intended file name. */

#ifndef _ETH_PHY_H
#define _ETH_PHY_H

/*******************************************************************/

/* MII Register Addresses (standard MII management register layout) */
#define PHY_BMCR      (0x00)  /* Basic Mode Control Register */
#define PHY_BMSR      (0x01)  /* Basic Mode Status Register */
#define PHY_PHYIDR1   (0x02)  /* PHY Identifier Register 1 */
#define PHY_PHYIDR2   (0x03)  /* PHY Identifier Register 2 */
#define PHY_ANAR      (0x04)  /* Auto-Negotiation Advertisement Register */
#define PHY_ANLPAR    (0x05)  /* Auto-Negotiation Link Partner Ability */

/* Bit definitions and macros for PHY_CTRL (PHY_BMCR) */
#define PHY_BMCR_RESET       (0x8000)  /* bit 15 */
#define PHY_BMCR_LOOP        (0x4000)  /* bit 14 */
#define PHY_BMCR_SPEED       (0x2000)  /* bit 13 */
#define PHY_BMCR_AN_ENABLE   (0x1000)  /* bit 12 */
#define PHY_BMCR_POWERDOWN   (0x0800)  /* bit 11 */
#define PHY_BMCR_ISOLATE     (0x0400)  /* bit 10 */
#define PHY_BMCR_AN_RESTART  (0x0200)  /* bit 9 */
#define PHY_BMCR_FDX         (0x0100)  /* bit 8 */
#define PHY_BMCR_COL_TEST    (0x0080)  /* bit 7 */

/* Bit definitions and macros for PHY_STAT (PHY_BMSR) */
#define PHY_BMSR_100BT4      (0x8000)
#define PHY_BMSR_100BTX_FDX  (0x4000)
#define PHY_BMSR_100BTX      (0x2000)
#define PHY_BMSR_10BT_FDX    (0x1000)
#define PHY_BMSR_10BT        (0x0800)
#define PHY_BMSR_NO_PREAMBLE (0x0040)
#define PHY_BMSR_AN_COMPLETE (0x0020)
#define PHY_BMSR_REMOTE_FAULT (0x0010)
#define PHY_BMSR_AN_ABILITY  (0x0008)
#define PHY_BMSR_LINK        (0x0004)
#define PHY_BMSR_JABBER      (0x0002)
#define PHY_BMSR_EXTENDED    (0x0001)

/* Bit definitions and macros for PHY_AN_ADV (PHY_ANAR) */
/* NOTE(review): unlike the PHY_ANLPAR_* masks below, every constant here
 * also has bit 0 set (the value of PHY_ANAR_802_3), so OR-ing any of
 * these together always includes the 802.3 selector field.  Presumably
 * intentional convenience - confirm against the PHY datasheet before
 * "normalizing" these values. */
#define PHY_ANAR_NEXT_PAGE   (0x8001)
#define PHY_ANAR_REM_FAULT   (0x2001)
#define PHY_ANAR_PAUSE       (0x0401)
#define PHY_ANAR_100BT4      (0x0201)
#define PHY_ANAR_100BTX_FDX  (0x0101)
#define PHY_ANAR_100BTX      (0x0081)
#define PHY_ANAR_10BT_FDX    (0x0041)
#define PHY_ANAR_10BT        (0x0021)
#define PHY_ANAR_802_3       (0x0001)

/* Bit definitions and macros for PHY_AN_LINK_PAR (PHY_ANLPAR) */
#define PHY_ANLPAR_NEXT_PAGE (0x8000)
#define PHY_ANLPAR_ACK       (0x4000)
#define PHY_ANLPAR_REM_FAULT (0x2000)
#define PHY_ANLPAR_PAUSE     (0x0400)
#define PHY_ANLPAR_100BT4    (0x0200)
#define PHY_ANLPAR_100BTX_FDX (0x0100)
#define PHY_ANLPAR_100BTX    (0x0080)
/* NOTE(review): name mixes "10BTX" with "_FDX"; by bit position (0x0040)
 * this mirrors PHY_ANAR_10BT_FDX, so the name is likely a typo for
 * PHY_ANLPAR_10BT_FDX.  Left unchanged since callers may reference it. */
#define PHY_ANLPAR_10BTX_FDX (0x0040)
#define PHY_ANLPAR_10BT      (0x0020)

/*******************************************************************/

#endif  /* _ETH_PHY_H */
{ "language": "C" }
/****************************************************************************
**
** Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
** All rights reserved.
** Contact: Nokia Corporation (qt-info@nokia.com)
**
** This file is part of the QtGui module of the Qt Toolkit.
**
** $QT_BEGIN_LICENSE:LGPL$
** Commercial Usage
** Licensees holding valid Qt Commercial licenses may use this file in
** accordance with the Qt Commercial License Agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and Nokia.
**
** GNU Lesser General Public License Usage
** Alternatively, this file may be used under the terms of the GNU Lesser
** General Public License version 2.1 as published by the Free Software
** Foundation and appearing in the file LICENSE.LGPL included in the
** packaging of this file.  Please review the following information to
** ensure the GNU Lesser General Public License version 2.1 requirements
** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
**
** In addition, as a special exception, Nokia gives you certain additional
** rights.  These rights are described in the Nokia Qt LGPL Exception
** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
**
** GNU General Public License Usage
** Alternatively, this file may be used under the terms of the GNU
** General Public License version 3.0 as published by the Free Software
** Foundation and appearing in the file LICENSE.GPL included in the
** packaging of this file.  Please review the following information to
** ensure the GNU General Public License version 3.0 requirements will be
** met: http://www.gnu.org/copyleft/gpl.html.
**
** If you have questions regarding the use of this file, please contact
** Nokia at qt-info@nokia.com.
** $QT_END_LICENSE$
**
****************************************************************************/

#ifndef QDRAWHELPER_X86_P_H
#define QDRAWHELPER_X86_P_H

//
//  W A R N I N G
//  -------------
//
// This file is not part of the Qt API.  It exists purely as an
// implementation detail.  This header file may change from version to
// version without notice, or even be removed.
//
// We mean it.
//

#include <private/qdrawhelper_p.h>

QT_BEGIN_NAMESPACE

/*
 * Declarations of SIMD-accelerated draw helpers.  Each group is gated on
 * a compile-time QT_HAVE_* flag; the implementations live in the
 * corresponding qdrawhelper_*.cpp translation units.
 */

#ifdef QT_HAVE_MMX
extern CompositionFunction qt_functionForMode_MMX[];
extern CompositionFunctionSolid qt_functionForModeSolid_MMX[];
void qt_blend_color_argb_mmx(int count, const QSpan *spans, void *userData);
#endif

#ifdef QT_HAVE_MMXEXT
void qt_memfill32_mmxext(quint32 *dest, quint32 value, int count);
void qt_bitmapblit16_mmxext(QRasterBuffer *rasterBuffer, int x, int y,
                            quint32 color, const uchar *src,
                            int width, int height, int stride);
#endif

#ifdef QT_HAVE_3DNOW
/* 3DNow! variants pair with either MMX or SSE baselines */
#if defined(QT_HAVE_MMX) || !defined(QT_HAVE_SSE)
extern CompositionFunction qt_functionForMode_MMX3DNOW[];
extern CompositionFunctionSolid qt_functionForModeSolid_MMX3DNOW[];
void qt_blend_color_argb_mmx3dnow(int count, const QSpan *spans,
                                  void *userData);
#endif // MMX
#ifdef QT_HAVE_SSE
extern CompositionFunction qt_functionForMode_SSE3DNOW[];
extern CompositionFunctionSolid qt_functionForModeSolid_SSE3DNOW[];
void qt_memfill32_sse3dnow(quint32 *dest, quint32 value, int count);
void qt_bitmapblit16_sse3dnow(QRasterBuffer *rasterBuffer, int x, int y,
                              quint32 color, const uchar *src,
                              int width, int height, int stride);
void qt_blend_color_argb_sse3dnow(int count, const QSpan *spans,
                                  void *userData);
#endif // SSE
#endif // QT_HAVE_3DNOW

#ifdef QT_HAVE_SSE
void qt_memfill32_sse(quint32 *dest, quint32 value, int count);
void qt_bitmapblit16_sse(QRasterBuffer *rasterBuffer, int x, int y,
                         quint32 color, const uchar *src,
                         int width, int height, int stride);
void qt_blend_color_argb_sse(int count, const QSpan *spans, void *userData);
extern CompositionFunction qt_functionForMode_SSE[];
extern CompositionFunctionSolid qt_functionForModeSolid_SSE[];
#endif // QT_HAVE_SSE

#ifdef QT_HAVE_SSE2
void qt_memfill32_sse2(quint32 *dest, quint32 value, int count);
void qt_memfill16_sse2(quint16 *dest, quint16 value, int count);
void qt_bitmapblit32_sse2(QRasterBuffer *rasterBuffer, int x, int y,
                          quint32 color, const uchar *src,
                          int width, int height, int stride);
void qt_bitmapblit16_sse2(QRasterBuffer *rasterBuffer, int x, int y,
                          quint32 color, const uchar *src,
                          int width, int height, int stride);
#endif // QT_HAVE_SSE2

#ifdef QT_HAVE_IWMMXT
void qt_blend_color_argb_iwmmxt(int count, const QSpan *spans, void *userData);
extern CompositionFunction qt_functionForMode_IWMMXT[];
extern CompositionFunctionSolid qt_functionForModeSolid_IWMMXT[];
#endif

/* NOTE(review): presumably the element count of the qt_functionForMode_*
 * tables declared above - confirm against qdrawhelper_p.h before reuse. */
static const int numCompositionFunctions = 33;

QT_END_NAMESPACE

#endif // QDRAWHELPER_X86_P_H
{ "language": "C" }
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/fat/cache.c
 *
 * Written 1992,1993 by Werner Almesberger
 *
 * Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
 * of inode number.
 * May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
 */

#include <linux/slab.h>
#include "fat.h"

/* this must be > 0. */
#define FAT_MAX_CACHE	8

/*
 * One cached run of contiguous clusters for a file, linked on the
 * per-inode LRU list (MSDOS_I(inode)->cache_lru).
 */
struct fat_cache {
	struct list_head cache_list;
	int nr_contig;	/* number of contiguous clusters */
	int fcluster;	/* cluster number in the file. */
	int dcluster;	/* cluster number on disk. */
};

/*
 * Snapshot of a cache entry taken under the lock, plus the generation
 * "id" used to detect invalidation between lookup and re-insertion.
 */
struct fat_cache_id {
	unsigned int id;
	int nr_contig;
	int fcluster;
	int dcluster;
};

static inline int fat_max_cache(struct inode *inode)
{
	return FAT_MAX_CACHE;
}

static struct kmem_cache *fat_cache_cachep;

/* slab constructor: entries always carry a valid (empty) list head */
static void init_once(void *foo)
{
	struct fat_cache *cache = (struct fat_cache *)foo;

	INIT_LIST_HEAD(&cache->cache_list);
}

int __init fat_cache_init(void)
{
	fat_cache_cachep = kmem_cache_create("fat_cache",
				sizeof(struct fat_cache),
				0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
				init_once);
	if (fat_cache_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void fat_cache_destroy(void)
{
	kmem_cache_destroy(fat_cache_cachep);
}

static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
{
	return kmem_cache_alloc(fat_cache_cachep, GFP_NOFS);
}

static inline void fat_cache_free(struct fat_cache *cache)
{
	/* must already be unlinked from the LRU */
	BUG_ON(!list_empty(&cache->cache_list));
	kmem_cache_free(fat_cache_cachep, cache);
}

/* Move @cache to the front of the inode's LRU list (caller holds the lock). */
static inline void fat_cache_update_lru(struct inode *inode,
					struct fat_cache *cache)
{
	if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list)
		list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru);
}

/*
 * Look up the cached entry covering (or nearest below) file cluster
 * @fclus.  On a hit, fills @cid with a snapshot and returns via
 * @cached_fclus/@cached_dclus the closest cached position at or before
 * @fclus.  Returns the offset within the hit entry, or -1 on miss.
 */
static int fat_cache_lookup(struct inode *inode, int fclus,
			    struct fat_cache_id *cid,
			    int *cached_fclus, int *cached_dclus)
{
	static struct fat_cache nohit = { .fcluster = 0, };
	struct fat_cache *hit = &nohit, *p;
	int offset = -1;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
		/* Find the cache of "fclus" or nearest cache. */
		if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
			hit = p;
			if ((hit->fcluster + hit->nr_contig) < fclus) {
				offset = hit->nr_contig;
			} else {
				offset = fclus - hit->fcluster;
				break;
			}
		}
	}
	if (hit != &nohit) {
		fat_cache_update_lru(inode, hit);

		cid->id = MSDOS_I(inode)->cache_valid_id;
		cid->nr_contig = hit->nr_contig;
		cid->fcluster = hit->fcluster;
		cid->dcluster = hit->dcluster;
		*cached_fclus = cid->fcluster + offset;
		*cached_dclus = cid->dcluster + offset;
	}
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

	return offset;
}

/*
 * If an entry with the same starting file cluster as @new already
 * exists, extend it and return it; otherwise return NULL.
 * Caller holds cache_lru_lock.
 */
static struct fat_cache *fat_cache_merge(struct inode *inode,
					 struct fat_cache_id *new)
{
	struct fat_cache *p;

	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
		/* Find the same part as "new" in cluster-chain. */
		if (p->fcluster == new->fcluster) {
			BUG_ON(p->dcluster != new->dcluster);
			if (new->nr_contig > p->nr_contig)
				p->nr_contig = new->nr_contig;
			return p;
		}
	}
	return NULL;
}

/*
 * Insert (or merge) @new into the inode's cache.  The lock is dropped
 * around the GFP_NOFS allocation, so the merge is re-checked afterwards
 * and the entry is silently discarded if the generation id shows the
 * cache was invalidated in between.
 */
static void fat_cache_add(struct inode *inode, struct fat_cache_id *new)
{
	struct fat_cache *cache, *tmp;

	if (new->fcluster == -1) /* dummy cache */
		return;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	if (new->id != FAT_CACHE_VALID &&
	    new->id != MSDOS_I(inode)->cache_valid_id)
		goto out;	/* this cache was invalidated */

	cache = fat_cache_merge(inode, new);
	if (cache == NULL) {
		if (MSDOS_I(inode)->nr_caches < fat_max_cache(inode)) {
			MSDOS_I(inode)->nr_caches++;
			spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

			tmp = fat_cache_alloc(inode);
			if (!tmp) {
				spin_lock(&MSDOS_I(inode)->cache_lru_lock);
				MSDOS_I(inode)->nr_caches--;
				spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
				return;
			}

			spin_lock(&MSDOS_I(inode)->cache_lru_lock);
			cache = fat_cache_merge(inode, new);
			if (cache != NULL) {
				/* raced with another inserter: drop ours */
				MSDOS_I(inode)->nr_caches--;
				fat_cache_free(tmp);
				goto out_update_lru;
			}
			cache = tmp;
		} else {
			/* cache full: recycle the least-recently-used entry */
			struct list_head *p = MSDOS_I(inode)->cache_lru.prev;
			cache = list_entry(p, struct fat_cache, cache_list);
		}
		cache->fcluster = new->fcluster;
		cache->dcluster = new->dcluster;
		cache->nr_contig = new->nr_contig;
	}
out_update_lru:
	fat_cache_update_lru(inode, cache);
out:
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}

/*
 * Cache invalidation occurs rarely, thus the LRU chain is not updated. It
 * fixes itself after a while.
 */
static void __fat_cache_inval_inode(struct inode *inode)
{
	struct msdos_inode_info *i = MSDOS_I(inode);
	struct fat_cache *cache;

	while (!list_empty(&i->cache_lru)) {
		cache = list_entry(i->cache_lru.next,
				   struct fat_cache, cache_list);
		list_del_init(&cache->cache_list);
		i->nr_caches--;
		fat_cache_free(cache);
	}
	/* Update. The copy of caches before this id is discarded. */
	i->cache_valid_id++;
	if (i->cache_valid_id == FAT_CACHE_VALID)
		i->cache_valid_id++;
}

void fat_cache_inval_inode(struct inode *inode)
{
	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	__fat_cache_inval_inode(inode);
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}

/* Advance @cid by one cluster; nonzero if @dclus continues the run. */
static inline int cache_contiguous(struct fat_cache_id *cid, int dclus)
{
	cid->nr_contig++;
	return ((cid->dcluster + cid->nr_contig) == dclus);
}

static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
{
	cid->id = FAT_CACHE_VALID;
	cid->fcluster = fclus;
	cid->dcluster = dclus;
	cid->nr_contig = 0;
}

/*
 * Walk the FAT chain of @inode to file cluster @cluster, using the cache
 * as a starting point when possible.  On success *@fclus/*@dclus hold the
 * reached file/disk cluster and 0 or FAT_ENT_EOF is returned; -EIO on a
 * corrupt chain.
 */
int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const int limit = sb->s_maxbytes >> sbi->cluster_bits;
	struct fat_entry fatent;
	struct fat_cache_id cid;
	int nr;

	BUG_ON(MSDOS_I(inode)->i_start == 0);

	*fclus = 0;
	*dclus = MSDOS_I(inode)->i_start;
	if (!fat_valid_entry(sbi, *dclus)) {
		fat_fs_error_ratelimit(sb,
			"%s: invalid start cluster (i_pos %lld, start %08x)",
			__func__, MSDOS_I(inode)->i_pos, *dclus);
		return -EIO;
	}
	if (cluster == 0)
		return 0;

	if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) {
		/*
		 * dummy, always not contiguous
		 * This is reinitialized by cache_init(), later.
		 */
		cache_init(&cid, -1, -1);
	}

	fatent_init(&fatent);
	while (*fclus < cluster) {
		/* prevent the infinite loop of cluster chain */
		if (*fclus > limit) {
			fat_fs_error_ratelimit(sb,
				"%s: detected the cluster chain loop (i_pos %lld)",
				__func__, MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		}

		nr = fat_ent_read(inode, &fatent, *dclus);
		if (nr < 0)
			goto out;
		else if (nr == FAT_ENT_FREE) {
			fat_fs_error_ratelimit(sb,
				"%s: invalid cluster chain (i_pos %lld)",
				__func__, MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		} else if (nr == FAT_ENT_EOF) {
			fat_cache_add(inode, &cid);
			goto out;
		}
		(*fclus)++;
		*dclus = nr;
		if (!cache_contiguous(&cid, *dclus))
			cache_init(&cid, *fclus, *dclus);
	}
	nr = 0;
	fat_cache_add(inode, &cid);
out:
	fatent_brelse(&fatent);
	return nr;
}

/* Map file cluster @cluster to its disk cluster; 0 if the file is empty. */
static int fat_bmap_cluster(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	int ret, fclus, dclus;

	if (MSDOS_I(inode)->i_start == 0)
		return 0;

	ret = fat_get_cluster(inode, cluster, &fclus, &dclus);
	if (ret < 0)
		return ret;
	else if (ret == FAT_ENT_EOF) {
		fat_fs_error(sb, "%s: request beyond EOF (i_pos %lld)",
			     __func__, MSDOS_I(inode)->i_pos);
		return -EIO;
	}
	return dclus;
}

/*
 * Translate file sector @sector to a disk block number in *@bmap and the
 * count of further contiguous blocks (capped at @last_block) in
 * *@mapped_blocks.  Outputs are untouched for an unmapped (hole) sector.
 */
int fat_get_mapped_cluster(struct inode *inode, sector_t sector,
			   sector_t last_block,
			   unsigned long *mapped_blocks, sector_t *bmap)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int cluster, offset;

	cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits);
	offset = sector & (sbi->sec_per_clus - 1);
	cluster = fat_bmap_cluster(inode, cluster);
	if (cluster < 0)
		return cluster;
	else if (cluster) {
		*bmap = fat_clus_to_blknr(sbi, cluster) + offset;
		*mapped_blocks = sbi->sec_per_clus - offset;
		if (*mapped_blocks > last_block - sector)
			*mapped_blocks = last_block - sector;
	}

	return 0;
}

/*
 * Nonzero if @sector lies beyond the file's usable size.  With @create,
 * blocks up to ->mmu_private (the allocated size) are still allowed.
 */
static int is_exceed_eof(struct inode *inode, sector_t sector,
			 sector_t *last_block, int create)
{
	struct super_block *sb = inode->i_sb;
	const unsigned long blocksize = sb->s_blocksize;
	const unsigned char blocksize_bits = sb->s_blocksize_bits;

	*last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
	if (sector >= *last_block) {
		if (!create)
			return 1;

		/*
		 * ->mmu_private can access on only allocation path.
		 * (caller must hold ->i_mutex)
		 */
		*last_block = (MSDOS_I(inode)->mmu_private + (blocksize - 1))
			>> blocksize_bits;
		if (sector >= *last_block)
			return 1;
	}

	return 0;
}

/*
 * Map file sector @sector to physical block *@phys.  The FAT12/16 root
 * directory is laid out contiguously and handled directly; other files go
 * through the cluster chain.  *@phys stays 0 for unmapped sectors.
 */
int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
	     unsigned long *mapped_blocks, int create, bool from_bmap)
{
	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
	sector_t last_block;

	*phys = 0;
	*mapped_blocks = 0;
	if (!is_fat32(sbi) && (inode->i_ino == MSDOS_ROOT_INO)) {
		if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) {
			*phys = sector + sbi->dir_start;
			*mapped_blocks = 1;
		}
		return 0;
	}

	if (!from_bmap) {
		if (is_exceed_eof(inode, sector, &last_block, create))
			return 0;
	} else {
		last_block = inode->i_blocks >>
			(inode->i_sb->s_blocksize_bits - 9);
		if (sector >= last_block)
			return 0;
	}

	return fat_get_mapped_cluster(inode, sector, last_block,
				      mapped_blocks, phys);
}
{ "language": "C" }
/*
 * Copyright (C) 2016, 2016 Shuguo Zhuo <shuguo.zhuo@inria.fr>
 *
 * This file is subject to the terms and conditions of the GNU Lesser
 * General Public License v2.1. See the file LICENSE in the top level
 * directory for more details.
 */

/**
 * @{
 *
 * @file
 * @brief   embUnit tests for gnrc_priority_pktqueue: a priority-ordered
 *          queue of gnrc packet snips (lower numeric priority pops first).
 */
#include <string.h>

#include "embUnit.h"

#include "net/gnrc/pkt.h"
#include "net/gnrc/pktbuf.h"
#include "net/gnrc/priority_pktqueue.h"

#include "unittests-constants.h"
#include "tests-priority_pktqueue.h"

/* Initializer for a gnrc_pktsnip_t with users == 1 and undefined nettype. */
#define PKT_INIT_ELEM(len, data, next) \
    { (next), (void *)(data), (len), 1, GNRC_NETTYPE_UNDEF }
#define PKT_INIT_ELEM_STATIC_DATA(data, next) PKT_INIT_ELEM(sizeof(data), (void *)(data), (next))
#define PKTQUEUE_INIT_ELEM(pkt) { NULL, pkt }

/* Queue under test; reset before every test case by set_up(). */
static gnrc_priority_pktqueue_t pkt_queue;

static void set_up(void)
{
    pkt_queue.first = NULL;
    gnrc_pktbuf_init();
}

/* init() must clear any stale head pointer. */
static void test_gnrc_priority_pktqueue_init(void)
{
    gnrc_priority_pktqueue_node_t elem;

    pkt_queue.first = (priority_queue_node_t *)(&elem);

    gnrc_priority_pktqueue_init(&pkt_queue);

    TEST_ASSERT_NULL(pkt_queue.first);
}

/* node_init() must store pkt and priority and leave next unset. */
static void test_gnrc_priority_pktqueue_node_init(void)
{
    gnrc_pktsnip_t pkt = PKT_INIT_ELEM_STATIC_DATA(TEST_STRING8, NULL);
    gnrc_priority_pktqueue_node_t elem;

    gnrc_priority_pktqueue_node_init(&elem,TEST_UINT32,&pkt);

    TEST_ASSERT_NULL(elem.next);
    TEST_ASSERT(elem.pkt == &pkt);
    TEST_ASSERT_EQUAL_INT(TEST_UINT32, elem.priority);
    TEST_ASSERT_EQUAL_STRING(TEST_STRING8, elem.pkt->data);
}

/* Pushing a single node makes it the head, with its snip untouched. */
static void test_gnrc_priority_pktqueue_push_one(void)
{
    gnrc_pktsnip_t pkt = PKT_INIT_ELEM_STATIC_DATA(TEST_STRING8, NULL);
    gnrc_priority_pktqueue_node_t elem = PRIORITY_PKTQUEUE_NODE_INIT(TEST_UINT32,&pkt);

    gnrc_priority_pktqueue_push(&pkt_queue,&elem);

    TEST_ASSERT((gnrc_priority_pktqueue_node_t *)(pkt_queue.first) == &elem);
    TEST_ASSERT_NULL(((gnrc_priority_pktqueue_node_t *)(pkt_queue.first))->next);
    TEST_ASSERT_EQUAL_INT(TEST_UINT32,
                          ((gnrc_priority_pktqueue_node_t *)(pkt_queue.first))->priority);
    TEST_ASSERT_EQUAL_INT(1,
                          ((gnrc_priority_pktqueue_node_t *)(pkt_queue.first))->pkt->users);
    TEST_ASSERT_NULL(((gnrc_priority_pktqueue_node_t *)(pkt_queue.first))->pkt->next);
    TEST_ASSERT_EQUAL_STRING(TEST_STRING8,
                             ((gnrc_priority_pktqueue_node_t *)(pkt_queue.first))->pkt->data);
    TEST_ASSERT_EQUAL_INT(sizeof(TEST_STRING8),
                          ((gnrc_priority_pktqueue_node_t *)(pkt_queue.first))->pkt->size);
    TEST_ASSERT_EQUAL_INT(GNRC_NETTYPE_UNDEF,
                          ((gnrc_priority_pktqueue_node_t *)(pkt_queue.first))->pkt->type);
}

/* The lower-priority value (0) must be ordered ahead of 1. */
static void test_gnrc_priority_pktqueue_push_two(void)
{
    gnrc_pktsnip_t pkt1 = PKT_INIT_ELEM_STATIC_DATA(TEST_STRING8, NULL);
    gnrc_pktsnip_t pkt2 = PKT_INIT_ELEM_STATIC_DATA(TEST_STRING16, NULL);
    gnrc_priority_pktqueue_node_t elem1 = PRIORITY_PKTQUEUE_NODE_INIT(1,&pkt1);
    gnrc_priority_pktqueue_node_t elem2 = PRIORITY_PKTQUEUE_NODE_INIT(0,&pkt2);

    gnrc_priority_pktqueue_push(&pkt_queue, &elem1);
    gnrc_priority_pktqueue_push(&pkt_queue, &elem2);

    TEST_ASSERT((gnrc_priority_pktqueue_node_t *)(pkt_queue.first) == &elem2);
    TEST_ASSERT((gnrc_priority_pktqueue_node_t *)(pkt_queue.first->next) == &elem1);
    TEST_ASSERT_NULL(((gnrc_priority_pktqueue_node_t *)(pkt_queue.first))->next->next);
    TEST_ASSERT_EQUAL_INT(0,
                          ((gnrc_priority_pktqueue_node_t *)(pkt_queue.first))->priority);
    TEST_ASSERT_EQUAL_INT(1,
                          ((gnrc_priority_pktqueue_node_t *)(pkt_queue.first))->next->priority);
    TEST_ASSERT_EQUAL_INT(1,
                          ((gnrc_priority_pktqueue_node_t *)(pkt_queue.first))->pkt->users);
    TEST_ASSERT_NULL(((gnrc_priority_pktqueue_node_t *)(pkt_queue.first))->pkt->next);
    TEST_ASSERT_EQUAL_STRING(TEST_STRING16,
                             ((gnrc_priority_pktqueue_node_t *)(pkt_queue.first))->pkt->data);
    TEST_ASSERT_EQUAL_INT(sizeof(TEST_STRING16),
                          ((gnrc_priority_pktqueue_node_t *)(pkt_queue.first))->pkt->size);
    TEST_ASSERT_EQUAL_INT(GNRC_NETTYPE_UNDEF,
                          ((gnrc_priority_pktqueue_node_t *)(pkt_queue.first))->pkt->type);
    TEST_ASSERT_EQUAL_INT(1,
                          ((gnrc_priority_pktqueue_node_t *)(pkt_queue.first))->next->pkt->users);
    TEST_ASSERT_NULL(((gnrc_priority_pktqueue_node_t *)(pkt_queue.first))->next->pkt->next);
    TEST_ASSERT_EQUAL_STRING(TEST_STRING8,
                             ((gnrc_priority_pktqueue_node_t *)(pkt_queue.first))->next->pkt->data);
    TEST_ASSERT_EQUAL_INT(sizeof(TEST_STRING8),
                          ((gnrc_priority_pktqueue_node_t *)(pkt_queue.first))->next->pkt->size);
    TEST_ASSERT_EQUAL_INT(GNRC_NETTYPE_UNDEF,
                          ((gnrc_priority_pktqueue_node_t *)(pkt_queue.first))->next->pkt->type);
}

/* length() must track the number of queued nodes. */
static void test_gnrc_priority_pktqueue_length(void)
{
    gnrc_pktsnip_t pkt1 = PKT_INIT_ELEM_STATIC_DATA(TEST_STRING8, NULL);
    gnrc_pktsnip_t pkt2 = PKT_INIT_ELEM_STATIC_DATA(TEST_STRING16, NULL);
    gnrc_priority_pktqueue_node_t elem1 = PRIORITY_PKTQUEUE_NODE_INIT(1,&pkt1);
    gnrc_priority_pktqueue_node_t elem2 = PRIORITY_PKTQUEUE_NODE_INIT(0,&pkt2);

    TEST_ASSERT_EQUAL_INT(0, gnrc_priority_pktqueue_length(&pkt_queue));

    gnrc_priority_pktqueue_push(&pkt_queue, &elem1);

    TEST_ASSERT_EQUAL_INT(1, gnrc_priority_pktqueue_length(&pkt_queue));

    gnrc_priority_pktqueue_push(&pkt_queue, &elem2);

    TEST_ASSERT_EQUAL_INT(2, gnrc_priority_pktqueue_length(&pkt_queue));
}

/* flush() must empty the queue (snips come from the pktbuf here). */
static void test_gnrc_priority_pktqueue_flush(void)
{
    gnrc_pktsnip_t *pkt1 = gnrc_pktbuf_add(NULL, TEST_STRING8, sizeof(TEST_STRING8),
                                           GNRC_NETTYPE_TEST);
    gnrc_pktsnip_t *pkt2 = gnrc_pktbuf_add(NULL, TEST_STRING16, sizeof(TEST_STRING16),
                                           GNRC_NETTYPE_TEST);

    TEST_ASSERT_NOT_NULL(pkt1);
    TEST_ASSERT_NOT_NULL(pkt2);

    gnrc_priority_pktqueue_node_t elem1 = PRIORITY_PKTQUEUE_NODE_INIT(1,pkt1);
    gnrc_priority_pktqueue_node_t elem2 = PRIORITY_PKTQUEUE_NODE_INIT(0,pkt2);

    gnrc_priority_pktqueue_push(&pkt_queue, &elem1);
    gnrc_priority_pktqueue_push(&pkt_queue, &elem2);

    gnrc_priority_pktqueue_flush(&pkt_queue);

    TEST_ASSERT_NULL(pkt_queue.first);
    TEST_ASSERT_EQUAL_INT(0, gnrc_priority_pktqueue_length(&pkt_queue));
}

/* head() must peek without removing, always the lowest priority value. */
static void test_gnrc_priority_pktqueue_head(void)
{
    gnrc_pktsnip_t *pkt1 = gnrc_pktbuf_add(NULL, TEST_STRING8, sizeof(TEST_STRING8),
                                           GNRC_NETTYPE_TEST);
    gnrc_pktsnip_t *pkt2 = gnrc_pktbuf_add(NULL, TEST_STRING12, sizeof(TEST_STRING12),
                                           GNRC_NETTYPE_TEST);
    gnrc_pktsnip_t *pkt3 = gnrc_pktbuf_add(NULL, TEST_STRING16, sizeof(TEST_STRING16),
                                           GNRC_NETTYPE_TEST);

    TEST_ASSERT_NOT_NULL(pkt1);
    TEST_ASSERT_NOT_NULL(pkt2);
    TEST_ASSERT_NOT_NULL(pkt3);

    gnrc_priority_pktqueue_node_t elem1 = PRIORITY_PKTQUEUE_NODE_INIT(1,pkt1);
    gnrc_priority_pktqueue_node_t elem2 = PRIORITY_PKTQUEUE_NODE_INIT(1,pkt2);
    gnrc_priority_pktqueue_node_t elem3 = PRIORITY_PKTQUEUE_NODE_INIT(0,pkt3);

    gnrc_pktsnip_t *head = gnrc_priority_pktqueue_head(&pkt_queue);

    TEST_ASSERT_NULL(head);

    gnrc_priority_pktqueue_push(&pkt_queue, &elem1);

    head = gnrc_priority_pktqueue_head(&pkt_queue);

    TEST_ASSERT(head == pkt1);

    /* equal priority keeps FIFO order: pkt1 stays in front of pkt2 */
    gnrc_priority_pktqueue_push(&pkt_queue, &elem2);

    head = gnrc_priority_pktqueue_head(&pkt_queue);

    TEST_ASSERT(head == pkt1);

    gnrc_priority_pktqueue_push(&pkt_queue, &elem3);

    head = gnrc_priority_pktqueue_head(&pkt_queue);

    TEST_ASSERT(head == pkt3);

    gnrc_priority_pktqueue_flush(&pkt_queue);

    head = gnrc_priority_pktqueue_head(&pkt_queue);

    TEST_ASSERT_NULL(head);
}

/* pop() on an empty queue must return NULL and leave it empty. */
static void test_gnrc_priority_pktqueue_pop_empty(void)
{
    gnrc_pktsnip_t *res;

    res = gnrc_priority_pktqueue_pop(&pkt_queue);

    TEST_ASSERT_NULL(pkt_queue.first);
    TEST_ASSERT_NULL(res);
    TEST_ASSERT_EQUAL_INT(0, gnrc_priority_pktqueue_length(&pkt_queue));
}

/* pop() must return snips in priority order and reset the popped nodes. */
static void test_gnrc_priority_pktqueue_pop(void)
{
    gnrc_pktsnip_t pkt1 = PKT_INIT_ELEM_STATIC_DATA(TEST_STRING8, NULL);
    gnrc_pktsnip_t pkt2 = PKT_INIT_ELEM_STATIC_DATA(TEST_STRING16, NULL);
    gnrc_priority_pktqueue_node_t elem1 = PRIORITY_PKTQUEUE_NODE_INIT(1,&pkt1);
    gnrc_priority_pktqueue_node_t elem2 = PRIORITY_PKTQUEUE_NODE_INIT(0,&pkt2);
    gnrc_pktsnip_t *res;
    gnrc_pktsnip_t *head;

    gnrc_priority_pktqueue_push(&pkt_queue, &elem1);
    gnrc_priority_pktqueue_push(&pkt_queue, &elem2);

    res = gnrc_priority_pktqueue_pop(&pkt_queue);

    TEST_ASSERT(res == &pkt2);
    TEST_ASSERT_NULL(elem2.pkt);
    TEST_ASSERT_NULL(elem2.next);
    TEST_ASSERT_EQUAL_INT(0,elem2.priority);
    TEST_ASSERT((gnrc_priority_pktqueue_node_t *)(pkt_queue.first) == &elem1);
    TEST_ASSERT_EQUAL_INT(1,
                          ((gnrc_priority_pktqueue_node_t *)(pkt_queue.first))->priority);
    TEST_ASSERT_EQUAL_INT(1, res->users);
    TEST_ASSERT_NULL(res->next);
    TEST_ASSERT_EQUAL_STRING(TEST_STRING16, res->data);
    TEST_ASSERT_EQUAL_INT(sizeof(TEST_STRING16), res->size);
    TEST_ASSERT_EQUAL_INT(GNRC_NETTYPE_UNDEF, res->type);

    res = gnrc_priority_pktqueue_pop(&pkt_queue);

    TEST_ASSERT_NULL(pkt_queue.first);
    TEST_ASSERT_NULL(elem1.pkt);
    TEST_ASSERT_NULL(elem1.next);
    TEST_ASSERT_EQUAL_INT(0,elem1.priority);
    TEST_ASSERT(res == &pkt1);
    TEST_ASSERT_EQUAL_INT(1, res->users);
    TEST_ASSERT_NULL(res->next);
    TEST_ASSERT_EQUAL_STRING(TEST_STRING8, res->data);
    TEST_ASSERT_EQUAL_INT(sizeof(TEST_STRING8), res->size);
    TEST_ASSERT_EQUAL_INT(GNRC_NETTYPE_UNDEF, res->type);

    head = gnrc_priority_pktqueue_head(&pkt_queue);

    TEST_ASSERT_NULL(pkt_queue.first);
    TEST_ASSERT_NULL(head);
}

Test *tests_priority_pktqueue_tests(void)
{
    EMB_UNIT_TESTFIXTURES(fixtures) {
        new_TestFixture(test_gnrc_priority_pktqueue_init),
        new_TestFixture(test_gnrc_priority_pktqueue_node_init),
        new_TestFixture(test_gnrc_priority_pktqueue_push_one),
        new_TestFixture(test_gnrc_priority_pktqueue_push_two),
        new_TestFixture(test_gnrc_priority_pktqueue_length),
        new_TestFixture(test_gnrc_priority_pktqueue_flush),
        new_TestFixture(test_gnrc_priority_pktqueue_head),
        new_TestFixture(test_gnrc_priority_pktqueue_pop_empty),
        new_TestFixture(test_gnrc_priority_pktqueue_pop),
    };

    EMB_UNIT_TESTCALLER(priority_pktqueue_tests, set_up, NULL, fixtures);

    return (Test *)&priority_pktqueue_tests;
}

void tests_priority_pktqueue(void)
{
    TESTS_RUN(tests_priority_pktqueue_tests());
}
/** @} */
{ "language": "C" }
/*========================================================================= pause.h Author: PKG Created: Project: Spongebob Purpose: Copyright (c) 2001 Climax Development Ltd ===========================================================================*/ #ifndef __GAME_PAUSE_H__ #define __GAME_PAUSE_H__ /*---------------------------------------------------------------------- Includes -------- */ /* Std Lib ------- */ /*---------------------------------------------------------------------- Tyepdefs && Defines ------------------- */ /*---------------------------------------------------------------------- Structure defintions -------------------- */ class CPauseMenu { public: void init(); void shutdown(); void select(); void unselect(); void think(int _frames); void render(); int isActive(); private: typedef enum { STATE__MAIN_MENU, STATE__CONFIRM_QUIT, } STATE; enum { RESPONSE__WAITING, RESPONSE__CONTINUE, RESPONSE__QUIT, RESPONSE__CONFIRM_QUIT_YES, RESPONSE__CONFIRM_QUIT_NO, }; void renderLives(); int m_active; int m_SpeechPlaying; int m_padDebounce; int m_responseFlag; int m_vibrationChangeFlag; int m_vibrationState; STATE m_currentState; class CGUIControlFrame *m_pauseGuiFrame; class CGUIControlFrame *m_confirmQuitGuiFrame; class CGUIObject *m_vibGUIOption; class FontBank *m_fontBank; }; /*---------------------------------------------------------------------- Globals ------- */ /*---------------------------------------------------------------------- Functions --------- */ /*---------------------------------------------------------------------- */ #endif /* __GAME_PAUSE_H__ */ /*=========================================================================== end */
{ "language": "C" }
/* * CPPC (Collaborative Processor Performance Control) driver for * interfacing with the CPUfreq layer and governors. See * cppc_acpi.c for CPPC specific methods. * * (C) Copyright 2014, 2015 Linaro Ltd. * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. */ #define pr_fmt(fmt) "CPPC Cpufreq:" fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/cpu.h> #include <linux/cpufreq.h> #include <linux/dmi.h> #include <linux/time.h> #include <linux/vmalloc.h> #include <asm/unaligned.h> #include <acpi/cppc_acpi.h> /* Minimum struct length needed for the DMI processor entry we want */ #define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48 /* Offest in the DMI processor structure for the max frequency */ #define DMI_PROCESSOR_MAX_SPEED 0x14 /* * These structs contain information parsed from per CPU * ACPI _CPC structures. * e.g. For each CPU the highest, lowest supported * performance capabilities, desired performance level * requested etc. */ static struct cppc_cpudata **all_cpu_data; /* Callback function used to retrieve the max frequency from DMI */ static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private) { const u8 *dmi_data = (const u8 *)dm; u16 *mhz = (u16 *)private; if (dm->type == DMI_ENTRY_PROCESSOR && dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) { u16 val = (u16)get_unaligned((const u16 *) (dmi_data + DMI_PROCESSOR_MAX_SPEED)); *mhz = val > *mhz ? val : *mhz; } } /* Look up the max frequency in DMI */ static u64 cppc_get_dmi_max_khz(void) { u16 mhz = 0; dmi_walk(cppc_find_dmi_mhz, &mhz); /* * Real stupid fallback value, just in case there is no * actual value set. */ mhz = mhz ? 
mhz : 1; return (1000 * mhz); } /* * If CPPC lowest_freq and nominal_freq registers are exposed then we can * use them to convert perf to freq and vice versa * * If the perf/freq point lies between Nominal and Lowest, we can treat * (Low perf, Low freq) and (Nom Perf, Nom freq) as 2D co-ordinates of a line * and extrapolate the rest * For perf/freq > Nominal, we use the ratio perf:freq at Nominal for conversion */ static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu, unsigned int perf) { static u64 max_khz; struct cppc_perf_caps *caps = &cpu->perf_caps; u64 mul, div; if (caps->lowest_freq && caps->nominal_freq) { if (perf >= caps->nominal_perf) { mul = caps->nominal_freq; div = caps->nominal_perf; } else { mul = caps->nominal_freq - caps->lowest_freq; div = caps->nominal_perf - caps->lowest_perf; } } else { if (!max_khz) max_khz = cppc_get_dmi_max_khz(); mul = max_khz; div = cpu->perf_caps.highest_perf; } return (u64)perf * mul / div; } static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu, unsigned int freq) { static u64 max_khz; struct cppc_perf_caps *caps = &cpu->perf_caps; u64 mul, div; if (caps->lowest_freq && caps->nominal_freq) { if (freq >= caps->nominal_freq) { mul = caps->nominal_perf; div = caps->nominal_freq; } else { mul = caps->lowest_perf; div = caps->lowest_freq; } } else { if (!max_khz) max_khz = cppc_get_dmi_max_khz(); mul = cpu->perf_caps.highest_perf; div = max_khz; } return (u64)freq * mul / div; } static int cppc_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { struct cppc_cpudata *cpu; struct cpufreq_freqs freqs; u32 desired_perf; int ret = 0; cpu = all_cpu_data[policy->cpu]; desired_perf = cppc_cpufreq_khz_to_perf(cpu, target_freq); /* Return if it is exactly the same perf */ if (desired_perf == cpu->perf_ctrls.desired_perf) return ret; cpu->perf_ctrls.desired_perf = desired_perf; freqs.old = policy->cur; freqs.new = target_freq; 
cpufreq_freq_transition_begin(policy, &freqs); ret = cppc_set_perf(cpu->cpu, &cpu->perf_ctrls); cpufreq_freq_transition_end(policy, &freqs, ret != 0); if (ret) pr_debug("Failed to set target on CPU:%d. ret:%d\n", cpu->cpu, ret); return ret; } static int cppc_verify_policy(struct cpufreq_policy *policy) { cpufreq_verify_within_cpu_limits(policy); return 0; } static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy) { int cpu_num = policy->cpu; struct cppc_cpudata *cpu = all_cpu_data[cpu_num]; int ret; cpu->perf_ctrls.desired_perf = cpu->perf_caps.lowest_perf; ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls); if (ret) pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n", cpu->perf_caps.lowest_perf, cpu_num, ret); } /* * The PCC subspace describes the rate at which platform can accept commands * on the shared PCC channel (including READs which do not count towards freq * trasition requests), so ideally we need to use the PCC values as a fallback * if we don't have a platform specific transition_delay_us */ #ifdef CONFIG_ARM64 #include <asm/cputype.h> static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu) { unsigned long implementor = read_cpuid_implementor(); unsigned long part_num = read_cpuid_part_number(); unsigned int delay_us = 0; switch (implementor) { case ARM_CPU_IMP_QCOM: switch (part_num) { case QCOM_CPU_PART_FALKOR_V1: case QCOM_CPU_PART_FALKOR: delay_us = 10000; break; default: delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC; break; } break; default: delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC; break; } return delay_us; } #else static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu) { return cppc_get_transition_latency(cpu) / NSEC_PER_USEC; } #endif static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) { struct cppc_cpudata *cpu; unsigned int cpu_num = policy->cpu; int ret = 0; cpu = all_cpu_data[policy->cpu]; cpu->cpu = cpu_num; ret = cppc_get_perf_caps(policy->cpu, &cpu->perf_caps); if 
(ret) { pr_debug("Err reading CPU%d perf capabilities. ret:%d\n", cpu_num, ret); return ret; } /* Convert the lowest and nominal freq from MHz to KHz */ cpu->perf_caps.lowest_freq *= 1000; cpu->perf_caps.nominal_freq *= 1000; /* * Set min to lowest nonlinear perf to avoid any efficiency penalty (see * Section 8.4.7.1.1.5 of ACPI 6.1 spec) */ policy->min = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_nonlinear_perf); policy->max = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf); /* * Set cpuinfo.min_freq to Lowest to make the full range of performance * available if userspace wants to use any perf between lowest & lowest * nonlinear perf */ policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_perf); policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf); policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num); policy->shared_type = cpu->shared_type; if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { int i; cpumask_copy(policy->cpus, cpu->shared_cpu_map); for_each_cpu(i, policy->cpus) { if (unlikely(i == policy->cpu)) continue; memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps, sizeof(cpu->perf_caps)); } } else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) { /* Support only SW_ANY for now. */ pr_debug("Unsupported CPU co-ord type\n"); return -EFAULT; } cpu->cur_policy = policy; /* Set policy->cur to max now. The governors will adjust later. */ policy->cur = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf); cpu->perf_ctrls.desired_perf = cpu->perf_caps.highest_perf; ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls); if (ret) pr_debug("Err setting perf value:%d on CPU:%d. 
ret:%d\n", cpu->perf_caps.highest_perf, cpu_num, ret); return ret; } static struct cpufreq_driver cppc_cpufreq_driver = { .flags = CPUFREQ_CONST_LOOPS, .verify = cppc_verify_policy, .target = cppc_cpufreq_set_target, .init = cppc_cpufreq_cpu_init, .stop_cpu = cppc_cpufreq_stop_cpu, .name = "cppc_cpufreq", }; static int __init cppc_cpufreq_init(void) { int i, ret = 0; struct cppc_cpudata *cpu; if (acpi_disabled) return -ENODEV; all_cpu_data = kcalloc(num_possible_cpus(), sizeof(void *), GFP_KERNEL); if (!all_cpu_data) return -ENOMEM; for_each_possible_cpu(i) { all_cpu_data[i] = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL); if (!all_cpu_data[i]) goto out; cpu = all_cpu_data[i]; if (!zalloc_cpumask_var(&cpu->shared_cpu_map, GFP_KERNEL)) goto out; } ret = acpi_get_psd_map(all_cpu_data); if (ret) { pr_debug("Error parsing PSD data. Aborting cpufreq registration.\n"); goto out; } ret = cpufreq_register_driver(&cppc_cpufreq_driver); if (ret) goto out; return ret; out: for_each_possible_cpu(i) { cpu = all_cpu_data[i]; if (!cpu) break; free_cpumask_var(cpu->shared_cpu_map); kfree(cpu); } kfree(all_cpu_data); return -ENODEV; } static void __exit cppc_cpufreq_exit(void) { struct cppc_cpudata *cpu; int i; cpufreq_unregister_driver(&cppc_cpufreq_driver); for_each_possible_cpu(i) { cpu = all_cpu_data[i]; free_cpumask_var(cpu->shared_cpu_map); kfree(cpu); } kfree(all_cpu_data); } module_exit(cppc_cpufreq_exit); MODULE_AUTHOR("Ashwin Chaugule"); MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec"); MODULE_LICENSE("GPL"); late_initcall(cppc_cpufreq_init); static const struct acpi_device_id cppc_acpi_ids[] = { {ACPI_PROCESSOR_DEVICE_HID, }, {} }; MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);
{ "language": "C" }
/* $OpenBSD: kexgexs.c,v 1.14 2010/11/10 01:33:07 djm Exp $ */ /* * Copyright (c) 2000 Niels Provos. All rights reserved. * Copyright (c) 2001 Markus Friedl. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#include "includes.h"

#include <sys/param.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include <signal.h>

#include <openssl/dh.h>

#include "xmalloc.h"
#include "buffer.h"
#include "key.h"
#include "cipher.h"
#include "kex.h"
#include "log.h"
#include "packet.h"
#include "dh.h"
#include "ssh2.h"
#include "compat.h"

#ifdef GSSAPI
#include "ssh-gss.h"
#endif
#include "monitor_wrap.h"

/*
 * Server side of the SSH2 Diffie-Hellman Group Exchange key agreement
 * (the SSH2_MSG_KEX_DH_GEX_* message flow):
 *   1. read the client's group-size request (new or old message format),
 *   2. pick a matching DH group and send p/g to the client,
 *   3. receive the client's public value 'e', compute the shared secret,
 *   4. hash the exchange, sign the hash with the host key, and reply
 *      with the host key blob, our public value 'f', and the signature.
 * Terminates the connection via fatal()/packet_disconnect() on any
 * protocol violation; on success derives the session keys and finishes
 * the exchange via kex_finish().
 */
void
kexgex_server(Kex *kex)
{
	BIGNUM *shared_secret = NULL, *dh_client_pub = NULL;
	Key *server_host_public, *server_host_private;
	DH *dh;
	u_char *kbuf, *hash, *signature = NULL, *server_host_key_blob = NULL;
	u_int sbloblen, klen, slen, hashlen;
	/*
	 * The o-prefixed copies keep the client's original (unclamped)
	 * values: those are what must be fed into the exchange hash.
	 */
	int omin = -1, min = -1, omax = -1, max = -1, onbits = -1, nbits = -1;
	int type, kout;

	/* Both host-key callbacks must be installed before we can answer. */
	if (kex->load_host_public_key == NULL ||
	    kex->load_host_private_key == NULL)
		fatal("Cannot load hostkey");
	server_host_public = kex->load_host_public_key(kex->hostkey_type);
	if (server_host_public == NULL)
		fatal("Unsupported hostkey type %d", kex->hostkey_type);
	server_host_private = kex->load_host_private_key(kex->hostkey_type);
	if (server_host_private == NULL)
		fatal("Missing private key for hostkey type %d",
		    kex->hostkey_type);

	/* Read the client's group request (two wire formats exist). */
	type = packet_read();
	switch (type) {
	case SSH2_MSG_KEX_DH_GEX_REQUEST:
		debug("SSH2_MSG_KEX_DH_GEX_REQUEST received");
		omin = min = packet_get_int();
		onbits = nbits = packet_get_int();
		omax = max = packet_get_int();
		/* Clamp the requested sizes into our supported range. */
		min = MAX(DH_GRP_MIN, min);
		max = MIN(DH_GRP_MAX, max);
		nbits = MAX(DH_GRP_MIN, nbits);
		nbits = MIN(DH_GRP_MAX, nbits);
		break;
	case SSH2_MSG_KEX_DH_GEX_REQUEST_OLD:
		debug("SSH2_MSG_KEX_DH_GEX_REQUEST_OLD received");
		onbits = nbits = packet_get_int();
		/* unused for old GEX: the old format carries only nbits */
		omin = min = DH_GRP_MIN;
		omax = max = DH_GRP_MAX;
		break;
	default:
		fatal("protocol error during kex, no DH_GEX_REQUEST: %d", type);
	}
	packet_check_eom();

	/* Reject requests that do not satisfy min <= nbits <= max. */
	if (omax < omin || onbits < omin || omax < onbits)
		fatal("DH_GEX_REQUEST, bad parameters: %d !< %d !< %d",
		    omin, onbits, omax);

	/* Contact privileged parent: group selection runs pre-privsep. */
	dh = PRIVSEP(choose_dh(min, nbits, max));
	if (dh == NULL)
		packet_disconnect("Protocol error: no matching DH grp found");

	/* Send the chosen group parameters p and g to the client. */
	debug("SSH2_MSG_KEX_DH_GEX_GROUP sent");
	packet_start(SSH2_MSG_KEX_DH_GEX_GROUP);
	packet_put_bignum2(dh->p);
	packet_put_bignum2(dh->g);
	packet_send();

	/* flush */
	packet_write_wait();

	/* Compute our exchange value in parallel with the client */
	dh_gen_key(dh, kex->we_need * 8);

	debug("expecting SSH2_MSG_KEX_DH_GEX_INIT");
	packet_read_expect(SSH2_MSG_KEX_DH_GEX_INIT);

	/* key, cert: read the client's DH public value 'e' */
	if ((dh_client_pub = BN_new()) == NULL)
		fatal("dh_client_pub == NULL");
	packet_get_bignum2(dh_client_pub);
	packet_check_eom();

#ifdef DEBUG_KEXDH
	fprintf(stderr, "dh_client_pub= ");
	BN_print_fp(stderr, dh_client_pub);
	fprintf(stderr, "\n");
	debug("bits %d", BN_num_bits(dh_client_pub));
#endif

#ifdef DEBUG_KEXDH
	DHparams_print_fp(stderr, dh);
	fprintf(stderr, "pub= ");
	BN_print_fp(stderr, dh->pub_key);
	fprintf(stderr, "\n");
#endif
	/* Sanity-check 'e' before using it (rejects degenerate values). */
	if (!dh_pub_is_valid(dh, dh_client_pub))
		packet_disconnect("bad client public DH value");

	/* k = e^x mod p; kout is the actual byte length of the secret. */
	klen = DH_size(dh);
	kbuf = xmalloc(klen);
	if ((kout = DH_compute_key(kbuf, dh_client_pub, dh)) < 0)
		fatal("DH_compute_key: failed");
#ifdef DEBUG_KEXDH
	dump_digest("shared secret", kbuf, kout);
#endif
	if ((shared_secret = BN_new()) == NULL)
		fatal("kexgex_server: BN_new failed");
	if (BN_bin2bn(kbuf, kout, shared_secret) == NULL)
		fatal("kexgex_server: BN_bin2bn failed");
	/* Scrub the raw secret bytes before releasing the buffer. */
	memset(kbuf, 0, klen);
	xfree(kbuf);

	key_to_blob(server_host_public, &server_host_key_blob, &sbloblen);

	/* Old-format requests hash without the min/max fields. */
	if (type == SSH2_MSG_KEX_DH_GEX_REQUEST_OLD)
		omin = min = omax = max = -1;

	/* calc H */
	kexgex_hash(
	    kex->evp_md,
	    kex->client_version_string,
	    kex->server_version_string,
	    buffer_ptr(&kex->peer), buffer_len(&kex->peer),
	    buffer_ptr(&kex->my), buffer_len(&kex->my),
	    server_host_key_blob, sbloblen,
	    omin, onbits, omax,
	    dh->p, dh->g,
	    dh_client_pub,
	    dh->pub_key,
	    shared_secret,
	    &hash, &hashlen
	);
	BN_clear_free(dh_client_pub);

	/* save session id := H (only for the first exchange) */
	if (kex->session_id == NULL) {
		kex->session_id_len = hashlen;
		kex->session_id = xmalloc(kex->session_id_len);
		memcpy(kex->session_id, hash, kex->session_id_len);
	}

	/* sign H (signing happens in the privileged monitor) */
	if (PRIVSEP(key_sign(server_host_private, &signature, &slen, hash,
	    hashlen)) < 0)
		fatal("kexgex_server: key_sign failed");

	/* destroy_sensitive_data(); */

	/* send server hostkey, DH pubkey 'f' and signed H */
	debug("SSH2_MSG_KEX_DH_GEX_REPLY sent");
	packet_start(SSH2_MSG_KEX_DH_GEX_REPLY);
	packet_put_string(server_host_key_blob, sbloblen);
	packet_put_bignum2(dh->pub_key);	/* f */
	packet_put_string(signature, slen);
	packet_send();

	xfree(signature);
	xfree(server_host_key_blob);
	/* have keys, free DH */
	DH_free(dh);

	kex_derive_keys(kex, hash, hashlen, shared_secret);
	BN_clear_free(shared_secret);
	kex_finish(kex);
}
{ "language": "C" }
/*
 * The contents of this file are subject to the Interbase Public
 * License Version 1.0 (the "License"); you may not use this file
 * except in compliance with the License. You may obtain a copy
 * of the License at http://www.Inprise.com/IPL.html
 *
 * Software distributed under the License is distributed on an
 * "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express
 * or implied. See the License for the specific language governing
 * rights and limitations under the License.
 *
 * The Original Code was created by Inprise Corporation
 * and its predecessors. Portions created by Inprise Corporation are
 * Copyright (C) Inprise Corporation.
 *
 * All Rights Reserved.
 * Contributor(s): ______________________________________.
 */

/*
 * Case-conversion and collation tables for a 256-entry (Latin-1 style)
 * character set.  Three weight levels are used: primary (base letter),
 * secondary (accent/variant) and tertiary (case).  Punctuation and
 * control characters carry only an "ignore" primary weight.
 */
const int NUM_EXPAND_CHARS = 5;		/* entries in ExpansionTbl (excl. terminator) */
const int NUM_COMPRESS_CHARS = 0;	/* CompressTbl holds only its terminator */
const int LOWERCASE_LEN = 256;
const int UPPERCASE_LEN = 256;
const int NOCASESORT_LEN = 256;
const int LDRV_TIEBREAK = SECONDARY + REVERSE;
//const int MAX_NCO_PRIMARY = 36;
const int MAX_NCO_SECONDARY = 8;
const int MAX_NCO_TERTIARY = 1;
//const int MAX_NCO_IGNORE = 122;
const int NULL_SECONDARY = 0;
const int NULL_TERTIARY = 0;
/*
 * Weight bands: ignorables start at 1; tertiary, secondary and primary
 * bands follow, each offset past the previous band's maximum.
 */
const int FIRST_IGNORE = 1;
const int FIRST_TERTIARY = 1;
const int FIRST_SECONDARY = (FIRST_TERTIARY + MAX_NCO_TERTIARY + 1);
const int FIRST_PRIMARY = (FIRST_SECONDARY + MAX_NCO_SECONDARY + 1);

/*
 * Lower -> upper case map, indexed by character code.
 * Identity everywhere except: 'a'-'z' -> 'A'-'Z',
 * 0xE0-0xF6 / 0xF8-0xFE (accented lowercase) -> 0xC0-0xD6 / 0xD8-0xDE,
 * and 0xFF (y-diaeresis) -> 'Y' (89); 0xF7 (division sign) is unchanged.
 */
static const BYTE ToUpperConversionTbl[UPPERCASE_LEN] = {
	/* 0x00-0x3F: controls, space, punctuation, digits — identity */
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
	32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
	48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
	/* 0x40-0x5F: '@', 'A'-'Z', brackets — identity */
	64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
	80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
	/* 0x60-0x7F: '`', then 'a'-'z' -> 'A'-'Z', then '{'..DEL identity */
	96, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
	80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 123, 124, 125, 126, 127,
	/* 0x80-0xBF: identity */
	128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
	144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
	160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
	176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
	/* 0xC0-0xDF: uppercase accented letters — identity */
	192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
	208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223,
	/* 0xE0-0xEF: a-grave .. i-diaeresis -> uppercase forms */
	192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
	/* 0xF0-0xFF: eth..thorn -> uppercase; 0xF7 unchanged; 0xFF -> 'Y' */
	208, 209, 210, 211, 212, 213, 214, 247, 216, 217, 218, 219, 220, 221, 222, 89
};

/*
 * Upper -> lower case map, indexed by character code.
 * Identity everywhere except: 'A'-'Z' -> 'a'-'z' and
 * 0xC0-0xD6 / 0xD8-0xDE (accented uppercase) -> 0xE0-0xF6 / 0xF8-0xFE;
 * 0xD7 (multiplication sign) is unchanged.
 */
static const BYTE ToLowerConversionTbl[LOWERCASE_LEN] = {
	/* 0x00-0x3F: identity */
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
	32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
	48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
	/* 0x40-0x5F: '@', then 'A'-'Z' -> 'a'-'z', then brackets identity */
	64, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
	112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 91, 92, 93, 94, 95,
	/* 0x60-0x7F: identity */
	96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
	112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
	/* 0x80-0xBF: identity */
	128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
	144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
	160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
	176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
	/* 0xC0-0xCF: A-grave .. I-diaeresis -> lowercase forms */
	224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
	/* 0xD0-0xDF: Eth..Thorn -> lowercase; 0xD7 unchanged; 0xDF identity */
	240, 241, 242, 243, 244, 245, 246, 215, 248, 249, 250, 251, 252, 253, 254, 223,
	/* 0xE0-0xFF: lowercase accented letters — identity */
	224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
	240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255
};

/* Characters that sort as a two-character sequence: {char, first, second}. */
static const ExpandChar ExpansionTbl[NUM_EXPAND_CHARS + 1] = {
	{230, 97, 101},		/* ae-ligature -> "ae" */
	{198, 65, 69},		/* AE-ligature -> "AE" */
	{223, 115, 115},	/* sharp-s -> "ss" */
	{254, 116, 104},	/* thorn -> "th" */
	{222, 84, 72},		/* Thorn -> "TH" */
	{0, 0, 0}		/* END OF TABLE */
};

/* No two-to-one compressions for this character set; terminator only. */
static const CompressPair CompressTbl[NUM_COMPRESS_CHARS + 1] = {
	{{0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}}	/* END OF TABLE */
};

/*
 * Per-character collation weights, indexed by character code.
 * Each entry is {primary, secondary, tertiary, flag, flag}; upper- and
 * lowercase pairs share primary/secondary and differ only in tertiary.
 * NOTE(review): the two trailing flags appear to mark "expanding char"
 * and "ignorable" respectively (1,1 on all ignorables; 1,0 on the
 * ExpansionTbl characters) — confirm against SortOrderTblEntry's
 * declaration before relying on that reading.
 */
static const SortOrderTblEntry NoCaseOrderTbl[NOCASESORT_LEN] = {
	{FIRST_IGNORE + 0, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 0 */
	{FIRST_IGNORE + 1, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 1 */
	{FIRST_IGNORE + 2, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 2 */
	{FIRST_IGNORE + 3, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 3 */
	{FIRST_IGNORE + 4, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 4 */
	{FIRST_IGNORE + 5, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 5 */
	{FIRST_IGNORE + 6, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 6 */
	{FIRST_IGNORE + 7, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 7 */
	{FIRST_IGNORE + 8, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 8 */
	{FIRST_IGNORE + 9, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 9 */
	{FIRST_IGNORE + 10, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 10 */
	{FIRST_IGNORE + 11, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 11 */
	{FIRST_IGNORE + 12, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 12 */
	{FIRST_IGNORE + 13, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 13 */
	{FIRST_IGNORE + 14, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 14 */
	{FIRST_IGNORE + 15, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 15 */
	{FIRST_IGNORE + 16, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 16 */
	{FIRST_IGNORE + 17, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 17 */
	{FIRST_IGNORE + 18, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 18 */
	{FIRST_IGNORE + 19, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 19 */
	{FIRST_IGNORE + 20, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 20 */
	{FIRST_IGNORE + 21, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 21 */
	{FIRST_IGNORE + 22, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 22 */
	{FIRST_IGNORE + 23, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 23 */
	{FIRST_IGNORE + 24, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 24 */
	{FIRST_IGNORE + 25, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 25 */
	{FIRST_IGNORE + 26, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 26 */
	{FIRST_IGNORE + 27, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 27 */
	{FIRST_IGNORE + 28, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 28 */
	{FIRST_IGNORE + 29, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 29 */
	{FIRST_IGNORE + 30, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 30 */
	{FIRST_IGNORE + 31, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 31 */
	{FIRST_IGNORE + 32, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 32 */
	{FIRST_IGNORE + 40, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 33 ! */
	{FIRST_IGNORE + 54, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 34 " */
	{FIRST_IGNORE + 76, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 35 # */
	{FIRST_IGNORE + 70, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 36 $ */
	{FIRST_IGNORE + 77, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 37 % */
	{FIRST_IGNORE + 75, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 38 & */
	{FIRST_IGNORE + 53, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 39 ' */
	{FIRST_IGNORE + 57, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 40 ( */
	{FIRST_IGNORE + 58, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 41 ) */
	{FIRST_IGNORE + 73, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 42 * */
	{FIRST_IGNORE + 78, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 43 + */
	{FIRST_IGNORE + 37, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 44 , */
	{FIRST_IGNORE + 36, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 45 - */
	{FIRST_IGNORE + 45, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 46 . */
	{FIRST_IGNORE + 44, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 47 / */
	{FIRST_PRIMARY + 1, FIRST_SECONDARY + 0, NULL_TERTIARY, 0, 0},	/* 48 0 */
	{FIRST_PRIMARY + 2, NULL_SECONDARY, FIRST_TERTIARY + 0, 0, 0},	/* 49 1 */
	{FIRST_PRIMARY + 3, NULL_SECONDARY, FIRST_TERTIARY + 0, 0, 0},	/* 50 2 */
	{FIRST_PRIMARY + 4, NULL_SECONDARY, FIRST_TERTIARY + 0, 0, 0},	/* 51 3 */
	{FIRST_PRIMARY + 5, NULL_SECONDARY, NULL_TERTIARY, 0, 0},	/* 52 4 */
	{FIRST_PRIMARY + 6, NULL_SECONDARY, NULL_TERTIARY, 0, 0},	/* 53 5 */
	{FIRST_PRIMARY + 7, NULL_SECONDARY, NULL_TERTIARY, 0, 0},	/* 54 6 */
	{FIRST_PRIMARY + 8, NULL_SECONDARY, NULL_TERTIARY, 0, 0},	/* 55 7 */
	{FIRST_PRIMARY + 9, NULL_SECONDARY, NULL_TERTIARY, 0, 0},	/* 56 8 */
	{FIRST_PRIMARY + 10, NULL_SECONDARY, NULL_TERTIARY, 0, 0},	/* 57 9 */
	{FIRST_IGNORE + 39, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 58 : */
	{FIRST_IGNORE + 38, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 59 ; */
	{FIRST_IGNORE + 82, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 60 < */
	{FIRST_IGNORE + 83, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 61 = */
	{FIRST_IGNORE + 84, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 62 > */
	{FIRST_IGNORE + 42, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 63 ? */
	{FIRST_IGNORE + 67, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 64 @ */
	{FIRST_PRIMARY + 11, FIRST_SECONDARY + 0, FIRST_TERTIARY + 1, 0, 0},	/* 65 A */
	{FIRST_PRIMARY + 12, NULL_SECONDARY, FIRST_TERTIARY + 1, 0, 0},	/* 66 B */
	{FIRST_PRIMARY + 13, FIRST_SECONDARY + 0, FIRST_TERTIARY + 1, 0, 0},	/* 67 C */
	{FIRST_PRIMARY + 14, FIRST_SECONDARY + 0, FIRST_TERTIARY + 1, 0, 0},	/* 68 D */
	{FIRST_PRIMARY + 15, FIRST_SECONDARY + 0, FIRST_TERTIARY + 1, 0, 0},	/* 69 E */
	{FIRST_PRIMARY + 16, NULL_SECONDARY, FIRST_TERTIARY + 1, 0, 0},	/* 70 F */
	{FIRST_PRIMARY + 17, NULL_SECONDARY, FIRST_TERTIARY + 1, 0, 0},	/* 71 G */
	{FIRST_PRIMARY + 18, NULL_SECONDARY, FIRST_TERTIARY + 1, 0, 0},	/* 72 H */
	{FIRST_PRIMARY + 19, FIRST_SECONDARY + 0, FIRST_TERTIARY + 1, 0, 0},	/* 73 I */
	{FIRST_PRIMARY + 20, NULL_SECONDARY, FIRST_TERTIARY + 1, 0, 0},	/* 74 J */
	{FIRST_PRIMARY + 21, NULL_SECONDARY, FIRST_TERTIARY + 1, 0, 0},	/* 75 K */
	{FIRST_PRIMARY + 22, NULL_SECONDARY, FIRST_TERTIARY + 1, 0, 0},	/* 76 L */
	{FIRST_PRIMARY + 23, NULL_SECONDARY, FIRST_TERTIARY + 1, 0, 0},	/* 77 M */
	{FIRST_PRIMARY + 24, FIRST_SECONDARY + 0, FIRST_TERTIARY + 1, 0, 0},	/* 78 N */
	{FIRST_PRIMARY + 25, FIRST_SECONDARY + 0, FIRST_TERTIARY + 1, 0, 0},	/* 79 O */
	{FIRST_PRIMARY + 26, NULL_SECONDARY, FIRST_TERTIARY + 1, 0, 0},	/* 80 P */
	{FIRST_PRIMARY + 27, NULL_SECONDARY, FIRST_TERTIARY + 1, 0, 0},	/* 81 Q */
	{FIRST_PRIMARY + 28, NULL_SECONDARY, FIRST_TERTIARY + 1, 0, 0},	/* 82 R */
	{FIRST_PRIMARY + 29, FIRST_SECONDARY + 0, FIRST_TERTIARY + 1, 0, 0},	/* 83 S */
	{FIRST_PRIMARY + 30, FIRST_SECONDARY + 0, FIRST_TERTIARY + 1, 0, 0},	/* 84 T */
	{FIRST_PRIMARY + 31, FIRST_SECONDARY + 0, FIRST_TERTIARY + 1, 0, 0},	/* 85 U */
	{FIRST_PRIMARY + 32, NULL_SECONDARY, FIRST_TERTIARY + 1, 0, 0},	/* 86 V */
	{FIRST_PRIMARY + 33, NULL_SECONDARY, FIRST_TERTIARY + 1, 0, 0},	/* 87 W */
	{FIRST_PRIMARY + 34, NULL_SECONDARY, FIRST_TERTIARY + 1, 0, 0},	/* 88 X */
	{FIRST_PRIMARY + 35, FIRST_SECONDARY + 0, FIRST_TERTIARY + 1, 0, 0},	/* 89 Y */
	{FIRST_PRIMARY + 36, NULL_SECONDARY, FIRST_TERTIARY + 1, 0, 0},	/* 90 Z */
	{FIRST_IGNORE + 59, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 91 [ */
	{FIRST_IGNORE + 74, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 92 \ */
	{FIRST_IGNORE + 60, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 93 ] */
	{FIRST_IGNORE + 48, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 94 ^ */
	{FIRST_IGNORE + 33, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 95 _ */
	{FIRST_IGNORE + 47, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 96 ` */
	{FIRST_PRIMARY + 11, FIRST_SECONDARY + 0, FIRST_TERTIARY + 0, 0, 0},	/* 97 a */
	{FIRST_PRIMARY + 12, NULL_SECONDARY, FIRST_TERTIARY + 0, 0, 0},	/* 98 b */
	{FIRST_PRIMARY + 13, FIRST_SECONDARY + 0, FIRST_TERTIARY + 0, 0, 0},	/* 99 c */
	{FIRST_PRIMARY + 14, FIRST_SECONDARY + 0, FIRST_TERTIARY + 0, 0, 0},	/* 100 d */
	{FIRST_PRIMARY + 15, FIRST_SECONDARY + 0, FIRST_TERTIARY + 0, 0, 0},	/* 101 e */
	{FIRST_PRIMARY + 16, NULL_SECONDARY, FIRST_TERTIARY + 0, 0, 0},	/* 102 f */
	{FIRST_PRIMARY + 17, NULL_SECONDARY, FIRST_TERTIARY + 0, 0, 0},	/* 103 g */
	{FIRST_PRIMARY + 18, NULL_SECONDARY, FIRST_TERTIARY + 0, 0, 0},	/* 104 h */
	{FIRST_PRIMARY + 19, FIRST_SECONDARY + 0, FIRST_TERTIARY + 0, 0, 0},	/* 105 i */
	{FIRST_PRIMARY + 20, NULL_SECONDARY, FIRST_TERTIARY + 0, 0, 0},	/* 106 j */
	{FIRST_PRIMARY + 21, NULL_SECONDARY, FIRST_TERTIARY + 0, 0, 0},	/* 107 k */
	{FIRST_PRIMARY + 22, NULL_SECONDARY, FIRST_TERTIARY + 0, 0, 0},	/* 108 l */
	{FIRST_PRIMARY + 23, NULL_SECONDARY, FIRST_TERTIARY + 0, 0, 0},	/* 109 m */
	{FIRST_PRIMARY + 24, FIRST_SECONDARY + 0, FIRST_TERTIARY + 0, 0, 0},	/* 110 n */
	{FIRST_PRIMARY + 25, FIRST_SECONDARY + 0, FIRST_TERTIARY + 0, 0, 0},	/* 111 o */
	{FIRST_PRIMARY + 26, NULL_SECONDARY, FIRST_TERTIARY + 0, 0, 0},	/* 112 p */
	{FIRST_PRIMARY + 27, NULL_SECONDARY, FIRST_TERTIARY + 0, 0, 0},	/* 113 q */
	{FIRST_PRIMARY + 28, NULL_SECONDARY, FIRST_TERTIARY + 0, 0, 0},	/* 114 r */
	{FIRST_PRIMARY + 29, FIRST_SECONDARY + 0, FIRST_TERTIARY + 0, 0, 0},	/* 115 s */
	{FIRST_PRIMARY + 30, FIRST_SECONDARY + 0, FIRST_TERTIARY + 0, 0, 0},	/* 116 t */
	{FIRST_PRIMARY + 31, FIRST_SECONDARY + 0, FIRST_TERTIARY + 0, 0, 0},	/* 117 u */
	{FIRST_PRIMARY + 32, NULL_SECONDARY, FIRST_TERTIARY + 0, 0, 0},	/* 118 v */
	{FIRST_PRIMARY + 33, NULL_SECONDARY, FIRST_TERTIARY + 0, 0, 0},	/* 119 w */
	{FIRST_PRIMARY + 34, NULL_SECONDARY, FIRST_TERTIARY + 0, 0, 0},	/* 120 x */
	{FIRST_PRIMARY + 35, FIRST_SECONDARY + 0, FIRST_TERTIARY + 0, 0, 0},	/* 121 y */
	{FIRST_PRIMARY + 36, NULL_SECONDARY, FIRST_TERTIARY + 0, 0, 0},	/* 122 z */
	{FIRST_IGNORE + 61, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 123 { */
	{FIRST_IGNORE + 86, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 124 | */
	{FIRST_IGNORE + 62, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 125 } */
	{FIRST_IGNORE + 50, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 126 ~ */
	{FIRST_IGNORE + 90, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 127 */
	{FIRST_IGNORE + 91, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 128 */
	{FIRST_IGNORE + 92, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 129 */
	{FIRST_IGNORE + 93, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 130 */
	{FIRST_IGNORE + 94, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 131 */
	{FIRST_IGNORE + 95, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 132 */
	{FIRST_IGNORE + 96, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 133 */
	{FIRST_IGNORE + 97, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 134 */
	{FIRST_IGNORE + 98, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 135 */
	{FIRST_IGNORE + 99, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 136 */
	{FIRST_IGNORE + 100, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 137 */
	{FIRST_IGNORE + 101, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 138 */
	{FIRST_IGNORE + 102, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 139 */
	{FIRST_IGNORE + 103, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 140 */
	{FIRST_IGNORE + 104, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 141 */
	{FIRST_IGNORE + 105, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 142 */
	{FIRST_IGNORE + 106, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 143 */
	{FIRST_IGNORE + 107, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 144 */
	{FIRST_IGNORE + 108, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 145 */
	{FIRST_IGNORE + 109, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 146 */
	{FIRST_IGNORE + 110, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 147 */
	{FIRST_IGNORE + 111, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 148 */
	{FIRST_IGNORE + 112, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 149 */
	{FIRST_IGNORE + 113, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 150 */
	{FIRST_IGNORE + 114, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 151 */
	{FIRST_IGNORE + 115, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 152 */
	{FIRST_IGNORE + 116, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 153 */
	{FIRST_IGNORE + 117, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 154 */
	{FIRST_IGNORE + 118, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 155 */
	{FIRST_IGNORE + 119, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 156 */
	{FIRST_IGNORE + 120, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 157 */
	{FIRST_IGNORE + 121, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 158 */
	{FIRST_IGNORE + 122, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 159 */
	{FIRST_PRIMARY + 0, NULL_SECONDARY, NULL_TERTIARY, 0, 0},	/* 160 nbsp */
	{FIRST_IGNORE + 41, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 161 ¡ */
	{FIRST_IGNORE + 69, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 162 ¢ */
	{FIRST_IGNORE + 71, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 163 £ */
	{FIRST_IGNORE + 68, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 164 ¤ */
	{FIRST_IGNORE + 72, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 165 ¥ */
	{FIRST_IGNORE + 87, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 166 ¦ */
	{FIRST_IGNORE + 63, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 167 § */
	{FIRST_IGNORE + 49, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 168 ¨ */
	{FIRST_IGNORE + 65, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 169 © */
	{FIRST_PRIMARY + 11, FIRST_SECONDARY + 1, FIRST_TERTIARY + 0, 0, 0},	/* 170 ª */
	{FIRST_IGNORE + 55, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 171 « */
	{FIRST_IGNORE + 85, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 172 ¬ */
	{FIRST_IGNORE + 35, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 173 soft hyphen */
	{FIRST_IGNORE + 66, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 174 ® */
	{FIRST_IGNORE + 34, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 175 ¯ */
	{FIRST_IGNORE + 88, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 176 ° */
	{FIRST_IGNORE + 79, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 177 ± */
	{FIRST_PRIMARY + 3, NULL_SECONDARY, FIRST_TERTIARY + 1, 0, 0},	/* 178 ² */
	{FIRST_PRIMARY + 4, NULL_SECONDARY, FIRST_TERTIARY + 1, 0, 0},	/* 179 ³ */
	{FIRST_IGNORE + 46, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 180 ´ */
	{FIRST_IGNORE + 89, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 181 µ */
	{FIRST_IGNORE + 64, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 182 ¶ */
	{FIRST_IGNORE + 51, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 183 · */
	{FIRST_IGNORE + 52, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 184 ¸ */
	{FIRST_PRIMARY + 2, NULL_SECONDARY, FIRST_TERTIARY + 1, 0, 0},	/* 185 ¹ */
	{FIRST_PRIMARY + 25, FIRST_SECONDARY + 1, FIRST_TERTIARY + 0, 0, 0},	/* 186 º */
	{FIRST_IGNORE + 56, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 187 » */
	{FIRST_PRIMARY + 1, FIRST_SECONDARY + 1, NULL_TERTIARY, 0, 0},	/* 188 ¼ */
	{FIRST_PRIMARY + 1, FIRST_SECONDARY + 2, NULL_TERTIARY, 0, 0},	/* 189 ½ */
	{FIRST_PRIMARY + 1, FIRST_SECONDARY + 3, NULL_TERTIARY, 0, 0},	/* 190 ¾ */
	{FIRST_IGNORE + 43, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 191 ¿ */
	{FIRST_PRIMARY + 11, FIRST_SECONDARY + 4, FIRST_TERTIARY + 1, 0, 0},	/* 192 À */
	{FIRST_PRIMARY + 11, FIRST_SECONDARY + 3, FIRST_TERTIARY + 1, 0, 0},	/* 193 Á */
	{FIRST_PRIMARY + 11, FIRST_SECONDARY + 5, FIRST_TERTIARY + 1, 0, 0},	/* 194 Â */
	{FIRST_PRIMARY + 11, FIRST_SECONDARY + 8, FIRST_TERTIARY + 1, 0, 0},	/* 195 Ã */
	{FIRST_PRIMARY + 11, FIRST_SECONDARY + 7, FIRST_TERTIARY + 1, 0, 0},	/* 196 Ä */
	{FIRST_PRIMARY + 11, FIRST_SECONDARY + 6, FIRST_TERTIARY + 1, 0, 0},	/* 197 Å */
	{FIRST_PRIMARY + 11, FIRST_SECONDARY + 2, FIRST_TERTIARY + 1, 1, 0},	/* 198 Æ (expands) */
	{FIRST_PRIMARY + 13, FIRST_SECONDARY + 1, FIRST_TERTIARY + 1, 0, 0},	/* 199 Ç */
	{FIRST_PRIMARY + 15, FIRST_SECONDARY + 2, FIRST_TERTIARY + 1, 0, 0},	/* 200 È */
	{FIRST_PRIMARY + 15, FIRST_SECONDARY + 1, FIRST_TERTIARY + 1, 0, 0},	/* 201 É */
	{FIRST_PRIMARY + 15, FIRST_SECONDARY + 3, FIRST_TERTIARY + 1, 0, 0},	/* 202 Ê */
	{FIRST_PRIMARY + 15, FIRST_SECONDARY + 4, FIRST_TERTIARY + 1, 0, 0},	/* 203 Ë */
	{FIRST_PRIMARY + 19, FIRST_SECONDARY + 2, FIRST_TERTIARY + 1, 0, 0},	/* 204 Ì */
	{FIRST_PRIMARY + 19, FIRST_SECONDARY + 1, FIRST_TERTIARY + 1, 0, 0},	/* 205 Í */
	{FIRST_PRIMARY + 19, FIRST_SECONDARY + 3, FIRST_TERTIARY + 1, 0, 0},	/* 206 Î */
	{FIRST_PRIMARY + 19, FIRST_SECONDARY + 4, FIRST_TERTIARY + 1, 0, 0},	/* 207 Ï */
	{FIRST_PRIMARY + 14, FIRST_SECONDARY + 1, FIRST_TERTIARY + 1, 0, 0},	/* 208 Ð */
	{FIRST_PRIMARY + 24, FIRST_SECONDARY + 1, FIRST_TERTIARY + 1, 0, 0},	/* 209 Ñ */
	{FIRST_PRIMARY + 25, FIRST_SECONDARY + 3, FIRST_TERTIARY + 1, 0, 0},	/* 210 Ò */
	{FIRST_PRIMARY + 25, FIRST_SECONDARY + 2, FIRST_TERTIARY + 1, 0, 0},	/* 211 Ó */
	{FIRST_PRIMARY + 25, FIRST_SECONDARY + 4, FIRST_TERTIARY + 1, 0, 0},	/* 212 Ô */
	{FIRST_PRIMARY + 25, FIRST_SECONDARY + 6, FIRST_TERTIARY + 1, 0, 0},	/* 213 Õ */
	{FIRST_PRIMARY + 25, FIRST_SECONDARY + 5, FIRST_TERTIARY + 1, 0, 0},	/* 214 Ö */
	{FIRST_IGNORE + 81, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 215 × */
	{FIRST_PRIMARY + 25, FIRST_SECONDARY + 7, FIRST_TERTIARY + 1, 0, 0},	/* 216 Ø */
	{FIRST_PRIMARY + 31, FIRST_SECONDARY + 2, FIRST_TERTIARY + 1, 0, 0},	/* 217 Ù */
	{FIRST_PRIMARY + 31, FIRST_SECONDARY + 1, FIRST_TERTIARY + 1, 0, 0},	/* 218 Ú */
	{FIRST_PRIMARY + 31, FIRST_SECONDARY + 3, FIRST_TERTIARY + 1, 0, 0},	/* 219 Û */
	{FIRST_PRIMARY + 31, FIRST_SECONDARY + 4, FIRST_TERTIARY + 1, 0, 0},	/* 220 Ü */
	{FIRST_PRIMARY + 35, FIRST_SECONDARY + 1, FIRST_TERTIARY + 1, 0, 0},	/* 221 Ý */
	{FIRST_PRIMARY + 30, FIRST_SECONDARY + 1, FIRST_TERTIARY + 1, 1, 0},	/* 222 Þ (expands) */
	{FIRST_PRIMARY + 29, FIRST_SECONDARY + 1, FIRST_TERTIARY + 0, 1, 0},	/* 223 ß (expands) */
	{FIRST_PRIMARY + 11, FIRST_SECONDARY + 4, FIRST_TERTIARY + 0, 0, 0},	/* 224 à */
	{FIRST_PRIMARY + 11, FIRST_SECONDARY + 3, FIRST_TERTIARY + 0, 0, 0},	/* 225 á */
	{FIRST_PRIMARY + 11, FIRST_SECONDARY + 5, FIRST_TERTIARY + 0, 0, 0},	/* 226 â */
	{FIRST_PRIMARY + 11, FIRST_SECONDARY + 8, FIRST_TERTIARY + 0, 0, 0},	/* 227 ã */
	{FIRST_PRIMARY + 11, FIRST_SECONDARY + 7, FIRST_TERTIARY + 0, 0, 0},	/* 228 ä */
	{FIRST_PRIMARY + 11, FIRST_SECONDARY + 6, FIRST_TERTIARY + 0, 0, 0},	/* 229 å */
	{FIRST_PRIMARY + 11, FIRST_SECONDARY + 2, FIRST_TERTIARY + 0, 1, 0},	/* 230 æ (expands) */
	{FIRST_PRIMARY + 13, FIRST_SECONDARY + 1, FIRST_TERTIARY + 0, 0, 0},	/* 231 ç */
	{FIRST_PRIMARY + 15, FIRST_SECONDARY + 2, FIRST_TERTIARY + 0, 0, 0},	/* 232 è */
	{FIRST_PRIMARY + 15, FIRST_SECONDARY + 1, FIRST_TERTIARY + 0, 0, 0},	/* 233 é */
	{FIRST_PRIMARY + 15, FIRST_SECONDARY + 3, FIRST_TERTIARY + 0, 0, 0},	/* 234 ê */
	{FIRST_PRIMARY + 15, FIRST_SECONDARY + 4, FIRST_TERTIARY + 0, 0, 0},	/* 235 ë */
	{FIRST_PRIMARY + 19, FIRST_SECONDARY + 2, FIRST_TERTIARY + 0, 0, 0},	/* 236 ì */
	{FIRST_PRIMARY + 19, FIRST_SECONDARY + 1, FIRST_TERTIARY + 0, 0, 0},	/* 237 í */
	{FIRST_PRIMARY + 19, FIRST_SECONDARY + 3, FIRST_TERTIARY + 0, 0, 0},	/* 238 î */
	{FIRST_PRIMARY + 19, FIRST_SECONDARY + 4, FIRST_TERTIARY + 0, 0, 0},	/* 239 ï */
	{FIRST_PRIMARY + 14, FIRST_SECONDARY + 1, FIRST_TERTIARY + 0, 0, 0},	/* 240 ð */
	{FIRST_PRIMARY + 24, FIRST_SECONDARY + 1, FIRST_TERTIARY + 0, 0, 0},	/* 241 ñ */
	{FIRST_PRIMARY + 25, FIRST_SECONDARY + 3, FIRST_TERTIARY + 0, 0, 0},	/* 242 ò */
	{FIRST_PRIMARY + 25, FIRST_SECONDARY + 2, FIRST_TERTIARY + 0, 0, 0},	/* 243 ó */
	{FIRST_PRIMARY + 25, FIRST_SECONDARY + 4, FIRST_TERTIARY + 0, 0, 0},	/* 244 ô */
	{FIRST_PRIMARY + 25, FIRST_SECONDARY + 6, FIRST_TERTIARY + 0, 0, 0},	/* 245 õ */
	{FIRST_PRIMARY + 25, FIRST_SECONDARY + 5, FIRST_TERTIARY + 0, 0, 0},	/* 246 ö */
	{FIRST_IGNORE + 80, NULL_SECONDARY, NULL_TERTIARY, 1, 1},	/* 247 ÷ */
	{FIRST_PRIMARY + 25, FIRST_SECONDARY + 7, FIRST_TERTIARY + 0, 0, 0},	/* 248 ø */
	{FIRST_PRIMARY + 31, FIRST_SECONDARY + 2, FIRST_TERTIARY + 0, 0, 0},	/* 249 ù */
	{FIRST_PRIMARY + 31, FIRST_SECONDARY + 1, FIRST_TERTIARY + 0, 0, 0},	/* 250 ú */
	{FIRST_PRIMARY + 31, FIRST_SECONDARY + 3, FIRST_TERTIARY + 0, 0, 0},	/* 251 û */
	{FIRST_PRIMARY + 31, FIRST_SECONDARY + 4, FIRST_TERTIARY + 0, 0, 0},	/* 252 ü */
	{FIRST_PRIMARY + 35, FIRST_SECONDARY + 1, FIRST_TERTIARY + 0, 0, 0},	/* 253 ý */
	{FIRST_PRIMARY + 30, FIRST_SECONDARY + 1, FIRST_TERTIARY + 0, 1, 0},	/* 254 þ (expands) */
	{FIRST_PRIMARY + 35, FIRST_SECONDARY + 2, FIRST_TERTIARY + 0, 0, 0}	/* 255 y-diaeresis */
};
{ "language": "C" }
/*
 * Copyright(c) 2019 Intel Corporation
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at https://www.aomedia.org/license/software-license. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at https://www.aomedia.org/license/patent-license.
 */

/*
 * Entropy-decoder primitives ported from AOM (entcode.h / prob.h / entdec.h /
 * daalaboolreader.h): CDF adaptation, the range-decoder state struct, and the
 * inline Daala boolean/symbol readers used by the decoder.
 */

#ifndef EbDecBitstreamUnit_h
#define EbDecBitstreamUnit_h

#include "EbCabacContextModel.h"
/* EbBitstreamUnit.h already defines OdEcWindow; defining it again here caused
 * a redefinition warning, so our local definition below stays commented out
 * and the header is included instead. */
#include "EbBitstreamUnit.h"

#ifdef __cplusplus
extern "C" {
#endif

/* When non-zero, every decoded bit/symbol is cross-checked against a
 * reference bitstream queue (debug aid; see the #if blocks below). */
#define CONFIG_BITSTREAM_DEBUG 0

/******************************************************************************/
#if (CHAR_BIT != 8)
#undef CHAR_BIT
#define CHAR_BIT 8 /* number of bits in a char */
#endif

// entcode.h from AOM
#define EC_PROB_SHIFT 6
#define EC_MIN_PROB 4 // must be <= (1<<EC_PROB_SHIFT)/16

/*OPT: OdEcWindow must be at least 32 bits, but if you have fast arithmetic on
  a larger type, you can speed up the decoder by using it here.*/
//typedef uint32_t OdEcWindow;
/*The size in bits of OdEcWindow.*/
//#define OD_EC_WINDOW_SIZE ((int)sizeof(OdEcWindow) * CHAR_BIT)

/******************************************************************************/
// prob.h from AOM
//typedef uint16_t aom_cdf_prob;

/* Entropy-trace debug switches: when enabled, every decoded symbol is dumped
 * either to a file (temp_fp) or to the log. */
#define ENABLE_ENTROPY_TRACE 0
#define EXTRA_DUMP 0

#if ENABLE_ENTROPY_TRACE
#define ENTROPY_TRACE_FILE_BASED 1
#define FRAME_LEVEL_TRACE 1
#include <stdio.h>
extern FILE *temp_fp;
extern int enable_dump;
#endif

#define CDF_PROB_BITS 15
#define CDF_PROB_TOP (1 << CDF_PROB_BITS)
#define CDF_INIT_TOP 32768
#define CDF_SHIFT (15 - CDF_PROB_BITS)

/*The value stored in an iCDF is CDF_PROB_TOP minus the actual cumulative
  probability (an "inverse" CDF). This function converts from one
  representation to the other (and is its own inverse).*/
#define AOM_ICDF(x) (CDF_PROB_TOP - (x))

/* Adapt the inverse CDF `cdf` (nsymbs symbols; adaptation counter stored at
 * cdf[nsymbs]) towards the just-decoded symbol `val`. The adaptation rate
 * starts fast and slows down as the per-context counter cdf[nsymbs] grows
 * (saturating at 32); must match the encoder's update bit-exactly. */
static INLINE void dec_update_cdf(AomCdfProb *cdf, int8_t val, int nsymbs) {
    int rate;
    int i, tmp;

    static const int nsymbs2speed[17] = {0, 0, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2};
    assert(nsymbs < 17);
    rate = 3 + (cdf[nsymbs] > 15) + (cdf[nsymbs] > 31) + nsymbs2speed[nsymbs]; // + get_msb(nsymbs);
    tmp = AOM_ICDF(0);

    // Single loop (faster)
    for (i = 0; i < nsymbs - 1; ++i) {
        tmp = (i == val) ? 0 : tmp;
        if (tmp < cdf[i]) {
            cdf[i] -= (AomCdfProb)((cdf[i] - tmp) >> rate);
        } else
            cdf[i] += (AomCdfProb)((tmp - cdf[i]) >> rate);
    }
    cdf[nsymbs] += (cdf[nsymbs] < 32);
}

/******************************************************************************/
// entdec.h from AOM

/*The entropy decoder context.*/
typedef struct OdEcDec {
    /*The start of the current input buffer.*/
    const unsigned char *buf;
    /*An offset used to keep track of tell after reaching the end of the
      stream. This is constant throughout most of the decoding process, but
      becomes important once we hit the end of the buffer and stop
      incrementing bptr (and instead pretend cnt has lots of bits).*/
    int32_t tell_offs;
    /*The end of the current input buffer.*/
    const unsigned char *end;
    /*The read pointer for the entropy-coded bits.*/
    const unsigned char *bptr;
    /*The difference between the high end of the current range, (low + rng),
      and the coded value, minus 1. This stores up to OD_EC_WINDOW_SIZE bits
      of that difference, but the decoder only uses the top 16 bits of the
      window to decode the next symbol. As we shift up during renormalization,
      if we don't have enough bits left in the window to fill the top 16,
      we'll read in more bits of the coded value.*/
    OdEcWindow dif;
    /*The number of values in the current range.*/
    uint16_t rng;
    /*The number of bits of data in the current value.*/
    int16_t cnt;
} OdEcDec;

int od_ec_decode_bool_q15(OdEcDec *dec, unsigned f);
int od_ec_decode_cdf_q15(OdEcDec *dec, const uint16_t *cdf, int nsyms);

/******************************************************************************/
// daalaboolreader.h from AOM

/* Thin reader wrapper around the range decoder state. */
typedef struct DaalaReader {
    const uint8_t *buffer;
    const uint8_t *buffer_end;
    OdEcDec ec;
    uint8_t allow_update_cdf;
} DaalaReader_t;

int aom_daala_reader_init(DaalaReader_t *r, const uint8_t *buffer, int size);
const uint8_t *aom_daala_reader_find_begin(DaalaReader_t *r);
const uint8_t *aom_daala_reader_find_end(DaalaReader_t *r);

/* Decode one boolean with probability `prob` (8-bit scale) of being zero;
 * `p` rescales prob to the Q15 domain expected by the range decoder. */
static INLINE int aom_daala_read(DaalaReader_t *r, int prob) {
    int bit;
    int p = (0x7FFFFF - (prob << 15) + prob) >> 8;

    bit = od_ec_decode_bool_q15(&r->ec, p);

#if CONFIG_BITSTREAM_DEBUG
    {
        int i;
        int ref_bit, ref_nsymbs;
        AomCdfProb ref_cdf[16];
        const int queue_r = bitstream_queue_get_read();
        const int frame_idx = bitstream_queue_get_frame_read();
        bitstream_queue_pop(&ref_bit, ref_cdf, &ref_nsymbs);
        if (ref_nsymbs != 2) {
            SVT_ERROR(
                "\n *** [bit] nsymbs error, frame_idx_r %d nsymbs %d ref_nsymbs "
                "%d queue_r %d\n",
                frame_idx, 2, ref_nsymbs, queue_r);
            assert(0);
        }
        if ((ref_nsymbs != 2) || (ref_cdf[0] != (AomCdfProb)p) || (ref_cdf[1] != 32767)) {
            SVT_ERROR("\n *** [bit] cdf error, frame_idx_r %d cdf {%d, %d} ref_cdf {%d",
                      frame_idx, p, 32767, ref_cdf[0]);
            for (i = 1; i < ref_nsymbs; ++i) SVT_ERROR(", %d", ref_cdf[i]);
            SVT_ERROR("} queue_r %d\n", queue_r);
            assert(0);
        }
        if (bit != ref_bit) {
            SVT_ERROR(
                "\n *** [bit] symb error, frame_idx_r %d symb %d ref_symb %d "
                "queue_r %d\n",
                frame_idx, bit, ref_bit, queue_r);
            assert(0);
        }
    }
#endif
#if ENABLE_ENTROPY_TRACE
#if ENTROPY_TRACE_FILE_BASED
    if (enable_dump) {
        assert(temp_fp);
        fprintf(temp_fp, "\n *** p %d \t", p);
        fprintf(temp_fp, "symb : %d \t", bit);
        fflush(temp_fp);
    }
#else
    if (enable_dump) {
        SVT_LOG("\n *** p %d \t", p);
        SVT_LOG("symb : %d \t", bit);
        fflush(stdout);
    }
#endif
#endif
    return bit;
}

/* Decode one symbol from an nsymbs-ary alphabet described by the inverse
 * CDF `cdf` (Q15). Returns the decoded symbol index. */
static INLINE int daala_read_symbol(DaalaReader_t *r, const AomCdfProb *cdf, int nsymbs) {
    int symb;
    assert(cdf != NULL);
    symb = od_ec_decode_cdf_q15(&r->ec, cdf, nsymbs);

#if CONFIG_BITSTREAM_DEBUG
    {
        int i;
        int cdf_error = 0;
        int ref_symb, ref_nsymbs;
        AomCdfProb ref_cdf[16];
        const int queue_r = bitstream_queue_get_read();
        const int frame_idx = bitstream_queue_get_frame_read();
        bitstream_queue_pop(&ref_symb, ref_cdf, &ref_nsymbs);
        if (nsymbs != ref_nsymbs) {
            SVT_ERROR(
                "\n *** nsymbs error, frame_idx_r %d nsymbs %d ref_nsymbs %d "
                "queue_r %d\n",
                frame_idx, nsymbs, ref_nsymbs, queue_r);
            cdf_error = 0;
            assert(0);
        } else {
            for (i = 0; i < nsymbs; ++i)
                if (cdf[i] != ref_cdf[i]) cdf_error = 1;
        }
        if (cdf_error) {
            SVT_ERROR("\n *** cdf error, frame_idx_r %d cdf {%d", frame_idx, cdf[0]);
            for (i = 1; i < nsymbs; ++i) SVT_ERROR(", %d", cdf[i]);
            SVT_ERROR("} ref_cdf {%d", ref_cdf[0]);
            for (i = 1; i < ref_nsymbs; ++i) SVT_ERROR(", %d", ref_cdf[i]);
            SVT_ERROR("} queue_r %d\n", queue_r);
            assert(0);
        }
        if (symb != ref_symb) {
            fprintf(stderr,
                    "\n *** symb error, frame_idx_r %d symb %d ref_symb %d queue_r %d\n",
                    frame_idx, symb, ref_symb, queue_r);
            assert(0);
        }
    }
#endif
#if ENABLE_ENTROPY_TRACE
#if ENTROPY_TRACE_FILE_BASED
    if (enable_dump) {
        fprintf(temp_fp, "\n *** nsymbs %d \t", nsymbs);
        for (int i = 0; i < nsymbs; ++i) fprintf(temp_fp, "cdf[%d] : %d \t", i, cdf[i]);
        fprintf(temp_fp, "symb : %d \t", symb);
        fflush(temp_fp);
    }
#else
    if (enable_dump) {
        SVT_LOG("\n *** nsymbs %d \t", nsymbs);
        for (int i = 0; i < nsymbs; ++i) SVT_LOG("cdf[%d] : %d \t", i, cdf[i]);
        SVT_LOG("symb : %d \t", symb);
        fflush(stdout);
    }
#endif
#endif
    return symb;
}

#ifdef __cplusplus
}
#endif
#endif // EbDecBitstreamUnit_h
{ "language": "C" }
/* crypto/x509/x509_obj.c */ /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) * All rights reserved. * * This package is an SSL implementation written * by Eric Young (eay@cryptsoft.com). * The implementation was written so as to conform with Netscapes SSL. * * This library is free for commercial and non-commercial use as long as * the following conditions are aheared to. The following conditions * apply to all code found in this distribution, be it the RC4, RSA, * lhash, DES, etc., code; not just the SSL code. The SSL documentation * included with this distribution is covered by the same copyright terms * except that the holder is Tim Hudson (tjh@cryptsoft.com). * * Copyright remains Eric Young's, and as such any Copyright notices in * the code are not to be removed. * If this package is used in a product, Eric Young should be given attribution * as the author of the parts of the library used. * This can be in the form of a textual message at program startup or * in documentation (online or textual) provided with the package. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * "This product includes cryptographic software written by * Eric Young (eay@cryptsoft.com)" * The word 'cryptographic' can be left out if the rouines from the library * being used are not cryptographic related :-). * 4. 
If you include any Windows specific code (or a derivative thereof) from
 * the apps directory (application code) you must include an acknowledgement:
 * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
 *
 * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The licence and distribution terms for any publically available version or
 * derivative of this code cannot be changed. i.e. this code cannot simply be
 * copied and put under another distribution licence
 * [including the GNU Public Licence.]
 */

#include <stdio.h>
#include "cryptlib.h"
#include <openssl/lhash.h>
#include <openssl/objects.h>
#include <openssl/x509.h>
#include <openssl/buffer.h>

/*
 * Limit to ensure we don't overflow: much greater than
 * anything encountered in practice.
 */
#define NAME_ONELINE_MAX (1024 * 1024)

/*
 * Render an X509_NAME as a single line of the form "/SN=value/SN=value...".
 * If buf is NULL a BUF_MEM-backed string is allocated (grown as needed) and
 * the caller owns the returned pointer; otherwise at most len bytes
 * (including the NUL) are written into buf and output is truncated at the
 * last entry that fits. Non-printable bytes are emitted as \xHH escapes.
 * Returns NULL on allocation failure, over-long input, or len == 0.
 */
char *X509_NAME_oneline(X509_NAME *a, char *buf, int len)
{
    X509_NAME_ENTRY *ne;
    int i;
    int n, lold, l, l1, l2, num, j, type;
    const char *s;
    char *p;
    unsigned char *q;
    BUF_MEM *b = NULL;
    static const char hex[17] = "0123456789ABCDEF";
    int gs_doit[4];
    char tmp_buf[80];
#ifdef CHARSET_EBCDIC
    char ebcdic_buf[1024];
#endif

    if (buf == NULL) {
        /* caller wants an allocated result; start with 200 bytes */
        if ((b = BUF_MEM_new()) == NULL)
            goto err;
        if (!BUF_MEM_grow(b, 200))
            goto err;
        b->data[0] = '\0';
        len = 200;
    } else if (len == 0) {
        /* no room for even the terminating NUL */
        return NULL;
    }
    if (a == NULL) {
        if (b) {
            buf = b->data;
            OPENSSL_free(b);
        }
        strncpy(buf, "NO X509_NAME", len);
        buf[len - 1] = '\0';
        return buf;
    }

    len--;                      /* space for '\0' */
    l = 0;                      /* running output length */
    for (i = 0; i < sk_X509_NAME_ENTRY_num(a->entries); i++) {
        ne = sk_X509_NAME_ENTRY_value(a->entries, i);
        n = OBJ_obj2nid(ne->object);
        if ((n == NID_undef) || ((s = OBJ_nid2sn(n)) == NULL)) {
            /* unknown OID: fall back to its textual representation */
            i2t_ASN1_OBJECT(tmp_buf, sizeof(tmp_buf), ne->object);
            s = tmp_buf;
        }
        l1 = strlen(s);

        type = ne->value->type;
        num = ne->value->length;
        if (num > NAME_ONELINE_MAX) {
            X509err(X509_F_X509_NAME_ONELINE, X509_R_NAME_TOO_LONG);
            goto end;
        }
        q = ne->value->data;
#ifdef CHARSET_EBCDIC
        if (type == V_ASN1_GENERALSTRING ||
            type == V_ASN1_VISIBLESTRING ||
            type == V_ASN1_PRINTABLESTRING ||
            type == V_ASN1_TELETEXSTRING ||
            type == V_ASN1_VISIBLESTRING || type == V_ASN1_IA5STRING) {
            if (num > (int)sizeof(ebcdic_buf))
                num = sizeof(ebcdic_buf);
            ascii2ebcdic(ebcdic_buf, q, num);
            q = ebcdic_buf;
        }
#endif

        /*
         * GeneralString with 4-byte-aligned length: treat as 32-bit chars
         * and decide which of the four byte lanes actually carry data.
         */
        if ((type == V_ASN1_GENERALSTRING) && ((num % 4) == 0)) {
            gs_doit[0] = gs_doit[1] = gs_doit[2] = gs_doit[3] = 0;
            for (j = 0; j < num; j++)
                if (q[j] != 0)
                    gs_doit[j & 3] = 1;

            if (gs_doit[0] | gs_doit[1] | gs_doit[2])
                gs_doit[0] = gs_doit[1] = gs_doit[2] = gs_doit[3] = 1;
            else {
                gs_doit[0] = gs_doit[1] = gs_doit[2] = 0;
                gs_doit[3] = 1;
            }
        } else
            gs_doit[0] = gs_doit[1] = gs_doit[2] = gs_doit[3] = 1;

        /* first pass: compute the escaped output length l2 */
        for (l2 = j = 0; j < num; j++) {
            if (!gs_doit[j & 3])
                continue;
            l2++;
#ifndef CHARSET_EBCDIC
            if ((q[j] < ' ') || (q[j] > '~'))
                l2 += 3;        /* will be written as \xHH */
#else
            if ((os_toascii[q[j]] < os_toascii[' ']) ||
                (os_toascii[q[j]] > os_toascii['~']))
                l2 += 3;
#endif
        }

        lold = l;
        l += 1 + l1 + 1 + l2;   /* '/' + shortname + '=' + escaped value */
        if (l > NAME_ONELINE_MAX) {
            X509err(X509_F_X509_NAME_ONELINE, X509_R_NAME_TOO_LONG);
            goto end;
        }
        if (b != NULL) {
            if (!BUF_MEM_grow(b, l + 1))
                goto err;
            p = &(b->data[lold]);
        } else if (l > len) {
            /* fixed caller buffer exhausted: truncate at previous entry */
            break;
        } else
            p = &(buf[lold]);
        *(p++) = '/';
        memcpy(p, s, (unsigned int)l1);
        p += l1;
        *(p++) = '=';

#ifndef CHARSET_EBCDIC          /* q was assigned above already. */
        q = ne->value->data;
#endif

        /* second pass: emit the value, escaping non-printable bytes */
        for (j = 0; j < num; j++) {
            if (!gs_doit[j & 3])
                continue;
#ifndef CHARSET_EBCDIC
            n = q[j];
            if ((n < ' ') || (n > '~')) {
                *(p++) = '\\';
                *(p++) = 'x';
                *(p++) = hex[(n >> 4) & 0x0f];
                *(p++) = hex[n & 0x0f];
            } else
                *(p++) = n;
#else
            n = os_toascii[q[j]];
            if ((n < os_toascii[' ']) || (n > os_toascii['~'])) {
                *(p++) = '\\';
                *(p++) = 'x';
                *(p++) = hex[(n >> 4) & 0x0f];
                *(p++) = hex[n & 0x0f];
            } else
                *(p++) = q[j];
#endif
        }
        *p = '\0';
    }
    if (b != NULL) {
        p = b->data;
        OPENSSL_free(b);        /* frees the BUF_MEM header, not the data */
    } else
        p = buf;
    if (i == 0)
        *p = '\0';              /* name had no entries: empty string */
    return (p);
 err:
    X509err(X509_F_X509_NAME_ONELINE, ERR_R_MALLOC_FAILURE);
 end:
    BUF_MEM_free(b);
    return (NULL);
}
{ "language": "C" }
/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "wa-inc/connection.h"
#include "connection_api.h"

/* Raw connection structure */
typedef struct _connection {
    /* Next connection in the global singly linked list */
    struct _connection *next;
    /* Handle of the connection, as returned by the runtime */
    uint32 handle;
    /* Callback function called when an event occurs on this connection */
    on_connection_event_f on_event;
    /* Opaque pointer passed back to on_event */
    void *user_data;
} connection_t;

/* Head of the list of all open connections */
static connection_t *g_conns = NULL;

/*
 * Open a connection named `name` with serialized attributes `args`.
 * Registers `on_event`/`user_data` for later event dispatch.
 * Returns the new connection, or NULL on runtime or allocation failure
 * (the runtime handle is released again in the latter case).
 */
connection_t *api_open_connection(const char *name, attr_container_t *args,
                                  on_connection_event_f on_event,
                                  void *user_data)
{
    connection_t *conn;
    char *args_buffer = (char *)args;
    uint32 handle, args_len = attr_container_get_serialize_length(args);

    handle = wasm_open_connection(name, args_buffer, args_len);
    /* the runtime signals failure with all-ones; compare unsigned to
     * unsigned to avoid a signed/unsigned mismatch on `uint32` */
    if (handle == (uint32)-1)
        return NULL;

    conn = malloc(sizeof(*conn));
    if (conn == NULL) {
        wasm_close_connection(handle);
        return NULL;
    }

    memset(conn, 0, sizeof(*conn));
    conn->handle = handle;
    conn->on_event = on_event;
    conn->user_data = user_data;

    /* push onto the head of the list (the original if/else on g_conns was
     * redundant: both branches reduce to this, since memset left next NULL) */
    conn->next = g_conns;
    g_conns = conn;

    return conn;
}

/*
 * Close connection `c`: release the runtime handle, unlink it from the
 * global list and free it. Unknown pointers are silently ignored.
 */
void api_close_connection(connection_t *c)
{
    connection_t *conn = g_conns, *prev = NULL;

    while (conn) {
        if (conn == c) {
            wasm_close_connection(c->handle);
            if (prev != NULL)
                prev->next = conn->next;
            else
                g_conns = conn->next;
            free(conn);
            return;
        }
        prev = conn;
        conn = conn->next;
    }
}

/* Send `len` bytes of `data` on `conn`; returns the runtime's result code. */
int api_send_on_connection(connection_t *conn, const char *data, uint32 len)
{
    return wasm_send_on_connection(conn->handle, data, len);
}

/* Apply the serialized configuration `cfg` to `conn`. */
bool api_config_connection(connection_t *conn, attr_container_t *cfg)
{
    char *cfg_buffer = (char *)cfg;
    uint32 cfg_len = attr_container_get_serialize_length(cfg);

    return wasm_config_connection(conn->handle, cfg_buffer, cfg_len);
}

/*
 * Runtime upcall: dispatch incoming data (or, when len == 0, a disconnect
 * notification) to the callback registered for `handle`.
 */
void on_connection_data(uint32 handle, char *buffer, uint32 len)
{
    connection_t *conn;

    for (conn = g_conns; conn != NULL; conn = conn->next) {
        if (conn->handle == handle) {
            if (len == 0) {
                conn->on_event(conn, CONN_EVENT_TYPE_DISCONNECT, NULL, 0,
                               conn->user_data);
            } else {
                conn->on_event(conn, CONN_EVENT_TYPE_DATA, buffer, len,
                               conn->user_data);
            }
            return;
        }
    }
}
{ "language": "C" }
/*
   This file is part of:
   NoahFrame
   https://github.com/ketoo/NoahGameFrame
   Copyright 2009 - 2020 NoahFrame(NoahGameFrame)
   File creator: lvsheng.huang

   NoahFrame is open-source software and you can redistribute it and/or modify
   it under the terms of the License; besides, anyone who use this file/software must include this copyright announcement.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

#ifndef NF_TUTORIAL1_H
#define NF_TUTORIAL1_H

#include "NFComm/NFPluginModule/NFIPlugin.h"
#include "NFComm/NFPluginModule/NFIPluginManager.h"

// Tutorial plugin #1: a minimal NFIPlugin subclass showing the plugin
// lifecycle hooks (version/name queries, Install, Uninstall).
class Tutorial1 : public NFIPlugin
{
public:
    // Stash the plugin manager in the inherited pPluginManager member so the
    // lifecycle hooks can reach the framework.
    Tutorial1(NFIPluginManager* p)
    {
        pPluginManager = p;
    }

    // Plugin version number reported to the framework.
    virtual const int GetPluginVersion();

    // Human-readable plugin name reported to the framework.
    virtual const std::string GetPluginName();

    // Called by the framework to register this plugin's modules.
    virtual void Install();

    // Called by the framework to unregister this plugin's modules.
    virtual void Uninstall();
};

#endif
{ "language": "C" }
/* SPDX-License-Identifier: GPL-2.0 */
/* X-SPDX-Copyright-Text: (c) Solarflare Communications Inc */
#ifndef __CI_EFHW_DEVICE_H__
#define __CI_EFHW_DEVICE_H__

/* NIC hardware architecture generation. */
enum efhw_arch {
	EFHW_ARCH_FALCON,
	EFHW_ARCH_EF10,
};

/*----------------------------------------------------------------------------
 *
 * NIC type
 *
 *---------------------------------------------------------------------------*/

/* Whether the device is a physical or an SR-IOV virtual function. */
enum efhw_function {
	EFHW_FUNCTION_PF,
	EFHW_FUNCTION_VF,
};

/* Identifies a concrete NIC device: architecture generation, silicon
 * variant/revision, and PF/VF role. */
struct efhw_device_type {
	int arch;            /* enum efhw_arch */
	char variant;        /* 'A', 'B', ... */
	int revision;        /* 0, 1, ... */
	int function;        /* enum efhw_function */
};

#endif
{ "language": "C" }
#include <assert.h> #include <string.h> #include <stdlib.h> #include <math.h> #include "minimap.h" #include "mmpriv.h" #include "ksw2.h" static void ksw_gen_simple_mat(int m, int8_t *mat, int8_t a, int8_t b, int8_t sc_ambi) { int i, j; a = a < 0? -a : a; b = b > 0? -b : b; sc_ambi = sc_ambi > 0? -sc_ambi : sc_ambi; for (i = 0; i < m - 1; ++i) { for (j = 0; j < m - 1; ++j) mat[i * m + j] = i == j? a : b; mat[i * m + m - 1] = sc_ambi; } for (j = 0; j < m; ++j) mat[(m - 1) * m + j] = sc_ambi; } static inline void mm_seq_rev(uint32_t len, uint8_t *seq) { uint32_t i; uint8_t t; for (i = 0; i < len>>1; ++i) t = seq[i], seq[i] = seq[len - 1 - i], seq[len - 1 - i] = t; } static inline void update_max_zdrop(int32_t score, int i, int j, int32_t *max, int *max_i, int *max_j, int e, int *max_zdrop, int pos[2][2]) { if (score < *max) { int li = i - *max_i; int lj = j - *max_j; int diff = li > lj? li - lj : lj - li; int z = *max - score - diff * e; if (z > *max_zdrop) { *max_zdrop = z; pos[0][0] = *max_i, pos[0][1] = i + 1; pos[1][0] = *max_j, pos[1][1] = j + 1; } } else *max = score, *max_i = i, *max_j = j; } static int mm_test_zdrop(void *km, const mm_mapopt_t *opt, const uint8_t *qseq, const uint8_t *tseq, uint32_t n_cigar, uint32_t *cigar, const int8_t *mat) { uint32_t k; int32_t score = 0, max = INT32_MIN, max_i = -1, max_j = -1, i = 0, j = 0, max_zdrop = 0; int pos[2][2] = {{-1, -1}, {-1, -1}}, q_len, t_len; // find the score and the region where score drops most along diagonal for (k = 0, score = 0; k < n_cigar; ++k) { uint32_t l, op = cigar[k]&0xf, len = cigar[k]>>4; if (op == 0) { for (l = 0; l < len; ++l) { score += mat[tseq[i + l] * 5 + qseq[j + l]]; update_max_zdrop(score, i+l, j+l, &max, &max_i, &max_j, opt->e, &max_zdrop, pos); } i += len, j += len; } else if (op == 1 || op == 2 || op == 3) { score -= opt->q + opt->e * len; if (op == 1) j += len; // insertion else i += len; // deletion update_max_zdrop(score, i, j, &max, &max_i, &max_j, opt->e, &max_zdrop, pos); } 
} // test if there is an inversion in the most dropped region q_len = pos[1][1] - pos[1][0], t_len = pos[0][1] - pos[0][0]; if (!(opt->flag&(MM_F_SPLICE|MM_F_SR|MM_F_FOR_ONLY|MM_F_REV_ONLY)) && max_zdrop > opt->zdrop_inv && q_len < opt->max_gap && t_len < opt->max_gap) { uint8_t *qseq2; void *qp; int q_off, t_off; qseq2 = (uint8_t*)kmalloc(km, q_len); for (i = 0; i < q_len; ++i) { int c = qseq[pos[1][1] - i - 1]; qseq2[i] = c >= 4? 4 : 3 - c; } qp = ksw_ll_qinit(km, 2, q_len, qseq2, 5, mat); score = ksw_ll_i16(qp, t_len, tseq + pos[0][0], opt->q, opt->e, &q_off, &t_off); kfree(km, qseq2); kfree(km, qp); if (score >= opt->min_chain_score * opt->a && score >= opt->min_dp_max) return 2; // there is a potential inversion } return max_zdrop > opt->zdrop? 1 : 0; } static void mm_fix_cigar(mm_reg1_t *r, const uint8_t *qseq, const uint8_t *tseq, int *qshift, int *tshift) { mm_extra_t *p = r->p; int32_t toff = 0, qoff = 0, to_shrink = 0; uint32_t k; *qshift = *tshift = 0; if (p->n_cigar <= 1) return; for (k = 0; k < p->n_cigar; ++k) { // indel left alignment uint32_t op = p->cigar[k]&0xf, len = p->cigar[k]>>4; if (len == 0) to_shrink = 1; if (op == 0) { toff += len, qoff += len; } else if (op == 1 || op == 2) { // insertion or deletion if (k > 0 && k < p->n_cigar - 1 && (p->cigar[k-1]&0xf) == 0 && (p->cigar[k+1]&0xf) == 0) { int l, prev_len = p->cigar[k-1] >> 4; if (op == 1) { for (l = 0; l < prev_len; ++l) if (qseq[qoff - 1 - l] != qseq[qoff + len - 1 - l]) break; } else { for (l = 0; l < prev_len; ++l) if (tseq[toff - 1 - l] != tseq[toff + len - 1 - l]) break; } if (l > 0) p->cigar[k-1] -= l<<4, p->cigar[k+1] += l<<4, qoff -= l, toff -= l; if (l == prev_len) to_shrink = 1; } if (op == 1) qoff += len; else toff += len; } else if (op == 3) { toff += len; } } assert(qoff == r->qe - r->qs && toff == r->re - r->rs); for (k = 0; k < p->n_cigar - 2; ++k) { // fix CIGAR like 5I6D7I if ((p->cigar[k]&0xf) > 0 && (p->cigar[k]&0xf) + (p->cigar[k+1]&0xf) == 3) { uint32_t l, s[3] = 
{0,0,0}; for (l = k; l < p->n_cigar; ++l) { // count number of adjacent I and D uint32_t op = p->cigar[l]&0xf; if (op == 1 || op == 2 || p->cigar[l]>>4 == 0) s[op] += p->cigar[l] >> 4; else break; } if (s[1] > 0 && s[2] > 0 && l - k > 2) { // turn to a single I and a single D p->cigar[k] = s[1]<<4|1; p->cigar[k+1] = s[2]<<4|2; for (k += 2; k < l; ++k) p->cigar[k] &= 0xf; to_shrink = 1; } k = l; } } if (to_shrink) { // squeeze out zero-length operations int32_t l = 0; for (k = 0; k < p->n_cigar; ++k) // squeeze out zero-length operations if (p->cigar[k]>>4 != 0) p->cigar[l++] = p->cigar[k]; p->n_cigar = l; for (k = l = 0; k < p->n_cigar; ++k) // merge two adjacent operations if they are the same if (k == p->n_cigar - 1 || (p->cigar[k]&0xf) != (p->cigar[k+1]&0xf)) p->cigar[l++] = p->cigar[k]; else p->cigar[k+1] += p->cigar[k]>>4<<4; // add length to the next CIGAR operator p->n_cigar = l; } if ((p->cigar[0]&0xf) == 1 || (p->cigar[0]&0xf) == 2) { // get rid of leading I or D int32_t l = p->cigar[0] >> 4; if ((p->cigar[0]&0xf) == 1) { if (r->rev) r->qe -= l; else r->qs += l; *qshift = l; } else r->rs += l, *tshift = l; --p->n_cigar; memmove(p->cigar, p->cigar + 1, p->n_cigar * 4); } } static void mm_update_cigar_eqx(mm_reg1_t *r, const uint8_t *qseq, const uint8_t *tseq) // written by @armintoepfer { uint32_t n_EQX = 0; uint32_t k, l, m, cap, toff = 0, qoff = 0, n_M = 0; mm_extra_t *p; if (r->p == 0) return; for (k = 0; k < r->p->n_cigar; ++k) { uint32_t op = r->p->cigar[k]&0xf, len = r->p->cigar[k]>>4; if (op == 0) { while (len > 0) { for (l = 0; l < len && qseq[qoff + l] == tseq[toff + l]; ++l) {} // run of "="; TODO: N<=>N is converted to "=" if (l > 0) { ++n_EQX; len -= l; toff += l; qoff += l; } for (l = 0; l < len && qseq[qoff + l] != tseq[toff + l]; ++l) {} // run of "X" if (l > 0) { ++n_EQX; len -= l; toff += l; qoff += l; } } ++n_M; } else if (op == 1) { // insertion qoff += len; } else if (op == 2) { // deletion toff += len; } else if (op == 3) { // intron 
toff += len; } } // update in-place if we can if (n_EQX == n_M) { for (k = 0; k < r->p->n_cigar; ++k) { uint32_t op = r->p->cigar[k]&0xf, len = r->p->cigar[k]>>4; if (op == 0) r->p->cigar[k] = len << 4 | 7; } return; } // allocate new storage cap = r->p->n_cigar + (n_EQX - n_M) + sizeof(mm_extra_t); kroundup32(cap); p = (mm_extra_t*)calloc(cap, 4); memcpy(p, r->p, sizeof(mm_extra_t)); p->capacity = cap; // update cigar while copying toff = qoff = m = 0; for (k = 0; k < r->p->n_cigar; ++k) { uint32_t op = r->p->cigar[k]&0xf, len = r->p->cigar[k]>>4; if (op == 0) { // match/mismatch while (len > 0) { // match for (l = 0; l < len && qseq[qoff + l] == tseq[toff + l]; ++l) {} if (l > 0) p->cigar[m++] = l << 4 | 7; len -= l; toff += l, qoff += l; // mismatch for (l = 0; l < len && qseq[qoff + l] != tseq[toff + l]; ++l) {} if (l > 0) p->cigar[m++] = l << 4 | 8; len -= l; toff += l, qoff += l; } continue; } else if (op == 1) { // insertion qoff += len; } else if (op == 2) { // deletion toff += len; } else if (op == 3) { // intron toff += len; } p->cigar[m++] = r->p->cigar[k]; } p->n_cigar = m; free(r->p); r->p = p; } static void mm_update_extra(mm_reg1_t *r, const uint8_t *qseq, const uint8_t *tseq, const int8_t *mat, int8_t q, int8_t e, int is_eqx) { uint32_t k, l; int32_t s = 0, max = 0, qshift, tshift, toff = 0, qoff = 0; mm_extra_t *p = r->p; if (p == 0) return; mm_fix_cigar(r, qseq, tseq, &qshift, &tshift); qseq += qshift, tseq += tshift; // qseq and tseq may be shifted due to the removal of leading I/D r->blen = r->mlen = 0; for (k = 0; k < p->n_cigar; ++k) { uint32_t op = p->cigar[k]&0xf, len = p->cigar[k]>>4; if (op == 0) { // match/mismatch int n_ambi = 0, n_diff = 0; for (l = 0; l < len; ++l) { int cq = qseq[qoff + l], ct = tseq[toff + l]; if (ct > 3 || cq > 3) ++n_ambi; else if (ct != cq) ++n_diff; s += mat[ct * 5 + cq]; if (s < 0) s = 0; else max = max > s? 
max : s; } r->blen += len - n_ambi, r->mlen += len - (n_ambi + n_diff), p->n_ambi += n_ambi; toff += len, qoff += len; } else if (op == 1) { // insertion int n_ambi = 0; for (l = 0; l < len; ++l) if (qseq[qoff + l] > 3) ++n_ambi; r->blen += len - n_ambi, p->n_ambi += n_ambi; s -= q + e * len; if (s < 0) s = 0; qoff += len; } else if (op == 2) { // deletion int n_ambi = 0; for (l = 0; l < len; ++l) if (tseq[toff + l] > 3) ++n_ambi; r->blen += len - n_ambi, p->n_ambi += n_ambi; s -= q + e * len; if (s < 0) s = 0; toff += len; } else if (op == 3) { // intron toff += len; } } p->dp_max = max; assert(qoff == r->qe - r->qs && toff == r->re - r->rs); if (is_eqx) mm_update_cigar_eqx(r, qseq, tseq); // NB: it has to be called here as changes to qseq and tseq are not returned } static void mm_append_cigar(mm_reg1_t *r, uint32_t n_cigar, uint32_t *cigar) // TODO: this calls the libc realloc() { mm_extra_t *p; if (n_cigar == 0) return; if (r->p == 0) { uint32_t capacity = n_cigar + sizeof(mm_extra_t)/4; kroundup32(capacity); r->p = (mm_extra_t*)calloc(capacity, 4); r->p->capacity = capacity; } else if (r->p->n_cigar + n_cigar + sizeof(mm_extra_t)/4 > r->p->capacity) { r->p->capacity = r->p->n_cigar + n_cigar + sizeof(mm_extra_t)/4; kroundup32(r->p->capacity); r->p = (mm_extra_t*)realloc(r->p, r->p->capacity * 4); } p = r->p; if (p->n_cigar > 0 && (p->cigar[p->n_cigar-1]&0xf) == (cigar[0]&0xf)) { // same CIGAR op at the boundary p->cigar[p->n_cigar-1] += cigar[0]>>4<<4; if (n_cigar > 1) memcpy(p->cigar + p->n_cigar, cigar + 1, (n_cigar - 1) * 4); p->n_cigar += n_cigar - 1; } else { memcpy(p->cigar + p->n_cigar, cigar, n_cigar * 4); p->n_cigar += n_cigar; } } static void mm_align_pair(void *km, const mm_mapopt_t *opt, int qlen, const uint8_t *qseq, int tlen, const uint8_t *tseq, const uint8_t *junc, const int8_t *mat, int w, int end_bonus, int zdrop, int flag, ksw_extz_t *ez) { if (mm_dbg_flag & MM_DBG_PRINT_ALN_SEQ) { int i; fprintf(stderr, "===> q=(%d,%d), e=(%d,%d), bw=%d, 
flag=%d, zdrop=%d <===\n", opt->q, opt->q2, opt->e, opt->e2, w, flag, opt->zdrop); for (i = 0; i < tlen; ++i) fputc("ACGTN"[tseq[i]], stderr); fputc('\n', stderr); for (i = 0; i < qlen; ++i) fputc("ACGTN"[qseq[i]], stderr); fputc('\n', stderr); } if (opt->max_sw_mat > 0 && (int64_t)tlen * qlen > opt->max_sw_mat) { ksw_reset_extz(ez); ez->zdropped = 1; } else if (opt->flag & MM_F_SPLICE) ksw_exts2_sse(km, qlen, qseq, tlen, tseq, 5, mat, opt->q, opt->e, opt->q2, opt->noncan, zdrop, opt->junc_bonus, flag, junc, ez); else if (opt->q == opt->q2 && opt->e == opt->e2) ksw_extz2_sse(km, qlen, qseq, tlen, tseq, 5, mat, opt->q, opt->e, w, zdrop, end_bonus, flag, ez); else ksw_extd2_sse(km, qlen, qseq, tlen, tseq, 5, mat, opt->q, opt->e, opt->q2, opt->e2, w, zdrop, end_bonus, flag, ez); if (mm_dbg_flag & MM_DBG_PRINT_ALN_SEQ) { int i; fprintf(stderr, "score=%d, cigar=", ez->score); for (i = 0; i < ez->n_cigar; ++i) fprintf(stderr, "%d%c", ez->cigar[i]>>4, "MIDN"[ez->cigar[i]&0xf]); fprintf(stderr, "\n"); } } static inline int mm_get_hplen_back(const mm_idx_t *mi, uint32_t rid, uint32_t x) { int64_t i, off0 = mi->seq[rid].offset, off = off0 + x; int c = mm_seq4_get(mi->S, off); for (i = off - 1; i >= off0; --i) if (mm_seq4_get(mi->S, i) != c) break; return (int)(off - i); } static inline void mm_adjust_minier(const mm_idx_t *mi, uint8_t *const qseq0[2], mm128_t *a, int32_t *r, int32_t *q) { if (mi->flag & MM_I_HPC) { const uint8_t *qseq = qseq0[a->x>>63]; int i, c; *q = (int32_t)a->y; for (i = *q - 1, c = qseq[*q]; i > 0; --i) if (qseq[i] != c) break; *q = i + 1; c = mm_get_hplen_back(mi, a->x<<1>>33, (int32_t)a->x); *r = (int32_t)a->x + 1 - c; } else { *r = (int32_t)a->x - (mi->k>>1); *q = (int32_t)a->y - (mi->k>>1); } } static int *collect_long_gaps(void *km, int as1, int cnt1, mm128_t *a, int min_gap, int *n_) { int i, n, *K; *n_ = 0; for (i = 1, n = 0; i < cnt1; ++i) { // count the number of gaps longer than min_gap int gap = ((int32_t)a[as1 + i].y - a[as1 + i - 1].y) - 
((int32_t)a[as1 + i].x - a[as1 + i - 1].x); if (gap < -min_gap || gap > min_gap) ++n; } if (n <= 1) return 0; K = (int*)kmalloc(km, n * sizeof(int)); for (i = 1, n = 0; i < cnt1; ++i) { // store the positions of long gaps int gap = ((int32_t)a[as1 + i].y - a[as1 + i - 1].y) - ((int32_t)a[as1 + i].x - a[as1 + i - 1].x); if (gap < -min_gap || gap > min_gap) K[n++] = i; } *n_ = n; return K; } static void mm_filter_bad_seeds(void *km, int as1, int cnt1, mm128_t *a, int min_gap, int diff_thres, int max_ext_len, int max_ext_cnt) { int max_st, max_en, n, i, k, max, *K; K = collect_long_gaps(km, as1, cnt1, a, min_gap, &n); if (K == 0) return; max = 0, max_st = max_en = -1; for (k = 0;; ++k) { // traverse long gaps int gap, l, n_ins = 0, n_del = 0, qs, rs, max_diff = 0, max_diff_l = -1; if (k == n || k >= max_en) { if (max_en > 0) for (i = K[max_st]; i < K[max_en]; ++i) a[as1 + i].y |= MM_SEED_IGNORE; max = 0, max_st = max_en = -1; if (k == n) break; } i = K[k]; gap = ((int32_t)a[as1 + i].y - (int32_t)a[as1 + i - 1].y) - (int32_t)(a[as1 + i].x - a[as1 + i - 1].x); if (gap > 0) n_ins += gap; else n_del += -gap; qs = (int32_t)a[as1 + i - 1].y; rs = (int32_t)a[as1 + i - 1].x; for (l = k + 1; l < n && l <= k + max_ext_cnt; ++l) { int j = K[l], diff; if ((int32_t)a[as1 + j].y - qs > max_ext_len || (int32_t)a[as1 + j].x - rs > max_ext_len) break; gap = ((int32_t)a[as1 + j].y - (int32_t)a[as1 + j - 1].y) - (int32_t)(a[as1 + j].x - a[as1 + j - 1].x); if (gap > 0) n_ins += gap; else n_del += -gap; diff = n_ins + n_del - abs(n_ins - n_del); if (max_diff < diff) max_diff = diff, max_diff_l = l; } if (max_diff > diff_thres && max_diff > max) max = max_diff, max_st = k, max_en = max_diff_l; } kfree(km, K); } static void mm_filter_bad_seeds_alt(void *km, int as1, int cnt1, mm128_t *a, int min_gap, int max_ext) { int n, k, *K; K = collect_long_gaps(km, as1, cnt1, a, min_gap, &n); if (K == 0) return; for (k = 0; k < n;) { int i = K[k], l; int gap1 = ((int32_t)a[as1 + i].y - (int32_t)a[as1 
+ i - 1].y) - ((int32_t)a[as1 + i].x - (int32_t)a[as1 + i - 1].x); int re1 = (int32_t)a[as1 + i].x; int qe1 = (int32_t)a[as1 + i].y; gap1 = gap1 > 0? gap1 : -gap1; for (l = k + 1; l < n; ++l) { int j = K[l], gap2, q_span_pre, rs2, qs2, m; if ((int32_t)a[as1 + j].y - qe1 > max_ext || (int32_t)a[as1 + j].x - re1 > max_ext) break; gap2 = ((int32_t)a[as1 + j].y - (int32_t)a[as1 + j - 1].y) - (int32_t)(a[as1 + j].x - a[as1 + j - 1].x); q_span_pre = a[as1 + j - 1].y >> 32 & 0xff; rs2 = (int32_t)a[as1 + j - 1].x + q_span_pre; qs2 = (int32_t)a[as1 + j - 1].y + q_span_pre; m = rs2 - re1 < qs2 - qe1? rs2 - re1 : qs2 - qe1; gap2 = gap2 > 0? gap2 : -gap2; if (m > gap1 + gap2) break; re1 = (int32_t)a[as1 + j].x; qe1 = (int32_t)a[as1 + j].y; gap1 = gap2; } if (l > k + 1) { int j, end = K[l - 1]; for (j = K[k]; j < end; ++j) a[as1 + j].y |= MM_SEED_IGNORE; a[as1 + end].y |= MM_SEED_LONG_JOIN; } k = l; } kfree(km, K); } static void mm_fix_bad_ends(const mm_reg1_t *r, const mm128_t *a, int bw, int min_match, int32_t *as, int32_t *cnt) { int32_t i, l, m; *as = r->as, *cnt = r->cnt; if (r->cnt < 3) return; m = l = a[r->as].y >> 32 & 0xff; for (i = r->as + 1; i < r->as + r->cnt - 1; ++i) { int32_t lq, lr, min, max; int32_t q_span = a[i].y >> 32 & 0xff; if (a[i].y & MM_SEED_LONG_JOIN) break; lr = (int32_t)a[i].x - (int32_t)a[i-1].x; lq = (int32_t)a[i].y - (int32_t)a[i-1].y; min = lr < lq? lr : lq; max = lr > lq? lr : lq; if (max - min > l >> 1) *as = i; l += min; m += min < q_span? min : q_span; if (l >= bw << 1 || (m >= min_match && m >= bw) || m >= r->mlen >> 1) break; } *cnt = r->as + r->cnt - *as; m = l = a[r->as + r->cnt - 1].y >> 32 & 0xff; for (i = r->as + r->cnt - 2; i > *as; --i) { int32_t lq, lr, min, max; int32_t q_span = a[i+1].y >> 32 & 0xff; if (a[i+1].y & MM_SEED_LONG_JOIN) break; lr = (int32_t)a[i+1].x - (int32_t)a[i].x; lq = (int32_t)a[i+1].y - (int32_t)a[i].y; min = lr < lq? lr : lq; max = lr > lq? 
lr : lq; if (max - min > l >> 1) *cnt = i + 1 - *as; l += min; m += min < q_span? min : q_span; if (l >= bw << 1 || (m >= min_match && m >= bw) || m >= r->mlen >> 1) break; } } static void mm_max_stretch(const mm_reg1_t *r, const mm128_t *a, int32_t *as, int32_t *cnt) { int32_t i, score, max_score, len, max_i, max_len; *as = r->as, *cnt = r->cnt; if (r->cnt < 2) return; max_score = -1, max_i = -1, max_len = 0; score = a[r->as].y >> 32 & 0xff, len = 1; for (i = r->as + 1; i < r->as + r->cnt; ++i) { int32_t lq, lr, q_span; q_span = a[i].y >> 32 & 0xff; lr = (int32_t)a[i].x - (int32_t)a[i-1].x; lq = (int32_t)a[i].y - (int32_t)a[i-1].y; if (lq == lr) { score += lq < q_span? lq : q_span; ++len; } else { if (score > max_score) max_score = score, max_len = len, max_i = i - len; score = q_span, len = 1; } } if (score > max_score) max_score = score, max_len = len, max_i = i - len; *as = max_i, *cnt = max_len; } static int mm_seed_ext_score(void *km, const mm_mapopt_t *opt, const mm_idx_t *mi, const int8_t mat[25], int qlen, uint8_t *qseq0[2], const mm128_t *a) { uint8_t *qseq, *tseq; int q_span = a->y>>32&0xff, qs, qe, rs, re, rid, score, q_off, t_off, ext_len = opt->anchor_ext_len; void *qp; rid = a->x<<1>>33; re = (uint32_t)a->x + 1, rs = re - q_span; qe = (uint32_t)a->y + 1, qs = qe - q_span; rs = rs - ext_len > 0? rs - ext_len : 0; qs = qs - ext_len > 0? qs - ext_len : 0; re = re + ext_len < (int32_t)mi->seq[rid].len? re + ext_len : mi->seq[rid].len; qe = qe + ext_len < qlen? 
qe + ext_len : qlen; tseq = (uint8_t*)kmalloc(km, re - rs); mm_idx_getseq(mi, rid, rs, re, tseq); qseq = qseq0[a->x>>63] + qs; qp = ksw_ll_qinit(km, 2, qe - qs, qseq, 5, mat); score = ksw_ll_i16(qp, re - rs, tseq, opt->q, opt->e, &q_off, &t_off); kfree(km, tseq); kfree(km, qp); return score; } static void mm_fix_bad_ends_splice(void *km, const mm_mapopt_t *opt, const mm_idx_t *mi, const mm_reg1_t *r, const int8_t mat[25], int qlen, uint8_t *qseq0[2], const mm128_t *a, int *as1, int *cnt1) { // this assumes a very crude k-mer based mode; it is not necessary to use a good model just for filtering bounary exons int score; double log_gap; *as1 = r->as, *cnt1 = r->cnt; if (r->cnt < 3) return; log_gap = log((int32_t)a[r->as + 1].x - (int32_t)a[r->as].x); if ((a[r->as].y>>32&0xff) < log_gap + opt->anchor_ext_shift) { score = mm_seed_ext_score(km, opt, mi, mat, qlen, qseq0, &a[r->as]); if ((double)score / mat[0] < log_gap + opt->anchor_ext_shift) // a more exact format is "score < log_4(gap) + shift" ++(*as1), --(*cnt1); } log_gap = log((int32_t)a[r->as + r->cnt - 1].x - (int32_t)a[r->as + r->cnt - 2].x); if ((a[r->as + r->cnt - 1].y>>32&0xff) < log_gap + opt->anchor_ext_shift) { score = mm_seed_ext_score(km, opt, mi, mat, qlen, qseq0, &a[r->as + r->cnt - 1]); if ((double)score / mat[0] < log_gap + opt->anchor_ext_shift) --(*cnt1); } } static void mm_align1(void *km, const mm_mapopt_t *opt, const mm_idx_t *mi, int qlen, uint8_t *qseq0[2], mm_reg1_t *r, mm_reg1_t *r2, int n_a, mm128_t *a, ksw_extz_t *ez, int splice_flag) { int is_sr = !!(opt->flag & MM_F_SR), is_splice = !!(opt->flag & MM_F_SPLICE); int32_t rid = a[r->as].x<<1>>33, rev = a[r->as].x>>63, as1, cnt1; uint8_t *tseq, *qseq, *junc; int32_t i, l, bw, dropped = 0, extra_flag = 0, rs0, re0, qs0, qe0; int32_t rs, re, qs, qe; int32_t rs1, qs1, re1, qe1; int8_t mat[25]; if (is_sr) assert(!(mi->flag & MM_I_HPC)); // HPC won't work with SR because with HPC we can't easily tell if there is a gap r2->cnt = 0; if (r->cnt == 
0) return; ksw_gen_simple_mat(5, mat, opt->a, opt->b, opt->sc_ambi); bw = (int)(opt->bw * 1.5 + 1.); if (is_sr && !(mi->flag & MM_I_HPC)) { mm_max_stretch(r, a, &as1, &cnt1); rs = (int32_t)a[as1].x + 1 - (int32_t)(a[as1].y>>32&0xff); qs = (int32_t)a[as1].y + 1 - (int32_t)(a[as1].y>>32&0xff); re = (int32_t)a[as1+cnt1-1].x + 1; qe = (int32_t)a[as1+cnt1-1].y + 1; } else { if (!(opt->flag & MM_F_NO_END_FLT)) { if (is_splice) mm_fix_bad_ends_splice(km, opt, mi, r, mat, qlen, qseq0, a, &as1, &cnt1); else mm_fix_bad_ends(r, a, opt->bw, opt->min_chain_score * 2, &as1, &cnt1); } else as1 = r->as, cnt1 = r->cnt; mm_filter_bad_seeds(km, as1, cnt1, a, 10, 40, opt->max_gap>>1, 10); mm_filter_bad_seeds_alt(km, as1, cnt1, a, 30, opt->max_gap>>1); mm_adjust_minier(mi, qseq0, &a[as1], &rs, &qs); mm_adjust_minier(mi, qseq0, &a[as1 + cnt1 - 1], &re, &qe); } assert(cnt1 > 0); if (is_splice) { if (splice_flag & MM_F_SPLICE_FOR) extra_flag |= rev? KSW_EZ_SPLICE_REV : KSW_EZ_SPLICE_FOR; if (splice_flag & MM_F_SPLICE_REV) extra_flag |= rev? KSW_EZ_SPLICE_FOR : KSW_EZ_SPLICE_REV; if (opt->flag & MM_F_SPLICE_FLANK) extra_flag |= KSW_EZ_SPLICE_FLANK; } /* Look for the start and end of regions to perform DP. This sounds easy * but is in fact tricky. Excessively small regions lead to unnecessary * clippings and lose alignable sequences. Excessively large regions * occasionally lead to large overlaps between two chains and may cause * loss of alignments in corner cases. */ if (is_sr) { qs0 = 0, qe0 = qlen; l = qs; l += l * opt->a + opt->end_bonus > opt->q? (l * opt->a + opt->end_bonus - opt->q) / opt->e : 0; rs0 = rs - l > 0? rs - l : 0; l = qlen - qe; l += l * opt->a + opt->end_bonus > opt->q? (l * opt->a + opt->end_bonus - opt->q) / opt->e : 0; re0 = re + l < (int32_t)mi->seq[rid].len? 
re + l : mi->seq[rid].len; } else { // compute rs0 and qs0 rs0 = (int32_t)a[r->as].x + 1 - (int32_t)(a[r->as].y>>32&0xff); qs0 = (int32_t)a[r->as].y + 1 - (int32_t)(a[r->as].y>>32&0xff); if (rs0 < 0) rs0 = 0; // this may happen when HPC is in use assert(qs0 >= 0); // this should never happen, or it is logic error rs1 = qs1 = 0; for (i = r->as - 1, l = 0; i >= 0 && a[i].x>>32 == a[r->as].x>>32; --i) { // inspect nearby seeds int32_t x = (int32_t)a[i].x + 1 - (int32_t)(a[i].y>>32&0xff); int32_t y = (int32_t)a[i].y + 1 - (int32_t)(a[i].y>>32&0xff); if (x < rs0 && y < qs0) { if (++l > opt->min_cnt) { l = rs0 - x > qs0 - y? rs0 - x : qs0 - y; rs1 = rs0 - l, qs1 = qs0 - l; if (rs1 < 0) rs1 = 0; // not strictly necessary; better have this guard for explicit break; } } } if (qs > 0 && rs > 0) { l = qs < opt->max_gap? qs : opt->max_gap; qs1 = qs1 > qs - l? qs1 : qs - l; qs0 = qs0 < qs1? qs0 : qs1; // at least include qs0 l += l * opt->a > opt->q? (l * opt->a - opt->q) / opt->e : 0; l = l < opt->max_gap? l : opt->max_gap; l = l < rs? l : rs; rs1 = rs1 > rs - l? rs1 : rs - l; rs0 = rs0 < rs1? rs0 : rs1; rs0 = rs0 < rs? rs0 : rs; } else rs0 = rs, qs0 = qs; // compute re0 and qe0 re0 = (int32_t)a[r->as + r->cnt - 1].x + 1; qe0 = (int32_t)a[r->as + r->cnt - 1].y + 1; re1 = mi->seq[rid].len, qe1 = qlen; for (i = r->as + r->cnt, l = 0; i < n_a && a[i].x>>32 == a[r->as].x>>32; ++i) { // inspect nearby seeds int32_t x = (int32_t)a[i].x + 1; int32_t y = (int32_t)a[i].y + 1; if (x > re0 && y > qe0) { if (++l > opt->min_cnt) { l = x - re0 > y - qe0? x - re0 : y - qe0; re1 = re0 + l, qe1 = qe0 + l; break; } } } if (qe < qlen && re < (int32_t)mi->seq[rid].len) { l = qlen - qe < opt->max_gap? qlen - qe : opt->max_gap; qe1 = qe1 < qe + l? qe1 : qe + l; qe0 = qe0 > qe1? qe0 : qe1; // at least include qe0 l += l * opt->a > opt->q? (l * opt->a - opt->q) / opt->e : 0; l = l < opt->max_gap? l : opt->max_gap; l = l < (int32_t)mi->seq[rid].len - re? l : mi->seq[rid].len - re; re1 = re1 < re + l? 
re1 : re + l; re0 = re0 > re1? re0 : re1; } else re0 = re, qe0 = qe; } if (a[r->as].y & MM_SEED_SELF) { int max_ext = r->qs > r->rs? r->qs - r->rs : r->rs - r->qs; if (r->rs - rs0 > max_ext) rs0 = r->rs - max_ext; if (r->qs - qs0 > max_ext) qs0 = r->qs - max_ext; max_ext = r->qe > r->re? r->qe - r->re : r->re - r->qe; if (re0 - r->re > max_ext) re0 = r->re + max_ext; if (qe0 - r->qe > max_ext) qe0 = r->qe + max_ext; } assert(re0 > rs0); tseq = (uint8_t*)kmalloc(km, re0 - rs0); junc = (uint8_t*)kmalloc(km, re0 - rs0); if (qs > 0 && rs > 0) { // left extension; probably the condition can be changed to "qs > qs0 && rs > rs0" qseq = &qseq0[rev][qs0]; mm_idx_getseq(mi, rid, rs0, rs, tseq); mm_idx_bed_junc(mi, rid, rs0, rs, junc); mm_seq_rev(qs - qs0, qseq); mm_seq_rev(rs - rs0, tseq); mm_seq_rev(rs - rs0, junc); mm_align_pair(km, opt, qs - qs0, qseq, rs - rs0, tseq, junc, mat, bw, opt->end_bonus, r->split_inv? opt->zdrop_inv : opt->zdrop, extra_flag|KSW_EZ_EXTZ_ONLY|KSW_EZ_RIGHT|KSW_EZ_REV_CIGAR, ez); if (ez->n_cigar > 0) { mm_append_cigar(r, ez->n_cigar, ez->cigar); r->p->dp_score += ez->max; } rs1 = rs - (ez->reach_end? ez->mqe_t + 1 : ez->max_t + 1); qs1 = qs - (ez->reach_end? qs - qs0 : ez->max_q + 1); mm_seq_rev(qs - qs0, qseq); } else rs1 = rs, qs1 = qs; re1 = rs, qe1 = qs; assert(qs1 >= 0 && rs1 >= 0); for (i = is_sr? cnt1 - 1 : 1; i < cnt1; ++i) { // gap filling if ((a[as1+i].y & (MM_SEED_IGNORE|MM_SEED_TANDEM)) && i != cnt1 - 1) continue; if (is_sr && !(mi->flag & MM_I_HPC)) { re = (int32_t)a[as1 + i].x + 1; qe = (int32_t)a[as1 + i].y + 1; } else mm_adjust_minier(mi, qseq0, &a[as1 + i], &re, &qe); re1 = re, qe1 = qe; if (i == cnt1 - 1 || (a[as1+i].y&MM_SEED_LONG_JOIN) || (qe - qs >= opt->min_ksw_len && re - rs >= opt->min_ksw_len)) { int j, bw1 = bw, zdrop_code; if (a[as1+i].y & MM_SEED_LONG_JOIN) bw1 = qe - qs > re - rs? 
qe - qs : re - rs; // perform alignment qseq = &qseq0[rev][qs]; mm_idx_getseq(mi, rid, rs, re, tseq); mm_idx_bed_junc(mi, rid, rs, re, junc); if (is_sr) { // perform ungapped alignment assert(qe - qs == re - rs); ksw_reset_extz(ez); for (j = 0, ez->score = 0; j < qe - qs; ++j) { if (qseq[j] >= 4 || tseq[j] >= 4) ez->score += opt->e2; else ez->score += qseq[j] == tseq[j]? opt->a : -opt->b; } ez->cigar = ksw_push_cigar(km, &ez->n_cigar, &ez->m_cigar, ez->cigar, 0, qe - qs); } else { // perform normal gapped alignment mm_align_pair(km, opt, qe - qs, qseq, re - rs, tseq, junc, mat, bw1, -1, opt->zdrop, extra_flag|KSW_EZ_APPROX_MAX, ez); // first pass: with approximate Z-drop } // test Z-drop and inversion Z-drop if ((zdrop_code = mm_test_zdrop(km, opt, qseq, tseq, ez->n_cigar, ez->cigar, mat)) != 0) mm_align_pair(km, opt, qe - qs, qseq, re - rs, tseq, junc, mat, bw1, -1, zdrop_code == 2? opt->zdrop_inv : opt->zdrop, extra_flag, ez); // second pass: lift approximate // update CIGAR if (ez->n_cigar > 0) mm_append_cigar(r, ez->n_cigar, ez->cigar); if (ez->zdropped) { // truncated by Z-drop; TODO: sometimes Z-drop kicks in because the next seed placement is wrong. This can be fixed in principle. 
if (!r->p) { assert(ez->n_cigar == 0); uint32_t capacity = sizeof(mm_extra_t)/4; kroundup32(capacity); r->p = (mm_extra_t*)calloc(capacity, 4); r->p->capacity = capacity; } for (j = i - 1; j >= 0; --j) if ((int32_t)a[as1 + j].x <= rs + ez->max_t) break; dropped = 1; if (j < 0) j = 0; r->p->dp_score += ez->max; re1 = rs + (ez->max_t + 1); qe1 = qs + (ez->max_q + 1); if (cnt1 - (j + 1) >= opt->min_cnt) { mm_split_reg(r, r2, as1 + j + 1 - r->as, qlen, a); if (zdrop_code == 2) r2->split_inv = 1; } break; } else r->p->dp_score += ez->score; rs = re, qs = qe; } } if (!dropped && qe < qe0 && re < re0) { // right extension qseq = &qseq0[rev][qe]; mm_idx_getseq(mi, rid, re, re0, tseq); mm_idx_bed_junc(mi, rid, re, re0, junc); mm_align_pair(km, opt, qe0 - qe, qseq, re0 - re, tseq, junc, mat, bw, opt->end_bonus, opt->zdrop, extra_flag|KSW_EZ_EXTZ_ONLY, ez); if (ez->n_cigar > 0) { mm_append_cigar(r, ez->n_cigar, ez->cigar); r->p->dp_score += ez->max; } re1 = re + (ez->reach_end? ez->mqe_t + 1 : ez->max_t + 1); qe1 = qe + (ez->reach_end? 
qe0 - qe : ez->max_q + 1); } assert(qe1 <= qlen); r->rs = rs1, r->re = re1; if (rev) r->qs = qlen - qe1, r->qe = qlen - qs1; else r->qs = qs1, r->qe = qe1; assert(re1 - rs1 <= re0 - rs0); if (r->p) { mm_idx_getseq(mi, rid, rs1, re1, tseq); mm_update_extra(r, &qseq0[r->rev][qs1], tseq, mat, opt->q, opt->e, opt->flag & MM_F_EQX); if (rev && r->p->trans_strand) r->p->trans_strand ^= 3; // flip to the read strand } kfree(km, tseq); kfree(km, junc); } static int mm_align1_inv(void *km, const mm_mapopt_t *opt, const mm_idx_t *mi, int qlen, uint8_t *qseq0[2], const mm_reg1_t *r1, const mm_reg1_t *r2, mm_reg1_t *r_inv, ksw_extz_t *ez) { int tl, ql, score, ret = 0, q_off, t_off; uint8_t *tseq, *qseq; int8_t mat[25]; void *qp; memset(r_inv, 0, sizeof(mm_reg1_t)); if (!(r1->split&1) || !(r2->split&2)) return 0; if (r1->id != r1->parent && r1->parent != MM_PARENT_TMP_PRI) return 0; if (r2->id != r2->parent && r2->parent != MM_PARENT_TMP_PRI) return 0; if (r1->rid != r2->rid || r1->rev != r2->rev) return 0; ql = r1->rev? r1->qs - r2->qe : r2->qs - r1->qe; tl = r2->rs - r1->re; if (ql < opt->min_chain_score || ql > opt->max_gap) return 0; if (tl < opt->min_chain_score || tl > opt->max_gap) return 0; ksw_gen_simple_mat(5, mat, opt->a, opt->b, opt->sc_ambi); tseq = (uint8_t*)kmalloc(km, tl); mm_idx_getseq(mi, r1->rid, r1->re, r2->rs, tseq); qseq = r1->rev? 
&qseq0[0][r2->qe] : &qseq0[1][qlen - r2->qs]; mm_seq_rev(ql, qseq); mm_seq_rev(tl, tseq); qp = ksw_ll_qinit(km, 2, ql, qseq, 5, mat); score = ksw_ll_i16(qp, tl, tseq, opt->q, opt->e, &q_off, &t_off); kfree(km, qp); mm_seq_rev(ql, qseq); mm_seq_rev(tl, tseq); if (score < opt->min_dp_max) goto end_align1_inv; q_off = ql - (q_off + 1), t_off = tl - (t_off + 1); mm_align_pair(km, opt, ql - q_off, qseq + q_off, tl - t_off, tseq + t_off, 0, mat, (int)(opt->bw * 1.5), -1, opt->zdrop, KSW_EZ_EXTZ_ONLY, ez); if (ez->n_cigar == 0) goto end_align1_inv; // should never be here mm_append_cigar(r_inv, ez->n_cigar, ez->cigar); r_inv->p->dp_score = ez->max; r_inv->id = -1; r_inv->parent = MM_PARENT_UNSET; r_inv->inv = 1; r_inv->rev = !r1->rev; r_inv->rid = r1->rid; r_inv->div = -1.0f; if (r_inv->rev == 0) { r_inv->qs = r2->qe + q_off; r_inv->qe = r_inv->qs + ez->max_q + 1; } else { r_inv->qe = r2->qs - q_off; r_inv->qs = r_inv->qe - (ez->max_q + 1); } r_inv->rs = r1->re + t_off; r_inv->re = r_inv->rs + ez->max_t + 1; mm_update_extra(r_inv, &qseq[q_off], &tseq[t_off], mat, opt->q, opt->e, opt->flag & MM_F_EQX); ret = 1; end_align1_inv: kfree(km, tseq); return ret; } static inline mm_reg1_t *mm_insert_reg(const mm_reg1_t *r, int i, int *n_regs, mm_reg1_t *regs) { regs = (mm_reg1_t*)realloc(regs, (*n_regs + 1) * sizeof(mm_reg1_t)); if (i + 1 != *n_regs) memmove(&regs[i + 2], &regs[i + 1], sizeof(mm_reg1_t) * (*n_regs - i - 1)); regs[i + 1] = *r; ++*n_regs; return regs; } mm_reg1_t *mm_align_skeleton(void *km, const mm_mapopt_t *opt, const mm_idx_t *mi, int qlen, const char *qstr, int *n_regs_, mm_reg1_t *regs, mm128_t *a) { extern unsigned char seq_nt4_table[256]; int32_t i, n_regs = *n_regs_, n_a; uint8_t *qseq0[2]; ksw_extz_t ez; // encode the query sequence qseq0[0] = (uint8_t*)kmalloc(km, qlen * 2); qseq0[1] = qseq0[0] + qlen; for (i = 0; i < qlen; ++i) { qseq0[0][i] = seq_nt4_table[(uint8_t)qstr[i]]; qseq0[1][qlen - 1 - i] = qseq0[0][i] < 4? 
3 - qseq0[0][i] : 4; } // align through seed hits n_a = mm_squeeze_a(km, n_regs, regs, a); memset(&ez, 0, sizeof(ksw_extz_t)); for (i = 0; i < n_regs; ++i) { mm_reg1_t r2; if ((opt->flag&MM_F_SPLICE) && (opt->flag&MM_F_SPLICE_FOR) && (opt->flag&MM_F_SPLICE_REV)) { // then do two rounds of alignments for both strands mm_reg1_t s[2], s2[2]; int which, trans_strand; s[0] = s[1] = regs[i]; mm_align1(km, opt, mi, qlen, qseq0, &s[0], &s2[0], n_a, a, &ez, MM_F_SPLICE_FOR); mm_align1(km, opt, mi, qlen, qseq0, &s[1], &s2[1], n_a, a, &ez, MM_F_SPLICE_REV); if (s[0].p->dp_score > s[1].p->dp_score) which = 0, trans_strand = 1; else if (s[0].p->dp_score < s[1].p->dp_score) which = 1, trans_strand = 2; else trans_strand = 3, which = (qlen + s[0].p->dp_score) & 1; // randomly choose a strand, effectively if (which == 0) { regs[i] = s[0], r2 = s2[0]; free(s[1].p); } else { regs[i] = s[1], r2 = s2[1]; free(s[0].p); } regs[i].p->trans_strand = trans_strand; } else { // one round of alignment mm_align1(km, opt, mi, qlen, qseq0, &regs[i], &r2, n_a, a, &ez, opt->flag); if (opt->flag&MM_F_SPLICE) regs[i].p->trans_strand = opt->flag&MM_F_SPLICE_FOR? 1 : 2; } if (r2.cnt > 0) regs = mm_insert_reg(&r2, i, &n_regs, regs); if (i > 0 && regs[i].split_inv) { if (mm_align1_inv(km, opt, mi, qlen, qseq0, &regs[i-1], &regs[i], &r2, &ez)) { regs = mm_insert_reg(&r2, i, &n_regs, regs); ++i; // skip the inserted INV alignment } } } *n_regs_ = n_regs; kfree(km, qseq0[0]); kfree(km, ez.cigar); mm_filter_regs(opt, qlen, n_regs_, regs); mm_hit_sort(km, n_regs_, regs, opt->alt_drop); return regs; }
{ "language": "C" }
/*
 * Copyright 1995, Russell King.
 * Various bits and pieces copyrights include:
 *  Linus Torvalds (test_bit).
 *
 * Copyright (C) 2017 Andes Technology Corporation
 * Rick Chen, Andes Technology Corporation <rick@andestech.com>
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 *
 * Please note that the code in this file should never be included
 * from user space.  Many of these are not implemented in assembler
 * since they would be too costly.  Also, they require priviledged
 * instructions (which are not available from user mode) to ensure
 * that they are atomic.
 */

#ifndef __ASM_RISCV_BITOPS_H
#define __ASM_RISCV_BITOPS_H

#ifdef __KERNEL__

#include <asm/system.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/__ffs.h>

/* No SMP barriers needed around the non-atomic bitops below. */
#define smp_mb__before_clear_bit()	do { } while (0)
#define smp_mb__after_clear_bit()	do { } while (0)

/*
 * Function prototypes to keep gcc -Wall happy.
 *
 * All of the helpers below operate on an array of 32-bit words:
 * bit 'nr' lives in 32-bit word (nr >> 5), at position (nr & 0x1f).
 * None of them are atomic.
 */

/* Non-atomically set bit 'nr' in the bitmap at 'addr'. */
static inline void __set_bit(int nr, void *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

#define PLATFORM__SET_BIT

/* Non-atomically clear bit 'nr' in the bitmap at 'addr'. */
static inline void __clear_bit(int nr, void *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a &= ~mask;
}

#define PLATFORM__CLEAR_BIT

/* Non-atomically toggle bit 'nr' in the bitmap at 'addr'. */
static inline void __change_bit(int nr, void *addr)
{
	/*
	 * Use the same 32-bit word stride as __set_bit()/__clear_bit().
	 * The previous version indexed an 'unsigned long *' by (nr >> 5),
	 * which advances 64 bits per 32-bit word index on RV64 and so
	 * toggled the wrong word for nr >= 32.
	 */
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a ^= mask;
}

/* Set bit 'nr' and return its previous value (non-atomic). */
static inline int __test_and_set_bit(int nr, void *addr)
{
	int mask, retval;
	unsigned int *a = (unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;
	return retval;
}

/* Clear bit 'nr' and return its previous value (non-atomic). */
static inline int __test_and_clear_bit(int nr, void *addr)
{
	int mask, retval;
	unsigned int *a = (unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	return retval;
}

/* Toggle bit 'nr' and return its previous value (non-atomic). */
static inline int __test_and_change_bit(int nr, void *addr)
{
	int mask, retval;
	unsigned int *a = (unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;
	return retval;
}

/*
 * This routine doesn't need to be atomic.
 */
static inline int test_bit(int nr, const void *addr)
{
	return ((unsigned char *)addr)[nr >> 3] & (1U << (nr & 7));
}

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 *
 * Implemented as __ffs() of the complement so that it is correct for
 * the native 'unsigned long' width; the previous open-coded binary
 * search only examined the low 32 bits and returned a bogus result on
 * RV64 when the first zero bit was in bits 32..63.
 */
static inline unsigned long ffz(unsigned long word)
{
	return __ffs(~word);
}

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */

/*
 * redefined in include/linux/bitops.h
 * #define ffs(x) generic_ffs(x)
 */

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

#define ext2_set_bit			test_and_set_bit
#define ext2_clear_bit			test_and_clear_bit
#define ext2_test_bit			test_bit
#define ext2_find_first_zero_bit	find_first_zero_bit
#define ext2_find_next_zero_bit		find_next_zero_bit

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr, addr)	test_and_set_bit(nr, addr)
#define minix_set_bit(nr, addr)			set_bit(nr, addr)
#define minix_test_and_clear_bit(nr, addr)	test_and_clear_bit(nr, addr)
#define minix_test_bit(nr, addr)		test_bit(nr, addr)
#define minix_find_first_zero_bit(addr, size)	find_first_zero_bit(addr, size)

#endif /* __KERNEL__ */

#endif /* __ASM_RISCV_BITOPS_H */
{ "language": "C" }
/*
 * vmx_tsc_adjust_test
 *
 * Copyright (C) 2018, Google LLC.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 *
 * IA32_TSC_ADJUST test
 *
 * According to the SDM, "if an execution of WRMSR to the
 * IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the TSC,
 * the logical processor also adds (or subtracts) value X from the
 * IA32_TSC_ADJUST MSR.
 *
 * Note that when L1 doesn't intercept writes to IA32_TSC, a
 * WRMSR(IA32_TSC) from L2 sets L1's TSC value, not L2's perceived TSC
 * value.
 *
 * This test verifies that this unusual case is handled correctly.
 */

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#include <inttypes.h>	/* PRId64, for portable int64_t formatting */
#include <string.h>
#include <sys/ioctl.h>

#include "kselftest.h"

#ifndef MSR_IA32_TSC_ADJUST
#define MSR_IA32_TSC_ADJUST 0x3b
#endif

#define PAGE_SIZE	4096
#define VCPU_ID		5

#define TSC_ADJUST_VALUE (1ll << 32)
#define TSC_OFFSET_VALUE -(1ll << 48)

enum {
	PORT_ABORT = 0x1000,
	PORT_REPORT,
	PORT_DONE,
};

enum {
	VMXON_PAGE = 0,
	VMCS_PAGE,
	MSR_BITMAP_PAGE,

	NUM_VMX_PAGES,
};

struct kvm_single_msr {
	struct kvm_msrs header;
	struct kvm_msr_entry entry;
} __attribute__((packed));

/* The virtual machine object. */
static struct kvm_vm *vm;

/*
 * Read IA32_TSC_ADJUST in the guest, report it to the host via
 * GUEST_SYNC, and assert it has not grown beyond 'max'.
 */
static void check_ia32_tsc_adjust(int64_t max)
{
	int64_t adjust;

	adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	GUEST_SYNC(adjust);
	GUEST_ASSERT(adjust <= max);
}

/* L2 guest: write IA32_TSC and verify the effect lands in L1's TSC_ADJUST. */
static void l2_guest_code(void)
{
	uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;

	wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

	/* Exit to L1 */
	__asm__ __volatile__("vmcall");
}

/*
 * L1 guest: adjust its own TSC, then launch L2 (first with a bogus CR3 to
 * exercise a failed VM-entry, then for real) and verify IA32_TSC_ADJUST
 * tracks the WRMSR(IA32_TSC) performed by L2.
 */
static void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint32_t control;
	uintptr_t save_cr3;

	GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE);
	wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);

	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));

	/* Prepare the VMCS for L2 execution. */
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
	control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETING;
	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
	vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);

	/* Jump into L2.  First, test failure to load guest CR3.  */
	save_cr3 = vmreadz(GUEST_CR3);
	vmwrite(GUEST_CR3, -1ull);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) ==
		     (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE));
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
	vmwrite(GUEST_CR3, save_cr3);

	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

	GUEST_DONE();
}

/* Pretty-print the IA32_TSC_ADJUST value reported by the guest. */
void report(int64_t val)
{
	/*
	 * 'val' is int64_t, so use PRId64 rather than %ld, which is
	 * wrong on 32-bit hosts where 'long' is 32 bits.
	 */
	printf("IA32_TSC_ADJUST is %" PRId64 " (%" PRId64 " * TSC_ADJUST_VALUE + %" PRId64 ").\n",
	       val, (int64_t)(val / TSC_ADJUST_VALUE),
	       (int64_t)(val % TSC_ADJUST_VALUE));
}

int main(int argc, char *argv[])
{
	struct vmx_pages *vmx_pages;
	vm_vaddr_t vmx_pages_gva;
	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);

	if (!(entry->ecx & CPUID_VMX)) {
		fprintf(stderr, "nested VMX not enabled, skipping test\n");
		exit(KSFT_SKIP);
	}

	vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

	/* Allocate VMX pages and shared descriptors (vmx_pages). */
	vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);

	for (;;) {
		volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
		struct ucall uc;

		vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
			    run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_ASSERT(false, "%s", (const char *)uc.args[0]);
			/* NOT REACHED */
		case UCALL_SYNC:
			report(uc.args[1]);
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
		}
	}

done:
	/*
	 * The free must live under the 'done:' label: the loop above only
	 * exits via 'goto done', so placing kvm_vm_free() before the label
	 * (as the original did) made it unreachable and leaked the VM.
	 */
	kvm_vm_free(vm);
	return 0;
}
{ "language": "C" }
/* * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/of_address.h> #include "msm_drv.h" #include "msm_gpu.h" #include "msm_kms.h" #include "sde_wb.h" #define TEARDOWN_DEADLOCK_RETRY_MAX 5 #include "msm_gem.h" #include "msm_mmu.h" static void msm_fb_output_poll_changed(struct drm_device *dev) { struct msm_drm_private *priv = dev->dev_private; if (priv->fbdev) drm_fb_helper_hotplug_event(priv->fbdev); } static const struct drm_mode_config_funcs mode_config_funcs = { .fb_create = msm_framebuffer_create, .output_poll_changed = msm_fb_output_poll_changed, .atomic_check = drm_atomic_helper_check, .atomic_commit = msm_atomic_commit, }; #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING static bool reglog = false; MODULE_PARM_DESC(reglog, "Enable register read/write logging"); module_param(reglog, bool, 0600); #else #define reglog 0 #endif #ifdef CONFIG_DRM_FBDEV_EMULATION static bool fbdev = true; MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer"); module_param(fbdev, bool, 0600); #endif static char *vram = "16m"; MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU"); module_param(vram, charp, 0); /* * Util/helpers: */ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, const char *dbgname) { struct resource *res; unsigned long size; void __iomem *ptr; if (name) res = 
platform_get_resource_byname(pdev, IORESOURCE_MEM, name); else res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "failed to get memory resource: %s\n", name); return ERR_PTR(-EINVAL); } size = resource_size(res); ptr = devm_ioremap_nocache(&pdev->dev, res->start, size); if (!ptr) { dev_err(&pdev->dev, "failed to ioremap: %s\n", name); return ERR_PTR(-ENOMEM); } if (reglog) printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size); return ptr; } void msm_iounmap(struct platform_device *pdev, void __iomem *addr) { devm_iounmap(&pdev->dev, addr); } void msm_writel(u32 data, void __iomem *addr) { if (reglog) printk(KERN_DEBUG "IO:W %p %08x\n", addr, data); writel(data, addr); } u32 msm_readl(const void __iomem *addr) { u32 val = readl(addr); if (reglog) printk(KERN_ERR "IO:R %p %08x\n", addr, val); return val; } struct vblank_event { struct list_head node; int crtc_id; bool enable; }; static void vblank_ctrl_worker(struct kthread_work *work) { struct msm_vblank_ctrl *vbl_ctrl = container_of(work, struct msm_vblank_ctrl, work); struct msm_drm_private *priv = container_of(vbl_ctrl, struct msm_drm_private, vblank_ctrl); struct msm_kms *kms = priv->kms; struct vblank_event *vbl_ev, *tmp; unsigned long flags; spin_lock_irqsave(&vbl_ctrl->lock, flags); list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) { list_del(&vbl_ev->node); spin_unlock_irqrestore(&vbl_ctrl->lock, flags); if (vbl_ev->enable) kms->funcs->enable_vblank(kms, priv->crtcs[vbl_ev->crtc_id]); else kms->funcs->disable_vblank(kms, priv->crtcs[vbl_ev->crtc_id]); kfree(vbl_ev); spin_lock_irqsave(&vbl_ctrl->lock, flags); } spin_unlock_irqrestore(&vbl_ctrl->lock, flags); } static int vblank_ctrl_queue_work(struct msm_drm_private *priv, int crtc_id, bool enable) { struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl; struct vblank_event *vbl_ev; unsigned long flags; vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC); if (!vbl_ev) return -ENOMEM; vbl_ev->crtc_id = 
crtc_id; vbl_ev->enable = enable; spin_lock_irqsave(&vbl_ctrl->lock, flags); list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list); spin_unlock_irqrestore(&vbl_ctrl->lock, flags); queue_kthread_work(&priv->disp_thread[crtc_id].worker, &vbl_ctrl->work); return 0; } /* * DRM operations: */ static int msm_unload(struct drm_device *dev) { struct msm_drm_private *priv = dev->dev_private; struct platform_device *pdev = dev->platformdev; struct msm_kms *kms = priv->kms; struct msm_gpu *gpu = priv->gpu; struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl; struct vblank_event *vbl_ev, *tmp; int i; /* We must cancel and cleanup any pending vblank enable/disable * work before drm_irq_uninstall() to avoid work re-enabling an * irq after uninstall has disabled it. */ flush_kthread_work(&vbl_ctrl->work); list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) { list_del(&vbl_ev->node); kfree(vbl_ev); } /* clean up display commit worker threads */ for (i = 0; i < priv->num_crtcs; i++) { if (priv->disp_thread[i].thread) { flush_kthread_worker(&priv->disp_thread[i].worker); kthread_stop(priv->disp_thread[i].thread); priv->disp_thread[i].thread = NULL; } } drm_kms_helper_poll_fini(dev); drm_mode_config_cleanup(dev); drm_vblank_cleanup(dev); pm_runtime_get_sync(dev->dev); drm_irq_uninstall(dev); pm_runtime_put_sync(dev->dev); flush_workqueue(priv->wq); destroy_workqueue(priv->wq); if (kms) { pm_runtime_disable(dev->dev); kms->funcs->destroy(kms); } if (gpu) { mutex_lock(&dev->struct_mutex); gpu->funcs->pm_suspend(gpu); mutex_unlock(&dev->struct_mutex); gpu->funcs->destroy(gpu); } if (priv->vram.paddr) { DEFINE_DMA_ATTRS(attrs); dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); drm_mm_takedown(&priv->vram.mm); dma_free_attrs(dev->dev, priv->vram.size, NULL, priv->vram.paddr, &attrs); } sde_evtlog_destroy(); sde_power_client_destroy(&priv->phandle, priv->pclient); sde_power_resource_deinit(pdev, &priv->phandle); component_unbind_all(dev->dev, dev); dev->dev_private = NULL; 
kfree(priv); return 0; } #define KMS_MDP4 0 #define KMS_MDP5 1 #define KMS_SDE 2 static int get_mdp_ver(struct platform_device *pdev) { #ifdef CONFIG_OF static const struct of_device_id match_types[] = { { .compatible = "qcom,mdss_mdp", .data = (void *)KMS_MDP5, }, { .compatible = "qcom,sde-kms", .data = (void *)KMS_SDE, /* end node */ } }; struct device *dev = &pdev->dev; const struct of_device_id *match; match = of_match_node(match_types, dev->of_node); if (match) return (int)(unsigned long)match->data; #endif return KMS_MDP4; } static int msm_init_vram(struct drm_device *dev) { struct msm_drm_private *priv = dev->dev_private; unsigned long size = 0; int ret = 0; #ifdef CONFIG_OF /* In the device-tree world, we could have a 'memory-region' * phandle, which gives us a link to our "vram". Allocating * is all nicely abstracted behind the dma api, but we need * to know the entire size to allocate it all in one go. There * are two cases: * 1) device with no IOMMU, in which case we need exclusive * access to a VRAM carveout big enough for all gpu * buffers * 2) device with IOMMU, but where the bootloader puts up * a splash screen. In this case, the VRAM carveout * need only be large enough for fbdev fb. But we need * exclusive access to the buffer to avoid the kernel * using those pages for other purposes (which appears * as corruption on screen before we have a chance to * load and do initial modeset) */ struct device_node *node; node = of_parse_phandle(dev->dev->of_node, "memory-region", 0); if (node) { struct resource r; ret = of_address_to_resource(node, 0, &r); if (ret) return ret; size = r.end - r.start; DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start); } else #endif /* if we have no IOMMU, then we need to use carveout allocator. 
* Grab the entire CMA chunk carved out in early startup in * mach-msm: */ if (!iommu_present(&platform_bus_type)) { DRM_INFO("using %s VRAM carveout\n", vram); size = memparse(vram, NULL); } if (size) { DEFINE_DMA_ATTRS(attrs); void *p; priv->vram.size = size; drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1); dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); /* note that for no-kernel-mapping, the vaddr returned * is bogus, but non-null if allocation succeeded: */ p = dma_alloc_attrs(dev->dev, size, &priv->vram.paddr, GFP_KERNEL, &attrs); if (!p) { dev_err(dev->dev, "failed to allocate VRAM\n"); priv->vram.paddr = 0; return -ENOMEM; } dev_info(dev->dev, "VRAM: %08x->%08x\n", (uint32_t)priv->vram.paddr, (uint32_t)(priv->vram.paddr + size)); } return ret; } #ifdef CONFIG_OF static int msm_component_bind_all(struct device *dev, struct drm_device *drm_dev) { int ret; ret = component_bind_all(dev, drm_dev); if (ret) DRM_ERROR("component_bind_all failed: %d\n", ret); return ret; } #else static int msm_component_bind_all(struct device *dev, struct drm_device *drm_dev) { return 0; } #endif static int msm_load(struct drm_device *dev, unsigned long flags) { struct platform_device *pdev = dev->platformdev; struct msm_drm_private *priv; struct msm_kms *kms; int ret, i; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { dev_err(dev->dev, "failed to allocate private data\n"); return -ENOMEM; } dev->dev_private = priv; priv->wq = alloc_ordered_workqueue("msm_drm", 0); init_waitqueue_head(&priv->fence_event); init_waitqueue_head(&priv->pending_crtcs_event); INIT_LIST_HEAD(&priv->client_event_list); INIT_LIST_HEAD(&priv->inactive_list); INIT_LIST_HEAD(&priv->fence_cbs); INIT_LIST_HEAD(&priv->vblank_ctrl.event_list); init_kthread_work(&priv->vblank_ctrl.work, vblank_ctrl_worker); spin_lock_init(&priv->vblank_ctrl.lock); drm_mode_config_init(dev); platform_set_drvdata(pdev, dev); ret = sde_power_resource_init(pdev, 
			&priv->phandle);
	if (ret) {
		pr_err("sde power resource init failed\n");
		goto fail;
	}

	priv->pclient = sde_power_client_create(&priv->phandle, "sde");
	if (IS_ERR_OR_NULL(priv->pclient)) {
		pr_err("sde power client create failed\n");
		ret = -EINVAL;
		goto fail;
	}

	/* Bind all our sub-components: */
	ret = msm_component_bind_all(dev->dev, dev);
	if (ret)
		/* NOTE(review): returning here bypasses the fail: path, which
		 * leaks priv, the workqueue and the power client.  A plain
		 * "goto fail" would, however, run component_unbind_all()
		 * after an already-failed bind -- confirm that is safe before
		 * changing this.
		 */
		return ret;

	ret = msm_init_vram(dev);
	if (ret)
		goto fail;

	ret = sde_evtlog_init(dev->primary->debugfs_root);
	if (ret) {
		dev_err(dev->dev, "failed to init evtlog: %d\n", ret);
		goto fail;
	}

	/* Instantiate the KMS backend selected by the DT compatible. */
	switch (get_mdp_ver(pdev)) {
	case KMS_MDP4:
		kms = mdp4_kms_init(dev);
		break;
	case KMS_MDP5:
		kms = mdp5_kms_init(dev);
		break;
	case KMS_SDE:
		kms = sde_kms_init(dev);
		break;
	default:
		kms = ERR_PTR(-ENODEV);
		break;
	}

	if (IS_ERR(kms)) {
		/*
		 * NOTE: once we have GPU support, having no kms should not
		 * be considered fatal.. ideally we would still support gpu
		 * and (for example) use dmabuf/prime to share buffers with
		 * imx drm driver on iMX5
		 */
		priv->kms = NULL;
		dev_err(dev->dev, "failed to load kms\n");
		ret = PTR_ERR(kms);
		goto fail;
	}
	priv->kms = kms;
	pm_runtime_enable(dev->dev);

	if (kms && kms->funcs && kms->funcs->hw_init) {
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			dev_err(dev->dev, "kms hw init failed: %d\n", ret);
			goto fail;
		}
	}

	/* initialize commit thread structure: one kthread worker per CRTC */
	for (i = 0; i < priv->num_crtcs; i++) {
		priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
		init_kthread_worker(&priv->disp_thread[i].worker);
		priv->disp_thread[i].dev = dev;
		priv->disp_thread[i].thread =
			kthread_run(kthread_worker_fn,
				&priv->disp_thread[i].worker,
				"crtc_commit:%d",
				priv->disp_thread[i].crtc_id);

		if (IS_ERR(priv->disp_thread[i].thread)) {
			dev_err(dev->dev, "failed to create kthread\n");
			priv->disp_thread[i].thread = NULL;
			/* clean up previously created threads if any */
			for (i -= 1; i >= 0; i--) {
				kthread_stop(priv->disp_thread[i].thread);
				priv->disp_thread[i].thread = NULL;
			}
			goto fail;
		}
	}

	dev->mode_config.funcs = &mode_config_funcs;

	ret =
drm_vblank_init(dev, priv->num_crtcs); if (ret < 0) { dev_err(dev->dev, "failed to initialize vblank\n"); goto fail; } pm_runtime_get_sync(dev->dev); ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0)); pm_runtime_put_sync(dev->dev); if (ret < 0) { dev_err(dev->dev, "failed to install IRQ handler\n"); goto fail; } drm_mode_config_reset(dev); #ifdef CONFIG_DRM_FBDEV_EMULATION if (fbdev) priv->fbdev = msm_fbdev_init(dev); #endif ret = msm_debugfs_late_init(dev); if (ret) goto fail; /* perform subdriver post initialization */ if (kms && kms->funcs && kms->funcs->postinit) { ret = kms->funcs->postinit(kms); if (ret) { dev_err(dev->dev, "kms post init failed: %d\n", ret); goto fail; } } drm_kms_helper_poll_init(dev); return 0; fail: msm_unload(dev); return ret; } #ifdef CONFIG_QCOM_KGSL static void load_gpu(struct drm_device *dev) { } #else static void load_gpu(struct drm_device *dev) { static DEFINE_MUTEX(init_lock); struct msm_drm_private *priv = dev->dev_private; mutex_lock(&init_lock); if (!priv->gpu) priv->gpu = adreno_load_gpu(dev); mutex_unlock(&init_lock); } #endif static struct msm_file_private *setup_pagetable(struct msm_drm_private *priv) { struct msm_file_private *ctx; if (!priv || !priv->gpu) return NULL; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return ERR_PTR(-ENOMEM); ctx->aspace = msm_gem_address_space_create_instance( priv->gpu->aspace->mmu, "gpu", 0x100000000, 0x1ffffffff); if (IS_ERR(ctx->aspace)) { int ret = PTR_ERR(ctx->aspace); /* * If dynamic domains are not supported, everybody uses the * same pagetable */ if (ret != -EOPNOTSUPP) { kfree(ctx); return ERR_PTR(ret); } ctx->aspace = priv->gpu->aspace; } ctx->aspace->mmu->funcs->attach(ctx->aspace->mmu, NULL, 0); return ctx; } static int msm_open(struct drm_device *dev, struct drm_file *file) { struct msm_file_private *ctx = NULL; struct msm_drm_private *priv; struct msm_kms *kms; if (!dev || !dev->dev_private) return -ENODEV; priv = dev->dev_private; /* For now, load gpu on 
open.. to avoid the requirement of having * firmware in the initrd. */ load_gpu(dev); ctx = setup_pagetable(priv); if (IS_ERR(ctx)) return PTR_ERR(ctx); INIT_LIST_HEAD(&ctx->counters); file->driver_priv = ctx; kms = priv->kms; if (kms && kms->funcs && kms->funcs->postopen) kms->funcs->postopen(kms, file); return 0; } static void msm_preclose(struct drm_device *dev, struct drm_file *file) { struct msm_drm_private *priv = dev->dev_private; struct msm_kms *kms = priv->kms; if (kms && kms->funcs && kms->funcs->preclose) kms->funcs->preclose(kms, file); } static void msm_postclose(struct drm_device *dev, struct drm_file *file) { struct msm_drm_private *priv = dev->dev_private; struct msm_file_private *ctx = file->driver_priv; struct msm_kms *kms = priv->kms; if (kms && kms->funcs && kms->funcs->postclose) kms->funcs->postclose(kms, file); if (priv->gpu) msm_gpu_cleanup_counters(priv->gpu, ctx); mutex_lock(&dev->struct_mutex); if (ctx && ctx->aspace && ctx->aspace != priv->gpu->aspace) { ctx->aspace->mmu->funcs->detach(ctx->aspace->mmu); msm_gem_address_space_put(ctx->aspace); } mutex_unlock(&dev->struct_mutex); kfree(ctx); } static int msm_disable_all_modes_commit( struct drm_device *dev, struct drm_atomic_state *state) { struct drm_plane *plane; struct drm_crtc *crtc; unsigned plane_mask; int ret; plane_mask = 0; drm_for_each_plane(plane, dev) { struct drm_plane_state *plane_state; plane_state = drm_atomic_get_plane_state(state, plane); if (IS_ERR(plane_state)) { ret = PTR_ERR(plane_state); goto fail; } plane_state->rotation = BIT(DRM_ROTATE_0); plane->old_fb = plane->fb; plane_mask |= 1 << drm_plane_index(plane); /* disable non-primary: */ if (plane->type == DRM_PLANE_TYPE_PRIMARY) continue; DRM_DEBUG("disabling plane %d\n", plane->base.id); ret = __drm_atomic_helper_disable_plane(plane, plane_state); if (ret != 0) DRM_ERROR("error %d disabling plane %d\n", ret, plane->base.id); } drm_for_each_crtc(crtc, dev) { struct drm_mode_set mode_set; memset(&mode_set, 0, 
sizeof(struct drm_mode_set)); mode_set.crtc = crtc; DRM_DEBUG("disabling crtc %d\n", crtc->base.id); ret = __drm_atomic_helper_set_config(&mode_set, state); if (ret != 0) DRM_ERROR("error %d disabling crtc %d\n", ret, crtc->base.id); } DRM_DEBUG("committing disables\n"); ret = drm_atomic_commit(state); fail: drm_atomic_clean_old_fb(dev, plane_mask, ret); DRM_DEBUG("disables result %d\n", ret); return ret; } /** * msm_clear_all_modes - disables all planes and crtcs via an atomic commit * based on restore_fbdev_mode_atomic in drm_fb_helper.c * @dev: device pointer * @Return: 0 on success, otherwise -error */ static int msm_disable_all_modes(struct drm_device *dev) { struct drm_atomic_state *state; int ret, i; state = drm_atomic_state_alloc(dev); if (!state) return -ENOMEM; state->acquire_ctx = dev->mode_config.acquire_ctx; for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) { ret = msm_disable_all_modes_commit(dev, state); if (ret != -EDEADLK) break; drm_atomic_state_clear(state); drm_atomic_legacy_backoff(state); } /* on successful atomic commit state ownership transfers to framework */ if (ret != 0) drm_atomic_state_free(state); return ret; } static void msm_lastclose(struct drm_device *dev) { struct msm_drm_private *priv = dev->dev_private; struct msm_kms *kms = priv->kms; int i; /* * clean up vblank disable immediately as this is the last close. 
	 */
	for (i = 0; i < dev->num_crtcs; i++) {
		struct drm_vblank_crtc *vblank = &dev->vblank[i];
		struct timer_list *disable_timer = &vblank->disable_timer;

		/* fire any pending vblank-disable timer immediately */
		if (del_timer_sync(disable_timer))
			disable_timer->function(disable_timer->data);
	}

	/* wait for pending vblank requests to be executed by worker thread */
	flush_workqueue(priv->wq);

	if (priv->fbdev) {
		drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
	} else {
		drm_modeset_lock_all(dev);
		msm_disable_all_modes(dev);
		drm_modeset_unlock_all(dev);
		if (kms && kms->funcs && kms->funcs->lastclose)
			kms->funcs->lastclose(kms);
	}
}

/* Top-level interrupt handler: delegate entirely to the KMS backend. */
static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);
	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);
	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);
	return kms->funcs->irq_postinstall(kms);
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);
	kms->funcs->irq_uninstall(kms);
}

/* Queue a vblank-enable request to the CRTC's commit worker thread. */
static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return -ENXIO;
	DBG("dev=%p, crtc=%u", dev, pipe);
	return vblank_ctrl_queue_work(priv, pipe, true);
}

/* Queue a vblank-disable request; failures are ignored (best effort). */
static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return;
	DBG("dev=%p, crtc=%u", dev, pipe);
	vblank_ctrl_queue_work(priv, pipe, false);
}

/*
 * DRM debugfs:
 */

#ifdef CONFIG_DEBUG_FS
static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
{
	struct
msm_drm_private *priv = dev->dev_private; struct msm_gpu *gpu = priv->gpu; if (gpu) { seq_printf(m, "%s Status:\n", gpu->name); gpu->funcs->show(gpu, m); } return 0; } static int msm_snapshot_show(struct drm_device *dev, struct seq_file *m) { struct msm_drm_private *priv = dev->dev_private; return msm_snapshot_write(priv->gpu, m); } static int msm_gem_show(struct drm_device *dev, struct seq_file *m) { struct msm_drm_private *priv = dev->dev_private; struct msm_gpu *gpu = priv->gpu; if (gpu) { seq_printf(m, "Active Objects (%s):\n", gpu->name); msm_gem_describe_objects(&gpu->active_list, m); } seq_printf(m, "Inactive Objects:\n"); msm_gem_describe_objects(&priv->inactive_list, m); return 0; } static int msm_mm_show(struct drm_device *dev, struct seq_file *m) { return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm); } static int msm_fb_show(struct drm_device *dev, struct seq_file *m) { struct msm_drm_private *priv = dev->dev_private; struct drm_framebuffer *fb, *fbdev_fb = NULL; if (priv->fbdev) { seq_printf(m, "fbcon "); fbdev_fb = priv->fbdev->fb; msm_framebuffer_describe(fbdev_fb, m); } mutex_lock(&dev->mode_config.fb_lock); list_for_each_entry(fb, &dev->mode_config.fb_list, head) { if (fb == fbdev_fb) continue; seq_printf(m, "user "); msm_framebuffer_describe(fb, m); } mutex_unlock(&dev->mode_config.fb_lock); return 0; } static int show_locked(struct seq_file *m, void *arg) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; int (*show)(struct drm_device *dev, struct seq_file *m) = node->info_ent->data; int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; ret = show(dev, m); mutex_unlock(&dev->struct_mutex); return ret; } static int show_unlocked(struct seq_file *m, void *arg) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; int (*show)(struct drm_device *dev, struct seq_file *m) = 
			node->info_ent->data;

	/* no struct_mutex: for show() callbacks that take their own locks */
	return show(dev, m);
}

static struct drm_info_list msm_debugfs_list[] = {
		{"gpu", show_locked, 0, msm_gpu_show},
		{"gem", show_locked, 0, msm_gem_show},
		{ "mm", show_locked, 0, msm_mm_show },
		{ "fb", show_locked, 0, msm_fb_show },
		{ "snapshot", show_unlocked, 0, msm_snapshot_show },
};

/* Install the rd and perf debugfs files on one DRM minor (NULL is a no-op). */
static int late_init_minor(struct drm_minor *minor)
{
	int ret;

	if (!minor)
		return 0;

	ret = msm_rd_debugfs_init(minor);
	if (ret) {
		dev_err(minor->dev->dev, "could not install rd debugfs\n");
		return ret;
	}

	ret = msm_perf_debugfs_init(minor);
	if (ret) {
		dev_err(minor->dev->dev, "could not install perf debugfs\n");
		return ret;
	}

	return 0;
}

int msm_debugfs_late_init(struct drm_device *dev)
{
	int ret;

	ret = late_init_minor(dev->primary);
	if (ret)
		return ret;
	ret = late_init_minor(dev->render);
	if (ret)
		return ret;
	ret = late_init_minor(dev->control);
	return ret;
}

static int msm_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	int ret;

	ret = drm_debugfs_create_files(msm_debugfs_list,
			ARRAY_SIZE(msm_debugfs_list),
			minor->debugfs_root, minor);

	if (ret) {
		dev_err(dev->dev, "could not install msm_debugfs_list\n");
		return ret;
	}

	return 0;
}

static void msm_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(msm_debugfs_list,
			ARRAY_SIZE(msm_debugfs_list), minor);
	/* rd/perf state lives in dev_private; skip if already torn down */
	if (!minor->dev->dev_private)
		return;
	msm_rd_debugfs_cleanup(minor);
	msm_perf_debugfs_cleanup(minor);
}
#endif

/*
 * Fences:
 */

/*
 * Wait for @fence to retire on its ring, or until *timeout (an absolute
 * ktime) expires.  A NULL @timeout means "poll": return 0 if completed,
 * -EBUSY otherwise.
 */
int msm_wait_fence(struct drm_device *dev, uint32_t fence,
		ktime_t *timeout, bool interruptible)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;
	int index = FENCE_RING(fence);
	uint32_t submitted;
	int ret;

	if (!gpu)
		return -ENXIO;

	/* NOTE(review): the first comparison is ">" where ">=" looks
	 * intended (msm_update_fence uses ">="); it is harmless only while
	 * gpu->nr_rings <= MSM_GPU_MAX_RINGS -- confirm.
	 */
	if (index > MSM_GPU_MAX_RINGS || index >= gpu->nr_rings ||
		!gpu->rb[index])
		return -EINVAL;

	submitted = gpu->funcs->submitted_fence(gpu, gpu->rb[index]);

	if (fence > submitted) {
		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
			fence, submitted);
		return -EINVAL;
	}

	if (!timeout) {
		/*
no-wait: */ ret = fence_completed(dev, fence) ? 0 : -EBUSY; } else { ktime_t now = ktime_get(); unsigned long remaining_jiffies; if (ktime_compare(*timeout, now) < 0) { remaining_jiffies = 0; } else { ktime_t rem = ktime_sub(*timeout, now); struct timespec ts = ktime_to_timespec(rem); remaining_jiffies = timespec_to_jiffies(&ts); } if (interruptible) ret = wait_event_interruptible_timeout(priv->fence_event, fence_completed(dev, fence), remaining_jiffies); else ret = wait_event_timeout(priv->fence_event, fence_completed(dev, fence), remaining_jiffies); if (ret == 0) { DBG("timeout waiting for fence: %u (completed: %u)", fence, priv->completed_fence[index]); ret = -ETIMEDOUT; } else if (ret != -ERESTARTSYS) { ret = 0; } } return ret; } int msm_queue_fence_cb(struct drm_device *dev, struct msm_fence_cb *cb, uint32_t fence) { struct msm_drm_private *priv = dev->dev_private; int index = FENCE_RING(fence); int ret = 0; mutex_lock(&dev->struct_mutex); if (!list_empty(&cb->work.entry)) { ret = -EINVAL; } else if (fence > priv->completed_fence[index]) { cb->fence = fence; list_add_tail(&cb->work.entry, &priv->fence_cbs); } else { queue_work(priv->wq, &cb->work); } mutex_unlock(&dev->struct_mutex); return ret; } /* called from workqueue */ void msm_update_fence(struct drm_device *dev, uint32_t fence) { struct msm_drm_private *priv = dev->dev_private; struct msm_fence_cb *cb, *tmp; int index = FENCE_RING(fence); if (index >= MSM_GPU_MAX_RINGS) return; mutex_lock(&dev->struct_mutex); priv->completed_fence[index] = max(fence, priv->completed_fence[index]); list_for_each_entry_safe(cb, tmp, &priv->fence_cbs, work.entry) { if (COMPARE_FENCE_LTE(cb->fence, priv->completed_fence[index])) { list_del_init(&cb->work.entry); queue_work(priv->wq, &cb->work); } } mutex_unlock(&dev->struct_mutex); wake_up_all(&priv->fence_event); } void __msm_fence_worker(struct work_struct *work) { struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work); cb->func(cb); } /* * DRM ioctls: 
*/ static int msm_ioctl_get_param(struct drm_device *dev, void *data, struct drm_file *file) { struct msm_drm_private *priv = dev->dev_private; struct drm_msm_param *args = data; struct msm_gpu *gpu; /* for now, we just have 3d pipe.. eventually this would need to * be more clever to dispatch to appropriate gpu module: */ if (args->pipe != MSM_PIPE_3D0) return -EINVAL; gpu = priv->gpu; if (!gpu) return -ENXIO; return gpu->funcs->get_param(gpu, args->param, &args->value); } static int msm_ioctl_gem_new(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_msm_gem_new *args = data; if (args->flags & ~MSM_BO_FLAGS) { DRM_ERROR("invalid flags: %08x\n", args->flags); return -EINVAL; } return msm_gem_new_handle(dev, file, args->size, args->flags, &args->handle); } static inline ktime_t to_ktime(struct drm_msm_timespec timeout) { return ktime_set(timeout.tv_sec, timeout.tv_nsec); } static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_msm_gem_cpu_prep *args = data; struct drm_gem_object *obj; ktime_t timeout = to_ktime(args->timeout); int ret; if (args->op & ~MSM_PREP_FLAGS) { DRM_ERROR("invalid op: %08x\n", args->op); return -EINVAL; } obj = drm_gem_object_lookup(dev, file, args->handle); if (!obj) return -ENOENT; ret = msm_gem_cpu_prep(obj, args->op, &timeout); drm_gem_object_unreference_unlocked(obj); return ret; } static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_msm_gem_cpu_fini *args = data; struct drm_gem_object *obj; int ret; obj = drm_gem_object_lookup(dev, file, args->handle); if (!obj) return -ENOENT; ret = msm_gem_cpu_fini(obj); drm_gem_object_unreference_unlocked(obj); return ret; } static int msm_ioctl_gem_info(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_msm_gem_info *args = data; struct drm_gem_object *obj; struct msm_file_private *ctx = file->driver_priv; int ret = 0; if (args->flags & ~MSM_INFO_FLAGS) 
return -EINVAL; if (!ctx || !ctx->aspace) return -EINVAL; obj = drm_gem_object_lookup(dev, file, args->handle); if (!obj) return -ENOENT; if (args->flags & MSM_INFO_IOVA) { uint64_t iova; ret = msm_gem_get_iova(obj, ctx->aspace, &iova); if (!ret) args->offset = iova; } else { args->offset = msm_gem_mmap_offset(obj); } drm_gem_object_unreference_unlocked(obj); return ret; } static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_msm_wait_fence *args = data; ktime_t timeout; if (args->pad) { DRM_ERROR("invalid pad: %08x\n", args->pad); return -EINVAL; } /* * Special case - if the user passes a timeout of 0.0 just return the * current fence status (0 for retired, -EBUSY for active) with no * accompanying kernel logs. This can be a poor man's way of * determining the status of a fence. */ if (args->timeout.tv_sec == 0 && args->timeout.tv_nsec == 0) return msm_wait_fence(dev, args->fence, NULL, true); timeout = to_ktime(args->timeout); return msm_wait_fence(dev, args->fence, &timeout, true); } static int msm_event_supported(struct drm_device *dev, struct drm_msm_event_req *req) { int ret = -EINVAL; struct drm_mode_object *arg_obj; struct drm_crtc *crtc; arg_obj = drm_mode_object_find(dev, req->object_id, req->object_type); if (!arg_obj) return -ENOENT; if (arg_obj->type == DRM_MODE_OBJECT_CRTC) { crtc = obj_to_crtc(arg_obj); req->index = drm_crtc_index(crtc); } switch (req->event) { case DRM_EVENT_VBLANK: case DRM_EVENT_HISTOGRAM: case DRM_EVENT_AD: if (arg_obj->type == DRM_MODE_OBJECT_CRTC) ret = 0; break; default: break; } return ret; } static void msm_vblank_read_cb(struct drm_pending_event *e) { struct drm_pending_vblank_event *vblank; struct msm_drm_private *priv; struct drm_file *file_priv; struct drm_device *dev; struct msm_drm_event *v; int ret = 0; bool need_vblank = false; if (!e) { DRM_ERROR("invalid pending event payload\n"); return; } vblank = container_of(e, struct drm_pending_vblank_event, base); file_priv = 
vblank->base.file_priv; dev = (file_priv && file_priv->minor) ? file_priv->minor->dev : NULL; priv = (dev) ? dev->dev_private : NULL; if (!priv) { DRM_ERROR("invalid msm private\n"); return; } list_for_each_entry(v, &priv->client_event_list, base.link) { if (v->base.file_priv != file_priv || (v->event.type != DRM_EVENT_VBLANK && v->event.type != DRM_EVENT_AD)) continue; need_vblank = true; /** * User-space client requests for N vsyncs when event * requested is DRM_EVENT_AD. Once the count reaches zero, * notify stop requesting for additional vsync's. */ if (v->event.type == DRM_EVENT_AD) { if (vblank->event.user_data) vblank->event.user_data--; need_vblank = (vblank->event.user_data) ? true : false; } break; } if (!need_vblank) { kfree(vblank); } else { ret = drm_vblank_get(dev, vblank->pipe); if (!ret) { list_add(&vblank->base.link, &dev->vblank_event_list); } else { DRM_ERROR("vblank enable failed ret %d\n", ret); kfree(vblank); } } } static int msm_enable_vblank_event(struct drm_device *dev, struct drm_msm_event_req *req, struct drm_file *file) { struct drm_pending_vblank_event *e; int ret = 0; unsigned long flags; struct drm_vblank_crtc *vblank; if (WARN_ON(req->index >= dev->num_crtcs)) return -EINVAL; vblank = &dev->vblank[req->index]; e = kzalloc(sizeof(*e), GFP_KERNEL); if (!e) return -ENOMEM; e->pipe = req->index; e->base.pid = current->pid; e->event.base.type = DRM_EVENT_VBLANK; e->event.base.length = sizeof(e->event); e->event.user_data = req->client_context; e->base.event = &e->event.base; e->base.file_priv = file; e->base.destroy = msm_vblank_read_cb; ret = drm_vblank_get(dev, e->pipe); if (ret) { DRM_ERROR("failed to enable the vblank\n"); goto free; } spin_lock_irqsave(&dev->event_lock, flags); if (!vblank->enabled) { ret = -EINVAL; goto err_unlock; } if (file->event_space < sizeof(e->event)) { ret = -EBUSY; goto err_unlock; } file->event_space -= sizeof(e->event); list_add_tail(&e->base.link, &dev->vblank_event_list); err_unlock: 
	spin_unlock_irqrestore(&dev->event_lock, flags);
free:
	/* NOTE(review): on the err_unlock paths above, the reference taken
	 * by drm_vblank_get() is never dropped -- confirm whether a
	 * drm_vblank_put() is missing before kfree(e).
	 */
	if (ret)
		kfree(e);
	return ret;
}

/* Enable delivery of a client-requested event (vblank/AD only). */
static int msm_enable_event(struct drm_device *dev,
		struct drm_msm_event_req *req, struct drm_file *file)
{
	int ret = -EINVAL;

	switch (req->event) {
	case DRM_EVENT_AD:
	case DRM_EVENT_VBLANK:
		ret = msm_enable_vblank_event(dev, req, file);
		break;
	default:
		break;
	}
	return ret;
}

/*
 * Remove and free every pending vblank event registered by @file on the
 * pipe named in @req, dropping one vblank reference per removed event.
 */
static int msm_disable_vblank_event(struct drm_device *dev,
		struct drm_msm_event_req *req,
		struct drm_file *file)
{
	struct drm_pending_vblank_event *e, *t;

	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
		if (e->pipe != req->index || file != e->base.file_priv)
			continue;
		list_del(&e->base.link);
		drm_vblank_put(dev, req->index);
		kfree(e);
	}
	return 0;
}

/* Disable delivery of a client-requested event (vblank/AD only). */
static int msm_disable_event(struct drm_device *dev,
		struct drm_msm_event_req *req,
		struct drm_file *file)
{
	int ret = -EINVAL;

	switch (req->event) {
	case DRM_EVENT_AD:
	case DRM_EVENT_VBLANK:
		ret = msm_disable_vblank_event(dev, req, file);
		break;
	default:
		break;
	}
	return ret;
}

/*
 * Register @file for notification of the requested event on the given
 * object, rejecting duplicate registrations from the same file.
 */
static int msm_ioctl_register_event(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_event_req *req_event = data;
	struct msm_drm_event *client;
	struct msm_drm_event *v;
	unsigned long flag = 0;
	bool dup_request = false;
	int ret = 0;

	/* msm_event_supported() returns 0 when supported, nonzero otherwise */
	if (msm_event_supported(dev, req_event)) {
		DRM_ERROR("unsupported event %x object %x object id %d\n",
			req_event->event, req_event->object_type,
			req_event->object_id);
		return -EINVAL;
	}

	spin_lock_irqsave(&dev->event_lock, flag);
	list_for_each_entry(v, &priv->client_event_list, base.link) {
		if (v->base.file_priv != file)
			continue;
		if (v->event.type == req_event->event &&
			v->info.object_id == req_event->object_id) {
			DRM_ERROR("duplicate request for event %x obj id %d\n",
				v->event.type, v->info.object_id);
			dup_request = true;
			break;
		}
	}
	spin_unlock_irqrestore(&dev->event_lock, flag);

	if (dup_request)
		return -EINVAL;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if
(!client) return -ENOMEM; client->base.file_priv = file; client->base.pid = current->pid; client->base.event = &client->event; client->base.destroy = (void (*) (struct drm_pending_event *)) kfree; client->event.type = req_event->event; memcpy(&client->info, req_event, sizeof(client->info)); spin_lock_irqsave(&dev->event_lock, flag); list_add_tail(&client->base.link, &priv->client_event_list); spin_unlock_irqrestore(&dev->event_lock, flag); ret = msm_enable_event(dev, req_event, file); if (ret) { DRM_ERROR("failed to enable event %x object %x object id %d\n", req_event->event, req_event->object_type, req_event->object_id); spin_lock_irqsave(&dev->event_lock, flag); list_del(&client->base.link); spin_unlock_irqrestore(&dev->event_lock, flag); kfree(client); } return ret; } static int msm_ioctl_deregister_event(struct drm_device *dev, void *data, struct drm_file *file) { struct msm_drm_private *priv = dev->dev_private; struct drm_msm_event_req *req_event = data; struct msm_drm_event *client = NULL; struct msm_drm_event *v, *vt; unsigned long flag = 0; if (msm_event_supported(dev, req_event)) { DRM_ERROR("unsupported event %x object %x object id %d\n", req_event->event, req_event->object_type, req_event->object_id); return -EINVAL; } spin_lock_irqsave(&dev->event_lock, flag); msm_disable_event(dev, req_event, file); list_for_each_entry_safe(v, vt, &priv->client_event_list, base.link) { if (v->event.type == req_event->event && v->info.object_id == req_event->object_id && v->base.file_priv == file) { client = v; list_del(&client->base.link); client->base.destroy(&client->base); break; } } spin_unlock_irqrestore(&dev->event_lock, flag); return 0; } void msm_send_crtc_notification(struct drm_crtc *crtc, struct drm_event *event, u8 *payload) { struct drm_device *dev = NULL; struct msm_drm_private *priv = NULL; unsigned long flags; struct msm_drm_event *notify, *v; int len = 0; if (!crtc || !event || !event->length || !payload) { DRM_ERROR("err param crtc %pK event %pK len 
%d payload %pK\n", crtc, event, ((event) ? (event->length) : -1), payload); return; } dev = crtc->dev; priv = (dev) ? dev->dev_private : NULL; if (!dev || !priv) { DRM_ERROR("invalid dev %pK priv %pK\n", dev, priv); return; } spin_lock_irqsave(&dev->event_lock, flags); list_for_each_entry(v, &priv->client_event_list, base.link) { if (v->event.type != event->type || crtc->base.id != v->info.object_id) continue; len = event->length + sizeof(struct drm_msm_event_resp); if (v->base.file_priv->event_space < len) { DRM_ERROR("Insufficient space to notify\n"); continue; } notify = kzalloc(len, GFP_ATOMIC); if (!notify) continue; notify->base.file_priv = v->base.file_priv; notify->base.event = &notify->event; notify->base.pid = v->base.pid; notify->base.destroy = (void (*)(struct drm_pending_event *)) kfree; notify->event.type = v->event.type; notify->event.length = len; list_add(&notify->base.link, &notify->base.file_priv->event_list); notify->base.file_priv->event_space -= len; memcpy(&notify->info, &v->info, sizeof(notify->info)); memcpy(notify->data, payload, event->length); wake_up_interruptible(&notify->base.file_priv->event_wait); } spin_unlock_irqrestore(&dev->event_lock, flags); } static int msm_ioctl_counter_get(struct drm_device *dev, void *data, struct drm_file *file) { struct msm_file_private *ctx = file->driver_priv; struct msm_drm_private *priv = dev->dev_private; if (priv->gpu) return msm_gpu_counter_get(priv->gpu, data, ctx); return -ENODEV; } static int msm_ioctl_counter_put(struct drm_device *dev, void *data, struct drm_file *file) { struct msm_file_private *ctx = file->driver_priv; struct msm_drm_private *priv = dev->dev_private; if (priv->gpu) return msm_gpu_counter_put(priv->gpu, data, ctx); return -ENODEV; } static int msm_ioctl_counter_read(struct drm_device *dev, void *data, struct drm_file *file) { struct msm_drm_private *priv = dev->dev_private; if (priv->gpu) return msm_gpu_counter_read(priv->gpu, data); return -ENODEV; } int msm_release(struct 
inode *inode, struct file *filp) { struct drm_file *file_priv = filp->private_data; struct drm_minor *minor = file_priv->minor; struct drm_device *dev = minor->dev; struct msm_drm_private *priv = dev->dev_private; struct msm_drm_event *v, *vt; unsigned long flags; spin_lock_irqsave(&dev->event_lock, flags); list_for_each_entry_safe(v, vt, &priv->client_event_list, base.link) { if (v->base.file_priv != file_priv) continue; list_del(&v->base.link); msm_disable_event(dev, &v->info, file_priv); v->base.destroy(&v->base); } spin_unlock_irqrestore(&dev->event_lock, flags); return drm_release(inode, filp); } static const struct drm_ioctl_desc msm_ioctls[] = { DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(SDE_WB_CONFIG, sde_wb_config, DRM_UNLOCKED|DRM_AUTH), DRM_IOCTL_DEF_DRV(MSM_REGISTER_EVENT, msm_ioctl_register_event, DRM_UNLOCKED|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF_DRV(MSM_DEREGISTER_EVENT, msm_ioctl_deregister_event, DRM_UNLOCKED|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF_DRV(MSM_COUNTER_GET, msm_ioctl_counter_get, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_COUNTER_PUT, msm_ioctl_counter_put, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_COUNTER_READ, msm_ioctl_counter_read, DRM_AUTH|DRM_RENDER_ALLOW), }; static const struct vm_operations_struct vm_ops = { .fault = msm_gem_fault, .open = drm_gem_vm_open, .close = drm_gem_vm_close, }; static const struct file_operations fops = { .owner = THIS_MODULE, .open = drm_open, 
.release = msm_release, .unlocked_ioctl = drm_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = drm_compat_ioctl, #endif .poll = drm_poll, .read = drm_read, .llseek = no_llseek, .mmap = msm_gem_mmap, }; static struct drm_driver msm_driver = { .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_MODESET, .load = msm_load, .unload = msm_unload, .open = msm_open, .preclose = msm_preclose, .postclose = msm_postclose, .lastclose = msm_lastclose, .set_busid = drm_platform_set_busid, .irq_handler = msm_irq, .irq_preinstall = msm_irq_preinstall, .irq_postinstall = msm_irq_postinstall, .irq_uninstall = msm_irq_uninstall, .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = msm_enable_vblank, .disable_vblank = msm_disable_vblank, .gem_free_object = msm_gem_free_object, .gem_vm_ops = &vm_ops, .dumb_create = msm_gem_dumb_create, .dumb_map_offset = msm_gem_dumb_map_offset, .dumb_destroy = drm_gem_dumb_destroy, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, .gem_prime_import = drm_gem_prime_import, .gem_prime_res_obj = msm_gem_prime_res_obj, .gem_prime_pin = msm_gem_prime_pin, .gem_prime_unpin = msm_gem_prime_unpin, .gem_prime_get_sg_table = msm_gem_prime_get_sg_table, .gem_prime_import_sg_table = msm_gem_prime_import_sg_table, .gem_prime_vmap = msm_gem_prime_vmap, .gem_prime_vunmap = msm_gem_prime_vunmap, .gem_prime_mmap = msm_gem_prime_mmap, #ifdef CONFIG_DEBUG_FS .debugfs_init = msm_debugfs_init, .debugfs_cleanup = msm_debugfs_cleanup, #endif .ioctls = msm_ioctls, .num_ioctls = ARRAY_SIZE(msm_ioctls), .fops = &fops, .name = "msm_drm", .desc = "MSM Snapdragon DRM", .date = "20130625", .major = 1, .minor = 0, }; #ifdef CONFIG_PM_SLEEP static int msm_pm_suspend(struct device *dev) { struct drm_device *ddev = dev_get_drvdata(dev); drm_kms_helper_poll_disable(ddev); return 0; } static int msm_pm_resume(struct device *dev) { struct 
drm_device *ddev = dev_get_drvdata(dev); drm_kms_helper_poll_enable(ddev); return 0; } #endif static const struct dev_pm_ops msm_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume) }; static int msm_drm_bind(struct device *dev) { int ret; ret = drm_platform_init(&msm_driver, to_platform_device(dev)); if (ret) DRM_ERROR("drm_platform_init failed: %d\n", ret); return ret; } static void msm_drm_unbind(struct device *dev) { drm_put_dev(platform_get_drvdata(to_platform_device(dev))); } static const struct component_master_ops msm_drm_ops = { .bind = msm_drm_bind, .unbind = msm_drm_unbind, }; /* * Componentized driver support: */ #ifdef CONFIG_OF /* NOTE: the CONFIG_OF case duplicates the same code as exynos or imx * (or probably any other).. so probably some room for some helpers */ static int compare_of(struct device *dev, void *data) { return dev->of_node == data; } static int add_components(struct device *dev, struct component_match **matchptr, const char *name) { struct device_node *np = dev->of_node; unsigned i; for (i = 0; ; i++) { struct device_node *node; node = of_parse_phandle(np, name, i); if (!node) break; component_match_add(dev, matchptr, compare_of, node); } return 0; } static int msm_add_master_component(struct device *dev, struct component_match *match) { int ret; ret = component_master_add_with_match(dev, &msm_drm_ops, match); if (ret) DRM_ERROR("component add match failed: %d\n", ret); return ret; } #else static int compare_dev(struct device *dev, void *data) { return dev == data; } static int msm_add_master_component(struct device *dev, struct component_match *match) { return 0; } #endif /* * Platform driver: */ static int msm_pdev_probe(struct platform_device *pdev) { int ret; struct component_match *match = NULL; #ifdef CONFIG_OF add_components(&pdev->dev, &match, "connectors"); add_components(&pdev->dev, &match, "gpus"); #else /* For non-DT case, it kinda sucks. 
We don't actually have a way * to know whether or not we are waiting for certain devices (or if * they are simply not present). But for non-DT we only need to * care about apq8064/apq8060/etc (all mdp4/a3xx): */ static const char *devnames[] = { "hdmi_msm.0", "kgsl-3d0.0", }; int i; DBG("Adding components.."); for (i = 0; i < ARRAY_SIZE(devnames); i++) { struct device *dev; dev = bus_find_device_by_name(&platform_bus_type, NULL, devnames[i]); if (!dev) { dev_info(&pdev->dev, "still waiting for %s\n", devnames[i]); return -EPROBE_DEFER; } component_match_add(&pdev->dev, &match, compare_dev, dev); } #endif /* on all devices that I am aware of, iommu's which cna map * any address the cpu can see are used: */ ret = dma_set_mask_and_coherent(&pdev->dev, ~0); if (ret) return ret; ret = msm_add_master_component(&pdev->dev, match); return ret; } static int msm_pdev_remove(struct platform_device *pdev) { msm_drm_unbind(&pdev->dev); component_master_del(&pdev->dev, &msm_drm_ops); return 0; } static const struct platform_device_id msm_id[] = { { "mdp", 0 }, { } }; static const struct of_device_id dt_match[] = { { .compatible = "qcom,mdp" }, /* mdp4 */ { .compatible = "qcom,mdss_mdp" }, /* mdp5 */ { .compatible = "qcom,sde-kms" }, /* sde */ {} }; MODULE_DEVICE_TABLE(of, dt_match); static struct platform_driver msm_platform_driver = { .probe = msm_pdev_probe, .remove = msm_pdev_remove, .driver = { .name = "msm_drm", .of_match_table = dt_match, .pm = &msm_pm_ops, }, .id_table = msm_id, }; #ifdef CONFIG_QCOM_KGSL void __init adreno_register(void) { } void __exit adreno_unregister(void) { } #endif static int __init msm_drm_register(void) { DBG("init"); msm_dsi_register(); msm_edp_register(); hdmi_register(); adreno_register(); return platform_driver_register(&msm_platform_driver); } static void __exit msm_drm_unregister(void) { DBG("fini"); platform_driver_unregister(&msm_platform_driver); hdmi_unregister(); adreno_unregister(); msm_edp_unregister(); msm_dsi_unregister(); } 
/*
 * Module entry/exit points. msm_drm_register() (defined just above)
 * registers the DSI, eDP, HDMI and Adreno sub-drivers before the
 * platform driver itself; msm_drm_unregister() tears them down in
 * reverse order.
 */
module_init(msm_drm_register);
module_exit(msm_drm_unregister);

/* Fixed: author e-mail was missing its closing '>' bracket. */
MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");
{ "language": "C" }
/* * Copyright (C) 2002 Toshiba Corporation * Copyright (C) 2005-2006 MontaVista Software, Inc. <source@mvista.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/types.h> #include <linux/pci.h> #include <linux/ide.h> #define DRV_NAME "tc86c001" static void tc86c001_set_mode(ide_drive_t *drive, const u8 speed) { ide_hwif_t *hwif = drive->hwif; unsigned long scr_port = hwif->config_data + (drive->dn ? 0x02 : 0x00); u16 mode, scr = inw(scr_port); switch (speed) { case XFER_UDMA_4: mode = 0x00c0; break; case XFER_UDMA_3: mode = 0x00b0; break; case XFER_UDMA_2: mode = 0x00a0; break; case XFER_UDMA_1: mode = 0x0090; break; case XFER_UDMA_0: mode = 0x0080; break; case XFER_MW_DMA_2: mode = 0x0070; break; case XFER_MW_DMA_1: mode = 0x0060; break; case XFER_MW_DMA_0: mode = 0x0050; break; case XFER_PIO_4: mode = 0x0400; break; case XFER_PIO_3: mode = 0x0300; break; case XFER_PIO_2: mode = 0x0200; break; case XFER_PIO_1: mode = 0x0100; break; case XFER_PIO_0: default: mode = 0x0000; break; } scr &= (speed < XFER_MW_DMA_0) ? 0xf8ff : 0xff0f; scr |= mode; outw(scr, scr_port); } static void tc86c001_set_pio_mode(ide_drive_t *drive, const u8 pio) { tc86c001_set_mode(drive, XFER_PIO_0 + pio); } /* * HACKITY HACK * * This is a workaround for the limitation 5 of the TC86C001 IDE controller: * if a DMA transfer terminates prematurely, the controller leaves the device's * interrupt request (INTRQ) pending and does not generate a PCI interrupt (or * set the interrupt bit in the DMA status register), thus no PCI interrupt * will occur until a DMA transfer has been successfully completed. * * We work around this by initiating dummy, zero-length DMA transfer on * a DMA timeout expiration. 
I found no better way to do this with the current * IDE core than to temporarily replace a higher level driver's timer expiry * handler with our own backing up to that handler in case our recovery fails. */ static int tc86c001_timer_expiry(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; ide_expiry_t *expiry = ide_get_hwifdata(hwif); u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); /* Restore a higher level driver's expiry handler first. */ hwif->expiry = expiry; if ((dma_stat & 5) == 1) { /* DMA active and no interrupt */ unsigned long sc_base = hwif->config_data; unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04); u8 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); printk(KERN_WARNING "%s: DMA interrupt possibly stuck, " "attempting recovery...\n", drive->name); /* Stop DMA */ outb(dma_cmd & ~0x01, hwif->dma_base + ATA_DMA_CMD); /* Setup the dummy DMA transfer */ outw(0, sc_base + 0x0a); /* Sector Count */ outw(0, twcr_port); /* Transfer Word Count 1 or 2 */ /* Start the dummy DMA transfer */ /* clear R_OR_WCTR for write */ outb(0x00, hwif->dma_base + ATA_DMA_CMD); /* set START_STOPBM */ outb(0x01, hwif->dma_base + ATA_DMA_CMD); /* * If an interrupt was pending, it should come thru shortly. * If not, a higher level driver's expiry handler should * eventually cause some kind of recovery from the DMA stall. */ return WAIT_MIN_SLEEP; } /* Chain to the restored expiry handler if DMA wasn't active. */ if (likely(expiry != NULL)) return expiry(drive); /* If there was no handler, "emulate" that for ide_timer_expiry()... */ return -1; } static void tc86c001_dma_start(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; unsigned long sc_base = hwif->config_data; unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04); unsigned long nsectors = blk_rq_sectors(hwif->rq); /* * We have to manually load the sector count and size into * the appropriate system control registers for DMA to work * with LBA48 and ATAPI devices... 
*/ outw(nsectors, sc_base + 0x0a); /* Sector Count */ outw(SECTOR_SIZE / 2, twcr_port); /* Transfer Word Count 1/2 */ /* Install our timeout expiry hook, saving the current handler... */ ide_set_hwifdata(hwif, hwif->expiry); hwif->expiry = &tc86c001_timer_expiry; ide_dma_start(drive); } static u8 tc86c001_cable_detect(ide_hwif_t *hwif) { struct pci_dev *dev = to_pci_dev(hwif->dev); unsigned long sc_base = pci_resource_start(dev, 5); u16 scr1 = inw(sc_base + 0x00); /* * System Control 1 Register bit 13 (PDIAGN): * 0=80-pin cable, 1=40-pin cable */ return (scr1 & 0x2000) ? ATA_CBL_PATA40 : ATA_CBL_PATA80; } static void __devinit init_hwif_tc86c001(ide_hwif_t *hwif) { struct pci_dev *dev = to_pci_dev(hwif->dev); unsigned long sc_base = pci_resource_start(dev, 5); u16 scr1 = inw(sc_base + 0x00); /* System Control 1 Register bit 15 (Soft Reset) set */ outw(scr1 | 0x8000, sc_base + 0x00); /* System Control 1 Register bit 14 (FIFO Reset) set */ outw(scr1 | 0x4000, sc_base + 0x00); /* System Control 1 Register: reset clear */ outw(scr1 & ~0xc000, sc_base + 0x00); /* Store the system control register base for convenience... 
*/ hwif->config_data = sc_base; if (!hwif->dma_base) return; /* * Sector Count Control Register bits 0 and 1 set: * software sets Sector Count Register for master and slave device */ outw(0x0003, sc_base + 0x0c); /* Sector Count Register limit */ hwif->rqsize = 0xffff; } static const struct ide_port_ops tc86c001_port_ops = { .set_pio_mode = tc86c001_set_pio_mode, .set_dma_mode = tc86c001_set_mode, .cable_detect = tc86c001_cable_detect, }; static const struct ide_dma_ops tc86c001_dma_ops = { .dma_host_set = ide_dma_host_set, .dma_setup = ide_dma_setup, .dma_start = tc86c001_dma_start, .dma_end = ide_dma_end, .dma_test_irq = ide_dma_test_irq, .dma_lost_irq = ide_dma_lost_irq, .dma_timer_expiry = ide_dma_sff_timer_expiry, .dma_sff_read_status = ide_dma_sff_read_status, }; static const struct ide_port_info tc86c001_chipset __devinitdata = { .name = DRV_NAME, .init_hwif = init_hwif_tc86c001, .port_ops = &tc86c001_port_ops, .dma_ops = &tc86c001_dma_ops, .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA4, }; static int __devinit tc86c001_init_one(struct pci_dev *dev, const struct pci_device_id *id) { int rc; rc = pci_enable_device(dev); if (rc) goto out; rc = pci_request_region(dev, 5, DRV_NAME); if (rc) { printk(KERN_ERR DRV_NAME ": system control regs already in use"); goto out_disable; } rc = ide_pci_init_one(dev, &tc86c001_chipset, NULL); if (rc) goto out_release; goto out; out_release: pci_release_region(dev, 5); out_disable: pci_disable_device(dev); out: return rc; } static void __devexit tc86c001_remove(struct pci_dev *dev) { ide_pci_remove(dev); pci_release_region(dev, 5); pci_disable_device(dev); } static const struct pci_device_id tc86c001_pci_tbl[] = { { PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE), 0 }, { 0, } }; MODULE_DEVICE_TABLE(pci, tc86c001_pci_tbl); static struct pci_driver tc86c001_pci_driver = { .name = "TC86C001", .id_table = tc86c001_pci_tbl, .probe = 
tc86c001_init_one, .remove = __devexit_p(tc86c001_remove), }; static int __init tc86c001_ide_init(void) { return ide_pci_register_driver(&tc86c001_pci_driver); } static void __exit tc86c001_ide_exit(void) { pci_unregister_driver(&tc86c001_pci_driver); } module_init(tc86c001_ide_init); module_exit(tc86c001_ide_exit); MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>"); MODULE_DESCRIPTION("PCI driver module for TC86C001 IDE"); MODULE_LICENSE("GPL");
{ "language": "C" }
/* Created by Language version: 7.7.0 */ /* NOT VECTORIZED */ #define NRN_VECTORIZED 0 #include <stdio.h> #include <stdlib.h> #include <math.h> #include "scoplib_ansi.h" #undef PI #define nil 0 #include "md1redef.h" #include "section.h" #include "nrniv_mf.h" #include "md2redef.h" #if METHOD3 extern int _method3; #endif #if !NRNGPU #undef exp #define exp hoc_Exp extern double hoc_Exp(double); #endif #define nrn_init _nrn_init__ican #define _nrn_initial _nrn_initial__ican #define nrn_cur _nrn_cur__ican #define _nrn_current _nrn_current__ican #define nrn_jacob _nrn_jacob__ican #define nrn_state _nrn_state__ican #define _net_receive _net_receive__ican #define evaluate_fct evaluate_fct__ican #define states states__ican #define _threadargscomma_ /**/ #define _threadargsprotocomma_ /**/ #define _threadargs_ /**/ #define _threadargsproto_ /**/ /*SUPPRESS 761*/ /*SUPPRESS 762*/ /*SUPPRESS 763*/ /*SUPPRESS 765*/ extern double *getarg(); static double *_p; static Datum *_ppvar; #define t nrn_threads->_t #define dt nrn_threads->_dt #define gbar _p[0] #define i _p[1] #define g _p[2] #define m _p[3] #define Cai _p[4] #define Dm _p[5] #define iother _p[6] #define tadj _p[7] #define _g _p[8] #define _ion_iother *_ppvar[0]._pval #define _ion_diotherdv *_ppvar[1]._pval #define _ion_Cai *_ppvar[2]._pval #if MAC #if !defined(v) #define v _mlhv #endif #if !defined(h) #define h _mlhh #endif #endif #if defined(__cplusplus) extern "C" { #endif static int hoc_nrnpointerindex = -1; /* external NEURON variables */ extern double celsius; /* declaration of user functions */ static void _hoc_evaluate_fct(void); static int _mechtype; extern void _nrn_cacheloop_reg(int, int); extern void hoc_register_prop_size(int, int, int); extern void hoc_register_limits(int, HocParmLimits*); extern void hoc_register_units(int, HocParmUnits*); extern void nrn_promote(Prop*, int, int); extern Memb_func* memb_func; #define NMODL_TEXT 1 #if NMODL_TEXT static const char* nmodl_file_text; static const char* 
nmodl_filename; extern void hoc_reg_nmodl_text(int, const char*); extern void hoc_reg_nmodl_filename(int, const char*); #endif extern void _nrn_setdata_reg(int, void(*)(Prop*)); static void _setdata(Prop* _prop) { _p = _prop->param; _ppvar = _prop->dparam; } static void _hoc_setdata() { Prop *_prop, *hoc_getdata_range(int); _prop = hoc_getdata_range(_mechtype); _setdata(_prop); hoc_retpushx(1.); } /* connect user functions to hoc names */ static VoidFunc hoc_intfunc[] = { "setdata_ican", _hoc_setdata, "evaluate_fct_ican", _hoc_evaluate_fct, 0, 0 }; /* declare global and static user variables */ #define beta beta_ican double beta = 0.003; #define cac cac_ican double cac = 0.00011; #define erev erev_ican double erev = 10; #define m_inf m_inf_ican double m_inf = 0; #define taumin taumin_ican double taumin = 0.1; #define tau_m tau_m_ican double tau_m = 0; #define x x_ican double x = 8; /* some parameters have upper and lower limits */ static HocParmLimits _hoc_parm_limits[] = { 0,0,0 }; static HocParmUnits _hoc_parm_units[] = { "erev_ican", "mV", "cac_ican", "mM", "taumin_ican", "ms", "tau_m_ican", "ms", "gbar_ican", "mho/cm2", "i_ican", "mA/cm2", "g_ican", "mho/cm2", 0,0 }; static double delta_t = 1; static double m0 = 0; static double v = 0; /* connect global user variables to hoc */ static DoubScal hoc_scdoub[] = { "erev_ican", &erev_ican, "beta_ican", &beta_ican, "cac_ican", &cac_ican, "taumin_ican", &taumin_ican, "x_ican", &x_ican, "m_inf_ican", &m_inf_ican, "tau_m_ican", &tau_m_ican, 0,0 }; static DoubVec hoc_vdoub[] = { 0,0,0 }; static double _sav_indep; static void nrn_alloc(Prop*); static void nrn_init(_NrnThread*, _Memb_list*, int); static void nrn_state(_NrnThread*, _Memb_list*, int); static void nrn_cur(_NrnThread*, _Memb_list*, int); static void nrn_jacob(_NrnThread*, _Memb_list*, int); static int _ode_count(int); static void _ode_map(int, double**, double**, double*, Datum*, double*, int); static void _ode_spec(_NrnThread*, _Memb_list*, int); static void 
_ode_matsol(_NrnThread*, _Memb_list*, int); #define _cvode_ieq _ppvar[3]._i static void _ode_matsol_instance1(_threadargsproto_); /* connect range variables in _p that hoc is supposed to know about */ static const char *_mechanism[] = { "7.7.0", "ican", "gbar_ican", 0, "i_ican", "g_ican", 0, "m_ican", 0, 0}; static Symbol* _other_sym; static Symbol* _Ca_sym; extern Prop* need_memb(Symbol*); static void nrn_alloc(Prop* _prop) { Prop *prop_ion; double *_p; Datum *_ppvar; _p = nrn_prop_data_alloc(_mechtype, 9, _prop); /*initialize range parameters*/ gbar = 1e-05; _prop->param = _p; _prop->param_size = 9; _ppvar = nrn_prop_datum_alloc(_mechtype, 4, _prop); _prop->dparam = _ppvar; /*connect ionic variables to this model*/ prop_ion = need_memb(_other_sym); _ppvar[0]._pval = &prop_ion->param[3]; /* iother */ _ppvar[1]._pval = &prop_ion->param[4]; /* _ion_diotherdv */ prop_ion = need_memb(_Ca_sym); nrn_promote(prop_ion, 1, 0); _ppvar[2]._pval = &prop_ion->param[1]; /* Cai */ } static void _initlists(); /* some states have an absolute tolerance */ static Symbol** _atollist; static HocStateTolerance _hoc_state_tol[] = { 0,0 }; static void _update_ion_pointer(Datum*); extern Symbol* hoc_lookup(const char*); extern void _nrn_thread_reg(int, int, void(*)(Datum*)); extern void _nrn_thread_table_reg(int, void(*)(double*, Datum*, Datum*, _NrnThread*, int)); extern void hoc_register_tolerance(int, HocStateTolerance*, Symbol***); extern void _cvode_abstol( Symbol**, double*, int); void _Ican_reg() { int _vectorized = 0; _initlists(); ion_reg("other", 1.0); ion_reg("Ca", 2.0); _other_sym = hoc_lookup("other_ion"); _Ca_sym = hoc_lookup("Ca_ion"); register_mech(_mechanism, nrn_alloc,nrn_cur, nrn_jacob, nrn_state, nrn_init, hoc_nrnpointerindex, 0); _mechtype = nrn_get_mechtype(_mechanism[1]); _nrn_setdata_reg(_mechtype, _setdata); _nrn_thread_reg(_mechtype, 2, _update_ion_pointer); #if NMODL_TEXT hoc_reg_nmodl_text(_mechtype, nmodl_file_text); hoc_reg_nmodl_filename(_mechtype, 
nmodl_filename); #endif hoc_register_prop_size(_mechtype, 9, 4); hoc_register_dparam_semantics(_mechtype, 0, "other_ion"); hoc_register_dparam_semantics(_mechtype, 1, "other_ion"); hoc_register_dparam_semantics(_mechtype, 2, "Ca_ion"); hoc_register_dparam_semantics(_mechtype, 3, "cvodeieq"); hoc_register_cvode(_mechtype, _ode_count, _ode_map, _ode_spec, _ode_matsol); hoc_register_tolerance(_mechtype, _hoc_state_tol, &_atollist); hoc_register_var(hoc_scdoub, hoc_vdoub, hoc_intfunc); ivoc_help("help ?1 ican /home/docker/uncertainpy/tests/models/interneuron_modelDB/x86_64/Ican.mod\n"); hoc_register_limits(_mechtype, _hoc_parm_limits); hoc_register_units(_mechtype, _hoc_parm_units); } static int _reset; static char *modelname = "Slow Ca-dependent cation current"; static int error; static int _ninits = 0; static int _match_recurse=1; static void _modl_cleanup(){ _match_recurse=1;} static int evaluate_fct(double, double); static int _ode_spec1(_threadargsproto_); /*static int _ode_matsol1(_threadargsproto_);*/ static int _slist1[1], _dlist1[1]; static int states(_threadargsproto_); /*CVODE*/ static int _ode_spec1 () {_reset=0; { evaluate_fct ( _threadargscomma_ v , Cai ) ; Dm = ( m_inf - m ) / tau_m ; } return _reset; } static int _ode_matsol1 () { evaluate_fct ( _threadargscomma_ v , Cai ) ; Dm = Dm / (1. - dt*( ( ( ( - 1.0 ) ) ) / tau_m )) ; return 0; } /*END CVODE*/ static int states () {_reset=0; { evaluate_fct ( _threadargscomma_ v , Cai ) ; m = m + (1. 
- exp(dt*(( ( ( - 1.0 ) ) ) / tau_m)))*(- ( ( ( m_inf ) ) / tau_m ) / ( ( ( ( - 1.0 ) ) ) / tau_m ) - m) ; } return 0; } static int evaluate_fct ( double _lv , double _lCai ) { double _lalpha ; _lalpha = beta * pow( ( _lCai / cac ) , x ) ; tau_m = 1.0 / ( _lalpha + beta ) / tadj ; m_inf = _lalpha / ( _lalpha + beta ) ; if ( tau_m < taumin ) { tau_m = taumin ; } return 0; } static void _hoc_evaluate_fct(void) { double _r; _r = 1.; evaluate_fct ( *getarg(1) , *getarg(2) ); hoc_retpushx(_r); } static int _ode_count(int _type){ return 1;} static void _ode_spec(_NrnThread* _nt, _Memb_list* _ml, int _type) { Datum* _thread; Node* _nd; double _v; int _iml, _cntml; _cntml = _ml->_nodecount; _thread = _ml->_thread; for (_iml = 0; _iml < _cntml; ++_iml) { _p = _ml->_data[_iml]; _ppvar = _ml->_pdata[_iml]; _nd = _ml->_nodelist[_iml]; v = NODEV(_nd); Cai = _ion_Cai; _ode_spec1 (); }} static void _ode_map(int _ieq, double** _pv, double** _pvdot, double* _pp, Datum* _ppd, double* _atol, int _type) { int _i; _p = _pp; _ppvar = _ppd; _cvode_ieq = _ieq; for (_i=0; _i < 1; ++_i) { _pv[_i] = _pp + _slist1[_i]; _pvdot[_i] = _pp + _dlist1[_i]; _cvode_abstol(_atollist, _atol, _i); } } static void _ode_matsol_instance1(_threadargsproto_) { _ode_matsol1 (); } static void _ode_matsol(_NrnThread* _nt, _Memb_list* _ml, int _type) { Datum* _thread; Node* _nd; double _v; int _iml, _cntml; _cntml = _ml->_nodecount; _thread = _ml->_thread; for (_iml = 0; _iml < _cntml; ++_iml) { _p = _ml->_data[_iml]; _ppvar = _ml->_pdata[_iml]; _nd = _ml->_nodelist[_iml]; v = NODEV(_nd); Cai = _ion_Cai; _ode_matsol_instance1(_threadargs_); }} extern void nrn_update_ion_pointer(Symbol*, Datum*, int, int); static void _update_ion_pointer(Datum* _ppvar) { nrn_update_ion_pointer(_other_sym, _ppvar, 0, 3); nrn_update_ion_pointer(_other_sym, _ppvar, 1, 4); nrn_update_ion_pointer(_Ca_sym, _ppvar, 2, 1); } static void initmodel() { int _i; double _save;_ninits++; _save = t; t = 0.0; { m = m0; { /*VERBATIM*/ Cai = 
_ion_Cai; tadj = pow( 3.0 , ( ( celsius - 22.0 ) / 10.0 ) ) ; evaluate_fct ( _threadargscomma_ v , Cai ) ; m = m_inf ; } _sav_indep = t; t = _save; } } static void nrn_init(_NrnThread* _nt, _Memb_list* _ml, int _type){ Node *_nd; double _v; int* _ni; int _iml, _cntml; #if CACHEVEC _ni = _ml->_nodeindices; #endif _cntml = _ml->_nodecount; for (_iml = 0; _iml < _cntml; ++_iml) { _p = _ml->_data[_iml]; _ppvar = _ml->_pdata[_iml]; #if CACHEVEC if (use_cachevec) { _v = VEC_V(_ni[_iml]); }else #endif { _nd = _ml->_nodelist[_iml]; _v = NODEV(_nd); } v = _v; Cai = _ion_Cai; initmodel(); }} static double _nrn_current(double _v){double _current=0.;v=_v;{ { g = gbar * m * m ; i = g * ( v - erev ) ; iother = i ; } _current += iother; } return _current; } static void nrn_cur(_NrnThread* _nt, _Memb_list* _ml, int _type){ Node *_nd; int* _ni; double _rhs, _v; int _iml, _cntml; #if CACHEVEC _ni = _ml->_nodeindices; #endif _cntml = _ml->_nodecount; for (_iml = 0; _iml < _cntml; ++_iml) { _p = _ml->_data[_iml]; _ppvar = _ml->_pdata[_iml]; #if CACHEVEC if (use_cachevec) { _v = VEC_V(_ni[_iml]); }else #endif { _nd = _ml->_nodelist[_iml]; _v = NODEV(_nd); } Cai = _ion_Cai; _g = _nrn_current(_v + .001); { double _diother; _diother = iother; _rhs = _nrn_current(_v); _ion_diotherdv += (_diother - iother)/.001 ; } _g = (_g - _rhs)/.001; _ion_iother += iother ; #if CACHEVEC if (use_cachevec) { VEC_RHS(_ni[_iml]) -= _rhs; }else #endif { NODERHS(_nd) -= _rhs; } }} static void nrn_jacob(_NrnThread* _nt, _Memb_list* _ml, int _type){ Node *_nd; int* _ni; int _iml, _cntml; #if CACHEVEC _ni = _ml->_nodeindices; #endif _cntml = _ml->_nodecount; for (_iml = 0; _iml < _cntml; ++_iml) { _p = _ml->_data[_iml]; #if CACHEVEC if (use_cachevec) { VEC_D(_ni[_iml]) += _g; }else #endif { _nd = _ml->_nodelist[_iml]; NODED(_nd) += _g; } }} static void nrn_state(_NrnThread* _nt, _Memb_list* _ml, int _type){ Node *_nd; double _v = 0.0; int* _ni; int _iml, _cntml; #if CACHEVEC _ni = _ml->_nodeindices; #endif 
_cntml = _ml->_nodecount; for (_iml = 0; _iml < _cntml; ++_iml) { _p = _ml->_data[_iml]; _ppvar = _ml->_pdata[_iml]; _nd = _ml->_nodelist[_iml]; #if CACHEVEC if (use_cachevec) { _v = VEC_V(_ni[_iml]); }else #endif { _nd = _ml->_nodelist[_iml]; _v = NODEV(_nd); } v=_v; { Cai = _ion_Cai; { error = states(); if(error){fprintf(stderr,"at line 72 in file Ican.mod:\n SOLVE states METHOD cnexp\n"); nrn_complain(_p); abort_run(error);} } }} } static void terminal(){} static void _initlists() { int _i; static int _first = 1; if (!_first) return; _slist1[0] = &(m) - _p; _dlist1[0] = &(Dm) - _p; _first = 0; } #if NMODL_TEXT static const char* nmodl_filename = "/home/docker/uncertainpy/tests/models/interneuron_modelDB/Ican.mod"; static const char* nmodl_file_text = "TITLE Slow Ca-dependent cation current\n" ":\n" ": Ca++ dependent nonspecific cation current ICAN\n" ": Differential equations\n" ":\n" ": This file was taken the study of Zhu et al.: Neuroscience 91, 1445-1460, 1999,\n" ": where kinetics were based on Partridge & Swandulla, TINS 11: 69-72, 1988\n" "\n" ": Modified by Geir Halnes, Norwegian University of Life Sciences, June 2011\n" ": (using only 1 of the two calcium pools applied by Zhu et al. 
99)\n" "\n" "\n" "INDEPENDENT {t FROM 0 TO 1 WITH 1 (ms)}\n" "\n" "NEURON {\n" " SUFFIX ican\n" " USEION other WRITE iother VALENCE 1\n" " USEION Ca READ Cai VALENCE 2\n" " RANGE gbar, i, g\n" " GLOBAL m_inf, tau_m, beta, cac, taumin, erev, x\n" "}\n" "\n" "\n" "UNITS {\n" " (mA) = (milliamp)\n" " (mV) = (millivolt)\n" " (molar) = (1/liter)\n" " (mM) = (millimolar)\n" "}\n" "\n" "\n" "PARAMETER {\n" " v (mV)\n" " celsius = 36 (degC)\n" " erev = 10 (mV)\n" " Cai = .00005 (mM) : initial [Ca]i = 50 nM\n" " gbar = 1e-5 (mho/cm2)\n" " beta = 0.003 \n" " cac = 1.1e-4 (mM) : middle point of activation fct\n" " taumin = 0.1 (ms) : minimal value of time constant\n" " x = 8\n" "}\n" "\n" "\n" "STATE {\n" " m\n" "}\n" "\n" "INITIAL {\n" ": activation kinetics are assumed to be at 22 deg. C\n" ": Q10 is assumed to be 3\n" ":\n" " VERBATIM\n" " Cai = _ion_Cai;\n" " ENDVERBATIM\n" "\n" " tadj = 3.0 ^ ((celsius-22.0)/10)\n" " evaluate_fct(v,Cai)\n" " m = m_inf\n" "}\n" "\n" "ASSIGNED {\n" " i (mA/cm2)\n" " iother (mA/cm2)\n" " g (mho/cm2)\n" " m_inf\n" " tau_m (ms)\n" " tadj\n" "}\n" "\n" "BREAKPOINT { \n" " SOLVE states METHOD cnexp\n" " g = gbar * m*m\n" " i = g * (v - erev)\n" " iother = i\n" "}\n" "\n" "DERIVATIVE states { \n" " evaluate_fct(v,Cai)\n" " m' = (m_inf - m) / tau_m\n" "}\n" "\n" "UNITSOFF\n" "\n" "PROCEDURE evaluate_fct(v(mV),Cai(mM)) { LOCAL alpha\n" " alpha = beta * (Cai/cac)^x\n" " tau_m = 1 / (alpha + beta) / tadj\n" " m_inf = alpha / (alpha + beta)\n" " if(tau_m < taumin) { tau_m = taumin } : min value of time cst\n" "}\n" "UNITSON\n" ; #endif
{ "language": "C" }
//-----------------------------------------------------------------------------
// This code is licensed to you under the terms of the GNU GPL, version 2 or,
// at your option, any later version. See the LICENSE.txt file for the text of
// the license.
//-----------------------------------------------------------------------------
// ISO15693 other commons
//-----------------------------------------------------------------------------
// Adrian Dabrowski 2010 and others
// Christian Herrmann 2018

#ifndef ISO15693_H__
#define ISO15693_H__

#include "common.h"

// REQUEST FLAGS (byte 0 of every request; bits 0-3 are common to all requests)
#define ISO15_REQ_SUBCARRIER_SINGLE 0x00 // Tag should respond using one subcarrier (ASK)
#define ISO15_REQ_SUBCARRIER_TWO 0x01 // Tag should respond using two subcarriers (FSK)
#define ISO15_REQ_DATARATE_LOW 0x00 // Tag should respond using low data rate
#define ISO15_REQ_DATARATE_HIGH 0x02 // Tag should respond using high data rate
#define ISO15_REQ_NONINVENTORY 0x00
#define ISO15_REQ_INVENTORY 0x04 // This is an inventory request - see inventory flags
#define ISO15_REQ_PROTOCOL_NONEXT 0x00
#define ISO15_REQ_PROTOCOL_EXT 0x08 // RFU

// REQUEST FLAGS when INVENTORY is not set (bits 4-6 change meaning with the inventory bit)
#define ISO15_REQ_SELECT 0x10 // only selected cards response
#define ISO15_REQ_ADDRESS 0x20 // this req contains an address
#define ISO15_REQ_OPTION 0x40 // Command specific option selector

// REQUEST FLAGS when INVENTORY is set
#define ISO15_REQINV_AFI 0x10 // AFI Field is present
#define ISO15_REQINV_SLOT1 0x20 // 1 Slot
#define ISO15_REQINV_SLOT16 0x00 // 16 Slots
#define ISO15_REQINV_OPTION 0x40 // Command specific option selector

// RESPONSE FLAGS
#define ISO15_RES_ERROR 0x01
#define ISO15_RES_EXT 0x08 // Protocol Extension

// RESPONSE ERROR CODES (valid when ISO15_RES_ERROR is set in the response flags)
#define ISO15_NOERROR 0x00
#define ISO15_ERROR_CMD_NOT_SUP 0x01 // Command not supported
#define ISO15_ERROR_CMD_NOT_REC 0x02 // Command not recognized (e.g. parameter error)
#define ISO15_ERROR_CMD_OPTION 0x03 // Command option not supported
#define ISO15_ERROR_GENERIC 0x0F // No additional Info about this error
#define ISO15_ERROR_BLOCK_UNAVAILABLE 0x10
#define ISO15_ERROR_BLOCK_LOCKED_ALREADY 0x11 // cannot lock again
#define ISO15_ERROR_BLOCK_LOCKED 0x12 // cannot be changed
#define ISO15_ERROR_BLOCK_WRITE 0x13 // Writing was unsuccessful
// NOTE(review): "BLOCL" is a typo for "BLOCK", but the name is part of the
// public interface of this header — renaming it would break existing callers.
#define ISO15_ERROR_BLOCL_WRITELOCK 0x14 // Locking was unsuccessful

// COMMAND CODES
// 0x01-0x2C: mandatory/optional commands from ISO 15693-3;
// 0xA0-0xBD: custom (mostly NXP ICODE) commands.
#define ISO15_CMD_INVENTORY 0x01
#define ISO15_CMD_STAYQUIET 0x02
#define ISO15_CMD_READ 0x20
#define ISO15_CMD_WRITE 0x21
#define ISO15_CMD_LOCK 0x22
#define ISO15_CMD_READMULTI 0x23
#define ISO15_CMD_WRITEMULTI 0x24
#define ISO15_CMD_SELECT 0x25
#define ISO15_CMD_RESET 0x26
#define ISO15_CMD_WRITEAFI 0x27
#define ISO15_CMD_LOCKAFI 0x28
#define ISO15_CMD_WRITEDSFID 0x29
#define ISO15_CMD_LOCKDSFID 0x2A
#define ISO15_CMD_SYSINFO 0x2B
#define ISO15_CMD_SECSTATUS 0x2C
#define ISO15_CMD_INVENTORYREAD 0xA0
#define ISO15_CMD_FASTINVENTORYREAD 0xA1
#define ISO15_CMD_SETEAS 0xA2
#define ISO15_CMD_RESETEAS 0xA3
#define ISO15_CMD_LOCKEAS 0xA4
#define ISO15_CMD_EASALARM 0xA5
#define ISO15_CMD_PASSWORDPROTECTEAS 0xA6
#define ISO15_CMD_WRITEEASID 0xA7
#define ISO15_CMD_READEPC 0xA8
#define ISO15_CMD_GETNXPSYSTEMINFO 0xAB
#define ISO15_CMD_INVENTORYPAGEREAD 0xB0
#define ISO15_CMD_FASTINVENTORYPAGEREAD 0xB1
#define ISO15_CMD_GETRANDOMNUMBER 0xB2
#define ISO15_CMD_SETPASSWORD 0xB3
#define ISO15_CMD_WRITEPASSWORD 0xB4
#define ISO15_CMD_LOCKPASSWORD 0xB5
#define ISO15_CMD_PROTECTPAGE 0xB6
#define ISO15_CMD_LOCKPAGEPROTECTION 0xB7
#define ISO15_CMD_GETMULTIBLOCKPROTECTION 0xB8
#define ISO15_CMD_DESTROY 0xB9
#define ISO15_CMD_ENABLEPRIVACY 0xBA
#define ISO15_CMD_64BITPASSWORDPROTECTION 0xBB
#define ISO15_CMD_STAYQUIETPERSISTENT 0xBC
#define ISO15_CMD_READSIGNATURE 0xBD

//-----------------------------------------------------------------------------
// Map a sequence of octets (~layer 2 command) into the set of bits to feed
// to the FPGA, to transmit that command to the tag.
// Mode: highspeed && one subcarrier (ASK)
// Each entry is one sample: 1 = modulated (subcarrier on), -1 = unmodulated.
//-----------------------------------------------------------------------------

// The sampling rate is 106.353 ksps/s, for T = 18.8 us

// SOF defined as
// 1) Unmodulated time of 56.64us
// 2) 24 pulses of 423.75kHz
// 3) logic '1' (unmodulated for 18.88us followed by 8 pulses of 423.75kHz)
static const int Iso15693FrameSOF[] = {
	-1, -1, -1, -1,
	-1, -1, -1, -1,
	-1, -1, -1, -1,
	-1, -1, -1, -1,
	-1, -1, -1, -1,
	-1, -1, -1, -1,
	 1,  1,  1,  1,
	 1,  1,  1,  1,
	 1,  1,  1,  1,
	 1,  1,  1,  1,
	 1,  1,  1,  1,
	 1,  1,  1,  1,
	-1, -1, -1, -1,
	-1, -1, -1, -1,
	 1,  1,  1,  1,
	 1,  1,  1,  1
};

// One bit time: 8 samples modulated / 8 unmodulated (and vice versa for '1').
static const int Iso15693Logic0[] = {
	 1,  1,  1,  1,
	 1,  1,  1,  1,
	-1, -1, -1, -1,
	-1, -1, -1, -1
};
static const int Iso15693Logic1[] = {
	-1, -1, -1, -1,
	-1, -1, -1, -1,
	 1,  1,  1,  1,
	 1,  1,  1,  1
};

// EOF defined as
// 1) logic '0' (8 pulses of 423.75kHz followed by unmodulated for 18.88us)
// 2) 24 pulses of 423.75kHz
// 3) Unmodulated time of 56.64us
static const int Iso15693FrameEOF[] = {
	 1,  1,  1,  1,
	 1,  1,  1,  1,
	-1, -1, -1, -1,
	-1, -1, -1, -1,
	 1,  1,  1,  1,
	 1,  1,  1,  1,
	 1,  1,  1,  1,
	 1,  1,  1,  1,
	 1,  1,  1,  1,
	 1,  1,  1,  1,
	-1, -1, -1, -1,
	-1, -1, -1, -1,
	-1, -1, -1, -1,
	-1, -1, -1, -1,
	-1, -1, -1, -1,
	-1, -1, -1, -1
};

// Render a tag UID into a human-readable hex string.
// `dest` must be large enough for the formatted UID; returns `dest`
// (presumably — implementation is elsewhere; verify against the .c file).
char *iso15693_sprintUID(char *dest, uint8_t *uid);

#endif
{ "language": "C" }
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */

/*
 * PCI driver glue for the Intel QuickAssist (QAT) C3XXX Virtual Function
 * device: probe/remove, BAR mapping, DMA configuration, and registration of
 * the VF with the common ADF (acceleration driver framework) layer.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include "adf_c3xxxvf_hw_data.h"

#define ADF_SYSTEM_DEVICE(device_id) \
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}

/* PCI IDs this driver binds to: only the C3XXX IOV (VF) device. */
static const struct pci_device_id adf_pci_tbl[] = {
	ADF_SYSTEM_DEVICE(ADF_C3XXXIOV_PCI_DEVICE_ID),
	{0,}
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);

static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
static void adf_remove(struct pci_dev *dev);

static struct pci_driver adf_driver = {
	.id_table = adf_pci_tbl,
	.name = ADF_C3XXXVF_DEVICE_NAME,
	.probe = adf_probe,
	.remove = adf_remove,
};

/* Release PCI regions and disable the device (inverse of enable+request). */
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
	pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
	pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
}

/*
 * Tear down everything adf_probe() built up around the accel device:
 * BAR mappings, hw_device data, config table, debugfs dir, and the
 * device-manager table entry. Safe to call from partially-initialized
 * states (each step checks for what was actually set up).
 */
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
	struct adf_accel_dev *pf;
	int i;

	/* Unmap any BARs that were iomapped during probe */
	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];

		if (bar->virt_addr)
			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
	}

	if (accel_dev->hw_device) {
		/* Device-specific cleanup of the hw_data callbacks/tables */
		switch (accel_pci_dev->pci_dev->device) {
		case ADF_C3XXXIOV_PCI_DEVICE_ID:
			adf_clean_hw_data_c3xxxiov(accel_dev->hw_device);
			break;
		default:
			break;
		}
		kfree(accel_dev->hw_device);
		accel_dev->hw_device = NULL;
	}
	adf_cfg_dev_remove(accel_dev);
	debugfs_remove(accel_dev->debugfs_dir);
	/* Remove the VF from the devmgr table, paired with its PF */
	pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
	adf_devmgr_rm_dev(accel_dev, pf);
}

/*
 * Probe callback: allocate and register an accel device for the VF,
 * configure DMA masks, map BARs, and bring the device up through the
 * common ADF init/start sequence. On failure, unwinds via the goto
 * ladder in reverse order of acquisition.
 */
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct adf_accel_dev *accel_dev;
	struct adf_accel_dev *pf;
	struct adf_accel_pci *accel_pci_dev;
	struct adf_hw_device_data *hw_data;
	char name[ADF_DEVICE_NAME_LENGTH];
	unsigned int i, bar_nr;
	unsigned long bar_mask;
	int ret;

	switch (ent->device) {
	case ADF_C3XXXIOV_PCI_DEVICE_ID:
		break;
	default:
		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
		return -ENODEV;
	}

	/* NUMA-local allocation for the per-device state */
	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
				 dev_to_node(&pdev->dev));
	if (!accel_dev)
		return -ENOMEM;

	accel_dev->is_vf = true;
	pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
	accel_pci_dev = &accel_dev->accel_pci_dev;
	accel_pci_dev->pci_dev = pdev;

	/* Add accel device to accel table */
	if (adf_devmgr_add_dev(accel_dev, pf)) {
		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
		kfree(accel_dev);
		return -EFAULT;
	}
	INIT_LIST_HEAD(&accel_dev->crypto_list);

	accel_dev->owner = THIS_MODULE;
	/* Allocate and configure device configuration structure */
	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
			       dev_to_node(&pdev->dev));
	if (!hw_data) {
		ret = -ENOMEM;
		goto out_err;
	}
	accel_dev->hw_device = hw_data;
	adf_init_hw_data_c3xxxiov(accel_dev->hw_device);

	/* Get Accelerators and Accelerators Engines masks */
	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
	accel_pci_dev->sku = hw_data->get_sku(hw_data);

	/* Create dev top level debugfs entry */
	snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
		 ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
		 pdev->bus->number, PCI_SLOT(pdev->devfn),
		 PCI_FUNC(pdev->devfn));

	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);

	/* Create device configuration table */
	ret = adf_cfg_dev_add(accel_dev);
	if (ret)
		goto out_err;

	/* enable PCI device */
	if (pci_enable_device(pdev)) {
		ret = -EFAULT;
		goto out_err;
	}

	/* set dma identifier: prefer 64-bit, fall back to 32-bit */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
			dev_err(&pdev->dev, "No usable DMA configuration\n");
			ret = -EFAULT;
			goto out_err_disable;
		} else {
			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		}
	} else {
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	}

	if (pci_request_regions(pdev, ADF_C3XXXVF_DEVICE_NAME)) {
		ret = -EFAULT;
		goto out_err_disable;
	}

	/* Find and map all the device's BARS */
	i = 0;
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];

		bar->base_addr = pci_resource_start(pdev, bar_nr);
		if (!bar->base_addr)
			break;
		bar->size = pci_resource_len(pdev, bar_nr);
		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
		if (!bar->virt_addr) {
			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
			ret = -EFAULT;
			goto out_err_free_reg;
		}
	}
	pci_set_master(pdev);
	/* Completion for VF2PF request/response message exchange */
	init_completion(&accel_dev->vf.iov_msg_completion);

	ret = qat_crypto_dev_config(accel_dev);
	if (ret)
		goto out_err_free_reg;

	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);

	ret = adf_dev_init(accel_dev);
	if (ret)
		goto out_err_dev_shutdown;

	ret = adf_dev_start(accel_dev);
	if (ret)
		goto out_err_dev_stop;

	return ret;

out_err_dev_stop:
	adf_dev_stop(accel_dev);
out_err_dev_shutdown:
	adf_dev_shutdown(accel_dev);
out_err_free_reg:
	pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
	pci_disable_device(accel_pci_dev->pci_dev);
out_err:
	adf_cleanup_accel(accel_dev);
	kfree(accel_dev);
	return ret;
}

/* Remove callback: stop, shut down, and free the accel device. */
static void adf_remove(struct pci_dev *pdev)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);

	if (!accel_dev) {
		pr_err("QAT: Driver removal failed\n");
		return;
	}
	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);
	adf_cleanup_accel(accel_dev);
	adf_cleanup_pci_dev(accel_dev);
	kfree(accel_dev);
}

/* Module init: pull in the common intel_qat module, then register the driver. */
static int __init adfdrv_init(void)
{
	request_module("intel_qat");

	if (pci_register_driver(&adf_driver)) {
		pr_err("QAT: Driver initialization failed\n");
		return -EFAULT;
	}
	return 0;
}

/* Module exit: unregister the driver and drop VF mappings from the PF map. */
static void __exit adfdrv_release(void)
{
	pci_unregister_driver(&adf_driver);
	adf_clean_vf_map(true);
}

module_init(adfdrv_init);
module_exit(adfdrv_release);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
{ "language": "C" }
/******************************************************************** * COPYRIGHT: * Copyright (c) 1997-2001, International Business Machines Corporation and * others. All Rights Reserved. ********************************************************************/ /******************************************************************************** * * File CFINTST.H * * Modification History: * Name Description * Madhu Katragadda Converted to C *********************************************************************************/ /** * CollationFinnishTest is a third level test class. This tests the locale * specific primary, secondary and tertiary rules. For example, the ignorable * character '-' in string "black-bird". The en_US locale uses the default * collation rules as its sorting sequence. */ #ifndef _CFINCOLLTST #define _CFINCOLLTST #include "unicode/utypes.h" #if !UCONFIG_NO_COLLATION #include "cintltst.h" #define MAX_TOKEN_LEN 16 /* perform test with strength SECONDARY*/ static void TestPrimary(void); /* perform test with strength TERTIARY */ static void TestTertiary(void); #endif /* #if !UCONFIG_NO_COLLATION */ #endif
{ "language": "C" }
/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver * * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com) * * This program is dual-licensed; you may select either version 2 of * the GNU General Public License ("GPL") or BSD license ("BSD"). * * This Synopsys DWC XLGMAC software driver and associated documentation * (hereinafter the "Software") is an unsupported proprietary work of * Synopsys, Inc. unless otherwise expressly agreed to in writing between * Synopsys and you. The Software IS NOT an item of Licensed Software or a * Licensed Product under any End User Software License Agreement or * Agreement for Licensed Products with Synopsys or any supplement thereto. * Synopsys is a registered trademark of Synopsys, Inc. Other names included * in the SOFTWARE may be the trademarks of their respective owners. */ #include <linux/phy.h> #include <linux/mdio.h> #include <linux/clk.h> #include <linux/bitrev.h> #include <linux/crc32.h> #include <linux/dcbnl.h> #include "dwc-xlgmac.h" #include "dwc-xlgmac-reg.h" static int xlgmac_tx_complete(struct xlgmac_dma_desc *dma_desc) { return !XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, TX_NORMAL_DESC3_OWN_LEN); } static int xlgmac_disable_rx_csum(struct xlgmac_pdata *pdata) { u32 regval; regval = readl(pdata->mac_regs + MAC_RCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS, MAC_RCR_IPC_LEN, 0); writel(regval, pdata->mac_regs + MAC_RCR); return 0; } static int xlgmac_enable_rx_csum(struct xlgmac_pdata *pdata) { u32 regval; regval = readl(pdata->mac_regs + MAC_RCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS, MAC_RCR_IPC_LEN, 1); writel(regval, pdata->mac_regs + MAC_RCR); return 0; } static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, u8 *addr) { unsigned int mac_addr_hi, mac_addr_lo; mac_addr_hi = (addr[5] << 8) | (addr[4] << 0); mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | (addr[0] << 0); writel(mac_addr_hi, pdata->mac_regs + MAC_MACA0HR); 
writel(mac_addr_lo, pdata->mac_regs + MAC_MACA0LR); return 0; } static void xlgmac_set_mac_reg(struct xlgmac_pdata *pdata, struct netdev_hw_addr *ha, unsigned int *mac_reg) { unsigned int mac_addr_hi, mac_addr_lo; u8 *mac_addr; mac_addr_lo = 0; mac_addr_hi = 0; if (ha) { mac_addr = (u8 *)&mac_addr_lo; mac_addr[0] = ha->addr[0]; mac_addr[1] = ha->addr[1]; mac_addr[2] = ha->addr[2]; mac_addr[3] = ha->addr[3]; mac_addr = (u8 *)&mac_addr_hi; mac_addr[0] = ha->addr[4]; mac_addr[1] = ha->addr[5]; netif_dbg(pdata, drv, pdata->netdev, "adding mac address %pM at %#x\n", ha->addr, *mac_reg); mac_addr_hi = XLGMAC_SET_REG_BITS(mac_addr_hi, MAC_MACA1HR_AE_POS, MAC_MACA1HR_AE_LEN, 1); } writel(mac_addr_hi, pdata->mac_regs + *mac_reg); *mac_reg += MAC_MACA_INC; writel(mac_addr_lo, pdata->mac_regs + *mac_reg); *mac_reg += MAC_MACA_INC; } static int xlgmac_enable_rx_vlan_stripping(struct xlgmac_pdata *pdata) { u32 regval; regval = readl(pdata->mac_regs + MAC_VLANTR); /* Put the VLAN tag in the Rx descriptor */ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLRXS_POS, MAC_VLANTR_EVLRXS_LEN, 1); /* Don't check the VLAN type */ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_DOVLTC_POS, MAC_VLANTR_DOVLTC_LEN, 1); /* Check only C-TAG (0x8100) packets */ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ERSVLM_POS, MAC_VLANTR_ERSVLM_LEN, 0); /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ESVL_POS, MAC_VLANTR_ESVL_LEN, 0); /* Enable VLAN tag stripping */ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS, MAC_VLANTR_EVLS_LEN, 0x3); writel(regval, pdata->mac_regs + MAC_VLANTR); return 0; } static int xlgmac_disable_rx_vlan_stripping(struct xlgmac_pdata *pdata) { u32 regval; regval = readl(pdata->mac_regs + MAC_VLANTR); regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS, MAC_VLANTR_EVLS_LEN, 0); writel(regval, pdata->mac_regs + MAC_VLANTR); return 0; } static int xlgmac_enable_rx_vlan_filtering(struct xlgmac_pdata 
*pdata) { u32 regval; regval = readl(pdata->mac_regs + MAC_PFR); /* Enable VLAN filtering */ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS, MAC_PFR_VTFE_LEN, 1); writel(regval, pdata->mac_regs + MAC_PFR); regval = readl(pdata->mac_regs + MAC_VLANTR); /* Enable VLAN Hash Table filtering */ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTHM_POS, MAC_VLANTR_VTHM_LEN, 1); /* Disable VLAN tag inverse matching */ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTIM_POS, MAC_VLANTR_VTIM_LEN, 0); /* Only filter on the lower 12-bits of the VLAN tag */ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ETV_POS, MAC_VLANTR_ETV_LEN, 1); /* In order for the VLAN Hash Table filtering to be effective, * the VLAN tag identifier in the VLAN Tag Register must not * be zero. Set the VLAN tag identifier to "1" to enable the * VLAN Hash Table filtering. This implies that a VLAN tag of * 1 will always pass filtering. */ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VL_POS, MAC_VLANTR_VL_LEN, 1); writel(regval, pdata->mac_regs + MAC_VLANTR); return 0; } static int xlgmac_disable_rx_vlan_filtering(struct xlgmac_pdata *pdata) { u32 regval; regval = readl(pdata->mac_regs + MAC_PFR); /* Disable VLAN filtering */ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS, MAC_PFR_VTFE_LEN, 0); writel(regval, pdata->mac_regs + MAC_PFR); return 0; } static u32 xlgmac_vid_crc32_le(__le16 vid_le) { unsigned char *data = (unsigned char *)&vid_le; unsigned char data_byte = 0; u32 poly = 0xedb88320; u32 crc = ~0; u32 temp = 0; int i, bits; bits = get_bitmask_order(VLAN_VID_MASK); for (i = 0; i < bits; i++) { if ((i % 8) == 0) data_byte = data[i / 8]; temp = ((crc & 1) ^ data_byte) & 1; crc >>= 1; data_byte >>= 1; if (temp) crc ^= poly; } return crc; } static int xlgmac_update_vlan_hash_table(struct xlgmac_pdata *pdata) { u16 vlan_hash_table = 0; __le16 vid_le; u32 regval; u32 crc; u16 vid; /* Generate the VLAN Hash Table value */ for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) { /* Get 
the CRC32 value of the VLAN ID */ vid_le = cpu_to_le16(vid); crc = bitrev32(~xlgmac_vid_crc32_le(vid_le)) >> 28; vlan_hash_table |= (1 << crc); } regval = readl(pdata->mac_regs + MAC_VLANHTR); /* Set the VLAN Hash Table filtering register */ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANHTR_VLHT_POS, MAC_VLANHTR_VLHT_LEN, vlan_hash_table); writel(regval, pdata->mac_regs + MAC_VLANHTR); return 0; } static int xlgmac_set_promiscuous_mode(struct xlgmac_pdata *pdata, unsigned int enable) { unsigned int val = enable ? 1 : 0; u32 regval; regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR), MAC_PFR_PR_POS, MAC_PFR_PR_LEN); if (regval == val) return 0; netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n", enable ? "entering" : "leaving"); regval = readl(pdata->mac_regs + MAC_PFR); regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PR_POS, MAC_PFR_PR_LEN, val); writel(regval, pdata->mac_regs + MAC_PFR); /* Hardware will still perform VLAN filtering in promiscuous mode */ if (enable) { xlgmac_disable_rx_vlan_filtering(pdata); } else { if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) xlgmac_enable_rx_vlan_filtering(pdata); } return 0; } static int xlgmac_set_all_multicast_mode(struct xlgmac_pdata *pdata, unsigned int enable) { unsigned int val = enable ? 1 : 0; u32 regval; regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR), MAC_PFR_PM_POS, MAC_PFR_PM_LEN); if (regval == val) return 0; netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n", enable ? 
"entering" : "leaving"); regval = readl(pdata->mac_regs + MAC_PFR); regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PM_POS, MAC_PFR_PM_LEN, val); writel(regval, pdata->mac_regs + MAC_PFR); return 0; } static void xlgmac_set_mac_addn_addrs(struct xlgmac_pdata *pdata) { struct net_device *netdev = pdata->netdev; struct netdev_hw_addr *ha; unsigned int addn_macs; unsigned int mac_reg; mac_reg = MAC_MACA1HR; addn_macs = pdata->hw_feat.addn_mac; if (netdev_uc_count(netdev) > addn_macs) { xlgmac_set_promiscuous_mode(pdata, 1); } else { netdev_for_each_uc_addr(ha, netdev) { xlgmac_set_mac_reg(pdata, ha, &mac_reg); addn_macs--; } if (netdev_mc_count(netdev) > addn_macs) { xlgmac_set_all_multicast_mode(pdata, 1); } else { netdev_for_each_mc_addr(ha, netdev) { xlgmac_set_mac_reg(pdata, ha, &mac_reg); addn_macs--; } } } /* Clear remaining additional MAC address entries */ while (addn_macs--) xlgmac_set_mac_reg(pdata, NULL, &mac_reg); } static void xlgmac_set_mac_hash_table(struct xlgmac_pdata *pdata) { unsigned int hash_table_shift, hash_table_count; u32 hash_table[XLGMAC_MAC_HASH_TABLE_SIZE]; struct net_device *netdev = pdata->netdev; struct netdev_hw_addr *ha; unsigned int hash_reg; unsigned int i; u32 crc; hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7); hash_table_count = pdata->hw_feat.hash_table_size / 32; memset(hash_table, 0, sizeof(hash_table)); /* Build the MAC Hash Table register values */ netdev_for_each_uc_addr(ha, netdev) { crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)); crc >>= hash_table_shift; hash_table[crc >> 5] |= (1 << (crc & 0x1f)); } netdev_for_each_mc_addr(ha, netdev) { crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)); crc >>= hash_table_shift; hash_table[crc >> 5] |= (1 << (crc & 0x1f)); } /* Set the MAC Hash Table registers */ hash_reg = MAC_HTR0; for (i = 0; i < hash_table_count; i++) { writel(hash_table[i], pdata->mac_regs + hash_reg); hash_reg += MAC_HTR_INC; } } static int xlgmac_add_mac_addresses(struct xlgmac_pdata *pdata) { if 
(pdata->hw_feat.hash_table_size) xlgmac_set_mac_hash_table(pdata); else xlgmac_set_mac_addn_addrs(pdata); return 0; } static void xlgmac_config_mac_address(struct xlgmac_pdata *pdata) { u32 regval; xlgmac_set_mac_address(pdata, pdata->netdev->dev_addr); /* Filtering is done using perfect filtering and hash filtering */ if (pdata->hw_feat.hash_table_size) { regval = readl(pdata->mac_regs + MAC_PFR); regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HPF_POS, MAC_PFR_HPF_LEN, 1); regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HUC_POS, MAC_PFR_HUC_LEN, 1); regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HMC_POS, MAC_PFR_HMC_LEN, 1); writel(regval, pdata->mac_regs + MAC_PFR); } } static void xlgmac_config_jumbo_enable(struct xlgmac_pdata *pdata) { unsigned int val; u32 regval; val = (pdata->netdev->mtu > XLGMAC_STD_PACKET_MTU) ? 1 : 0; regval = readl(pdata->mac_regs + MAC_RCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_JE_POS, MAC_RCR_JE_LEN, val); writel(regval, pdata->mac_regs + MAC_RCR); } static void xlgmac_config_checksum_offload(struct xlgmac_pdata *pdata) { if (pdata->netdev->features & NETIF_F_RXCSUM) xlgmac_enable_rx_csum(pdata); else xlgmac_disable_rx_csum(pdata); } static void xlgmac_config_vlan_support(struct xlgmac_pdata *pdata) { u32 regval; regval = readl(pdata->mac_regs + MAC_VLANIR); /* Indicate that VLAN Tx CTAGs come from context descriptors */ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_CSVL_POS, MAC_VLANIR_CSVL_LEN, 0); regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLTI_POS, MAC_VLANIR_VLTI_LEN, 1); writel(regval, pdata->mac_regs + MAC_VLANIR); /* Set the current VLAN Hash Table register value */ xlgmac_update_vlan_hash_table(pdata); if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) xlgmac_enable_rx_vlan_filtering(pdata); else xlgmac_disable_rx_vlan_filtering(pdata); if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) xlgmac_enable_rx_vlan_stripping(pdata); else xlgmac_disable_rx_vlan_stripping(pdata); } static int 
xlgmac_config_rx_mode(struct xlgmac_pdata *pdata) { struct net_device *netdev = pdata->netdev; unsigned int pr_mode, am_mode; pr_mode = ((netdev->flags & IFF_PROMISC) != 0); am_mode = ((netdev->flags & IFF_ALLMULTI) != 0); xlgmac_set_promiscuous_mode(pdata, pr_mode); xlgmac_set_all_multicast_mode(pdata, am_mode); xlgmac_add_mac_addresses(pdata); return 0; } static void xlgmac_prepare_tx_stop(struct xlgmac_pdata *pdata, struct xlgmac_channel *channel) { unsigned int tx_dsr, tx_pos, tx_qidx; unsigned long tx_timeout; unsigned int tx_status; /* Calculate the status register to read and the position within */ if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) { tx_dsr = DMA_DSR0; tx_pos = (channel->queue_index * DMA_DSR_Q_LEN) + DMA_DSR0_TPS_START; } else { tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE; tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC); tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_LEN) + DMA_DSRX_TPS_START; } /* The Tx engine cannot be stopped if it is actively processing * descriptors. Wait for the Tx engine to enter the stopped or * suspended state. Don't wait forever though... 
*/ tx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ); while (time_before(jiffies, tx_timeout)) { tx_status = readl(pdata->mac_regs + tx_dsr); tx_status = XLGMAC_GET_REG_BITS(tx_status, tx_pos, DMA_DSR_TPS_LEN); if ((tx_status == DMA_TPS_STOPPED) || (tx_status == DMA_TPS_SUSPENDED)) break; usleep_range(500, 1000); } if (!time_before(jiffies, tx_timeout)) netdev_info(pdata->netdev, "timed out waiting for Tx DMA channel %u to stop\n", channel->queue_index); } static void xlgmac_enable_tx(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int i; u32 regval; /* Enable each Tx DMA channel */ channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->tx_ring) break; regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR)); regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS, DMA_CH_TCR_ST_LEN, 1); writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR)); } /* Enable each Tx queue */ for (i = 0; i < pdata->tx_q_count; i++) { regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS, MTL_Q_TQOMR_TXQEN_LEN, MTL_Q_ENABLED); writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); } /* Enable MAC Tx */ regval = readl(pdata->mac_regs + MAC_TCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS, MAC_TCR_TE_LEN, 1); writel(regval, pdata->mac_regs + MAC_TCR); } static void xlgmac_disable_tx(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int i; u32 regval; /* Prepare for Tx DMA channel stop */ channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->tx_ring) break; xlgmac_prepare_tx_stop(pdata, channel); } /* Disable MAC Tx */ regval = readl(pdata->mac_regs + MAC_TCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS, MAC_TCR_TE_LEN, 0); writel(regval, pdata->mac_regs + MAC_TCR); /* Disable each Tx queue */ for (i = 0; i < pdata->tx_q_count; i++) { regval = readl(XLGMAC_MTL_REG(pdata, i, 
MTL_Q_TQOMR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS, MTL_Q_TQOMR_TXQEN_LEN, 0); writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); } /* Disable each Tx DMA channel */ channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->tx_ring) break; regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR)); regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS, DMA_CH_TCR_ST_LEN, 0); writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR)); } } static void xlgmac_prepare_rx_stop(struct xlgmac_pdata *pdata, unsigned int queue) { unsigned int rx_status, prxq, rxqsts; unsigned long rx_timeout; /* The Rx engine cannot be stopped if it is actively processing * packets. Wait for the Rx queue to empty the Rx fifo. Don't * wait forever though... */ rx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ); while (time_before(jiffies, rx_timeout)) { rx_status = readl(XLGMAC_MTL_REG(pdata, queue, MTL_Q_RQDR)); prxq = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_PRXQ_POS, MTL_Q_RQDR_PRXQ_LEN); rxqsts = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_RXQSTS_POS, MTL_Q_RQDR_RXQSTS_LEN); if ((prxq == 0) && (rxqsts == 0)) break; usleep_range(500, 1000); } if (!time_before(jiffies, rx_timeout)) netdev_info(pdata->netdev, "timed out waiting for Rx queue %u to empty\n", queue); } static void xlgmac_enable_rx(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int regval, i; /* Enable each Rx DMA channel */ channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->rx_ring) break; regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR)); regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS, DMA_CH_RCR_SR_LEN, 1); writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR)); } /* Enable each Rx queue */ regval = 0; for (i = 0; i < pdata->rx_q_count; i++) regval |= (0x02 << (i << 1)); writel(regval, pdata->mac_regs + MAC_RQC0R); /* Enable MAC Rx */ regval = readl(pdata->mac_regs + MAC_RCR); regval = 
XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS, MAC_RCR_DCRCC_LEN, 1); regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS, MAC_RCR_CST_LEN, 1); regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS, MAC_RCR_ACS_LEN, 1); regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS, MAC_RCR_RE_LEN, 1); writel(regval, pdata->mac_regs + MAC_RCR); } static void xlgmac_disable_rx(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int i; u32 regval; /* Disable MAC Rx */ regval = readl(pdata->mac_regs + MAC_RCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS, MAC_RCR_DCRCC_LEN, 0); regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS, MAC_RCR_CST_LEN, 0); regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS, MAC_RCR_ACS_LEN, 0); regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS, MAC_RCR_RE_LEN, 0); writel(regval, pdata->mac_regs + MAC_RCR); /* Prepare for Rx DMA channel stop */ for (i = 0; i < pdata->rx_q_count; i++) xlgmac_prepare_rx_stop(pdata, i); /* Disable each Rx queue */ writel(0, pdata->mac_regs + MAC_RQC0R); /* Disable each Rx DMA channel */ channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->rx_ring) break; regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR)); regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS, DMA_CH_RCR_SR_LEN, 0); writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR)); } } static void xlgmac_tx_start_xmit(struct xlgmac_channel *channel, struct xlgmac_ring *ring) { struct xlgmac_pdata *pdata = channel->pdata; struct xlgmac_desc_data *desc_data; /* Make sure everything is written before the register write */ wmb(); /* Issue a poll command to Tx DMA by writing address * of next immediate free descriptor */ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur); writel(lower_32_bits(desc_data->dma_desc_addr), XLGMAC_DMA_REG(channel, DMA_CH_TDTR_LO)); /* Start the Tx timer */ if (pdata->tx_usecs && !channel->tx_timer_active) { channel->tx_timer_active = 1; 
mod_timer(&channel->tx_timer, jiffies + usecs_to_jiffies(pdata->tx_usecs)); } ring->tx.xmit_more = 0; } static void xlgmac_dev_xmit(struct xlgmac_channel *channel) { struct xlgmac_pdata *pdata = channel->pdata; struct xlgmac_ring *ring = channel->tx_ring; unsigned int tso_context, vlan_context; struct xlgmac_desc_data *desc_data; struct xlgmac_dma_desc *dma_desc; struct xlgmac_pkt_info *pkt_info; unsigned int csum, tso, vlan; int start_index = ring->cur; int cur_index = ring->cur; unsigned int tx_set_ic; int i; pkt_info = &ring->pkt_info; csum = XLGMAC_GET_REG_BITS(pkt_info->attributes, TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS, TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN); tso = XLGMAC_GET_REG_BITS(pkt_info->attributes, TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS, TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN); vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes, TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN); if (tso && (pkt_info->mss != ring->tx.cur_mss)) tso_context = 1; else tso_context = 0; if (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag)) vlan_context = 1; else vlan_context = 0; /* Determine if an interrupt should be generated for this Tx: * Interrupt: * - Tx frame count exceeds the frame count setting * - Addition of Tx frame count to the frame count since the * last interrupt was set exceeds the frame count setting * No interrupt: * - No frame count setting specified (ethtool -C ethX tx-frames 0) * - Addition of Tx frame count to the frame count since the * last interrupt was set does not exceed the frame count setting */ ring->coalesce_count += pkt_info->tx_packets; if (!pdata->tx_frames) tx_set_ic = 0; else if (pkt_info->tx_packets > pdata->tx_frames) tx_set_ic = 1; else if ((ring->coalesce_count % pdata->tx_frames) < pkt_info->tx_packets) tx_set_ic = 1; else tx_set_ic = 0; desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); dma_desc = desc_data->dma_desc; /* Create a context descriptor if this is a TSO pkt_info */ if (tso_context || vlan_context) { if 
(tso_context) { netif_dbg(pdata, tx_queued, pdata->netdev, "TSO context descriptor, mss=%u\n", pkt_info->mss); /* Set the MSS size */ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc2, TX_CONTEXT_DESC2_MSS_POS, TX_CONTEXT_DESC2_MSS_LEN, pkt_info->mss); /* Mark it as a CONTEXT descriptor */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_CONTEXT_DESC3_CTXT_POS, TX_CONTEXT_DESC3_CTXT_LEN, 1); /* Indicate this descriptor contains the MSS */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_CONTEXT_DESC3_TCMSSV_POS, TX_CONTEXT_DESC3_TCMSSV_LEN, 1); ring->tx.cur_mss = pkt_info->mss; } if (vlan_context) { netif_dbg(pdata, tx_queued, pdata->netdev, "VLAN context descriptor, ctag=%u\n", pkt_info->vlan_ctag); /* Mark it as a CONTEXT descriptor */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_CONTEXT_DESC3_CTXT_POS, TX_CONTEXT_DESC3_CTXT_LEN, 1); /* Set the VLAN tag */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_CONTEXT_DESC3_VT_POS, TX_CONTEXT_DESC3_VT_LEN, pkt_info->vlan_ctag); /* Indicate this descriptor contains the VLAN tag */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_CONTEXT_DESC3_VLTV_POS, TX_CONTEXT_DESC3_VLTV_LEN, 1); ring->tx.cur_vlan_ctag = pkt_info->vlan_ctag; } cur_index++; desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); dma_desc = desc_data->dma_desc; } /* Update buffer address (for TSO this is the header) */ dma_desc->desc0 = cpu_to_le32(lower_32_bits(desc_data->skb_dma)); dma_desc->desc1 = cpu_to_le32(upper_32_bits(desc_data->skb_dma)); /* Update the buffer length */ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc2, TX_NORMAL_DESC2_HL_B1L_POS, TX_NORMAL_DESC2_HL_B1L_LEN, desc_data->skb_dma_len); /* VLAN tag insertion check */ if (vlan) { dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc2, TX_NORMAL_DESC2_VTIR_POS, TX_NORMAL_DESC2_VTIR_LEN, TX_NORMAL_DESC2_VLAN_INSERT); pdata->stats.tx_vlan_packets++; } /* Timestamp enablement check */ if 
(XLGMAC_GET_REG_BITS(pkt_info->attributes, TX_PACKET_ATTRIBUTES_PTP_POS, TX_PACKET_ATTRIBUTES_PTP_LEN)) dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc2, TX_NORMAL_DESC2_TTSE_POS, TX_NORMAL_DESC2_TTSE_LEN, 1); /* Mark it as First Descriptor */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_FD_POS, TX_NORMAL_DESC3_FD_LEN, 1); /* Mark it as a NORMAL descriptor */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_CTXT_POS, TX_NORMAL_DESC3_CTXT_LEN, 0); /* Set OWN bit if not the first descriptor */ if (cur_index != start_index) dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, TX_NORMAL_DESC3_OWN_LEN, 1); if (tso) { /* Enable TSO */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_TSE_POS, TX_NORMAL_DESC3_TSE_LEN, 1); dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_TCPPL_POS, TX_NORMAL_DESC3_TCPPL_LEN, pkt_info->tcp_payload_len); dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_TCPHDRLEN_POS, TX_NORMAL_DESC3_TCPHDRLEN_LEN, pkt_info->tcp_header_len / 4); pdata->stats.tx_tso_packets++; } else { /* Enable CRC and Pad Insertion */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_CPC_POS, TX_NORMAL_DESC3_CPC_LEN, 0); /* Enable HW CSUM */ if (csum) dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_CIC_POS, TX_NORMAL_DESC3_CIC_LEN, 0x3); /* Set the total length to be transmitted */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_FL_POS, TX_NORMAL_DESC3_FL_LEN, pkt_info->length); } for (i = cur_index - start_index + 1; i < pkt_info->desc_count; i++) { cur_index++; desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); dma_desc = desc_data->dma_desc; /* Update buffer address */ dma_desc->desc0 = cpu_to_le32(lower_32_bits(desc_data->skb_dma)); dma_desc->desc1 = cpu_to_le32(upper_32_bits(desc_data->skb_dma)); /* Update the buffer 
length */ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc2, TX_NORMAL_DESC2_HL_B1L_POS, TX_NORMAL_DESC2_HL_B1L_LEN, desc_data->skb_dma_len); /* Set OWN bit */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, TX_NORMAL_DESC3_OWN_LEN, 1); /* Mark it as NORMAL descriptor */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_CTXT_POS, TX_NORMAL_DESC3_CTXT_LEN, 0); /* Enable HW CSUM */ if (csum) dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_CIC_POS, TX_NORMAL_DESC3_CIC_LEN, 0x3); } /* Set LAST bit for the last descriptor */ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_LD_POS, TX_NORMAL_DESC3_LD_LEN, 1); /* Set IC bit based on Tx coalescing settings */ if (tx_set_ic) dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc2, TX_NORMAL_DESC2_IC_POS, TX_NORMAL_DESC2_IC_LEN, 1); /* Save the Tx info to report back during cleanup */ desc_data->tx.packets = pkt_info->tx_packets; desc_data->tx.bytes = pkt_info->tx_bytes; /* In case the Tx DMA engine is running, make sure everything * is written to the descriptor(s) before setting the OWN bit * for the first descriptor */ dma_wmb(); /* Set OWN bit for the first descriptor */ desc_data = XLGMAC_GET_DESC_DATA(ring, start_index); dma_desc = desc_data->dma_desc; dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, TX_NORMAL_DESC3_OWN_LEN, 1); if (netif_msg_tx_queued(pdata)) xlgmac_dump_tx_desc(pdata, ring, start_index, pkt_info->desc_count, 1); /* Make sure ownership is written to the descriptor */ smp_wmb(); ring->cur = cur_index + 1; if (!pkt_info->skb->xmit_more || netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev, channel->queue_index))) xlgmac_tx_start_xmit(channel, ring); else ring->tx.xmit_more = 1; XLGMAC_PR("%s: descriptors %u to %u written\n", channel->name, start_index & (ring->dma_desc_count - 1), (ring->cur - 1) & (ring->dma_desc_count - 1)); } static void 
xlgmac_get_rx_tstamp(struct xlgmac_pkt_info *pkt_info, struct xlgmac_dma_desc *dma_desc) { u32 tsa, tsd; u64 nsec; tsa = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_CONTEXT_DESC3_TSA_POS, RX_CONTEXT_DESC3_TSA_LEN); tsd = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_CONTEXT_DESC3_TSD_POS, RX_CONTEXT_DESC3_TSD_LEN); if (tsa && !tsd) { nsec = le32_to_cpu(dma_desc->desc1); nsec <<= 32; nsec |= le32_to_cpu(dma_desc->desc0); if (nsec != 0xffffffffffffffffULL) { pkt_info->rx_tstamp = nsec; pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS, RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN, 1); } } } static void xlgmac_tx_desc_reset(struct xlgmac_desc_data *desc_data) { struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc; /* Reset the Tx descriptor * Set buffer 1 (lo) address to zero * Set buffer 1 (hi) address to zero * Reset all other control bits (IC, TTSE, B2L & B1L) * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc) */ dma_desc->desc0 = 0; dma_desc->desc1 = 0; dma_desc->desc2 = 0; dma_desc->desc3 = 0; /* Make sure ownership is written to the descriptor */ dma_wmb(); } static void xlgmac_tx_desc_init(struct xlgmac_channel *channel) { struct xlgmac_ring *ring = channel->tx_ring; struct xlgmac_desc_data *desc_data; int start_index = ring->cur; int i; /* Initialze all descriptors */ for (i = 0; i < ring->dma_desc_count; i++) { desc_data = XLGMAC_GET_DESC_DATA(ring, i); /* Initialize Tx descriptor */ xlgmac_tx_desc_reset(desc_data); } /* Update the total number of Tx descriptors */ writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_TDRLR)); /* Update the starting address of descriptor ring */ desc_data = XLGMAC_GET_DESC_DATA(ring, start_index); writel(upper_32_bits(desc_data->dma_desc_addr), XLGMAC_DMA_REG(channel, DMA_CH_TDLR_HI)); writel(lower_32_bits(desc_data->dma_desc_addr), XLGMAC_DMA_REG(channel, DMA_CH_TDLR_LO)); } static void xlgmac_rx_desc_reset(struct xlgmac_pdata *pdata, struct xlgmac_desc_data 
*desc_data, unsigned int index) { struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc; unsigned int rx_frames = pdata->rx_frames; unsigned int rx_usecs = pdata->rx_usecs; dma_addr_t hdr_dma, buf_dma; unsigned int inte; if (!rx_usecs && !rx_frames) { /* No coalescing, interrupt for every descriptor */ inte = 1; } else { /* Set interrupt based on Rx frame coalescing setting */ if (rx_frames && !((index + 1) % rx_frames)) inte = 1; else inte = 0; } /* Reset the Rx descriptor * Set buffer 1 (lo) address to header dma address (lo) * Set buffer 1 (hi) address to header dma address (hi) * Set buffer 2 (lo) address to buffer dma address (lo) * Set buffer 2 (hi) address to buffer dma address (hi) and * set control bits OWN and INTE */ hdr_dma = desc_data->rx.hdr.dma_base + desc_data->rx.hdr.dma_off; buf_dma = desc_data->rx.buf.dma_base + desc_data->rx.buf.dma_off; dma_desc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma)); dma_desc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma)); dma_desc->desc2 = cpu_to_le32(lower_32_bits(buf_dma)); dma_desc->desc3 = cpu_to_le32(upper_32_bits(buf_dma)); dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, RX_NORMAL_DESC3_INTE_POS, RX_NORMAL_DESC3_INTE_LEN, inte); /* Since the Rx DMA engine is likely running, make sure everything * is written to the descriptor(s) before setting the OWN bit * for the descriptor */ dma_wmb(); dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( dma_desc->desc3, RX_NORMAL_DESC3_OWN_POS, RX_NORMAL_DESC3_OWN_LEN, 1); /* Make sure ownership is written to the descriptor */ dma_wmb(); } static void xlgmac_rx_desc_init(struct xlgmac_channel *channel) { struct xlgmac_pdata *pdata = channel->pdata; struct xlgmac_ring *ring = channel->rx_ring; unsigned int start_index = ring->cur; struct xlgmac_desc_data *desc_data; unsigned int i; /* Initialize all descriptors */ for (i = 0; i < ring->dma_desc_count; i++) { desc_data = XLGMAC_GET_DESC_DATA(ring, i); /* Initialize Rx descriptor */ xlgmac_rx_desc_reset(pdata, desc_data, i); } /* 
Update the total number of Rx descriptors */
	writel(ring->dma_desc_count - 1,
	       XLGMAC_DMA_REG(channel, DMA_CH_RDRLR));

	/* Update the starting address of descriptor ring */
	desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
	writel(upper_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_RDLR_HI));
	writel(lower_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_RDLR_LO));

	/* Update the Rx Descriptor Tail Pointer to the last descriptor of
	 * the ring (all descriptors were just reset to hardware-owned above)
	 */
	desc_data = XLGMAC_GET_DESC_DATA(ring, start_index +
					 ring->dma_desc_count - 1);
	writel(lower_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
}

/* Return nonzero if the descriptor is a context descriptor (CTXT bit set) */
static int xlgmac_is_context_desc(struct xlgmac_dma_desc *dma_desc)
{
	/* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
	return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				      TX_NORMAL_DESC3_CTXT_POS,
				      TX_NORMAL_DESC3_CTXT_LEN);
}

/* Return nonzero if the descriptor has the LD (last descriptor) bit set */
static int xlgmac_is_last_desc(struct xlgmac_dma_desc *dma_desc)
{
	/* Rx and Tx share LD bit, so check TDES3.LD bit */
	return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
				      TX_NORMAL_DESC3_LD_POS,
				      TX_NORMAL_DESC3_LD_LEN);
}

/* Disable Tx flow control: clear EHFC on every Rx queue and clear the TFE
 * bit in each per-queue MAC Tx flow control register.  Always returns 0.
 */
static int xlgmac_disable_tx_flow_control(struct xlgmac_pdata *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, regval;
	unsigned int i;

	/* Clear MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS,
					     MTL_Q_RQOMR_EHFC_LEN, 0);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
	}

	/* Clear MAC flow control; only the first
	 * XLGMAC_MAX_FLOW_CONTROL_QUEUES Tx queues have a QnTFCR register
	 */
	max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		regval = readl(pdata->mac_regs + reg);
		regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_TFE_POS,
					     MAC_Q0TFCR_TFE_LEN, 0);
		writel(regval, pdata->mac_regs + reg);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

/* Enable Tx flow control: set EHFC on every Rx queue and program TFE plus
 * the maximum pause time in each per-queue MAC Tx flow control register.
 * Always returns 0.
 */
static int xlgmac_enable_tx_flow_control(struct xlgmac_pdata *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, regval;
	unsigned int i;

	/* Set MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS,
					     MTL_Q_RQOMR_EHFC_LEN, 1);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
	}

	/* Set MAC flow control */
	max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		regval = readl(pdata->mac_regs + reg);

		/* Enable transmit flow control */
		regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_TFE_POS,
					     MAC_Q0TFCR_TFE_LEN, 1);

		/* Set pause time (0xffff = maximum) */
		regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_PT_POS,
					     MAC_Q0TFCR_PT_LEN, 0xffff);

		writel(regval, pdata->mac_regs + reg);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

/* Disable Rx flow control by clearing RFE in the MAC Rx flow control
 * register.  Always returns 0.
 */
static int xlgmac_disable_rx_flow_control(struct xlgmac_pdata *pdata)
{
	u32 regval;

	regval = readl(pdata->mac_regs + MAC_RFCR);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS,
				     MAC_RFCR_RFE_LEN, 0);
	writel(regval, pdata->mac_regs + MAC_RFCR);

	return 0;
}

/* Enable Rx flow control by setting RFE in the MAC Rx flow control
 * register.  Always returns 0.
 */
static int xlgmac_enable_rx_flow_control(struct xlgmac_pdata *pdata)
{
	u32 regval;

	regval = readl(pdata->mac_regs + MAC_RFCR);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS,
				     MAC_RFCR_RFE_LEN, 1);
	writel(regval, pdata->mac_regs + MAC_RFCR);

	return 0;
}

/* Apply the current pdata->tx_pause setting to the hardware */
static int xlgmac_config_tx_flow_control(struct xlgmac_pdata *pdata)
{
	if (pdata->tx_pause)
		xlgmac_enable_tx_flow_control(pdata);
	else
		xlgmac_disable_tx_flow_control(pdata);

	return 0;
}

/* Apply the current pdata->rx_pause setting to the hardware */
static int xlgmac_config_rx_flow_control(struct xlgmac_pdata *pdata)
{
	if (pdata->rx_pause)
		xlgmac_enable_rx_flow_control(pdata);
	else
		xlgmac_disable_rx_flow_control(pdata);

	return 0;
}

/* Program the Rx interrupt watchdog timer (pdata->rx_riwt) into each
 * channel; the scan stops at the first channel without an Rx ring.
 * Always returns 0.
 */
static int xlgmac_config_rx_coalesce(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RIWT));
regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RIWT_RWT_POS,
					     DMA_CH_RIWT_RWT_LEN,
					     pdata->rx_riwt);
		writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RIWT));
	}

	return 0;
}

/* Configure both Tx and Rx flow control from the current pdata settings */
static void xlgmac_config_flow_control(struct xlgmac_pdata *pdata)
{
	xlgmac_config_tx_flow_control(pdata);
	xlgmac_config_rx_flow_control(pdata);
}

/* Set FEP (forward error packets) on every Rx queue */
static void xlgmac_config_rx_fep_enable(struct xlgmac_pdata *pdata)
{
	unsigned int i;
	u32 regval;

	for (i = 0; i < pdata->rx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FEP_POS,
					     MTL_Q_RQOMR_FEP_LEN, 1);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
	}
}

/* Set FUP (forward undersized packets) on every Rx queue */
static void xlgmac_config_rx_fup_enable(struct xlgmac_pdata *pdata)
{
	unsigned int i;
	u32 regval;

	for (i = 0; i < pdata->rx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FUP_POS,
					     MTL_Q_RQOMR_FUP_LEN, 1);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
	}
}

/* Tx coalescing is handled per-packet in the xmit path; nothing to
 * program here.  Always returns 0.
 */
static int xlgmac_config_tx_coalesce(struct xlgmac_pdata *pdata)
{
	return 0;
}

/* Program the Rx buffer size (pdata->rx_buf_size) into each channel;
 * stops at the first channel without an Rx ring.
 */
static void xlgmac_config_rx_buffer_size(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_RBSZ_POS,
					     DMA_CH_RCR_RBSZ_LEN,
					     pdata->rx_buf_size);
		writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
	}
}

/* Enable TSO (TSE bit) on each Tx channel if the hardware feature is
 * present; stops at the first channel without a Tx ring.
 */
static void xlgmac_config_tso_mode(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		if (pdata->hw_feat.tso) {
			regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
			regval = XLGMAC_SET_REG_BITS(regval,
						     DMA_CH_TCR_TSE_POS,
						     DMA_CH_TCR_TSE_LEN, 1);
			writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
		}
	}
}

/* Enable split-header (SPH) mode on each Rx channel and program the
 * header size (HDSMS) in the MAC Rx control register.
 */
static void
xlgmac_config_sph_mode(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_SPH_POS,
					     DMA_CH_CR_SPH_LEN, 1);
		writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR));
	}

	regval = readl(pdata->mac_regs + MAC_RCR);
	regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_HDSMS_POS,
				     MAC_RCR_HDSMS_LEN,
				     XLGMAC_SPH_HDSMS_SIZE);
	writel(regval, pdata->mac_regs + MAC_RCR);
}

/* Convert microseconds to a Rx interrupt watchdog timer (RIWT) value */
static unsigned int xlgmac_usec_to_riwt(struct xlgmac_pdata *pdata,
					unsigned int usec)
{
	unsigned long rate;
	unsigned int ret;

	rate = pdata->sysclk_rate;

	/* Convert the input usec value to the watchdog timer value.  Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( usec * ( system_clock_mhz / 10^6 ) ) / 256
	 */
	ret = (usec * (rate / 1000000)) / 256;

	return ret;
}

/* Convert a Rx interrupt watchdog timer (RIWT) value to microseconds */
static unsigned int xlgmac_riwt_to_usec(struct xlgmac_pdata *pdata,
					unsigned int riwt)
{
	unsigned long rate;
	unsigned int ret;

	rate = pdata->sysclk_rate;

	/* Convert the input watchdog timer value to the usec value.  Each
	 * watchdog timer value is equivalent to 256 clock cycles.
* Calculate the required value as: * ( riwt * 256 ) / ( system_clock_mhz / 10^6 ) */ ret = (riwt * 256) / (rate / 1000000); return ret; } static int xlgmac_config_rx_threshold(struct xlgmac_pdata *pdata, unsigned int val) { unsigned int i; u32 regval; for (i = 0; i < pdata->rx_q_count; i++) { regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RTC_POS, MTL_Q_RQOMR_RTC_LEN, val); writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); } return 0; } static void xlgmac_config_mtl_mode(struct xlgmac_pdata *pdata) { unsigned int i; u32 regval; /* Set Tx to weighted round robin scheduling algorithm */ regval = readl(pdata->mac_regs + MTL_OMR); regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_ETSALG_POS, MTL_OMR_ETSALG_LEN, MTL_ETSALG_WRR); writel(regval, pdata->mac_regs + MTL_OMR); /* Set Tx traffic classes to use WRR algorithm with equal weights */ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_ETSCR_TSA_POS, MTL_TC_ETSCR_TSA_LEN, MTL_TSA_ETS); writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR)); regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_QWR_QW_POS, MTL_TC_QWR_QW_LEN, 1); writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR)); } /* Set Rx to strict priority algorithm */ regval = readl(pdata->mac_regs + MTL_OMR); regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_RAA_POS, MTL_OMR_RAA_LEN, MTL_RAA_SP); writel(regval, pdata->mac_regs + MTL_OMR); } static void xlgmac_config_queue_mapping(struct xlgmac_pdata *pdata) { unsigned int ppq, ppq_extra, prio, prio_queues; unsigned int qptc, qptc_extra, queue; unsigned int reg, regval; unsigned int mask; unsigned int i, j; /* Map the MTL Tx Queues to Traffic Classes * Note: Tx Queues >= Traffic Classes */ qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt; qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt; for (i = 0, queue = 0; i < 
pdata->hw_feat.tc_cnt; i++) { for (j = 0; j < qptc; j++) { netif_dbg(pdata, drv, pdata->netdev, "TXq%u mapped to TC%u\n", queue, i); regval = readl(XLGMAC_MTL_REG(pdata, queue, MTL_Q_TQOMR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_Q2TCMAP_POS, MTL_Q_TQOMR_Q2TCMAP_LEN, i); writel(regval, XLGMAC_MTL_REG(pdata, queue, MTL_Q_TQOMR)); queue++; } if (i < qptc_extra) { netif_dbg(pdata, drv, pdata->netdev, "TXq%u mapped to TC%u\n", queue, i); regval = readl(XLGMAC_MTL_REG(pdata, queue, MTL_Q_TQOMR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_Q2TCMAP_POS, MTL_Q_TQOMR_Q2TCMAP_LEN, i); writel(regval, XLGMAC_MTL_REG(pdata, queue, MTL_Q_TQOMR)); queue++; } } /* Map the 8 VLAN priority values to available MTL Rx queues */ prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS, pdata->rx_q_count); ppq = IEEE_8021QAZ_MAX_TCS / prio_queues; ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues; reg = MAC_RQC2R; regval = 0; for (i = 0, prio = 0; i < prio_queues;) { mask = 0; for (j = 0; j < ppq; j++) { netif_dbg(pdata, drv, pdata->netdev, "PRIO%u mapped to RXq%u\n", prio, i); mask |= (1 << prio); prio++; } if (i < ppq_extra) { netif_dbg(pdata, drv, pdata->netdev, "PRIO%u mapped to RXq%u\n", prio, i); mask |= (1 << prio); prio++; } regval |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3)); if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues)) continue; writel(regval, pdata->mac_regs + reg); reg += MAC_RQC2_INC; regval = 0; } /* Configure one to one, MTL Rx queue to DMA Rx channel mapping * ie Q0 <--> CH0, Q1 <--> CH1 ... 
Q11 <--> CH11 */ reg = MTL_RQDCM0R; regval = readl(pdata->mac_regs + reg); regval |= (MTL_RQDCM0R_Q0MDMACH | MTL_RQDCM0R_Q1MDMACH | MTL_RQDCM0R_Q2MDMACH | MTL_RQDCM0R_Q3MDMACH); writel(regval, pdata->mac_regs + reg); reg += MTL_RQDCM_INC; regval = readl(pdata->mac_regs + reg); regval |= (MTL_RQDCM1R_Q4MDMACH | MTL_RQDCM1R_Q5MDMACH | MTL_RQDCM1R_Q6MDMACH | MTL_RQDCM1R_Q7MDMACH); writel(regval, pdata->mac_regs + reg); reg += MTL_RQDCM_INC; regval = readl(pdata->mac_regs + reg); regval |= (MTL_RQDCM2R_Q8MDMACH | MTL_RQDCM2R_Q9MDMACH | MTL_RQDCM2R_Q10MDMACH | MTL_RQDCM2R_Q11MDMACH); writel(regval, pdata->mac_regs + reg); } static unsigned int xlgmac_calculate_per_queue_fifo( unsigned int fifo_size, unsigned int queue_count) { unsigned int q_fifo_size; unsigned int p_fifo; /* Calculate the configured fifo size */ q_fifo_size = 1 << (fifo_size + 7); /* The configured value may not be the actual amount of fifo RAM */ q_fifo_size = min_t(unsigned int, XLGMAC_MAX_FIFO, q_fifo_size); q_fifo_size = q_fifo_size / queue_count; /* Each increment in the queue fifo size represents 256 bytes of * fifo, with 0 representing 256 bytes. Distribute the fifo equally * between the queues. 
*/
	p_fifo = q_fifo_size / 256;
	if (p_fifo)
		p_fifo--;

	return p_fifo;
}

/* Split the Tx fifo evenly across the Tx queues and program the per-queue
 * size (TQS, in 256-byte units minus one) into each queue's MTL register.
 */
static void xlgmac_config_tx_fifo_size(struct xlgmac_pdata *pdata)
{
	unsigned int fifo_size;
	unsigned int i;
	u32 regval;

	fifo_size = xlgmac_calculate_per_queue_fifo(
				pdata->hw_feat.tx_fifo_size,
				pdata->tx_q_count);

	for (i = 0; i < pdata->tx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TQS_POS,
					     MTL_Q_TQOMR_TQS_LEN, fifo_size);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
	}

	netif_info(pdata, drv, pdata->netdev,
		   "%d Tx hardware queues, %d byte fifo per queue\n",
		   pdata->tx_q_count, ((fifo_size + 1) * 256));
}

/* Split the Rx fifo evenly across the Rx queues and program the per-queue
 * size (RQS) into each queue's MTL register.
 */
static void xlgmac_config_rx_fifo_size(struct xlgmac_pdata *pdata)
{
	unsigned int fifo_size;
	unsigned int i;
	u32 regval;

	fifo_size = xlgmac_calculate_per_queue_fifo(
				pdata->hw_feat.rx_fifo_size,
				pdata->rx_q_count);

	for (i = 0; i < pdata->rx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RQS_POS,
					     MTL_Q_RQOMR_RQS_LEN, fifo_size);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
	}

	netif_info(pdata, drv, pdata->netdev,
		   "%d Rx hardware queues, %d byte fifo per queue\n",
		   pdata->rx_q_count, ((fifo_size + 1) * 256));
}

/* Program the fifo fill levels at which pause frames are sent (RFA) and
 * stopped (RFD) for every Rx queue.
 */
static void xlgmac_config_flow_control_threshold(struct xlgmac_pdata *pdata)
{
	unsigned int i;
	u32 regval;

	for (i = 0; i < pdata->rx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR));
		/* Activate flow control when less than 4k left in fifo */
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFA_POS,
					     MTL_Q_RQFCR_RFA_LEN, 2);
		/* De-activate flow control when more than 6k left in fifo */
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFD_POS,
					     MTL_Q_RQFCR_RFD_LEN, 4);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR));
	}
}

/* Set the Tx threshold (TTC) to 'val' on every Tx queue.  Returns 0. */
static int xlgmac_config_tx_threshold(struct xlgmac_pdata *pdata,
				      unsigned int val)
{
	unsigned int i;
	u32 regval;

	for (i = 0; i < pdata->tx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TTC_POS,
					     MTL_Q_TQOMR_TTC_LEN, val);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
	}

	return 0;
}

/* Set Rx store-and-forward mode (RSF) to 'val' on every Rx queue.
 * Returns 0.
 */
static int xlgmac_config_rsf_mode(struct xlgmac_pdata *pdata,
				  unsigned int val)
{
	unsigned int i;
	u32 regval;

	for (i = 0; i < pdata->rx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RSF_POS,
					     MTL_Q_RQOMR_RSF_LEN, val);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
	}

	return 0;
}

/* Set Tx store-and-forward mode (TSF) to 'val' on every Tx queue.
 * Returns 0.
 */
static int xlgmac_config_tsf_mode(struct xlgmac_pdata *pdata,
				  unsigned int val)
{
	unsigned int i;
	u32 regval;

	for (i = 0; i < pdata->tx_q_count; i++) {
		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TSF_POS,
					     MTL_Q_TQOMR_TSF_LEN, val);
		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
	}

	return 0;
}

/* Program the operate-on-second-packet mode (pdata->tx_osp_mode) into each
 * Tx channel; stops at the first channel without a Tx ring.  Returns 0.
 */
static int xlgmac_config_osp_mode(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_OSP_POS,
					     DMA_CH_TCR_OSP_LEN,
					     pdata->tx_osp_mode);
		writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
	}

	return 0;
}

/* Program the PBLx8 setting (pdata->pblx8) into every channel (note: all
 * channels, no ring check).  Returns 0.
 */
static int xlgmac_config_pblx8(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	u32 regval;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR));
		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_PBLX8_POS,
					     DMA_CH_CR_PBLX8_LEN,
					     pdata->pblx8);
		writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR));
	}

	return 0;
}

/* Read back the Tx PBL (programmable burst length) field from the first
 * channel's Tx control register.
 */
static int xlgmac_get_tx_pbl_val(struct xlgmac_pdata *pdata)
{
	u32 regval;

	regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_TCR));
	regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_TCR_PBL_POS,
DMA_CH_TCR_PBL_LEN); return regval; } static int xlgmac_config_tx_pbl_val(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int i; u32 regval; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->tx_ring) break; regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR)); regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_PBL_POS, DMA_CH_TCR_PBL_LEN, pdata->tx_pbl); writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR)); } return 0; } static int xlgmac_get_rx_pbl_val(struct xlgmac_pdata *pdata) { u32 regval; regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_RCR)); regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_RCR_PBL_POS, DMA_CH_RCR_PBL_LEN); return regval; } static int xlgmac_config_rx_pbl_val(struct xlgmac_pdata *pdata) { struct xlgmac_channel *channel; unsigned int i; u32 regval; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->rx_ring) break; regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR)); regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_PBL_POS, DMA_CH_RCR_PBL_LEN, pdata->rx_pbl); writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR)); } return 0; } static u64 xlgmac_mmc_read(struct xlgmac_pdata *pdata, unsigned int reg_lo) { bool read_hi; u64 val; switch (reg_lo) { /* These registers are always 64 bit */ case MMC_TXOCTETCOUNT_GB_LO: case MMC_TXOCTETCOUNT_G_LO: case MMC_RXOCTETCOUNT_GB_LO: case MMC_RXOCTETCOUNT_G_LO: read_hi = true; break; default: read_hi = false; } val = (u64)readl(pdata->mac_regs + reg_lo); if (read_hi) val |= ((u64)readl(pdata->mac_regs + reg_lo + 4) << 32); return val; } static void xlgmac_tx_mmc_int(struct xlgmac_pdata *pdata) { unsigned int mmc_isr = readl(pdata->mac_regs + MMC_TISR); struct xlgmac_stats *stats = &pdata->stats; if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXOCTETCOUNT_GB_POS, MMC_TISR_TXOCTETCOUNT_GB_LEN)) stats->txoctetcount_gb += xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, 
MMC_TISR_TXFRAMECOUNT_GB_POS, MMC_TISR_TXFRAMECOUNT_GB_LEN)) stats->txframecount_gb += xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXBROADCASTFRAMES_G_POS, MMC_TISR_TXBROADCASTFRAMES_G_LEN)) stats->txbroadcastframes_g += xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXMULTICASTFRAMES_G_POS, MMC_TISR_TXMULTICASTFRAMES_G_LEN)) stats->txmulticastframes_g += xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX64OCTETS_GB_POS, MMC_TISR_TX64OCTETS_GB_LEN)) stats->tx64octets_gb += xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX65TO127OCTETS_GB_POS, MMC_TISR_TX65TO127OCTETS_GB_LEN)) stats->tx65to127octets_gb += xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX128TO255OCTETS_GB_POS, MMC_TISR_TX128TO255OCTETS_GB_LEN)) stats->tx128to255octets_gb += xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX256TO511OCTETS_GB_POS, MMC_TISR_TX256TO511OCTETS_GB_LEN)) stats->tx256to511octets_gb += xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX512TO1023OCTETS_GB_POS, MMC_TISR_TX512TO1023OCTETS_GB_LEN)) stats->tx512to1023octets_gb += xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX1024TOMAXOCTETS_GB_POS, MMC_TISR_TX1024TOMAXOCTETS_GB_LEN)) stats->tx1024tomaxoctets_gb += xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXUNICASTFRAMES_GB_POS, MMC_TISR_TXUNICASTFRAMES_GB_LEN)) stats->txunicastframes_gb += xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXMULTICASTFRAMES_GB_POS, MMC_TISR_TXMULTICASTFRAMES_GB_LEN)) stats->txmulticastframes_gb += xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, 
MMC_TISR_TXBROADCASTFRAMES_GB_POS, MMC_TISR_TXBROADCASTFRAMES_GB_LEN)) stats->txbroadcastframes_g += xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXUNDERFLOWERROR_POS, MMC_TISR_TXUNDERFLOWERROR_LEN)) stats->txunderflowerror += xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXOCTETCOUNT_G_POS, MMC_TISR_TXOCTETCOUNT_G_LEN)) stats->txoctetcount_g += xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXFRAMECOUNT_G_POS, MMC_TISR_TXFRAMECOUNT_G_LEN)) stats->txframecount_g += xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXPAUSEFRAMES_POS, MMC_TISR_TXPAUSEFRAMES_LEN)) stats->txpauseframes += xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXVLANFRAMES_G_POS, MMC_TISR_TXVLANFRAMES_G_LEN)) stats->txvlanframes_g += xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); } static void xlgmac_rx_mmc_int(struct xlgmac_pdata *pdata) { unsigned int mmc_isr = readl(pdata->mac_regs + MMC_RISR); struct xlgmac_stats *stats = &pdata->stats; if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXFRAMECOUNT_GB_POS, MMC_RISR_RXFRAMECOUNT_GB_LEN)) stats->rxframecount_gb += xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXOCTETCOUNT_GB_POS, MMC_RISR_RXOCTETCOUNT_GB_LEN)) stats->rxoctetcount_gb += xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXOCTETCOUNT_G_POS, MMC_RISR_RXOCTETCOUNT_G_LEN)) stats->rxoctetcount_g += xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXBROADCASTFRAMES_G_POS, MMC_RISR_RXBROADCASTFRAMES_G_LEN)) stats->rxbroadcastframes_g += xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXMULTICASTFRAMES_G_POS, MMC_RISR_RXMULTICASTFRAMES_G_LEN)) stats->rxmulticastframes_g += xlgmac_mmc_read(pdata, 
MMC_RXMULTICASTFRAMES_G_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXCRCERROR_POS, MMC_RISR_RXCRCERROR_LEN)) stats->rxcrcerror += xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXRUNTERROR_POS, MMC_RISR_RXRUNTERROR_LEN)) stats->rxrunterror += xlgmac_mmc_read(pdata, MMC_RXRUNTERROR); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXJABBERERROR_POS, MMC_RISR_RXJABBERERROR_LEN)) stats->rxjabbererror += xlgmac_mmc_read(pdata, MMC_RXJABBERERROR); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXUNDERSIZE_G_POS, MMC_RISR_RXUNDERSIZE_G_LEN)) stats->rxundersize_g += xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXOVERSIZE_G_POS, MMC_RISR_RXOVERSIZE_G_LEN)) stats->rxoversize_g += xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX64OCTETS_GB_POS, MMC_RISR_RX64OCTETS_GB_LEN)) stats->rx64octets_gb += xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX65TO127OCTETS_GB_POS, MMC_RISR_RX65TO127OCTETS_GB_LEN)) stats->rx65to127octets_gb += xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX128TO255OCTETS_GB_POS, MMC_RISR_RX128TO255OCTETS_GB_LEN)) stats->rx128to255octets_gb += xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX256TO511OCTETS_GB_POS, MMC_RISR_RX256TO511OCTETS_GB_LEN)) stats->rx256to511octets_gb += xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX512TO1023OCTETS_GB_POS, MMC_RISR_RX512TO1023OCTETS_GB_LEN)) stats->rx512to1023octets_gb += xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX1024TOMAXOCTETS_GB_POS, MMC_RISR_RX1024TOMAXOCTETS_GB_LEN)) stats->rx1024tomaxoctets_gb += xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXUNICASTFRAMES_G_POS, MMC_RISR_RXUNICASTFRAMES_G_LEN)) stats->rxunicastframes_g 
+= xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXLENGTHERROR_POS, MMC_RISR_RXLENGTHERROR_LEN)) stats->rxlengtherror += xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXOUTOFRANGETYPE_POS, MMC_RISR_RXOUTOFRANGETYPE_LEN)) stats->rxoutofrangetype += xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXPAUSEFRAMES_POS, MMC_RISR_RXPAUSEFRAMES_LEN)) stats->rxpauseframes += xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXFIFOOVERFLOW_POS, MMC_RISR_RXFIFOOVERFLOW_LEN)) stats->rxfifooverflow += xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXVLANFRAMES_GB_POS, MMC_RISR_RXVLANFRAMES_GB_LEN)) stats->rxvlanframes_gb += xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); if (XLGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXWATCHDOGERROR_POS, MMC_RISR_RXWATCHDOGERROR_LEN)) stats->rxwatchdogerror += xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR); } static void xlgmac_read_mmc_stats(struct xlgmac_pdata *pdata) { struct xlgmac_stats *stats = &pdata->stats; u32 regval; /* Freeze counters */ regval = readl(pdata->mac_regs + MMC_CR); regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS, MMC_CR_MCF_LEN, 1); writel(regval, pdata->mac_regs + MMC_CR); stats->txoctetcount_gb += xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); stats->txframecount_gb += xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); stats->txbroadcastframes_g += xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); stats->txmulticastframes_g += xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); stats->tx64octets_gb += xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); stats->tx65to127octets_gb += xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); stats->tx128to255octets_gb += xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); stats->tx256to511octets_gb += xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); stats->tx512to1023octets_gb += 
xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); stats->tx1024tomaxoctets_gb += xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); stats->txunicastframes_gb += xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); stats->txmulticastframes_gb += xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); stats->txbroadcastframes_g += xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); stats->txunderflowerror += xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); stats->txoctetcount_g += xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); stats->txframecount_g += xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); stats->txpauseframes += xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); stats->txvlanframes_g += xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); stats->rxframecount_gb += xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); stats->rxoctetcount_gb += xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); stats->rxoctetcount_g += xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); stats->rxbroadcastframes_g += xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); stats->rxmulticastframes_g += xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); stats->rxcrcerror += xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO); stats->rxrunterror += xlgmac_mmc_read(pdata, MMC_RXRUNTERROR); stats->rxjabbererror += xlgmac_mmc_read(pdata, MMC_RXJABBERERROR); stats->rxundersize_g += xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G); stats->rxoversize_g += xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G); stats->rx64octets_gb += xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); stats->rx65to127octets_gb += xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); stats->rx128to255octets_gb += xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); stats->rx256to511octets_gb += xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); stats->rx512to1023octets_gb += xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); stats->rx1024tomaxoctets_gb += xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); stats->rxunicastframes_g += xlgmac_mmc_read(pdata, 
MMC_RXUNICASTFRAMES_G_LO); stats->rxlengtherror += xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO); stats->rxoutofrangetype += xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); stats->rxpauseframes += xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); stats->rxfifooverflow += xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); stats->rxvlanframes_gb += xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); stats->rxwatchdogerror += xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR); /* Un-freeze counters */ regval = readl(pdata->mac_regs + MMC_CR); regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS, MMC_CR_MCF_LEN, 0); writel(regval, pdata->mac_regs + MMC_CR); } static void xlgmac_config_mmc(struct xlgmac_pdata *pdata) { u32 regval; regval = readl(pdata->mac_regs + MMC_CR); /* Set counters to reset on read */ regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_ROR_POS, MMC_CR_ROR_LEN, 1); /* Reset the counters */ regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_CR_POS, MMC_CR_CR_LEN, 1); writel(regval, pdata->mac_regs + MMC_CR); } static int xlgmac_write_rss_reg(struct xlgmac_pdata *pdata, unsigned int type, unsigned int index, unsigned int val) { unsigned int wait; int ret = 0; u32 regval; mutex_lock(&pdata->rss_mutex); regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR), MAC_RSSAR_OB_POS, MAC_RSSAR_OB_LEN); if (regval) { ret = -EBUSY; goto unlock; } writel(val, pdata->mac_regs + MAC_RSSDR); regval = readl(pdata->mac_regs + MAC_RSSAR); regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_RSSIA_POS, MAC_RSSAR_RSSIA_LEN, index); regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_ADDRT_POS, MAC_RSSAR_ADDRT_LEN, type); regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_CT_POS, MAC_RSSAR_CT_LEN, 0); regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_OB_POS, MAC_RSSAR_OB_LEN, 1); writel(regval, pdata->mac_regs + MAC_RSSAR); wait = 1000; while (wait--) { regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR), MAC_RSSAR_OB_POS, MAC_RSSAR_OB_LEN); if (!regval) goto unlock; usleep_range(1000, 1500); } ret = 
-EBUSY; unlock: mutex_unlock(&pdata->rss_mutex); return ret; } static int xlgmac_write_rss_hash_key(struct xlgmac_pdata *pdata) { unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32); unsigned int *key = (unsigned int *)&pdata->rss_key; int ret; while (key_regs--) { ret = xlgmac_write_rss_reg(pdata, XLGMAC_RSS_HASH_KEY_TYPE, key_regs, *key++); if (ret) return ret; } return 0; } static int xlgmac_write_rss_lookup_table(struct xlgmac_pdata *pdata) { unsigned int i; int ret; for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { ret = xlgmac_write_rss_reg(pdata, XLGMAC_RSS_LOOKUP_TABLE_TYPE, i, pdata->rss_table[i]); if (ret) return ret; } return 0; } static int xlgmac_set_rss_hash_key(struct xlgmac_pdata *pdata, const u8 *key) { memcpy(pdata->rss_key, key, sizeof(pdata->rss_key)); return xlgmac_write_rss_hash_key(pdata); } static int xlgmac_set_rss_lookup_table(struct xlgmac_pdata *pdata, const u32 *table) { unsigned int i; u32 tval; for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { tval = table[i]; pdata->rss_table[i] = XLGMAC_SET_REG_BITS( pdata->rss_table[i], MAC_RSSDR_DMCH_POS, MAC_RSSDR_DMCH_LEN, tval); } return xlgmac_write_rss_lookup_table(pdata); } static int xlgmac_enable_rss(struct xlgmac_pdata *pdata) { u32 regval; int ret; if (!pdata->hw_feat.rss) return -EOPNOTSUPP; /* Program the hash key */ ret = xlgmac_write_rss_hash_key(pdata); if (ret) return ret; /* Program the lookup table */ ret = xlgmac_write_rss_lookup_table(pdata); if (ret) return ret; /* Set the RSS options */ writel(pdata->rss_options, pdata->mac_regs + MAC_RSSCR); /* Enable RSS */ regval = readl(pdata->mac_regs + MAC_RSSCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS, MAC_RSSCR_RSSE_LEN, 1); writel(regval, pdata->mac_regs + MAC_RSSCR); return 0; } static int xlgmac_disable_rss(struct xlgmac_pdata *pdata) { u32 regval; if (!pdata->hw_feat.rss) return -EOPNOTSUPP; regval = readl(pdata->mac_regs + MAC_RSSCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS, 
MAC_RSSCR_RSSE_LEN, 0); writel(regval, pdata->mac_regs + MAC_RSSCR); return 0; } static void xlgmac_config_rss(struct xlgmac_pdata *pdata) { int ret; if (!pdata->hw_feat.rss) return; if (pdata->netdev->features & NETIF_F_RXHASH) ret = xlgmac_enable_rss(pdata); else ret = xlgmac_disable_rss(pdata); if (ret) netdev_err(pdata->netdev, "error configuring RSS, RSS disabled\n"); } static void xlgmac_enable_dma_interrupts(struct xlgmac_pdata *pdata) { unsigned int dma_ch_isr, dma_ch_ier; struct xlgmac_channel *channel; unsigned int i; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { /* Clear all the interrupts which are set */ dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR)); writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR)); /* Clear all interrupt enable bits */ dma_ch_ier = 0; /* Enable following interrupts * NIE - Normal Interrupt Summary Enable * AIE - Abnormal Interrupt Summary Enable * FBEE - Fatal Bus Error Enable */ dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_NIE_POS, DMA_CH_IER_NIE_LEN, 1); dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_AIE_POS, DMA_CH_IER_AIE_LEN, 1); dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_FBEE_POS, DMA_CH_IER_FBEE_LEN, 1); if (channel->tx_ring) { /* Enable the following Tx interrupts * TIE - Transmit Interrupt Enable (unless using * per channel interrupts) */ if (!pdata->per_channel_irq) dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_TIE_POS, DMA_CH_IER_TIE_LEN, 1); } if (channel->rx_ring) { /* Enable following Rx interrupts * RBUE - Receive Buffer Unavailable Enable * RIE - Receive Interrupt Enable (unless using * per channel interrupts) */ dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_RBUE_POS, DMA_CH_IER_RBUE_LEN, 1); if (!pdata->per_channel_irq) dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_RIE_POS, DMA_CH_IER_RIE_LEN, 1); } writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_IER)); } } static void 
xlgmac_enable_mtl_interrupts(struct xlgmac_pdata *pdata) { unsigned int q_count, i; unsigned int mtl_q_isr; q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); for (i = 0; i < q_count; i++) { /* Clear all the interrupts which are set */ mtl_q_isr = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR)); writel(mtl_q_isr, XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR)); /* No MTL interrupts to be enabled */ writel(0, XLGMAC_MTL_REG(pdata, i, MTL_Q_IER)); } } static void xlgmac_enable_mac_interrupts(struct xlgmac_pdata *pdata) { unsigned int mac_ier = 0; u32 regval; /* Enable Timestamp interrupt */ mac_ier = XLGMAC_SET_REG_BITS(mac_ier, MAC_IER_TSIE_POS, MAC_IER_TSIE_LEN, 1); writel(mac_ier, pdata->mac_regs + MAC_IER); /* Enable all counter interrupts */ regval = readl(pdata->mac_regs + MMC_RIER); regval = XLGMAC_SET_REG_BITS(regval, MMC_RIER_ALL_INTERRUPTS_POS, MMC_RIER_ALL_INTERRUPTS_LEN, 0xffffffff); writel(regval, pdata->mac_regs + MMC_RIER); regval = readl(pdata->mac_regs + MMC_TIER); regval = XLGMAC_SET_REG_BITS(regval, MMC_TIER_ALL_INTERRUPTS_POS, MMC_TIER_ALL_INTERRUPTS_LEN, 0xffffffff); writel(regval, pdata->mac_regs + MMC_TIER); } static int xlgmac_set_xlgmii_25000_speed(struct xlgmac_pdata *pdata) { u32 regval; regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR), MAC_TCR_SS_POS, MAC_TCR_SS_LEN); if (regval == 0x1) return 0; regval = readl(pdata->mac_regs + MAC_TCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS, MAC_TCR_SS_LEN, 0x1); writel(regval, pdata->mac_regs + MAC_TCR); return 0; } static int xlgmac_set_xlgmii_40000_speed(struct xlgmac_pdata *pdata) { u32 regval; regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR), MAC_TCR_SS_POS, MAC_TCR_SS_LEN); if (regval == 0) return 0; regval = readl(pdata->mac_regs + MAC_TCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS, MAC_TCR_SS_LEN, 0); writel(regval, pdata->mac_regs + MAC_TCR); return 0; } static int xlgmac_set_xlgmii_50000_speed(struct xlgmac_pdata *pdata) { u32 regval; regval = 
XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR), MAC_TCR_SS_POS, MAC_TCR_SS_LEN); if (regval == 0x2) return 0; regval = readl(pdata->mac_regs + MAC_TCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS, MAC_TCR_SS_LEN, 0x2); writel(regval, pdata->mac_regs + MAC_TCR); return 0; } static int xlgmac_set_xlgmii_100000_speed(struct xlgmac_pdata *pdata) { u32 regval; regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR), MAC_TCR_SS_POS, MAC_TCR_SS_LEN); if (regval == 0x3) return 0; regval = readl(pdata->mac_regs + MAC_TCR); regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS, MAC_TCR_SS_LEN, 0x3); writel(regval, pdata->mac_regs + MAC_TCR); return 0; } static void xlgmac_config_mac_speed(struct xlgmac_pdata *pdata) { switch (pdata->phy_speed) { case SPEED_100000: xlgmac_set_xlgmii_100000_speed(pdata); break; case SPEED_50000: xlgmac_set_xlgmii_50000_speed(pdata); break; case SPEED_40000: xlgmac_set_xlgmii_40000_speed(pdata); break; case SPEED_25000: xlgmac_set_xlgmii_25000_speed(pdata); break; } } static int xlgmac_dev_read(struct xlgmac_channel *channel) { struct xlgmac_pdata *pdata = channel->pdata; struct xlgmac_ring *ring = channel->rx_ring; struct net_device *netdev = pdata->netdev; struct xlgmac_desc_data *desc_data; struct xlgmac_dma_desc *dma_desc; struct xlgmac_pkt_info *pkt_info; unsigned int err, etlt, l34t; desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur); dma_desc = desc_data->dma_desc; pkt_info = &ring->pkt_info; /* Check for data availability */ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_OWN_POS, RX_NORMAL_DESC3_OWN_LEN)) return 1; /* Make sure descriptor fields are read after reading the OWN bit */ dma_rmb(); if (netif_msg_rx_status(pdata)) xlgmac_dump_rx_desc(pdata, ring, ring->cur); if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_CTXT_POS, RX_NORMAL_DESC3_CTXT_LEN)) { /* Timestamp Context Descriptor */ xlgmac_get_rx_tstamp(pkt_info, dma_desc); pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, 
RX_PACKET_ATTRIBUTES_CONTEXT_POS, RX_PACKET_ATTRIBUTES_CONTEXT_LEN, 1); pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS, RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN, 0); return 0; } /* Normal Descriptor, be sure Context Descriptor bit is off */ pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_CONTEXT_POS, RX_PACKET_ATTRIBUTES_CONTEXT_LEN, 0); /* Indicate if a Context Descriptor is next */ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_CDA_POS, RX_NORMAL_DESC3_CDA_LEN)) pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS, RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN, 1); /* Get the header length */ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_FD_POS, RX_NORMAL_DESC3_FD_LEN)) { desc_data->rx.hdr_len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc2, RX_NORMAL_DESC2_HL_POS, RX_NORMAL_DESC2_HL_LEN); if (desc_data->rx.hdr_len) pdata->stats.rx_split_header_packets++; } /* Get the RSS hash */ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_RSV_POS, RX_NORMAL_DESC3_RSV_LEN)) { pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_RSS_HASH_POS, RX_PACKET_ATTRIBUTES_RSS_HASH_LEN, 1); pkt_info->rss_hash = le32_to_cpu(dma_desc->desc1); l34t = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_L34T_POS, RX_NORMAL_DESC3_L34T_LEN); switch (l34t) { case RX_DESC3_L34T_IPV4_TCP: case RX_DESC3_L34T_IPV4_UDP: case RX_DESC3_L34T_IPV6_TCP: case RX_DESC3_L34T_IPV6_UDP: pkt_info->rss_hash_type = PKT_HASH_TYPE_L4; break; default: pkt_info->rss_hash_type = PKT_HASH_TYPE_L3; } } /* Get the pkt_info length */ desc_data->rx.len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_PL_POS, RX_NORMAL_DESC3_PL_LEN); if (!XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_LD_POS, RX_NORMAL_DESC3_LD_LEN)) { /* Not all the data has been transferred for this pkt_info */ pkt_info->attributes = 
XLGMAC_SET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_INCOMPLETE_POS, RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN, 1); return 0; } /* This is the last of the data for this pkt_info */ pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_INCOMPLETE_POS, RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN, 0); /* Set checksum done indicator as appropriate */ if (netdev->features & NETIF_F_RXCSUM) pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_CSUM_DONE_POS, RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN, 1); /* Check for errors (only valid in last descriptor) */ err = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_ES_POS, RX_NORMAL_DESC3_ES_LEN); etlt = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_ETLT_POS, RX_NORMAL_DESC3_ETLT_LEN); netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt); if (!err || !etlt) { /* No error if err is 0 or etlt is 0 */ if ((etlt == 0x09) && (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) { pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN, 1); pkt_info->vlan_ctag = XLGMAC_GET_REG_BITS_LE(dma_desc->desc0, RX_NORMAL_DESC0_OVT_POS, RX_NORMAL_DESC0_OVT_LEN); netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n", pkt_info->vlan_ctag); } } else { if ((etlt == 0x05) || (etlt == 0x06)) pkt_info->attributes = XLGMAC_SET_REG_BITS( pkt_info->attributes, RX_PACKET_ATTRIBUTES_CSUM_DONE_POS, RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN, 0); else pkt_info->errors = XLGMAC_SET_REG_BITS( pkt_info->errors, RX_PACKET_ERRORS_FRAME_POS, RX_PACKET_ERRORS_FRAME_LEN, 1); } XLGMAC_PR("%s - descriptor=%u (cur=%d)\n", channel->name, ring->cur & (ring->dma_desc_count - 1), ring->cur); return 0; } static int xlgmac_enable_int(struct xlgmac_channel *channel, enum xlgmac_int int_id) { unsigned int dma_ch_ier; dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER)); switch (int_id) { case XLGMAC_INT_DMA_CH_SR_TI: 
dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_TIE_POS, DMA_CH_IER_TIE_LEN, 1); break; case XLGMAC_INT_DMA_CH_SR_TPS: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_TXSE_POS, DMA_CH_IER_TXSE_LEN, 1); break; case XLGMAC_INT_DMA_CH_SR_TBU: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_TBUE_POS, DMA_CH_IER_TBUE_LEN, 1); break; case XLGMAC_INT_DMA_CH_SR_RI: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_RIE_POS, DMA_CH_IER_RIE_LEN, 1); break; case XLGMAC_INT_DMA_CH_SR_RBU: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_RBUE_POS, DMA_CH_IER_RBUE_LEN, 1); break; case XLGMAC_INT_DMA_CH_SR_RPS: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_RSE_POS, DMA_CH_IER_RSE_LEN, 1); break; case XLGMAC_INT_DMA_CH_SR_TI_RI: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_TIE_POS, DMA_CH_IER_TIE_LEN, 1); dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_RIE_POS, DMA_CH_IER_RIE_LEN, 1); break; case XLGMAC_INT_DMA_CH_SR_FBE: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_FBEE_POS, DMA_CH_IER_FBEE_LEN, 1); break; case XLGMAC_INT_DMA_ALL: dma_ch_ier |= channel->saved_ier; break; default: return -1; } writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER)); return 0; } static int xlgmac_disable_int(struct xlgmac_channel *channel, enum xlgmac_int int_id) { unsigned int dma_ch_ier; dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER)); switch (int_id) { case XLGMAC_INT_DMA_CH_SR_TI: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_TIE_POS, DMA_CH_IER_TIE_LEN, 0); break; case XLGMAC_INT_DMA_CH_SR_TPS: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_TXSE_POS, DMA_CH_IER_TXSE_LEN, 0); break; case XLGMAC_INT_DMA_CH_SR_TBU: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_TBUE_POS, DMA_CH_IER_TBUE_LEN, 0); break; case XLGMAC_INT_DMA_CH_SR_RI: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_RIE_POS, DMA_CH_IER_RIE_LEN, 0); break; case XLGMAC_INT_DMA_CH_SR_RBU: dma_ch_ier = 
XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_RBUE_POS, DMA_CH_IER_RBUE_LEN, 0); break; case XLGMAC_INT_DMA_CH_SR_RPS: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_RSE_POS, DMA_CH_IER_RSE_LEN, 0); break; case XLGMAC_INT_DMA_CH_SR_TI_RI: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_TIE_POS, DMA_CH_IER_TIE_LEN, 0); dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_RIE_POS, DMA_CH_IER_RIE_LEN, 0); break; case XLGMAC_INT_DMA_CH_SR_FBE: dma_ch_ier = XLGMAC_SET_REG_BITS( dma_ch_ier, DMA_CH_IER_FBEE_POS, DMA_CH_IER_FBEE_LEN, 0); break; case XLGMAC_INT_DMA_ALL: channel->saved_ier = dma_ch_ier & XLGMAC_DMA_INTERRUPT_MASK; dma_ch_ier &= ~XLGMAC_DMA_INTERRUPT_MASK; break; default: return -1; } writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER)); return 0; } static int xlgmac_flush_tx_queues(struct xlgmac_pdata *pdata) { unsigned int i, count; u32 regval; for (i = 0; i < pdata->tx_q_count; i++) { regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS, MTL_Q_TQOMR_FTQ_LEN, 1); writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); } /* Poll Until Poll Condition */ for (i = 0; i < pdata->tx_q_count; i++) { count = 2000; regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); regval = XLGMAC_GET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS, MTL_Q_TQOMR_FTQ_LEN); while (--count && regval) usleep_range(500, 600); if (!count) return -EBUSY; } return 0; } static void xlgmac_config_dma_bus(struct xlgmac_pdata *pdata) { u32 regval; regval = readl(pdata->mac_regs + DMA_SBMR); /* Set enhanced addressing mode */ regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_EAME_POS, DMA_SBMR_EAME_LEN, 1); /* Set the System Bus mode */ regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_UNDEF_POS, DMA_SBMR_UNDEF_LEN, 1); regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_BLEN_256_POS, DMA_SBMR_BLEN_256_LEN, 1); writel(regval, pdata->mac_regs + DMA_SBMR); } static int xlgmac_hw_init(struct xlgmac_pdata *pdata) { struct xlgmac_desc_ops 
*desc_ops = &pdata->desc_ops; int ret; /* Flush Tx queues */ ret = xlgmac_flush_tx_queues(pdata); if (ret) return ret; /* Initialize DMA related features */ xlgmac_config_dma_bus(pdata); xlgmac_config_osp_mode(pdata); xlgmac_config_pblx8(pdata); xlgmac_config_tx_pbl_val(pdata); xlgmac_config_rx_pbl_val(pdata); xlgmac_config_rx_coalesce(pdata); xlgmac_config_tx_coalesce(pdata); xlgmac_config_rx_buffer_size(pdata); xlgmac_config_tso_mode(pdata); xlgmac_config_sph_mode(pdata); xlgmac_config_rss(pdata); desc_ops->tx_desc_init(pdata); desc_ops->rx_desc_init(pdata); xlgmac_enable_dma_interrupts(pdata); /* Initialize MTL related features */ xlgmac_config_mtl_mode(pdata); xlgmac_config_queue_mapping(pdata); xlgmac_config_tsf_mode(pdata, pdata->tx_sf_mode); xlgmac_config_rsf_mode(pdata, pdata->rx_sf_mode); xlgmac_config_tx_threshold(pdata, pdata->tx_threshold); xlgmac_config_rx_threshold(pdata, pdata->rx_threshold); xlgmac_config_tx_fifo_size(pdata); xlgmac_config_rx_fifo_size(pdata); xlgmac_config_flow_control_threshold(pdata); xlgmac_config_rx_fep_enable(pdata); xlgmac_config_rx_fup_enable(pdata); xlgmac_enable_mtl_interrupts(pdata); /* Initialize MAC related features */ xlgmac_config_mac_address(pdata); xlgmac_config_rx_mode(pdata); xlgmac_config_jumbo_enable(pdata); xlgmac_config_flow_control(pdata); xlgmac_config_mac_speed(pdata); xlgmac_config_checksum_offload(pdata); xlgmac_config_vlan_support(pdata); xlgmac_config_mmc(pdata); xlgmac_enable_mac_interrupts(pdata); return 0; } static int xlgmac_hw_exit(struct xlgmac_pdata *pdata) { unsigned int count = 2000; u32 regval; /* Issue a software reset */ regval = readl(pdata->mac_regs + DMA_MR); regval = XLGMAC_SET_REG_BITS(regval, DMA_MR_SWR_POS, DMA_MR_SWR_LEN, 1); writel(regval, pdata->mac_regs + DMA_MR); usleep_range(10, 15); /* Poll Until Poll Condition */ while (--count && XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + DMA_MR), DMA_MR_SWR_POS, DMA_MR_SWR_LEN)) usleep_range(500, 600); if (!count) return -EBUSY; return 0; } 
void xlgmac_init_hw_ops(struct xlgmac_hw_ops *hw_ops) { hw_ops->init = xlgmac_hw_init; hw_ops->exit = xlgmac_hw_exit; hw_ops->tx_complete = xlgmac_tx_complete; hw_ops->enable_tx = xlgmac_enable_tx; hw_ops->disable_tx = xlgmac_disable_tx; hw_ops->enable_rx = xlgmac_enable_rx; hw_ops->disable_rx = xlgmac_disable_rx; hw_ops->dev_xmit = xlgmac_dev_xmit; hw_ops->dev_read = xlgmac_dev_read; hw_ops->enable_int = xlgmac_enable_int; hw_ops->disable_int = xlgmac_disable_int; hw_ops->set_mac_address = xlgmac_set_mac_address; hw_ops->config_rx_mode = xlgmac_config_rx_mode; hw_ops->enable_rx_csum = xlgmac_enable_rx_csum; hw_ops->disable_rx_csum = xlgmac_disable_rx_csum; /* For MII speed configuration */ hw_ops->set_xlgmii_25000_speed = xlgmac_set_xlgmii_25000_speed; hw_ops->set_xlgmii_40000_speed = xlgmac_set_xlgmii_40000_speed; hw_ops->set_xlgmii_50000_speed = xlgmac_set_xlgmii_50000_speed; hw_ops->set_xlgmii_100000_speed = xlgmac_set_xlgmii_100000_speed; /* For descriptor related operation */ hw_ops->tx_desc_init = xlgmac_tx_desc_init; hw_ops->rx_desc_init = xlgmac_rx_desc_init; hw_ops->tx_desc_reset = xlgmac_tx_desc_reset; hw_ops->rx_desc_reset = xlgmac_rx_desc_reset; hw_ops->is_last_desc = xlgmac_is_last_desc; hw_ops->is_context_desc = xlgmac_is_context_desc; hw_ops->tx_start_xmit = xlgmac_tx_start_xmit; /* For Flow Control */ hw_ops->config_tx_flow_control = xlgmac_config_tx_flow_control; hw_ops->config_rx_flow_control = xlgmac_config_rx_flow_control; /* For Vlan related config */ hw_ops->enable_rx_vlan_stripping = xlgmac_enable_rx_vlan_stripping; hw_ops->disable_rx_vlan_stripping = xlgmac_disable_rx_vlan_stripping; hw_ops->enable_rx_vlan_filtering = xlgmac_enable_rx_vlan_filtering; hw_ops->disable_rx_vlan_filtering = xlgmac_disable_rx_vlan_filtering; hw_ops->update_vlan_hash_table = xlgmac_update_vlan_hash_table; /* For RX coalescing */ hw_ops->config_rx_coalesce = xlgmac_config_rx_coalesce; hw_ops->config_tx_coalesce = xlgmac_config_tx_coalesce; hw_ops->usec_to_riwt = 
xlgmac_usec_to_riwt; hw_ops->riwt_to_usec = xlgmac_riwt_to_usec; /* For RX and TX threshold config */ hw_ops->config_rx_threshold = xlgmac_config_rx_threshold; hw_ops->config_tx_threshold = xlgmac_config_tx_threshold; /* For RX and TX Store and Forward Mode config */ hw_ops->config_rsf_mode = xlgmac_config_rsf_mode; hw_ops->config_tsf_mode = xlgmac_config_tsf_mode; /* For TX DMA Operating on Second Frame config */ hw_ops->config_osp_mode = xlgmac_config_osp_mode; /* For RX and TX PBL config */ hw_ops->config_rx_pbl_val = xlgmac_config_rx_pbl_val; hw_ops->get_rx_pbl_val = xlgmac_get_rx_pbl_val; hw_ops->config_tx_pbl_val = xlgmac_config_tx_pbl_val; hw_ops->get_tx_pbl_val = xlgmac_get_tx_pbl_val; hw_ops->config_pblx8 = xlgmac_config_pblx8; /* For MMC statistics support */ hw_ops->tx_mmc_int = xlgmac_tx_mmc_int; hw_ops->rx_mmc_int = xlgmac_rx_mmc_int; hw_ops->read_mmc_stats = xlgmac_read_mmc_stats; /* For Receive Side Scaling */ hw_ops->enable_rss = xlgmac_enable_rss; hw_ops->disable_rss = xlgmac_disable_rss; hw_ops->set_rss_hash_key = xlgmac_set_rss_hash_key; hw_ops->set_rss_lookup_table = xlgmac_set_rss_lookup_table; }
{ "language": "C" }
/*
 * Simple declaration for a supporting class.
 *
 * Stores two integers and keeps their sum cached in `sum`, which the
 * private setters refresh whenever either value changes.  setValues()
 * and printValues() are defined out of line in the companion .cpp.
 */
/* NOTE(review): the original guard `__CLASSHEADER_H` begins with a double
 * underscore, which is reserved for the implementation in C++; renamed to
 * a conforming identifier. */
#ifndef CLASSHEADER_H
#define CLASSHEADER_H

class classname
{
private:
    int privateValue1;  /* first stored value */
    int privateValue2;  /* second stored value */
    int sum;            /* cached privateValue1 + privateValue2 */

    /* Set the first value and refresh the cached sum. */
    void setValue1(int val)
    {
        privateValue1 = val;
        sum = privateValue1 + privateValue2;
    }

    /* Set the second value and refresh the cached sum. */
    void setValue2(int val)
    {
        privateValue2 = val;
        sum = privateValue1 + privateValue2;
    }

public:
    /* Store both values (defined out of line). */
    void setValues(int val1, int val2);
    /* Print the stored values (defined out of line). */
    void printValues();
};

#endif /* CLASSHEADER_H */
{ "language": "C" }
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE789_Uncontrolled_Mem_Alloc__new_char_listen_socket_65b.cpp
Label Definition File: CWE789_Uncontrolled_Mem_Alloc__new.label.xml
Template File: sources-sinks-65b.tmpl.cpp
*/
/*
 * @description
 * CWE: 789 Uncontrolled Memory Allocation
 * BadSource: listen_socket Read data using a listen socket (server side)
 * GoodSource: Small number greater than zero
 * Sinks:
 *    GoodSink: Allocate memory with new [] and check the size of the memory to be allocated
 *    BadSink : Allocate memory with new [], but incorrectly check the size of the memory to be allocated
 * Flow Variant: 65 Data/control flow: data passed as an argument from one function to a function in a different source file called via a function pointer
 *
 * NOTE: this is a deliberately flawed CWE test case; the flaws below are
 * intentional and must not be "fixed".
 */

#include "std_testcase.h"

#ifndef _WIN32
#include <wchar.h>
#endif

/* Platform-specific socket setup: Winsock on Windows, BSD sockets elsewhere. */
#ifdef _WIN32
#include <winsock2.h>
#include <windows.h>
#include <direct.h>
#pragma comment(lib, "ws2_32") /* include ws2_32.lib when linking */
#define CLOSE_SOCKET closesocket
#else
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#define INVALID_SOCKET -1
#define SOCKET_ERROR -1
#define CLOSE_SOCKET close
#define SOCKET int
#endif

#define TCP_PORT 27015
#define LISTEN_BACKLOG 5
#define CHAR_ARRAY_SIZE (3 * sizeof(data) + 2)
#define HELLO_STRING "hello"

namespace CWE789_Uncontrolled_Mem_Alloc__new_char_listen_socket_65
{

#ifndef OMITBAD

/* badSink: allocates `data` bytes with no upper bound -- `data` arrives from
 * an untrusted socket read in the companion "a" file (CWE-789). */
void badSink(size_t data)
{
    {
        char * myString;
        /* POTENTIAL FLAW: No MAXIMUM limitation for memory allocation, but ensure data is large enough
         * for the strcpy() function to not cause a buffer overflow */
        /* INCIDENTAL FLAW: The source could cause a type overrun in data or in the memory allocation */
        if (data > strlen(HELLO_STRING))
        {
            myString = new char[data];
            /* Copy a small string into myString */
            strcpy(myString, HELLO_STRING);
            printLine(myString);
            delete [] myString;
        }
        else
        {
            printLine("Input is less than the length of the source string");
        }
    }
}
#endif /* OMITBAD */

#ifndef OMITGOOD

/* goodG2B uses the GoodSource with the BadSink */
/* Sink body is identical to badSink; it is "good" only because the "a" file
 * pairs it with a small, fixed source value. */
void goodG2BSink(size_t data)
{
    {
        char * myString;
        /* POTENTIAL FLAW: No MAXIMUM limitation for memory allocation, but ensure data is large enough
         * for the strcpy() function to not cause a buffer overflow */
        /* INCIDENTAL FLAW: The source could cause a type overrun in data or in the memory allocation */
        if (data > strlen(HELLO_STRING))
        {
            myString = new char[data];
            /* Copy a small string into myString */
            strcpy(myString, HELLO_STRING);
            printLine(myString);
            delete [] myString;
        }
        else
        {
            printLine("Input is less than the length of the source string");
        }
    }
}

/* goodB2G uses the BadSource with the GoodSink */
/* The upper bound (data < 100) is what makes this sink safe against the
 * uncontrolled-allocation flaw. */
void goodB2GSink(size_t data)
{
    {
        char * myString;
        /* FIX: Include a MAXIMUM limitation for memory allocation and a check to ensure data is large enough
         * for the strcpy() function to not cause a buffer overflow */
        /* INCIDENTAL FLAW: The source could cause a type overrun in data or in the memory allocation */
        if (data > strlen(HELLO_STRING) && data < 100)
        {
            myString = new char[data];
            /* Copy a small string into myString */
            strcpy(myString, HELLO_STRING);
            printLine(myString);
            delete [] myString;
        }
        else
        {
            printLine("Input is less than the length of the source string or too large");
        }
    }
}

#endif /* OMITGOOD */

} /* close namespace */
{ "language": "C" }
/*
 * linux/fs/ext2/xip.h
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte (cotte@de.ibm.com)
 *
 * Execute-in-place (XIP) support declarations.  When CONFIG_EXT2_FS_XIP
 * is disabled, everything collapses to no-op macros so callers need no
 * #ifdefs of their own.
 */
#ifdef CONFIG_EXT2_FS_XIP
extern void ext2_xip_verify_sb (struct super_block *);
extern int ext2_clear_xip_target (struct inode *, sector_t);

/* Return nonzero when the filesystem was mounted with the "xip" option. */
static inline int ext2_use_xip (struct super_block *sb)
{
	struct ext2_sb_info *sbi = EXT2_SB(sb);
	return (sbi->s_mount_opt & EXT2_MOUNT_XIP);
}
int ext2_get_xip_mem(struct address_space *, pgoff_t, int, void **,
				unsigned long *);
/*
 * Nonzero when the mapping supports XIP.  The argument is parenthesized
 * (fixed here) so the macro also works with non-primary expressions,
 * e.g. mapping_is_xip(cond ? a : b).
 */
#define mapping_is_xip(map)	unlikely((map)->a_ops->get_xip_mem)
#else
#define mapping_is_xip(map)			0
#define ext2_xip_verify_sb(sb)			do { } while (0)
#define ext2_use_xip(sb)			0
#define ext2_clear_xip_target(inode, chain)	0
#define ext2_get_xip_mem			NULL
#endif
{ "language": "C" }
/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 * Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *****************************************************************************/

/*
 * 4965 debugfs statistics readers.  Each reader formats the most recent
 * uCode statistics snapshot (current / cumulative / delta / max-delta)
 * into a temporary buffer and copies it to user space.
 */

#include "common.h"
#include "4965.h"

/* Row formats: a label plus one value, or label plus the four columns
 * (current, cumulative, delta, max). */
static const char *fmt_value = " %-30s %10u\n";
static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
static const char *fmt_header = "%-32s current" "cumulative delta max\n";

/*
 * Print the uCode statistics flag word into buf.
 * Returns the number of bytes written (scnprintf semantics: never
 * exceeds bufsz and the result stays NUL-terminated).
 */
static int
il4965_stats_flag(struct il_priv *il, char *buf, int bufsz)
{
	int p = 0;
	u32 flag;

	flag = le32_to_cpu(il->_4965.stats.flag);

	p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
	if (flag & UCODE_STATS_CLEAR_MSK)
		p += scnprintf(buf + p, bufsz - p,
			       "\tStatistics have been cleared\n");
	p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
		       (flag & UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" :
		       "5.2 GHz");
	p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
		       (flag & UCODE_STATS_NARROW_BAND_MSK) ? "enabled" :
		       "disabled");
	return p;
}

/*
 * debugfs read op: dump RX statistics (OFDM, CCK, general, OFDM-HT).
 * Live (little-endian) counters are byte-swapped with le32_to_cpu();
 * the accum/delta/max copies are printed as-is - presumably they are
 * maintained in CPU byte order by the statistics bookkeeping.
 */
static ssize_t
il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct il_priv *il = file->private_data;
	int pos = 0;
	char *buf;
	/* Generous worst-case text size: ~40 bytes per counter per table
	 * plus headroom for the headers. */
	int bufsz =
	    sizeof(struct stats_rx_phy) * 40 +
	    sizeof(struct stats_rx_non_phy) * 40 +
	    sizeof(struct stats_rx_ht_phy) * 40 + 400;
	ssize_t ret;
	struct stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
	struct stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
	struct stats_rx_non_phy *general, *accum_general;
	struct stats_rx_non_phy *delta_general, *max_general;
	struct stats_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;

	if (!il_is_alive(il))
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IL_ERR("Can not allocate Buffer\n");
		return -ENOMEM;
	}

	/*
	 * the statistic information display here is based on
	 * the last stats notification from uCode
	 * might not reflect the current uCode activity
	 */
	ofdm = &il->_4965.stats.rx.ofdm;
	cck = &il->_4965.stats.rx.cck;
	general = &il->_4965.stats.rx.general;
	ht = &il->_4965.stats.rx.ofdm_ht;
	accum_ofdm = &il->_4965.accum_stats.rx.ofdm;
	accum_cck = &il->_4965.accum_stats.rx.cck;
	accum_general = &il->_4965.accum_stats.rx.general;
	accum_ht = &il->_4965.accum_stats.rx.ofdm_ht;
	delta_ofdm = &il->_4965.delta_stats.rx.ofdm;
	delta_cck = &il->_4965.delta_stats.rx.cck;
	delta_general = &il->_4965.delta_stats.rx.general;
	delta_ht = &il->_4965.delta_stats.rx.ofdm_ht;
	max_ofdm = &il->_4965.max_delta.rx.ofdm;
	max_cck = &il->_4965.max_delta.rx.cck;
	max_general = &il->_4965.max_delta.rx.general;
	max_ht = &il->_4965.max_delta.rx.ofdm_ht;

	pos += il4965_stats_flag(il, buf, bufsz);

	/* --- OFDM PHY counters --- */
	pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
			 "Statistics_Rx - OFDM:");
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
			 le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
			 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
			 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
			 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
			 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
			 delta_ofdm->plcp_err, max_ofdm->plcp_err);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
			 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
			 delta_ofdm->crc32_err, max_ofdm->crc32_err);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
			 le32_to_cpu(ofdm->overrun_err),
			 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
			 max_ofdm->overrun_err);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "early_overrun_err:",
			 le32_to_cpu(ofdm->early_overrun_err),
			 accum_ofdm->early_overrun_err,
			 delta_ofdm->early_overrun_err,
			 max_ofdm->early_overrun_err);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
			 le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
			 delta_ofdm->crc32_good, max_ofdm->crc32_good);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
			 le32_to_cpu(ofdm->false_alarm_cnt),
			 accum_ofdm->false_alarm_cnt,
			 delta_ofdm->false_alarm_cnt,
			 max_ofdm->false_alarm_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "fina_sync_err_cnt:",
			 le32_to_cpu(ofdm->fina_sync_err_cnt),
			 accum_ofdm->fina_sync_err_cnt,
			 delta_ofdm->fina_sync_err_cnt,
			 max_ofdm->fina_sync_err_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
			 le32_to_cpu(ofdm->sfd_timeout),
			 accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
			 max_ofdm->sfd_timeout);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
			 le32_to_cpu(ofdm->fina_timeout),
			 accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
			 max_ofdm->fina_timeout);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
			 le32_to_cpu(ofdm->unresponded_rts),
			 accum_ofdm->unresponded_rts,
			 delta_ofdm->unresponded_rts,
			 max_ofdm->unresponded_rts);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "rxe_frame_lmt_ovrun:",
			 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
			 accum_ofdm->rxe_frame_limit_overrun,
			 delta_ofdm->rxe_frame_limit_overrun,
			 max_ofdm->rxe_frame_limit_overrun);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
			 le32_to_cpu(ofdm->sent_ack_cnt),
			 accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
			 max_ofdm->sent_ack_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
			 le32_to_cpu(ofdm->sent_cts_cnt),
			 accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
			 max_ofdm->sent_cts_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
			 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
			 accum_ofdm->sent_ba_rsp_cnt,
			 delta_ofdm->sent_ba_rsp_cnt,
			 max_ofdm->sent_ba_rsp_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
			 le32_to_cpu(ofdm->dsp_self_kill),
			 accum_ofdm->dsp_self_kill, delta_ofdm->dsp_self_kill,
			 max_ofdm->dsp_self_kill);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
			 le32_to_cpu(ofdm->mh_format_err),
			 accum_ofdm->mh_format_err, delta_ofdm->mh_format_err,
			 max_ofdm->mh_format_err);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "re_acq_main_rssi_sum:",
			 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
			 accum_ofdm->re_acq_main_rssi_sum,
			 delta_ofdm->re_acq_main_rssi_sum,
			 max_ofdm->re_acq_main_rssi_sum);

	/* --- CCK PHY counters (same layout as OFDM) --- */
	pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
			 "Statistics_Rx - CCK:");
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
			 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
			 delta_cck->ina_cnt, max_cck->ina_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
			 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
			 delta_cck->fina_cnt, max_cck->fina_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
			 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
			 delta_cck->plcp_err, max_cck->plcp_err);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
			 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
			 delta_cck->crc32_err, max_cck->crc32_err);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
			 le32_to_cpu(cck->overrun_err),
			 accum_cck->overrun_err, delta_cck->overrun_err,
			 max_cck->overrun_err);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "early_overrun_err:",
			 le32_to_cpu(cck->early_overrun_err),
			 accum_cck->early_overrun_err,
			 delta_cck->early_overrun_err,
			 max_cck->early_overrun_err);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
			 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
			 delta_cck->crc32_good, max_cck->crc32_good);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
			 le32_to_cpu(cck->false_alarm_cnt),
			 accum_cck->false_alarm_cnt,
			 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "fina_sync_err_cnt:",
			 le32_to_cpu(cck->fina_sync_err_cnt),
			 accum_cck->fina_sync_err_cnt,
			 delta_cck->fina_sync_err_cnt,
			 max_cck->fina_sync_err_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
			 le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
			 delta_cck->sfd_timeout, max_cck->sfd_timeout);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
			 le32_to_cpu(cck->fina_timeout),
			 accum_cck->fina_timeout, delta_cck->fina_timeout,
			 max_cck->fina_timeout);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
			 le32_to_cpu(cck->unresponded_rts),
			 accum_cck->unresponded_rts,
			 delta_cck->unresponded_rts,
			 max_cck->unresponded_rts);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "rxe_frame_lmt_ovrun:",
			 le32_to_cpu(cck->rxe_frame_limit_overrun),
			 accum_cck->rxe_frame_limit_overrun,
			 delta_cck->rxe_frame_limit_overrun,
			 max_cck->rxe_frame_limit_overrun);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
			 le32_to_cpu(cck->sent_ack_cnt),
			 accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
			 max_cck->sent_ack_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
			 le32_to_cpu(cck->sent_cts_cnt),
			 accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
			 max_cck->sent_cts_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
			 le32_to_cpu(cck->sent_ba_rsp_cnt),
			 accum_cck->sent_ba_rsp_cnt,
			 delta_cck->sent_ba_rsp_cnt,
			 max_cck->sent_ba_rsp_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
			 le32_to_cpu(cck->dsp_self_kill),
			 accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
			 max_cck->dsp_self_kill);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
			 le32_to_cpu(cck->mh_format_err),
			 accum_cck->mh_format_err, delta_cck->mh_format_err,
			 max_cck->mh_format_err);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "re_acq_main_rssi_sum:",
			 le32_to_cpu(cck->re_acq_main_rssi_sum),
			 accum_cck->re_acq_main_rssi_sum,
			 delta_cck->re_acq_main_rssi_sum,
			 max_cck->re_acq_main_rssi_sum);

	/* --- non-PHY ("general") RX counters --- */
	pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
			 "Statistics_Rx - GENERAL:");
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_cts:",
			 le32_to_cpu(general->bogus_cts),
			 accum_general->bogus_cts, delta_general->bogus_cts,
			 max_general->bogus_cts);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_ack:",
			 le32_to_cpu(general->bogus_ack),
			 accum_general->bogus_ack, delta_general->bogus_ack,
			 max_general->bogus_ack);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "non_bssid_frames:",
			 le32_to_cpu(general->non_bssid_frames),
			 accum_general->non_bssid_frames,
			 delta_general->non_bssid_frames,
			 max_general->non_bssid_frames);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "filtered_frames:",
			 le32_to_cpu(general->filtered_frames),
			 accum_general->filtered_frames,
			 delta_general->filtered_frames,
			 max_general->filtered_frames);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "non_channel_beacons:",
			 le32_to_cpu(general->non_channel_beacons),
			 accum_general->non_channel_beacons,
			 delta_general->non_channel_beacons,
			 max_general->non_channel_beacons);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "channel_beacons:",
			 le32_to_cpu(general->channel_beacons),
			 accum_general->channel_beacons,
			 delta_general->channel_beacons,
			 max_general->channel_beacons);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "num_missed_bcon:",
			 le32_to_cpu(general->num_missed_bcon),
			 accum_general->num_missed_bcon,
			 delta_general->num_missed_bcon,
			 max_general->num_missed_bcon);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "adc_rx_saturation_time:",
			 le32_to_cpu(general->adc_rx_saturation_time),
			 accum_general->adc_rx_saturation_time,
			 delta_general->adc_rx_saturation_time,
			 max_general->adc_rx_saturation_time);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "ina_detect_search_tm:",
			 le32_to_cpu(general->ina_detection_search_time),
			 accum_general->ina_detection_search_time,
			 delta_general->ina_detection_search_time,
			 max_general->ina_detection_search_time);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "beacon_silence_rssi_a:",
			 le32_to_cpu(general->beacon_silence_rssi_a),
			 accum_general->beacon_silence_rssi_a,
			 delta_general->beacon_silence_rssi_a,
			 max_general->beacon_silence_rssi_a);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "beacon_silence_rssi_b:",
			 le32_to_cpu(general->beacon_silence_rssi_b),
			 accum_general->beacon_silence_rssi_b,
			 delta_general->beacon_silence_rssi_b,
			 max_general->beacon_silence_rssi_b);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "beacon_silence_rssi_c:",
			 le32_to_cpu(general->beacon_silence_rssi_c),
			 accum_general->beacon_silence_rssi_c,
			 delta_general->beacon_silence_rssi_c,
			 max_general->beacon_silence_rssi_c);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "interference_data_flag:",
			 le32_to_cpu(general->interference_data_flag),
			 accum_general->interference_data_flag,
			 delta_general->interference_data_flag,
			 max_general->interference_data_flag);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_load:",
			 le32_to_cpu(general->channel_load),
			 accum_general->channel_load,
			 delta_general->channel_load,
			 max_general->channel_load);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "dsp_false_alarms:",
			 le32_to_cpu(general->dsp_false_alarms),
			 accum_general->dsp_false_alarms,
			 delta_general->dsp_false_alarms,
			 max_general->dsp_false_alarms);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_a:",
			 le32_to_cpu(general->beacon_rssi_a),
			 accum_general->beacon_rssi_a,
			 delta_general->beacon_rssi_a,
			 max_general->beacon_rssi_a);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_b:",
			 le32_to_cpu(general->beacon_rssi_b),
			 accum_general->beacon_rssi_b,
			 delta_general->beacon_rssi_b,
			 max_general->beacon_rssi_b);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_c:",
			 le32_to_cpu(general->beacon_rssi_c),
			 accum_general->beacon_rssi_c,
			 delta_general->beacon_rssi_c,
			 max_general->beacon_rssi_c);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_a:",
			 le32_to_cpu(general->beacon_energy_a),
			 accum_general->beacon_energy_a,
			 delta_general->beacon_energy_a,
			 max_general->beacon_energy_a);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_b:",
			 le32_to_cpu(general->beacon_energy_b),
			 accum_general->beacon_energy_b,
			 delta_general->beacon_energy_b,
			 max_general->beacon_energy_b);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_c:",
			 le32_to_cpu(general->beacon_energy_c),
			 accum_general->beacon_energy_c,
			 delta_general->beacon_energy_c,
			 max_general->beacon_energy_c);

	/* --- OFDM-HT (11n) PHY counters --- */
	pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
			 "Statistics_Rx - OFDM_HT:");
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
			 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
			 delta_ht->plcp_err, max_ht->plcp_err);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
			 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
			 delta_ht->overrun_err, max_ht->overrun_err);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "early_overrun_err:",
			 le32_to_cpu(ht->early_overrun_err),
			 accum_ht->early_overrun_err,
			 delta_ht->early_overrun_err,
			 max_ht->early_overrun_err);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
			 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
			 delta_ht->crc32_good, max_ht->crc32_good);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
			 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
			 delta_ht->crc32_err, max_ht->crc32_err);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
			 le32_to_cpu(ht->mh_format_err),
			 accum_ht->mh_format_err, delta_ht->mh_format_err,
			 max_ht->mh_format_err);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_crc32_good:",
			 le32_to_cpu(ht->agg_crc32_good),
			 accum_ht->agg_crc32_good, delta_ht->agg_crc32_good,
			 max_ht->agg_crc32_good);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_mpdu_cnt:",
			 le32_to_cpu(ht->agg_mpdu_cnt), accum_ht->agg_mpdu_cnt,
			 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_cnt:",
			 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
			 delta_ht->agg_cnt, max_ht->agg_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "unsupport_mcs:",
			 le32_to_cpu(ht->unsupport_mcs),
			 accum_ht->unsupport_mcs, delta_ht->unsupport_mcs,
			 max_ht->unsupport_mcs);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

/*
 * debugfs read op: dump TX statistics, including the aggregation
 * ("agg") sub-counters.  Same snapshot/byte-order caveats as the RX
 * reader above.
 */
static ssize_t
il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct il_priv *il = file->private_data;
	int pos = 0;
	char *buf;
	int bufsz = (sizeof(struct stats_tx) * 48) + 250;
	ssize_t ret;
	struct stats_tx *tx, *accum_tx, *delta_tx, *max_tx;

	if (!il_is_alive(il))
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IL_ERR("Can not allocate Buffer\n");
		return -ENOMEM;
	}

	/* the statistic information display here is based on
	 * the last stats notification from uCode
	 * might not reflect the current uCode activity */
	tx = &il->_4965.stats.tx;
	accum_tx = &il->_4965.accum_stats.tx;
	delta_tx = &il->_4965.delta_stats.tx;
	max_tx = &il->_4965.max_delta.tx;

	pos += il4965_stats_flag(il, buf, bufsz);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Tx:");
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "preamble:",
			 le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
			 delta_tx->preamble_cnt, max_tx->preamble_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_detected_cnt:",
			 le32_to_cpu(tx->rx_detected_cnt),
			 accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
			 max_tx->rx_detected_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "bt_prio_defer_cnt:",
			 le32_to_cpu(tx->bt_prio_defer_cnt),
			 accum_tx->bt_prio_defer_cnt,
			 delta_tx->bt_prio_defer_cnt,
			 max_tx->bt_prio_defer_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "bt_prio_kill_cnt:",
			 le32_to_cpu(tx->bt_prio_kill_cnt),
			 accum_tx->bt_prio_kill_cnt,
			 delta_tx->bt_prio_kill_cnt,
			 max_tx->bt_prio_kill_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "few_bytes_cnt:",
			 le32_to_cpu(tx->few_bytes_cnt),
			 accum_tx->few_bytes_cnt, delta_tx->few_bytes_cnt,
			 max_tx->few_bytes_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "cts_timeout:",
			 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
			 delta_tx->cts_timeout, max_tx->cts_timeout);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "ack_timeout:",
			 le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
			 delta_tx->ack_timeout, max_tx->ack_timeout);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "expected_ack_cnt:",
			 le32_to_cpu(tx->expected_ack_cnt),
			 accum_tx->expected_ack_cnt,
			 delta_tx->expected_ack_cnt,
			 max_tx->expected_ack_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "actual_ack_cnt:",
			 le32_to_cpu(tx->actual_ack_cnt),
			 accum_tx->actual_ack_cnt, delta_tx->actual_ack_cnt,
			 max_tx->actual_ack_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "dump_msdu_cnt:",
			 le32_to_cpu(tx->dump_msdu_cnt),
			 accum_tx->dump_msdu_cnt, delta_tx->dump_msdu_cnt,
			 max_tx->dump_msdu_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "abort_nxt_frame_mismatch:",
			 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
			 accum_tx->burst_abort_next_frame_mismatch_cnt,
			 delta_tx->burst_abort_next_frame_mismatch_cnt,
			 max_tx->burst_abort_next_frame_mismatch_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "abort_missing_nxt_frame:",
			 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
			 accum_tx->burst_abort_missing_next_frame_cnt,
			 delta_tx->burst_abort_missing_next_frame_cnt,
			 max_tx->burst_abort_missing_next_frame_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "cts_timeout_collision:",
			 le32_to_cpu(tx->cts_timeout_collision),
			 accum_tx->cts_timeout_collision,
			 delta_tx->cts_timeout_collision,
			 max_tx->cts_timeout_collision);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "ack_ba_timeout_collision:",
			 le32_to_cpu(tx->ack_or_ba_timeout_collision),
			 accum_tx->ack_or_ba_timeout_collision,
			 delta_tx->ack_or_ba_timeout_collision,
			 max_tx->ack_or_ba_timeout_collision);
	/* aggregation (BA-session) sub-counters */
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "agg ba_timeout:",
			 le32_to_cpu(tx->agg.ba_timeout),
			 accum_tx->agg.ba_timeout, delta_tx->agg.ba_timeout,
			 max_tx->agg.ba_timeout);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "agg ba_resched_frames:",
			 le32_to_cpu(tx->agg.ba_reschedule_frames),
			 accum_tx->agg.ba_reschedule_frames,
			 delta_tx->agg.ba_reschedule_frames,
			 max_tx->agg.ba_reschedule_frames);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "agg scd_query_agg_frame:",
			 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
			 accum_tx->agg.scd_query_agg_frame_cnt,
			 delta_tx->agg.scd_query_agg_frame_cnt,
			 max_tx->agg.scd_query_agg_frame_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "agg scd_query_no_agg:",
			 le32_to_cpu(tx->agg.scd_query_no_agg),
			 accum_tx->agg.scd_query_no_agg,
			 delta_tx->agg.scd_query_no_agg,
			 max_tx->agg.scd_query_no_agg);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "agg scd_query_agg:",
			 le32_to_cpu(tx->agg.scd_query_agg),
			 accum_tx->agg.scd_query_agg,
			 delta_tx->agg.scd_query_agg,
			 max_tx->agg.scd_query_agg);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "agg scd_query_mismatch:",
			 le32_to_cpu(tx->agg.scd_query_mismatch),
			 accum_tx->agg.scd_query_mismatch,
			 delta_tx->agg.scd_query_mismatch,
			 max_tx->agg.scd_query_mismatch);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "agg frame_not_ready:",
			 le32_to_cpu(tx->agg.frame_not_ready),
			 accum_tx->agg.frame_not_ready,
			 delta_tx->agg.frame_not_ready,
			 max_tx->agg.frame_not_ready);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "agg underrun:",
			 le32_to_cpu(tx->agg.underrun),
			 accum_tx->agg.underrun, delta_tx->agg.underrun,
			 max_tx->agg.underrun);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "agg bt_prio_kill:",
			 le32_to_cpu(tx->agg.bt_prio_kill),
			 accum_tx->agg.bt_prio_kill,
			 delta_tx->agg.bt_prio_kill,
			 max_tx->agg.bt_prio_kill);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "agg rx_ba_rsp_cnt:",
			 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
			 accum_tx->agg.rx_ba_rsp_cnt,
			 delta_tx->agg.rx_ba_rsp_cnt,
			 max_tx->agg.rx_ba_rsp_cnt);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

/*
 * debugfs read op: dump general uCode statistics - temperature,
 * timestamps, debug counters (dbg), antenna-diversity counters (div)
 * and power-management timing.
 */
static ssize_t
il4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct il_priv *il = file->private_data;
	int pos = 0;
	char *buf;
	int bufsz = sizeof(struct stats_general) * 10 + 300;
	ssize_t ret;
	struct stats_general_common *general, *accum_general;
	struct stats_general_common *delta_general, *max_general;
	struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
	struct stats_div *div, *accum_div, *delta_div, *max_div;

	if (!il_is_alive(il))
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IL_ERR("Can not allocate Buffer\n");
		return -ENOMEM;
	}

	/* the statistic information display here is based on
	 * the last stats notification from uCode
	 * might not reflect the current uCode activity */
	general = &il->_4965.stats.general.common;
	dbg = &il->_4965.stats.general.common.dbg;
	div = &il->_4965.stats.general.common.div;
	accum_general = &il->_4965.accum_stats.general.common;
	accum_dbg = &il->_4965.accum_stats.general.common.dbg;
	accum_div = &il->_4965.accum_stats.general.common.div;
	delta_general = &il->_4965.delta_stats.general.common;
	max_general = &il->_4965.max_delta.general.common;
	delta_dbg = &il->_4965.delta_stats.general.common.dbg;
	max_dbg = &il->_4965.max_delta.general.common.dbg;
	delta_div = &il->_4965.delta_stats.general.common.div;
	max_div = &il->_4965.max_delta.general.common.div;

	pos += il4965_stats_flag(il, buf, bufsz);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
			 "Statistics_General:");
	/* single-value rows (no accumulation makes sense for these) */
	pos += scnprintf(buf + pos, bufsz - pos, fmt_value, "temperature:",
			 le32_to_cpu(general->temperature));
	pos += scnprintf(buf + pos, bufsz - pos, fmt_value, "ttl_timestamp:",
			 le32_to_cpu(general->ttl_timestamp));
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_check:",
			 le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
			 delta_dbg->burst_check, max_dbg->burst_check);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_count:",
			 le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
			 delta_dbg->burst_count, max_dbg->burst_count);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "wait_for_silence_timeout_count:",
			 le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
			 accum_dbg->wait_for_silence_timeout_cnt,
			 delta_dbg->wait_for_silence_timeout_cnt,
			 max_dbg->wait_for_silence_timeout_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "sleep_time:",
			 le32_to_cpu(general->sleep_time),
			 accum_general->sleep_time, delta_general->sleep_time,
			 max_general->sleep_time);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_out:",
			 le32_to_cpu(general->slots_out),
			 accum_general->slots_out, delta_general->slots_out,
			 max_general->slots_out);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_idle:",
			 le32_to_cpu(general->slots_idle),
			 accum_general->slots_idle, delta_general->slots_idle,
			 max_general->slots_idle);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_a:",
			 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
			 delta_div->tx_on_a, max_div->tx_on_a);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_b:",
			 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
			 delta_div->tx_on_b, max_div->tx_on_b);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "exec_time:",
			 le32_to_cpu(div->exec_time), accum_div->exec_time,
			 delta_div->exec_time, max_div->exec_time);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "probe_time:",
			 le32_to_cpu(div->probe_time), accum_div->probe_time,
			 delta_div->probe_time, max_div->probe_time);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "rx_enable_counter:",
			 le32_to_cpu(general->rx_enable_counter),
			 accum_general->rx_enable_counter,
			 delta_general->rx_enable_counter,
			 max_general->rx_enable_counter);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table,
			 "num_of_sos_states:",
			 le32_to_cpu(general->num_of_sos_states),
			 accum_general->num_of_sos_states,
			 delta_general->num_of_sos_states,
			 max_general->num_of_sos_states);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

/* 4965-specific debugfs hooks plugged into the shared iwlegacy core. */
const struct il_debugfs_ops il4965_debugfs_ops = {
	.rx_stats_read = il4965_ucode_rx_stats_read,
	.tx_stats_read = il4965_ucode_tx_stats_read,
	.general_stats_read = il4965_ucode_general_stats_read,
};
{ "language": "C" }
/* $Id: os_4bri.c,v 1.28.4.4 2005/02/11 19:40:25 armin Exp $ */

/*
 * Eicon Diva Server 4BRI (quad BRI) adapter bring-up: hardware detection,
 * PCI BAR mapping, and master/slave controller initialization helpers.
 */

#include "platform.h"
#include "debuglib.h"
#include "cardtype.h"
#include "pc.h"
#include "pr_pc.h"
#include "di_defs.h"
#include "dsp_defs.h"
#include "di.h"
#include "io.h"
#include "xdi_msg.h"
#include "xdi_adapter.h"
#include "os_4bri.h"
#include "diva_pci.h"
#include "mi_pc.h"
#include "dsrv4bri.h"
#include "helpers.h"

/* Scratch pointer/length for a firmware image loaded from user space.
 * NOTE(review): file-scope and not visibly serialized here - presumably
 * protected by the XDI command path; confirm before touching. */
static void *diva_xdiLoadFileFile = NULL;
static dword diva_xdiLoadFileLength = 0;

/*
**  IMPORTS
*/
extern void prepare_qBri_functions(PISDN_ADAPTER IoAdapter);
extern void prepare_qBri2_functions(PISDN_ADAPTER IoAdapter);
extern void diva_xdi_display_adapter_features(int card);
extern void diva_add_slave_adapter(diva_os_xdi_adapter_t *a);
extern int qBri_FPGA_download(PISDN_ADAPTER IoAdapter);
extern void start_qBri_hardware(PISDN_ADAPTER IoAdapter);
extern int diva_card_read_xlog(diva_os_xdi_adapter_t *a);

/*
**  LOCALS
*/
/* Expected PCI BAR sizes (bytes) per hardware revision; index = BAR. */
static unsigned long _4bri_bar_length[4] = {
	0x100,
	0x100,			/* I/O */
	MQ_MEMORY_SIZE,
	0x2000
};
static unsigned long _4bri_v2_bar_length[4] = {
	0x100,
	0x100,			/* I/O */
	MQ2_MEMORY_SIZE,
	0x10000
};
static unsigned long _4bri_v2_bri_bar_length[4] = {
	0x100,
	0x100,			/* I/O */
	BRI2_MEMORY_SIZE,
	0x10000
};

/* Forward declarations for the adapter command/lifecycle callbacks. */
static int diva_4bri_cleanup_adapter(diva_os_xdi_adapter_t *a);
static int _4bri_get_serial_number(diva_os_xdi_adapter_t *a);
static int diva_4bri_cmd_card_proc(struct _diva_os_xdi_adapter *a,
				   diva_xdi_um_cfg_cmd_t *cmd, int length);
static int diva_4bri_cleanup_slave_adapters(diva_os_xdi_adapter_t *a);
static int diva_4bri_write_fpga_image(diva_os_xdi_adapter_t *a,
				      byte *data, dword length);
static int diva_4bri_reset_adapter(PISDN_ADAPTER IoAdapter);
static int diva_4bri_write_sdram_block(PISDN_ADAPTER IoAdapter,
				       dword address, const byte *data,
				       dword length, dword limit);
static int diva_4bri_start_adapter(PISDN_ADAPTER IoAdapter,
				   dword start_address, dword features);
static int check_qBri_interrupt(PISDN_ADAPTER IoAdapter);
static int diva_4bri_stop_adapter(diva_os_xdi_adapter_t *a);

/* Non-zero for any Rev.2 card (quad-BRI or BRI variants). */
static int
_4bri_is_rev_2_card(int card_ordinal)
{
	switch (card_ordinal) {
	case CARDTYPE_DIVASRV_Q_8M_V2_PCI:
	case CARDTYPE_DIVASRV_VOICE_Q_8M_V2_PCI:
	case CARDTYPE_DIVASRV_B_2M_V2_PCI:
	case CARDTYPE_DIVASRV_B_2F_PCI:
	case CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI:
		return (1);
	}
	return (0);
}

/* Non-zero only for the Rev.2 single-BRI cards (subset of the above). */
static int
_4bri_is_rev_2_bri_card(int card_ordinal)
{
	switch (card_ordinal) {
	case CARDTYPE_DIVASRV_B_2M_V2_PCI:
	case CARDTYPE_DIVASRV_B_2F_PCI:
	case CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI:
		return (1);
	}
	return (0);
}

/*
 * Derive this controller's register/memory pointers from the mapped BARs.
 * Each of the (up to four) controllers on the card gets its own slice of
 * the shared BAR2 SDRAM, offset by ControllerNumber * qoffset.
 */
static void
diva_4bri_set_addresses(diva_os_xdi_adapter_t *a)
{
	dword offset = a->resources.pci.qoffset;
	dword c_offset = offset * a->xdi_adapter.ControllerNumber;

	a->resources.pci.mem_type_id[MEM_TYPE_RAM] = 2;
	a->resources.pci.mem_type_id[MEM_TYPE_ADDRESS] = 2;
	a->resources.pci.mem_type_id[MEM_TYPE_CONTROL] = 2;
	a->resources.pci.mem_type_id[MEM_TYPE_RESET] = 0;
	a->resources.pci.mem_type_id[MEM_TYPE_CTLREG] = 3;
	a->resources.pci.mem_type_id[MEM_TYPE_PROM] = 0;

	/*
	  Set up hardware related pointers
	*/
	a->xdi_adapter.Address = a->resources.pci.addr[2];	/* BAR2 SDRAM */
	a->xdi_adapter.Address += c_offset;

	a->xdi_adapter.Control = a->resources.pci.addr[2];	/* BAR2 SDRAM */

	a->xdi_adapter.ram = a->resources.pci.addr[2];	/* BAR2 SDRAM */
	/* shared-RAM window sits at the top of this controller's slice */
	a->xdi_adapter.ram += c_offset + (offset - MQ_SHARED_RAM_SIZE);

	a->xdi_adapter.reset = a->resources.pci.addr[0];	/* BAR0 CONFIG */

	/*
	  ctlReg contains the register address for the MIPS CPU reset control
	*/
	a->xdi_adapter.ctlReg = a->resources.pci.addr[3];	/* BAR3 CNTRL */

	/*
	  prom contains the register address for FPGA and EEPROM programming
	*/
	a->xdi_adapter.prom = &a->xdi_adapter.reset[0x6E];
}

/*
**  BAR0 - MEM - 0x100    - CONFIG MEM
**  BAR1 - I/O - 0x100    - UNUSED
**  BAR2 - MEM - MQ_MEMORY_SIZE (MQ2_MEMORY_SIZE on Rev.2) - SDRAM
**  BAR3 - MEM - 0x2000 (0x10000 on Rev.2) - CNTRL
**
**  Called by master adapter, that will initialize and add slave adapters
*/
int
diva_4bri_init_card(diva_os_xdi_adapter_t *a) { int bar, i; byte __iomem *p; PADAPTER_LIST_ENTRY quadro_list; diva_os_xdi_adapter_t *diva_current; diva_os_xdi_adapter_t *adapter_list[4]; PISDN_ADAPTER Slave; unsigned long bar_length[ARRAY_SIZE(_4bri_bar_length)]; int v2 = _4bri_is_rev_2_card(a->CardOrdinal); int tasks = _4bri_is_rev_2_bri_card(a->CardOrdinal) ? 1 : MQ_INSTANCE_COUNT; int factor = (tasks == 1) ? 1 : 2; if (v2) { if (_4bri_is_rev_2_bri_card(a->CardOrdinal)) { memcpy(bar_length, _4bri_v2_bri_bar_length, sizeof(bar_length)); } else { memcpy(bar_length, _4bri_v2_bar_length, sizeof(bar_length)); } } else { memcpy(bar_length, _4bri_bar_length, sizeof(bar_length)); } DBG_TRC(("SDRAM_LENGTH=%08x, tasks=%d, factor=%d", bar_length[2], tasks, factor)) /* Get Serial Number The serial number of 4BRI is accessible in accordance with PCI spec via command register located in configuration space, also we do not have to map any BAR before we can access it */ if (!_4bri_get_serial_number(a)) { DBG_ERR(("A: 4BRI can't get Serial Number")) diva_4bri_cleanup_adapter(a); return (-1); } /* Set properties */ a->xdi_adapter.Properties = CardProperties[a->CardOrdinal]; DBG_LOG(("Load %s, SN:%ld, bus:%02x, func:%02x", a->xdi_adapter.Properties.Name, a->xdi_adapter.serialNo, a->resources.pci.bus, a->resources.pci.func)) /* First initialization step: get and check hardware resoures. 
Do not map resources and do not access card at this step */ for (bar = 0; bar < 4; bar++) { a->resources.pci.bar[bar] = divasa_get_pci_bar(a->resources.pci.bus, a->resources.pci.func, bar, a->resources.pci.hdev); if (!a->resources.pci.bar[bar] || (a->resources.pci.bar[bar] == 0xFFFFFFF0)) { DBG_ERR( ("A: invalid bar[%d]=%08x", bar, a->resources.pci.bar[bar])) return (-1); } } a->resources.pci.irq = (byte) divasa_get_pci_irq(a->resources.pci.bus, a->resources.pci.func, a->resources.pci.hdev); if (!a->resources.pci.irq) { DBG_ERR(("A: invalid irq")); return (-1); } a->xdi_adapter.sdram_bar = a->resources.pci.bar[2]; /* Map all MEMORY BAR's */ for (bar = 0; bar < 4; bar++) { if (bar != 1) { /* ignore I/O */ a->resources.pci.addr[bar] = divasa_remap_pci_bar(a, bar, a->resources.pci.bar[bar], bar_length[bar]); if (!a->resources.pci.addr[bar]) { DBG_ERR(("A: 4BRI: can't map bar[%d]", bar)) diva_4bri_cleanup_adapter(a); return (-1); } } } /* Register I/O port */ sprintf(&a->port_name[0], "DIVA 4BRI %ld", (long) a->xdi_adapter.serialNo); if (diva_os_register_io_port(a, 1, a->resources.pci.bar[1], bar_length[1], &a->port_name[0], 1)) { DBG_ERR(("A: 4BRI: can't register bar[1]")) diva_4bri_cleanup_adapter(a); return (-1); } a->resources.pci.addr[1] = (void *) (unsigned long) a->resources.pci.bar[1]; /* Set cleanup pointer for base adapter only, so slave adapter will be unable to get cleanup */ a->interface.cleanup_adapter_proc = diva_4bri_cleanup_adapter; /* Create slave adapters */ if (tasks > 1) { if (!(a->slave_adapters[0] = (diva_os_xdi_adapter_t *) diva_os_malloc(0, sizeof(*a)))) { diva_4bri_cleanup_adapter(a); return (-1); } if (!(a->slave_adapters[1] = (diva_os_xdi_adapter_t *) diva_os_malloc(0, sizeof(*a)))) { diva_os_free(0, a->slave_adapters[0]); a->slave_adapters[0] = NULL; diva_4bri_cleanup_adapter(a); return (-1); } if (!(a->slave_adapters[2] = (diva_os_xdi_adapter_t *) diva_os_malloc(0, sizeof(*a)))) { diva_os_free(0, a->slave_adapters[0]); diva_os_free(0, 
a->slave_adapters[1]); a->slave_adapters[0] = NULL; a->slave_adapters[1] = NULL; diva_4bri_cleanup_adapter(a); return (-1); } memset(a->slave_adapters[0], 0x00, sizeof(*a)); memset(a->slave_adapters[1], 0x00, sizeof(*a)); memset(a->slave_adapters[2], 0x00, sizeof(*a)); } adapter_list[0] = a; adapter_list[1] = a->slave_adapters[0]; adapter_list[2] = a->slave_adapters[1]; adapter_list[3] = a->slave_adapters[2]; /* Allocate slave list */ quadro_list = (PADAPTER_LIST_ENTRY) diva_os_malloc(0, sizeof(*quadro_list)); if (!(a->slave_list = quadro_list)) { for (i = 0; i < (tasks - 1); i++) { diva_os_free(0, a->slave_adapters[i]); a->slave_adapters[i] = NULL; } diva_4bri_cleanup_adapter(a); return (-1); } memset(quadro_list, 0x00, sizeof(*quadro_list)); /* Set interfaces */ a->xdi_adapter.QuadroList = quadro_list; for (i = 0; i < tasks; i++) { adapter_list[i]->xdi_adapter.ControllerNumber = i; adapter_list[i]->xdi_adapter.tasks = tasks; quadro_list->QuadroAdapter[i] = &adapter_list[i]->xdi_adapter; } for (i = 0; i < tasks; i++) { diva_current = adapter_list[i]; diva_current->dsp_mask = 0x00000003; diva_current->xdi_adapter.a.io = &diva_current->xdi_adapter; diva_current->xdi_adapter.DIRequest = request; diva_current->interface.cmd_proc = diva_4bri_cmd_card_proc; diva_current->xdi_adapter.Properties = CardProperties[a->CardOrdinal]; diva_current->CardOrdinal = a->CardOrdinal; diva_current->xdi_adapter.Channels = CardProperties[a->CardOrdinal].Channels; diva_current->xdi_adapter.e_max = CardProperties[a->CardOrdinal].E_info; diva_current->xdi_adapter.e_tbl = diva_os_malloc(0, diva_current->xdi_adapter.e_max * sizeof(E_INFO)); if (!diva_current->xdi_adapter.e_tbl) { diva_4bri_cleanup_slave_adapters(a); diva_4bri_cleanup_adapter(a); for (i = 1; i < (tasks - 1); i++) { diva_os_free(0, adapter_list[i]); } return (-1); } memset(diva_current->xdi_adapter.e_tbl, 0x00, diva_current->xdi_adapter.e_max * sizeof(E_INFO)); if 
(diva_os_initialize_spin_lock(&diva_current->xdi_adapter.isr_spin_lock, "isr")) { diva_4bri_cleanup_slave_adapters(a); diva_4bri_cleanup_adapter(a); for (i = 1; i < (tasks - 1); i++) { diva_os_free(0, adapter_list[i]); } return (-1); } if (diva_os_initialize_spin_lock(&diva_current->xdi_adapter.data_spin_lock, "data")) { diva_4bri_cleanup_slave_adapters(a); diva_4bri_cleanup_adapter(a); for (i = 1; i < (tasks - 1); i++) { diva_os_free(0, adapter_list[i]); } return (-1); } strcpy(diva_current->xdi_adapter.req_soft_isr. dpc_thread_name, "kdivas4brid"); if (diva_os_initialize_soft_isr(&diva_current->xdi_adapter.req_soft_isr, DIDpcRoutine, &diva_current->xdi_adapter)) { diva_4bri_cleanup_slave_adapters(a); diva_4bri_cleanup_adapter(a); for (i = 1; i < (tasks - 1); i++) { diva_os_free(0, adapter_list[i]); } return (-1); } /* Do not initialize second DPC - only one thread will be created */ diva_current->xdi_adapter.isr_soft_isr.object = diva_current->xdi_adapter.req_soft_isr.object; } if (v2) { prepare_qBri2_functions(&a->xdi_adapter); } else { prepare_qBri_functions(&a->xdi_adapter); } for (i = 0; i < tasks; i++) { diva_current = adapter_list[i]; if (i) memcpy(&diva_current->resources, &a->resources, sizeof(divas_card_resources_t)); diva_current->resources.pci.qoffset = (a->xdi_adapter.MemorySize >> factor); } /* Set up hardware related pointers */ a->xdi_adapter.cfg = (void *) (unsigned long) a->resources.pci.bar[0]; /* BAR0 CONFIG */ a->xdi_adapter.port = (void *) (unsigned long) a->resources.pci.bar[1]; /* BAR1 */ a->xdi_adapter.ctlReg = (void *) (unsigned long) a->resources.pci.bar[3]; /* BAR3 CNTRL */ for (i = 0; i < tasks; i++) { diva_current = adapter_list[i]; diva_4bri_set_addresses(diva_current); Slave = a->xdi_adapter.QuadroList->QuadroAdapter[i]; Slave->MultiMaster = &a->xdi_adapter; Slave->sdram_bar = a->xdi_adapter.sdram_bar; if (i) { Slave->serialNo = ((dword) (Slave->ControllerNumber << 24)) | a->xdi_adapter.serialNo; Slave->cardType = 
a->xdi_adapter.cardType; } } /* reset contains the base address for the PLX 9054 register set */ p = DIVA_OS_MEM_ATTACH_RESET(&a->xdi_adapter); WRITE_BYTE(&p[PLX9054_INTCSR], 0x00); /* disable PCI interrupts */ DIVA_OS_MEM_DETACH_RESET(&a->xdi_adapter, p); /* Set IRQ handler */ a->xdi_adapter.irq_info.irq_nr = a->resources.pci.irq; sprintf(a->xdi_adapter.irq_info.irq_name, "DIVA 4BRI %ld", (long) a->xdi_adapter.serialNo); if (diva_os_register_irq(a, a->xdi_adapter.irq_info.irq_nr, a->xdi_adapter.irq_info.irq_name)) { diva_4bri_cleanup_slave_adapters(a); diva_4bri_cleanup_adapter(a); for (i = 1; i < (tasks - 1); i++) { diva_os_free(0, adapter_list[i]); } return (-1); } a->xdi_adapter.irq_info.registered = 1; /* Add three slave adapters */ if (tasks > 1) { diva_add_slave_adapter(adapter_list[1]); diva_add_slave_adapter(adapter_list[2]); diva_add_slave_adapter(adapter_list[3]); } diva_log_info("%s IRQ:%d SerNo:%d", a->xdi_adapter.Properties.Name, a->resources.pci.irq, a->xdi_adapter.serialNo); return (0); } /* ** Cleanup function will be called for master adapter only ** this is guaranteed by design: cleanup callback is set ** by master adapter only */ static int diva_4bri_cleanup_adapter(diva_os_xdi_adapter_t *a) { int bar; /* Stop adapter if running */ if (a->xdi_adapter.Initialized) { diva_4bri_stop_adapter(a); } /* Remove IRQ handler */ if (a->xdi_adapter.irq_info.registered) { diva_os_remove_irq(a, a->xdi_adapter.irq_info.irq_nr); } a->xdi_adapter.irq_info.registered = 0; /* Free DPC's and spin locks on all adapters */ diva_4bri_cleanup_slave_adapters(a); /* Unmap all BARS */ for (bar = 0; bar < 4; bar++) { if (bar != 1) { if (a->resources.pci.bar[bar] && a->resources.pci.addr[bar]) { divasa_unmap_pci_bar(a->resources.pci.addr[bar]); a->resources.pci.bar[bar] = 0; a->resources.pci.addr[bar] = NULL; } } } /* Unregister I/O */ if (a->resources.pci.bar[1] && a->resources.pci.addr[1]) { diva_os_register_io_port(a, 0, a->resources.pci.bar[1], _4bri_is_rev_2_card(a-> 
CardOrdinal) ? _4bri_v2_bar_length[1] : _4bri_bar_length[1], &a->port_name[0], 1); a->resources.pci.bar[1] = 0; a->resources.pci.addr[1] = NULL; } if (a->slave_list) { diva_os_free(0, a->slave_list); a->slave_list = NULL; } return (0); } static int _4bri_get_serial_number(diva_os_xdi_adapter_t *a) { dword data[64]; dword serNo; word addr, status, i, j; byte Bus, Slot; void *hdev; Bus = a->resources.pci.bus; Slot = a->resources.pci.func; hdev = a->resources.pci.hdev; for (i = 0; i < 64; ++i) { addr = i * 4; for (j = 0; j < 5; ++j) { PCIwrite(Bus, Slot, 0x4E, &addr, sizeof(addr), hdev); diva_os_wait(1); PCIread(Bus, Slot, 0x4E, &status, sizeof(status), hdev); if (status & 0x8000) break; } if (j >= 5) { DBG_ERR(("EEPROM[%d] read failed (0x%x)", i * 4, addr)) return (0); } PCIread(Bus, Slot, 0x50, &data[i], sizeof(data[i]), hdev); } DBG_BLK(((char *) &data[0], sizeof(data))) serNo = data[32]; if (serNo == 0 || serNo == 0xffffffff) serNo = data[63]; if (!serNo) { DBG_LOG(("W: Serial Number == 0, create one serial number")); serNo = a->resources.pci.bar[1] & 0xffff0000; serNo |= a->resources.pci.bus << 8; serNo |= a->resources.pci.func; } a->xdi_adapter.serialNo = serNo; DBG_REG(("Serial No. : %ld", a->xdi_adapter.serialNo)) return (serNo); } /* ** Release resources of slave adapters */ static int diva_4bri_cleanup_slave_adapters(diva_os_xdi_adapter_t *a) { diva_os_xdi_adapter_t *adapter_list[4]; diva_os_xdi_adapter_t *diva_current; int i; adapter_list[0] = a; adapter_list[1] = a->slave_adapters[0]; adapter_list[2] = a->slave_adapters[1]; adapter_list[3] = a->slave_adapters[2]; for (i = 0; i < a->xdi_adapter.tasks; i++) { diva_current = adapter_list[i]; if (diva_current) { diva_os_destroy_spin_lock(&diva_current-> xdi_adapter. isr_spin_lock, "unload"); diva_os_destroy_spin_lock(&diva_current-> xdi_adapter. data_spin_lock, "unload"); diva_os_cancel_soft_isr(&diva_current->xdi_adapter. req_soft_isr); diva_os_cancel_soft_isr(&diva_current->xdi_adapter. 
isr_soft_isr); diva_os_remove_soft_isr(&diva_current->xdi_adapter. req_soft_isr); diva_current->xdi_adapter.isr_soft_isr.object = NULL; if (diva_current->xdi_adapter.e_tbl) { diva_os_free(0, diva_current->xdi_adapter. e_tbl); } diva_current->xdi_adapter.e_tbl = NULL; diva_current->xdi_adapter.e_max = 0; diva_current->xdi_adapter.e_count = 0; } } return (0); } static int diva_4bri_cmd_card_proc(struct _diva_os_xdi_adapter *a, diva_xdi_um_cfg_cmd_t *cmd, int length) { int ret = -1; if (cmd->adapter != a->controller) { DBG_ERR(("A: 4bri_cmd, invalid controller=%d != %d", cmd->adapter, a->controller)) return (-1); } switch (cmd->command) { case DIVA_XDI_UM_CMD_GET_CARD_ORDINAL: a->xdi_mbox.data_length = sizeof(dword); a->xdi_mbox.data = diva_os_malloc(0, a->xdi_mbox.data_length); if (a->xdi_mbox.data) { *(dword *) a->xdi_mbox.data = (dword) a->CardOrdinal; a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY; ret = 0; } break; case DIVA_XDI_UM_CMD_GET_SERIAL_NR: a->xdi_mbox.data_length = sizeof(dword); a->xdi_mbox.data = diva_os_malloc(0, a->xdi_mbox.data_length); if (a->xdi_mbox.data) { *(dword *) a->xdi_mbox.data = (dword) a->xdi_adapter.serialNo; a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY; ret = 0; } break; case DIVA_XDI_UM_CMD_GET_PCI_HW_CONFIG: if (!a->xdi_adapter.ControllerNumber) { /* Only master adapter can access hardware config */ a->xdi_mbox.data_length = sizeof(dword) * 9; a->xdi_mbox.data = diva_os_malloc(0, a->xdi_mbox.data_length); if (a->xdi_mbox.data) { int i; dword *data = (dword *) a->xdi_mbox.data; for (i = 0; i < 8; i++) { *data++ = a->resources.pci.bar[i]; } *data++ = (dword) a->resources.pci.irq; a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY; ret = 0; } } break; case DIVA_XDI_UM_CMD_GET_CARD_STATE: if (!a->xdi_adapter.ControllerNumber) { a->xdi_mbox.data_length = sizeof(dword); a->xdi_mbox.data = diva_os_malloc(0, a->xdi_mbox.data_length); if (a->xdi_mbox.data) { dword *data = (dword *) a->xdi_mbox.data; if (!a->xdi_adapter.ram || !a->xdi_adapter.reset || 
!a->xdi_adapter.cfg) { *data = 3; } else if (a->xdi_adapter.trapped) { *data = 2; } else if (a->xdi_adapter.Initialized) { *data = 1; } else { *data = 0; } a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY; ret = 0; } } break; case DIVA_XDI_UM_CMD_WRITE_FPGA: if (!a->xdi_adapter.ControllerNumber) { ret = diva_4bri_write_fpga_image(a, (byte *)&cmd[1], cmd->command_data. write_fpga. image_length); } break; case DIVA_XDI_UM_CMD_RESET_ADAPTER: if (!a->xdi_adapter.ControllerNumber) { ret = diva_4bri_reset_adapter(&a->xdi_adapter); } break; case DIVA_XDI_UM_CMD_WRITE_SDRAM_BLOCK: if (!a->xdi_adapter.ControllerNumber) { ret = diva_4bri_write_sdram_block(&a->xdi_adapter, cmd-> command_data. write_sdram. offset, (byte *) & cmd[1], cmd-> command_data. write_sdram. length, a->xdi_adapter. MemorySize); } break; case DIVA_XDI_UM_CMD_START_ADAPTER: if (!a->xdi_adapter.ControllerNumber) { ret = diva_4bri_start_adapter(&a->xdi_adapter, cmd->command_data. start.offset, cmd->command_data. start.features); } break; case DIVA_XDI_UM_CMD_SET_PROTOCOL_FEATURES: if (!a->xdi_adapter.ControllerNumber) { a->xdi_adapter.features = cmd->command_data.features.features; a->xdi_adapter.a.protocol_capabilities = a->xdi_adapter.features; DBG_TRC(("Set raw protocol features (%08x)", a->xdi_adapter.features)) ret = 0; } break; case DIVA_XDI_UM_CMD_STOP_ADAPTER: if (!a->xdi_adapter.ControllerNumber) { ret = diva_4bri_stop_adapter(a); } break; case DIVA_XDI_UM_CMD_READ_XLOG_ENTRY: ret = diva_card_read_xlog(a); break; case DIVA_XDI_UM_CMD_READ_SDRAM: if (!a->xdi_adapter.ControllerNumber && a->xdi_adapter.Address) { if ( (a->xdi_mbox.data_length = cmd->command_data.read_sdram.length)) { if ( (a->xdi_mbox.data_length + cmd->command_data.read_sdram.offset) < a->xdi_adapter.MemorySize) { a->xdi_mbox.data = diva_os_malloc(0, a->xdi_mbox. 
data_length); if (a->xdi_mbox.data) { byte __iomem *p = DIVA_OS_MEM_ATTACH_ADDRESS(&a->xdi_adapter); byte __iomem *src = p; byte *dst = a->xdi_mbox.data; dword len = a->xdi_mbox.data_length; src += cmd->command_data.read_sdram.offset; while (len--) { *dst++ = READ_BYTE(src++); } DIVA_OS_MEM_DETACH_ADDRESS(&a->xdi_adapter, p); a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY; ret = 0; } } } } break; default: DBG_ERR(("A: A(%d) invalid cmd=%d", a->controller, cmd->command)) } return (ret); } void *xdiLoadFile(char *FileName, dword *FileLength, unsigned long lim) { void *ret = diva_xdiLoadFileFile; if (FileLength) { *FileLength = diva_xdiLoadFileLength; } diva_xdiLoadFileFile = NULL; diva_xdiLoadFileLength = 0; return (ret); } void diva_os_set_qBri_functions(PISDN_ADAPTER IoAdapter) { } void diva_os_set_qBri2_functions(PISDN_ADAPTER IoAdapter) { } static int diva_4bri_write_fpga_image(diva_os_xdi_adapter_t *a, byte *data, dword length) { int ret; diva_xdiLoadFileFile = data; diva_xdiLoadFileLength = length; ret = qBri_FPGA_download(&a->xdi_adapter); diva_xdiLoadFileFile = NULL; diva_xdiLoadFileLength = 0; return (ret ? 
0 : -1); } static int diva_4bri_reset_adapter(PISDN_ADAPTER IoAdapter) { PISDN_ADAPTER Slave; int i; if (!IoAdapter->Address || !IoAdapter->reset) { return (-1); } if (IoAdapter->Initialized) { DBG_ERR(("A: A(%d) can't reset 4BRI adapter - please stop first", IoAdapter->ANum)) return (-1); } /* Forget all entities on all adapters */ for (i = 0; ((i < IoAdapter->tasks) && IoAdapter->QuadroList); i++) { Slave = IoAdapter->QuadroList->QuadroAdapter[i]; Slave->e_count = 0; if (Slave->e_tbl) { memset(Slave->e_tbl, 0x00, Slave->e_max * sizeof(E_INFO)); } Slave->head = 0; Slave->tail = 0; Slave->assign = 0; Slave->trapped = 0; memset(&Slave->a.IdTable[0], 0x00, sizeof(Slave->a.IdTable)); memset(&Slave->a.IdTypeTable[0], 0x00, sizeof(Slave->a.IdTypeTable)); memset(&Slave->a.FlowControlIdTable[0], 0x00, sizeof(Slave->a.FlowControlIdTable)); memset(&Slave->a.FlowControlSkipTable[0], 0x00, sizeof(Slave->a.FlowControlSkipTable)); memset(&Slave->a.misc_flags_table[0], 0x00, sizeof(Slave->a.misc_flags_table)); memset(&Slave->a.rx_stream[0], 0x00, sizeof(Slave->a.rx_stream)); memset(&Slave->a.tx_stream[0], 0x00, sizeof(Slave->a.tx_stream)); memset(&Slave->a.tx_pos[0], 0x00, sizeof(Slave->a.tx_pos)); memset(&Slave->a.rx_pos[0], 0x00, sizeof(Slave->a.rx_pos)); } return (0); } static int diva_4bri_write_sdram_block(PISDN_ADAPTER IoAdapter, dword address, const byte *data, dword length, dword limit) { byte __iomem *p = DIVA_OS_MEM_ATTACH_ADDRESS(IoAdapter); byte __iomem *mem = p; if (((address + length) >= limit) || !mem) { DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, p); DBG_ERR(("A: A(%d) write 4BRI address=0x%08lx", IoAdapter->ANum, address + length)) return (-1); } mem += address; while (length--) { WRITE_BYTE(mem++, *data++); } DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, p); return (0); } static int diva_4bri_start_adapter(PISDN_ADAPTER IoAdapter, dword start_address, dword features) { volatile word __iomem *signature; int started = 0; int i; byte __iomem *p; /* start adapter */ 
start_qBri_hardware(IoAdapter); p = DIVA_OS_MEM_ATTACH_RAM(IoAdapter); /* wait for signature in shared memory (max. 3 seconds) */ signature = (volatile word __iomem *) (&p[0x1E]); for (i = 0; i < 300; ++i) { diva_os_wait(10); if (READ_WORD(&signature[0]) == 0x4447) { DBG_TRC(("Protocol startup time %d.%02d seconds", (i / 100), (i % 100))) started = 1; break; } } for (i = 1; i < IoAdapter->tasks; i++) { IoAdapter->QuadroList->QuadroAdapter[i]->features = IoAdapter->features; IoAdapter->QuadroList->QuadroAdapter[i]->a. protocol_capabilities = IoAdapter->features; } if (!started) { DBG_FTL(("%s: Adapter selftest failed, signature=%04x", IoAdapter->Properties.Name, READ_WORD(&signature[0]))) DIVA_OS_MEM_DETACH_RAM(IoAdapter, p); (*(IoAdapter->trapFnc)) (IoAdapter); IoAdapter->stop(IoAdapter); return (-1); } DIVA_OS_MEM_DETACH_RAM(IoAdapter, p); for (i = 0; i < IoAdapter->tasks; i++) { IoAdapter->QuadroList->QuadroAdapter[i]->Initialized = 1; IoAdapter->QuadroList->QuadroAdapter[i]->IrqCount = 0; } if (check_qBri_interrupt(IoAdapter)) { DBG_ERR(("A: A(%d) interrupt test failed", IoAdapter->ANum)) for (i = 0; i < IoAdapter->tasks; i++) { IoAdapter->QuadroList->QuadroAdapter[i]->Initialized = 0; } IoAdapter->stop(IoAdapter); return (-1); } IoAdapter->Properties.Features = (word) features; diva_xdi_display_adapter_features(IoAdapter->ANum); for (i = 0; i < IoAdapter->tasks; i++) { DBG_LOG(("A(%d) %s adapter successfully started", IoAdapter->QuadroList->QuadroAdapter[i]->ANum, (IoAdapter->tasks == 1) ? 
"BRI 2.0" : "4BRI")) diva_xdi_didd_register_adapter(IoAdapter->QuadroList->QuadroAdapter[i]->ANum); IoAdapter->QuadroList->QuadroAdapter[i]->Properties.Features = (word) features; } return (0); } static int check_qBri_interrupt(PISDN_ADAPTER IoAdapter) { #ifdef SUPPORT_INTERRUPT_TEST_ON_4BRI int i; ADAPTER *a = &IoAdapter->a; byte __iomem *p; IoAdapter->IrqCount = 0; if (IoAdapter->ControllerNumber > 0) return (-1); p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter); WRITE_BYTE(&p[PLX9054_INTCSR], PLX9054_INT_ENABLE); DIVA_OS_MEM_DETACH_RESET(IoAdapter, p); /* interrupt test */ a->ReadyInt = 1; a->ram_out(a, &PR_RAM->ReadyInt, 1); for (i = 100; !IoAdapter->IrqCount && (i-- > 0); diva_os_wait(10)); return ((IoAdapter->IrqCount > 0) ? 0 : -1); #else dword volatile __iomem *qBriIrq; byte __iomem *p; /* Reset on-board interrupt register */ IoAdapter->IrqCount = 0; p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter); qBriIrq = (dword volatile __iomem *) (&p[_4bri_is_rev_2_card (IoAdapter-> cardType) ? (MQ2_BREG_IRQ_TEST) : (MQ_BREG_IRQ_TEST)]); WRITE_DWORD(qBriIrq, MQ_IRQ_REQ_OFF); DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p); p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter); WRITE_BYTE(&p[PLX9054_INTCSR], PLX9054_INT_ENABLE); DIVA_OS_MEM_DETACH_RESET(IoAdapter, p); diva_os_wait(100); return (0); #endif /* SUPPORT_INTERRUPT_TEST_ON_4BRI */ } static void diva_4bri_clear_interrupts(diva_os_xdi_adapter_t *a) { PISDN_ADAPTER IoAdapter = &a->xdi_adapter; /* clear any pending interrupt */ IoAdapter->disIrq(IoAdapter); IoAdapter->tst_irq(&IoAdapter->a); IoAdapter->clr_irq(&IoAdapter->a); IoAdapter->tst_irq(&IoAdapter->a); /* kill pending dpcs */ diva_os_cancel_soft_isr(&IoAdapter->req_soft_isr); diva_os_cancel_soft_isr(&IoAdapter->isr_soft_isr); } static int diva_4bri_stop_adapter(diva_os_xdi_adapter_t *a) { PISDN_ADAPTER IoAdapter = &a->xdi_adapter; int i; if (!IoAdapter->ram) { return (-1); } if (!IoAdapter->Initialized) { DBG_ERR(("A: A(%d) can't stop PRI adapter - not running", IoAdapter->ANum)) return (-1); 
/* nothing to stop */ } for (i = 0; i < IoAdapter->tasks; i++) { IoAdapter->QuadroList->QuadroAdapter[i]->Initialized = 0; } /* Disconnect Adapters from DIDD */ for (i = 0; i < IoAdapter->tasks; i++) { diva_xdi_didd_remove_adapter(IoAdapter->QuadroList->QuadroAdapter[i]->ANum); } i = 100; /* Stop interrupts */ a->clear_interrupts_proc = diva_4bri_clear_interrupts; IoAdapter->a.ReadyInt = 1; IoAdapter->a.ram_inc(&IoAdapter->a, &PR_RAM->ReadyInt); do { diva_os_sleep(10); } while (i-- && a->clear_interrupts_proc); if (a->clear_interrupts_proc) { diva_4bri_clear_interrupts(a); a->clear_interrupts_proc = NULL; DBG_ERR(("A: A(%d) no final interrupt from 4BRI adapter", IoAdapter->ANum)) } IoAdapter->a.ReadyInt = 0; /* Stop and reset adapter */ IoAdapter->stop(IoAdapter); return (0); }
{ "language": "C" }
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 *
 *  (C) 2010 by Argonne National Laboratory.
 *      See COPYRIGHT in top-level directory.
 */
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include "mpitest.h"

/* Test the given operation within a Fence epoch.
   fcn_call_ must be a complete RMA call including the trailing
   semicolon; its return code is captured in err. */
#define TEST_FENCE_OP(op_name_, fcn_call_)                              \
    do {                                                                \
        err = fcn_call_                                                 \
        if (err) {                                                      \
            errs++;                                                     \
            if (errs < 10) {                                            \
                MTestPrintErrorMsg( "PROC_NULL to " op_name_, err );    \
            }                                                           \
        }                                                               \
        err = MPI_Win_fence( 0, win );                                  \
        if (err) {                                                      \
            errs++;                                                     \
            if (errs < 10) {                                            \
                MTestPrintErrorMsg( "Fence after " op_name_, err );     \
            }                                                           \
        }                                                               \
    } while (0)

/* Test the given operation within a passive target epoch.
   Note the trailing space in "Lock before " so the reported message
   reads "Lock before Put", not "Lock beforePut". */
#define TEST_PT_OP(op_name_, fcn_call_)                                 \
    do {                                                                \
        err = MPI_Win_lock(MPI_LOCK_EXCLUSIVE, MPI_PROC_NULL, 0, win);  \
        if (err) {                                                      \
            errs++;                                                     \
            if (errs < 10) {                                            \
                MTestPrintErrorMsg( "Lock before " op_name_, err );     \
            }                                                           \
        }                                                               \
        err = fcn_call_                                                 \
        if (err) {                                                      \
            errs++;                                                     \
            if (errs < 10) {                                            \
                MTestPrintErrorMsg( "PROC_NULL to " op_name_, err );    \
            }                                                           \
        }                                                               \
        err = MPI_Win_unlock( MPI_PROC_NULL, win );                     \
        if (err) {                                                      \
            errs++;                                                     \
            if (errs < 10) {                                            \
                MTestPrintErrorMsg( "Unlock after " op_name_, err );    \
            }                                                           \
        }                                                               \
    } while (0)

/* Test the given request-based operation within a passive target
   epoch; the request req_ is completed with MPI_Wait after the
   unlock. */
#define TEST_REQ_OP(op_name_, req_, fcn_call_)                          \
    do {                                                                \
        err = MPI_Win_lock(MPI_LOCK_EXCLUSIVE, MPI_PROC_NULL, 0, win);  \
        if (err) {                                                      \
            errs++;                                                     \
            if (errs < 10) {                                            \
                MTestPrintErrorMsg( "Lock before " op_name_, err );     \
            }                                                           \
        }                                                               \
        err = fcn_call_                                                 \
        if (err) {                                                      \
            errs++;                                                     \
            if (errs < 10) {                                            \
                MTestPrintErrorMsg( "PROC_NULL to " op_name_, err );    \
            }                                                           \
        }                                                               \
        err = MPI_Win_unlock( MPI_PROC_NULL, win );                     \
        if (err) {                                                      \
            errs++;                                                     \
            if (errs < 10) {                                            \
                MTestPrintErrorMsg( "Unlock after " op_name_, err );    \
            }                                                           \
        }                                                               \
        err = MPI_Wait( &req_, MPI_STATUS_IGNORE );                     \
        if (err) {                                                      \
            errs++;                                                     \
            if (errs < 10) {                                            \
                MTestPrintErrorMsg( "Wait after " op_name_, err );      \
            }                                                           \
        }                                                               \
    } while (0)

/*
static char MTEST_Descrip[] = "Test the MPI_PROC_NULL is a valid target";
*/

int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int rank, size;
    int *buf, bufsize;
    int *result;
    int *rmabuf, rsize, rcount;
    MPI_Comm comm;
    MPI_Win win;
    MPI_Request req;

    MTest_Init( &argc, &argv );

    bufsize = 256 * sizeof(int);
    buf = (int *) malloc( bufsize );
    if (!buf) {
        fprintf( stderr, "Unable to allocate %d bytes\n", bufsize );
        MPI_Abort( MPI_COMM_WORLD, 1 );
    }
    result = (int *) malloc( bufsize );
    if (!result) {
        fprintf( stderr, "Unable to allocate %d bytes\n", bufsize );
        MPI_Abort( MPI_COMM_WORLD, 1 );
    }
    rcount = 16;
    rsize = rcount * sizeof(int);
    rmabuf = (int *) malloc( rsize );
    if (!rmabuf) {
        fprintf( stderr, "Unable to allocate %d bytes\n", rsize );
        MPI_Abort( MPI_COMM_WORLD, 1 );
    }

    /* The following illustrates the use of the routines to
       run through a selection of communicators and datatypes.
       Use subsets of these for tests that do not involve combinations
       of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral( &comm, 1, 1 )) {
        if (comm == MPI_COMM_NULL) continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank( comm, &rank );
        MPI_Comm_size( comm, &size );

        MPI_Win_create( buf, bufsize, sizeof(int), MPI_INFO_NULL, comm, &win );
        /* To improve reporting of problems about operations, we
           change the error handler to errors return */
        MPI_Win_set_errhandler( win, MPI_ERRORS_RETURN );

        /** TEST OPERATIONS USING ACTIVE TARGET (FENCE) SYNCHRONIZATION **/
        MPI_Win_fence( 0, win );

        TEST_FENCE_OP("Put",
                      MPI_Put( rmabuf, rcount, MPI_INT, MPI_PROC_NULL, 0,
                               rcount, MPI_INT, win );
            );

        TEST_FENCE_OP("Get",
                      MPI_Get( rmabuf, rcount, MPI_INT, MPI_PROC_NULL, 0,
                               rcount, MPI_INT, win );
            );
        TEST_FENCE_OP("Accumulate",
                      MPI_Accumulate( rmabuf, rcount, MPI_INT, MPI_PROC_NULL,
                                      0, rcount, MPI_INT, MPI_SUM, win );
            );
        TEST_FENCE_OP("Get accumulate",
                      MPI_Get_accumulate( rmabuf, rcount, MPI_INT, result,
                                          rcount, MPI_INT, MPI_PROC_NULL, 0,
                                          rcount, MPI_INT, MPI_SUM, win );
            );
        TEST_FENCE_OP("Fetch and op",
                      MPI_Fetch_and_op( rmabuf, result, MPI_INT, MPI_PROC_NULL,
                                        0, MPI_SUM, win );
            );
        TEST_FENCE_OP("Compare and swap",
                      MPI_Compare_and_swap( rmabuf, &rank, result, MPI_INT,
                                            MPI_PROC_NULL, 0, win );
            );

        /** TEST OPERATIONS USING PASSIVE TARGET SYNCHRONIZATION **/

        TEST_PT_OP("Put",
                   MPI_Put( rmabuf, rcount, MPI_INT, MPI_PROC_NULL, 0, rcount,
                            MPI_INT, win );
            );
        TEST_PT_OP("Get",
                   MPI_Get( rmabuf, rcount, MPI_INT, MPI_PROC_NULL, 0, rcount,
                            MPI_INT, win );
            );
        TEST_PT_OP("Accumulate",
                   MPI_Accumulate( rmabuf, rcount, MPI_INT, MPI_PROC_NULL, 0,
                                   rcount, MPI_INT, MPI_SUM, win );
            );
        TEST_PT_OP("Get accumulate",
                   MPI_Get_accumulate( rmabuf, rcount, MPI_INT, result, rcount,
                                       MPI_INT, MPI_PROC_NULL, 0, rcount,
                                       MPI_INT, MPI_SUM, win );
            );
        TEST_PT_OP("Fetch and op",
                   MPI_Fetch_and_op( rmabuf, result, MPI_INT, MPI_PROC_NULL, 0,
                                     MPI_SUM, win );
            );
        TEST_PT_OP("Compare and swap",
                   MPI_Compare_and_swap( rmabuf, &rank, result, MPI_INT,
                                         MPI_PROC_NULL, 0, win );
            );

        /** TEST REQUEST-BASED OPERATIONS (PASSIVE TARGET ONLY) **/

        TEST_REQ_OP("Rput", req,
                    MPI_Rput( rmabuf, rcount, MPI_INT, MPI_PROC_NULL, 0,
                              rcount, MPI_INT, win, &req );
            );
        TEST_REQ_OP("Rget", req,
                    MPI_Rget( rmabuf, rcount, MPI_INT, MPI_PROC_NULL, 0,
                              rcount, MPI_INT, win, &req );
            );
        TEST_REQ_OP("Raccumulate", req,
                    MPI_Raccumulate( rmabuf, rcount, MPI_INT, MPI_PROC_NULL, 0,
                                     rcount, MPI_INT, MPI_SUM, win, &req );
            );
        TEST_REQ_OP("Rget_accumulate", req,
                    MPI_Rget_accumulate( rmabuf, rcount, MPI_INT, result,
                                         rcount, MPI_INT, MPI_PROC_NULL, 0,
                                         rcount, MPI_INT, MPI_SUM, win, &req );
            );

        MPI_Win_free( &win );
        MTestFreeComm(&comm);
    }

    free( result );
    free( buf );
    free( rmabuf );
    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
{ "language": "C" }
Index: cramfs-1.1/mkcramfs.c =================================================================== --- cramfs-1.1.orig/mkcramfs.c 2002-02-20 09:03:32.000000000 +0100 +++ cramfs-1.1/mkcramfs.c 2011-09-09 15:11:00.980895119 +0200 @@ -93,6 +93,7 @@ static int opt_verbose = 0; static char *opt_image = NULL; static char *opt_name = NULL; +static int swap_endian = 0; static int warn_dev, warn_gid, warn_namelen, warn_skip, warn_size, warn_uid; @@ -130,6 +131,8 @@ " -i file insert a file image into the filesystem (requires >= 2.4.0)\n" " -n name set name of cramfs filesystem\n" " -p pad by %d bytes for boot code\n" + " -l litte endian filesystem\n" + " -b big endian filesystem\n" " -s sort directory entries (old option, ignored)\n" " -v be more verbose\n" " -z make explicit holes (requires >= 2.3.39)\n" @@ -372,6 +375,50 @@ return totalsize; } +/* routines to swap endianness/bitfields in inode/superblock block data */ +static void fix_inode(struct cramfs_inode *inode) +{ +#define wswap(x) (((x)>>24) | (((x)>>8)&0xff00) | (((x)&0xff00)<<8) | (((x)&0xff)<<24)) + /* attempt #2 */ + inode->mode = (inode->mode >> 8) | ((inode->mode&0xff)<<8); + inode->uid = (inode->uid >> 8) | ((inode->uid&0xff)<<8); + inode->size = (inode->size >> 16) | (inode->size&0xff00) | + ((inode->size&0xff)<<16); + ((u32*)inode)[2] = wswap(inode->offset | (inode->namelen<<26)); +} + +static void fix_offset(struct cramfs_inode *inode, u32 offset) +{ + u32 tmp = wswap(((u32*)inode)[2]); + ((u32*)inode)[2] = wswap((offset >> 2) | (tmp&0xfc000000)); +} + +static void fix_block_pointer(u32 *p) +{ + *p = wswap(*p); +} + +static void fix_super(struct cramfs_super *super) +{ + u32 *p = (u32*)super; + + /* fix superblock fields */ + p[0] = wswap(p[0]); /* magic */ + p[1] = wswap(p[1]); /* size */ + p[2] = wswap(p[2]); /* flags */ + p[3] = wswap(p[3]); /* future */ + + /* fix filesystem info fields */ + p = (u32*)&super->fsid; + p[0] = wswap(p[0]); /* crc */ + p[1] = wswap(p[1]); /* edition */ + p[2] = wswap(p[2]); 
/* blocks */ + p[3] = wswap(p[3]); /* files */ + + fix_inode(&super->root); +#undef wswap +} + /* Returns sizeof(struct cramfs_super), which includes the root inode. */ static unsigned int write_superblock(struct entry *root, char *base, int size) { @@ -405,6 +452,7 @@ super->root.gid = root->gid; super->root.size = root->size; super->root.offset = offset >> 2; + if (swap_endian) fix_super(super); return offset; } @@ -419,7 +467,10 @@ if (offset >= (1 << (2 + CRAMFS_OFFSET_WIDTH))) { die(MKFS_ERROR, 0, "filesystem too big"); } - inode->offset = (offset >> 2); + if (swap_endian) + fix_offset(inode, offset); + else + inode->offset = (offset >> 2); } /* @@ -515,6 +566,7 @@ stack_entries++; } entry = entry->next; + if (swap_endian) fix_inode(inode); } /* @@ -609,6 +661,7 @@ } *(u32 *) (base + offset) = curr; + if (swap_endian) fix_block_pointer((u32*)(base + offset)); offset += 4; } while (size); @@ -699,7 +752,7 @@ progname = argv[0]; /* command line options */ - while ((c = getopt(argc, argv, "hEe:i:n:psvz")) != EOF) { + while ((c = getopt(argc, argv, "hEe:i:n:psvzlb")) != EOF) { switch (c) { case 'h': usage(MKFS_OK); @@ -727,6 +780,18 @@ opt_pad = PAD_SIZE; fslen_ub += PAD_SIZE; break; + case 'b': +#if __BYTE_ORDER == __LITTLE_ENDIAN + swap_endian = 1; + printf("Swapping filesystem endian-ness\n"); +#endif + break; + case 'l': +#if __BYTE_ORDER == __BIG_ENDIAN + swap_endian = 1; + printf("Swapping filesystem endian-ness\n"); +#endif + break; case 's': /* old option, ignored */ break; Index: cramfs-1.1/cramfsck.c =================================================================== --- cramfs-1.1.orig/cramfsck.c 2002-02-23 01:00:42.000000000 +0100 +++ cramfs-1.1/cramfsck.c 2011-09-09 15:10:06.810894275 +0200 @@ -30,6 +30,7 @@ * 2000/07/15: Daniel Quinlan (initial support for block devices) * 2002/01/10: Daniel Quinlan (additional checks, test more return codes, * use read if mmap fails, standardize messages) + * 2004/09/01: Alfonso Acosta (Add swapping support) */ /* 
compile-time options */ @@ -53,6 +54,7 @@ #define _LINUX_STRING_H_ #include <linux/fs.h> #include <linux/cramfs_fs.h> +#include <byteswap.h> #include <zlib.h> /* Exit codes used by fsck-type programs */ @@ -73,6 +75,7 @@ static char *filename; /* ROM image filename */ struct cramfs_super super; /* just find the cramfs superblock once */ static int opt_verbose = 0; /* 1 = verbose (-v), 2+ = very verbose (-vv) */ +static int need_swapping = 0; /* fs and host dont have the same endianness */ #ifdef INCLUDE_FS_TESTS static int opt_extract = 0; /* extract cramfs (-x) */ static char *extract_dir = "root"; /* extraction directory (-x) */ @@ -84,6 +87,9 @@ static unsigned long start_data = ~0UL; /* start of the data (256 MB = max) */ static unsigned long end_data = 0; /* end of the data */ +/* access 32 byte variables */ +#define CRAMFS_32(x) (need_swapping ? bswap_32(x) : x) + /* Guarantee access to at least 8kB at a time */ #define ROMBUFFER_BITS 13 #define ROMBUFFERSIZE (1 << ROMBUFFER_BITS) @@ -165,20 +171,34 @@ if (super.magic == CRAMFS_MAGIC) { *start = 0; } + else if (super.magic == bswap_32(CRAMFS_MAGIC)) { + *start = 0; + need_swapping = 1; + } + else if (*length >= (PAD_SIZE + sizeof(super))) { lseek(fd, PAD_SIZE, SEEK_SET); if (read(fd, &super, sizeof(super)) != sizeof(super)) { die(FSCK_ERROR, 1, "read failed: %s", filename); } - if (super.magic == CRAMFS_MAGIC) { + if (super.magic == CRAMFS_32(CRAMFS_MAGIC)) { *start = PAD_SIZE; } } /* superblock tests */ - if (super.magic != CRAMFS_MAGIC) { + if (super.magic != CRAMFS_32(CRAMFS_MAGIC)) { die(FSCK_UNCORRECTED, 0, "superblock magic not found"); } + if (need_swapping){ + super.size = bswap_32(super.size); + super.flags = bswap_32(super.flags); + super.future = bswap_32(super.future); + super.fsid.crc = bswap_32(super.fsid.crc); + super.fsid.edition = bswap_32(super.fsid.edition); + super.fsid.blocks = bswap_32(super.fsid.blocks); + super.fsid.files = bswap_32(super.fsid.files); + } if (super.flags & 
~CRAMFS_SUPPORTED_FLAGS) { die(FSCK_ERROR, 0, "unsupported filesystem features"); } @@ -213,7 +233,10 @@ die(FSCK_USAGE, 0, "unable to test CRC: old cramfs format"); #endif /* not INCLUDE_FS_TESTS */ } - + else if (need_swapping) { + /* crc checking in this case would mean translating the whole file */ + return; + } crc = crc32(0L, Z_NULL, 0); buf = mmap(NULL, super.size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); @@ -298,12 +321,23 @@ static struct cramfs_inode *cramfs_iget(struct cramfs_inode * i) { +#define wswap(x) (((x)>>24) | (((x)>>8)&0xff00) | (((x)&0xff00)<<8) | (((x)&0xff)<<24)) struct cramfs_inode *inode = malloc(sizeof(struct cramfs_inode)); if (!inode) { die(FSCK_ERROR, 1, "malloc failed"); } - *inode = *i; + if(!need_swapping) { + *inode = *i; + } + else { + inode->mode=bswap_16(i->mode); + inode->uid=bswap_16(i->uid); + inode->size=bswap_32(i->size << 8); + inode->gid=i->gid; + inode->namelen = bswap_32(((u32*)i)[2]) >> 26; + inode->offset = bswap_32(((u32*)i)[2]) & 0x3FFFFFFF; + } return inode; } @@ -322,9 +356,9 @@ */ static struct cramfs_inode *read_super(void) { - unsigned long offset = super.root.offset << 2; - - if (!S_ISDIR(super.root.mode)) + struct cramfs_inode *root = cramfs_iget(&super.root); + unsigned long offset = root->offset << 2; + if (!S_ISDIR(root->mode)) die(FSCK_UNCORRECTED, 0, "root inode is not directory"); if (!(super.flags & CRAMFS_FLAG_SHIFTED_ROOT_OFFSET) && ((offset != sizeof(struct cramfs_super)) && @@ -332,7 +366,7 @@ { die(FSCK_UNCORRECTED, 0, "bad root offset (%lu)", offset); } - return cramfs_iget(&super.root); + return root; } static int uncompress_block(void *src, int len) @@ -364,7 +398,7 @@ do { unsigned long out = PAGE_CACHE_SIZE; - unsigned long next = *(u32 *) romfs_read(offset); + unsigned long next = CRAMFS_32(*(u32 *) romfs_read(offset)); if (next > end_data) { end_data = next; @@ -525,7 +559,7 @@ { unsigned long offset = i->offset << 2; unsigned long curr = offset + 4; - unsigned long next = *(u32 *) 
romfs_read(offset); + unsigned long next = CRAMFS_32(*(u32 *) romfs_read(offset)); unsigned long size; if (offset == 0) {
{ "language": "C" }
/******************************************************************************* Copyright (C) Marvell International Ltd. and its affiliates This software file (the "File") is owned and distributed by Marvell International Ltd. and/or its affiliates ("Marvell") under the following alternative licensing terms. Once you have made an election to distribute the File under one of the following license alternatives, please (i) delete this introductory statement regarding license alternatives, (ii) delete the two license alternatives that you have not elected to use and (iii) preserve the Marvell copyright notice above. ******************************************************************************** Marvell GPL License Option If you received this File from Marvell, you may opt to use, redistribute and/or modify this File in accordance with the terms and conditions of the General Public License Version 2, June 1991 (the "GPL License"), a copy of which is available along with the File in the license.txt file or by writing to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or on the worldwide web at http://www.gnu.org/licenses/gpl.txt. THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY DISCLAIMED. The GPL License provides additional details about this warranty disclaimer. 
*******************************************************************************/ #ifndef __mvCpuCntrs_h__ #define __mvCpuCntrs_h__ #include "mvTypes.h" #include "mvOs.h" #define MV_CPU_CNTRS_NUM 4 #define MV_CPU_CNTRS_OPS_NUM 32 typedef enum { MV_CPU_CNTRS_INVALID = 0, MV_CPU_CNTRS_CYCLES, MV_CPU_CNTRS_ICACHE_READ_MISS, MV_CPU_CNTRS_DCACHE_ACCESS, MV_CPU_CNTRS_DCACHE_READ_MISS, MV_CPU_CNTRS_DCACHE_READ_HIT, MV_CPU_CNTRS_DCACHE_WRITE_MISS, MV_CPU_CNTRS_DCACHE_WRITE_HIT, MV_CPU_CNTRS_DTLB_MISS, MV_CPU_CNTRS_TLB_MISS, MV_CPU_CNTRS_ITLB_MISS, MV_CPU_CNTRS_INSTRUCTIONS, MV_CPU_CNTRS_SINGLE_ISSUE, MV_CPU_CNTRS_MMU_READ_LATENCY, MV_CPU_CNTRS_MMU_READ_BEAT, MV_CPU_CNTRS_BRANCH_RETIRED, MV_CPU_CNTRS_BRANCH_TAKEN, MV_CPU_CNTRS_BRANCH_PREDICT_MISS, MV_CPU_CNTRS_BRANCH_PREDICT_COUNT, MV_CPU_CNTRS_WB_FULL_CYCLES, MV_CPU_CNTRS_WB_WRITE_LATENCY, MV_CPU_CNTRS_WB_WRITE_BEAT, MV_CPU_CNTRS_ICACHE_READ_LATENCY, MV_CPU_CNTRS_ICACHE_READ_BEAT, MV_CPU_CNTRS_DCACHE_READ_LATENCY, MV_CPU_CNTRS_DCACHE_READ_BEAT, MV_CPU_CNTRS_DCACHE_WRITE_LATENCY, MV_CPU_CNTRS_DCACHE_WRITE_BEAT, MV_CPU_CNTRS_LDM_STM_HOLD, MV_CPU_CNTRS_IS_HOLD, MV_CPU_CNTRS_DATA_WRITE_ACCESS, MV_CPU_CNTRS_DATA_READ_ACCESS, MV_CPU_CNTRS_BIU_SIMULT_ACCESS, MV_CPU_CNTRS_BIU_ANY_ACCESS, } MV_CPU_CNTRS_OPS; typedef struct { char name[16]; MV_CPU_CNTRS_OPS operation; int opIdx; MV_U32 overhead; } MV_CPU_CNTRS_ENTRY; typedef struct { char name[16]; MV_U32 num_of_measurements; MV_U32 avg_sample_count; MV_U64 counters_before[MV_CPU_CNTRS_NUM]; MV_U64 counters_after[MV_CPU_CNTRS_NUM]; MV_U64 counters_sum[MV_CPU_CNTRS_NUM]; } MV_CPU_CNTRS_EVENT; extern MV_CPU_CNTRS_ENTRY mvCpuCntrsTbl[MV_CPU_CNTRS_NUM]; MV_STATUS mvCpuCntrsProgram(int counter, MV_CPU_CNTRS_OPS op, char* name, MV_U32 overhead); void mvCpuCntrsInit(void); MV_CPU_CNTRS_EVENT* mvCpuCntrsEventCreate(char* name, MV_U32 print_threshold); void mvCpuCntrsEventDelete(MV_CPU_CNTRS_EVENT* event); void mvCpuCntrsReset(void); void mvCpuCntrsShow(MV_CPU_CNTRS_EVENT* pEvent); void 
mvCpuCntrsEventClear(MV_CPU_CNTRS_EVENT* pEvent); /* internal */ void program_counter(int counter, int op); static INLINE MV_U64 mvCpuCntrsRead(const int counter) { MV_U32 low = 0, high = 0; MV_U32 ll = 0; switch(counter) { case 0: MV_ASM ("mcr p15, 0, %0, c15, c12, 0" : : "r" (ll)); MV_ASM ("mrc p15, 0, %0, c15, c13, 0" : "=r" (low)); MV_ASM ("mrc p15, 0, %0, c15, c13, 1" : "=r" (high)); break; case 1: MV_ASM ("mcr p15, 0, %0, c15, c12, 1" : : "r" (ll)); MV_ASM ("mrc p15, 0, %0, c15, c13, 2" : "=r" (low)); MV_ASM ("mrc p15, 0, %0, c15, c13, 3" : "=r" (high)); break; case 2: MV_ASM ("mcr p15, 0, %0, c15, c12, 2" : : "r" (ll)); MV_ASM ("mrc p15, 0, %0, c15, c13, 4" : "=r" (low)); MV_ASM ("mrc p15, 0, %0, c15, c13, 5" : "=r" (high)); break; case 3: MV_ASM ("mcr p15, 0, %0, c15, c12, 3" : : "r" (ll)); MV_ASM ("mrc p15, 0, %0, c15, c13, 6" : "=r" (low)); MV_ASM ("mrc p15, 0, %0, c15, c13, 7" : "=r" (high)); break; default: mvOsPrintf("mv_cpu_cntrs_read: bad counter number (%d)\n", counter); } program_counter(counter, mvCpuCntrsTbl[counter].opIdx); return (((MV_U64)high << 32 ) | low); } static INLINE void mvCpuCntrsReadBefore(MV_CPU_CNTRS_EVENT* pEvent) { #if 0 int i; /* order is important - we want to measure the cycle count last here! */ for(i=0; i<MV_CPU_CNTRS_NUM; i++) pEvent->counters_before[i] = mvCpuCntrsRead(i); #else pEvent->counters_before[1] = mvCpuCntrsRead(1); pEvent->counters_before[3] = mvCpuCntrsRead(3); pEvent->counters_before[0] = mvCpuCntrsRead(0); pEvent->counters_before[2] = mvCpuCntrsRead(2); #endif } static INLINE void mvCpuCntrsReadAfter(MV_CPU_CNTRS_EVENT* pEvent) { int i; #if 0 /* order is important - we want to measure the cycle count first here! 
*/ for(i=0; i<MV_CPU_CNTRS_NUM; i++) pEvent->counters_after[i] = mvCpuCntrsRead(i); #else pEvent->counters_after[2] = mvCpuCntrsRead(2); pEvent->counters_after[0] = mvCpuCntrsRead(0); pEvent->counters_after[3] = mvCpuCntrsRead(3); pEvent->counters_after[1] = mvCpuCntrsRead(1); #endif for(i=0; i<MV_CPU_CNTRS_NUM; i++) { pEvent->counters_sum[i] += (pEvent->counters_after[i] - pEvent->counters_before[i]); } pEvent->num_of_measurements++; } #ifdef CONFIG_MV_CPU_PERF_CNTRS #define MV_CPU_CNTRS_READ(counter) mvCpuCntrsRead(counter) #define MV_CPU_CNTRS_START(event) mvCpuCntrsReadBefore(event) #define MV_CPU_CNTRS_STOP(event) mvCpuCntrsReadAfter(event) #define MV_CPU_CNTRS_SHOW(event) mvCpuCntrsShow(event) #else #define MV_CPU_CNTRS_READ(counter) #define MV_CPU_CNTRS_START(event) #define MV_CPU_CNTRS_STOP(event) #define MV_CPU_CNTRS_SHOW(event) #endif /* CONFIG_MV_CPU_PERF_CNTRS */ #endif /* __mvCpuCntrs_h__ */
{ "language": "C" }
/*
 * Copyright (c) 2004, Bull S.A..  All rights reserved.
 * Created by: Sebastien Decugis

 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

 * This file is a stress test for the function pthread_cond_timedwait.
 *
 * It aims to check the following assertion:
 *  When a cancel request unblocks the thread,
 *  it must not consume any pending condition signal request.

 * The steps are:
 * -> Create a bunch of threads waiting on a condvar.
 * -> At the same time (using a barrier) one thread is canceled and the condition is signaled.
 * -> Test checks that the cond signaling was not lost (at least one thread must have woken cleanly).
 * -> Then everything is cleaned up and started again.
 */

/********************************************************************************************/
/****************************** standard includes *****************************************/
/********************************************************************************************/
#include <pthread.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#include <errno.h>
#include <signal.h>
#include <string.h>
#include <time.h>

/********************************************************************************************/
/****************************** Test framework   *****************************************/
/********************************************************************************************/
#include "testfrmw.h"
#include "testfrmw.c"
/* This header is responsible for defining the following macros:
 * UNRESOLVED(ret, descr);
 *    where descr is a description of the error and ret is an int (error code for example)
 * FAILED(descr);
 *    where descr is a short text saying why the test has failed.
 * PASSED();
 *    No parameter.
 *
 * All three macros shall terminate the calling process.
 * The testcase shall not terminate in any other manner.
 *
 * The other file defines the functions
 * void output_init()
 * void output(char * string, ...)
 *
 * Those may be used to output information.
 */

/********************************************************************************************/
/********************************** Configuration ******************************************/
/********************************************************************************************/
#ifndef SCALABILITY_FACTOR
#define SCALABILITY_FACTOR 1
#endif
#ifndef VERBOSE
#define VERBOSE 1
#endif

/* Size of the "bunch" of threads -- the real number will be 2 more threads per scenarii */
#define NCHILDREN (20)

/* Seconds a worker may wait on the condvar before declaring the signal lost. */
#define TIMEOUT  (60)

#ifndef WITHOUT_ALTCLK
#define USE_ALTCLK		/* make tests with MONOTONIC CLOCK if supported */
#endif

/********************************************************************************************/
/***********************************    Test case   *****************************************/
/********************************************************************************************/

/* One row per mutex/condvar attribute combination to be stressed. */
struct _scenar {
	int m_type;		/* Mutex type to use */
	int mc_pshared;		/* 0: mutex and cond are process-private (default) ~ !0: Both are process-shared, if supported */
	int c_clock;		/* 0: cond uses the default clock. ~ !0: Cond uses monotonic clock, if supported. */
	int fork;		/* 0: Test between threads. ~ !0: Test across processes, if supported (mmap) */
	char *descr;		/* Case description */
} scenarii[] = {
	{PTHREAD_MUTEX_DEFAULT, 0, 0, 0, "Default mutex"}
	, {PTHREAD_MUTEX_NORMAL, 0, 0, 0, "Normal mutex"}
	, {PTHREAD_MUTEX_ERRORCHECK, 0, 0, 0, "Errorcheck mutex"}
	, {PTHREAD_MUTEX_RECURSIVE, 0, 0, 0, "Recursive mutex"}

	, {PTHREAD_MUTEX_DEFAULT, 1, 0, 0, "PShared default mutex"}
	, {PTHREAD_MUTEX_NORMAL, 1, 0, 0, "Pshared normal mutex"}
	, {PTHREAD_MUTEX_ERRORCHECK, 1, 0, 0, "Pshared errorcheck mutex"}
	, {PTHREAD_MUTEX_RECURSIVE, 1, 0, 0, "Pshared recursive mutex"}

	, {PTHREAD_MUTEX_DEFAULT, 1, 0, 1, "Pshared default mutex across processes"}
	, {PTHREAD_MUTEX_NORMAL, 1, 0, 1, "Pshared normal mutex across processes"}
	, {PTHREAD_MUTEX_ERRORCHECK, 1, 0, 1, "Pshared errorcheck mutex across processes"}
	, {PTHREAD_MUTEX_RECURSIVE, 1, 0, 1, "Pshared recursive mutex across processes"}

#ifdef USE_ALTCLK
	, {PTHREAD_MUTEX_DEFAULT, 1, 1, 1, "Pshared default mutex and alt clock condvar across processes"}
	, {PTHREAD_MUTEX_NORMAL, 1, 1, 1, "Pshared normal mutex and alt clock condvar across processes"}
	, {PTHREAD_MUTEX_ERRORCHECK, 1, 1, 1, "Pshared errorcheck mutex and alt clock condvar across processes"}
	, {PTHREAD_MUTEX_RECURSIVE, 1, 1, 1, "Pshared recursive mutex and alt clock condvar across processes"}

	, {PTHREAD_MUTEX_DEFAULT, 0, 1, 0, "Default mutex and alt clock condvar"}
	, {PTHREAD_MUTEX_NORMAL, 0, 1, 0, "Normal mutex and alt clock condvar"}
	, {PTHREAD_MUTEX_ERRORCHECK, 0, 1, 0, "Errorcheck mutex and alt clock condvar"}
	, {PTHREAD_MUTEX_RECURSIVE, 0, 1, 0, "Recursive mutex and alt clock condvar"}

	, {PTHREAD_MUTEX_DEFAULT, 1, 1, 0, "PShared default mutex and alt clock condvar"}
	, {PTHREAD_MUTEX_NORMAL, 1, 1, 0, "Pshared normal mutex and alt clock condvar"}
	, {PTHREAD_MUTEX_ERRORCHECK, 1, 1, 0, "Pshared errorcheck mutex and alt clock condvar"}
	, {PTHREAD_MUTEX_RECURSIVE, 1, 1, 0, "Pshared recursive mutex and alt clock condvar"}
#endif
};

#define NSCENAR (sizeof(scenarii)/sizeof(scenarii[0]))

/* This is the shared structure for all threads related to the same condvar */
struct celldata {
	pthread_t workers[NCHILDREN * SCALABILITY_FACTOR + 2];	/* waiting threads */
	pthread_t signaler;					/* thread that signals the condvar */

	pthread_barrier_t bar;		/* synchronizes the cancel with the signal */
	pthread_mutex_t mtx;
	pthread_cond_t cnd;
	clockid_t cid;			/* clock the condvar uses for its timeout */

	int boolean;			/* predicate; also encodes whether signaler locks the mutex */
	int count;			/* number of workers that reached the wait */

	long canceled;			/* statistics accumulated by the cell manager */
	long cancelfailed;
	long cnttotal;
} cells[NSCENAR * SCALABILITY_FACTOR];

char do_it = 1;				/* cleared by the SIGUSR1 handler to stop the loops */
pthread_attr_t ta;

/* Cancellation cleanup handler for workers: releases the mutex that
 * pthread_cond_timedwait re-acquired before acting on the cancel. */
void cleanup(void *arg)
{
	int ret;
	struct celldata *cd = (struct celldata *)arg;

	/* Unlock the mutex */
	ret = pthread_mutex_unlock(&(cd->mtx));
	if (ret != 0) {
		UNRESOLVED(ret, "Failed to unlock mutex in cancel handler");
	}
}

/* A worker waits (with timeout) on the condvar until the predicate becomes
 * true, then re-broadcasts so its siblings wake as well. A timeout means a
 * condition signal was consumed by the canceled thread -- test failure. */
void *worker(void *arg)
{
	int ret;
	struct celldata *cd = (struct celldata *)arg;
	struct timespec ts;

	/* lock the mutex */
	ret = pthread_mutex_lock(&(cd->mtx));
	if (ret != 0) {
		UNRESOLVED(ret, "Unable to lock mutex in worker");
	}

	/* Tell the cellmaster we are ready (count++) */
	cd->count += 1;

	/* Timeout = now + TIMEOUT */
	ret = clock_gettime(cd->cid, &ts);
	if (ret != 0) {
		UNRESOLVED(errno, "Gettime failed");
	}

	ts.tv_sec += TIMEOUT * SCALABILITY_FACTOR;

	/* register cleanup handler */
	pthread_cleanup_push(cleanup, arg);

	do {
		/* cond timedwait (while boolean == false) */
		ret = pthread_cond_timedwait(&(cd->cnd), &(cd->mtx), &ts);

		/* if timeout => failed (lost signal) */
		if (ret == ETIMEDOUT) {
			FAILED("Timeout occured. A condition signal was probably lost.");
		}

		if (ret != 0) {
			UNRESOLVED(ret, "Cond timedwait failed");
		}
	} while (cd->boolean == 0);

	/* broadcast the condition */
	ret = pthread_cond_broadcast(&(cd->cnd));
	if (ret != 0) {
		UNRESOLVED(ret, "Broadcasting the condition failed");
	}

	/* unregister the cleanup */
	pthread_cleanup_pop(0);

	/* unlock the mutex */
	ret = pthread_mutex_unlock(&(cd->mtx));
	if (ret != 0) {
		UNRESOLVED(ret, "Unable to unlock the mutex");
	}

	return NULL;
}

/* Signals the condvar once, synchronized with the cancellation through the
 * barrier. boolean == -1 requests that the signal be sent with the mutex
 * held; boolean == 1 means signal without holding the mutex. */
void *signaler(void *arg)
{
	int ret;
	struct celldata *cd = (struct celldata *)arg;

	/* Lock the mutex if required */
	if (cd->boolean == -1) {
		ret = pthread_mutex_lock(&(cd->mtx));
		if (ret != 0) {
			UNRESOLVED(ret, "mutex lock failed in signaler");
		}
	}

	/* wait the barrier */
	ret = pthread_barrier_wait(&(cd->bar));
	if ((ret != 0) && (ret != PTHREAD_BARRIER_SERIAL_THREAD)) {
		UNRESOLVED(ret, "Barrier wait failed");
	}

	/* signal the cond */
	ret = pthread_cond_signal(&(cd->cnd));
	if (ret != 0) {
		UNRESOLVED(ret, "Signaling the cond failed");
	}

	/* Unlock the mutex if required */
	if (cd->boolean == -1) {
		ret = pthread_mutex_unlock(&(cd->mtx));
		if (ret != 0) {
			UNRESOLVED(ret, "mutex unlock failed in signaler");
		}
	}

	return NULL;
}

/* One manager thread per cell: repeatedly spawns the bunch of workers,
 * cancels a pseudo-randomly chosen one while the signaler signals the
 * condvar, then joins everything and accumulates statistics. */
void *cellmanager(void *arg)
{
	int ret, i;
	struct celldata *cd = (struct celldata *)arg;
	struct timespec ts;
	int randval;
	void *w_ret;

	cd->canceled = 0;
	cd->cancelfailed = 0;
	cd->cnttotal = 0;

	/* while do_it */
	while (do_it) {
		/* Initialize some stuff */
		cd->boolean = 0;
		cd->count = 0;
		cd->cnttotal += 1;

		/* create the workers */
		for (i = 0; i < NCHILDREN * SCALABILITY_FACTOR + 2; i++) {
			ret = pthread_create(&(cd->workers[i]), &ta, worker, arg);
			if (ret != 0) {
				UNRESOLVED(ret, "Unable to create enough threads");
			}
		}

		/* choose a (pseudo) random thread to cancel */
		ret = clock_gettime(cd->cid, &ts);
		if (ret != 0) {
			UNRESOLVED(errno, "Failed to read clock");
		}
		randval = (ts.tv_sec + (ts.tv_nsec >> 10)) % (NCHILDREN * SCALABILITY_FACTOR + 2);

		/* wait for the workers to be ready */
		do {
			ret = pthread_mutex_lock(&(cd->mtx));
			if (ret != 0) {
				UNRESOLVED(ret, "Mutex lock failed");
			}

			i = cd->count;

			ret = pthread_mutex_unlock(&(cd->mtx));
			if (ret != 0) {
				UNRESOLVED(ret, "Mutex unlock failed");
			}
		} while (i < NCHILDREN * SCALABILITY_FACTOR + 2);

		/* Set the boolean (1 => no lock in signaler; -1 => lock) */
		cd->boolean = (ts.tv_sec & 1) ? -1 : 1;

		/* create the signaler */
		ret = pthread_create(&(cd->signaler), &ta, signaler, arg);
		if (ret != 0) {
			UNRESOLVED(ret, "Failed to create signaler thread");
		}

		/* wait the barrier */
		ret = pthread_barrier_wait(&(cd->bar));
		if ((ret != 0) && (ret != PTHREAD_BARRIER_SERIAL_THREAD)) {
			UNRESOLVED(ret, "Failed to wait for the barrier");
		}

		/* cancel the chosen thread */
		ret = pthread_cancel(cd->workers[randval]);

		/* it is possible the thread is already terminated -- so we don't stop on error */
		if (ret != 0) {
#if VERBOSE > 2
			output("%d\n", randval);
#endif
			cd->cancelfailed += 1;
		}

		/* join every threads */
		ret = pthread_join(cd->signaler, NULL);
		if (ret != 0) {
			UNRESOLVED(ret, "Failed to join the signaler thread");
		}

		for (i = 0; i < NCHILDREN * SCALABILITY_FACTOR + 2; i++) {
			ret = pthread_join(cd->workers[i], &w_ret);
			if (ret != 0) {
				UNRESOLVED(ret, "Unable to join a worker");
			}
			if (w_ret == PTHREAD_CANCELED)
				cd->canceled += 1;
		}
	}

	return NULL;
}

/* SIGUSR1 handler: request all cell managers to stop looping. */
void sighdl(int sig)
{
	/* do_it = 0 */
	do {
		do_it = 0;
	} while (do_it);
}

/* Entry point: probes system abilities, initializes one cell (mutex, cond,
 * barrier) per scenario, runs the cell managers until SIGUSR1, then joins
 * them, tears everything down and reports the accumulated statistics. */
int main(int argc, char *argv[])
{
	int ret, i, j;
	struct sigaction sa;

	pthread_mutexattr_t ma;
	pthread_condattr_t ca;
	clockid_t cid = CLOCK_REALTIME;
	long canceled = 0;
	long cancelfailed = 0;
	long cnttotal = 0;

	long pshared, monotonic, cs;

	pthread_t mngrs[NSCENAR * SCALABILITY_FACTOR];

	output_init();

	/* check the system abilities */
	pshared = sysconf(_SC_THREAD_PROCESS_SHARED);
	cs = sysconf(_SC_CLOCK_SELECTION);
	monotonic = sysconf(_SC_MONOTONIC_CLOCK);

#if VERBOSE > 0
	output("Test starting\n");
	output("System abilities:\n");
	output(" TPS : %li\n", pshared);
	output(" CS : %li\n", cs);
	output(" MON : %li\n", monotonic);
	if ((cs < 0) || (monotonic < 0))
		output("Alternative clock won't be tested\n");
#endif

	if (monotonic < 0)
		cs = -1;

#ifndef USE_ALTCLK
	if (cs > 0)
		output("Implementation supports the MONOTONIC CLOCK but option is disabled in test.\n");
#endif

	/* Initialize the celldatas according to scenarii */
	for (i = 0; i < NSCENAR; i++) {
#if VERBOSE > 1
		output("[parent] Preparing attributes for: %s\n", scenarii[i].descr);
#ifdef WITHOUT_XOPEN
		output("[parent] Mutex attributes DISABLED -> not used\n");
#endif
#endif
		/* set / reset everything */
		ret = pthread_mutexattr_init(&ma);
		if (ret != 0) {
			UNRESOLVED(ret, "[parent] Unable to initialize the mutex attribute object");
		}
		ret = pthread_condattr_init(&ca);
		if (ret != 0) {
			UNRESOLVED(ret, "[parent] Unable to initialize the cond attribute object");
		}
#ifndef WITHOUT_XOPEN
		/* Set the mutex type */
		ret = pthread_mutexattr_settype(&ma, scenarii[i].m_type);
		if (ret != 0) {
			UNRESOLVED(ret, "[parent] Unable to set mutex type");
		}
#if VERBOSE > 1
		output("[parent] Mutex type : %i\n", scenarii[i].m_type);
#endif
#endif

		/* Set the pshared attributes, if supported */
		if ((pshared > 0) && (scenarii[i].mc_pshared != 0)) {
			ret = pthread_mutexattr_setpshared(&ma, PTHREAD_PROCESS_SHARED);
			if (ret != 0) {
				UNRESOLVED(ret, "[parent] Unable to set the mutex process-shared");
			}
			ret = pthread_condattr_setpshared(&ca, PTHREAD_PROCESS_SHARED);
			if (ret != 0) {
				UNRESOLVED(ret, "[parent] Unable to set the cond var process-shared");
			}
#if VERBOSE > 1
			output("[parent] Mutex & cond are process-shared\n");
#endif
		}
#if VERBOSE > 1
		else {
			output("[parent] Mutex & cond are process-private\n");
		}
#endif

		/* Set the alternative clock, if supported */
#ifdef USE_ALTCLK
		if ((cs > 0) && (scenarii[i].c_clock != 0)) {
			ret = pthread_condattr_setclock(&ca, CLOCK_MONOTONIC);
			if (ret != 0) {
				UNRESOLVED(ret, "[parent] Unable to set the monotonic clock for the cond");
			}
#if VERBOSE > 1
			output("[parent] Cond uses the Monotonic clock\n");
#endif
		}
#if VERBOSE > 1
		else {
			output("[parent] Cond uses the default clock\n");
		}
#endif
		ret = pthread_condattr_getclock(&ca, &cid);
		if (ret != 0) {
			UNRESOLVED(ret, "Unable to get clock from cond attr");
		}
#endif

		/* Initialize all the mutex and condvars which uses those attributes */
		for (j = 0; j < SCALABILITY_FACTOR; j++) {
			cells[i + j * NSCENAR].cid = cid;

			/* initialize the condvar */
			ret = pthread_cond_init(&(cells[i + j * NSCENAR].cnd), &ca);
			if (ret != 0) {
				UNRESOLVED(ret, "Cond init failed");
			}

			/* initialize the mutex */
			ret = pthread_mutex_init(&(cells[i + j * NSCENAR].mtx), &ma);
			if (ret != 0) {
				UNRESOLVED(ret, "Mutex init failed");
			}

			/* initialize the barrier */
			ret = pthread_barrier_init(&(cells[i + j * NSCENAR].bar), NULL, 2);
			if (ret != 0) {
				UNRESOLVED(ret, "Failed to init barrier");
			}
		}

		ret = pthread_condattr_destroy(&ca);
		if (ret != 0) {
			UNRESOLVED(ret, "Failed to destroy the cond var attribute object");
		}

		ret = pthread_mutexattr_destroy(&ma);
		if (ret != 0) {
			UNRESOLVED(ret, "Failed to destroy the mutex attribute object");
		}
	}
#if VERBOSE > 1
	output("[parent] All condvars & mutex are ready\n");
#endif

	/* register the signal handler */
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = 0;
	sa.sa_handler = sighdl;
	if ((ret = sigaction(SIGUSR1, &sa, NULL))) {
		UNRESOLVED(ret, "Unable to register signal handler");
	}
#if VERBOSE > 1
	output("[parent] Signal handler registered\n");
#endif

	/* Initialize the thread attribute object */
	ret = pthread_attr_init(&ta);
	if (ret != 0) {
		UNRESOLVED(ret, "[parent] Failed to initialize a thread attribute object");
	}
	ret = pthread_attr_setstacksize(&ta, sysconf(_SC_THREAD_STACK_MIN));
	if (ret != 0) {
		UNRESOLVED(ret, "[parent] Failed to set thread stack size");
	}

	/* create the NSCENAR * SCALABILITY_FACTOR manager threads */
	for (i = 0; i < NSCENAR * SCALABILITY_FACTOR; i++) {
		ret = pthread_create(&mngrs[i], &ta, cellmanager, &(cells[i]));
		/* In case of failure we can exit; the child process will die after a while */
		if (ret != 0) {
			UNRESOLVED(ret, "[Parent] Failed to create a thread");
		}
#if VERBOSE > 1
		if ((i % 4) == 0)
			output("[parent] %i manager threads created...\n", i + 1);
#endif
	}

#if VERBOSE > 1
	output("[parent] All %i manager threads are running...\n", NSCENAR * SCALABILITY_FACTOR);
#endif

	/* join the manager threads and destroy the cells */
	for (i = 0; i < NSCENAR * SCALABILITY_FACTOR; i++) {
		ret = pthread_join(mngrs[i], NULL);
		if (ret != 0) {
			UNRESOLVED(ret, "[Parent] Failed to join a thread");
		}

		canceled += cells[i].canceled;
		cancelfailed += cells[i].cancelfailed;
		cnttotal += cells[i].cnttotal;

		ret = pthread_barrier_destroy(&(cells[i].bar));
		if (ret != 0) {
			UNRESOLVED(ret, "Failed to destroy a barrier");
		}

		ret = pthread_cond_destroy(&(cells[i].cnd));
		if (ret != 0) {
			UNRESOLVED(ret, "Failed to destroy a cond");
		}

		ret = pthread_mutex_destroy(&(cells[i].mtx));
		if (ret != 0) {
			UNRESOLVED(ret, "Failed to destroy a mutex");
		}
	}

	/* exit */
#if VERBOSE > 0
	output("Test passed\n");
	output("  Total loops : %8li\n", cnttotal);
#endif
#if VERBOSE > 1
	output("  Failed cancel request: %8li\n", cancelfailed);
	output("  Canceled threads : %8li\n", canceled);
#endif
	PASSED;
}
{ "language": "C" }
/*
 * Aic94xx SAS/SATA driver hardware interface.
 *
 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file is part of the aic94xx driver.
 *
 * The aic94xx driver is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * The aic94xx driver is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the aic94xx driver; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/firmware.h>

#include "aic94xx.h"
#include "aic94xx_reg.h"
#include "aic94xx_hwi.h"
#include "aic94xx_seq.h"
#include "aic94xx_dump.h"

u32 MBAR0_SWB_SIZE;

/* ---------- Initialization ---------- */

/* Return 0 if the adapter already has a SAS address; otherwise ask the
 * libsas layer (sysfs/user space) to provide one. */
static int asd_get_user_sas_addr(struct asd_ha_struct *asd_ha)
{
	/* adapter came with a sas address */
	if (asd_ha->hw_prof.sas_addr[0])
		return 0;

	return sas_request_addr(asd_ha->sas_ha.core.shost,
				asd_ha->hw_prof.sas_addr);
}

/* Copy the adapter-wide SAS address into the per-phy descriptors.
 * NOTE(review): the guard skips phys whose stored address is empty, i.e.
 * only phys that already have a non-zero first address byte are
 * overwritten -- the upstream comment ("only if it has none") reads
 * inverted relative to this code; confirm against the flash/OCM parsing
 * that fills phy_desc. */
static void asd_propagate_sas_addr(struct asd_ha_struct *asd_ha)
{
	int i;

	for (i = 0; i < ASD_MAX_PHYS; i++) {
		if (asd_ha->hw_prof.phy_desc[i].sas_addr[0] == 0)
			continue;
		/* Set a phy's address only if it has none.
		 */
		ASD_DPRINTK("setting phy%d addr to %llx\n", i,
			    SAS_ADDR(asd_ha->hw_prof.sas_addr));
		memcpy(asd_ha->hw_prof.phy_desc[i].sas_addr,
		       asd_ha->hw_prof.sas_addr, SAS_ADDR_SIZE);
	}
}

/* ---------- PHY initialization ---------- */

/* Fill in the IDENTIFY address frame this phy transmits: device type,
 * initiator/target protocol bits, SAS address and phy id. */
static void asd_init_phy_identify(struct asd_phy *phy)
{
	phy->identify_frame = phy->id_frm_tok->vaddr;

	memset(phy->identify_frame, 0, sizeof(*phy->identify_frame));

	phy->identify_frame->dev_type = SAS_END_DEVICE;
	if (phy->sas_phy.role & PHY_ROLE_INITIATOR)
		phy->identify_frame->initiator_bits = phy->sas_phy.iproto;
	if (phy->sas_phy.role & PHY_ROLE_TARGET)
		phy->identify_frame->target_bits = phy->sas_phy.tproto;
	memcpy(phy->identify_frame->sas_addr, phy->phy_desc->sas_addr,
	       SAS_ADDR_SIZE);
	phy->identify_frame->phy_id = phy->sas_phy.id;
}

/* Initialize one phy: default libsas phy settings, allocate the DMA
 * buffer for its IDENTIFY frame and fill it in.
 * Returns 0 on success, -ENOMEM if the coherent buffer allocation fails. */
static int asd_init_phy(struct asd_phy *phy)
{
	struct asd_ha_struct *asd_ha = phy->sas_phy.ha->lldd_ha;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	sas_phy->enabled = 1;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;

	phy->id_frm_tok = asd_alloc_coherent(asd_ha,
					     sizeof(*phy->identify_frame),
					     GFP_KERNEL);
	if (!phy->id_frm_tok) {
		asd_printk("no mem for IDENTIFY for phy%d\n", sas_phy->id);
		return -ENOMEM;
	} else
		asd_init_phy_identify(phy);

	memset(phy->frame_rcvd, 0, sizeof(phy->frame_rcvd));

	return 0;
}

/* Reset all port bookkeeping (addresses, phy masks, counts) to empty. */
static void asd_init_ports(struct asd_ha_struct *asd_ha)
{
	int i;

	spin_lock_init(&asd_ha->asd_ports_lock);
	for (i = 0; i < ASD_MAX_PHYS; i++) {
		struct asd_port *asd_port = &asd_ha->asd_ports[i];

		memset(asd_port->sas_addr, 0, SAS_ADDR_SIZE);
		memset(asd_port->attached_sas_addr, 0, SAS_ADDR_SIZE);
		asd_port->phy_mask = 0;
		asd_port->num_phys = 0;
	}
}

/* Wire every phy to its descriptor and libsas structures, then run the
 * full init (asd_init_phy) only on the phys enabled in the hw profile. */
static int asd_init_phys(struct asd_ha_struct *asd_ha)
{
	u8 i;
	u8 phy_mask = asd_ha->hw_prof.enabled_phys;

	for (i = 0; i < ASD_MAX_PHYS; i++) {
		struct asd_phy *phy = &asd_ha->phys[i];

		phy->phy_desc = &asd_ha->hw_prof.phy_desc[i];
		phy->asd_port = NULL;

		phy->sas_phy.enabled = 0;
		phy->sas_phy.id = i;
		phy->sas_phy.sas_addr = &phy->phy_desc->sas_addr[0];
		phy->sas_phy.frame_rcvd = &phy->frame_rcvd[0];
		phy->sas_phy.ha = &asd_ha->sas_ha;
		phy->sas_phy.lldd_phy = phy;
	}

	/* Now enable and initialize only the enabled phys. */
	for_each_phy(phy_mask, phy_mask, i) {
		int err = asd_init_phy(&asd_ha->phys[i]);
		if (err)
			return err;
	}

	return 0;
}

/* ---------- Sliding windows ---------- */

/* Program the MBAR0 sliding windows (A/B/C) and, in memory-space mode,
 * MBAR1 -> on-chip memory; record the window bases in io_handle.
 * Returns 0 on success or the PCI config access error code.
 * Note: on success the final `return err` through the Err label returns 0. */
static int asd_init_sw(struct asd_ha_struct *asd_ha)
{
	struct pci_dev *pcidev = asd_ha->pcidev;
	int err;
	u32 v;

	/* Unlock MBARs */
	err = pci_read_config_dword(pcidev, PCI_CONF_MBAR_KEY, &v);
	if (err) {
		asd_printk("couldn't access conf. space of %s\n",
			   pci_name(pcidev));
		goto Err;
	}
	if (v)
		err = pci_write_config_dword(pcidev, PCI_CONF_MBAR_KEY, v);
	if (err) {
		asd_printk("couldn't write to MBAR_KEY of %s\n",
			   pci_name(pcidev));
		goto Err;
	}

	/* Set sliding windows A, B and C to point to proper internal
	 * memory regions.
	 */
	pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWA, REG_BASE_ADDR);
	pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWB,
			       REG_BASE_ADDR_CSEQCIO);
	pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWC, REG_BASE_ADDR_EXSI);
	asd_ha->io_handle[0].swa_base = REG_BASE_ADDR;
	asd_ha->io_handle[0].swb_base = REG_BASE_ADDR_CSEQCIO;
	asd_ha->io_handle[0].swc_base = REG_BASE_ADDR_EXSI;
	MBAR0_SWB_SIZE = asd_ha->io_handle[0].len - 0x80;
	if (!asd_ha->iospace) {
		/* MBAR1 will point to OCM (On Chip Memory) */
		pci_write_config_dword(pcidev, PCI_CONF_MBAR1, OCM_BASE_ADDR);
		asd_ha->io_handle[1].swa_base = OCM_BASE_ADDR;
	}
	spin_lock_init(&asd_ha->iolock);
Err:
	return err;
}

/* ---------- SCB initialization ---------- */

/**
 * asd_init_scbs - manually allocate the first SCB.
 * @asd_ha: pointer to host adapter structure
 *
 * This allocates the very first SCB which would be sent to the
 * sequencer for execution.  Its bus address is written to
 * CSEQ_Q_NEW_POINTER, mode page 2, mode 8.  Since the bus address of
 * the _next_ scb to be DMA-ed to the host adapter is read from the last
 * SCB DMA-ed to the host adapter, we have to always stay one step
 * ahead of the sequencer and keep one SCB already allocated.
 */
static int asd_init_scbs(struct asd_ha_struct *asd_ha)
{
	struct asd_seq_data *seq = &asd_ha->seq;
	int bitmap_bytes;

	/* allocate the index array and bitmap */
	asd_ha->seq.tc_index_bitmap_bits = asd_ha->hw_prof.max_scbs;
	asd_ha->seq.tc_index_array = kzalloc(asd_ha->seq.tc_index_bitmap_bits*
					     sizeof(void *), GFP_KERNEL);
	if (!asd_ha->seq.tc_index_array)
		return -ENOMEM;

	/* round the bitmap size up to whole longs for bitmap ops */
	bitmap_bytes = (asd_ha->seq.tc_index_bitmap_bits+7)/8;
	bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
	asd_ha->seq.tc_index_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
	if (!asd_ha->seq.tc_index_bitmap) {
		kfree(asd_ha->seq.tc_index_array);
		asd_ha->seq.tc_index_array = NULL;
		return -ENOMEM;
	}

	spin_lock_init(&seq->tc_index_lock);

	seq->next_scb.size = sizeof(struct scb);
	seq->next_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool, GFP_KERNEL,
					     &seq->next_scb.dma_handle);
	if (!seq->next_scb.vaddr) {
		kfree(asd_ha->seq.tc_index_bitmap);
		kfree(asd_ha->seq.tc_index_array);
		asd_ha->seq.tc_index_bitmap = NULL;
		asd_ha->seq.tc_index_array = NULL;
		return -ENOMEM;
	}

	seq->pending = 0;

	spin_lock_init(&seq->pend_q_lock);
	INIT_LIST_HEAD(&seq->pend_q);

	return 0;
}

/* Derive the maximum number of SCBs/DDBs from the sizes of the command
 * and device context areas. */
static void asd_get_max_scb_ddb(struct asd_ha_struct *asd_ha)
{
	asd_ha->hw_prof.max_scbs = asd_get_cmdctx_size(asd_ha)/ASD_SCB_SIZE;
	asd_ha->hw_prof.max_ddbs = asd_get_devctx_size(asd_ha)/ASD_DDB_SIZE;
	ASD_DPRINTK("max_scbs:%d, max_ddbs:%d\n",
		    asd_ha->hw_prof.max_scbs,
		    asd_ha->hw_prof.max_ddbs);
}

/* ---------- Done List initialization ---------- */

static void asd_dl_tasklet_handler(unsigned long);

/* Allocate the done list (DMA coherent), reset the toggle/next cursor and
 * set up the tasklet that drains it. Returns 0 or -ENOMEM. */
static int asd_init_dl(struct asd_ha_struct *asd_ha)
{
	asd_ha->seq.actual_dl
		= asd_alloc_coherent(asd_ha,
			     ASD_DL_SIZE * sizeof(struct done_list_struct),
				     GFP_KERNEL);
	if (!asd_ha->seq.actual_dl)
		return -ENOMEM;
	asd_ha->seq.dl = asd_ha->seq.actual_dl->vaddr;
	asd_ha->seq.dl_toggle = ASD_DEF_DL_TOGGLE;
	asd_ha->seq.dl_next = 0;
	tasklet_init(&asd_ha->seq.dl_tasklet, asd_dl_tasklet_handler,
		     (unsigned long) asd_ha);

	return 0;
}

/* ---------- EDB and ESCB init ---------- */

/* Allocate seq->num_edbs zeroed empty data buffers (DMA coherent).
 * On failure, frees everything allocated so far and returns -ENOMEM. */
static int asd_alloc_edbs(struct asd_ha_struct *asd_ha, gfp_t gfp_flags)
{
	struct asd_seq_data *seq = &asd_ha->seq;
	int i;

	seq->edb_arr = kmalloc(seq->num_edbs*sizeof(*seq->edb_arr), gfp_flags);
	if (!seq->edb_arr)
		return -ENOMEM;

	for (i = 0; i < seq->num_edbs; i++) {
		seq->edb_arr[i] = asd_alloc_coherent(asd_ha, ASD_EDB_SIZE,
						     gfp_flags);
		if (!seq->edb_arr[i])
			goto Err_unroll;
		memset(seq->edb_arr[i]->vaddr, 0, ASD_EDB_SIZE);
	}

	ASD_DPRINTK("num_edbs:%d\n", seq->num_edbs);

	return 0;

Err_unroll:
	for (i-- ; i >= 0; i--)
		asd_free_coherent(asd_ha, seq->edb_arr[i]);
	kfree(seq->edb_arr);
	seq->edb_arr = NULL;

	return -ENOMEM;
}

/* Allocate the list of empty SCBs and record them in seq->escb_arr, each
 * tagged with the EMPTY_SCB opcode.  num_escbs is reduced to what the
 * ascb allocator actually provided. */
static int asd_alloc_escbs(struct asd_ha_struct *asd_ha,
			   gfp_t gfp_flags)
{
	struct asd_seq_data *seq = &asd_ha->seq;
	struct asd_ascb *escb;
	int i, escbs;

	seq->escb_arr = kmalloc(seq->num_escbs*sizeof(*seq->escb_arr),
				gfp_flags);
	if (!seq->escb_arr)
		return -ENOMEM;

	escbs = seq->num_escbs;
	escb = asd_ascb_alloc_list(asd_ha, &escbs, gfp_flags);
	if (!escb) {
		asd_printk("couldn't allocate list of escbs\n");
		goto Err;
	}
	seq->num_escbs -= escbs;  /* subtract what was not allocated */
	ASD_DPRINTK("num_escbs:%d\n", seq->num_escbs);

	for (i = 0; i < seq->num_escbs; i++, escb = list_entry(escb->list.next,
							       struct asd_ascb,
							       list)) {
		seq->escb_arr[i] = escb;
		escb->scb->header.opcode = EMPTY_SCB;
	}

	return 0;
Err:
	kfree(seq->escb_arr);
	seq->escb_arr = NULL;
	return -ENOMEM;

}

/* Distribute the EDBs over the empty SCBs: each ESCB gets ASD_EDBS_PER_SCB
 * scatter elements pointing at consecutive EDB DMA buffers. */
static void asd_assign_edbs2escbs(struct asd_ha_struct *asd_ha)
{
	struct asd_seq_data *seq = &asd_ha->seq;
	int i, k, z = 0;

	for (i = 0; i < seq->num_escbs; i++) {
		struct asd_ascb *ascb = seq->escb_arr[i];
		struct empty_scb *escb = &ascb->scb->escb;

		ascb->edb_index = z;

		escb->num_valid = ASD_EDBS_PER_SCB;

		for (k = 0; k < ASD_EDBS_PER_SCB; k++) {
			struct sg_el *eb = &escb->eb[k];
			struct asd_dma_tok *edb = seq->edb_arr[z++];

			memset(eb, 0, sizeof(*eb));
			eb->bus_addr = cpu_to_le64(((u64) edb->dma_handle));
			eb->size = cpu_to_le32(((u32) edb->size));
		}
	}
}

/**
 * asd_init_escbs -- allocate and initialize empty scbs
 * @asd_ha: pointer to host adapter structure
 *
 * An empty SCB has sg_elements of ASD_EDBS_PER_SCB (7) buffers.
 * They transport sense data, etc.
 */
static int asd_init_escbs(struct asd_ha_struct *asd_ha)
{
	struct asd_seq_data *seq = &asd_ha->seq;
	int err = 0;

	/* Allocate two empty data buffers (edb) per sequencer.
	 */
	int edbs = 2*(1+asd_ha->hw_prof.num_phys);

	seq->num_escbs = (edbs+ASD_EDBS_PER_SCB-1)/ASD_EDBS_PER_SCB;
	seq->num_edbs = seq->num_escbs * ASD_EDBS_PER_SCB;

	err = asd_alloc_edbs(asd_ha, GFP_KERNEL);
	if (err) {
		asd_printk("couldn't allocate edbs\n");
		return err;
	}

	err = asd_alloc_escbs(asd_ha, GFP_KERNEL);
	if (err) {
		asd_printk("couldn't allocate escbs\n");
		return err;
	}

	asd_assign_edbs2escbs(asd_ha);
	/* In order to insure that normal SCBs do not overfill sequencer
	 * memory and leave no space for escbs (halting condition),
	 * we increment pending here by the number of escbs.  However,
	 * escbs are never pending.
	 */
	seq->pending = seq->num_escbs;
	seq->can_queue = 1 + (asd_ha->hw_prof.max_scbs - seq->pending)/2;

	return 0;
}

/* ---------- HW initialization ---------- */

/**
 * asd_chip_hardrst -- hard reset the chip
 * @asd_ha: pointer to host adapter structure
 *
 * This takes 16 cycles and is synchronous to CFCLK, which runs
 * at 200 MHz, so this should take at most 80 nanoseconds.
*/ int asd_chip_hardrst(struct asd_ha_struct *asd_ha) { int i; int count = 100; u32 reg; for (i = 0 ; i < 4 ; i++) { asd_write_reg_dword(asd_ha, COMBIST, HARDRST); } do { udelay(1); reg = asd_read_reg_dword(asd_ha, CHIMINT); if (reg & HARDRSTDET) { asd_write_reg_dword(asd_ha, CHIMINT, HARDRSTDET|PORRSTDET); return 0; } } while (--count > 0); return -ENODEV; } /** * asd_init_chip -- initialize the chip * @asd_ha: pointer to host adapter structure * * Hard resets the chip, disables HA interrupts, downloads the sequnecer * microcode and starts the sequencers. The caller has to explicitly * enable HA interrupts with asd_enable_ints(asd_ha). */ static int asd_init_chip(struct asd_ha_struct *asd_ha) { int err; err = asd_chip_hardrst(asd_ha); if (err) { asd_printk("couldn't hard reset %s\n", pci_name(asd_ha->pcidev)); goto out; } asd_disable_ints(asd_ha); err = asd_init_seqs(asd_ha); if (err) { asd_printk("couldn't init seqs for %s\n", pci_name(asd_ha->pcidev)); goto out; } err = asd_start_seqs(asd_ha); if (err) { asd_printk("couldn't start seqs for %s\n", pci_name(asd_ha->pcidev)); goto out; } out: return err; } #define MAX_DEVS ((OCM_MAX_SIZE) / (ASD_DDB_SIZE)) static int max_devs = 0; module_param_named(max_devs, max_devs, int, S_IRUGO); MODULE_PARM_DESC(max_devs, "\n" "\tMaximum number of SAS devices to support (not LUs).\n" "\tDefault: 2176, Maximum: 65663.\n"); static int max_cmnds = 0; module_param_named(max_cmnds, max_cmnds, int, S_IRUGO); MODULE_PARM_DESC(max_cmnds, "\n" "\tMaximum number of commands queuable.\n" "\tDefault: 512, Maximum: 66047.\n"); static void asd_extend_devctx_ocm(struct asd_ha_struct *asd_ha) { unsigned long dma_addr = OCM_BASE_ADDR; u32 d; dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE; asd_write_reg_addr(asd_ha, DEVCTXBASE, (dma_addr_t) dma_addr); d = asd_read_reg_dword(asd_ha, CTXDOMAIN); d |= 4; asd_write_reg_dword(asd_ha, CTXDOMAIN, d); asd_ha->hw_prof.max_ddbs += MAX_DEVS; } static int asd_extend_devctx(struct asd_ha_struct 
*asd_ha) { dma_addr_t dma_handle; unsigned long dma_addr; u32 d; int size; asd_extend_devctx_ocm(asd_ha); asd_ha->hw_prof.ddb_ext = NULL; if (max_devs <= asd_ha->hw_prof.max_ddbs || max_devs > 0xFFFF) { max_devs = asd_ha->hw_prof.max_ddbs; return 0; } size = (max_devs - asd_ha->hw_prof.max_ddbs + 1) * ASD_DDB_SIZE; asd_ha->hw_prof.ddb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL); if (!asd_ha->hw_prof.ddb_ext) { asd_printk("couldn't allocate memory for %d devices\n", max_devs); max_devs = asd_ha->hw_prof.max_ddbs; return -ENOMEM; } dma_handle = asd_ha->hw_prof.ddb_ext->dma_handle; dma_addr = ALIGN((unsigned long) dma_handle, ASD_DDB_SIZE); dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE; dma_handle = (dma_addr_t) dma_addr; asd_write_reg_addr(asd_ha, DEVCTXBASE, dma_handle); d = asd_read_reg_dword(asd_ha, CTXDOMAIN); d &= ~4; asd_write_reg_dword(asd_ha, CTXDOMAIN, d); asd_ha->hw_prof.max_ddbs = max_devs; return 0; } static int asd_extend_cmdctx(struct asd_ha_struct *asd_ha) { dma_addr_t dma_handle; unsigned long dma_addr; u32 d; int size; asd_ha->hw_prof.scb_ext = NULL; if (max_cmnds <= asd_ha->hw_prof.max_scbs || max_cmnds > 0xFFFF) { max_cmnds = asd_ha->hw_prof.max_scbs; return 0; } size = (max_cmnds - asd_ha->hw_prof.max_scbs + 1) * ASD_SCB_SIZE; asd_ha->hw_prof.scb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL); if (!asd_ha->hw_prof.scb_ext) { asd_printk("couldn't allocate memory for %d commands\n", max_cmnds); max_cmnds = asd_ha->hw_prof.max_scbs; return -ENOMEM; } dma_handle = asd_ha->hw_prof.scb_ext->dma_handle; dma_addr = ALIGN((unsigned long) dma_handle, ASD_SCB_SIZE); dma_addr -= asd_ha->hw_prof.max_scbs * ASD_SCB_SIZE; dma_handle = (dma_addr_t) dma_addr; asd_write_reg_addr(asd_ha, CMDCTXBASE, dma_handle); d = asd_read_reg_dword(asd_ha, CTXDOMAIN); d &= ~1; asd_write_reg_dword(asd_ha, CTXDOMAIN, d); asd_ha->hw_prof.max_scbs = max_cmnds; return 0; } /** * asd_init_ctxmem -- initialize context memory * asd_ha: pointer to host adapter structure * 
* This function sets the maximum number of SCBs and * DDBs which can be used by the sequencer. This is normally * 512 and 128 respectively. If support for more SCBs or more DDBs * is required then CMDCTXBASE, DEVCTXBASE and CTXDOMAIN are * initialized here to extend context memory to point to host memory, * thus allowing unlimited support for SCBs and DDBs -- only limited * by host memory. */ static int asd_init_ctxmem(struct asd_ha_struct *asd_ha) { int bitmap_bytes; asd_get_max_scb_ddb(asd_ha); asd_extend_devctx(asd_ha); asd_extend_cmdctx(asd_ha); /* The kernel wants bitmaps to be unsigned long sized. */ bitmap_bytes = (asd_ha->hw_prof.max_ddbs+7)/8; bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long); asd_ha->hw_prof.ddb_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL); if (!asd_ha->hw_prof.ddb_bitmap) return -ENOMEM; spin_lock_init(&asd_ha->hw_prof.ddb_lock); return 0; } int asd_init_hw(struct asd_ha_struct *asd_ha) { int err; u32 v; err = asd_init_sw(asd_ha); if (err) return err; err = pci_read_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL, &v); if (err) { asd_printk("couldn't read PCIC_HSTPCIX_CNTRL of %s\n", pci_name(asd_ha->pcidev)); return err; } err = pci_write_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL, v | SC_TMR_DIS); if (err) { asd_printk("couldn't disable split completion timer of %s\n", pci_name(asd_ha->pcidev)); return err; } err = asd_read_ocm(asd_ha); if (err) { asd_printk("couldn't read ocm(%d)\n", err); /* While suspicios, it is not an error that we * couldn't read the OCM. */ } err = asd_read_flash(asd_ha); if (err) { asd_printk("couldn't read flash(%d)\n", err); /* While suspicios, it is not an error that we * couldn't read FLASH memory. 
*/ } asd_init_ctxmem(asd_ha); if (asd_get_user_sas_addr(asd_ha)) { asd_printk("No SAS Address provided for %s\n", pci_name(asd_ha->pcidev)); err = -ENODEV; goto Out; } asd_propagate_sas_addr(asd_ha); err = asd_init_phys(asd_ha); if (err) { asd_printk("couldn't initialize phys for %s\n", pci_name(asd_ha->pcidev)); goto Out; } asd_init_ports(asd_ha); err = asd_init_scbs(asd_ha); if (err) { asd_printk("couldn't initialize scbs for %s\n", pci_name(asd_ha->pcidev)); goto Out; } err = asd_init_dl(asd_ha); if (err) { asd_printk("couldn't initialize the done list:%d\n", err); goto Out; } err = asd_init_escbs(asd_ha); if (err) { asd_printk("couldn't initialize escbs\n"); goto Out; } err = asd_init_chip(asd_ha); if (err) { asd_printk("couldn't init the chip\n"); goto Out; } Out: return err; } /* ---------- Chip reset ---------- */ /** * asd_chip_reset -- reset the host adapter, etc * @asd_ha: pointer to host adapter structure of interest * * Called from the ISR. Hard reset the chip. Let everything * timeout. This should be no different than hot-unplugging the * host adapter. Once everything times out we'll init the chip with * a call to asd_init_chip() and enable interrupts with asd_enable_ints(). * XXX finish. 
*/ static void asd_chip_reset(struct asd_ha_struct *asd_ha) { struct sas_ha_struct *sas_ha = &asd_ha->sas_ha; ASD_DPRINTK("chip reset for %s\n", pci_name(asd_ha->pcidev)); asd_chip_hardrst(asd_ha); sas_ha->notify_ha_event(sas_ha, HAE_RESET); } /* ---------- Done List Routines ---------- */ static void asd_dl_tasklet_handler(unsigned long data) { struct asd_ha_struct *asd_ha = (struct asd_ha_struct *) data; struct asd_seq_data *seq = &asd_ha->seq; unsigned long flags; while (1) { struct done_list_struct *dl = &seq->dl[seq->dl_next]; struct asd_ascb *ascb; if ((dl->toggle & DL_TOGGLE_MASK) != seq->dl_toggle) break; /* find the aSCB */ spin_lock_irqsave(&seq->tc_index_lock, flags); ascb = asd_tc_index_find(seq, (int)le16_to_cpu(dl->index)); spin_unlock_irqrestore(&seq->tc_index_lock, flags); if (unlikely(!ascb)) { ASD_DPRINTK("BUG:sequencer:dl:no ascb?!\n"); goto next_1; } else if (ascb->scb->header.opcode == EMPTY_SCB) { goto out; } else if (!ascb->uldd_timer && !del_timer(&ascb->timer)) { goto next_1; } spin_lock_irqsave(&seq->pend_q_lock, flags); list_del_init(&ascb->list); seq->pending--; spin_unlock_irqrestore(&seq->pend_q_lock, flags); out: ascb->tasklet_complete(ascb, dl); next_1: seq->dl_next = (seq->dl_next + 1) & (ASD_DL_SIZE-1); if (!seq->dl_next) seq->dl_toggle ^= DL_TOGGLE_MASK; } } /* ---------- Interrupt Service Routines ---------- */ /** * asd_process_donelist_isr -- schedule processing of done list entries * @asd_ha: pointer to host adapter structure */ static void asd_process_donelist_isr(struct asd_ha_struct *asd_ha) { tasklet_schedule(&asd_ha->seq.dl_tasklet); } /** * asd_com_sas_isr -- process device communication interrupt (COMINT) * @asd_ha: pointer to host adapter structure */ static void asd_com_sas_isr(struct asd_ha_struct *asd_ha) { u32 comstat = asd_read_reg_dword(asd_ha, COMSTAT); /* clear COMSTAT int */ asd_write_reg_dword(asd_ha, COMSTAT, 0xFFFFFFFF); if (comstat & CSBUFPERR) { asd_printk("%s: command/status buffer dma parity error\n", 
pci_name(asd_ha->pcidev)); } else if (comstat & CSERR) { int i; u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR); dmaerr &= 0xFF; asd_printk("%s: command/status dma error, DMAERR: 0x%02x, " "CSDMAADR: 0x%04x, CSDMAADR+4: 0x%04x\n", pci_name(asd_ha->pcidev), dmaerr, asd_read_reg_dword(asd_ha, CSDMAADR), asd_read_reg_dword(asd_ha, CSDMAADR+4)); asd_printk("CSBUFFER:\n"); for (i = 0; i < 8; i++) { asd_printk("%08x %08x %08x %08x\n", asd_read_reg_dword(asd_ha, CSBUFFER), asd_read_reg_dword(asd_ha, CSBUFFER+4), asd_read_reg_dword(asd_ha, CSBUFFER+8), asd_read_reg_dword(asd_ha, CSBUFFER+12)); } asd_dump_seq_state(asd_ha, 0); } else if (comstat & OVLYERR) { u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR); dmaerr = (dmaerr >> 8) & 0xFF; asd_printk("%s: overlay dma error:0x%x\n", pci_name(asd_ha->pcidev), dmaerr); } asd_chip_reset(asd_ha); } static void asd_arp2_err(struct asd_ha_struct *asd_ha, u32 dchstatus) { static const char *halt_code[256] = { "UNEXPECTED_INTERRUPT0", "UNEXPECTED_INTERRUPT1", "UNEXPECTED_INTERRUPT2", "UNEXPECTED_INTERRUPT3", "UNEXPECTED_INTERRUPT4", "UNEXPECTED_INTERRUPT5", "UNEXPECTED_INTERRUPT6", "UNEXPECTED_INTERRUPT7", "UNEXPECTED_INTERRUPT8", "UNEXPECTED_INTERRUPT9", "UNEXPECTED_INTERRUPT10", [11 ... 19] = "unknown[11,19]", "NO_FREE_SCB_AVAILABLE", "INVALID_SCB_OPCODE", "INVALID_MBX_OPCODE", "INVALID_ATA_STATE", "ATA_QUEUE_FULL", "ATA_TAG_TABLE_FAULT", "ATA_TAG_MASK_FAULT", "BAD_LINK_QUEUE_STATE", "DMA2CHIM_QUEUE_ERROR", "EMPTY_SCB_LIST_FULL", "unknown[30]", "IN_USE_SCB_ON_FREE_LIST", "BAD_OPEN_WAIT_STATE", "INVALID_STP_AFFILIATION", "unknown[34]", "EXEC_QUEUE_ERROR", "TOO_MANY_EMPTIES_NEEDED", "EMPTY_REQ_QUEUE_ERROR", "Q_MONIRTT_MGMT_ERROR", "TARGET_MODE_FLOW_ERROR", "DEVICE_QUEUE_NOT_FOUND", "START_IRTT_TIMER_ERROR", "ABORT_TASK_ILLEGAL_REQ", [43 ... 
255] = "unknown[43,255]" }; if (dchstatus & CSEQINT) { u32 arp2int = asd_read_reg_dword(asd_ha, CARP2INT); if (arp2int & (ARP2WAITTO|ARP2ILLOPC|ARP2PERR|ARP2CIOPERR)) { asd_printk("%s: CSEQ arp2int:0x%x\n", pci_name(asd_ha->pcidev), arp2int); } else if (arp2int & ARP2HALTC) asd_printk("%s: CSEQ halted: %s\n", pci_name(asd_ha->pcidev), halt_code[(arp2int>>16)&0xFF]); else asd_printk("%s: CARP2INT:0x%x\n", pci_name(asd_ha->pcidev), arp2int); } if (dchstatus & LSEQINT_MASK) { int lseq; u8 lseq_mask = dchstatus & LSEQINT_MASK; for_each_sequencer(lseq_mask, lseq_mask, lseq) { u32 arp2int = asd_read_reg_dword(asd_ha, LmARP2INT(lseq)); if (arp2int & (ARP2WAITTO | ARP2ILLOPC | ARP2PERR | ARP2CIOPERR)) { asd_printk("%s: LSEQ%d arp2int:0x%x\n", pci_name(asd_ha->pcidev), lseq, arp2int); /* XXX we should only do lseq reset */ } else if (arp2int & ARP2HALTC) asd_printk("%s: LSEQ%d halted: %s\n", pci_name(asd_ha->pcidev), lseq,halt_code[(arp2int>>16)&0xFF]); else asd_printk("%s: LSEQ%d ARP2INT:0x%x\n", pci_name(asd_ha->pcidev), lseq, arp2int); } } asd_chip_reset(asd_ha); } /** * asd_dch_sas_isr -- process device channel interrupt (DEVINT) * @asd_ha: pointer to host adapter structure */ static void asd_dch_sas_isr(struct asd_ha_struct *asd_ha) { u32 dchstatus = asd_read_reg_dword(asd_ha, DCHSTATUS); if (dchstatus & CFIFTOERR) { asd_printk("%s: CFIFTOERR\n", pci_name(asd_ha->pcidev)); asd_chip_reset(asd_ha); } else asd_arp2_err(asd_ha, dchstatus); } /** * ads_rbi_exsi_isr -- process external system interface interrupt (INITERR) * @asd_ha: pointer to host adapter structure */ static void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha) { u32 stat0r = asd_read_reg_dword(asd_ha, ASISTAT0R); if (!(stat0r & ASIERR)) { asd_printk("hmm, EXSI interrupted but no error?\n"); return; } if (stat0r & ASIFMTERR) { asd_printk("ASI SEEPROM format error for %s\n", pci_name(asd_ha->pcidev)); } else if (stat0r & ASISEECHKERR) { u32 stat1r = asd_read_reg_dword(asd_ha, ASISTAT1R); asd_printk("ASI 
SEEPROM checksum 0x%x error for %s\n", stat1r & CHECKSUM_MASK, pci_name(asd_ha->pcidev)); } else { u32 statr = asd_read_reg_dword(asd_ha, ASIERRSTATR); if (!(statr & CPI2ASIMSTERR_MASK)) { ASD_DPRINTK("hmm, ASIERR?\n"); return; } else { u32 addr = asd_read_reg_dword(asd_ha, ASIERRADDR); u32 data = asd_read_reg_dword(asd_ha, ASIERRDATAR); asd_printk("%s: CPI2 xfer err: addr: 0x%x, wdata: 0x%x, " "count: 0x%x, byteen: 0x%x, targerr: 0x%x " "master id: 0x%x, master err: 0x%x\n", pci_name(asd_ha->pcidev), addr, data, (statr & CPI2ASIBYTECNT_MASK) >> 16, (statr & CPI2ASIBYTEEN_MASK) >> 12, (statr & CPI2ASITARGERR_MASK) >> 8, (statr & CPI2ASITARGMID_MASK) >> 4, (statr & CPI2ASIMSTERR_MASK)); } } asd_chip_reset(asd_ha); } /** * asd_hst_pcix_isr -- process host interface interrupts * @asd_ha: pointer to host adapter structure * * Asserted on PCIX errors: target abort, etc. */ static void asd_hst_pcix_isr(struct asd_ha_struct *asd_ha) { u16 status; u32 pcix_status; u32 ecc_status; pci_read_config_word(asd_ha->pcidev, PCI_STATUS, &status); pci_read_config_dword(asd_ha->pcidev, PCIX_STATUS, &pcix_status); pci_read_config_dword(asd_ha->pcidev, ECC_CTRL_STAT, &ecc_status); if (status & PCI_STATUS_DETECTED_PARITY) asd_printk("parity error for %s\n", pci_name(asd_ha->pcidev)); else if (status & PCI_STATUS_REC_MASTER_ABORT) asd_printk("master abort for %s\n", pci_name(asd_ha->pcidev)); else if (status & PCI_STATUS_REC_TARGET_ABORT) asd_printk("target abort for %s\n", pci_name(asd_ha->pcidev)); else if (status & PCI_STATUS_PARITY) asd_printk("data parity for %s\n", pci_name(asd_ha->pcidev)); else if (pcix_status & RCV_SCE) { asd_printk("received split completion error for %s\n", pci_name(asd_ha->pcidev)); pci_write_config_dword(asd_ha->pcidev,PCIX_STATUS,pcix_status); /* XXX: Abort task? 
*/ return; } else if (pcix_status & UNEXP_SC) { asd_printk("unexpected split completion for %s\n", pci_name(asd_ha->pcidev)); pci_write_config_dword(asd_ha->pcidev,PCIX_STATUS,pcix_status); /* ignore */ return; } else if (pcix_status & SC_DISCARD) asd_printk("split completion discarded for %s\n", pci_name(asd_ha->pcidev)); else if (ecc_status & UNCOR_ECCERR) asd_printk("uncorrectable ECC error for %s\n", pci_name(asd_ha->pcidev)); asd_chip_reset(asd_ha); } /** * asd_hw_isr -- host adapter interrupt service routine * @irq: ignored * @dev_id: pointer to host adapter structure * * The ISR processes done list entries and level 3 error handling. */ irqreturn_t asd_hw_isr(int irq, void *dev_id) { struct asd_ha_struct *asd_ha = dev_id; u32 chimint = asd_read_reg_dword(asd_ha, CHIMINT); if (!chimint) return IRQ_NONE; asd_write_reg_dword(asd_ha, CHIMINT, chimint); (void) asd_read_reg_dword(asd_ha, CHIMINT); if (chimint & DLAVAIL) asd_process_donelist_isr(asd_ha); if (chimint & COMINT) asd_com_sas_isr(asd_ha); if (chimint & DEVINT) asd_dch_sas_isr(asd_ha); if (chimint & INITERR) asd_rbi_exsi_isr(asd_ha); if (chimint & HOSTERR) asd_hst_pcix_isr(asd_ha); return IRQ_HANDLED; } /* ---------- SCB handling ---------- */ static struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha, gfp_t gfp_flags) { extern struct kmem_cache *asd_ascb_cache; struct asd_seq_data *seq = &asd_ha->seq; struct asd_ascb *ascb; unsigned long flags; ascb = kmem_cache_zalloc(asd_ascb_cache, gfp_flags); if (ascb) { ascb->dma_scb.size = sizeof(struct scb); ascb->dma_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool, gfp_flags, &ascb->dma_scb.dma_handle); if (!ascb->dma_scb.vaddr) { kmem_cache_free(asd_ascb_cache, ascb); return NULL; } memset(ascb->dma_scb.vaddr, 0, sizeof(struct scb)); asd_init_ascb(asd_ha, ascb); spin_lock_irqsave(&seq->tc_index_lock, flags); ascb->tc_index = asd_tc_index_get(seq, ascb); spin_unlock_irqrestore(&seq->tc_index_lock, flags); if (ascb->tc_index == -1) goto undo; 
ascb->scb->header.index = cpu_to_le16((u16)ascb->tc_index); } return ascb; undo: dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr, ascb->dma_scb.dma_handle); kmem_cache_free(asd_ascb_cache, ascb); ASD_DPRINTK("no index for ascb\n"); return NULL; } /** * asd_ascb_alloc_list -- allocate a list of aSCBs * @asd_ha: pointer to host adapter structure * @num: pointer to integer number of aSCBs * @gfp_flags: GFP_ flags. * * This is the only function which is used to allocate aSCBs. * It can allocate one or many. If more than one, then they form * a linked list in two ways: by their list field of the ascb struct * and by the next_scb field of the scb_header. * * Returns NULL if no memory was available, else pointer to a list * of ascbs. When this function returns, @num would be the number * of SCBs which were not able to be allocated, 0 if all requested * were able to be allocated. */ struct asd_ascb *asd_ascb_alloc_list(struct asd_ha_struct *asd_ha, int *num, gfp_t gfp_flags) { struct asd_ascb *first = NULL; for ( ; *num > 0; --*num) { struct asd_ascb *ascb = asd_ascb_alloc(asd_ha, gfp_flags); if (!ascb) break; else if (!first) first = ascb; else { struct asd_ascb *last = list_entry(first->list.prev, struct asd_ascb, list); list_add_tail(&ascb->list, &first->list); last->scb->header.next_scb = cpu_to_le64(((u64)ascb->dma_scb.dma_handle)); } } return first; } /** * asd_swap_head_scb -- swap the head scb * @asd_ha: pointer to host adapter structure * @ascb: pointer to the head of an ascb list * * The sequencer knows the DMA address of the next SCB to be DMAed to * the host adapter, from initialization or from the last list DMAed. * seq->next_scb keeps the address of this SCB. The sequencer will * DMA to the host adapter this list of SCBs. But the head (first * element) of this list is not known to the sequencer. Here we swap * the head of the list with the known SCB (memcpy()). 
* Only one memcpy() is required per list so it is in our interest * to keep the list of SCB as long as possible so that the ratio * of number of memcpy calls to the number of SCB DMA-ed is as small * as possible. * * LOCKING: called with the pending list lock held. */ static void asd_swap_head_scb(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb) { struct asd_seq_data *seq = &asd_ha->seq; struct asd_ascb *last = list_entry(ascb->list.prev, struct asd_ascb, list); struct asd_dma_tok t = ascb->dma_scb; memcpy(seq->next_scb.vaddr, ascb->scb, sizeof(*ascb->scb)); ascb->dma_scb = seq->next_scb; ascb->scb = ascb->dma_scb.vaddr; seq->next_scb = t; last->scb->header.next_scb = cpu_to_le64(((u64)seq->next_scb.dma_handle)); } /** * asd_start_timers -- (add and) start timers of SCBs * @list: pointer to struct list_head of the scbs * @to: timeout in jiffies * * If an SCB in the @list has no timer function, assign the default * one, then start the timer of the SCB. This function is * intended to be called from asd_post_ascb_list(), just prior to * posting the SCBs to the sequencer. */ static void asd_start_scb_timers(struct list_head *list) { struct asd_ascb *ascb; list_for_each_entry(ascb, list, list) { if (!ascb->uldd_timer) { ascb->timer.data = (unsigned long) ascb; ascb->timer.function = asd_ascb_timedout; ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT; add_timer(&ascb->timer); } } } /** * asd_post_ascb_list -- post a list of 1 or more aSCBs to the host adapter * @asd_ha: pointer to a host adapter structure * @ascb: pointer to the first aSCB in the list * @num: number of aSCBs in the list (to be posted) * * See queueing comment in asd_post_escb_list(). * * Additional note on queuing: In order to minimize the ratio of memcpy() * to the number of ascbs sent, we try to batch-send as many ascbs as possible * in one go. * Two cases are possible: * A) can_queue >= num, * B) can_queue < num. * Case A: we can send the whole batch at once. 
Increment "pending" * in the beginning of this function, when it is checked, in order to * eliminate races when this function is called by multiple processes. * Case B: should never happen. */ int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb, int num) { unsigned long flags; LIST_HEAD(list); int can_queue; spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags); can_queue = asd_ha->hw_prof.max_scbs - asd_ha->seq.pending; if (can_queue >= num) asd_ha->seq.pending += num; else can_queue = 0; if (!can_queue) { spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); asd_printk("%s: scb queue full\n", pci_name(asd_ha->pcidev)); return -SAS_QUEUE_FULL; } asd_swap_head_scb(asd_ha, ascb); __list_add(&list, ascb->list.prev, &ascb->list); asd_start_scb_timers(&list); asd_ha->seq.scbpro += num; list_splice_init(&list, asd_ha->seq.pend_q.prev); asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro); spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); return 0; } /** * asd_post_escb_list -- post a list of 1 or more empty scb * @asd_ha: pointer to a host adapter structure * @ascb: pointer to the first empty SCB in the list * @num: number of aSCBs in the list (to be posted) * * This is essentially the same as asd_post_ascb_list, but we do not * increment pending, add those to the pending list or get indexes. * See asd_init_escbs() and asd_init_post_escbs(). * * Since sending a list of ascbs is a superset of sending a single * ascb, this function exists to generalize this. More specifically, * when sending a list of those, we want to do only a _single_ * memcpy() at swap head, as opposed to for each ascb sent (in the * case of sending them one by one). That is, we want to minimize the * ratio of memcpy() operations to the number of ascbs sent. The same * logic applies to asd_post_ascb_list(). 
*/ int asd_post_escb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb, int num) { unsigned long flags; spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags); asd_swap_head_scb(asd_ha, ascb); asd_ha->seq.scbpro += num; asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro); spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); return 0; } /* ---------- LED ---------- */ /** * asd_turn_led -- turn on/off an LED * @asd_ha: pointer to host adapter structure * @phy_id: the PHY id whose LED we want to manupulate * @op: 1 to turn on, 0 to turn off */ void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op) { if (phy_id < ASD_MAX_PHYS) { u32 v = asd_read_reg_dword(asd_ha, LmCONTROL(phy_id)); if (op) v |= LEDPOL; else v &= ~LEDPOL; asd_write_reg_dword(asd_ha, LmCONTROL(phy_id), v); } } /** * asd_control_led -- enable/disable an LED on the board * @asd_ha: pointer to host adapter structure * @phy_id: integer, the phy id * @op: integer, 1 to enable, 0 to disable the LED * * First we output enable the LED, then we set the source * to be an external module. */ void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op) { if (phy_id < ASD_MAX_PHYS) { u32 v; v = asd_read_reg_dword(asd_ha, GPIOOER); if (op) v |= (1 << phy_id); else v &= ~(1 << phy_id); asd_write_reg_dword(asd_ha, GPIOOER, v); v = asd_read_reg_dword(asd_ha, GPIOCNFGR); if (op) v |= (1 << phy_id); else v &= ~(1 << phy_id); asd_write_reg_dword(asd_ha, GPIOCNFGR, v); } } /* ---------- PHY enable ---------- */ static int asd_enable_phy(struct asd_ha_struct *asd_ha, int phy_id) { struct asd_phy *phy = &asd_ha->phys[phy_id]; asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, INT_ENABLE_2), 0); asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, HOT_PLUG_DELAY), HOTPLUG_DELAY_TIMEOUT); /* Get defaults from manuf. sector */ /* XXX we need defaults for those in case MS is broken. 
*/ asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_0), phy->phy_desc->phy_control_0); asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_1), phy->phy_desc->phy_control_1); asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_2), phy->phy_desc->phy_control_2); asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_3), phy->phy_desc->phy_control_3); asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(phy_id), ASD_COMINIT_TIMEOUT); asd_write_reg_addr(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(phy_id), phy->id_frm_tok->dma_handle); asd_control_led(asd_ha, phy_id, 1); return 0; } int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask) { u8 phy_m; u8 i; int num = 0, k; struct asd_ascb *ascb; struct asd_ascb *ascb_list; if (!phy_mask) { asd_printk("%s called with phy_mask of 0!?\n", __func__); return 0; } for_each_phy(phy_mask, phy_m, i) { num++; asd_enable_phy(asd_ha, i); } k = num; ascb_list = asd_ascb_alloc_list(asd_ha, &k, GFP_KERNEL); if (!ascb_list) { asd_printk("no memory for control phy ascb list\n"); return -ENOMEM; } num -= k; ascb = ascb_list; for_each_phy(phy_mask, phy_m, i) { asd_build_control_phy(ascb, i, ENABLE_PHY); ascb = list_entry(ascb->list.next, struct asd_ascb, list); } ASD_DPRINTK("posting %d control phy scbs\n", num); k = asd_post_ascb_list(asd_ha, ascb_list, num); if (k) asd_ascb_free_list(ascb_list); return k; }
{ "language": "C" }
/* crypto/idea/idea.h */ /* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com) * All rights reserved. * * This package is an SSL implementation written * by Eric Young (eay@cryptsoft.com). * The implementation was written so as to conform with Netscapes SSL. * * This library is free for commercial and non-commercial use as long as * the following conditions are aheared to. The following conditions * apply to all code found in this distribution, be it the RC4, RSA, * lhash, DES, etc., code; not just the SSL code. The SSL documentation * included with this distribution is covered by the same copyright terms * except that the holder is Tim Hudson (tjh@cryptsoft.com). * * Copyright remains Eric Young's, and as such any Copyright notices in * the code are not to be removed. * If this package is used in a product, Eric Young should be given attribution * as the author of the parts of the library used. * This can be in the form of a textual message at program startup or * in documentation (online or textual) provided with the package. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * "This product includes cryptographic software written by * Eric Young (eay@cryptsoft.com)" * The word 'cryptographic' can be left out if the rouines from the library * being used are not cryptographic related :-). * 4. 
If you include any Windows specific code (or a derivative thereof) from * the apps directory (application code) you must include an acknowledgement: * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" * * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The licence and distribution terms for any publically available version or * derivative of this code cannot be changed. i.e. this code cannot simply be * copied and put under another distribution licence * [including the GNU Public Licence.] */ #ifndef HEADER_IDEA_H # define HEADER_IDEA_H # include <openssl/opensslconf.h>/* IDEA_INT, OPENSSL_NO_IDEA */ # ifdef OPENSSL_NO_IDEA # error IDEA is disabled. 
# endif # define IDEA_ENCRYPT 1 # define IDEA_DECRYPT 0 # define IDEA_BLOCK 8 # define IDEA_KEY_LENGTH 16 #ifdef __cplusplus extern "C" { #endif typedef struct idea_key_st { IDEA_INT data[9][6]; } IDEA_KEY_SCHEDULE; const char *idea_options(void); void idea_ecb_encrypt(const unsigned char *in, unsigned char *out, IDEA_KEY_SCHEDULE *ks); # ifdef OPENSSL_FIPS void private_idea_set_encrypt_key(const unsigned char *key, IDEA_KEY_SCHEDULE *ks); # endif void idea_set_encrypt_key(const unsigned char *key, IDEA_KEY_SCHEDULE *ks); void idea_set_decrypt_key(IDEA_KEY_SCHEDULE *ek, IDEA_KEY_SCHEDULE *dk); void idea_cbc_encrypt(const unsigned char *in, unsigned char *out, long length, IDEA_KEY_SCHEDULE *ks, unsigned char *iv, int enc); void idea_cfb64_encrypt(const unsigned char *in, unsigned char *out, long length, IDEA_KEY_SCHEDULE *ks, unsigned char *iv, int *num, int enc); void idea_ofb64_encrypt(const unsigned char *in, unsigned char *out, long length, IDEA_KEY_SCHEDULE *ks, unsigned char *iv, int *num); void idea_encrypt(unsigned long *in, IDEA_KEY_SCHEDULE *ks); #ifdef __cplusplus } #endif #endif
{ "language": "C" }
/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *            Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");

/*
 * Issue the SIGA instruction with function code @fc to synchronize the
 * output/input queues selected by @out_mask/@in_mask (callers pass
 * QDIO_SIGA_SYNC based function codes).  The operands are pinned to
 * general registers 0-3 because the instruction expects them there.
 * Returns the instruction's condition code (0 on success).
 */
static inline int do_siga_sync(unsigned long schid, unsigned int out_mask,
			       unsigned int in_mask, unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		" siga 0\n"
		" ipm %0\n"
		" srl %0,28\n"		/* move cc from the PSW mask into %0 */
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}

/*
 * Issue SIGA for the input queues selected by @mask.  The "memory"
 * clobber keeps the compiler from caching buffer state across the call,
 * since the adapter may have updated queue memory.  Returns the cc.
 */
static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		" siga 0\n"
		" ipm %0\n"
		" srl %0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	/*
	 * cc is preset to the access-exception value; if the instruction
	 * faults, the EX_TABLE entry resumes execution at label 1: and the
	 * preset is returned unchanged.
	 */
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		" siga 0\n"
		"0: ipm %0\n"
		" srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
		: : "cc", "memory");
	/* busy bit is returned in the most significant bit of register 0 */
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}

/*
 * Map an EQBS/SQBS condition code qualifier to a tristate result:
 * 0 = done, 1 = retry (not all buffers processed), -EIO = hard error.
 */
static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* not all buffers processed */
	if (ccq == 96 || ccq == 97)
		return 1;
	/* notify devices immediately */
	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
	return -EIO;
}

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffers state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, eqbs);

	/* output queue numbers follow the input queues in QEBSM numbering */
	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);

	/* At least one buffer was processed, return and extract the remaining
	 * buffers later.
*/ if ((ccq == 96) && (count != tmp_count)) { qperf_inc(q, eqbs_partial); return (count - tmp_count); } if (rc == 1) { DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); goto again; } if (rc < 0) { DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION, 0, -1, -1, q->irq_ptr->int_parm); return 0; } return count - tmp_count; } /** * qdio_do_sqbs - set buffer states for QEBSM * @q: queue to manipulate * @state: new state of the buffers * @start: first buffer number to change * @count: how many buffers to change * * Returns the number of successfully changed buffers. * Does retrying until the specified count of buffer states is set or an * error occurs. */ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, int count) { unsigned int ccq = 0; int tmp_count = count, tmp_start = start; int nr = q->nr; int rc; if (!count) return 0; BUG_ON(!q->irq_ptr->sch_token); qperf_inc(q, sqbs); if (!q->is_input_q) nr += q->irq_ptr->nr_input_qs; again: ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); rc = qdio_check_ccq(q, ccq); if (rc == 1) { DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq); qperf_inc(q, sqbs_partial); goto again; } if (rc < 0) { DBF_ERROR("%4x SQBS ERROR", SCH_NO(q)); DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION, 0, -1, -1, q->irq_ptr->int_parm); return 0; } WARN_ON(tmp_count); return count - tmp_count; } /* returns number of examined buffers and their common state in *state */ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, unsigned char *state, unsigned int count, int auto_ack) { unsigned char __state = 0; int i; BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK); BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q); if (is_qebsm(q)) return qdio_do_eqbs(q, state, bufnr, count, auto_ack); for (i = 0; i < count; i++) { if (!__state) __state = 
q->slsb.val[bufnr]; else if (q->slsb.val[bufnr] != __state) break; bufnr = next_buf(bufnr); } *state = __state; return i; } static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state, int auto_ack) { return get_buf_states(q, bufnr, state, 1, auto_ack); } /* wrap-around safe setting of slsb states, returns number of changed buffers */ static inline int set_buf_states(struct qdio_q *q, int bufnr, unsigned char state, int count) { int i; BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK); BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q); if (is_qebsm(q)) return qdio_do_sqbs(q, state, bufnr, count); for (i = 0; i < count; i++) { xchg(&q->slsb.val[bufnr], state); bufnr = next_buf(bufnr); } return count; } static inline int set_buf_state(struct qdio_q *q, int bufnr, unsigned char state) { return set_buf_states(q, bufnr, state, 1); } /* set slsb states to initial state */ void qdio_init_buf_states(struct qdio_irq *irq_ptr) { struct qdio_q *q; int i; for_each_input_queue(irq_ptr, q, i) set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT, QDIO_MAX_BUFFERS_PER_Q); for_each_output_queue(irq_ptr, q, i) set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT, QDIO_MAX_BUFFERS_PER_Q); } static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output, unsigned int input) { unsigned long schid = *((u32 *) &q->irq_ptr->schid); unsigned int fc = QDIO_SIGA_SYNC; int cc; if (!need_siga_sync(q)) return 0; DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr); qperf_inc(q, siga_sync); if (is_qebsm(q)) { schid = q->irq_ptr->sch_token; fc |= QDIO_SIGA_QEBSM_FLAG; } cc = do_siga_sync(schid, output, input, fc); if (cc) DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc); return cc; } static inline int qdio_siga_sync_q(struct qdio_q *q) { if (q->is_input_q) return qdio_siga_sync(q, 0, q->mask); else return qdio_siga_sync(q, q->mask, 0); } static inline int qdio_siga_sync_out(struct qdio_q *q) { return qdio_siga_sync(q, ~0U, 0); } static inline int qdio_siga_sync_all(struct qdio_q *q) { return 
qdio_siga_sync(q, ~0U, ~0U); } static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit) { unsigned long schid = *((u32 *) &q->irq_ptr->schid); unsigned int fc = QDIO_SIGA_WRITE; u64 start_time = 0; int cc; if (q->u.out.use_enh_siga) fc = 3; if (is_qebsm(q)) { schid = q->irq_ptr->sch_token; fc |= QDIO_SIGA_QEBSM_FLAG; } again: cc = do_siga_output(schid, q->mask, busy_bit, fc); /* hipersocket busy condition */ if (*busy_bit) { WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2); if (!start_time) { start_time = get_clock(); goto again; } if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE) goto again; } return cc; } static inline int qdio_siga_input(struct qdio_q *q) { unsigned long schid = *((u32 *) &q->irq_ptr->schid); unsigned int fc = QDIO_SIGA_READ; int cc; DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr); qperf_inc(q, siga_read); if (is_qebsm(q)) { schid = q->irq_ptr->sch_token; fc |= QDIO_SIGA_QEBSM_FLAG; } cc = do_siga_input(schid, q->mask, fc); if (cc) DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc); return cc; } static inline void qdio_sync_after_thinint(struct qdio_q *q) { if (pci_out_supported(q)) { if (need_siga_sync_thinint(q)) qdio_siga_sync_all(q); else if (need_siga_sync_out_thinint(q)) qdio_siga_sync_out(q); } else qdio_siga_sync_q(q); } int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state) { qdio_siga_sync_q(q); return get_buf_states(q, bufnr, state, 1, 0); } static inline void qdio_stop_polling(struct qdio_q *q) { if (!q->u.in.polling) return; q->u.in.polling = 0; qperf_inc(q, stop_polling); /* show the card that we are not polling anymore */ if (is_qebsm(q)) { set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, q->u.in.ack_count); q->u.in.ack_count = 0; } else set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); } static inline void account_sbals(struct qdio_q *q, int count) { int pos = 0; q->q_stats.nr_sbal_total += count; if (count == QDIO_MAX_BUFFERS_MASK) { 
q->q_stats.nr_sbals[7]++; return; } while (count >>= 1) pos++; q->q_stats.nr_sbals[pos]++; } static void announce_buffer_error(struct qdio_q *q, int count) { q->qdio_error |= QDIO_ERROR_SLSB_STATE; /* special handling for no target buffer empty */ if ((!q->is_input_q && (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) { qperf_inc(q, target_full); DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", q->first_to_check); return; } DBF_ERROR("%4x BUF ERROR", SCH_NO(q)); DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr); DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count); DBF_ERROR("F14:%2x F15:%2x", q->sbal[q->first_to_check]->element[14].flags & 0xff, q->sbal[q->first_to_check]->element[15].flags & 0xff); } static inline void inbound_primed(struct qdio_q *q, int count) { int new; DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count); /* for QEBSM the ACK was already set by EQBS */ if (is_qebsm(q)) { if (!q->u.in.polling) { q->u.in.polling = 1; q->u.in.ack_count = count; q->u.in.ack_start = q->first_to_check; return; } /* delete the previous ACK's */ set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, q->u.in.ack_count); q->u.in.ack_count = count; q->u.in.ack_start = q->first_to_check; return; } /* * ACK the newest buffer. The ACK will be removed in qdio_stop_polling * or by the next inbound run. 
*/ new = add_buf(q->first_to_check, count - 1); if (q->u.in.polling) { /* reset the previous ACK but first set the new one */ set_buf_state(q, new, SLSB_P_INPUT_ACK); set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); } else { q->u.in.polling = 1; set_buf_state(q, new, SLSB_P_INPUT_ACK); } q->u.in.ack_start = new; count--; if (!count) return; /* need to change ALL buffers to get more interrupts */ set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count); } static int get_inbound_buffer_frontier(struct qdio_q *q) { int count, stop; unsigned char state; /* * Don't check 128 buffers, as otherwise qdio_inbound_q_moved * would return 0. */ count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); stop = add_buf(q->first_to_check, count); if (q->first_to_check == stop) goto out; /* * No siga sync here, as a PCI or we after a thin interrupt * already sync'ed the queues. */ count = get_buf_states(q, q->first_to_check, &state, count, 1); if (!count) goto out; switch (state) { case SLSB_P_INPUT_PRIMED: inbound_primed(q, count); q->first_to_check = add_buf(q->first_to_check, count); if (atomic_sub(count, &q->nr_buf_used) == 0) qperf_inc(q, inbound_queue_full); if (q->irq_ptr->perf_stat_enabled) account_sbals(q, count); break; case SLSB_P_INPUT_ERROR: announce_buffer_error(q, count); /* process the buffer, the upper layer will take care of it */ q->first_to_check = add_buf(q->first_to_check, count); atomic_sub(count, &q->nr_buf_used); if (q->irq_ptr->perf_stat_enabled) account_sbals_error(q, count); break; case SLSB_CU_INPUT_EMPTY: case SLSB_P_INPUT_NOT_INIT: case SLSB_P_INPUT_ACK: if (q->irq_ptr->perf_stat_enabled) q->q_stats.nr_sbal_nop++; DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop"); break; default: BUG(); } out: return q->first_to_check; } static int qdio_inbound_q_moved(struct qdio_q *q) { int bufnr; bufnr = get_inbound_buffer_frontier(q); if ((bufnr != q->last_move) || q->qdio_error) { q->last_move = bufnr; if (!is_thinint_irq(q->irq_ptr) && 
MACHINE_IS_LPAR) q->u.in.timestamp = get_clock(); return 1; } else return 0; } static inline int qdio_inbound_q_done(struct qdio_q *q) { unsigned char state = 0; if (!atomic_read(&q->nr_buf_used)) return 1; qdio_siga_sync_q(q); get_buf_state(q, q->first_to_check, &state, 0); if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR) /* more work coming */ return 0; if (is_thinint_irq(q->irq_ptr)) return 1; /* don't poll under z/VM */ if (MACHINE_IS_VM) return 1; /* * At this point we know, that inbound first_to_check * has (probably) not moved (see qdio_inbound_processing). */ if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", q->first_to_check); return 1; } else return 0; } static void qdio_kick_handler(struct qdio_q *q) { int start = q->first_to_kick; int end = q->first_to_check; int count; if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) return; count = sub_buf(end, start); if (q->is_input_q) { qperf_inc(q, inbound_handler); DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count); } else { qperf_inc(q, outbound_handler); DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x", start, count); } q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count, q->irq_ptr->int_parm); /* for the next time */ q->first_to_kick = end; q->qdio_error = 0; } static void __qdio_inbound_processing(struct qdio_q *q) { qperf_inc(q, tasklet_inbound); if (!qdio_inbound_q_moved(q)) return; qdio_kick_handler(q); if (!qdio_inbound_q_done(q)) { /* means poll time is not yet over */ qperf_inc(q, tasklet_inbound_resched); if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { tasklet_schedule(&q->tasklet); return; } } qdio_stop_polling(q); /* * We need to check again to not lose initiative after * resetting the ACK state. 
*/ if (!qdio_inbound_q_done(q)) { qperf_inc(q, tasklet_inbound_resched2); if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) tasklet_schedule(&q->tasklet); } } void qdio_inbound_processing(unsigned long data) { struct qdio_q *q = (struct qdio_q *)data; __qdio_inbound_processing(q); } static int get_outbound_buffer_frontier(struct qdio_q *q) { int count, stop; unsigned char state; if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) || (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q))) qdio_siga_sync_q(q); /* * Don't check 128 buffers, as otherwise qdio_inbound_q_moved * would return 0. */ count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); stop = add_buf(q->first_to_check, count); if (q->first_to_check == stop) return q->first_to_check; count = get_buf_states(q, q->first_to_check, &state, count, 0); if (!count) return q->first_to_check; switch (state) { case SLSB_P_OUTPUT_EMPTY: /* the adapter got it */ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count); atomic_sub(count, &q->nr_buf_used); q->first_to_check = add_buf(q->first_to_check, count); if (q->irq_ptr->perf_stat_enabled) account_sbals(q, count); break; case SLSB_P_OUTPUT_ERROR: announce_buffer_error(q, count); /* process the buffer, the upper layer will take care of it */ q->first_to_check = add_buf(q->first_to_check, count); atomic_sub(count, &q->nr_buf_used); if (q->irq_ptr->perf_stat_enabled) account_sbals_error(q, count); break; case SLSB_CU_OUTPUT_PRIMED: /* the adapter has not fetched the output yet */ if (q->irq_ptr->perf_stat_enabled) q->q_stats.nr_sbal_nop++; DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr); break; case SLSB_P_OUTPUT_NOT_INIT: case SLSB_P_OUTPUT_HALTED: break; default: BUG(); } return q->first_to_check; } /* all buffers processed? 
 */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

/*
 * Returns 1 when the outbound frontier advanced since the last check or
 * an error is pending (q->qdio_error), 0 otherwise.  Remembers the new
 * frontier in q->last_move.
 */
static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		return 1;
	} else
		return 0;
}

/*
 * Signal the adapter to process the outbound queue (SIGA-w), if needed.
 * Returns 0 on success; on cc 2 with the busy bit set the
 * QDIO_ERROR_SIGA_BUSY flag is or'ed into the returned value.
 */
static int qdio_kick_outbound_q(struct qdio_q *q)
{
	unsigned int busy_bit;
	int cc;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, &busy_bit);
	switch (cc) {
	case 0:
		break;
	case 2:
		/* busy bit: the adapter could not access a buffer */
		if (busy_bit) {
			DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
			cc |= QDIO_ERROR_SIGA_BUSY;
		} else
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
				      "siga-w cc2:%1d", q->nr);
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		break;
	}
	return cc;
}

/*
 * Core of the outbound tasklet: deliver finished buffers to the upper
 * layer and decide whether to reschedule the tasklet, arm the fallback
 * timer, or do nothing (PCI interrupt will drive further progress).
 */
static void __qdio_outbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_outbound);
	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT)
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;

	/* bail out for HiperSockets unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
		return;

	if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
	    (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
		goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
	 * EMPTY is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else if (!timer_pending(&q->u.out.timer))
		mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	/* never reschedule once the device has been marked stopped */
	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}

/* fallback timer: re-arm the outbound tasklet unless the device stopped */
void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

/*
 * After a thin interrupt, kick the tasklet of every PCI-capable outbound
 * queue that still has buffers in flight.
 */
static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}

/* thin-interrupt variant of the inbound tasklet core */
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);
	qdio_sync_after_thinint(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
*/ if (!qdio_inbound_q_done(q)) { qperf_inc(q, tasklet_inbound_resched2); if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) tasklet_schedule(&q->tasklet); } } void tiqdio_inbound_processing(unsigned long data) { struct qdio_q *q = (struct qdio_q *)data; __tiqdio_inbound_processing(q); } static inline void qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state) { DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state); irq_ptr->state = state; mb(); } static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb) { if (irb->esw.esw0.erw.cons) { DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no); DBF_ERROR_HEX(irb, 64); DBF_ERROR_HEX(irb->ecw, 64); } } /* PCI interrupt handler */ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) { int i; struct qdio_q *q; if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) return; for_each_input_queue(irq_ptr, q, i) tasklet_schedule(&q->tasklet); if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) return; for_each_output_queue(irq_ptr, q, i) { if (qdio_outbound_q_done(q)) continue; if (!siga_syncs_out_pci(q)) qdio_siga_sync_q(q); tasklet_schedule(&q->tasklet); } } static void qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm, int cstat, int dstat) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; struct qdio_q *q; DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no); DBF_ERROR("intp :%lx", intparm); DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); if (irq_ptr->nr_input_qs) { q = irq_ptr->input_qs[0]; } else if (irq_ptr->nr_output_qs) { q = irq_ptr->output_qs[0]; } else { dump_stack(); goto no_handler; } q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION, 0, -1, -1, irq_ptr->int_parm); no_handler: qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); } static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq"); if (cstat) goto error; if 
(dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END)) goto error; if (!(dstat & DEV_STAT_DEV_END)) goto error; qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED); return; error: DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no); DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); } /* qdio interrupt handler */ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; int cstat, dstat; if (!intparm || !irq_ptr) { DBF_ERROR("qint:%4x", cdev->private->schid.sch_no); return; } if (irq_ptr->perf_stat_enabled) irq_ptr->perf_stat.qdio_int++; if (IS_ERR(irb)) { switch (PTR_ERR(irb)) { case -EIO: DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); wake_up(&cdev->private->wait_q); return; default: WARN_ON(1); return; } } qdio_irq_check_sense(irq_ptr, irb); cstat = irb->scsw.cmd.cstat; dstat = irb->scsw.cmd.dstat; switch (irq_ptr->state) { case QDIO_IRQ_STATE_INACTIVE: qdio_establish_handle_irq(cdev, cstat, dstat); break; case QDIO_IRQ_STATE_CLEANUP: qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); break; case QDIO_IRQ_STATE_ESTABLISHED: case QDIO_IRQ_STATE_ACTIVE: if (cstat & SCHN_STAT_PCI) { qdio_int_handler_pci(irq_ptr); return; } if (cstat || dstat) qdio_handle_activate_check(cdev, intparm, cstat, dstat); break; case QDIO_IRQ_STATE_STOPPED: break; default: WARN_ON(1); } wake_up(&cdev->private->wait_q); } /** * qdio_get_ssqd_desc - get qdio subchannel description * @cdev: ccw device to get description for * @data: where to store the ssqd * * Returns 0 or an error code. The results of the chsc are stored in the * specified structure. 
*/ int qdio_get_ssqd_desc(struct ccw_device *cdev, struct qdio_ssqd_desc *data) { if (!cdev || !cdev->private) return -EINVAL; DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no); return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data); } EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); static void qdio_shutdown_queues(struct ccw_device *cdev) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; struct qdio_q *q; int i; for_each_input_queue(irq_ptr, q, i) tasklet_kill(&q->tasklet); for_each_output_queue(irq_ptr, q, i) { del_timer(&q->u.out.timer); tasklet_kill(&q->tasklet); } } /** * qdio_shutdown - shut down a qdio subchannel * @cdev: associated ccw device * @how: use halt or clear to shutdown */ int qdio_shutdown(struct ccw_device *cdev, int how) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; int rc; unsigned long flags; if (!irq_ptr) return -ENODEV; BUG_ON(irqs_disabled()); DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no); mutex_lock(&irq_ptr->setup_mutex); /* * Subchannel was already shot down. We cannot prevent being called * twice since cio may trigger a shutdown asynchronously. */ if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { mutex_unlock(&irq_ptr->setup_mutex); return 0; } /* * Indicate that the device is going down. Scheduling the queue * tasklets is forbidden from here on. 
*/ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); tiqdio_remove_input_queues(irq_ptr); qdio_shutdown_queues(cdev); qdio_shutdown_debug_entries(irq_ptr, cdev); /* cleanup subchannel */ spin_lock_irqsave(get_ccwdev_lock(cdev), flags); if (how & QDIO_FLAG_CLEANUP_USING_CLEAR) rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); else /* default behaviour is halt */ rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); if (rc) { DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no); DBF_ERROR("rc:%4d", rc); goto no_cleanup; } qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); wait_event_interruptible_timeout(cdev->private->wait_q, irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || irq_ptr->state == QDIO_IRQ_STATE_ERR, 10 * HZ); spin_lock_irqsave(get_ccwdev_lock(cdev), flags); no_cleanup: qdio_shutdown_thinint(irq_ptr); /* restore interrupt handler */ if ((void *)cdev->handler == (void *)qdio_int_handler) cdev->handler = irq_ptr->orig_handler; spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); mutex_unlock(&irq_ptr->setup_mutex); if (rc) return rc; return 0; } EXPORT_SYMBOL_GPL(qdio_shutdown); /** * qdio_free - free data structures for a qdio subchannel * @cdev: associated ccw device */ int qdio_free(struct ccw_device *cdev) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; if (!irq_ptr) return -ENODEV; DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no); mutex_lock(&irq_ptr->setup_mutex); if (irq_ptr->debug_area != NULL) { debug_unregister(irq_ptr->debug_area); irq_ptr->debug_area = NULL; } cdev->private->qdio_data = NULL; mutex_unlock(&irq_ptr->setup_mutex); qdio_release_memory(irq_ptr); return 0; } EXPORT_SYMBOL_GPL(qdio_free); /** * qdio_allocate - allocate qdio queues and associated data * @init_data: initialization data */ int qdio_allocate(struct qdio_initialize *init_data) { struct qdio_irq *irq_ptr; DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no); if 
((init_data->no_input_qs && !init_data->input_handler) || (init_data->no_output_qs && !init_data->output_handler)) return -EINVAL; if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) || (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)) return -EINVAL; if ((!init_data->input_sbal_addr_array) || (!init_data->output_sbal_addr_array)) return -EINVAL; /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */ irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!irq_ptr) goto out_err; mutex_init(&irq_ptr->setup_mutex); qdio_allocate_dbf(init_data, irq_ptr); /* * Allocate a page for the chsc calls in qdio_establish. * Must be pre-allocated since a zfcp recovery will call * qdio_establish. In case of low memory and swap on a zfcp disk * we may not be able to allocate memory otherwise. */ irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL); if (!irq_ptr->chsc_page) goto out_rel; /* qdr is used in ccw1.cda which is u32 */ irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!irq_ptr->qdr) goto out_rel; WARN_ON((unsigned long)irq_ptr->qdr & 0xfff); if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs, init_data->no_output_qs)) goto out_rel; init_data->cdev->private->qdio_data = irq_ptr; qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); return 0; out_rel: qdio_release_memory(irq_ptr); out_err: return -ENOMEM; } EXPORT_SYMBOL_GPL(qdio_allocate); /** * qdio_establish - establish queues on a qdio subchannel * @init_data: initialization data */ int qdio_establish(struct qdio_initialize *init_data) { struct qdio_irq *irq_ptr; struct ccw_device *cdev = init_data->cdev; unsigned long saveflags; int rc; DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no); irq_ptr = cdev->private->qdio_data; if (!irq_ptr) return -ENODEV; if (cdev->private->state != DEV_STATE_ONLINE) return -EINVAL; mutex_lock(&irq_ptr->setup_mutex); qdio_setup_irq(init_data); rc = qdio_establish_thinint(irq_ptr); if (rc) { mutex_unlock(&irq_ptr->setup_mutex); qdio_shutdown(cdev, 
QDIO_FLAG_CLEANUP_USING_CLEAR); return rc; } /* establish q */ irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd; irq_ptr->ccw.flags = CCW_FLAG_SLI; irq_ptr->ccw.count = irq_ptr->equeue.count; irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr); spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); ccw_device_set_options_mask(cdev, 0); rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); if (rc) { DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); DBF_ERROR("rc:%4x", rc); } spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); if (rc) { mutex_unlock(&irq_ptr->setup_mutex); qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); return rc; } wait_event_interruptible_timeout(cdev->private->wait_q, irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED || irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ); if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) { mutex_unlock(&irq_ptr->setup_mutex); qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); return -EIO; } qdio_setup_ssqd_info(irq_ptr); DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc); DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac); /* qebsm is now setup if available, initialize buffer states */ qdio_init_buf_states(irq_ptr); mutex_unlock(&irq_ptr->setup_mutex); qdio_print_subchannel_info(irq_ptr, cdev); qdio_setup_debug_entries(irq_ptr, cdev); return 0; } EXPORT_SYMBOL_GPL(qdio_establish); /** * qdio_activate - activate queues on a qdio subchannel * @cdev: associated cdev */ int qdio_activate(struct ccw_device *cdev) { struct qdio_irq *irq_ptr; int rc; unsigned long saveflags; DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no); irq_ptr = cdev->private->qdio_data; if (!irq_ptr) return -ENODEV; if (cdev->private->state != DEV_STATE_ONLINE) return -EINVAL; mutex_lock(&irq_ptr->setup_mutex); if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { rc = -EBUSY; goto out; } irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd; irq_ptr->ccw.flags = CCW_FLAG_SLI; irq_ptr->ccw.count = irq_ptr->aqueue.count; irq_ptr->ccw.cda = 0; 
spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, 0, DOIO_DENY_PREFETCH); if (rc) { DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no); DBF_ERROR("rc:%4x", rc); } spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); if (rc) goto out; if (is_thinint_irq(irq_ptr)) tiqdio_add_input_queues(irq_ptr); /* wait for subchannel to become active */ msleep(5); switch (irq_ptr->state) { case QDIO_IRQ_STATE_STOPPED: case QDIO_IRQ_STATE_ERR: rc = -EIO; break; default: qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE); rc = 0; } out: mutex_unlock(&irq_ptr->setup_mutex); return rc; } EXPORT_SYMBOL_GPL(qdio_activate); static inline int buf_in_between(int bufnr, int start, int count) { int end = add_buf(start, count); if (end > start) { if (bufnr >= start && bufnr < end) return 1; else return 0; } /* wrap-around case */ if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) || (bufnr < end)) return 1; else return 0; } /** * handle_inbound - reset processed input buffers * @q: queue containing the buffers * @callflags: flags * @bufnr: first buffer to process * @count: how many buffers are emptied */ static int handle_inbound(struct qdio_q *q, unsigned int callflags, int bufnr, int count) { int used, diff; qperf_inc(q, inbound_call); if (!q->u.in.polling) goto set; /* protect against stop polling setting an ACK for an emptied slsb */ if (count == QDIO_MAX_BUFFERS_PER_Q) { /* overwriting everything, just delete polling status */ q->u.in.polling = 0; q->u.in.ack_count = 0; goto set; } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) { if (is_qebsm(q)) { /* partial overwrite, just update ack_start */ diff = add_buf(bufnr, count); diff = sub_buf(diff, q->u.in.ack_start); q->u.in.ack_count -= diff; if (q->u.in.ack_count <= 0) { q->u.in.polling = 0; q->u.in.ack_count = 0; goto set; } q->u.in.ack_start = add_buf(q->u.in.ack_start, diff); } else /* the only ACK will 
be deleted, so stop polling */ q->u.in.polling = 0; } set: count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); used = atomic_add_return(count, &q->nr_buf_used) - count; BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q); /* no need to signal as long as the adapter had free buffers */ if (used) return 0; if (need_siga_in(q)) return qdio_siga_input(q); return 0; } /** * handle_outbound - process filled outbound buffers * @q: queue containing the buffers * @callflags: flags * @bufnr: first buffer to process * @count: how many buffers are filled */ static int handle_outbound(struct qdio_q *q, unsigned int callflags, int bufnr, int count) { unsigned char state; int used, rc = 0; qperf_inc(q, outbound_call); count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count); used = atomic_add_return(count, &q->nr_buf_used); BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q); if (callflags & QDIO_FLAG_PCI_OUT) { q->u.out.pci_out_enabled = 1; qperf_inc(q, pci_request_int); } else q->u.out.pci_out_enabled = 0; if (queue_type(q) == QDIO_IQDIO_QFMT) { if (multicast_outbound(q)) rc = qdio_kick_outbound_q(q); else if ((q->irq_ptr->ssqd_desc.mmwc > 1) && (count > 1) && (count <= q->irq_ptr->ssqd_desc.mmwc)) { /* exploit enhanced SIGA */ q->u.out.use_enh_siga = 1; rc = qdio_kick_outbound_q(q); } else { /* * One siga-w per buffer required for unicast * HiperSockets. 
*/ q->u.out.use_enh_siga = 0; while (count--) { rc = qdio_kick_outbound_q(q); if (rc) goto out; } } goto out; } if (need_siga_sync(q)) { qdio_siga_sync_q(q); goto out; } /* try to fast requeue buffers */ get_buf_state(q, prev_buf(bufnr), &state, 0); if (state != SLSB_CU_OUTPUT_PRIMED) rc = qdio_kick_outbound_q(q); else qperf_inc(q, fast_requeue); out: tasklet_schedule(&q->tasklet); return rc; } /** * do_QDIO - process input or output buffers * @cdev: associated ccw_device for the qdio subchannel * @callflags: input or output and special flags from the program * @q_nr: queue number * @bufnr: buffer number * @count: how many buffers to process */ int do_QDIO(struct ccw_device *cdev, unsigned int callflags, int q_nr, unsigned int bufnr, unsigned int count) { struct qdio_irq *irq_ptr; if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q) return -EINVAL; irq_ptr = cdev->private->qdio_data; if (!irq_ptr) return -ENODEV; DBF_DEV_EVENT(DBF_INFO, irq_ptr, "do%02x b:%02x c:%02x", callflags, bufnr, count); if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) return -EBUSY; if (callflags & QDIO_FLAG_SYNC_INPUT) return handle_inbound(irq_ptr->input_qs[q_nr], callflags, bufnr, count); else if (callflags & QDIO_FLAG_SYNC_OUTPUT) return handle_outbound(irq_ptr->output_qs[q_nr], callflags, bufnr, count); return -EINVAL; } EXPORT_SYMBOL_GPL(do_QDIO); static int __init init_QDIO(void) { int rc; rc = qdio_setup_init(); if (rc) return rc; rc = tiqdio_allocate_memory(); if (rc) goto out_cache; rc = qdio_debug_init(); if (rc) goto out_ti; rc = tiqdio_register_thinints(); if (rc) goto out_debug; return 0; out_debug: qdio_debug_exit(); out_ti: tiqdio_free_memory(); out_cache: qdio_setup_exit(); return rc; } static void __exit exit_QDIO(void) { tiqdio_unregister_thinints(); tiqdio_free_memory(); qdio_debug_exit(); qdio_setup_exit(); } module_init(init_QDIO); module_exit(exit_QDIO);
{ "language": "C" }
/*
 * e-book-shell-sidebar.c
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 *
 * Copyright (C) 1999-2008 Novell, Inc. (www.novell.com)
 *
 */

/* Sidebar for the address-book shell view: hosts an EAddressbookSelector
 * inside a scrolled window and keeps the selected address book in sync
 * with the "primary-addressbook" GSettings key. */

#include "evolution-config.h"

#include "e-book-shell-sidebar.h"

#include <string.h>
#include <glib/gi18n.h>

#include <e-util/e-util.h>

#include "e-book-shell-view.h"
#include "e-book-shell-backend.h"
#include "e-addressbook-selector.h"

/* Convenience accessor for the instance-private struct. */
#define E_BOOK_SHELL_SIDEBAR_GET_PRIVATE(obj) \
	(G_TYPE_INSTANCE_GET_PRIVATE \
	((obj), E_TYPE_BOOK_SHELL_SIDEBAR, EBookShellSidebarPrivate))

struct _EBookShellSidebarPrivate {
	GtkWidget *selector;  /* owned reference, released in dispose() */
};

enum {
	PROP_0,
	PROP_SELECTOR
};

G_DEFINE_DYNAMIC_TYPE (
	EBookShellSidebar,
	e_book_shell_sidebar,
	E_TYPE_SHELL_SIDEBAR)

/* GSettings mapping: "primary-addressbook" UID string -> ESource object.
 * An empty/missing UID falls back to the default address book.
 * Returns FALSE (mapping failed) when no source could be resolved. */
static gboolean
book_shell_sidebar_map_uid_to_source (GValue *value,
                                      GVariant *variant,
                                      gpointer user_data)
{
	ESourceRegistry *registry;
	ESource *source;
	const gchar *uid;

	registry = E_SOURCE_REGISTRY (user_data);
	uid = g_variant_get_string (variant, NULL);
	if (uid != NULL && *uid != '\0')
		source = e_source_registry_ref_source (registry, uid);
	else
		source = e_source_registry_ref_default_address_book (registry);
	g_value_take_object (value, source);

	return (source != NULL);
}

/* Reverse GSettings mapping: selected ESource -> its UID string,
 * or NULL when nothing is selected. */
static GVariant *
book_shell_sidebar_map_source_to_uid (const GValue *value,
                                      const GVariantType *expected_type,
                                      gpointer user_data)
{
	GVariant *variant = NULL;
	ESource *source;

	source = g_value_get_object (value);

	if (source != NULL) {
		const gchar *uid;

		uid = e_source_get_uid (source);
		variant = g_variant_new_string (uid);
	}

	return variant;
}

static void
book_shell_sidebar_get_property (GObject *object,
                                 guint property_id,
                                 GValue *value,
                                 GParamSpec *pspec)
{
	switch (property_id) {
		case PROP_SELECTOR:
			g_value_set_object (
				value,
				e_book_shell_sidebar_get_selector (
				E_BOOK_SHELL_SIDEBAR (object)));
			return;
	}

	G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
}

static void
book_shell_sidebar_dispose (GObject *object)
{
	EBookShellSidebarPrivate *priv;

	priv = E_BOOK_SHELL_SIDEBAR_GET_PRIVATE (object);

	/* Release our reference on the selector widget. */
	if (priv->selector != NULL) {
		g_object_unref (priv->selector);
		priv->selector = NULL;
	}

	/* Chain up to parent's dispose() method. */
	G_OBJECT_CLASS (e_book_shell_sidebar_parent_class)->dispose (object);
}

/* Builds the widget tree (scrolled window -> selector) and binds the
 * selector's primary selection to GSettings.  Runs once the "shell-view"
 * construct property is available. */
static void
book_shell_sidebar_constructed (GObject *object)
{
	EBookShellSidebarPrivate *priv;
	EShell *shell;
	EShellView *shell_view;
	EShellBackend *shell_backend;
	EShellSidebar *shell_sidebar;
	EClientCache *client_cache;
	GtkContainer *container;
	GtkWidget *widget;
	GSettings *settings;

	priv = E_BOOK_SHELL_SIDEBAR_GET_PRIVATE (object);

	/* Chain up to parent's constructed() method. */
	G_OBJECT_CLASS (e_book_shell_sidebar_parent_class)->constructed (object);

	shell_sidebar = E_SHELL_SIDEBAR (object);
	shell_view = e_shell_sidebar_get_shell_view (shell_sidebar);
	shell_backend = e_shell_view_get_shell_backend (shell_view);
	shell = e_shell_backend_get_shell (shell_backend);

	container = GTK_CONTAINER (shell_sidebar);

	widget = gtk_scrolled_window_new (NULL, NULL);
	gtk_scrolled_window_set_policy (
		GTK_SCROLLED_WINDOW (widget),
		GTK_POLICY_AUTOMATIC,
		GTK_POLICY_AUTOMATIC);
	gtk_container_add (container, widget);
	gtk_widget_show (widget);

	container = GTK_CONTAINER (widget);

	client_cache = e_shell_get_client_cache (shell);
	widget = e_addressbook_selector_new (client_cache);
	gtk_container_add (GTK_CONTAINER (container), widget);
	priv->selector = g_object_ref (widget);
	gtk_widget_show (widget);

	e_source_selector_load_groups_setup (E_SOURCE_SELECTOR (priv->selector),
		e_shell_view_get_state_key_file (shell_view));

	settings = e_util_ref_settings ("org.gnome.evolution.addressbook");

	/* Keep the selected address book persistent across sessions.
	 * The registry reference is owned by the binding and released
	 * via the GDestroyNotify. */
	g_settings_bind_with_mapping (
		settings, "primary-addressbook",
		widget, "primary-selection",
		G_SETTINGS_BIND_DEFAULT,
		book_shell_sidebar_map_uid_to_source,
		book_shell_sidebar_map_source_to_uid,
		e_client_cache_ref_registry (client_cache),
		(GDestroyNotify) g_object_unref);

	g_object_unref (settings);
}

/* Reports capability flags for the currently selected source so the
 * shell view can sensitize its actions (delete, refresh, etc.). */
static guint32
book_shell_sidebar_check_state (EShellSidebar *shell_sidebar)
{
	EBookShellSidebar *book_shell_sidebar;
	ESourceSelector *selector;
	ESourceRegistry *registry;
	ESource *source, *clicked_source;
	gboolean is_writable = FALSE;
	gboolean is_removable = FALSE;
	gboolean is_remote_creatable = FALSE;
	gboolean is_remote_deletable = FALSE;
	gboolean in_collection = FALSE;
	gboolean has_primary_source = FALSE;
	gboolean refresh_supported = FALSE;
	guint32 state = 0;

	book_shell_sidebar = E_BOOK_SHELL_SIDEBAR (shell_sidebar);
	selector = e_book_shell_sidebar_get_selector (book_shell_sidebar);
	source = e_source_selector_ref_primary_selection (selector);
	registry = e_source_selector_get_registry (selector);

	if (source != NULL) {
		EClient *client;
		ESource *collection;

		has_primary_source = TRUE;
		is_writable = e_source_get_writable (source);
		is_removable = e_source_get_removable (source);
		is_remote_creatable = e_source_get_remote_creatable (source);
		is_remote_deletable = e_source_get_remote_deletable (source);

		collection = e_source_registry_find_extension (
			registry, source, E_SOURCE_EXTENSION_COLLECTION);
		if (collection != NULL) {
			in_collection = TRUE;
			g_object_unref (collection);
		}

		client = e_client_selector_ref_cached_client (
			E_CLIENT_SELECTOR (selector), source);

		if (client != NULL) {
			refresh_supported =
				e_client_check_refresh_supported (client);
			g_object_unref (client);
		} else {
			/* It's also used to allow-auth-prompt for the source */
			refresh_supported = TRUE;
		}

		g_object_unref (source);
	}

	/* NOTE(review): `source` was already unreffed above; the pointer is
	 * only compared (never dereferenced) here, but the comparison may
	 * involve a stale pointer if the selection held the last reference
	 * -- worth confirming this is intentional. */
	clicked_source = e_book_shell_view_get_clicked_source (e_shell_sidebar_get_shell_view (shell_sidebar));
	if (clicked_source && clicked_source == source)
		state |= E_BOOK_SHELL_SIDEBAR_CLICKED_SOURCE_IS_PRIMARY;
	if (clicked_source && e_source_has_extension (clicked_source, E_SOURCE_EXTENSION_COLLECTION))
		state |= E_BOOK_SHELL_SIDEBAR_CLICKED_SOURCE_IS_COLLECTION;
	if (has_primary_source)
		state |= E_BOOK_SHELL_SIDEBAR_HAS_PRIMARY_SOURCE;
	if (is_writable)
		state |= E_BOOK_SHELL_SIDEBAR_PRIMARY_SOURCE_IS_WRITABLE;
	if (is_removable)
		state |= E_BOOK_SHELL_SIDEBAR_PRIMARY_SOURCE_IS_REMOVABLE;
	if (is_remote_creatable)
		state |= E_BOOK_SHELL_SIDEBAR_PRIMARY_SOURCE_IS_REMOTE_CREATABLE;
	if (is_remote_deletable)
		state |= E_BOOK_SHELL_SIDEBAR_PRIMARY_SOURCE_IS_REMOTE_DELETABLE;
	if (in_collection)
		state |= E_BOOK_SHELL_SIDEBAR_PRIMARY_SOURCE_IN_COLLECTION;
	if (refresh_supported)
		state |= E_BOOK_SHELL_SIDEBAR_SOURCE_SUPPORTS_REFRESH;

	return state;
}

static void
e_book_shell_sidebar_class_init (EBookShellSidebarClass *class)
{
	GObjectClass *object_class;
	EShellSidebarClass *shell_sidebar_class;

	g_type_class_add_private (class, sizeof (EBookShellSidebarPrivate));

	object_class = G_OBJECT_CLASS (class);
	object_class->get_property = book_shell_sidebar_get_property;
	object_class->dispose = book_shell_sidebar_dispose;
	object_class->constructed = book_shell_sidebar_constructed;

	shell_sidebar_class = E_SHELL_SIDEBAR_CLASS (class);
	shell_sidebar_class->check_state = book_shell_sidebar_check_state;

	/* Read-only "selector" property exposing the inner widget. */
	g_object_class_install_property (
		object_class,
		PROP_SELECTOR,
		g_param_spec_object (
			"selector",
			"Source Selector Widget",
			"This widget displays groups of address books",
			E_TYPE_SOURCE_SELECTOR,
			G_PARAM_READABLE));
}

static void
e_book_shell_sidebar_class_finalize (EBookShellSidebarClass *class)
{
}

static void
e_book_shell_sidebar_init (EBookShellSidebar *book_shell_sidebar)
{
	book_shell_sidebar->priv =
		E_BOOK_SHELL_SIDEBAR_GET_PRIVATE (book_shell_sidebar);

	/* Postpone widget construction until we have a shell view. */
}

void
e_book_shell_sidebar_type_register (GTypeModule *type_module)
{
	/* XXX G_DEFINE_DYNAMIC_TYPE declares a static type registration
	 *     function, so we have to wrap it with a public function in
	 *     order to register types from a separate compilation unit. */
	e_book_shell_sidebar_register_type (type_module);
}

GtkWidget *
e_book_shell_sidebar_new (EShellView *shell_view)
{
	g_return_val_if_fail (E_IS_SHELL_VIEW (shell_view), NULL);

	return g_object_new (
		E_TYPE_BOOK_SHELL_SIDEBAR,
		"shell-view", shell_view, NULL);
}

/* Returns the inner selector widget (borrowed reference). */
ESourceSelector *
e_book_shell_sidebar_get_selector (EBookShellSidebar *book_shell_sidebar)
{
	g_return_val_if_fail (
		E_IS_BOOK_SHELL_SIDEBAR (book_shell_sidebar), NULL);

	return E_SOURCE_SELECTOR (book_shell_sidebar->priv->selector);
}
{ "language": "C" }
#include <cstdio>
#include <map>
#include <iostream>
#include <string>
using namespace std;

// Binary-tree reconstruction: given the preorder (prefixa) and inorder
// (infixa) traversals of a tree with distinct node labels, print the
// postorder traversal.  `in`/`pre` map each label to its index in the
// respective traversal.
string infixa, prefixa;
map<char, int> in, pre;

// Print the postorder of the subtree covering infixa[inmin..inmax] and
// prefixa[premin..premax] (both ranges inclusive and the same node set).
void post(int inmin, int inmax, int premin, int premax){
    if (inmin > inmax) return;
    char pivot = prefixa[premin];   // subtree root is first in preorder
    int inpos = in[pivot];          // root position splits the inorder range
    // Find the first preorder index belonging to the right subtree: all
    // left-subtree nodes appear before the pivot in the inorder sequence.
    // BUGFIX: bound the scan by premax -- when the pivot has no right
    // subtree the old loop ran past the range (and past the end of the
    // string: out-of-bounds read / undefined behavior, plus it inserted
    // spurious entries into `in` via map::operator[]).
    int firstRight = premin + 1;
    while (firstRight <= premax && inpos > in[prefixa[firstRight]]) {
        firstRight++;
    }
    post(inmin, inpos - 1, premin + 1, firstRight - 1);  // left subtree
    post(inpos + 1, inmax, firstRight, premax);          // right subtree
    printf("%c", pivot);
}

int main() {
    while (cin >> prefixa >> infixa) {
        // Index both traversals; labels are assumed unique.
        for (int i = 0; i < (int) prefixa.size(); ++i) {
            in[infixa[i]] = i;
            pre[prefixa[i]] = i;
        }
        post(0, prefixa.size() - 1, 0, prefixa.size() - 1);
        printf("\n");
        in.clear();
        pre.clear();
    }
    return 0;
}
{ "language": "C" }
/*
 * H261 decoder
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2004 Maarten Daniels
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.261 decoder.
 */

#include "dsputil.h"
#include "avcodec.h"
#include "mpegvideo.h"
#include "h263.h"
#include "h261.h"
#include "h261data.h"

/* VLC table lookup depths (bits read per step). */
#define H261_MBA_VLC_BITS 9
#define H261_MTYPE_VLC_BITS 6
#define H261_MV_VLC_BITS 7
#define H261_CBP_VLC_BITS 9
#define TCOEFF_VLC_BITS 9

/* Special MBA VLC symbols: stuffing and GOB start code. */
#define MBA_STUFFING 33
#define MBA_STARTCODE 34

extern uint8_t ff_h261_rl_table_store[2][2*MAX_RUN + MAX_LEVEL + 3];

static VLC h261_mba_vlc;
static VLC h261_mtype_vlc;
static VLC h261_mv_vlc;
static VLC h261_cbp_vlc;

static int h261_decode_block(H261Context * h, DCTELEM * block, int n, int coded);

/* Build the static VLC tables once, on first codec instantiation. */
static av_cold void h261_decode_init_vlc(H261Context *h){
    static int done = 0;

    if(!done){
        done = 1;
        INIT_VLC_STATIC(&h261_mba_vlc, H261_MBA_VLC_BITS, 35,
                 h261_mba_bits, 1, 1,
                 h261_mba_code, 1, 1, 662);
        INIT_VLC_STATIC(&h261_mtype_vlc, H261_MTYPE_VLC_BITS, 10,
                 h261_mtype_bits, 1, 1,
                 h261_mtype_code, 1, 1, 80);
        INIT_VLC_STATIC(&h261_mv_vlc, H261_MV_VLC_BITS, 17,
                 &h261_mv_tab[0][1], 2, 1,
                 &h261_mv_tab[0][0], 2, 1, 144);
        INIT_VLC_STATIC(&h261_cbp_vlc, H261_CBP_VLC_BITS, 63,
                 &h261_cbp_tab[0][1], 2, 1,
                 &h261_cbp_tab[0][0], 2, 1, 512);
        init_rl(&h261_rl_tcoeff, ff_h261_rl_table_store);
        INIT_VLC_RL(h261_rl_tcoeff, 552);
    }
}

/* AVCodec.init: set up the shared MpegEncContext and VLC tables. */
static av_cold int h261_decode_init(AVCodecContext *avctx){
    H261Context *h= avctx->priv_data;
    MpegEncContext * const s = &h->s;

    // set defaults
    MPV_decode_defaults(s);
    s->avctx = avctx;

    s->width  = s->avctx->coded_width;
    s->height = s->avctx->coded_height;
    s->codec_id = s->avctx->codec->id;

    s->out_format = FMT_H261;
    s->low_delay= 1;
    avctx->pix_fmt= PIX_FMT_YUV420P;

    s->codec_id= avctx->codec->id;

    h261_decode_init_vlc(h);

    h->gob_start_code_skipped = 0;

    return 0;
}

/**
 * decodes the group of blocks header or slice header.
 * @return <0 if an error occurred
 */
static int h261_decode_gob_header(H261Context *h){
    unsigned int val;
    MpegEncContext * const s = &h->s;

    if ( !h->gob_start_code_skipped ){
        /* Check for GOB Start Code */
        val = show_bits(&s->gb, 15);
        if(val)
            return -1;

        /* We have a GBSC */
        skip_bits(&s->gb, 16);
    }

    h->gob_start_code_skipped = 0;

    h->gob_number = get_bits(&s->gb, 4); /* GN */
    s->qscale = get_bits(&s->gb, 5); /* GQUANT */

    /* Check if gob_number is valid */
    if (s->mb_height==18){ //cif
        if ((h->gob_number<=0) || (h->gob_number>12))
            return -1;
    }
    else{ //qcif
        if ((h->gob_number!=1) && (h->gob_number!=3) && (h->gob_number!=5))
            return -1;
    }

    /* GEI: skip optional extra-insertion bytes while the flag bit is set */
    while (get_bits1(&s->gb) != 0) {
        skip_bits(&s->gb, 8);
    }

    if(s->qscale==0) {
        av_log(s->avctx, AV_LOG_ERROR, "qscale has forbidden 0 value\n");
        if (s->avctx->error_recognition >= FF_ER_COMPLIANT)
            return -1;
    }

    // For the first transmitted macroblock in a GOB, MBA is the absolute address. For
    // subsequent macroblocks, MBA is the difference between the absolute addresses of
    // the macroblock and the last transmitted macroblock.
    h->current_mba = 0;
    h->mba_diff = 0;

    return 0;
}

/**
 * Resynchronize on the next GOB header: try the current position first,
 * then scan byte-aligned from the last resync point for a GOB start code.
 * @return <0 if no resync found
 */
static int ff_h261_resync(H261Context *h){
    MpegEncContext * const s = &h->s;
    int left, ret;

    if ( h->gob_start_code_skipped ){
        ret= h261_decode_gob_header(h);
        if(ret>=0)
            return 0;
    }
    else{
        if(show_bits(&s->gb, 15)==0){
            ret= h261_decode_gob_header(h);
            if(ret>=0)
                return 0;
        }
        //OK, it is not where it is supposed to be ...
        s->gb= s->last_resync_gb;
        align_get_bits(&s->gb);
        left= get_bits_left(&s->gb);

        for(;left>15+1+4+5; left-=8){
            if(show_bits(&s->gb, 15)==0){
                GetBitContext bak= s->gb;

                ret= h261_decode_gob_header(h);
                if(ret>=0)
                    return 0;

                s->gb= bak;
            }
            skip_bits(&s->gb, 8);
        }
    }

    return -1;
}

/**
 * decodes skipped macroblocks (reconstructed as zero-MV copies)
 * @return 0
 */
static int h261_decode_mb_skipped(H261Context *h, int mba1, int mba2 )
{
    MpegEncContext * const s = &h->s;
    int i;

    s->mb_intra = 0;

    for(i=mba1; i<mba2; i++){
        int j, xy;

        /* GOBs are 11 MBs wide, two GOB columns per CIF row. */
        s->mb_x= ((h->gob_number-1) % 2) * 11 + i % 11;
        s->mb_y= ((h->gob_number-1) / 2) * 3 + i / 11;
        xy = s->mb_x + s->mb_y * s->mb_stride;
        ff_init_block_index(s);
        ff_update_block_index(s);

        for(j=0;j<6;j++)
            s->block_last_index[j] = -1;

        s->mv_dir = MV_DIR_FORWARD;
        s->mv_type = MV_TYPE_16X16;
        s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
        s->mv[0][0][0] = 0;
        s->mv[0][0][1] = 0;
        s->mb_skipped = 1;
        h->mtype &= ~MB_TYPE_H261_FIL;

        MPV_decode_mb(s, s->block);
    }

    return 0;
}

/* Decode one motion-vector component delta, add it to the predictor v
 * and wrap the result back into [-16, 15]. */
static int decode_mv_component(GetBitContext *gb, int v){
    int mv_diff = get_vlc2(gb, h261_mv_vlc.table, H261_MV_VLC_BITS, 2);

    /* check if mv_diff is valid */
    if ( mv_diff < 0 )
        return v;

    mv_diff = mvmap[mv_diff];

    if(mv_diff && !get_bits1(gb))
        mv_diff= -mv_diff;

    v += mv_diff;
    if     (v <=-16) v+= 32;
    else if(v >= 16) v-= 32;

    return v;
}

/**
 * decodes one macroblock (MBA, MTYPE, MQUANT, MVD, CBP and its blocks)
 * @return SLICE_OK, SLICE_END or SLICE_ERROR
 */
static int h261_decode_mb(H261Context *h){
    MpegEncContext * const s = &h->s;
    int i, cbp, xy;

    cbp = 63;
    // Read mba
    do{
        h->mba_diff = get_vlc2(&s->gb, h261_mba_vlc.table, H261_MBA_VLC_BITS, 2);

        /* Check for slice end */
        /* NOTE: GOB can be empty (no MB data) or exist only of MBA_stuffing */
        if (h->mba_diff == MBA_STARTCODE){ // start code
            h->gob_start_code_skipped = 1;
            return SLICE_END;
        }
    }
    while( h->mba_diff == MBA_STUFFING ); // stuffing

    if ( h->mba_diff < 0 ){
        if ( get_bits_count(&s->gb) + 7 >= s->gb.size_in_bits )
            return SLICE_END;

        av_log(s->avctx, AV_LOG_ERROR, "illegal mba at %d %d\n", s->mb_x, s->mb_y);
        return SLICE_ERROR;
    }

    h->mba_diff += 1;
    h->current_mba += h->mba_diff;

    if ( h->current_mba > MBA_STUFFING )
        return SLICE_ERROR;

    s->mb_x= ((h->gob_number-1) % 2) * 11 + ((h->current_mba-1) % 11);
    s->mb_y= ((h->gob_number-1) / 2) * 3 + ((h->current_mba-1) / 11);
    xy = s->mb_x + s->mb_y * s->mb_stride;
    ff_init_block_index(s);
    ff_update_block_index(s);

    // Read mtype
    h->mtype = get_vlc2(&s->gb, h261_mtype_vlc.table, H261_MTYPE_VLC_BITS, 2);
    h->mtype = h261_mtype_map[h->mtype];

    // Read mquant
    if ( IS_QUANT ( h->mtype ) ){
        ff_set_qscale(s, get_bits(&s->gb, 5));
    }

    s->mb_intra = IS_INTRA4x4(h->mtype);

    // Read mv
    if ( IS_16X16 ( h->mtype ) ){
        // Motion vector data is included for all MC macroblocks. MVD is obtained from the macroblock vector by subtracting the
        // vector of the preceding macroblock. For this calculation the vector of the preceding macroblock is regarded as zero in the
        // following three situations:
        // 1) evaluating MVD for macroblocks 1, 12 and 23;
        // 2) evaluating MVD for macroblocks in which MBA does not represent a difference of 1;
        // 3) MTYPE of the previous macroblock was not MC.
        if ( ( h->current_mba == 1 ) || ( h->current_mba == 12 ) || ( h->current_mba == 23 ) ||
             ( h->mba_diff != 1))
        {
            h->current_mv_x = 0;
            h->current_mv_y = 0;
        }

        h->current_mv_x= decode_mv_component(&s->gb, h->current_mv_x);
        h->current_mv_y= decode_mv_component(&s->gb, h->current_mv_y);
    }else{
        h->current_mv_x = 0;
        h->current_mv_y = 0;
    }

    // Read cbp
    if ( HAS_CBP( h->mtype ) ){
        cbp = get_vlc2(&s->gb, h261_cbp_vlc.table, H261_CBP_VLC_BITS, 2) + 1;
    }

    if(s->mb_intra){
        s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
        goto intra;
    }

    //set motion vectors
    s->mv_dir = MV_DIR_FORWARD;
    s->mv_type = MV_TYPE_16X16;
    s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
    s->mv[0][0][0] = h->current_mv_x * 2;//gets divided by 2 in motion compensation
    s->mv[0][0][1] = h->current_mv_y * 2;

intra:
    /* decode each block */
    if(s->mb_intra || HAS_CBP(h->mtype)){
        s->dsp.clear_blocks(s->block[0]);
        for (i = 0; i < 6; i++) {
            /* cbp&32 tests the current block's coded bit; cbp+=cbp
             * shifts the mask left for the next block. */
            if (h261_decode_block(h, s->block[i], i, cbp&32) < 0){
                return SLICE_ERROR;
            }
            cbp+=cbp;
        }
    }else{
        for (i = 0; i < 6; i++)
            s->block_last_index[i]= -1;
    }

    MPV_decode_mb(s, s->block);

    return SLICE_OK;
}

/**
 * decodes one 8x8 transform-coefficient block
 * @return <0 if an error occurred
 */
static int h261_decode_block(H261Context * h, DCTELEM * block,
                             int n, int coded)
{
    MpegEncContext * const s = &h->s;
    int code, level, i, j, run;
    RLTable *rl = &h261_rl_tcoeff;
    const uint8_t *scan_table;

    // For the variable length encoding there are two code tables, one being used for
    // the first transmitted LEVEL in INTER, INTER+MC and INTER+MC+FIL blocks, the second
    // for all other LEVELs except the first one in INTRA blocks which is fixed length
    // coded with 8 bits.
    // NOTE: the two code tables only differ in one VLC so we handle that manually.
    scan_table = s->intra_scantable.permutated;
    if (s->mb_intra){
        /* DC coef */
        level = get_bits(&s->gb, 8);
        // 0 (00000000b) and -128 (10000000b) are FORBIDDEN
        if((level&0x7F) == 0){
            av_log(s->avctx, AV_LOG_ERROR, "illegal dc %d at %d %d\n", level, s->mb_x, s->mb_y);
            return -1;
        }
        // The code 1000 0000 is not used, the reconstruction level of 1024 being coded as 1111 1111.
        if (level == 255)
            level = 128;
        block[0] = level;
        i = 1;
    }else if(coded){
        // Run  Level   Code
        // EOB          Not possible for first level when cbp is available (that's why the table is different)
        // 0    1       1s
        // *    *       0*
        int check = show_bits(&s->gb, 2);
        i = 0;
        if ( check & 0x2 ){
            skip_bits(&s->gb, 2);
            block[0] = ( check & 0x1 ) ? -1 : 1;
            i = 1;
        }
    }else{
        i = 0;
    }
    if(!coded){
        s->block_last_index[n] = i - 1;
        return 0;
    }
    for(;;){
        code = get_vlc2(&s->gb, rl->vlc.table, TCOEFF_VLC_BITS, 2);
        if (code < 0){
            av_log(s->avctx, AV_LOG_ERROR, "illegal ac vlc code at %dx%d\n", s->mb_x, s->mb_y);
            return -1;
        }
        if (code == rl->n) {
            /* escape */
            // The remaining combinations of (run, level) are encoded with a 20-bit word consisting of 6 bits escape, 6 bits run and 8 bits level.
            run = get_bits(&s->gb, 6);
            level = get_sbits(&s->gb, 8);
        }else if(code == 0){
            /* EOB */
            break;
        }else{
            run = rl->table_run[code];
            level = rl->table_level[code];
            if (get_bits1(&s->gb))
                level = -level;
        }
        i += run;
        if (i >= 64){
            av_log(s->avctx, AV_LOG_ERROR, "run overflow at %dx%d\n", s->mb_x, s->mb_y);
            return -1;
        }
        j = scan_table[i];
        block[j] = level;
        i++;
    }
    s->block_last_index[n] = i-1;
    return 0;
}

/**
 * decodes the H261 picture header.
 * @return <0 if no startcode found
 */
static int h261_decode_picture_header(H261Context *h){
    MpegEncContext * const s = &h->s;
    int format, i;
    uint32_t startcode= 0;

    /* Bit-by-bit scan for the 20-bit picture start code 0x00010. */
    for(i= get_bits_left(&s->gb); i>24; i-=1){
        startcode = ((startcode << 1) | get_bits(&s->gb, 1)) & 0x000FFFFF;

        if(startcode == 0x10)
            break;
    }

    if (startcode != 0x10){
        av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n");
        return -1;
    }

    /* temporal reference */
    i= get_bits(&s->gb, 5); /* picture timestamp */
    if(i < (s->picture_number&31))
        i += 32;
    s->picture_number = (s->picture_number&~31) + i;

    s->avctx->time_base= (AVRational){1001, 30000};
    s->current_picture.pts= s->picture_number;

    /* PTYPE starts here */
    skip_bits1(&s->gb); /* split screen off */
    skip_bits1(&s->gb); /* camera  off */
    skip_bits1(&s->gb); /* freeze picture release off */

    format = get_bits1(&s->gb);

    //only 2 formats possible
    if (format == 0){//QCIF
        s->width     = 176;
        s->height    = 144;
        s->mb_width  = 11;
        s->mb_height = 9;
    }else{//CIF
        s->width     = 352;
        s->height    = 288;
        s->mb_width  = 22;
        s->mb_height = 18;
    }

    s->mb_num = s->mb_width * s->mb_height;

    skip_bits1(&s->gb); /* still image mode off */
    skip_bits1(&s->gb); /* Reserved */

    /* PEI */
    while (get_bits1(&s->gb) != 0){
        skip_bits(&s->gb, 8);
    }

    // h261 has no I-FRAMES, but if we pass FF_I_TYPE for the first frame, the codec crashes if it does
    // not contain all I-blocks (e.g. when a packet is lost)
    s->pict_type = FF_P_TYPE;

    h->gob_number = 0;
    return 0;
}

/* Decode all macroblocks of the current GOB, filling in the skipped
 * macroblocks between the coded ones. */
static int h261_decode_gob(H261Context *h){
    MpegEncContext * const s = &h->s;

    ff_set_qscale(s, s->qscale);

    /* decode mb's */
    while(h->current_mba <= MBA_STUFFING)
    {
        int ret;
        /* DCT & quantize */
        ret= h261_decode_mb(h);
        if(ret<0){
            if(ret==SLICE_END){
                h261_decode_mb_skipped(h, h->current_mba, 33);
                return 0;
            }
            av_log(s->avctx, AV_LOG_ERROR, "Error at MB: %d\n", s->mb_x + s->mb_y*s->mb_stride);
            return -1;
        }

        h261_decode_mb_skipped(h, h->current_mba-h->mba_diff, h->current_mba-1);
    }

    return -1;
}

/**
 * returns the number of bytes consumed for building the current frame
 */
static int get_consumed_bytes(MpegEncContext *s, int buf_size){
    int pos= get_bits_count(&s->gb)>>3;
    if(pos==0) pos=1; //avoid infinite loops (i doubt that is needed but ...)
    if(pos+10>buf_size) pos=buf_size; // oops ;)

    return pos;
}

/* AVCodec.decode: parse the picture header, (re)initialize the context
 * on a dimension change, then decode the frame GOB by GOB. */
static int h261_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    H261Context *h= avctx->priv_data;
    MpegEncContext *s = &h->s;
    int ret;
    AVFrame *pict = data;

    dprintf(avctx, "*****frame %d size=%d\n", avctx->frame_number, buf_size);
    dprintf(avctx, "bytes=%x %x %x %x\n", buf[0], buf[1], buf[2], buf[3]);
    s->flags= avctx->flags;
    s->flags2= avctx->flags2;

    h->gob_start_code_skipped=0;

retry:

    init_get_bits(&s->gb, buf, buf_size*8);

    if(!s->context_initialized){
        if (MPV_common_init(s) < 0) //we need the idct permutaton for reading a custom matrix
            return -1;
    }

    //we need to set current_picture_ptr before reading the header, otherwise we cannot store anyting im there
    if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
        int i= ff_find_unused_picture(s, 0);
        s->current_picture_ptr= &s->picture[i];
    }

    ret = h261_decode_picture_header(h);

    /* skip if the header was thrashed */
    if (ret < 0){
        av_log(s->avctx, AV_LOG_ERROR, "header damaged\n");
        return -1;
    }

    if (s->width != avctx->coded_width || s->height != avctx->coded_height){
        ParseContext pc= s->parse_context; //FIXME move this demuxing hack to libavformat
        s->parse_context.buffer=0;
        MPV_common_end(s);
        s->parse_context= pc;
    }
    if (!s->context_initialized) {
        avcodec_set_dimensions(avctx, s->width, s->height);

        goto retry;
    }

    // for hurry_up==5
    s->current_picture.pict_type= s->pict_type;
    s->current_picture.key_frame= s->pict_type == FF_I_TYPE;

    /* skip everything if we are in a hurry>=5 */
    if(avctx->hurry_up>=5)
        return get_consumed_bytes(s, buf_size);
    if(  (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==FF_B_TYPE)
       ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=FF_I_TYPE)
       || avctx->skip_frame >= AVDISCARD_ALL)
        return get_consumed_bytes(s, buf_size);

    if(MPV_frame_start(s, avctx) < 0)
        return -1;

    ff_er_frame_start(s);

    /* decode each macroblock */
    s->mb_x=0;
    s->mb_y=0;

    /* 12 GOBs for CIF, 5 resync attempts for QCIF (GOBs 1, 3, 5). */
    while(h->gob_number < (s->mb_height==18 ? 12 : 5)){
        if(ff_h261_resync(h)<0)
            break;
        h261_decode_gob(h);
    }
    MPV_frame_end(s);

assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
assert(s->current_picture.pict_type == s->pict_type);
    *pict= *(AVFrame*)s->current_picture_ptr;
    ff_print_debug_info(s, pict);

    *data_size = sizeof(AVFrame);

    return get_consumed_bytes(s, buf_size);
}

static av_cold int h261_decode_end(AVCodecContext *avctx)
{
    H261Context *h= avctx->priv_data;
    MpegEncContext *s = &h->s;

    MPV_common_end(s);
    return 0;
}

AVCodec h261_decoder = {
    "h261",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_H261,
    sizeof(H261Context),
    h261_decode_init,
    NULL,
    h261_decode_end,
    h261_decode_frame,
    CODEC_CAP_DR1,
    .long_name = NULL_IF_CONFIG_SMALL("H.261"),
};
{ "language": "C" }
=pod

=head1 NAME

PKCS7_sign_add_signer - add a signer to a PKCS7 signed data structure.

=head1 SYNOPSIS

 #include <openssl/pkcs7.h>

 PKCS7_SIGNER_INFO *PKCS7_sign_add_signer(PKCS7 *p7, X509 *signcert, EVP_PKEY *pkey, const EVP_MD *md, int flags);

=head1 DESCRIPTION

PKCS7_sign_add_signer() adds a signer with certificate B<signcert> and private key B<pkey> using message digest B<md> to a PKCS7 signed data structure B<p7>.

The PKCS7 structure should be obtained from an initial call to PKCS7_sign() with the flag B<PKCS7_PARTIAL> set or in the case of re-signing a valid PKCS7 signed data structure.

If the B<md> parameter is B<NULL> then the default digest for the public key algorithm will be used.

Unless the B<PKCS7_REUSE_DIGEST> flag is set the returned PKCS7 structure is not complete and must be finalized either by streaming (if applicable) or a call to PKCS7_final().

=head1 NOTES

The main purpose of this function is to provide finer control over a PKCS#7 signed data structure where the simpler PKCS7_sign() function defaults are not appropriate. For example if multiple signers or non default digest algorithms are needed.

Any of the following flags (ored together) can be passed in the B<flags> parameter.

If B<PKCS7_REUSE_DIGEST> is set then an attempt is made to copy the content digest value from the PKCS7 structure: to add a signer to an existing structure. An error occurs if a matching digest value cannot be found to copy. The returned PKCS7 structure will be valid and finalized when this flag is set.

If B<PKCS7_PARTIAL> is set in addition to B<PKCS7_REUSE_DIGEST> then the B<PKCS7_SIGNER_INFO> structure will not be finalized so additional attributes can be added. In this case an explicit call to PKCS7_SIGNER_INFO_sign() is needed to finalize it.

If B<PKCS7_NOCERTS> is set the signer's certificate will not be included in the PKCS7 structure, the signer's certificate must still be supplied in the B<signcert> parameter though.
This can reduce the size of the signature if the signer's certificate can be obtained by other means: for example a previously signed message.

The signedData structure includes several PKCS#7 authenticatedAttributes including the signing time, the PKCS#7 content type and the supported list of ciphers in an SMIMECapabilities attribute. If B<PKCS7_NOATTR> is set then no authenticatedAttributes will be used. If B<PKCS7_NOSMIMECAP> is set then just the SMIMECapabilities are omitted.

If present the SMIMECapabilities attribute indicates support for the following algorithms: triple DES, 128 bit RC2, 64 bit RC2, DES and 40 bit RC2. If any of these algorithms is disabled then it will not be included.

PKCS7_sign_add_signer() returns an internal pointer to the PKCS7_SIGNER_INFO structure just added, this can be used to set additional attributes before it is finalized.

=head1 RETURN VALUES

PKCS7_sign_add_signer() returns an internal pointer to the PKCS7_SIGNER_INFO structure just added or NULL if an error occurs.

=head1 SEE ALSO

L<ERR_get_error(3)|ERR_get_error(3)>, L<PKCS7_sign(3)|PKCS7_sign(3)>,
L<PKCS7_final(3)|PKCS7_final(3)>

=head1 HISTORY

PKCS7_sign_add_signer() was added to OpenSSL 1.0.0

=cut
{ "language": "C" }
#include <stdint.h>
#include <stddef.h>

/* Opaque curve object defined by an ordered list of control points;
 * each point is a 4-component float vector. */
typedef struct Curve Curve;

/* Create an empty curve; release it with lovrCurveDestroy. */
Curve* lovrCurveCreate(void);
/* Destructor; takes void* so it can be used as a generic release callback. */
void lovrCurveDestroy(void* ref);
/* Evaluate the curve at parameter t, writing the result into point. */
void lovrCurveEvaluate(Curve* curve, float t, float point[4]);
/* Write the curve's tangent at parameter t into point.
 * NOTE(review): inferred from the name -- confirm against the implementation. */
void lovrCurveGetTangent(Curve* curve, float t, float point[4]);
/* Return a new curve covering the [t1, t2] sub-range of this curve. */
Curve* lovrCurveSlice(Curve* curve, float t1, float t2);
/* Number of control points currently in the curve. */
size_t lovrCurveGetPointCount(Curve* curve);
/* Read / overwrite the control point at index.  No bounds checking is
 * visible in this header; presumably the caller must pass a valid index. */
void lovrCurveGetPoint(Curve* curve, size_t index, float point[4]);
void lovrCurveSetPoint(Curve* curve, size_t index, float point[4]);
/* Insert a control point at index / remove the point at index. */
void lovrCurveAddPoint(Curve* curve, float point[4], size_t index);
void lovrCurveRemovePoint(Curve* curve, size_t index);
{ "language": "C" }
/*
===========================================================================

  Copyright (c) 2010-2015 Darkstar Dev Teams

  This program is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program.  If not, see http://www.gnu.org/licenses/

  This file is part of DarkStar-server source code.

===========================================================================
*/

#ifndef _INSTANCEUTILS_H
#define _INSTANCEUTILS_H

#include "../../common/cbasetypes.h"

class CInstanceLoader;
class CCharEntity;

/* Helpers for managing zone instances. */
namespace instanceutils
{
    /* Check pending/active instances.
     * NOTE(review): presumably called periodically by the server loop --
     * confirm against the implementation. */
    void CheckInstance();

    /* Load instance <instanceid> in zone <zoneid> on behalf of PRequester. */
    void LoadInstance(uint8 instanceid, uint16 zoneid, CCharEntity* PRequester);
};

#endif
{ "language": "C" }
#include"nasmfunc.h" #include"memory.h" #include"sheet.h" #include"graphic.h" #include"dsctbl.h" #include"buffer.h" #include"timer.h" #include"keyboard.h" #include"mouse.h" #include"mtask.h" #include"int.h" #include"console.h" #include"calculator.h" #include"main.h" #include<stdio.h> void HariMain() { //中断初始化 init_gdtidt(); init_pic(); io_sti(); io_out8(PIC0_IMR, 0xf8);//允许PIC中断与键盘中断 io_out8(PIC1_IMR, 0xef);//允许鼠标中断 //初始化鼠标、键盘、定时器缓冲区(主任务键鼠缓冲区定义在栈中,子任务缓冲定义在堆中) unsigned char data; unsigned char keyb[32],mouseb[1024],timeb[8]; struct Buffer timeBuffer; initBuffer(&allBuf.key,32,keyb); initBuffer(&allBuf.mouse,1024,mouseb); initBuffer(&timeBuffer,8,timeb); //定时器初始化 initPit(); //设定定时器 struct Timer *timer1;//timer1为光标闪烁定时器,timer2为倒计时定时器 timer1=allocTimer(); initTimer(timer1,&timeBuffer,10); setTimer(timer1,50); //初始化屏幕 struct BootInfo *binfo=(struct BootInfo *) BOOTADDR;//屏幕长宽,图像缓冲区起始位置 initPalette();//设定调色板 putStrOnSrn(binfo,0,20,LIGHTRED,"Welcome To my OS"); putStrOnSrn(binfo,0,50,LIGHTRED,"Check the memory now..."); //鼠标->鼠标控制电路->CPU //先初始化鼠标控制电路(隐藏在键盘控制电路里) //再激活鼠标 mdec.x=100; mdec.y=100; init_keyboard();//先初始化键盘控制电路 enable_mouse(&mdec);//再激活鼠标 //初始化内存 //struct MemoryList * meml=(struct MemoryList *)MEMORYLISTADDR; int size; initMem(); meml->maxsize=0x7fffffff; size=memtest(0x00400000,0xbfffffff);//内存测试 freeMem(0x00001000,0x0009e000); freeMem(0x00400000,size-0x00400000); meml->used[0].addr=0x9e000; meml->used[0].size=0x00400000-0x9e000; meml->used[0].flag=1; sprintf (meml->used[0].status,"System"); //初始化键盘数据 char keyTable[0x160]= { 0 , 0 ,'1','2','3','4','5','6','7','8','9','0','-','=', 0 , 0 , 'q','w','e','r','t','y','u','i','o','p','[',']', 0 , 0 ,'a','s', 'd','f','g','h','j','k','l',';','\'','`', 0 ,'\\','z','x','c','v', 'b','n','m',',','.','/', 0 ,'*', 0 ,' ', 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ,'7','8','9','-','4','5','6','+','1', '2','3','0','.', 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 
, 0 , 0 , 0 , 0 ,'_', 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ,'\\', 0 , 0 , 0 , 0 ,'!','@','#','$','%','^','&','*','(',')','_','+', 0 , 0 , 'Q','W','E','R','T','Y','U','I','O','P','[',']', 0 , 0 ,'A','S', 'D','F','G','H','J','K','L',';','|','~', 0 ,'\\','Z','X','C','V', 'B','N','M',',','.','/', 0 ,'*', 0 ,' ', 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ,'7','8','9','_','4','5','6','+','1', '2','3','0','.', 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ,'_', 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ,'\\', 0 , 0 }; for (int i=0x00;i<0x160;i++) keyboard.keyTable[i]=keyTable[i]; char curInput[128]; int curPos = 0; //初始化图层表 scl=initSCL(binfo); sprintf (meml->used[1].status,"Sheet Control"); //初始化桌面图层 struct Sheet *sht_back; unsigned char *buf_back; sht_back=allocSheet();//申请图层 buf_back=(unsigned char *)allocMem_4k(binfo->scrnx*binfo->scrny,"Background UI");//申请内存空间 setBufInSheet(sht_back,buf_back,binfo->scrnx,binfo->scrny,-1);//初始化图层,其中无透明色 initScreenOnSht(sht_back);//画图层 slideSheet(sht_back,0,0);//移动图层位置 updownSheet(sht_back,0); //初始化鼠标图层 struct Sheet *sht_mouse; char mousebuf[256]; sht_mouse=allocSheet(); setBufInSheet(sht_mouse,mousebuf,16,16,99); initMouseCursor(sht_mouse); slideSheet(sht_mouse, mdec.x, mdec.y); updownSheet(sht_mouse,1); //显示基本信息 char str[128]; //显示内存大小 unsigned int u=meml->maxsize/1024/1024; sprintf (str,"Memory: %dM",u); putStrAndBackOnSht(sht_back,0,0,LIGHTRED,LIGHTGRAY,str,-1); //显示Focus窗口 sprintf (str,"Fouse: Console"); putStrAndBackOnSht(sht_back,0,1*16,LIGHTRED,LIGHTBLUE,str,40); refreshAllSheet();//所有图层准备 //初始化多任务 initTaskCTL(); //设定主任务 struct Task *mainTask; mainTask=getMainTask(); //初始化焦点,开始焦点为Console任务 initWindow(); //定义Console任务 struct Task *consoleTask; consoleTask=allocTask(); initTask(consoleTask,(int)&consoleTask_Main,"Console",11); createWindow(consoleTask,"Console"); runTask(consoleTask); //定义calculator任务 /*struct Task *calculatorTask; 
calculatorTask=allocTask(); window.winCount++; initTask(calculatorTask,(int)&calculatorTask_Main,window.winCount); sprintf (window.winName[window.winCount],"Calculator"); runTask(calculatorTask);*/ int flag; keyboard.isShift=0; //程序大循环 while(1) { flag=0; if (window.isChanged) { sprintf (str,"Focus: %s, winCount: %d, runningCount: %d",window.winName[window.focus],window.winCount,taskctl->runningCount); putStrAndBackOnSht(sht_back,0,1*16,LIGHTRED,LIGHTBLUE,str,60); window.isChanged=0; } //检查各类中断 io_cli(); if (getBuffer(&allBuf.key,&data)) { //键盘 io_sti(); flag=1; sprintf (str,"Key: %x",data); putStrAndBackOnSht(sht_back,0,2*16,LIGHTRED,LIGHTBLUE,str,26);//显示键盘信息 //执行键盘动作 switch (data) { case 0x0f://Tab键 window.focus++; window.focus%=window.winCount; window.isChanged=1; break; case 0x2a: case 0x36://Shift按住 keyboard.isShift=1; break; case 0xaa: case 0xb6://Shift松开 keyboard.isShift=0; break; default://剩余键盘动作 传给子任务 putBuffer(&taskctl->tasks[1+window.focus]->bufAll.key,data);//主任务没有窗口,故窗口号+1=任务号 break; } /*if (window.focus==0) { if (data<0x80 && keyTable[data]>0 && curPos<=15)//字母,数字 { curInput[curPos++] = keyTable[data]; curInput[curPos] = '\0'; }else switch (data) { case 0x0e://退格键 if (curPos>0) curInput[--curPos] = '\0'; break; case 0x0f://Tab键 window.focus++; window.focus%=window.winCount; break; } sprintf (str,"%s",curInput); putStrAndBackOnSht(sht_window,16,44,BLACK,WHITE,str,16); }*/ } if (getBuffer(&allBuf.mouse,&data)) { //鼠标 io_sti(); flag=2; if (mouse_decode(&mdec,data)) { mdec.x+=mdec.dx; if (mdec.x<0) mdec.x=0; if (mdec.x>binfo->scrnx-1) mdec.x=binfo->scrnx-1; mdec.y+=mdec.dy; if (mdec.y<0) mdec.y=0; if (mdec.y>binfo->scrny-1) mdec.y=binfo->scrny-1; sprintf (str,"Mouse: "); if (mdec.lbtn) sprintf (str,"%sL",str); if (mdec.mbtn) sprintf (str,"%sM",str); if (mdec.rbtn) sprintf (str,"%sR",str); sprintf (str,"%s (%d,%d)",str,mdec.x,mdec.y); putStrAndBackOnSht(sht_back,0,3*16,LIGHTRED,LIGHTBLUE,str,25);//显示鼠标信息 slideSheet(sht_mouse,mdec.x,mdec.y); //剩余鼠标动作传给子任务 
//主任务没有窗口,故图层号+1=窗口号+1=任务号 if (mdec.lbtn) putBuffer(&taskctl->tasks[window.focus+1]->bufAll.mouse,0); else if (mdec.mbtn) putBuffer(&taskctl->tasks[window.focus+1]->bufAll.mouse,1); else if (mdec.rbtn) putBuffer(&taskctl->tasks[window.focus+1]->bufAll.mouse,2); } } /*if (getBuffer(&timeBuffer,&data)) { //定时器 io_sti(); flag=3; switch (data)//为了区分不同的定时器 { case 10://timer1显示光标 initTimer(timer1,&timeBuffer,11); boxfillOnSht(sht_window,16+8*curPos,44,8,15,BLACK); refreshSubInSheet(sht_window,16+8*curPos,44,8,15); setTimer(timer1,50); break; case 11://timer1隐藏光标 initTimer(timer1,&timeBuffer,10); boxfillOnSht(sht_window,16+8*curPos,44,8,15,WHITE); refreshSubInSheet(sht_window,16+8*curPos,44,8,15); setTimer(timer1,50); break; } }*/ if (flag==0) { //没有事件就休眠 //sleepTask(mainTask); io_sti(); } } } /* //任务calculator void calculatorTask_Main(struct Task *task) { //初始化缓冲区 char bufferArray[128]; struct Buffer bufferTime; initBuffer(&bufferTime,128,bufferArray); //初始化定时器 struct Timer *timerCur; timerCur=allocTimer(); initTimer(timerCur,&bufferTime,1); setTimer(timerCur,50); //初始化控制台图层 struct Sheet *consoleSheet; unsigned char *consoleBuffer; consoleBuffer=(unsigned char *)allocMem_4k(512*310); consoleSheet=allocSheet(); setBufInSheet(consoleSheet,consoleBuffer,512,310,-1);//没有透明色 slideSheet(consoleSheet,202,8); makeWindow(consoleSheet,512,310,"Calculator"); makeTextBox(consoleSheet,8,27,496,276,BLACK); setHeightSheet(consoleSheet,1); unsigned char data; int curPos=0; int flag=0; while (1) { flag=0; if (window.focus!=task->winID)//焦点不在,取消光标 { boxfillOnSht(consoleSheet,8+8*curPos,28,8,15,BLACK); refreshSubInSheet(consoleSheet,8+8*curPos,28,8,15); continue; }else if (timerCur->flag==TIMER_ALLOCED)//重新获得焦点,重启光标 { initTimer(timerCur,&bufferTime,1); setTimer(timerCur,50); } io_cli(); if (getBuffer(&bufferTime,&data)) { io_sti(); flag=3; switch (data) { case 0: initTimer(timerCur,&bufferTime,1); boxfillOnSht(consoleSheet,8+8*curPos,28,8,15,WHITE); 
refreshSubInSheet(consoleSheet,8+8*curPos,28,8,15); setTimer(timerCur,50); break; case 1: initTimer(timerCur,&bufferTime,0); boxfillOnSht(consoleSheet,8+8*curPos,28,8,15,BLACK); refreshSubInSheet(consoleSheet,8+8*curPos,28,8,15); setTimer(timerCur,50); break; } } if (flag==0) { io_sti(); } } }*/
{ "language": "C" }
#include <dlfcn.h> #include <i915_drm.h> #include "gputop-perf.h" int ioctl(int fd, unsigned long request, void *data) { static int (*real_ioctl) (int fd, unsigned long request, void *data); int ret; if (!real_ioctl) real_ioctl = dlsym(RTLD_NEXT, "ioctl"); ret = real_ioctl(fd, request, data); if (!ret) { /* Contexts are specific to the drm_file instance used in rendering. * The ctx_id created by the kernel is _not_ globally * unique, but rather unique for that drm_file instance. * For example from the global context_list, there will be multiple * default contexts (0), but actually only one of these per file instance. * Therefore to properly enable OA metrics on a per-context basis, we * need the file descriptor and the ctx_id. */ if (request == DRM_IOCTL_I915_GEM_CONTEXT_CREATE) { struct drm_i915_gem_context_create *ctx_create = data; gputop_add_ctx_handle(fd, ctx_create->ctx_id); } else if (request == DRM_IOCTL_I915_GEM_CONTEXT_DESTROY) { struct drm_i915_gem_context_destroy *ctx_destroy = data; gputop_remove_ctx_handle(ctx_destroy->ctx_id); } } return ret; }
{ "language": "C" }
#include <stdlib.h> #include <stdio.h> #include <string.h> #include "test_helpers.h" int main(void) { char *first_null = tmpnam(NULL); if(first_null == NULL) { // NOTE: assuming that we can at least get one file name puts("tmpnam(NULL) returned NULL on first try"); exit(EXIT_FAILURE); } printf("%s\n", first_null); char *second_null = tmpnam(NULL); if(second_null == NULL) { // NOTE: assuming that we can at least get one file name puts("tmpnam(NULL) returned NULL on second try"); exit(EXIT_FAILURE); } printf("%s\n", second_null); if(first_null != second_null) { puts("tmpnam(NULL) returns different addresses"); exit(EXIT_FAILURE); } char buffer[L_tmpnam + 1]; char *buf_result = tmpnam(buffer); if(buf_result == NULL) { puts("tmpnam(buffer) failed"); exit(EXIT_FAILURE); } else if(buf_result != buffer) { puts("tmpnam(buffer) did not return buffer's address"); exit(EXIT_FAILURE); } printf("%s\n", buffer); return 0; }
{ "language": "C" }
/*********************************************************************** Copyright (c) 2006-2012, Skype Limited. All rights reserved. Redistribution and use in source and binary forms, with or without modification, (subject to the limitations in the disclaimer below) are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of Skype Limited, nor the names of specific contributors, may be used to endorse or promote products derived from this software without specific prior written permission. NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
***********************************************************************/ #ifndef SKP_SILK_MAIN_FIX_H #define SKP_SILK_MAIN_FIX_H #include <stdlib.h> #include "SKP_Silk_SigProc_FIX.h" #include "SKP_Silk_structs_FIX.h" #include "SKP_Silk_main.h" #include "SKP_Silk_PLC.h" #define TIC(TAG_NAME) #define TOC(TAG_NAME) #ifndef FORCE_CPP_BUILD #ifdef __cplusplus extern "C" { #endif #endif /*********************/ /* Encoder Functions */ /*********************/ /* Initializes the Silk encoder state */ SKP_int SKP_Silk_init_encoder_FIX( SKP_Silk_encoder_state_FIX *psEnc /* I/O Pointer to Silk FIX encoder state */ ); /* Control the Silk encoder */ SKP_int SKP_Silk_control_encoder_FIX( SKP_Silk_encoder_state_FIX *psEnc, /* I/O Pointer to Silk encoder state */ const SKP_int PacketSize_ms, /* I Packet length (ms) */ const SKP_int32 TargetRate_bps, /* I Target max bitrate (bps) */ const SKP_int PacketLoss_perc, /* I Packet loss rate (in percent) */ const SKP_int DTX_enabled, /* I Enable / disable DTX */ const SKP_int Complexity /* I Complexity (0->low; 1->medium; 2->high) */ ); /* Encoder main function */ SKP_int SKP_Silk_encode_frame_FIX( SKP_Silk_encoder_state_FIX *psEnc, /* I/O Pointer to Silk FIX encoder state */ SKP_uint8 *pCode, /* O Pointer to payload */ SKP_int16 *pnBytesOut, /* I/O Pointer to number of payload bytes; */ /* input: max length; output: used */ const SKP_int16 *pIn /* I Pointer to input speech frame */ ); /* Low BitRate Redundancy encoding functionality. 
Reuse all parameters but encode with lower bitrate */ void SKP_Silk_LBRR_encode_FIX( SKP_Silk_encoder_state_FIX *psEnc, /* I/O Pointer to Silk FIX encoder state */ SKP_Silk_encoder_control_FIX *psEncCtrl, /* I/O Pointer to Silk FIX encoder control struct */ SKP_uint8 *pCode, /* O Pointer to payload */ SKP_int16 *pnBytesOut, /* I/O Pointer to number of payload bytes */ SKP_int16 xfw[] /* I Input signal */ ); /* High-pass filter with cutoff frequency adaptation based on pitch lag statistics */ void SKP_Silk_HP_variable_cutoff_FIX( SKP_Silk_encoder_state_FIX *psEnc, /* I/O Encoder state */ SKP_Silk_encoder_control_FIX *psEncCtrl, /* I/O Encoder control */ SKP_int16 *out, /* O high-pass filtered output signal */ const SKP_int16 *in /* I input signal */ ); /****************/ /* Prefiltering */ /****************/ void SKP_Silk_prefilter_FIX( SKP_Silk_encoder_state_FIX *psEnc, /* I/O Encoder state */ const SKP_Silk_encoder_control_FIX *psEncCtrl, /* I Encoder control */ SKP_int16 xw[], /* O Weighted signal */ const SKP_int16 x[] /* I Speech signal */ ); /**************************************************************/ /* Compute noise shaping coefficients and initial gain values */ /**************************************************************/ void SKP_Silk_noise_shape_analysis_FIX( SKP_Silk_encoder_state_FIX *psEnc, /* I/O Encoder state FIX */ SKP_Silk_encoder_control_FIX *psEncCtrl, /* I/O Encoder control FIX */ const SKP_int16 *pitch_res, /* I LPC residual from pitch analysis */ const SKP_int16 *x /* I Input signal [ frame_length + la_shape ] */ ); /* Autocorrelations for a warped frequency axis */ void SKP_Silk_warped_autocorrelation_FIX( SKP_int32 *corr, /* O Result [order + 1] */ SKP_int *scale, /* O Scaling of the correlation vector */ const SKP_int16 *input, /* I Input data to correlate */ const SKP_int16 warping_Q16, /* I Warping coefficient */ const SKP_int length, /* I Length of input */ const SKP_int order /* I Correlation order (even) */ ); /* Processing of 
gains */ void SKP_Silk_process_gains_FIX( SKP_Silk_encoder_state_FIX *psEnc, /* I/O Encoder state */ SKP_Silk_encoder_control_FIX *psEncCtrl /* I/O Encoder control */ ); /* Control low bitrate redundancy usage */ void SKP_Silk_LBRR_ctrl_FIX( SKP_Silk_encoder_state_FIX *psEnc, /* I/O encoder state */ SKP_Silk_encoder_control *psEncCtrlC /* I/O encoder control */ ); /* Calculation of LTP state scaling */ void SKP_Silk_LTP_scale_ctrl_FIX( SKP_Silk_encoder_state_FIX *psEnc, /* I/O encoder state */ SKP_Silk_encoder_control_FIX *psEncCtrl /* I/O encoder control */ ); /**********************************************/ /* Prediction Analysis */ /**********************************************/ /* Find pitch lags */ void SKP_Silk_find_pitch_lags_FIX( SKP_Silk_encoder_state_FIX *psEnc, /* I/O encoder state */ SKP_Silk_encoder_control_FIX *psEncCtrl, /* I/O encoder control */ SKP_int16 res[], /* O residual */ const SKP_int16 x[] /* I Speech signal */ ); void SKP_Silk_find_pred_coefs_FIX( SKP_Silk_encoder_state_FIX *psEnc, /* I/O encoder state */ SKP_Silk_encoder_control_FIX *psEncCtrl, /* I/O encoder control */ const SKP_int16 res_pitch[] /* I Residual from pitch analysis */ ); void SKP_Silk_find_LPC_FIX( SKP_int NLSF_Q15[], /* O NLSFs */ SKP_int *interpIndex, /* O NLSF interpolation index, only used for NLSF interpolation */ const SKP_int prev_NLSFq_Q15[], /* I previous NLSFs, only used for NLSF interpolation */ const SKP_int useInterpolatedLSFs, /* I Flag */ const SKP_int LPC_order, /* I LPC order */ const SKP_int16 x[], /* I Input signal */ const SKP_int subfr_length /* I Input signal subframe length including preceeding samples */ ); void SKP_Silk_warped_LPC_analysis_filter_FIX( SKP_int32 state[], /* I/O State [order + 1] */ SKP_int16 res[], /* O Residual signal [length] */ const SKP_int16 coef_Q13[], /* I Coefficients [order] */ const SKP_int16 input[], /* I Input signal [length] */ const SKP_int16 lambda_Q16, /* I Warping factor */ const SKP_int length, /* I Length of 
input signal */ const SKP_int order /* I Filter order (even) */ ); void SKP_Silk_LTP_analysis_filter_FIX( SKP_int16 *LTP_res, /* O: LTP residual signal of length NB_SUBFR * ( pre_length + subfr_length ) */ const SKP_int16 *x, /* I: Pointer to input signal with at least max( pitchL ) preceeding samples */ const SKP_int16 LTPCoef_Q14[ LTP_ORDER * NB_SUBFR ],/* I: LTP_ORDER LTP coefficients for each NB_SUBFR subframe */ const SKP_int pitchL[ NB_SUBFR ], /* I: Pitch lag, one for each subframe */ const SKP_int32 invGains_Q16[ NB_SUBFR ], /* I: Inverse quantization gains, one for each subframe */ const SKP_int subfr_length, /* I: Length of each subframe */ const SKP_int pre_length /* I: Length of the preceeding samples starting at &x[0] for each subframe */ ); /* Finds LTP vector from correlations */ void SKP_Silk_find_LTP_FIX( SKP_int16 b_Q14[ NB_SUBFR * LTP_ORDER ], /* O LTP coefs */ SKP_int32 WLTP[ NB_SUBFR * LTP_ORDER * LTP_ORDER ], /* O Weight for LTP quantization */ SKP_int *LTPredCodGain_Q7, /* O LTP coding gain */ const SKP_int16 r_first[], /* I residual signal after LPC signal + state for first 10 ms */ const SKP_int16 r_last[], /* I residual signal after LPC signal + state for last 10 ms */ const SKP_int lag[ NB_SUBFR ], /* I LTP lags */ const SKP_int32 Wght_Q15[ NB_SUBFR ], /* I weights */ const SKP_int subfr_length, /* I subframe length */ const SKP_int mem_offset, /* I number of samples in LTP memory */ SKP_int corr_rshifts[ NB_SUBFR ] /* O right shifts applied to correlations */ ); /* LTP tap quantizer */ void SKP_Silk_quant_LTP_gains_FIX( SKP_int16 B_Q14[], /* I/O (un)quantized LTP gains */ SKP_int cbk_index[], /* O Codebook Index */ SKP_int *periodicity_index, /* O Periodicity Index */ const SKP_int32 W_Q18[], /* I Error Weights in Q18 */ SKP_int mu_Q8, /* I Mu value (R/D tradeoff) */ SKP_int lowComplexity /* I Flag for low complexity */ ); /******************/ /* NLSF Quantizer */ /******************/ /* Limit, stabilize, convert and quantize NLSFs. 
*/ void SKP_Silk_process_NLSFs_FIX( SKP_Silk_encoder_state_FIX *psEnc, /* I/O encoder state */ SKP_Silk_encoder_control_FIX *psEncCtrl, /* I/O encoder control */ SKP_int *pNLSF_Q15 /* I/O Normalized LSFs (quant out) (0 - (2^15-1)) */ ); /* NLSF vector encoder */ void SKP_Silk_NLSF_MSVQ_encode_FIX( SKP_int *NLSFIndices, /* O Codebook path vector [ CB_STAGES ] */ SKP_int *pNLSF_Q15, /* I/O Quantized NLSF vector [ LPC_ORDER ] */ const SKP_Silk_NLSF_CB_struct *psNLSF_CB, /* I Codebook object */ const SKP_int *pNLSF_q_Q15_prev, /* I Prev. quantized NLSF vector [LPC_ORDER] */ const SKP_int *pW_Q6, /* I NLSF weight vector [ LPC_ORDER ] */ const SKP_int NLSF_mu_Q15, /* I Rate weight for the RD optimization */ const SKP_int NLSF_mu_fluc_red_Q16, /* I Fluctuation reduction error weight */ const SKP_int NLSF_MSVQ_Survivors, /* I Max survivors from each stage */ const SKP_int LPC_order, /* I LPC order */ const SKP_int deactivate_fluc_red /* I Deactivate fluctuation reduction */ ); /* Rate-Distortion calculations for multiple input data vectors */ void SKP_Silk_NLSF_VQ_rate_distortion_FIX( SKP_int32 *pRD_Q20, /* O Rate-distortion values [psNLSF_CBS->nVectors*N] */ const SKP_Silk_NLSF_CBS *psNLSF_CBS, /* I NLSF codebook stage struct */ const SKP_int *in_Q15, /* I Input vectors to be quantized */ const SKP_int *w_Q6, /* I Weight vector */ const SKP_int32 *rate_acc_Q5, /* I Accumulated rates from previous stage */ const SKP_int mu_Q15, /* I Weight between weighted error and rate */ const SKP_int N, /* I Number of input vectors to be quantized */ const SKP_int LPC_order /* I LPC order */ ); /* Compute weighted quantization errors for an LPC_order element input vector, over one codebook stage */ void SKP_Silk_NLSF_VQ_sum_error_FIX( SKP_int32 *err_Q20, /* O Weighted quantization errors [N*K] */ const SKP_int *in_Q15, /* I Input vectors to be quantized [N*LPC_order] */ const SKP_int *w_Q6, /* I Weighting vectors [N*LPC_order] */ const SKP_int16 *pCB_Q15, /* I Codebook vectors 
[K*LPC_order] */ const SKP_int N, /* I Number of input vectors */ const SKP_int K, /* I Number of codebook vectors */ const SKP_int LPC_order /* I Number of LPCs */ ); /* Entropy constrained MATRIX-weighted VQ, for a single input data vector */ void SKP_Silk_VQ_WMat_EC_FIX( SKP_int *ind, /* O index of best codebook vector */ SKP_int32 *rate_dist_Q14, /* O best weighted quantization error + mu * rate*/ const SKP_int16 *in_Q14, /* I input vector to be quantized */ const SKP_int32 *W_Q18, /* I weighting matrix */ const SKP_int16 *cb_Q14, /* I codebook */ const SKP_int16 *cl_Q6, /* I code length for each codebook vector */ const SKP_int mu_Q8, /* I tradeoff between weighted error and rate */ SKP_int L /* I number of vectors in codebook */ ); /******************/ /* Linear Algebra */ /******************/ /* Calculates correlation matrix X'*X */ void SKP_Silk_corrMatrix_FIX( const SKP_int16 *x, /* I x vector [L + order - 1] used to form data matrix X */ const SKP_int L, /* I Length of vectors */ const SKP_int order, /* I Max lag for correlation */ const SKP_int head_room, /* I Desired headroom */ SKP_int32 *XX, /* O Pointer to X'*X correlation matrix [ order x order ]*/ SKP_int *rshifts /* I/O Right shifts of correlations */ ); /* Calculates correlation vector X'*t */ void SKP_Silk_corrVector_FIX( const SKP_int16 *x, /* I x vector [L + order - 1] used to form data matrix X */ const SKP_int16 *t, /* I Target vector [L] */ const SKP_int L, /* I Length of vectors */ const SKP_int order, /* I Max lag for correlation */ SKP_int32 *Xt, /* O Pointer to X'*t correlation vector [order] */ const SKP_int rshifts /* I Right shifts of correlations */ ); /* Add noise to matrix diagonal */ void SKP_Silk_regularize_correlations_FIX( SKP_int32 *XX, /* I/O Correlation matrices */ SKP_int32 *xx, /* I/O Correlation values */ SKP_int32 noise, /* I Noise to add */ SKP_int D /* I Dimension of XX */ ); /* Solves Ax = b, assuming A is symmetric */ void SKP_Silk_solve_LDL_FIX( SKP_int32 *A, /* I 
Pointer to symetric square matrix A */ SKP_int M, /* I Size of matrix */ const SKP_int32 *b, /* I Pointer to b vector */ SKP_int32 *x_Q16 /* O Pointer to x solution vector */ ); /* Residual energy: nrg = wxx - 2 * wXx * c + c' * wXX * c */ SKP_int32 SKP_Silk_residual_energy16_covar_FIX( const SKP_int16 *c, /* I Prediction vector */ const SKP_int32 *wXX, /* I Correlation matrix */ const SKP_int32 *wXx, /* I Correlation vector */ SKP_int32 wxx, /* I Signal energy */ SKP_int D, /* I Dimension */ SKP_int cQ /* I Q value for c vector 0 - 15 */ ); /* Calculates residual energies of input subframes where all subframes have LPC_order */ /* of preceeding samples */ void SKP_Silk_residual_energy_FIX( SKP_int32 nrgs[ NB_SUBFR ], /* O Residual energy per subframe */ SKP_int nrgsQ[ NB_SUBFR ], /* O Q value per subframe */ const SKP_int16 x[], /* I Input signal */ SKP_int16 a_Q12[ 2 ][ MAX_LPC_ORDER ],/* I AR coefs for each frame half */ const SKP_int32 gains[ NB_SUBFR ], /* I Quantization gains */ const SKP_int subfr_length, /* I Subframe length */ const SKP_int LPC_order /* I LPC order */ ); #ifndef FORCE_CPP_BUILD #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* FORCE_CPP_BUILD */ #endif /* SKP_SILK_MAIN_FIX_H */
{ "language": "C" }
/* * Copyright © 2014 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include <stdlib.h> #include "compiler/brw_inst.h" #include "compiler/brw_eu.h" #include "gen_disasm.h" struct gen_disasm { struct gen_device_info devinfo; }; static bool is_send(uint32_t opcode) { return (opcode == BRW_OPCODE_SEND || opcode == BRW_OPCODE_SENDC || opcode == BRW_OPCODE_SENDS || opcode == BRW_OPCODE_SENDSC ); } static int gen_disasm_find_end(struct gen_disasm *disasm, const void *assembly, int start) { struct gen_device_info *devinfo = &disasm->devinfo; int offset = start; /* This loop exits when send-with-EOT or when opcode is 0 */ while (true) { const brw_inst *insn = assembly + offset; if (brw_inst_cmpt_control(devinfo, insn)) { offset += 8; } else { offset += 16; } /* Simplistic, but efficient way to terminate disasm */ uint32_t opcode = brw_inst_opcode(devinfo, insn); if (opcode == 0 || (is_send(opcode) && brw_inst_eot(devinfo, insn))) { break; } } return offset; } void gen_disasm_disassemble(struct gen_disasm *disasm, const void *assembly, int start, FILE *out) { struct gen_device_info *devinfo = &disasm->devinfo; int end = gen_disasm_find_end(disasm, assembly, start); /* Make a dummy disasm structure that brw_validate_instructions * can work from. 
*/ struct disasm_info *disasm_info = disasm_initialize(devinfo, NULL); disasm_new_inst_group(disasm_info, start); disasm_new_inst_group(disasm_info, end); brw_validate_instructions(devinfo, assembly, start, end, disasm_info); foreach_list_typed(struct inst_group, group, link, &disasm_info->group_list) { struct exec_node *next_node = exec_node_get_next(&group->link); if (exec_node_is_tail_sentinel(next_node)) break; struct inst_group *next = exec_node_data(struct inst_group, next_node, link); int start_offset = group->offset; int end_offset = next->offset; brw_disassemble(devinfo, assembly, start_offset, end_offset, out); if (group->error) { fputs(group->error, out); } } ralloc_free(disasm_info); } struct gen_disasm * gen_disasm_create(const struct gen_device_info *devinfo) { struct gen_disasm *gd; gd = malloc(sizeof *gd); if (gd == NULL) return NULL; gd->devinfo = *devinfo; brw_init_compaction_tables(&gd->devinfo); return gd; } void gen_disasm_destroy(struct gen_disasm *disasm) { free(disasm); }
{ "language": "C" }
/* ** Enable SQLite debug assertions if requested */ #ifndef SQLITE_DEBUG #if defined(SQLITE_ENABLE_DEBUG) && (SQLITE_ENABLE_DEBUG == 1) #define SQLITE_DEBUG 1 #endif #endif /* ** To enable the extension functions define SQLITE_ENABLE_EXTFUNC on compiling this module ** To enable the reading CSV files define SQLITE_ENABLE_CSV on compiling this module ** To enable the SHA3 support define SQLITE_ENABLE_SHA3 on compiling this module ** To enable the CARRAY support define SQLITE_ENABLE_CARRAY on compiling this module ** To enable the FILEIO support define SQLITE_ENABLE_FILEIO on compiling this module ** To enable the SERIES support define SQLITE_ENABLE_SERIES on compiling this module */ #if defined(SQLITE_ENABLE_EXTFUNC) || defined(SQLITE_ENABLE_CSV) || defined(SQLITE_ENABLE_SHA3) || defined(SQLITE_ENABLE_CARRAY) || defined(SQLITE_ENABLE_FILEIO) || defined(SQLITE_ENABLE_SERIES) #define sqlite3_open sqlite3_open_internal #define sqlite3_open16 sqlite3_open16_internal #define sqlite3_open_v2 sqlite3_open_v2_internal #endif /* ** Enable the user authentication feature */ #ifndef SQLITE_USER_AUTHENTICATION #define SQLITE_USER_AUTHENTICATION 1 #endif #include "sqlite3.c" #ifdef SQLITE_USER_AUTHENTICATION #include "sha2.h" #include "sha2.c" #include "userauth.c" #endif #if defined(SQLITE_ENABLE_EXTFUNC) || defined(SQLITE_ENABLE_CSV) || defined(SQLITE_ENABLE_SHA3) || defined(SQLITE_ENABLE_CARRAY) || defined(SQLITE_ENABLE_FILEIO) || defined(SQLITE_ENABLE_SERIES) #undef sqlite3_open #undef sqlite3_open16 #undef sqlite3_open_v2 #endif #ifndef SQLITE_OMIT_DISKIO #ifdef SQLITE_HAS_CODEC /* ** Get the codec argument for this pager */ void* mySqlite3PagerGetCodec( Pager *pPager ){ #if (SQLITE_VERSION_NUMBER >= 3006016) return sqlite3PagerGetCodec(pPager); #else return (pPager->xCodec) ? 
pPager->pCodecArg : NULL; #endif } /* ** Set the codec argument for this pager */ void mySqlite3PagerSetCodec( Pager *pPager, void *(*xCodec)(void*,void*,Pgno,int), void (*xCodecSizeChng)(void*,int,int), void (*xCodecFree)(void*), void *pCodec ){ sqlite3PagerSetCodec(pPager, xCodec, xCodecSizeChng, xCodecFree, pCodec); } #include "rijndael.c" #include "codec.c" #include "codecext.c" #endif #endif /* ** Extension functions */ #ifdef SQLITE_ENABLE_EXTFUNC #include "extensionfunctions.c" #endif /* ** CSV import */ #ifdef SQLITE_ENABLE_CSV #include "csv.c" #endif /* ** SHA3 */ #ifdef SQLITE_ENABLE_SHA3 #include "shathree.c" #endif /* ** CARRAY */ #ifdef SQLITE_ENABLE_CARRAY #include "carray.c" #endif /* ** FILEIO */ #ifdef SQLITE_ENABLE_FILEIO #include "fileio.c" #endif /* ** SERIES */ #ifdef SQLITE_ENABLE_SERIES #include "series.c" #endif #if defined(SQLITE_ENABLE_EXTFUNC) || defined(SQLITE_ENABLE_CSV) || defined(SQLITE_ENABLE_SHA3) || defined(SQLITE_ENABLE_CARRAY) || defined(SQLITE_ENABLE_FILEIO) || defined(SQLITE_ENABLE_SERIES) static void registerAllExtensions(sqlite3 *db, char **pzErrMsg, const sqlite3_api_routines *pApi) { #ifdef SQLITE_ENABLE_EXTFUNC RegisterExtensionFunctions(db); #endif #ifdef SQLITE_ENABLE_CSV sqlite3_csv_init(db, NULL, NULL); #endif #ifdef SQLITE_ENABLE_SHA3 sqlite3_shathree_init(db, NULL, NULL); #endif #ifdef SQLITE_ENABLE_CARRAY sqlite3_carray_init(db, NULL, NULL); #endif #ifdef SQLITE_ENABLE_FILEIO sqlite3_fileio_init(db, NULL, NULL); #endif #ifdef SQLITE_ENABLE_SERIES sqlite3_series_init(db, NULL, NULL); #endif } SQLITE_API int sqlite3_open( const char *filename, /* Database filename (UTF-8) */ sqlite3 **ppDb /* OUT: SQLite db handle */ ) { int ret = sqlite3_open_internal(filename, ppDb); if (ret == 0) { registerAllExtensions(*ppDb, NULL, NULL); } return ret; } SQLITE_API int sqlite3_open16( const void *filename, /* Database filename (UTF-16) */ sqlite3 **ppDb /* OUT: SQLite db handle */ ) { int ret = sqlite3_open16_internal(filename, 
ppDb); if (ret == 0) { registerAllExtensions(*ppDb, NULL, NULL); } return ret; } SQLITE_API int sqlite3_open_v2( const char *filename, /* Database filename (UTF-8) */ sqlite3 **ppDb, /* OUT: SQLite db handle */ int flags, /* Flags */ const char *zVfs /* Name of VFS module to use */ ) { int ret = sqlite3_open_v2_internal(filename, ppDb, flags, zVfs); if (ret == 0) { registerAllExtensions(*ppDb, NULL, NULL); } return ret; } #endif
{ "language": "C" }
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef T_FUNCTION_H #define T_FUNCTION_H #include <string> #include "thrift/parse/t_type.h" #include "thrift/parse/t_struct.h" #include "thrift/parse/t_doc.h" /** * Representation of a function. Key parts are return type, function name, * optional modifiers, and an argument list, which is implemented as a thrift * struct. 
* */ class t_function : public t_doc { public: t_function(t_type* returntype, std::string name, t_struct* arglist, bool oneway = false) : returntype_(returntype), name_(name), arglist_(arglist), xceptions_(new t_struct(NULL)), own_xceptions_(true), oneway_(oneway) { if (oneway_ && (!returntype_->is_void())) { pwarning(1, "Oneway methods should return void.\n"); } } t_function(t_type* returntype, std::string name, t_struct* arglist, t_struct* xceptions, bool oneway = false) : returntype_(returntype), name_(name), arglist_(arglist), xceptions_(xceptions), own_xceptions_(false), oneway_(oneway) { if (oneway_ && !xceptions_->get_members().empty()) { throw std::string("Oneway methods can't throw exceptions."); } if (oneway_ && (!returntype_->is_void())) { pwarning(1, "Oneway methods should return void.\n"); } } ~t_function() { if (own_xceptions_) delete xceptions_; } t_type* get_returntype() const { return returntype_; } const std::string& get_name() const { return name_; } t_struct* get_arglist() const { return arglist_; } t_struct* get_xceptions() const { return xceptions_; } bool is_oneway() const { return oneway_; } std::map<std::string, std::string> annotations_; private: t_type* returntype_; std::string name_; t_struct* arglist_; t_struct* xceptions_; bool own_xceptions_; bool oneway_; }; #endif
{ "language": "C" }
/* * Access vector cache interface for object managers. * * Author : Eamon Walsh <ewalsh@epoch.ncsc.mil> */ #ifndef _SELINUX_AVC_H_ #define _SELINUX_AVC_H_ #include <stdint.h> #include <errno.h> #include <stdlib.h> #include <selinux/selinux.h> #ifdef __cplusplus extern "C" { #endif /* * SID format and operations */ struct security_id { char * ctx; unsigned int refcnt; }; typedef struct security_id *security_id_t; #define SECSID_WILD (security_id_t)NULL /* unspecified SID */ /** * avc_sid_to_context - get copy of context corresponding to SID. * @sid: input SID * @ctx: pointer to context reference * * Return a copy of the security context corresponding to the input * @sid in the memory referenced by @ctx. The caller is expected to * free the context with freecon(). Return %0 on success, -%1 on * failure, with @errno set to %ENOMEM if insufficient memory was * available to make the copy, or %EINVAL if the input SID is invalid. */ extern int avc_sid_to_context(security_id_t sid, char ** ctx); extern int avc_sid_to_context_raw(security_id_t sid, char ** ctx); /** * avc_context_to_sid - get SID for context. * @ctx: input security context * @sid: pointer to SID reference * * Look up security context @ctx in SID table, making * a new entry if @ctx is not found. Increment the * reference counter for the SID. Store a pointer * to the SID structure into the memory referenced by @sid, * returning %0 on success or -%1 on error with @errno set. */ extern int avc_context_to_sid(const char * ctx, security_id_t * sid); extern int avc_context_to_sid_raw(const char * ctx, security_id_t * sid); /** * sidget - increment SID reference counter. * @sid: SID reference * * Increment the reference counter for @sid, indicating that * @sid is in use by an (additional) object. Return the * new reference count, or zero if @sid is invalid (has zero * reference count). Note that avc_context_to_sid() also * increments reference counts. 
*/ extern int sidget(security_id_t sid) #ifdef __GNUC__ __attribute__ ((deprecated)) #endif ; /** * sidput - decrement SID reference counter. * @sid: SID reference * * Decrement the reference counter for @sid, indicating that * a reference to @sid is no longer in use. Return the * new reference count. When the reference count reaches * zero, the SID is invalid, and avc_context_to_sid() must * be called to obtain a new SID for the security context. */ extern int sidput(security_id_t sid) #ifdef __GNUC__ __attribute__ ((deprecated)) #endif ; /** * avc_get_initial_sid - get SID for an initial kernel security identifier * @name: input name of initial kernel security identifier * @sid: pointer to a SID reference * * Get the context for an initial kernel security identifier specified by * @name using security_get_initial_context() and then call * avc_context_to_sid() to get the corresponding SID. */ extern int avc_get_initial_sid(const char *name, security_id_t * sid); /* * AVC entry */ struct avc_entry; struct avc_entry_ref { struct avc_entry *ae; }; /** * avc_entry_ref_init - initialize an AVC entry reference. * @aeref: pointer to avc entry reference structure * * Use this macro to initialize an avc entry reference structure * before first use. These structures are passed to avc_has_perm(), * which stores cache entry references in them. They can increase * performance on repeated queries. */ #define avc_entry_ref_init(aeref) ((aeref)->ae = NULL) /* * User-provided callbacks for memory, auditing, and locking */ /* These structures are passed by reference to avc_init(). Passing * a NULL reference will cause the AVC to use a default. The default * memory callbacks are malloc() and free(). The default logging method * is to print on stderr. If no thread callbacks are passed, a separate * listening thread won't be started for kernel policy change messages. * If no locking callbacks are passed, no locking will take place. 
*/ struct avc_memory_callback { /* malloc() equivalent. */ void *(*func_malloc) (size_t size); /* free() equivalent. */ void (*func_free) (void *ptr); /* Note that these functions should set errno on failure. If not, some avc routines may return -1 without errno set. */ }; struct avc_log_callback { /* log the printf-style format and arguments. */ void #ifdef __GNUC__ __attribute__ ((format(printf, 1, 2))) #endif (*func_log) (const char *fmt, ...); /* store a string representation of auditdata (corresponding to the given security class) into msgbuf. */ void (*func_audit) (void *auditdata, security_class_t cls, char *msgbuf, size_t msgbufsize); }; struct avc_thread_callback { /* create and start a thread, returning an opaque pointer to it; the thread should run the given function. */ void *(*func_create_thread) (void (*run) (void)); /* cancel a given thread and free its resources. */ void (*func_stop_thread) (void *thread); }; struct avc_lock_callback { /* create a lock and return an opaque pointer to it. */ void *(*func_alloc_lock) (void); /* obtain a given lock, blocking if necessary. */ void (*func_get_lock) (void *lock); /* release a given lock. */ void (*func_release_lock) (void *lock); /* destroy a given lock (free memory, etc.) */ void (*func_free_lock) (void *lock); }; /* * Available options */ /* no-op option, useful for unused slots in an array of options */ #define AVC_OPT_UNUSED 0 /* override kernel enforcing mode (boolean value) */ #define AVC_OPT_SETENFORCE 1 /* * AVC operations */ /** * avc_init - Initialize the AVC. * @msgprefix: prefix for log messages * @mem_callbacks: user-supplied memory callbacks * @log_callbacks: user-supplied logging callbacks * @thread_callbacks: user-supplied threading callbacks * @lock_callbacks: user-supplied locking callbacks * * Initialize the access vector cache. Return %0 on * success or -%1 with @errno set on failure. * If @msgprefix is NULL, use "uavc". 
If any callback * structure references are NULL, use default methods * for those callbacks (see the definition of the callback * structures above). */ extern int avc_init(const char *msgprefix, const struct avc_memory_callback *mem_callbacks, const struct avc_log_callback *log_callbacks, const struct avc_thread_callback *thread_callbacks, const struct avc_lock_callback *lock_callbacks) #ifdef __GNUC__ __attribute__ ((deprecated("Use avc_open and selinux_set_callback"))) #endif ; /** * avc_open - Initialize the AVC. * @opts: array of selabel_opt structures specifying AVC options or NULL. * @nopts: number of elements in opts array or zero for no options. * * This function is identical to avc_init(), except the message prefix * is set to "avc" and any callbacks desired should be specified via * selinux_set_callback(). Available options are listed above. */ extern int avc_open(struct selinux_opt *opts, unsigned nopts); /** * avc_cleanup - Remove unused SIDs and AVC entries. * * Search the SID table for SID structures with zero * reference counts, and remove them along with all * AVC entries that reference them. This can be used * to return memory to the system. */ extern void avc_cleanup(void); /** * avc_reset - Flush the cache and reset statistics. * * Remove all entries from the cache and reset all access * statistics (as returned by avc_cache_stats()) to zero. * The SID mapping is not affected. Return %0 on success, * -%1 with @errno set on error. */ extern int avc_reset(void); /** * avc_destroy - Free all AVC structures. * * Destroy all AVC structures and free all allocated * memory. User-supplied locking, memory, and audit * callbacks will be retained, but security-event * callbacks will not. All SID's will be invalidated. * User must call avc_init() if further use of AVC is desired. */ extern void avc_destroy(void); /** * avc_has_perm_noaudit - Check permissions but perform no auditing. 
* @ssid: source security identifier * @tsid: target security identifier * @tclass: target security class * @requested: requested permissions, interpreted based on @tclass * @aeref: AVC entry reference * @avd: access vector decisions * * Check the AVC to determine whether the @requested permissions are granted * for the SID pair (@ssid, @tsid), interpreting the permissions * based on @tclass, and call the security server on a cache miss to obtain * a new decision and add it to the cache. Update @aeref to refer to an AVC * entry with the resulting decisions, and return a copy of the decisions * in @avd. Return %0 if all @requested permissions are granted, -%1 with * @errno set to %EACCES if any permissions are denied, or to another value * upon other errors. This function is typically called by avc_has_perm(), * but may also be called directly to separate permission checking from * auditing, e.g. in cases where a lock must be held for the check but * should be released for the auditing. */ extern int avc_has_perm_noaudit(security_id_t ssid, security_id_t tsid, security_class_t tclass, access_vector_t requested, struct avc_entry_ref *aeref, struct av_decision *avd); /** * avc_has_perm - Check permissions and perform any appropriate auditing. * @ssid: source security identifier * @tsid: target security identifier * @tclass: target security class * @requested: requested permissions, interpreted based on @tclass * @aeref: AVC entry reference * @auditdata: auxiliary audit data * * Check the AVC to determine whether the @requested permissions are granted * for the SID pair (@ssid, @tsid), interpreting the permissions * based on @tclass, and call the security server on a cache miss to obtain * a new decision and add it to the cache. Update @aeref to refer to an AVC * entry with the resulting decisions. Audit the granting or denial of * permissions in accordance with the policy. 
Return %0 if all @requested * permissions are granted, -%1 with @errno set to %EACCES if any permissions * are denied or to another value upon other errors. */ extern int avc_has_perm(security_id_t ssid, security_id_t tsid, security_class_t tclass, access_vector_t requested, struct avc_entry_ref *aeref, void *auditdata); /** * avc_audit - Audit the granting or denial of permissions. * @ssid: source security identifier * @tsid: target security identifier * @tclass: target security class * @requested: requested permissions * @avd: access vector decisions * @result: result from avc_has_perm_noaudit * @auditdata: auxiliary audit data * * Audit the granting or denial of permissions in accordance * with the policy. This function is typically called by * avc_has_perm() after a permission check, but can also be * called directly by callers who use avc_has_perm_noaudit() * in order to separate the permission check from the auditing. * For example, this separation is useful when the permission check must * be performed under a lock, to allow the lock to be released * before calling the auditing code. */ extern void avc_audit(security_id_t ssid, security_id_t tsid, security_class_t tclass, access_vector_t requested, struct av_decision *avd, int result, void *auditdata); /** * avc_compute_create - Compute SID for labeling a new object. * @ssid: source security identifier * @tsid: target security identifier * @tclass: target security class * @newsid: pointer to SID reference * * Call the security server to obtain a context for labeling a * new object. Look up the context in the SID table, making * a new entry if not found. Increment the reference counter * for the SID. Store a pointer to the SID structure into the * memory referenced by @newsid, returning %0 on success or -%1 on * error with @errno set. 
 */
extern int avc_compute_create(security_id_t ssid,
			      security_id_t tsid,
			      security_class_t tclass, security_id_t * newsid);

/**
 * avc_compute_member - Compute SID for polyinstantiation.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @newsid: pointer to SID reference
 *
 * Call the security server to obtain a context for labeling an
 * object instance.  Look up the context in the SID table, making
 * a new entry if not found.  Increment the reference counter
 * for the SID.  Store a pointer to the SID structure into the
 * memory referenced by @newsid, returning %0 on success or -%1 on
 * error with @errno set.
 */
extern int avc_compute_member(security_id_t ssid,
			      security_id_t tsid,
			      security_class_t tclass, security_id_t * newsid);

/*
 * security event callback facility
 */

/* security events */
#define AVC_CALLBACK_GRANT 1
#define AVC_CALLBACK_TRY_REVOKE 2
#define AVC_CALLBACK_REVOKE 4
#define AVC_CALLBACK_RESET 8
#define AVC_CALLBACK_AUDITALLOW_ENABLE 16
#define AVC_CALLBACK_AUDITALLOW_DISABLE 32
#define AVC_CALLBACK_AUDITDENY_ENABLE 64
#define AVC_CALLBACK_AUDITDENY_DISABLE 128

/**
 * avc_add_callback - Register a callback for security events.
 * @callback: callback function
 * @events: bitwise OR of desired security events
 * @ssid: source security identifier or %SECSID_WILD
 * @tsid: target security identifier or %SECSID_WILD
 * @tclass: target security class
 * @perms: permissions
 *
 * Register a callback function for events in the set @events
 * related to the SID pair (@ssid, @tsid) and
 * the permissions @perms, interpreting
 * @perms based on @tclass.  Returns %0 on success or
 * -%1 if insufficient memory exists to add the callback.
*/ extern int avc_add_callback(int (*callback) (uint32_t event, security_id_t ssid, security_id_t tsid, security_class_t tclass, access_vector_t perms, access_vector_t * out_retained), uint32_t events, security_id_t ssid, security_id_t tsid, security_class_t tclass, access_vector_t perms); /* * AVC statistics */ /* If set, cache statistics are tracked. This may * become a compile-time option in the future. */ #define AVC_CACHE_STATS 1 struct avc_cache_stats { unsigned entry_lookups; unsigned entry_hits; unsigned entry_misses; unsigned entry_discards; unsigned cav_lookups; unsigned cav_hits; unsigned cav_probes; unsigned cav_misses; }; /** * avc_cache_stats - get cache access statistics. * @stats: reference to statistics structure * * Fill the supplied structure with information about AVC * activity since the last call to avc_init() or * avc_reset(). See the structure definition for * details. */ extern void avc_cache_stats(struct avc_cache_stats *stats); /** * avc_av_stats - log av table statistics. * * Log a message with information about the size and * distribution of the access vector table. The audit * callback is used to print the message. */ extern void avc_av_stats(void); /** * avc_sid_stats - log SID table statistics. * * Log a message with information about the size and * distribution of the SID table. The audit callback * is used to print the message. */ extern void avc_sid_stats(void); /** * avc_netlink_open - Create a netlink socket and connect to the kernel. */ extern int avc_netlink_open(int blocking); /** * avc_netlink_loop - Wait for netlink messages from the kernel */ extern void avc_netlink_loop(void); /** * avc_netlink_close - Close the netlink socket */ extern void avc_netlink_close(void); /** * avc_netlink_acquire_fd - Acquire netlink socket fd. * * Allows the application to manage messages from the netlink socket in * its own main loop. */ extern int avc_netlink_acquire_fd(void); /** * avc_netlink_release_fd - Release netlink socket fd. 
* * Returns ownership of the netlink socket to the library. */ extern void avc_netlink_release_fd(void); /** * avc_netlink_check_nb - Check netlink socket for new messages. * * Called by the application when using avc_netlink_acquire_fd() to * process kernel netlink events. */ extern int avc_netlink_check_nb(void); /** * selinux_status_open - Open and map SELinux kernel status page * */ extern int selinux_status_open(int fallback); /** * selinux_status_close - Unmap and close SELinux kernel status page * */ extern void selinux_status_close(void); /** * selinux_status_updated - Inform us whether the kernel status has been updated * */ extern int selinux_status_updated(void); /** * selinux_status_getenforce - Get the enforce flag value * */ extern int selinux_status_getenforce(void); /** * selinux_status_policyload - Get the number of policy reloaded * */ extern int selinux_status_policyload(void); /** * selinux_status_deny_unknown - Get the behavior for undefined classes/permissions * */ extern int selinux_status_deny_unknown(void); #ifdef __cplusplus } #endif #endif /* _SELINUX_AVC_H_ */
{ "language": "C" }
/* Copyright (C) 2017 Milo Yip. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of pngout nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*! \file \brief svpng() is a minimalistic C function for saving RGB/RGBA image into uncompressed PNG. \author Milo Yip \version 0.1.1 \copyright MIT license \sa http://github.com/miloyip/svpng */ //Modified by Matthias Mueller //Added support for 8-bit gray-scale (16-bit - ToDo) #ifndef SVPNG_INC_ #define SVPNG_INC_ /*! \def SVPNG_LINKAGE \brief User customizable linkage for svpng() function. By default this macro is empty. User may define this macro as static for static linkage, and/or inline in C99/C++, etc. 
 */
#ifndef SVPNG_LINKAGE
#define SVPNG_LINKAGE
#endif

/*!
    \def SVPNG_OUTPUT
    \brief User customizable output stream. By default, it uses C file descriptor and fputc() to output bytes.
    In C++, for example, user may use std::ostream or std::vector instead.
*/
#ifndef SVPNG_OUTPUT
#include <stdio.h>
#define SVPNG_OUTPUT FILE* fp
#endif

/*! \def SVPNG_PUT
    \brief Write a byte */
#ifndef SVPNG_PUT
#define SVPNG_PUT(u) fputc(u, fp)
#endif

/*!
    \brief Save a RGB/RGBA image in PNG format.
    \param SVPNG_OUTPUT Output stream (by default using file descriptor).
    \param w Width of the image. (<16383)
    \param h Height of the image.
    \param img Image pixel data in 24-bit RGB or 32-bit RGBA format.
    \param alpha Whether the image contains alpha channel.
    \param gray Non-zero writes an 8-bit grayscale image instead (img is then
                one byte per pixel and \p alpha is ignored).  Note: this is a
                C++ default argument, so the header is no longer plain C.
*/
SVPNG_LINKAGE void svpng(SVPNG_OUTPUT, unsigned w, unsigned h, const unsigned char* img, int alpha, int gray = 0) {
    static const unsigned t[] = { 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c, /* CRC32 Table */
        0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c };
    unsigned a = 1, b = 0, c, x, y, i;  /* ADLER-a, ADLER-b, CRC, pitch */
    /* Pitch p = bytes per scanline incl. the leading filter byte:
    ** 1*w+1 for 8-bit gray, 3*w+1 for RGB, 4*w+1 for RGBA. */
    unsigned p = w + 1;
    if (!gray) p += w * (alpha ? 3 : 2);
/* Byte emitters.  The *C variants also fold each byte into the running
** CRC32 (nibble-wise, via the 16-entry table t[] above); the *ADLER
** variants additionally update the Adler-32 pair (a, b). */
#define SVPNG_U8A(ua, l) for (i = 0; i < l; i++) SVPNG_PUT((ua)[i]);
#define SVPNG_U32(u) do { SVPNG_PUT((u) >> 24); SVPNG_PUT(((u) >> 16) & 255); SVPNG_PUT(((u) >> 8) & 255); SVPNG_PUT((u) & 255); } while(0)
#define SVPNG_U8C(u) do { SVPNG_PUT(u); c ^= (u); c = (c >> 4) ^ t[c & 15]; c = (c >> 4) ^ t[c & 15]; } while(0)
#define SVPNG_U8AC(ua, l) for (i = 0; i < l; i++) SVPNG_U8C((ua)[i])
#define SVPNG_U16LC(u) do { SVPNG_U8C((u) & 255); SVPNG_U8C(((u) >> 8) & 255); } while(0)
#define SVPNG_U16C(u) do { SVPNG_U8C(((u) >> 8) & 255); SVPNG_U8C((u) & 255); } while(0)
#define SVPNG_U32C(u) do { SVPNG_U8C((u) >> 24); SVPNG_U8C(((u) >> 16) & 255); SVPNG_U8C(((u) >> 8) & 255); SVPNG_U8C((u) & 255); } while(0)
#define SVPNG_U8ADLER(u) do { SVPNG_U8C(u); a = (a + (u)) % 65521; b = (b + a) % 65521; } while(0)
#define SVPNG_U16ADLER(u) do { SVPNG_U16C(u); a = (a + (u)) % 65521; b = (b + a) % 65521; } while(0)
#define SVPNG_BEGIN(s, l) do { SVPNG_U32(l); c = ~0U; SVPNG_U8AC(s, 4); } while(0)
#define SVPNG_END() SVPNG_U32(~c)
    SVPNG_U8A("\x89PNG\r\n\32\n", 8);           /* Magic */
    SVPNG_BEGIN("IHDR", 13);                    /* IHDR chunk { */
    SVPNG_U32C(w); SVPNG_U32C(h);               /* Width & Height (8 bytes) */
    SVPNG_U8C(8);                               /* Depth=8 (1 byte) */
    if (gray)
        SVPNG_U8C(0);
    else
        SVPNG_U8C(alpha ? 6 : 2);               /* Color=Gray or True color with/without alpha (1 byte) */
    SVPNG_U8AC("\0\0\0", 3);                    /* Compression=Deflate, Filter=No, Interlace=No (3 bytes) */
    SVPNG_END();                                /* } */
    /* IDAT length: 2-byte zlib header + per row (5-byte stored-block
    ** header + p payload bytes) + 4-byte Adler-32 trailer. */
    SVPNG_BEGIN("IDAT", 2 + h * (5 + p) + 4);   /* IDAT chunk { */
    SVPNG_U8AC("\x78\1", 2);                    /* Deflate block begin (2 bytes) */
    for (y = 0; y < h; y++) {                   /* Each horizontal line makes a block for simplicity */
        SVPNG_U8C(y == h - 1);                  /* 1 for the last block, 0 for others (1 byte) */
        SVPNG_U16LC(p); SVPNG_U16LC(~p);        /* Size of block in little endian and its 1's complement (4 bytes) */
        SVPNG_U8ADLER(0);                       /* No filter prefix (1 byte) */
        for (x = 0; x < p - 1; x++, img++)
            SVPNG_U8ADLER(*img);                /* Image pixel data */
    }
    SVPNG_U32C((b << 16) | a);                  /* Deflate block end with adler (4 bytes) */
    SVPNG_END();                                /* } */
    SVPNG_BEGIN("IEND", 0); SVPNG_END();        /* IEND chunk {} */
}
#endif /* SVPNG_INC_ */
{ "language": "C" }
/***************************************************************************** * asm-offsets.h: asm offsets for aarch64 ***************************************************************************** * Copyright (C) 2014-2020 x264 project * * Authors: Janne Grunau <janne-x264@jannau.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA. * * This program is also available under a commercial proprietary license. * For more information, contact us at licensing@x264.com. *****************************************************************************/ #ifndef X264_AARCH64_ASM_OFFSETS_H #define X264_AARCH64_ASM_OFFSETS_H #define CABAC_I_LOW 0x00 #define CABAC_I_RANGE 0x04 #define CABAC_I_QUEUE 0x08 #define CABAC_I_BYTES_OUTSTANDING 0x0c #define CABAC_P_START 0x10 #define CABAC_P 0x18 #define CABAC_P_END 0x20 #define CABAC_F8_BITS_ENCODED 0x30 #define CABAC_STATE 0x34 #endif
{ "language": "C" }
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * SMU7 "Fusion" (APU) SMC firmware interface structures.
 *
 * NOTE(review): every struct in this header is part of the driver <-> SMC
 * firmware ABI.  The file is compiled under #pragma pack(push, 1), so field
 * order, field widths and the explicit padding members define the exact byte
 * layout the firmware expects.  Do not reorder, rename or resize any field
 * (this includes the "CurrenttState" spelling below, which must match the
 * firmware-side definition -- TODO confirm against the SMC spec).
 */
#ifndef SMU7_FUSION_H
#define SMU7_FUSION_H

#include "smu7.h"

/* Byte-pack everything: firmware expects no compiler-inserted padding. */
#pragma pack(push, 1)

/* Digital Temperature Estimation (DTE) table dimensions. */
#define SMU7_DTE_ITERATIONS 5
#define SMU7_DTE_SOURCES 5
#define SMU7_DTE_SINKS 3
#define SMU7_NUM_CPU_TES 2
#define SMU7_NUM_GPU_TES 1
#define SMU7_NUM_NON_TES 2

// All 'soft registers' should be uint32_t.
/* Driver-visible mailbox of firmware state ("soft registers"). */
struct SMU7_SoftRegisters
{
    uint32_t        RefClockFrequency;
    uint32_t        PmTimerP;
    uint32_t        FeatureEnables;
    uint32_t        HandshakeDisables;

    /* One per-PHY display configuration byte. */
    uint8_t         DisplayPhy1Config;
    uint8_t         DisplayPhy2Config;
    uint8_t         DisplayPhy3Config;
    uint8_t         DisplayPhy4Config;
    uint8_t         DisplayPhy5Config;
    uint8_t         DisplayPhy6Config;
    uint8_t         DisplayPhy7Config;
    uint8_t         DisplayPhy8Config;

    /* Running activity averages reported by the firmware. */
    uint32_t        AverageGraphicsA;
    uint32_t        AverageMemoryA;
    uint32_t        AverageGioA;

    /* Enabled-level counts per DPM domain. */
    uint8_t         SClkDpmEnabledLevels;
    uint8_t         MClkDpmEnabledLevels;
    uint8_t         LClkDpmEnabledLevels;
    uint8_t         PCIeDpmEnabledLevels;
    uint8_t         UVDDpmEnabledLevels;
    uint8_t         SAMUDpmEnabledLevels;
    uint8_t         ACPDpmEnabledLevels;
    uint8_t         VCEDpmEnabledLevels;

    /* DRAM logging buffer location/size (virtual + physical halves). */
    uint32_t        DRAM_LOG_ADDR_H;
    uint32_t        DRAM_LOG_ADDR_L;
    uint32_t        DRAM_LOG_PHY_ADDR_H;
    uint32_t        DRAM_LOG_PHY_ADDR_L;
    uint32_t        DRAM_LOG_BUFF_SIZE;

    uint32_t        UlvEnterC;          /* ULV (ultra-low-voltage) entry count */
    uint32_t        UlvTime;
    uint32_t        Reserved[3];

};

typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;

/* One SCLK (graphics) DPM performance level. */
struct SMU7_Fusion_GraphicsLevel
{
    uint32_t        MinVddNb;
    uint32_t        SclkFrequency;

    uint8_t         Vid;
    uint8_t         VidOffset;
    uint16_t        AT;

    uint8_t         PowerThrottle;
    uint8_t         GnbSlow;
    uint8_t         ForceNbPs1;

    uint8_t         SclkDid;            /* SCLK divider id */
    uint8_t         DisplayWatermark;
    uint8_t         EnabledForActivity;
    uint8_t         EnabledForThrottle;
    uint8_t         UpH;                /* up/down hysteresis */
    uint8_t         DownH;
    uint8_t         VoltageDownH;
    uint8_t         DeepSleepDivId;

    uint8_t         ClkBypassCntl;

    uint32_t        reserved;
};

typedef struct SMU7_Fusion_GraphicsLevel SMU7_Fusion_GraphicsLevel;

/* One LCLK (GIO) DPM level. */
struct SMU7_Fusion_GIOLevel
{
    uint8_t         EnabledForActivity;
    uint8_t         LclkDid;
    uint8_t         Vid;
    uint8_t         VoltageDownH;

    uint32_t        MinVddNb;

    uint16_t        ResidencyCounter;
    uint8_t         UpH;
    uint8_t         DownH;

    uint32_t        LclkFrequency;

    uint8_t         ActivityLevel;
    uint8_t         EnabledForThrottle;

    uint8_t         ClkBypassCntl;

    uint8_t         padding;
};

typedef struct SMU7_Fusion_GIOLevel SMU7_Fusion_GIOLevel;

// UVD VCLK/DCLK state (level) definition.
struct SMU7_Fusion_UvdLevel
{
    uint32_t        VclkFrequency;
    uint32_t        DclkFrequency;
    uint16_t        MinVddNb;
    uint8_t         VclkDivider;
    uint8_t         DclkDivider;

    uint8_t         VClkBypassCntl;
    uint8_t         DClkBypassCntl;

    uint8_t         padding[2];

};

typedef struct SMU7_Fusion_UvdLevel SMU7_Fusion_UvdLevel;

// Clocks for other external blocks (VCE, ACP, SAMU).
struct SMU7_Fusion_ExtClkLevel
{
    uint32_t        Frequency;
    uint16_t        MinVoltage;
    uint8_t         Divider;
    uint8_t         ClkBypassCntl;

    uint32_t        Reserved;
};
typedef struct SMU7_Fusion_ExtClkLevel SMU7_Fusion_ExtClkLevel;

/* Clock/voltage state used while in ACPI (S0 idle) mode. */
struct SMU7_Fusion_ACPILevel
{
    uint32_t    Flags;
    uint32_t    MinVddNb;
    uint32_t    SclkFrequency;
    uint8_t     SclkDid;
    uint8_t     GnbSlow;
    uint8_t     ForceNbPs1;
    uint8_t     DisplayWatermark;
    uint8_t     DeepSleepDivId;
    uint8_t     padding[3];
};

typedef struct SMU7_Fusion_ACPILevel SMU7_Fusion_ACPILevel;

/* North-bridge P-state DPM control knobs. */
struct SMU7_Fusion_NbDpm
{
    uint8_t DpmXNbPsHi;
    uint8_t DpmXNbPsLo;
    uint8_t Dpm0PgNbPsHi;
    uint8_t Dpm0PgNbPsLo;
    uint8_t EnablePsi1;
    uint8_t SkipDPM0;
    uint8_t SkipPG;
    uint8_t Hysteresis;
    uint8_t EnableDpmPstatePoll;
    uint8_t padding[3];
};

typedef struct SMU7_Fusion_NbDpm SMU7_Fusion_NbDpm;

/* Snapshot of all engine clocks for one state. */
struct SMU7_Fusion_StateInfo
{
    uint32_t SclkFrequency;
    uint32_t LclkFrequency;
    uint32_t VclkFrequency;
    uint32_t DclkFrequency;
    uint32_t SamclkFrequency;
    uint32_t AclkFrequency;
    uint32_t EclkFrequency;
    uint8_t DisplayWatermark;
    uint8_t McArbIndex;
    int8_t SclkIndex;
    int8_t MclkIndex;
};

typedef struct SMU7_Fusion_StateInfo SMU7_Fusion_StateInfo;

/* Master DPM table uploaded to the SMC.  Level-array bounds come from
 * smu7.h (SMU__NUM_SCLK_DPM_STATE, SMU7_MAX_LEVELS_*). */
struct SMU7_Fusion_DpmTable
{
    uint32_t                            SystemFlags;

    SMU7_PIDController                  GraphicsPIDController;
    SMU7_PIDController                  GioPIDController;

    uint8_t                            GraphicsDpmLevelCount;
    uint8_t                            GIOLevelCount;
    uint8_t                            UvdLevelCount;
    uint8_t                            VceLevelCount;

    uint8_t                            AcpLevelCount;
    uint8_t                            SamuLevelCount;
    uint16_t                           FpsHighT;

    SMU7_Fusion_GraphicsLevel         GraphicsLevel           [SMU__NUM_SCLK_DPM_STATE];
    SMU7_Fusion_ACPILevel             ACPILevel;
    SMU7_Fusion_UvdLevel              UvdLevel                [SMU7_MAX_LEVELS_UVD];
    SMU7_Fusion_ExtClkLevel           VceLevel                [SMU7_MAX_LEVELS_VCE];
    SMU7_Fusion_ExtClkLevel           AcpLevel                [SMU7_MAX_LEVELS_ACP];
    SMU7_Fusion_ExtClkLevel           SamuLevel               [SMU7_MAX_LEVELS_SAMU];

    /* Boot levels and polling intervals per engine. */
    uint8_t                           UvdBootLevel;
    uint8_t                           VceBootLevel;
    uint8_t                           AcpBootLevel;
    uint8_t                           SamuBootLevel;
    uint8_t                           UVDInterval;
    uint8_t                           VCEInterval;
    uint8_t                           ACPInterval;
    uint8_t                           SAMUInterval;

    uint8_t                           GraphicsBootLevel;
    uint8_t                           GraphicsInterval;
    uint8_t                           GraphicsThermThrottleEnable;
    uint8_t                           GraphicsVoltageChangeEnable;

    uint8_t                           GraphicsClkSlowEnable;
    uint8_t                           GraphicsClkSlowDivider;
    uint16_t                          FpsLowT;

    uint32_t                          DisplayCac;
    uint32_t                          LowSclkInterruptT;

    /* DRAM logging buffer (same layout as in SMU7_SoftRegisters). */
    uint32_t                          DRAM_LOG_ADDR_H;
    uint32_t                          DRAM_LOG_ADDR_L;
    uint32_t                          DRAM_LOG_PHY_ADDR_H;
    uint32_t                          DRAM_LOG_PHY_ADDR_L;
    uint32_t                          DRAM_LOG_BUFF_SIZE;

};

/* Separate DPM table for the GIO (LCLK) domain. */
struct SMU7_Fusion_GIODpmTable
{

    SMU7_Fusion_GIOLevel              GIOLevel                [SMU7_MAX_LEVELS_GIO];

    SMU7_PIDController                GioPIDController;

    uint32_t                          GIOLevelCount;

    uint8_t                           Enable;
    uint8_t                           GIOVoltageChangeEnable;
    uint8_t                           GIOBootLevel;
    uint8_t                           padding;
    uint8_t                           padding1[2];
    uint8_t                           TargetState;
    /* NOTE(review): "CurrenttState" spelling is part of the ABI; keep as-is. */
    uint8_t                           CurrenttState;
    uint8_t                           ThrottleOnHtc;
    uint8_t                           ThermThrottleStatus;
    uint8_t                           ThermThrottleTempSelect;
    uint8_t                           ThermThrottleEnable;
    uint16_t                          TemperatureLimitHigh;
    uint16_t                          TemperatureLimitLow;

};

typedef struct SMU7_Fusion_DpmTable SMU7_Fusion_DpmTable;
typedef struct SMU7_Fusion_GIODpmTable SMU7_Fusion_GIODpmTable;

#pragma pack(pop)

#endif
{ "language": "C" }
/* GIMP - The GNU Image Manipulation Program
 * Copyright (C) 1995 Spencer Kimball and Peter Mattis
 *
 * Screenshot plug-in
 * Copyright 1998-2007 Sven Neumann <sven@gimp.org>
 * Copyright 2003 Henrik Brix Andersen <brix@gimp.org>
 * Copyright 2016 Michael Natterer <mitch@gimp.org>
 * Copyright 2017 Jehan <jehan@gimp.org>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

#include "config.h"

#include <glib.h>

#include <libgimp/gimp.h>
#include <libgimp/gimpui.h>

#include "screenshot.h"
#include "screenshot-freedesktop.h"


/* Cached proxy for org.freedesktop.portal.Screenshot; created by
 * screenshot_freedesktop_available() and consumed (then cleared) by
 * screenshot_freedesktop_shoot(). */
static GDBusProxy *proxy = NULL;

/**
 * Probe whether the freedesktop screenshot portal is reachable on the
 * session bus.  On success the portal proxy is cached in `proxy` for the
 * later shoot() call.
 *
 * Returns: TRUE if the portal answered a Ping, FALSE otherwise.
 */
gboolean
screenshot_freedesktop_available (void)
{
  proxy = g_dbus_proxy_new_for_bus_sync (G_BUS_TYPE_SESSION,
                                         G_DBUS_PROXY_FLAGS_DO_NOT_AUTO_START,
                                         NULL,
                                         "org.freedesktop.portal.Desktop",
                                         "/org/freedesktop/portal/desktop",
                                         "org.freedesktop.portal.Screenshot",
                                         NULL, NULL);

  if (proxy)
    {
      GError   *error = NULL;
      GVariant *retval;

      retval = g_dbus_proxy_call_sync (proxy,
                                       "org.freedesktop.DBus.Peer.Ping",
                                       NULL,
                                       G_DBUS_CALL_FLAGS_NONE,
                                       -1, NULL, &error);
      /* FIX: the caller owns the GVariant returned by
       * g_dbus_proxy_call_sync(); it was previously leaked. */
      if (retval)
        g_variant_unref (retval);

      if (! error)
        return TRUE;

      g_clear_error (&error);
      g_object_unref (proxy);
      proxy = NULL;
    }

  return FALSE;
}

/**
 * Report what this backend can capture.
 *
 * Portal has no capabilities other than root screenshot!
 */
ScreenshotCapabilities
screenshot_freedesktop_get_capabilities (void)
{
  return 0;
}

/*
 * "g-signal" handler for the portal Request object.  Waits for the
 * "Response" signal, loads the screenshot file into a new GimpImage on
 * success, deletes the temporary file, and quits the nested main loop.
 */
static void
screenshot_freedesktop_dbus_signal (GDBusProxy  *proxy,
                                    gchar       *sender_name,
                                    gchar       *signal_name,
                                    GVariant    *parameters,
                                    GimpImage  **image)
{
  if (g_strcmp0 (signal_name, "Response") == 0)
    {
      GVariant *results;
      guint32   response;

      g_variant_get (parameters, "(u@a{sv})", &response, &results);

      /* Possible values:
       * 0: Success, the request is carried out
       * 1: The user cancelled the interaction
       * 2: The user interaction was ended in some other way
       * Cf. https://github.com/flatpak/xdg-desktop-portal/blob/master/data/org.freedesktop.portal.Request.xml
       */
      if (response == 0)
        {
          gchar *uri;

          if (g_variant_lookup (results, "uri", "s", &uri))
            {
              GFile *file = g_file_new_for_uri (uri);
              GFile *lfile;

              *image = gimp_file_load (GIMP_RUN_NONINTERACTIVE, file);

              /* FIX: gimp_image_set_file() does not take ownership of the
               * GFile; the previous code leaked one ref per screenshot. */
              lfile = g_file_new_for_path ("screenshot.png");
              gimp_image_set_file (*image, lfile);
              g_object_unref (lfile);

              /* Delete the actual file. */
              g_file_delete (file, NULL, NULL);

              g_object_unref (file);
              g_free (uri);
            }
        }
      g_variant_unref (results);

      /* Quit anyway. */
      gtk_main_quit ();
    }
}

/**
 * Take a root screenshot through the portal.
 *
 * @shootvals: capture parameters; only SHOOT_ROOT is supported here.
 * @monitor:   monitor whose color profile is assigned to the result.
 * @image:     out: the loaded screenshot image on success.
 * @error:     out: set when the portal call itself fails.
 *
 * Consumes the cached `proxy` (it is unreffed and reset to NULL).
 */
GimpPDBStatusType
screenshot_freedesktop_shoot (ScreenshotValues  *shootvals,
                              GdkMonitor        *monitor,
                              GimpImage        **image,
                              GError           **error)
{
  GVariant *retval;
  gchar    *opath = NULL;

  if (shootvals->shoot_type != SHOOT_ROOT)
    {
      /* This should not happen. */
      return GIMP_PDB_EXECUTION_ERROR;
    }

  if (shootvals->screenshot_delay > 0)
    screenshot_delay (shootvals->screenshot_delay);

  retval = g_dbus_proxy_call_sync (proxy, "Screenshot",
                                   g_variant_new ("(sa{sv})", "", NULL),
                                   G_DBUS_CALL_FLAGS_NONE,
                                   -1, NULL, error);
  g_object_unref (proxy);
  proxy = NULL;
  if (retval)
    {
      /* The reply carries the object path of the Request to listen on. */
      g_variant_get (retval, "(o)", &opath);
      g_variant_unref (retval);
    }

  if (opath)
    {
      GDBusProxy *proxy2 = NULL;

      proxy2 = g_dbus_proxy_new_for_bus_sync (G_BUS_TYPE_SESSION,
                                              G_DBUS_PROXY_FLAGS_DO_NOT_AUTO_START,
                                              NULL,
                                              "org.freedesktop.portal.Desktop",
                                              opath,
                                              "org.freedesktop.portal.Request",
                                              NULL, NULL);
      *image = NULL;

      /* FIX: guard against proxy creation failure; the previous code
       * connected a signal on a potentially-NULL object. */
      if (proxy2)
        {
          g_signal_connect (proxy2, "g-signal",
                            G_CALLBACK (screenshot_freedesktop_dbus_signal),
                            image);
          gtk_main ();

          g_object_unref (proxy2);
        }
      g_free (opath);

      /* Signal got a response. */
      if (*image)
        {
          GimpColorProfile *profile;

          /* Just assign profile of current monitor. This will work only
           * as long as this is a single-display setup.
           * We need to figure out how to do better color management for
           * portal screenshots.
           * TODO!
           */
          profile = gimp_monitor_get_color_profile (monitor);
          if (profile)
            {
              gimp_image_set_color_profile (*image, profile);
              g_object_unref (profile);
            }

          return GIMP_PDB_SUCCESS;
        }
    }

  return GIMP_PDB_EXECUTION_ERROR;
}
{ "language": "C" }
/*
 * VP9 SIMD optimizations
 *
 * Copyright (c) 2013 Ronald S. Bultje <rsbultje gmail com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * Shared macro machinery for declaring, wrapping and registering the
 * x86 SIMD versions of the VP9 DSP functions.  The macros paste
 * function names out of (operation, block size, direction, bit depth,
 * ISA) tuples; the generated names must match the symbols defined in
 * the assembly files, so none of the token-paste patterns below may be
 * altered.
 */
#ifndef AVCODEC_X86_VP9DSP_INIT_H
#define AVCODEC_X86_VP9DSP_INIT_H

#include "libavcodec/vp9dsp.h"

// hack to force-expand BPC
#define cat(a, bpp, b) a##bpp##b

/* Declare a full-pel copy/average function (put or avg). */
#define decl_fpel_func(avg, sz, bpp, opt) \
void ff_vp9_##avg##sz##bpp##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
                                   const uint8_t *src, ptrdiff_t src_stride, \
                                   int h, int mx, int my)

/* Declare one 1-D 8-tap MC function for a given direction (h/v). */
#define decl_mc_func(avg, sz, dir, opt, type, f_sz, bpp) \
void ff_vp9_##avg##_8tap_1d_##dir##_##sz##_##bpp##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
                                                         const uint8_t *src, ptrdiff_t src_stride, \
                                                         int h, const type (*filter)[f_sz])

/* put/avg x h/v for one block size. */
#define decl_mc_funcs(sz, opt, type, fsz, bpp) \
decl_mc_func(put, sz, h, opt, type, fsz, bpp); \
decl_mc_func(avg, sz, h, opt, type, fsz, bpp); \
decl_mc_func(put, sz, v, opt, type, fsz, bpp); \
decl_mc_func(avg, sz, v, opt, type, fsz, bpp)

/* Declare one intra-prediction function for an sz x sz block. */
#define decl_ipred_fn(type, sz, bpp, opt) \
void ff_vp9_ipred_##type##_##sz##x##sz##_##bpp##_##opt(uint8_t *dst, \
                                                       ptrdiff_t stride, \
                                                       const uint8_t *l, \
                                                       const uint8_t *a)

/* All four transform sizes; 4x4 may use a different ISA than 8/16/32. */
#define decl_ipred_fns(type, bpp, opt4, opt8_16_32) \
decl_ipred_fn(type,  4, bpp, opt4); \
decl_ipred_fn(type,  8, bpp, opt8_16_32); \
decl_ipred_fn(type, 16, bpp, opt8_16_32); \
decl_ipred_fn(type, 32, bpp, opt8_16_32)

/* Declare one inverse-transform+add function (row type x column type). */
#define decl_itxfm_func(typea, typeb, size, bpp, opt) \
void cat(ff_vp9_##typea##_##typeb##_##size##x##size##_add_, bpp, _##opt)(uint8_t *dst, \
                                                                         ptrdiff_t stride, \
                                                                         int16_t *block, \
                                                                         int eob)

/* All four idct/iadst combinations for one size. */
#define decl_itxfm_funcs(size, bpp, opt) \
decl_itxfm_func(idct,  idct,  size, bpp, opt); \
decl_itxfm_func(iadst, idct,  size, bpp, opt); \
decl_itxfm_func(idct,  iadst, size, bpp, opt); \
decl_itxfm_func(iadst, iadst, size, bpp, opt)

/* Synthesize an sz-wide MC function from two calls to the hsz-wide one
 * (hszb = half width in bytes, used to offset the second call). */
#define mc_rep_func(avg, sz, hsz, hszb, dir, opt, type, f_sz, bpp) \
static av_always_inline void \
ff_vp9_##avg##_8tap_1d_##dir##_##sz##_##bpp##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
                                                    const uint8_t *src, ptrdiff_t src_stride, \
                                                    int h, const type (*filter)[f_sz]) \
{ \
    ff_vp9_##avg##_8tap_1d_##dir##_##hsz##_##bpp##_##opt(dst, dst_stride, src, \
                                                         src_stride, h, filter); \
    ff_vp9_##avg##_8tap_1d_##dir##_##hsz##_##bpp##_##opt(dst + hszb, dst_stride, src + hszb, \
                                                         src_stride, h, filter); \
}

#define mc_rep_funcs(sz, hsz, hszb, opt, type, fsz, bpp) \
mc_rep_func(put, sz, hsz, hszb, h, opt, type, fsz, bpp) \
mc_rep_func(avg, sz, hsz, hszb, h, opt, type, fsz, bpp) \
mc_rep_func(put, sz, hsz, hszb, v, opt, type, fsz, bpp) \
mc_rep_func(avg, sz, hsz, hszb, v, opt, type, fsz, bpp)

/* Wrap a 1-D asm kernel into the dsp-table signature, selecting the
 * filter coefficients from the mx/my sub-pel position (dvar). */
#define filter_8tap_1d_fn(op, sz, f, f_opt, fname, dir, dvar, bpp, opt) \
static void op##_8tap_##fname##_##sz##dir##_##bpp##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
                                                          const uint8_t *src, ptrdiff_t src_stride, \
                                                          int h, int mx, int my) \
{ \
    ff_vp9_##op##_8tap_1d_##dir##_##sz##_##bpp##_##opt(dst, dst_stride, src, src_stride, \
                                                       h, ff_filters_##f_opt[f][dvar - 1]); \
}

/* Instantiate the wrapper for the three 8-tap filter families. */
#define filters_8tap_1d_fn(op, sz, dir, dvar, bpp, opt, f_opt) \
filter_8tap_1d_fn(op, sz, FILTER_8TAP_REGULAR, f_opt, regular, dir, dvar, bpp, opt) \
filter_8tap_1d_fn(op, sz, FILTER_8TAP_SHARP,   f_opt, sharp,   dir, dvar, bpp, opt) \
filter_8tap_1d_fn(op, sz, FILTER_8TAP_SMOOTH,  f_opt, smooth,  dir, dvar, bpp, opt)

/* Horizontal (mx) and vertical (my) variants. */
#define filters_8tap_1d_fn2(op, sz, bpp, opt, f_opt) \
filters_8tap_1d_fn(op, sz, h, mx, bpp, opt, f_opt) \
filters_8tap_1d_fn(op, sz, v, my, bpp, opt, f_opt)

/* All block sizes; 4-wide may use a different ISA. */
#define filters_8tap_1d_fn3(op, bpp, opt4, opt8, f_opt) \
filters_8tap_1d_fn2(op, 64, bpp, opt8, f_opt) \
filters_8tap_1d_fn2(op, 32, bpp, opt8, f_opt) \
filters_8tap_1d_fn2(op, 16, bpp, opt8, f_opt) \
filters_8tap_1d_fn2(op,  8, bpp, opt8, f_opt) \
filters_8tap_1d_fn2(op,  4, bpp, opt4, f_opt)

/* 2-D (hv) filter: horizontal pass into an aligned temp buffer
 * (h + 7 rows for the 8-tap support), then vertical pass to dst. */
#define filter_8tap_2d_fn(op, sz, f, f_opt, fname, align, bpp, bytes, opt) \
static void op##_8tap_##fname##_##sz##hv_##bpp##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
                                                       const uint8_t *src, ptrdiff_t src_stride, \
                                                       int h, int mx, int my) \
{ \
    LOCAL_ALIGNED_##align(uint8_t, temp, [71 * 64 * bytes]); \
    ff_vp9_put_8tap_1d_h_##sz##_##bpp##_##opt(temp, 64 * bytes, src - 3 * src_stride, \
                                              src_stride, h + 7, \
                                              ff_filters_##f_opt[f][mx - 1]); \
    ff_vp9_##op##_8tap_1d_v_##sz##_##bpp##_##opt(dst, dst_stride, temp + 3 * bytes * 64, \
                                                 64 * bytes, h, \
                                                 ff_filters_##f_opt[f][my - 1]); \
}

#define filters_8tap_2d_fn(op, sz, align, bpp, bytes, opt, f_opt) \
filter_8tap_2d_fn(op, sz, FILTER_8TAP_REGULAR, f_opt, regular, align, bpp, bytes, opt) \
filter_8tap_2d_fn(op, sz, FILTER_8TAP_SHARP,   f_opt, sharp,   align, bpp, bytes, opt) \
filter_8tap_2d_fn(op, sz, FILTER_8TAP_SMOOTH,  f_opt, smooth,  align, bpp, bytes, opt)

#define filters_8tap_2d_fn2(op, align, bpp, bytes, opt4, opt8, f_opt) \
filters_8tap_2d_fn(op, 64, align, bpp, bytes, opt8, f_opt) \
filters_8tap_2d_fn(op, 32, align, bpp, bytes, opt8, f_opt) \
filters_8tap_2d_fn(op, 16, align, bpp, bytes, opt8, f_opt) \
filters_8tap_2d_fn(op,  8, align, bpp, bytes, opt8, f_opt) \
filters_8tap_2d_fn(op,  4, align, bpp, bytes, opt4, f_opt)

/* Register a full-pel function for all four filter slots (the filter
 * does not matter at integer positions). */
#define init_fpel_func(idx1, idx2, sz, type, bpp, opt) \
    dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] = \
    dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] = \
    dsp->mc[idx1][FILTER_8TAP_SHARP  ][idx2][0][0] = \
    dsp->mc[idx1][FILTER_BILINEAR    ][idx2][0][0] = ff_vp9_##type##sz##bpp##_##opt

/* Register the three 8-tap wrappers for one (idxh, idxv) sub-pel slot. */
#define init_subpel1(idx1, idx2, idxh, idxv, sz, dir, type, bpp, opt) \
    dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][idxh][idxv] = \
        type##_8tap_smooth_##sz##dir##_##bpp##_##opt; \
    dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][idxh][idxv] = \
        type##_8tap_regular_##sz##dir##_##bpp##_##opt; \
    dsp->mc[idx1][FILTER_8TAP_SHARP  ][idx2][idxh][idxv] = \
        type##_8tap_sharp_##sz##dir##_##bpp##_##opt

/* hv, v-only and h-only slots for one block size. */
#define init_subpel2(idx1, idx2, sz, type, bpp, opt) \
    init_subpel1(idx1, idx2, 1, 1, sz, hv, type, bpp, opt); \
    init_subpel1(idx1, idx2, 0, 1, sz, v,  type, bpp, opt); \
    init_subpel1(idx1, idx2, 1, 0, sz, h,  type, bpp, opt)

#define init_subpel3_32_64(idx, type, bpp, opt) \
    init_subpel2(0, idx, 64, type, bpp, opt); \
    init_subpel2(1, idx, 32, type, bpp, opt)

#define init_subpel3_8to64(idx, type, bpp, opt) \
    init_subpel3_32_64(idx, type, bpp, opt); \
    init_subpel2(2, idx, 16, type, bpp, opt); \
    init_subpel2(3, idx,  8, type, bpp, opt)

#define init_subpel3(idx, type, bpp, opt) \
    init_subpel3_8to64(idx, type, bpp, opt); \
    init_subpel2(4, idx,  4, type, bpp, opt)

/* Register one intra predictor in the dsp table. */
#define init_ipred_func(type, enum, sz, bpp, opt) \
    dsp->intra_pred[TX_##sz##X##sz][enum##_PRED] = \
        cat(ff_vp9_ipred_##type##_##sz##x##sz##_, bpp, _##opt)

#define init_8_16_32_ipred_funcs(type, enum, bpp, opt) \
    init_ipred_func(type, enum,  8, bpp, opt); \
    init_ipred_func(type, enum, 16, bpp, opt); \
    init_ipred_func(type, enum, 32, bpp, opt)

#define init_ipred_funcs(type, enum, bpp, opt) \
    init_ipred_func(type, enum, 4, bpp, opt); \
    init_8_16_32_ipred_funcs(type, enum, bpp, opt)

/* Per-bit-depth init entry points implemented in the companion .c files. */
void ff_vp9dsp_init_10bpp_x86(VP9DSPContext *dsp, int bitexact);
void ff_vp9dsp_init_12bpp_x86(VP9DSPContext *dsp, int bitexact);
void ff_vp9dsp_init_16bpp_x86(VP9DSPContext *dsp);

#endif /* AVCODEC_X86_VP9DSP_INIT_H */
{ "language": "C" }
/*
  Simple DirectMedia Layer
  Copyright (C) 1997-2017 Sam Lantinga <slouken@libsdl.org>

  This software is provided 'as-is', without any express or implied
  warranty.  In no event will the authors be held liable for any damages
  arising from the use of this software.

  Permission is granted to anyone to use this software for any purpose,
  including commercial applications, and to alter it and redistribute it
  freely, subject to the following restrictions:

  1. The origin of this software must not be misrepresented; you must not
     claim that you wrote the original software. If you use this software
     in a product, an acknowledgment in the product documentation would be
     appreciated but is not required.
  2. Altered source versions must be plainly marked as such, and must not be
     misrepresented as being the original software.
  3. This notice may not be removed or altered from any source distribution.
*/
#include "../../SDL_internal.h"

/* Per-device private data for SDL's ALSA audio backend. */
#ifndef SDL_ALSA_audio_h_
#define SDL_ALSA_audio_h_

#include <alsa/asoundlib.h>

#include "../SDL_sysaudio.h"

/* Hidden "this" pointer for the audio functions */
/* NOTE(review): "_THIS" is a reserved identifier (leading underscore +
 * capital), but it is the established SDL-internal convention used by
 * every audio backend, so it is kept as-is here. */
#define _THIS SDL_AudioDevice *this

struct SDL_PrivateAudioData
{
    /* The audio device handle */
    snd_pcm_t *pcm_handle;

    /* Raw mixing buffer */
    Uint8 *mixbuf;
    int mixlen;             /* size of mixbuf in bytes */

    /* swizzle function */
    /* Reorders channels between SDL's layout and ALSA's before output. */
    void (*swizzle_func)(_THIS, void *buffer, Uint32 bufferlen);
};

#endif /* SDL_ALSA_audio_h_ */

/* vi: set ts=4 sw=4 expandtab: */
{ "language": "C" }
/*
 * arch/arm/include/asm/arch-txl/io.h
 *
 * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/*
 * Memory-mapped register access helpers for the Amlogic TXL SoC.
 *
 * NOTE(review): the *_REG_BITS / *_REG_MASK macros evaluate their `reg`
 * argument multiple times (read-modify-write).  This is the established
 * pattern in this codebase and callers pass plain constants, but do not
 * pass expressions with side effects.
 */
#ifndef __MACH_MESSON_REGS_IO_H
#define __MACH_MESSON_REGS_IO_H

#ifndef __ASSEMBLY__
#include <asm/io.h>

/* Physical base addresses of the register buses. */
#define IO_CBUS_BASE           (0xFFD00000L)
#define IO_AXI_BUS_BASE        (0xFFB00000L) /* gpv */
#define IO_AHB_BUS_BASE        (0xFF500000L) /* usb0 */
#define IO_APB_BUS_BASE        (0xFFFC0000L) /* AHB SRAM, sec/sys ahb? txlx_mem_map.xlsx */
#define IO_APB_HDMI_BUS_BASE   (0xFFE00000L) /* */
#define IO_VPU_BUS_BASE        (0xFF900000L) /* VPU */

/* CBUS/AXI/AHB/VPU take word-indexed register numbers (offset = reg << 2);
 * APB and APB-HDMI take byte offsets directly. */
#define CBUS_REG_OFFSET(reg)   ((reg) << 2)
#define CBUS_REG_ADDR(reg)     (IO_CBUS_BASE + CBUS_REG_OFFSET(reg))

#define AXI_REG_OFFSET(reg)    ((reg) << 2)
#define AXI_REG_ADDR(reg)      (IO_AXI_BUS_BASE + AXI_REG_OFFSET(reg))

#define AHB_REG_OFFSET(reg)    ((reg) << 2)
#define AHB_REG_ADDR(reg)      (IO_AHB_BUS_BASE + AHB_REG_OFFSET(reg))

#define VPU_REG_OFFSET(reg)    ((reg) << 2)
#define VPU_REG_ADDR(reg)      (IO_VPU_BUS_BASE + VPU_REG_OFFSET(reg))

#define APB_REG_OFFSET(reg)    (reg)
#define APB_REG_ADDR(reg)      (IO_APB_BUS_BASE + APB_REG_OFFSET(reg))
/* Valid APB addresses must be 4-byte aligned. */
#define APB_REG_ADDR_VALID(reg)  (((unsigned long)(reg) & 3) == 0)

#define APB_HDMI_REG_OFFSET(reg)    (reg)
#define APB_HDMI_REG_ADDR(reg)      (IO_APB_HDMI_BUS_BASE + APB_HDMI_REG_OFFSET(reg))
#define APB_HDMI_REG_ADDR_VALID(reg)  (((unsigned long)(reg) & 3) == 0)

/* ---- CBUS accessors: raw read/write, bit-field update, mask set/clear. */
#define WRITE_CBUS_REG(reg, val) __raw_writel(val, CBUS_REG_ADDR(reg))
#define READ_CBUS_REG(reg) (__raw_readl(CBUS_REG_ADDR(reg)))
#define WRITE_CBUS_REG_BITS(reg, val, start, len) \
	WRITE_CBUS_REG(reg, (READ_CBUS_REG(reg) & ~(((1L<<(len))-1)<<(start)) )| ((unsigned)((val)&((1L<<(len))-1)) << (start)))
#define READ_CBUS_REG_BITS(reg, start, len) \
	((READ_CBUS_REG(reg) >> (start)) & ((1L<<(len))-1))
#define CLEAR_CBUS_REG_MASK(reg, mask) WRITE_CBUS_REG(reg, (READ_CBUS_REG(reg)&(~(mask))))
#define SET_CBUS_REG_MASK(reg, mask) WRITE_CBUS_REG(reg, (READ_CBUS_REG(reg)|(mask)))

/* ---- AXI accessors (same pattern as CBUS). */
#define WRITE_AXI_REG(reg, val) __raw_writel(val, AXI_REG_ADDR(reg))
#define READ_AXI_REG(reg) (__raw_readl(AXI_REG_ADDR(reg)))
#define WRITE_AXI_REG_BITS(reg, val, start, len) \
	WRITE_AXI_REG(reg, (READ_AXI_REG(reg) & ~(((1L<<(len))-1)<<(start)) )| ((unsigned)((val)&((1L<<(len))-1)) << (start)))
#define READ_AXI_REG_BITS(reg, start, len) \
	((READ_AXI_REG(reg) >> (start)) & ((1L<<(len))-1))
#define CLEAR_AXI_REG_MASK(reg, mask) WRITE_AXI_REG(reg, (READ_AXI_REG(reg)&(~(mask))))
#define SET_AXI_REG_MASK(reg, mask) WRITE_AXI_REG(reg, (READ_AXI_REG(reg)|(mask)))

/* ---- AHB accessors (same pattern as CBUS). */
#define WRITE_AHB_REG(reg, val) __raw_writel(val, AHB_REG_ADDR(reg))
#define READ_AHB_REG(reg) (__raw_readl(AHB_REG_ADDR(reg)))
#define WRITE_AHB_REG_BITS(reg, val, start, len) \
	WRITE_AHB_REG(reg, (READ_AHB_REG(reg) & ~(((1L<<(len))-1)<<(start)) )| ((unsigned)((val)&((1L<<(len))-1)) << (start)))
#define READ_AHB_REG_BITS(reg, start, len) \
	((READ_AHB_REG(reg) >> (start)) & ((1L<<(len))-1))
#define CLEAR_AHB_REG_MASK(reg, mask) WRITE_AHB_REG(reg, (READ_AHB_REG(reg)&(~(mask))))
#define SET_AHB_REG_MASK(reg, mask) WRITE_AHB_REG(reg, (READ_AHB_REG(reg)|(mask)))

/* ---- APB accessors (byte-offset addressed). */
#define WRITE_APB_REG(reg, val) __raw_writel(val, APB_REG_ADDR(reg))
#define READ_APB_REG(reg) (__raw_readl(APB_REG_ADDR(reg)))
#define WRITE_APB_REG_BITS(reg, val, start, len) \
	WRITE_APB_REG(reg, (READ_APB_REG(reg) & ~(((1L<<(len))-1)<<(start)) )| ((unsigned)((val)&((1L<<(len))-1)) << (start)))
#define READ_APB_REG_BITS(reg, start, len) \
	((READ_APB_REG(reg) >> (start)) & ((1L<<(len))-1))
#define CLEAR_APB_REG_MASK(reg, mask) WRITE_APB_REG(reg, (READ_APB_REG(reg)&(~(mask))))
#define SET_APB_REG_MASK(reg, mask) WRITE_APB_REG(reg, (READ_APB_REG(reg)|(mask)))

/* ---- APB-HDMI accessors (byte-offset addressed). */
#define WRITE_APB_HDMI_REG(reg, val) __raw_writel(val, APB_HDMI_REG_ADDR(reg))
#define READ_APB_HDMI_REG(reg) (__raw_readl(APB_HDMI_REG_ADDR(reg)))
#define WRITE_APB_HDMI_REG_BITS(reg, val, start, len) \
	WRITE_APB_HDMI_REG(reg, (READ_APB_HDMI_REG(reg) & ~(((1L<<(len))-1)<<(start)) )| ((unsigned)((val)&((1L<<(len))-1)) << (start)))
#define READ_APB_HDMI_REG_BITS(reg, start, len) \
	((READ_APB_HDMI_REG(reg) >> (start)) & ((1L<<(len))-1))
#define CLEAR_APB_HDMI_REG_MASK(reg, mask) WRITE_APB_HDMI_REG(reg, (READ_APB_HDMI_REG(reg)&(~(mask))))
#define SET_APB_HDMI_REG_MASK(reg, mask) WRITE_APB_HDMI_REG(reg, (READ_APB_HDMI_REG(reg)|(mask)))

/* for back compatible alias */
/* "MPEG" register names are legacy aliases for the CBUS accessors. */
#define WRITE_MPEG_REG(reg, val) \
	WRITE_CBUS_REG(reg, val)
#define READ_MPEG_REG(reg) \
	READ_CBUS_REG(reg)
#define WRITE_MPEG_REG_BITS(reg, val, start, len) \
	WRITE_CBUS_REG_BITS(reg, val, start, len)
#define READ_MPEG_REG_BITS(reg, start, len) \
	READ_CBUS_REG_BITS(reg, start, len)
#define CLEAR_MPEG_REG_MASK(reg, mask) \
	CLEAR_CBUS_REG_MASK(reg, mask)
#define SET_MPEG_REG_MASK(reg, mask) \
	SET_CBUS_REG_MASK(reg, mask)

#endif

#endif
{ "language": "C" }
/* * libmad - MPEG audio decoder library * Copyright (C) 2000-2004 Underbit Technologies, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * $Id: minimad.c,v 1.4 2004/01/23 09:41:32 rob Exp $ */ # include <stdio.h> # include <unistd.h> # include <sys/stat.h> # include <sys/mman.h> # include "mad.h" /* * This is perhaps the simplest example use of the MAD high-level API. * Standard input is mapped into memory via mmap(), then the high-level API * is invoked with three callbacks: input, output, and error. The output * callback converts MAD's high-resolution PCM samples to 16 bits, then * writes them to standard output in little-endian, stereo-interleaved * format. */ static int decode(unsigned char const *, unsigned long); int main(int argc, char *argv[]) { struct stat stat; void *fdm; if (argc != 1) return 1; if (fstat(STDIN_FILENO, &stat) == -1 || stat.st_size == 0) return 2; fdm = mmap(0, stat.st_size, PROT_READ, MAP_SHARED, STDIN_FILENO, 0); if (fdm == MAP_FAILED) return 3; decode(fdm, stat.st_size); if (munmap(fdm, stat.st_size) == -1) return 4; return 0; } /* * This is a private message structure. A generic pointer to this structure * is passed to each of the callback functions. Put here any data you need * to access from within the callbacks. 
*/ struct buffer { unsigned char const *start; unsigned long length; }; /* * This is the input callback. The purpose of this callback is to (re)fill * the stream buffer which is to be decoded. In this example, an entire file * has been mapped into memory, so we just call mad_stream_buffer() with the * address and length of the mapping. When this callback is called a second * time, we are finished decoding. */ static enum mad_flow input(void *data, struct mad_stream *stream) { struct buffer *buffer = data; if (!buffer->length) return MAD_FLOW_STOP; mad_stream_buffer(stream, buffer->start, buffer->length); buffer->length = 0; return MAD_FLOW_CONTINUE; } /* * The following utility routine performs simple rounding, clipping, and * scaling of MAD's high-resolution samples down to 16 bits. It does not * perform any dithering or noise shaping, which would be recommended to * obtain any exceptional audio quality. It is therefore not recommended to * use this routine if high-quality output is desired. */ static inline signed int scale(mad_fixed_t sample) { /* round */ sample += (1L << (MAD_F_FRACBITS - 16)); /* clip */ if (sample >= MAD_F_ONE) sample = MAD_F_ONE - 1; else if (sample < -MAD_F_ONE) sample = -MAD_F_ONE; /* quantize */ return sample >> (MAD_F_FRACBITS + 1 - 16); } /* * This is the output callback function. It is called after each frame of * MPEG audio data has been completely decoded. The purpose of this callback * is to output (or play) the decoded PCM audio. 
*/ static enum mad_flow output(void *data, struct mad_header const *header, struct mad_pcm *pcm) { unsigned int nchannels, nsamples; mad_fixed_t const *left_ch, *right_ch; /* pcm->samplerate contains the sampling frequency */ nchannels = pcm->channels; nsamples = pcm->length; left_ch = pcm->samples[0]; right_ch = pcm->samples[1]; while (nsamples--) { signed int sample; /* output sample(s) in 16-bit signed little-endian PCM */ sample = scale(*left_ch++); putchar((sample >> 0) & 0xff); putchar((sample >> 8) & 0xff); if (nchannels == 2) { sample = scale(*right_ch++); putchar((sample >> 0) & 0xff); putchar((sample >> 8) & 0xff); } } return MAD_FLOW_CONTINUE; } /* * This is the error callback function. It is called whenever a decoding * error occurs. The error is indicated by stream->error; the list of * possible MAD_ERROR_* errors can be found in the mad.h (or stream.h) * header file. */ static enum mad_flow error(void *data, struct mad_stream *stream, struct mad_frame *frame) { struct buffer *buffer = data; fprintf(stderr, "decoding error 0x%04x (%s) at byte offset %u\n", stream->error, mad_stream_errorstr(stream), stream->this_frame - buffer->start); /* return MAD_FLOW_BREAK here to stop decoding (and propagate an error) */ return MAD_FLOW_CONTINUE; } /* * This is the function called by main() above to perform all the decoding. * It instantiates a decoder object and configures it with the input, * output, and error callback functions above. A single call to * mad_decoder_run() continues until a callback function returns * MAD_FLOW_STOP (to stop decoding) or MAD_FLOW_BREAK (to stop decoding and * signal an error). 
*/ static int decode(unsigned char const *start, unsigned long length) { struct buffer buffer; struct mad_decoder decoder; int result; /* initialize our private message structure */ buffer.start = start; buffer.length = length; /* configure input, output, and error functions */ mad_decoder_init(&decoder, &buffer, input, 0 /* header */, 0 /* filter */, output, error, 0 /* message */); /* start decoding */ result = mad_decoder_run(&decoder, MAD_DECODER_MODE_SYNC); /* release the decoder */ mad_decoder_finish(&decoder); return result; }
{ "language": "C" }
/////////////////////////////////////////////////////////////////////////
// $Id$
/////////////////////////////////////////////////////////////////////////
/*
 *  PCIDEV: PCI host device mapping
 *  Copyright (C) 2003 - Frank Cornelis
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License version 2 as published by the Free Software Foundation.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */

/* Bochs device model that maps a real (host) PCI device into the guest. */
#ifndef BX_IODEV_PCIDEV_H
#define BX_IODEV_PCIDEV_H

/* SMF = "static member functions" build mode: device access goes through
 * the global adapter object instead of an instance pointer. */
#if BX_USE_PCIDEV_SMF
#  define BX_PCIDEV_THIS thePciDevAdapter->
#  define BX_PCIDEV_THIS_ thePciDevAdapter
#else
#  define BX_PCIDEV_THIS this->
#  define BX_PCIDEV_THIS_ this
#endif

/* One PCI BAR region mirrored from the host device. */
struct region_struct {
  Bit32u config_value;      /* value written to the config-space BAR */
  Bit32u start; // can change
  Bit32u size;
  Bit32u host_start; // never changes!!!
  class bx_pcidev_c *pcidev;  /* back-pointer to the owning device model */
};

class bx_pcidev_c : public bx_devmodel_c, public bx_pci_device_stub_c {
public:
  bx_pcidev_c();
  virtual ~bx_pcidev_c();
  virtual void init(void);
  virtual void reset(unsigned type);
  /* PCI configuration-space access, forwarded to the host device. */
  virtual Bit32u pci_read_handler(Bit8u address, unsigned io_len);
  virtual void pci_write_handler(Bit8u address, Bit32u value, unsigned io_len);

  int pcidev_fd; // to access the pcidev
  // resource mapping
  struct region_struct regions[6];  /* one entry per possible BAR */
  Bit8u devfunc;
  Bit8u intpin;
  Bit8u irq;

private:
  /* I/O port trampolines registered with the Bochs I/O dispatcher;
   * `param` carries the region_struct for the accessed BAR. */
  static Bit32u read_handler(void *param, Bit32u address, unsigned io_len);
  static void write_handler(void *param, Bit32u address, Bit32u value, unsigned io_len);
#if !BX_USE_PCIDEV_SMF
  Bit32u read(void *param, Bit32u address, unsigned io_len);
  void write(void *param, Bit32u address, Bit32u value, unsigned io_len);
#endif
};

#endif
{ "language": "C" }
/** * @file sip-sec-digest-test.c * * pidgin-sipe * * Copyright (C) 2013-2019 SIPE Project <http://sipe.sourceforge.net/> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <stdlib.h> #include <stdio.h> #include <stdarg.h> #include <stdint.h> #include <glib.h> #include "sip-transport.h" #include "sipe-common.h" #include "sipe-crypt.h" #include "uuid.h" #define SIP_SEC_DIGEST_COMPILING_TEST static const gchar *cnonce_fixed; #include "sip-sec-digest.c" /* * Stubs */ gboolean sipe_backend_debug_enabled(void) { return(TRUE); } void sipe_backend_debug_literal(sipe_debug_level level, const gchar *msg) { printf("DEBUG(%d): %s\n", level, msg); } void sipe_backend_debug(sipe_debug_level level, const gchar *format, ...) 
{ va_list ap; gchar *newformat = g_strdup_printf("DEBUG(%d): %s\n", level, format); va_start(ap, format); vprintf(newformat, ap); va_end(ap); g_free(newformat); } const gchar *sip_transport_epid(SIPE_UNUSED_PARAMETER struct sipe_core_private *sipe_private) { return(NULL); } const gchar *sip_transport_ip_address(SIPE_UNUSED_PARAMETER struct sipe_core_private *sipe_private) { return(NULL); } char *generateUUIDfromEPID(SIPE_UNUSED_PARAMETER const gchar *epid) { return(NULL); } char *sipe_get_epid(SIPE_UNUSED_PARAMETER const char *self_sip_uri, SIPE_UNUSED_PARAMETER const char *hostname, SIPE_UNUSED_PARAMETER const char *ip_address) { return(NULL); } /* needed when linking against NSS */ void md4sum(const uint8_t *data, uint32_t length, uint8_t *digest); void md4sum(SIPE_UNUSED_PARAMETER const uint8_t *data, SIPE_UNUSED_PARAMETER uint32_t length, SIPE_UNUSED_PARAMETER uint8_t *digest) { } /* * Tester code */ #define PARSED_USERNAME 0 #define PARSED_REALM 1 #define PARSED_NONCE 2 #define PARSED_URI 3 #define PARSED_QOP 4 #define PARSED_NC 5 #define PARSED_CNONCE 6 #define PARSED_RESPONSE 7 #define PARSED_OPAQUE 8 #define PARSED_MAX 9 static void parse(const gchar *string, gchar *parsed[PARSED_MAX]) { const gchar *header; const gchar *param; guint i; for (i = 0; i < PARSED_MAX; i++) parsed[i] = NULL; if (strstr(string, "Digest ") == NULL) return; header = string + 7; /* skip white space */ while (*header == ' ') header++; /* start of next parameter value */ while ((param = strchr(header, '=')) != NULL) { const gchar *end; /* parameter value type */ param++; if (*param == '"') { /* string: xyz="..."(,) */ end = strchr(++param, '"'); if (!end) { SIPE_DEBUG_ERROR("parse: corrupted string parameter near '%s'", header); break; } } else { /* number: xyz=12345(,) */ end = strchr(param, ','); if (!end) { /* last parameter */ end = param + strlen(param); } } #define COMPARE(string, index) \ if (g_str_has_prefix(header, #string "=")) { \ g_free(parsed[ PARSED_ ## index]); \ 
parsed[ PARSED_ ## index] = g_strndup(param, end - param); \ } else COMPARE(username, USERNAME) COMPARE(realm, REALM) COMPARE(nonce, NONCE) COMPARE(uri, URI) COMPARE(qop, QOP) COMPARE(nc, NC) COMPARE(cnonce, CNONCE) COMPARE(response, RESPONSE) COMPARE(opaque, OPAQUE) { /* ignore */ } /* skip to next parameter */ while ((*end == '"') || (*end == ',') || (*end == ' ')) end++; header = end; } } static guint expected(const gchar *reference, const gchar *testvalue) { gchar *reference_parsed[PARSED_MAX]; gchar *testvalue_parsed[PARSED_MAX]; guint i; guint failed = 0; parse(reference, reference_parsed); parse(testvalue, testvalue_parsed); for (i = 0; i < PARSED_MAX; i++) { gchar *ref = reference_parsed[i]; gchar *test = testvalue_parsed[i]; if (!sipe_strequal(ref, test) && (ref || test)) { SIPE_DEBUG_ERROR("FAILED(%d): expected '%s' got '%s'", i, ref, test); failed = 1; } g_free(test); g_free(ref); } SIPE_DEBUG_INFO("Response: %s", testvalue); return(failed); } int main(SIPE_UNUSED_PARAMETER int argc, SIPE_UNUSED_PARAMETER char *argv[]) { guint failed = 0; /* Initialization for crypto backend (test mode) */ sipe_crypto_init(FALSE); #define RUNTEST(_user, _password, _cnonce, _header, _method, _uri, _reference) \ { \ struct sipe_core_private sipe_private; \ gchar *response; \ printf("\n"); \ sipe_private.authuser = _user ; \ sipe_private.password = _password ; \ cnonce_fixed = _cnonce; \ response = sip_sec_digest_authorization(&sipe_private, _header, _method, _uri); \ failed += expected(_reference, response); \ g_free(response); \ } /* * RFC-2617 Section 3.5 */ RUNTEST("Mufasa", "Circle Of Life", "0a4f113b", "realm=\"testrealm@host.com\", qop=\"auth,auth-int\", nonce=\"dcd98b7102dd2f0e8b11d0f600bfb0c093\", opaque=\"5ccc069c403ebaf9f0171e9517f40e41\"", "GET", "/dir/index.html", "Digest username=\"Mufasa\", realm=\"testrealm@host.com\", nonce=\"dcd98b7102dd2f0e8b11d0f600bfb0c093\", uri=\"/dir/index.html\", qop=auth, nc=00000001, cnonce=\"0a4f113b\", 
response=\"6629fae49393a05397450978507c4ef1\", opaque=\"5ccc069c403ebaf9f0171e9517f40e41\""); /* * http://www.ntu.edu.sg/home/ehchua/programming/webprogramming/HTTP_Authentication.html */ RUNTEST("bob", "bob", "1672b410efa182c061c2f0a58acaa17d", /* * The Server challenge shown does not correspond to the * Client response. Use realm/nonce from the Client response. * * "realm=\"Members only\", nonce=\"LHOKe1l2BAA=5c373ae0d933a0bb6321125a56a2fcdb6fd7c93b\", algorithm=MD5, qop=\"auth\"", */ "realm=\"members only\", nonce=\"5UImQA==3d76b2ab859e1770ec60ed285ec68a3e63028461\", algorithm=MD5, qop=\"auth\"", "GET", "/digest_auth/test.html", "Digest username=\"bob\", realm=\"members only\", qop=\"auth\", algorithm=\"MD5\", uri=\"/digest_auth/test.html\", nonce=\"5UImQA==3d76b2ab859e1770ec60ed285ec68a3e63028461\", nc=00000001, cnonce=\"1672b410efa182c061c2f0a58acaa17d\", response=\"3d9ebe6b9534a7135a3fde59a5a72668\""); return(failed); } /* Local Variables: mode: c c-file-style: "bsd" indent-tabs-mode: t tab-width: 8 End: */
{ "language": "C" }
/* * cs42l52.h -- CS42L52 ALSA SoC audio driver * * Copyright 2012 CirrusLogic, Inc. * * Author: Georgi Vlaev <joe@nucleusys.com> * Author: Brian Austin <brian.austin@cirrus.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #ifndef __CS42L52_H__ #define __CS42L52_H__ #define CS42L52_NAME "CS42L52" #define CS42L52_DEFAULT_CLK 12000000 #define CS42L52_MIN_CLK 11000000 #define CS42L52_MAX_CLK 27000000 #define CS42L52_DEFAULT_FORMAT SNDRV_PCM_FMTBIT_S16_LE #define CS42L52_DEFAULT_MAX_CHANS 2 #define CS42L52_SYSCLK 1 #define CS42L52_CHIP_SWICTH (1 << 17) #define CS42L52_ALL_IN_ONE (1 << 16) #define CS42L52_CHIP_ONE 0x00 #define CS42L52_CHIP_TWO 0x01 #define CS42L52_CHIP_THR 0x02 #define CS42L52_CHIP_MASK 0x0f #define CS42L52_FIX_BITS_CTL 0x00 #define CS42L52_CHIP 0x01 #define CS42L52_CHIP_ID 0xE0 #define CS42L52_CHIP_ID_MASK 0xF8 #define CS42L52_CHIP_REV_A0 0x00 #define CS42L52_CHIP_REV_A1 0x01 #define CS42L52_CHIP_REV_B0 0x02 #define CS42L52_CHIP_REV_MASK 0x07 #define CS42L52_PWRCTL1 0x02 #define CS42L52_PWRCTL1_PDN_ALL 0x9F #define CS42L52_PWRCTL1_PDN_CHRG 0x80 #define CS42L52_PWRCTL1_PDN_PGAB 0x10 #define CS42L52_PWRCTL1_PDN_PGAA 0x08 #define CS42L52_PWRCTL1_PDN_ADCB 0x04 #define CS42L52_PWRCTL1_PDN_ADCA 0x02 #define CS42L52_PWRCTL1_PDN_CODEC 0x01 #define CS42L52_PWRCTL2 0x03 #define CS42L52_PWRCTL2_OVRDB (1 << 4) #define CS42L52_PWRCTL2_OVRDA (1 << 3) #define CS42L52_PWRCTL2_PDN_MICB (1 << 2) #define CS42L52_PWRCTL2_PDN_MICB_SHIFT 2 #define CS42L52_PWRCTL2_PDN_MICA (1 << 1) #define CS42L52_PWRCTL2_PDN_MICA_SHIFT 1 #define CS42L52_PWRCTL2_PDN_MICBIAS (1 << 0) #define CS42L52_PWRCTL2_PDN_MICBIAS_SHIFT 0 #define CS42L52_PWRCTL3 0x04 #define CS42L52_PWRCTL3_HPB_PDN_SHIFT 6 #define CS42L52_PWRCTL3_HPB_ON_LOW 0x00 #define CS42L52_PWRCTL3_HPB_ON_HIGH 0x01 #define CS42L52_PWRCTL3_HPB_ALWAYS_ON 0x02 #define 
CS42L52_PWRCTL3_HPB_ALWAYS_OFF 0x03 #define CS42L52_PWRCTL3_HPA_PDN_SHIFT 4 #define CS42L52_PWRCTL3_HPA_ON_LOW 0x00 #define CS42L52_PWRCTL3_HPA_ON_HIGH 0x01 #define CS42L52_PWRCTL3_HPA_ALWAYS_ON 0x02 #define CS42L52_PWRCTL3_HPA_ALWAYS_OFF 0x03 #define CS42L52_PWRCTL3_SPKB_PDN_SHIFT 2 #define CS42L52_PWRCTL3_SPKB_ON_LOW 0x00 #define CS42L52_PWRCTL3_SPKB_ON_HIGH 0x01 #define CS42L52_PWRCTL3_SPKB_ALWAYS_ON 0x02 #define CS42L52_PWRCTL3_PDN_SPKB (1 << 2) #define CS42L52_PWRCTL3_PDN_SPKA (1 << 0) #define CS42L52_PWRCTL3_SPKA_PDN_SHIFT 0 #define CS42L52_PWRCTL3_SPKA_ON_LOW 0x00 #define CS42L52_PWRCTL3_SPKA_ON_HIGH 0x01 #define CS42L52_PWRCTL3_SPKA_ALWAYS_ON 0x02 #define CS42L52_DEFAULT_OUTPUT_STATE 0x05 #define CS42L52_PWRCTL3_CONF_MASK 0x03 #define CS42L52_CLK_CTL 0x05 #define CLK_AUTODECT_ENABLE (1 << 7) #define CLK_SPEED_SHIFT 5 #define CLK_DS_MODE 0x00 #define CLK_SS_MODE 0x01 #define CLK_HS_MODE 0x02 #define CLK_QS_MODE 0x03 #define CLK_32K_SR_SHIFT 4 #define CLK_32K 0x01 #define CLK_NO_32K 0x00 #define CLK_27M_MCLK_SHIFT 3 #define CLK_27M_MCLK 0x01 #define CLK_NO_27M 0x00 #define CLK_RATIO_SHIFT 1 #define CLK_R_128 0x00 #define CLK_R_125 0x01 #define CLK_R_132 0x02 #define CLK_R_136 0x03 #define CS42L52_IFACE_CTL1 0x06 #define CS42L52_IFACE_CTL1_MASTER (1 << 7) #define CS42L52_IFACE_CTL1_SLAVE (0 << 7) #define CS42L52_IFACE_CTL1_INV_SCLK (1 << 6) #define CS42L52_IFACE_CTL1_ADC_FMT_I2S (1 << 5) #define CS42L52_IFACE_CTL1_ADC_FMT_LEFT_J (0 << 5) #define CS42L52_IFACE_CTL1_DSP_MODE_EN (1 << 4) #define CS42L52_IFACE_CTL1_DAC_FMT_LEFT_J (0 << 2) #define CS42L52_IFACE_CTL1_DAC_FMT_I2S (1 << 2) #define CS42L52_IFACE_CTL1_DAC_FMT_RIGHT_J (2 << 2) #define CS42L52_IFACE_CTL1_WL_32BIT (0x00) #define CS42L52_IFACE_CTL1_WL_24BIT (0x01) #define CS42L52_IFACE_CTL1_WL_20BIT (0x02) #define CS42L52_IFACE_CTL1_WL_16BIT (0x03) #define CS42L52_IFACE_CTL1_WL_MASK 0xFFFF #define CS42L52_IFACE_CTL2 0x07 #define CS42L52_IFACE_CTL2_SC_MC_EQ (1 << 6) #define CS42L52_IFACE_CTL2_LOOPBACK (1 << 
5) #define CS42L52_IFACE_CTL2_S_MODE_OUTPUT_EN (0 << 4) #define CS42L52_IFACE_CTL2_S_MODE_OUTPUT_HIZ (1 << 4) #define CS42L52_IFACE_CTL2_HP_SW_INV (1 << 3) #define CS42L52_IFACE_CTL2_BIAS_LVL 0x07 #define CS42L52_ADC_PGA_A 0x08 #define CS42L52_ADC_PGA_B 0x09 #define CS42L52_ADC_SEL_SHIFT 5 #define CS42L52_ADC_SEL_AIN1 0x00 #define CS42L52_ADC_SEL_AIN2 0x01 #define CS42L52_ADC_SEL_AIN3 0x02 #define CS42L52_ADC_SEL_AIN4 0x03 #define CS42L52_ADC_SEL_PGA 0x04 #define CS42L52_ANALOG_HPF_CTL 0x0A #define CS42L52_HPF_CTL_ANLGSFTB (1 << 3) #define CS42L52_HPF_CTL_ANLGSFTA (1 << 0) #define CS42L52_ADC_HPF_FREQ 0x0B #define CS42L52_ADC_MISC_CTL 0x0C #define CS42L52_ADC_MISC_CTL_SOURCE_DSP (1 << 6) #define CS42L52_PB_CTL1 0x0D #define CS42L52_PB_CTL1_HP_GAIN_SHIFT 5 #define CS42L52_PB_CTL1_HP_GAIN_03959 0x00 #define CS42L52_PB_CTL1_HP_GAIN_04571 0x01 #define CS42L52_PB_CTL1_HP_GAIN_05111 0x02 #define CS42L52_PB_CTL1_HP_GAIN_06047 0x03 #define CS42L52_PB_CTL1_HP_GAIN_07099 0x04 #define CS42L52_PB_CTL1_HP_GAIN_08399 0x05 #define CS42L52_PB_CTL1_HP_GAIN_10000 0x06 #define CS42L52_PB_CTL1_HP_GAIN_11430 0x07 #define CS42L52_PB_CTL1_INV_PCMB (1 << 3) #define CS42L52_PB_CTL1_INV_PCMA (1 << 2) #define CS42L52_PB_CTL1_MSTB_MUTE (1 << 1) #define CS42L52_PB_CTL1_MSTA_MUTE (1 << 0) #define CS42L52_PB_CTL1_MUTE_MASK 0x03 #define CS42L52_PB_CTL1_MUTE 3 #define CS42L52_PB_CTL1_UNMUTE 0 #define CS42L52_MISC_CTL 0x0E #define CS42L52_MISC_CTL_DEEMPH (1 << 2) #define CS42L52_MISC_CTL_DIGSFT (1 << 1) #define CS42L52_MISC_CTL_DIGZC (1 << 0) #define CS42L52_PB_CTL2 0x0F #define CS42L52_PB_CTL2_HPB_MUTE (1 << 7) #define CS42L52_PB_CTL2_HPA_MUTE (1 << 6) #define CS42L52_PB_CTL2_SPKB_MUTE (1 << 5) #define CS42L52_PB_CTL2_SPKA_MUTE (1 << 4) #define CS42L52_PB_CTL2_SPK_SWAP (1 << 2) #define CS42L52_PB_CTL2_SPK_MONO (1 << 1) #define CS42L52_PB_CTL2_SPK_MUTE50 (1 << 0) #define CS42L52_MICA_CTL 0x10 #define CS42L52_MICB_CTL 0x11 #define CS42L52_MIC_CTL_MIC_SEL_MASK 0xBF #define 
CS42L52_MIC_CTL_MIC_SEL_SHIFT 6 #define CS42L52_MIC_CTL_TYPE_MASK 0x20 #define CS42L52_MIC_CTL_TYPE_SHIFT 5 #define CS42L52_PGAA_CTL 0x12 #define CS42L52_PGAB_CTL 0x13 #define CS42L52_PGAX_CTL_VOL_12DB 24 #define CS42L52_PGAX_CTL_VOL_6DB 12 /*step size 0.5db*/ #define CS42L52_PASSTHRUA_VOL 0x14 #define CS42L52_PASSTHRUB_VOL 0x15 #define CS42L52_ADCA_VOL 0x16 #define CS42L52_ADCB_VOL 0x17 #define CS42L52_ADCX_VOL_24DB 24 /*step size 1db*/ #define CS42L52_ADCX_VOL_12DB 12 #define CS42L52_ADCX_VOL_6DB 6 #define CS42L52_ADCA_MIXER_VOL 0x18 #define CS42L52_ADCB_MIXER_VOL 0x19 #define CS42L52_ADC_MIXER_VOL_12DB 0x18 #define CS42L52_PCMA_MIXER_VOL 0x1A #define CS42L52_PCMB_MIXER_VOL 0x1B #define CS42L52_BEEP_FREQ 0x1C #define CS42L52_BEEP_VOL 0x1D #define CS42L52_BEEP_TONE_CTL 0x1E #define CS42L52_BEEP_RATE_SHIFT 4 #define CS42L52_BEEP_RATE_MASK 0x0F #define CS42L52_TONE_CTL 0x1F #define CS42L52_BEEP_EN_MASK 0x3F #define CS42L52_MASTERA_VOL 0x20 #define CS42L52_MASTERB_VOL 0x21 #define CS42L52_HPA_VOL 0x22 #define CS42L52_HPB_VOL 0x23 #define CS42L52_DEFAULT_HP_VOL 0xF0 #define CS42L52_SPKA_VOL 0x24 #define CS42L52_SPKB_VOL 0x25 #define CS42L52_DEFAULT_SPK_VOL 0xF0 #define CS42L52_ADC_PCM_MIXER 0x26 #define CS42L52_LIMITER_CTL1 0x27 #define CS42L52_LIMITER_CTL2 0x28 #define CS42L52_LIMITER_AT_RATE 0x29 #define CS42L52_ALC_CTL 0x2A #define CS42L52_ALC_CTL_ALCB_ENABLE_SHIFT 7 #define CS42L52_ALC_CTL_ALCA_ENABLE_SHIFT 6 #define CS42L52_ALC_CTL_FASTEST_ATTACK 0 #define CS42L52_ALC_RATE 0x2B #define CS42L52_ALC_SLOWEST_RELEASE 0x3F #define CS42L52_ALC_THRESHOLD 0x2C #define CS42L52_ALC_MAX_RATE_SHIFT 5 #define CS42L52_ALC_MIN_RATE_SHIFT 2 #define CS42L52_ALC_RATE_0DB 0 #define CS42L52_ALC_RATE_3DB 1 #define CS42L52_ALC_RATE_6DB 2 #define CS42L52_NOISE_GATE_CTL 0x2D #define CS42L52_NG_ENABLE_SHIFT 6 #define CS42L52_NG_THRESHOLD_SHIFT 2 #define CS42L52_NG_MIN_70DB 2 #define CS42L52_NG_DELAY_SHIFT 0 #define CS42L52_NG_DELAY_100MS 1 #define CS42L52_CLK_STATUS 0x2E #define 
CS42L52_BATT_COMPEN 0x2F #define CS42L52_BATT_LEVEL 0x30 #define CS42L52_SPK_STATUS 0x31 #define CS42L52_SPK_STATUS_PIN_SHIFT 3 #define CS42L52_SPK_STATUS_PIN_HIGH 1 #define CS42L52_TEM_CTL 0x32 #define CS42L52_TEM_CTL_SET 0x80 #define CS42L52_THE_FOLDBACK 0x33 #define CS42L52_CHARGE_PUMP 0x34 #define CS42L52_CHARGE_PUMP_MASK 0xF0 #define CS42L52_CHARGE_PUMP_SHIFT 4 #define CS42L52_FIX_BITS1 0x3E #define CS42L52_FIX_BITS2 0x47 #define CS42L52_MAX_REGISTER 0x47 #endif
{ "language": "C" }
/* $Id: avm_pci.c,v 1.29.2.4 2004/02/11 13:21:32 keil Exp $
 *
 * low level stuff for AVM Fritz!PCI and ISA PnP isdn cards
 *
 * Author       Karsten Keil
 * Copyright    by Karsten Keil      <keil@isdn4linux.de>
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 * Thanks to AVM, Berlin for information
 *
 */

#include <linux/init.h>
#include "hisax.h"
#include "isac.h"
#include "isdnl1.h"
#include <linux/pci.h>
#include <linux/isapnp.h>
#include <linux/interrupt.h>

static const char *avm_pci_rev = "$Revision: 1.29.2.4 $";

/* card sub-types */
#define  AVM_FRITZ_PCI		1
#define  AVM_FRITZ_PNP		2

/* HDLC controller register offsets */
#define  HDLC_FIFO		0x0
#define  HDLC_STATUS		0x4

/* values for the index register at cfg_reg + 4 (selects the unit that
   subsequent data-port accesses address) */
#define	 AVM_HDLC_1		0x00
#define	 AVM_HDLC_2		0x01
#define	 AVM_ISAC_FIFO		0x02
#define	 AVM_ISAC_REG_LOW	0x04
#define	 AVM_ISAC_REG_HIGH	0x06

/* interrupt status bits at cfg_reg + 2 (active low) */
#define  AVM_STATUS0_IRQ_ISAC	0x01
#define  AVM_STATUS0_IRQ_HDLC	0x02
#define  AVM_STATUS0_IRQ_TIMER	0x04
#define  AVM_STATUS0_IRQ_MASK	0x07

/* control bits at cfg_reg + 2 */
#define  AVM_STATUS0_RESET	0x01
#define  AVM_STATUS0_DIS_TIMER	0x02
#define  AVM_STATUS0_RES_TIMER	0x04
#define  AVM_STATUS0_ENA_IRQ	0x08
#define  AVM_STATUS0_TESTBIT	0x10

/* control bits at cfg_reg + 3 */
#define  AVM_STATUS1_INT_SEL	0x0f
#define  AVM_STATUS1_ENA_IOM	0x80

/* HDLC mode register bits */
#define  HDLC_MODE_ITF_FLG	0x01
#define  HDLC_MODE_TRANS	0x02
#define  HDLC_MODE_CCR_7	0x04
#define  HDLC_MODE_CCR_16	0x08
#define  HDLC_MODE_TESTLOOP	0x80

/* HDLC interrupt bits */
#define  HDLC_INT_XPR		0x80
#define  HDLC_INT_XDU		0x40
#define  HDLC_INT_RPR		0x20
#define  HDLC_INT_MASK		0xE0

/* HDLC receive status bits */
#define  HDLC_STAT_RME		0x01
#define  HDLC_STAT_RDO		0x10
#define  HDLC_STAT_CRCVFRRAB	0x0E
#define  HDLC_STAT_CRCVFR	0x06
#define  HDLC_STAT_RML_MASK	0x3f00

/* HDLC command bits */
#define  HDLC_CMD_XRS		0x80
#define  HDLC_CMD_XME		0x01
#define  HDLC_CMD_RRS		0x20
#define  HDLC_CMD_XML_MASK	0x3f00

/* Interface functions */

/* Read one ISAC register: select the low/high register bank via the
   index port, then read the register through the data window. */
static u_char
ReadISAC(struct IsdnCardState *cs, u_char offset)
{
	register u_char idx = (offset > 0x2f) ?
		AVM_ISAC_REG_HIGH : AVM_ISAC_REG_LOW;
	register u_char val;

	outb(idx, cs->hw.avm.cfg_reg + 4);
	val = inb(cs->hw.avm.isac + (offset & 0xf));
	return (val);
}

/* Write one ISAC register (bank select as in ReadISAC). */
static void
WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
{
	register u_char idx = (offset > 0x2f) ?
		AVM_ISAC_REG_HIGH : AVM_ISAC_REG_LOW;

	outb(idx, cs->hw.avm.cfg_reg + 4);
	outb(value, cs->hw.avm.isac + (offset & 0xf));
}

/* Block-read the ISAC receive FIFO. */
static void
ReadISACfifo(struct IsdnCardState *cs, u_char * data, int size)
{
	outb(AVM_ISAC_FIFO, cs->hw.avm.cfg_reg + 4);
	insb(cs->hw.avm.isac, data, size);
}

/* Block-write the ISAC transmit FIFO. */
static void
WriteISACfifo(struct IsdnCardState *cs, u_char * data, int size)
{
	outb(AVM_ISAC_FIFO, cs->hw.avm.cfg_reg + 4);
	outsb(cs->hw.avm.isac, data, size);
}

/* 32-bit HDLC register access (PCI variant). */
static inline u_int
ReadHDLCPCI(struct IsdnCardState *cs, int chan, u_char offset)
{
	register u_int idx = chan ? AVM_HDLC_2 : AVM_HDLC_1;
	register u_int val;

	outl(idx, cs->hw.avm.cfg_reg + 4);
	val = inl(cs->hw.avm.isac + offset);
	return (val);
}

static inline void
WriteHDLCPCI(struct IsdnCardState *cs, int chan, u_char offset, u_int value)
{
	register u_int idx = chan ? AVM_HDLC_2 : AVM_HDLC_1;

	outl(idx, cs->hw.avm.cfg_reg + 4);
	outl(value, cs->hw.avm.isac + offset);
}

/* 8-bit HDLC register access (ISA PnP variant). */
static inline u_char
ReadHDLCPnP(struct IsdnCardState *cs, int chan, u_char offset)
{
	register u_char idx = chan ? AVM_HDLC_2 : AVM_HDLC_1;
	register u_char val;

	outb(idx, cs->hw.avm.cfg_reg + 4);
	val = inb(cs->hw.avm.isac + offset);
	return (val);
}

static inline void
WriteHDLCPnP(struct IsdnCardState *cs, int chan, u_char offset, u_char value)
{
	register u_char idx = chan ? AVM_HDLC_2 : AVM_HDLC_1;

	outb(idx, cs->hw.avm.cfg_reg + 4);
	outb(value, cs->hw.avm.isac + offset);
}

/* u_char-typed wrappers so the PCI accessors fit the generic
   BC_Read_Reg/BC_Write_Reg function-pointer signatures. */
static u_char
ReadHDLC_s(struct IsdnCardState *cs, int chan, u_char offset)
{
	return(0xff & ReadHDLCPCI(cs, chan, offset));
}

static void
WriteHDLC_s(struct IsdnCardState *cs, int chan, u_char offset, u_char value)
{
	WriteHDLCPCI(cs, chan, offset, value);
}

/* Map a B channel number to the BCState currently configured for it
   (NULL when neither B channel is active on that channel). */
static inline
struct BCState *Sel_BCS(struct IsdnCardState *cs, int channel)
{
	if (cs->bcs[0].mode && (cs->bcs[0].channel == channel))
		return(&cs->bcs[0]);
	else if (cs->bcs[1].mode && (cs->bcs[1].channel == channel))
		return(&cs->bcs[1]);
	else
		return(NULL);
}

/* Push the cached HDLC control word to the hardware.  On PCI the whole
   32-bit word is written at once; on PnP `which` selects the individual
   bytes (4 = mode, 2 = xml, 1 = cmd) to write. */
static void
write_ctrl(struct BCState *bcs, int which) {

	if (bcs->cs->debug & L1_DEB_HSCX)
		debugl1(bcs->cs, "hdlc %c wr%x ctrl %x",
			'A' + bcs->channel, which, bcs->hw.hdlc.ctrl.ctrl);
	if (bcs->cs->subtyp == AVM_FRITZ_PCI) {
		WriteHDLCPCI(bcs->cs, bcs->channel, HDLC_STATUS, bcs->hw.hdlc.ctrl.ctrl);
	} else {
		if (which & 4)
			WriteHDLCPnP(bcs->cs, bcs->channel, HDLC_STATUS + 2,
				     bcs->hw.hdlc.ctrl.sr.mode);
		if (which & 2)
			WriteHDLCPnP(bcs->cs, bcs->channel, HDLC_STATUS + 1,
				     bcs->hw.hdlc.ctrl.sr.xml);
		if (which & 1)
			WriteHDLCPnP(bcs->cs, bcs->channel, HDLC_STATUS,
				     bcs->hw.hdlc.ctrl.sr.cmd);
	}
}

/* Configure a B channel for the given layer-1 mode.
   mode == -1 is the init case; it marks the channel used and then
   deliberately falls through to the L1_MODE_NULL shutdown path. */
static void
modehdlc(struct BCState *bcs, int mode, int bc)
{
	struct IsdnCardState *cs = bcs->cs;
	int hdlc = bcs->channel;

	if (cs->debug & L1_DEB_HSCX)
		debugl1(cs, "hdlc %c mode %d --> %d ichan %d --> %d",
			'A' + hdlc, bcs->mode, mode, hdlc, bc);
	bcs->hw.hdlc.ctrl.ctrl = 0;
	switch (mode) {
		case (-1): /* used for init */
			bcs->mode = 1;
			bcs->channel = bc;
			bc = 0;
			/* fall through */
		case (L1_MODE_NULL):
			if (bcs->mode == L1_MODE_NULL)
				return;
			/* put receiver and transmitter into reset */
			bcs->hw.hdlc.ctrl.sr.cmd  = HDLC_CMD_XRS | HDLC_CMD_RRS;
			bcs->hw.hdlc.ctrl.sr.mode = HDLC_MODE_TRANS;
			write_ctrl(bcs, 5);
			bcs->mode = L1_MODE_NULL;
			bcs->channel = bc;
			break;
		case (L1_MODE_TRANS):
			bcs->mode = mode;
			bcs->channel = bc;
			bcs->hw.hdlc.ctrl.sr.cmd  = HDLC_CMD_XRS | HDLC_CMD_RRS;
			bcs->hw.hdlc.ctrl.sr.mode = HDLC_MODE_TRANS;
			write_ctrl(bcs, 5);
			bcs->hw.hdlc.ctrl.sr.cmd = HDLC_CMD_XRS;
			write_ctrl(bcs, 1);
			bcs->hw.hdlc.ctrl.sr.cmd = 0;
			schedule_event(bcs, B_XMTBUFREADY);
			break;
		case (L1_MODE_HDLC):
			bcs->mode = mode;
			bcs->channel = bc;
			bcs->hw.hdlc.ctrl.sr.cmd  = HDLC_CMD_XRS | HDLC_CMD_RRS;
			bcs->hw.hdlc.ctrl.sr.mode = HDLC_MODE_ITF_FLG;
			write_ctrl(bcs, 5);
			bcs->hw.hdlc.ctrl.sr.cmd = HDLC_CMD_XRS;
			write_ctrl(bcs, 1);
			bcs->hw.hdlc.ctrl.sr.cmd = 0;
			schedule_event(bcs, B_XMTBUFREADY);
			break;
	}
}

/* Drain `count` bytes from the HDLC receive FIFO into the channel's
   receive buffer (32-bit reads on PCI, byte reads on PnP). */
static inline void
hdlc_empty_fifo(struct BCState *bcs, int count)
{
	register u_int *ptr;
	u_char *p;
	u_char idx = bcs->channel ? AVM_HDLC_2 : AVM_HDLC_1;
	int cnt=0;
	struct IsdnCardState *cs = bcs->cs;

	if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
		debugl1(cs, "hdlc_empty_fifo %d", count);
	if (bcs->hw.hdlc.rcvidx + count > HSCX_BUFMAX) {
		if (cs->debug & L1_DEB_WARN)
			debugl1(cs, "hdlc_empty_fifo: incoming packet too large");
		return;
	}
	p = bcs->hw.hdlc.rcvbuf + bcs->hw.hdlc.rcvidx;
	ptr = (u_int *)p;
	bcs->hw.hdlc.rcvidx += count;
	if (cs->subtyp == AVM_FRITZ_PCI) {
		outl(idx, cs->hw.avm.cfg_reg + 4);
		while (cnt < count) {
#ifdef __powerpc__
			*ptr++ = in_be32((unsigned *)(cs->hw.avm.isac +_IO_BASE));
#else
			*ptr++ = inl(cs->hw.avm.isac);
#endif /* __powerpc__ */
			cnt += 4;
		}
	} else {
		outb(idx, cs->hw.avm.cfg_reg + 4);
		while (cnt < count) {
			*p++ = inb(cs->hw.avm.isac);
			cnt++;
		}
	}
	if (cs->debug & L1_DEB_HSCX_FIFO) {
		char *t = bcs->blog;

		/* on PCI, p was not advanced; recompute from ptr */
		if (cs->subtyp == AVM_FRITZ_PNP)
			p = (u_char *) ptr;
		t += sprintf(t, "hdlc_empty_fifo %c cnt %d",
			     bcs->channel ? 'B' : 'A', count);
		QuickHex(t, p, count);
		debugl1(cs, bcs->blog);
	}
}

/* Feed the next chunk of the pending tx_skb (max. one 32-byte FIFO)
   into the HDLC transmit FIFO; sets XME for the final chunk in HDLC mode. */
static inline void
hdlc_fill_fifo(struct BCState *bcs)
{
	struct IsdnCardState *cs = bcs->cs;
	int count, cnt =0;
	int fifo_size = 32;
	u_char *p;
	u_int *ptr;

	if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
		debugl1(cs, "hdlc_fill_fifo");
	if (!bcs->tx_skb)
		return;
	if (bcs->tx_skb->len <= 0)
		return;

	bcs->hw.hdlc.ctrl.sr.cmd &= ~HDLC_CMD_XME;
	if (bcs->tx_skb->len > fifo_size) {
		count = fifo_size;
	} else {
		count = bcs->tx_skb->len;
		if (bcs->mode != L1_MODE_TRANS)
			bcs->hw.hdlc.ctrl.sr.cmd |= HDLC_CMD_XME;
	}
	if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
		debugl1(cs, "hdlc_fill_fifo %d/%ld", count, bcs->tx_skb->len);
	p = bcs->tx_skb->data;
	ptr = (u_int *)p;
	skb_pull(bcs->tx_skb, count);
	bcs->tx_cnt -= count;
	bcs->hw.hdlc.count += count;
	/* xml == 0 means "full FIFO" to the hardware */
	bcs->hw.hdlc.ctrl.sr.xml = ((count == fifo_size) ? 0 : count);
	write_ctrl(bcs, 3);  /* sets the correct index too */
	if (cs->subtyp == AVM_FRITZ_PCI) {
		while (cnt<count) {
#ifdef __powerpc__
			out_be32((unsigned *)(cs->hw.avm.isac +_IO_BASE), *ptr++);
#else
			outl(*ptr++, cs->hw.avm.isac);
#endif /* __powerpc__ */
			cnt += 4;
		}
	} else {
		while (cnt<count) {
			outb(*p++, cs->hw.avm.isac);
			cnt++;
		}
	}
	if (cs->debug & L1_DEB_HSCX_FIFO) {
		char *t = bcs->blog;

		if (cs->subtyp == AVM_FRITZ_PNP)
			p = (u_char *) ptr;
		t += sprintf(t, "hdlc_fill_fifo %c cnt %d",
			     bcs->channel ? 'B' : 'A', count);
		QuickHex(t, p, count);
		debugl1(cs, bcs->blog);
	}
}

/* Per-channel HDLC interrupt handler: processes receive (RPR/RME/RDO),
   transmit underrun (XDU) and transmit-pool-ready (XPR) conditions. */
static void
HDLC_irq(struct BCState *bcs, u_int stat) {
	int len;
	struct sk_buff *skb;

	if (bcs->cs->debug & L1_DEB_HSCX)
		debugl1(bcs->cs, "ch%d stat %#x", bcs->channel, stat);
	if (stat & HDLC_INT_RPR) {
		if (stat & HDLC_STAT_RDO) {
			/* receive data overflow: reset the receiver and
			   drop the partially assembled frame */
			if (bcs->cs->debug & L1_DEB_HSCX)
				debugl1(bcs->cs, "RDO");
			else
				debugl1(bcs->cs, "ch%d stat %#x", bcs->channel, stat);
			bcs->hw.hdlc.ctrl.sr.xml = 0;
			bcs->hw.hdlc.ctrl.sr.cmd |= HDLC_CMD_RRS;
			write_ctrl(bcs, 1);
			bcs->hw.hdlc.ctrl.sr.cmd &= ~HDLC_CMD_RRS;
			write_ctrl(bcs, 1);
			bcs->hw.hdlc.rcvidx = 0;
		} else {
			/* RML == 0 encodes a full 32-byte FIFO */
			if (!(len = (stat & HDLC_STAT_RML_MASK)>>8))
				len = 32;
			hdlc_empty_fifo(bcs, len);
			if ((stat & HDLC_STAT_RME) || (bcs->mode == L1_MODE_TRANS)) {
				if (((stat & HDLC_STAT_CRCVFRRAB)==HDLC_STAT_CRCVFR) ||
				    (bcs->mode == L1_MODE_TRANS)) {
					/* complete valid frame: hand it up */
					if (!(skb = dev_alloc_skb(bcs->hw.hdlc.rcvidx)))
						printk(KERN_WARNING "HDLC: receive out of memory\n");
					else {
						memcpy(skb_put(skb, bcs->hw.hdlc.rcvidx),
						       bcs->hw.hdlc.rcvbuf, bcs->hw.hdlc.rcvidx);
						skb_queue_tail(&bcs->rqueue, skb);
					}
					bcs->hw.hdlc.rcvidx = 0;
					schedule_event(bcs, B_RCVBUFREADY);
				} else {
					if (bcs->cs->debug & L1_DEB_HSCX)
						debugl1(bcs->cs, "invalid frame");
					else
						debugl1(bcs->cs, "ch%d invalid frame %#x", bcs->channel, stat);
					bcs->hw.hdlc.rcvidx = 0;
				}
			}
		}
	}
	if (stat & HDLC_INT_XDU) {
		/* Here we lost an TX interrupt, so
		 * restart transmitting the whole frame.
		 */
		if (bcs->tx_skb) {
			skb_push(bcs->tx_skb, bcs->hw.hdlc.count);
			bcs->tx_cnt += bcs->hw.hdlc.count;
			bcs->hw.hdlc.count = 0;
			if (bcs->cs->debug & L1_DEB_WARN)
				debugl1(bcs->cs, "ch%d XDU", bcs->channel);
		} else if (bcs->cs->debug & L1_DEB_WARN)
			debugl1(bcs->cs, "ch%d XDU without skb", bcs->channel);
		bcs->hw.hdlc.ctrl.sr.xml = 0;
		bcs->hw.hdlc.ctrl.sr.cmd |= HDLC_CMD_XRS;
		write_ctrl(bcs, 1);
		bcs->hw.hdlc.ctrl.sr.cmd &= ~HDLC_CMD_XRS;
		write_ctrl(bcs, 1);
		hdlc_fill_fifo(bcs);
	} else if (stat & HDLC_INT_XPR) {
		if (bcs->tx_skb) {
			if (bcs->tx_skb->len) {
				/* more data of the current frame to send */
				hdlc_fill_fifo(bcs);
				return;
			} else {
				/* frame done: account for flow control,
				   free the skb and move on */
				if (test_bit(FLG_LLI_L1WAKEUP,&bcs->st->lli.flag) &&
				    (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
					u_long	flags;
					spin_lock_irqsave(&bcs->aclock, flags);
					bcs->ackcnt += bcs->hw.hdlc.count;
					spin_unlock_irqrestore(&bcs->aclock, flags);
					schedule_event(bcs, B_ACKPENDING);
				}
				dev_kfree_skb_irq(bcs->tx_skb);
				bcs->hw.hdlc.count = 0;
				bcs->tx_skb = NULL;
			}
		}
		if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
			bcs->hw.hdlc.count = 0;
			test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
			hdlc_fill_fifo(bcs);
		} else {
			test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
			schedule_event(bcs, B_XMTBUFREADY);
		}
	}
}

/* Poll both HDLC channels and dispatch pending interrupts.  On PnP the
   receive byte count lives in a second status byte and is merged in. */
static inline void
HDLC_irq_main(struct IsdnCardState *cs)
{
	u_int stat;
	struct BCState *bcs;

	if (cs->subtyp == AVM_FRITZ_PCI) {
		stat = ReadHDLCPCI(cs, 0, HDLC_STATUS);
	} else {
		stat = ReadHDLCPnP(cs, 0, HDLC_STATUS);
		if (stat & HDLC_INT_RPR)
			stat |= (ReadHDLCPnP(cs, 0, HDLC_STATUS+1))<<8;
	}
	if (stat & HDLC_INT_MASK) {
		if (!(bcs = Sel_BCS(cs, 0))) {
			if (cs->debug)
				debugl1(cs, "hdlc spurious channel 0 IRQ");
		} else
			HDLC_irq(bcs, stat);
	}
	if (cs->subtyp == AVM_FRITZ_PCI) {
		stat = ReadHDLCPCI(cs, 1, HDLC_STATUS);
	} else {
		stat = ReadHDLCPnP(cs, 1, HDLC_STATUS);
		if (stat & HDLC_INT_RPR)
			stat |= (ReadHDLCPnP(cs, 1, HDLC_STATUS+1))<<8;
	}
	if (stat & HDLC_INT_MASK) {
		if (!(bcs = Sel_BCS(cs, 1))) {
			if (cs->debug)
				debugl1(cs, "hdlc spurious channel 1 IRQ");
		} else
			HDLC_irq(bcs, stat);
	}
}

/* Layer-2 -> layer-1 message handler for the B channels: queues or
   starts transmission, and handles (de)activation requests. */
static void
hdlc_l2l1(struct PStack *st, int pr, void *arg)
{
	struct BCState *bcs = st->l1.bcs;
	struct sk_buff *skb = arg;
	u_long flags;

	switch (pr) {
		case (PH_DATA | REQUEST):
			spin_lock_irqsave(&bcs->cs->lock, flags);
			if (bcs->tx_skb) {
				skb_queue_tail(&bcs->squeue, skb);
			} else {
				bcs->tx_skb = skb;
				test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
				bcs->hw.hdlc.count = 0;
				bcs->cs->BC_Send_Data(bcs);
			}
			spin_unlock_irqrestore(&bcs->cs->lock, flags);
			break;
		case (PH_PULL | INDICATION):
			spin_lock_irqsave(&bcs->cs->lock, flags);
			if (bcs->tx_skb) {
				printk(KERN_WARNING "hdlc_l2l1: this shouldn't happen\n");
			} else {
				test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
				bcs->tx_skb = skb;
				bcs->hw.hdlc.count = 0;
				bcs->cs->BC_Send_Data(bcs);
			}
			spin_unlock_irqrestore(&bcs->cs->lock, flags);
			break;
		case (PH_PULL | REQUEST):
			if (!bcs->tx_skb) {
				test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
				st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
			} else
				test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
			break;
		case (PH_ACTIVATE | REQUEST):
			spin_lock_irqsave(&bcs->cs->lock, flags);
			test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
			modehdlc(bcs, st->l1.mode, st->l1.bc);
			spin_unlock_irqrestore(&bcs->cs->lock, flags);
			l1_msg_b(st, pr, arg);
			break;
		case (PH_DEACTIVATE | REQUEST):
			l1_msg_b(st, pr, arg);
			break;
		case (PH_DEACTIVATE | CONFIRM):
			spin_lock_irqsave(&bcs->cs->lock, flags);
			test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
			test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
			modehdlc(bcs, 0, st->l1.bc);
			spin_unlock_irqrestore(&bcs->cs->lock, flags);
			st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
			break;
	}
}

/* Shut a B channel down and release its buffers and queued skbs. */
static void
close_hdlcstate(struct BCState *bcs)
{
	modehdlc(bcs, 0, 0);
	if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
		kfree(bcs->hw.hdlc.rcvbuf);
		bcs->hw.hdlc.rcvbuf = NULL;
		kfree(bcs->blog);
		bcs->blog = NULL;
		skb_queue_purge(&bcs->rqueue);
		skb_queue_purge(&bcs->squeue);
		if (bcs->tx_skb) {
			dev_kfree_skb_any(bcs->tx_skb);
			bcs->tx_skb = NULL;
			test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
		}
	}
}

/* Allocate receive/debug buffers for a B channel and reset its state.
   Returns 0 on success, 1/2 on allocation failure. */
static int
open_hdlcstate(struct IsdnCardState *cs, struct BCState *bcs)
{
	if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
		if (!(bcs->hw.hdlc.rcvbuf = kmalloc(HSCX_BUFMAX, GFP_ATOMIC))) {
			printk(KERN_WARNING
			       "HiSax: No memory for hdlc.rcvbuf\n");
			return (1);
		}
		if (!(bcs->blog = kmalloc(MAX_BLOG_SPACE, GFP_ATOMIC))) {
			printk(KERN_WARNING
			       "HiSax: No memory for bcs->blog\n");
			test_and_clear_bit(BC_FLG_INIT, &bcs->Flag);
			kfree(bcs->hw.hdlc.rcvbuf);
			bcs->hw.hdlc.rcvbuf = NULL;
			return (2);
		}
		skb_queue_head_init(&bcs->rqueue);
		skb_queue_head_init(&bcs->squeue);
	}
	bcs->tx_skb = NULL;
	test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
	bcs->event = 0;
	bcs->hw.hdlc.rcvidx = 0;
	bcs->tx_cnt = 0;
	return (0);
}

/* Wire a protocol stack to a B channel (BC_SetStack callback). */
static int
setstack_hdlc(struct PStack *st, struct BCState *bcs)
{
	bcs->channel = st->l1.bc;
	if (open_hdlcstate(st->l1.hardware, bcs))
		return (-1);
	st->l1.bcs = bcs;
	st->l2.l2l1 = hdlc_l2l1;
	setstack_manager(st);
	bcs->st = st;
	setstack_l1_B(st);
	return (0);
}

#if 0
void __init
clear_pending_hdlc_ints(struct IsdnCardState *cs)
{
	u_int val;

	if (cs->subtyp == AVM_FRITZ_PCI) {
		val = ReadHDLCPCI(cs, 0, HDLC_STATUS);
		debugl1(cs, "HDLC 1 STA %x", val);
		val = ReadHDLCPCI(cs, 1, HDLC_STATUS);
		debugl1(cs, "HDLC 2 STA %x", val);
	} else {
		val = ReadHDLCPnP(cs, 0, HDLC_STATUS);
		debugl1(cs, "HDLC 1 STA %x", val);
		val = ReadHDLCPnP(cs, 0, HDLC_STATUS + 1);
		debugl1(cs, "HDLC 1 RML %x", val);
		val = ReadHDLCPnP(cs, 0, HDLC_STATUS + 2);
		debugl1(cs, "HDLC 1 MODE %x", val);
		val = ReadHDLCPnP(cs, 0, HDLC_STATUS + 3);
		debugl1(cs, "HDLC 1 VIN %x", val);
		val = ReadHDLCPnP(cs, 1, HDLC_STATUS);
		debugl1(cs, "HDLC 2 STA %x", val);
		val = ReadHDLCPnP(cs, 1, HDLC_STATUS + 1);
		debugl1(cs, "HDLC 2 RML %x", val);
		val = ReadHDLCPnP(cs, 1, HDLC_STATUS + 2);
		debugl1(cs, "HDLC 2 MODE %x", val);
		val = ReadHDLCPnP(cs, 1, HDLC_STATUS + 3);
		debugl1(cs, "HDLC 2 VIN %x", val);
	}
}
#endif /* 0 */

/* Hook up the B-channel callbacks and put both channels through the
   mode == -1 init path. */
static void
inithdlc(struct IsdnCardState *cs)
{
	cs->bcs[0].BC_SetStack = setstack_hdlc;
	cs->bcs[1].BC_SetStack = setstack_hdlc;
	cs->bcs[0].BC_Close = close_hdlcstate;
	cs->bcs[1].BC_Close = close_hdlcstate;
	modehdlc(cs->bcs, -1, 0);
	modehdlc(cs->bcs + 1, -1, 1);
}

/* Shared interrupt handler: status bits are active LOW, so all bits set
   means "not ours" (possibly a shared IRQ request from another device). */
static irqreturn_t
avm_pcipnp_interrupt(int intno, void *dev_id)
{
	struct IsdnCardState *cs = dev_id;
	u_long flags;
	u_char val;
	u_char sval;

	spin_lock_irqsave(&cs->lock, flags);
	sval = inb(cs->hw.avm.cfg_reg + 2);
	if ((sval & AVM_STATUS0_IRQ_MASK) == AVM_STATUS0_IRQ_MASK) {
		/* possibly a shared IRQ request */
		spin_unlock_irqrestore(&cs->lock, flags);
		return IRQ_NONE;
	}
	if (!(sval & AVM_STATUS0_IRQ_ISAC)) {
		val = ReadISAC(cs, ISAC_ISTA);
		isac_interrupt(cs, val);
	}
	if (!(sval & AVM_STATUS0_IRQ_HDLC)) {
		HDLC_irq_main(cs);
	}
	/* mask/unmask cycle acknowledges pending ISAC interrupts */
	WriteISAC(cs, ISAC_MASK, 0xFF);
	WriteISAC(cs, ISAC_MASK, 0x0);
	spin_unlock_irqrestore(&cs->lock, flags);
	return IRQ_HANDLED;
}

/* Hardware reset sequence: assert reset, then re-enable IRQs and
   program the IRQ line select. */
static void
reset_avmpcipnp(struct IsdnCardState *cs)
{
	printk(KERN_INFO "AVM PCI/PnP: reset\n");
	outb(AVM_STATUS0_RESET | AVM_STATUS0_DIS_TIMER, cs->hw.avm.cfg_reg + 2);
	mdelay(10);
	outb(AVM_STATUS0_DIS_TIMER | AVM_STATUS0_RES_TIMER | AVM_STATUS0_ENA_IRQ, cs->hw.avm.cfg_reg + 2);
	outb(AVM_STATUS1_ENA_IOM | cs->irq, cs->hw.avm.cfg_reg + 3);
	mdelay(10);
	printk(KERN_INFO "AVM PCI/PnP: S1 %x\n", inb(cs->hw.avm.cfg_reg + 3));
}

/* Card-control entry point (reset / release / init / test). */
static int
AVM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
{
	u_long flags;

	switch (mt) {
		case CARD_RESET:
			spin_lock_irqsave(&cs->lock, flags);
			reset_avmpcipnp(cs);
			spin_unlock_irqrestore(&cs->lock, flags);
			return(0);
		case CARD_RELEASE:
			outb(0, cs->hw.avm.cfg_reg + 2);
			release_region(cs->hw.avm.cfg_reg, 32);
			return(0);
		case CARD_INIT:
			spin_lock_irqsave(&cs->lock, flags);
			reset_avmpcipnp(cs);
			clear_pending_isac_ints(cs);
			initisac(cs);
			inithdlc(cs);
			outb(AVM_STATUS0_DIS_TIMER | AVM_STATUS0_RES_TIMER,
			     cs->hw.avm.cfg_reg + 2);
			WriteISAC(cs, ISAC_MASK, 0);
			outb(AVM_STATUS0_DIS_TIMER | AVM_STATUS0_RES_TIMER |
			     AVM_STATUS0_ENA_IRQ, cs->hw.avm.cfg_reg + 2);
			/* RESET Receiver and Transmitter */
			WriteISAC(cs, ISAC_CMDR, 0x41);
			spin_unlock_irqrestore(&cs->lock, flags);
			return(0);
		case CARD_TEST:
			return(0);
	}
	return(0);
}

/* Common final setup for PCI and PnP: claim the I/O region, install the
   register accessors and handlers, probe the ISAC version.
   Returns 1 on success, 0 on failure. */
static int __devinit avm_setup_rest(struct IsdnCardState *cs)
{
	u_int val, ver;

	cs->hw.avm.isac = cs->hw.avm.cfg_reg + 0x10;
	if (!request_region(cs->hw.avm.cfg_reg, 32,
		(cs->subtyp == AVM_FRITZ_PCI) ? "avm PCI" : "avm PnP")) {
		printk(KERN_WARNING
		       "HiSax: Fritz!PCI/PNP config port %x-%x already in use\n",
		       cs->hw.avm.cfg_reg,
		       cs->hw.avm.cfg_reg + 31);
		return (0);
	}
	switch (cs->subtyp) {
	case AVM_FRITZ_PCI:
		val = inl(cs->hw.avm.cfg_reg);
		printk(KERN_INFO "AVM PCI: stat %#x\n", val);
		printk(KERN_INFO "AVM PCI: Class %X Rev %d\n",
			val & 0xff, (val>>8) & 0xff);
		cs->BC_Read_Reg = &ReadHDLC_s;
		cs->BC_Write_Reg = &WriteHDLC_s;
		break;
	case AVM_FRITZ_PNP:
		val = inb(cs->hw.avm.cfg_reg);
		ver = inb(cs->hw.avm.cfg_reg + 1);
		printk(KERN_INFO "AVM PnP: Class %X Rev %d\n", val, ver);
		cs->BC_Read_Reg = &ReadHDLCPnP;
		cs->BC_Write_Reg = &WriteHDLCPnP;
		break;
	default:
		printk(KERN_WARNING "AVM unknown subtype %d\n", cs->subtyp);
		return(0);
	}
	printk(KERN_INFO "HiSax: %s config irq:%d base:0x%X\n",
		(cs->subtyp == AVM_FRITZ_PCI) ? "AVM Fritz!PCI" : "AVM Fritz!PnP",
		cs->irq, cs->hw.avm.cfg_reg);

	setup_isac(cs);
	cs->readisac = &ReadISAC;
	cs->writeisac = &WriteISAC;
	cs->readisacfifo = &ReadISACfifo;
	cs->writeisacfifo = &WriteISACfifo;
	cs->BC_Send_Data = &hdlc_fill_fifo;
	cs->cardmsg = &AVM_card_msg;
	cs->irq_func = &avm_pcipnp_interrupt;
	cs->writeisac(cs, ISAC_MASK, 0xFF);
	ISACVersion(cs, (cs->subtyp == AVM_FRITZ_PCI) ? "AVM PCI:" : "AVM PnP:");
	return (1);
}

#ifndef __ISAPNP__

static int __devinit avm_pnp_setup(struct IsdnCardState *cs)
{
	return(1);	/* no-op: success */
}

#else

static struct pnp_card *pnp_avm_c __devinitdata = NULL;

/* Locate and activate the ISA PnP card; fills in cfg_reg/irq/subtyp.
   Returns 0 on failure, 1 when no PnP card present, 2 on success. */
static int __devinit avm_pnp_setup(struct IsdnCardState *cs)
{
	struct pnp_dev *pnp_avm_d = NULL;

	if (!isapnp_present())
		return(1);	/* no-op: success */

	if ((pnp_avm_c = pnp_find_card(
		ISAPNP_VENDOR('A', 'V', 'M'),
		ISAPNP_FUNCTION(0x0900), pnp_avm_c))) {
		if ((pnp_avm_d = pnp_find_dev(pnp_avm_c,
			ISAPNP_VENDOR('A', 'V', 'M'),
			ISAPNP_FUNCTION(0x0900), pnp_avm_d))) {
			int err;

			pnp_disable_dev(pnp_avm_d);
			err = pnp_activate_dev(pnp_avm_d);
			if (err<0) {
				printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
					__func__, err);
				return(0);
			}
			cs->hw.avm.cfg_reg =
				pnp_port_start(pnp_avm_d, 0);
			cs->irq = pnp_irq(pnp_avm_d, 0);
			if (!cs->irq) {
				printk(KERN_ERR "FritzPnP:No IRQ\n");
				return(0);
			}
			if (!cs->hw.avm.cfg_reg) {
				printk(KERN_ERR "FritzPnP:No IO address\n");
				return(0);
			}
			cs->subtyp = AVM_FRITZ_PNP;

			return (2);	/* goto 'ready' label */
		}
	}
	return (1);
}

#endif /* __ISAPNP__ */

#ifndef CONFIG_PCI_LEGACY

static int __devinit avm_pci_setup(struct IsdnCardState *cs)
{
	return(1);	/* no-op: success */
}

#else

static struct pci_dev *dev_avm __devinitdata = NULL;

/* Locate and enable the Fritz!PCI card; fills in cfg_reg/irq/subtyp.
   Returns 1 on success, 0 on failure. */
static int __devinit avm_pci_setup(struct IsdnCardState *cs)
{
	if ((dev_avm = pci_find_device(PCI_VENDOR_ID_AVM,
		PCI_DEVICE_ID_AVM_A1, dev_avm))) {

		if (pci_enable_device(dev_avm))
			return(0);

		cs->irq = dev_avm->irq;
		if (!cs->irq) {
			printk(KERN_ERR "FritzPCI: No IRQ for PCI card found\n");
			return(0);
		}

		cs->hw.avm.cfg_reg = pci_resource_start(dev_avm, 1);
		if (!cs->hw.avm.cfg_reg) {
			printk(KERN_ERR "FritzPCI: No IO-Adr for PCI card found\n");
			return(0);
		}

		cs->subtyp = AVM_FRITZ_PCI;
	} else {
		printk(KERN_WARNING "FritzPCI: No PCI card found\n");
		return(0);
	}

	cs->irq_flags |= IRQF_SHARED;

	return (1);
}

#endif /* CONFIG_PCI_LEGACY */

/* NOTE(review): definition continues beyond this chunk of the file. */
int __devinit
setup_avm_pcipnp(struct IsdnCard *card)
{
	struct IsdnCardState *cs =
card->cs; char tmp[64]; int rc; strcpy(tmp, avm_pci_rev); printk(KERN_INFO "HiSax: AVM PCI driver Rev. %s\n", HiSax_getrev(tmp)); if (cs->typ != ISDN_CTYPE_FRITZPCI) return (0); if (card->para[1]) { /* old manual method */ cs->hw.avm.cfg_reg = card->para[1]; cs->irq = card->para[0]; cs->subtyp = AVM_FRITZ_PNP; goto ready; } rc = avm_pnp_setup(cs); if (rc < 1) return (0); if (rc == 2) goto ready; rc = avm_pci_setup(cs); if (rc < 1) return (0); ready: return avm_setup_rest(cs); }
{ "language": "C" }
/*
 * FreeRTOS Kernel V10.4.1
 * Copyright (C) 2020 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * http://www.FreeRTOS.org
 * http://aws.amazon.com/freertos
 *
 * 1 tab == 4 spaces!
 */

#ifndef FREERTOS_CONFIG_H
#define FREERTOS_CONFIG_H

#include <lpc210x.h>

/*-----------------------------------------------------------
 * Application specific definitions.
 *
 * These definitions should be adjusted for your particular hardware and
 * application requirements.
 *
 * THESE PARAMETERS ARE DESCRIBED WITHIN THE 'CONFIGURATION' SECTION OF THE
 * FreeRTOS API DOCUMENTATION AVAILABLE ON THE FreeRTOS.org WEB SITE.
 *
 * See http://www.freertos.org/a00110.html
 *----------------------------------------------------------*/

/* Scheduler behaviour. */
#define configUSE_PREEMPTION		1
#define configUSE_IDLE_HOOK			0
#define configUSE_TICK_HOOK			0
#define configCPU_CLOCK_HZ			( ( unsigned long ) 58982400 )	/* =14.7456MHz xtal multiplied by 4 using the PLL. */
#define configTICK_RATE_HZ			( ( TickType_t ) 1000 )			/* 1 ms tick. */
#define configMAX_PRIORITIES		( 5 )
#define configMINIMAL_STACK_SIZE	( ( unsigned short ) 128 )		/* In words, not bytes. */
#define configTOTAL_HEAP_SIZE		( ( size_t ) ( 23 * 1024 ) )
#define configMAX_TASK_NAME_LEN		( 16 )
#define configUSE_TRACE_FACILITY	0
#define configUSE_16_BIT_TICKS		0
#define configIDLE_SHOULD_YIELD		1

/* Co-routine definitions. */
#define configUSE_CO_ROUTINES 		0
#define configMAX_CO_ROUTINE_PRIORITIES ( 2 )

/* Set the following definitions to 1 to include the API function, or zero
to exclude the API function. */
#define INCLUDE_vTaskPrioritySet		1
#define INCLUDE_uxTaskPriorityGet		1
#define INCLUDE_vTaskDelete				1
#define INCLUDE_vTaskCleanUpResources	0
#define INCLUDE_vTaskSuspend			1
#define INCLUDE_vTaskDelayUntil			1
#define INCLUDE_vTaskDelay				1

#endif /* FREERTOS_CONFIG_H */
{ "language": "C" }
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 1.1/GPL 2.0/LGPL 2.1 * * The contents of this file are subject to the Mozilla Public License Version * 1.1 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Netscape Portable Runtime (NSPR). * * The Initial Developer of the Original Code is * Netscape Communications Corporation. * Portions created by the Initial Developer are Copyright (C) 1998-2000 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Alternatively, the contents of this file may be used under the terms of * either the GNU General Public License Version 2 or later (the "GPL"), or * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), * in which case the provisions of the GPL or the LGPL are applicable instead * of those above. If you wish to allow use of your version of this file only * under the terms of either the GPL or the LGPL, and not to allow others to * use your version of this file under the terms of the MPL, indicate your * decision by deleting the provisions above and replace them with the notice * and other provisions required by the GPL or the LGPL. If you do not delete * the provisions above, a recipient may use your version of this file under * the terms of any one of the MPL, the GPL or the LGPL. 
 *
 * ***** END LICENSE BLOCK ***** */

/*
 * NSPR CPU/compiler configuration for SGI IRIX: 32-bit types
 * (4-byte int/long/pointer per the values below), big-endian byte order,
 * and the NSPR 1.0 backward-compatibility aliases.
 */

#ifndef nspr_cpucfg___
#define nspr_cpucfg___

#ifndef _SGI_MP_SOURCE
#define _SGI_MP_SOURCE
#endif

#ifndef XP_UNIX
#define XP_UNIX
#endif

#ifndef IRIX
#define IRIX
#endif

/* Big-endian target. */
#undef  IS_LITTLE_ENDIAN
#define IS_BIG_ENDIAN 1

#define PR_AF_INET6 24  /* same as AF_INET6 */

/* Sizes, in bytes, of the fundamental types. */
#define PR_BYTES_PER_BYTE   1
#define PR_BYTES_PER_SHORT  2
#define PR_BYTES_PER_INT    4
#define PR_BYTES_PER_INT64  8
#define PR_BYTES_PER_LONG   4
#define PR_BYTES_PER_FLOAT  4
#define PR_BYTES_PER_DOUBLE 8
#define PR_BYTES_PER_WORD   4
#define PR_BYTES_PER_DWORD  8

/* Sizes, in bits, of the fundamental types. */
#define PR_BITS_PER_BYTE    8
#define PR_BITS_PER_SHORT   16
#define PR_BITS_PER_INT     32
#define PR_BITS_PER_INT64   64
#define PR_BITS_PER_LONG    32
#define PR_BITS_PER_FLOAT   32
#define PR_BITS_PER_DOUBLE  64
#define PR_BITS_PER_WORD    32

/* log2 of the bit sizes above. */
#define PR_BITS_PER_BYTE_LOG2   3
#define PR_BITS_PER_SHORT_LOG2  4
#define PR_BITS_PER_INT_LOG2    5
#define PR_BITS_PER_INT64_LOG2  6
#define PR_BITS_PER_LONG_LOG2   5
#define PR_BITS_PER_FLOAT_LOG2  5
#define PR_BITS_PER_DOUBLE_LOG2 6
#define PR_BITS_PER_WORD_LOG2   5

#define PR_BYTES_PER_WORD_LOG2  2
#define PR_BYTES_PER_DWORD_LOG2 3

/* Required alignments, in bytes. */
#define PR_ALIGN_OF_SHORT   2
#define PR_ALIGN_OF_INT     4
#define PR_ALIGN_OF_LONG    4
#define PR_ALIGN_OF_INT64   8
#define PR_ALIGN_OF_FLOAT   4
#define PR_ALIGN_OF_DOUBLE  8
#define PR_ALIGN_OF_POINTER 4
#define PR_ALIGN_OF_WORD    4

#define HAVE_LONG_LONG
#define HAVE_ALIGNED_DOUBLES
#define HAVE_ALIGNED_LONGLONGS
#define _PR_POLL_BACKCOMPAT

/* Unprefixed aliases kept for NSPR 1.0 source compatibility. */
#ifndef NO_NSPR_10_SUPPORT

#define BYTES_PER_BYTE      PR_BYTES_PER_BYTE
#define BYTES_PER_SHORT     PR_BYTES_PER_SHORT
#define BYTES_PER_INT       PR_BYTES_PER_INT
#define BYTES_PER_INT64     PR_BYTES_PER_INT64
#define BYTES_PER_LONG      PR_BYTES_PER_LONG
#define BYTES_PER_FLOAT     PR_BYTES_PER_FLOAT
#define BYTES_PER_DOUBLE    PR_BYTES_PER_DOUBLE
#define BYTES_PER_WORD      PR_BYTES_PER_WORD
#define BYTES_PER_DWORD     PR_BYTES_PER_DWORD

#define BITS_PER_BYTE       PR_BITS_PER_BYTE
#define BITS_PER_SHORT      PR_BITS_PER_SHORT
#define BITS_PER_INT        PR_BITS_PER_INT
#define BITS_PER_INT64      PR_BITS_PER_INT64
#define BITS_PER_LONG       PR_BITS_PER_LONG
#define BITS_PER_FLOAT      PR_BITS_PER_FLOAT
#define BITS_PER_DOUBLE     PR_BITS_PER_DOUBLE
#define BITS_PER_WORD       PR_BITS_PER_WORD

#define BITS_PER_BYTE_LOG2   PR_BITS_PER_BYTE_LOG2
#define BITS_PER_SHORT_LOG2  PR_BITS_PER_SHORT_LOG2
#define BITS_PER_INT_LOG2    PR_BITS_PER_INT_LOG2
#define BITS_PER_INT64_LOG2  PR_BITS_PER_INT64_LOG2
#define BITS_PER_LONG_LOG2   PR_BITS_PER_LONG_LOG2
#define BITS_PER_FLOAT_LOG2  PR_BITS_PER_FLOAT_LOG2
#define BITS_PER_DOUBLE_LOG2 PR_BITS_PER_DOUBLE_LOG2
#define BITS_PER_WORD_LOG2   PR_BITS_PER_WORD_LOG2

#define ALIGN_OF_SHORT       PR_ALIGN_OF_SHORT
#define ALIGN_OF_INT         PR_ALIGN_OF_INT
#define ALIGN_OF_LONG        PR_ALIGN_OF_LONG
#define ALIGN_OF_INT64       PR_ALIGN_OF_INT64
#define ALIGN_OF_FLOAT       PR_ALIGN_OF_FLOAT
#define ALIGN_OF_DOUBLE      PR_ALIGN_OF_DOUBLE
#define ALIGN_OF_POINTER     PR_ALIGN_OF_POINTER
#define ALIGN_OF_WORD        PR_ALIGN_OF_WORD

#define BYTES_PER_WORD_LOG2  PR_BYTES_PER_WORD_LOG2
#define BYTES_PER_DWORD_LOG2 PR_BYTES_PER_DWORD_LOG2
#define WORDS_PER_DWORD_LOG2 PR_WORDS_PER_DWORD_LOG2

#endif /* NO_NSPR_10_SUPPORT */

#endif /* nspr_cpucfg___ */
{ "language": "C" }
#include <stdlib.h> #include <3ds/types.h> #include <3ds/result.h> #include <3ds/svc.h> #include <3ds/srv.h> #include <3ds/synchronization.h> #include <3ds/services/ptmu.h> #include <3ds/ipc.h> static Handle ptmuHandle; static int ptmuRefCount; Result ptmuInit(void) { if (AtomicPostIncrement(&ptmuRefCount)) return 0; Result res = srvGetServiceHandle(&ptmuHandle, "ptm:u"); if (R_FAILED(res)) AtomicDecrement(&ptmuRefCount); return res; } void ptmuExit(void) { if (AtomicDecrement(&ptmuRefCount)) return; svcCloseHandle(ptmuHandle); } Result PTMU_GetShellState(u8 *out) { Result ret=0; u32 *cmdbuf = getThreadCommandBuffer(); cmdbuf[0] = IPC_MakeHeader(0x6,0,0); // 0x60000 if(R_FAILED(ret = svcSendSyncRequest(ptmuHandle)))return ret; *out = (u8)cmdbuf[2] & 0xFF; return (Result)cmdbuf[1]; } Result PTMU_GetBatteryLevel(u8 *out) { Result ret=0; u32 *cmdbuf = getThreadCommandBuffer(); cmdbuf[0] = IPC_MakeHeader(0x7,0,0); // 0x70000 if(R_FAILED(ret = svcSendSyncRequest(ptmuHandle)))return ret; *out = (u8)cmdbuf[2] & 0xFF; return (Result)cmdbuf[1]; } Result PTMU_GetBatteryChargeState(u8 *out) { Result ret=0; u32 *cmdbuf = getThreadCommandBuffer(); cmdbuf[0] = IPC_MakeHeader(0x8,0,0); // 0x80000 if(R_FAILED(ret = svcSendSyncRequest(ptmuHandle)))return ret; *out = (u8)cmdbuf[2] & 0xFF; return (Result)cmdbuf[1]; } Result PTMU_GetPedometerState(u8 *out) { Result ret=0; u32 *cmdbuf = getThreadCommandBuffer(); cmdbuf[0] = IPC_MakeHeader(0x9,0,0); // 0x90000 if(R_FAILED(ret = svcSendSyncRequest(ptmuHandle)))return ret; *out = (u8)cmdbuf[2] & 0xFF; return (Result)cmdbuf[1]; } Result PTMU_GetTotalStepCount(u32 *steps) { Result ret=0; u32 *cmdbuf = getThreadCommandBuffer(); cmdbuf[0] = IPC_MakeHeader(0xC,0,0); // 0xC0000 if(R_FAILED(ret = svcSendSyncRequest(ptmuHandle)))return ret; *steps = cmdbuf[2]; return (Result)cmdbuf[1]; } Result PTMU_GetAdapterState(bool *out) { Result ret=0; u32 *cmdbuf = getThreadCommandBuffer(); cmdbuf[0] = IPC_MakeHeader(0x5,0,0); // 0x50000 if(R_FAILED(ret = 
svcSendSyncRequest(ptmuHandle)))return ret; *out = cmdbuf[2] & 0xFF; return (Result)cmdbuf[1]; }
{ "language": "C" }
#ifndef __LIMITS #define __LIMITS #define CHAR_BIT 8 #define MB_LEN_MAX 1 #define UCHAR_MAX 0xff #define USHRT_MAX 0xffff #define UINT_MAX (~0U) #define ULONG_MAX (~0UL) #define SCHAR_MAX 0x7f #define SHRT_MAX 0x7fff #define INT_MAX 0x7fffffff #define LONG_MAX 0x7fffffffL #define SCHAR_MIN (-SCHAR_MAX-1) #define SHRT_MIN (-SHRT_MAX-1) #define INT_MIN (-INT_MAX-1) #define LONG_MIN (-LONG_MAX-1) #ifdef __CHAR_UNSIGNED__ #define CHAR_MAX UCHAR_MAX #define CHAR_MIN 0 #else #define CHAR_MAX SCHAR_MAX #define CHAR_MIN SCHAR_MIN #endif #endif /* __LIMITS */
{ "language": "C" }
/***************************************************************************** Copyright (c) 2014, Intel Corp. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
***************************************************************************** * Contents: Native high-level C interface to LAPACK function ssygvd * Author: Intel Corporation * Generated November 2015 *****************************************************************************/ #include "lapacke_utils.h" lapack_int LAPACKE_ssygvd( int matrix_layout, lapack_int itype, char jobz, char uplo, lapack_int n, float* a, lapack_int lda, float* b, lapack_int ldb, float* w ) { lapack_int info = 0; lapack_int liwork = -1; lapack_int lwork = -1; lapack_int* iwork = NULL; float* work = NULL; lapack_int iwork_query; float work_query; if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) { LAPACKE_xerbla( "LAPACKE_ssygvd", -1 ); return -1; } #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { /* Optionally check input matrices for NaNs */ if( LAPACKE_sge_nancheck( matrix_layout, n, n, a, lda ) ) { return -6; } if( LAPACKE_sge_nancheck( matrix_layout, n, n, b, ldb ) ) { return -8; } } #endif /* Query optimal working array(s) size */ info = LAPACKE_ssygvd_work( matrix_layout, itype, jobz, uplo, n, a, lda, b, ldb, w, &work_query, lwork, &iwork_query, liwork ); if( info != 0 ) { goto exit_level_0; } liwork = iwork_query; lwork = (lapack_int)work_query; /* Allocate memory for work arrays */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * liwork ); if( iwork == NULL ) { info = LAPACK_WORK_MEMORY_ERROR; goto exit_level_0; } work = (float*)LAPACKE_malloc( sizeof(float) * lwork ); if( work == NULL ) { info = LAPACK_WORK_MEMORY_ERROR; goto exit_level_1; } /* Call middle-level interface */ info = LAPACKE_ssygvd_work( matrix_layout, itype, jobz, uplo, n, a, lda, b, ldb, w, work, lwork, iwork, liwork ); /* Release memory and exit */ LAPACKE_free( work ); exit_level_1: LAPACKE_free( iwork ); exit_level_0: if( info == LAPACK_WORK_MEMORY_ERROR ) { LAPACKE_xerbla( "LAPACKE_ssygvd", info ); } return info; }
{ "language": "C" }
// SPDX-License-Identifier: GPL-2.0 /* * HWA Host Controller Driver * Wire Adapter Control/Data Streaming Iface (WUSB1.0[8]) * * Copyright (C) 2005-2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This driver implements a USB Host Controller (struct usb_hcd) for a * Wireless USB Host Controller based on the Wireless USB 1.0 * Host-Wire-Adapter specification (in layman terms, a USB-dongle that * implements a Wireless USB host). * * Check out the Design-overview.txt file in the source documentation * for other details on the implementation. * * Main blocks: * * driver glue with the driver API, workqueue daemon * * lc RC instance life cycle management (create, destroy...) * * hcd glue with the USB API Host Controller Interface API. * * nep Notification EndPoint management: collect notifications * and queue them with the workqueue daemon. * * Handle notifications as coming from the NEP. Sends them * off others to their respective modules (eg: connect, * disconnect and reset go to devconnect). * * rpipe Remote Pipe management; rpipe is what we use to write * to an endpoint on a WUSB device that is connected to a * HWA RC. * * xfer Transfer management -- this is all the code that gets a * buffer and pushes it to a device (or viceversa). * * * Some day a lot of this code will be shared between this driver and * the drivers for DWA (xfer, rpipe). * * All starts at driver.c:hwahc_probe(), when one of this guys is * connected. hwahc_disconnect() stops it. * * During operation, the main driver is devices connecting or * disconnecting. They cause the HWA RC to send notifications into * nep.c:hwahc_nep_cb() that will dispatch them to * notif.c:wa_notif_dispatch(). From there they will fan to cause * device connects, disconnects, etc. * * Note much of the activity is difficult to follow. For example a * device connect goes to devconnect, which will cause the "fake" root * hub port to show a connect and stop there. 
Then hub_wq will notice
 * and call into the rh.c:hwahc_rc_port_reset() code to authenticate
 * the device (and this might require user intervention) and enable
 * the port.
 *
 * We also have a timer workqueue going from devconnect.c that
 * schedules in hwahc_devconnect_create().
 *
 * The rest of the traffic is in the usual entry points of a USB HCD,
 * which are hooked up in driver.c:hwahc_rc_driver, and defined in
 * hcd.c.
 */
#ifndef __HWAHC_INTERNAL_H__
#define __HWAHC_INTERNAL_H__

#include <linux/completion.h>
#include <linux/usb.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/uwb.h>
#include <linux/usb/wusb.h>
#include <linux/usb/wusb-wa.h>

struct wusbhc;
struct wahc;
extern void wa_urb_enqueue_run(struct work_struct *ws);
extern void wa_process_errored_transfers_run(struct work_struct *ws);

/**
 * RPipe instance
 *
 * @descr's fields are kept in LE, as we need to send it back and
 * forth.
 *
 * @wa is referenced when set
 *
 * @segs_available is the number of requests segments that still can
 *                 be submitted to the controller without overloading
 *                 it. It is initialized to descr->wRequests when
 *                 aiming.
 *
 * A rpipe supports a max of descr->wRequests at the same time; before
 * submitting seg_lock has to be taken. If segs_avail > 0, then we can
 * submit; if not, we have to queue them.
 */
struct wa_rpipe {
	struct kref refcnt;
	struct usb_rpipe_descriptor descr;
	struct usb_host_endpoint *ep;
	struct wahc *wa;
	spinlock_t seg_lock;
	struct list_head seg_list;
	struct list_head list_node;
	atomic_t segs_available;
	u8 buffer[1];			/* For reads/writes on USB */
};

/* State machine of the Data Transfer In endpoint URB. */
enum wa_dti_state {
	WA_DTI_TRANSFER_RESULT_PENDING,
	WA_DTI_ISOC_PACKET_STATUS_PENDING,
	WA_DTI_BUF_IN_DATA_PENDING
};

enum wa_quirks {
	/*
	 * The Alereon HWA expects the data frames in isochronous transfer
	 * requests to be concatenated and not sent as separate packets.
	 */
	WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC	= 0x01,
	/*
	 * The Alereon HWA can be instructed to not send transfer
	 * notifications as an optimization.
	 */
	WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS	= 0x02,
};

enum wa_vendor_specific_requests {
	WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS = 0x4C,
	WA_REQ_ALEREON_FEATURE_SET = 0x01,
	WA_REQ_ALEREON_FEATURE_CLEAR = 0x00,
};

#define WA_MAX_BUF_IN_URBS	4

/**
 * Instance of a HWA Host Controller
 *
 * Except where a more specific lock/mutex applies or atomic, all
 * fields protected by @mutex.
 *
 * @wa_descr   Can be accessed without locking because it is in
 *             the same area where the device descriptors were
 *             read, so it is guaranteed to exist unmodified while
 *             the device exists.
 *
 *             Endianess has been converted to CPU's.
 *
 * @nep_* can be accessed without locking as its processing is
 *        serialized; we submit a NEP URB and it comes to
 *        hwahc_nep_cb(), which won't issue another URB until it is
 *        done processing it.
 *
 * @xfer_list:
 *
 *   List of active transfers to verify existence from a xfer id
 *   gotten from the xfer result message. Can't use urb->list because
 *   it goes by endpoint, and we don't know the endpoint at the time
 *   when we get the xfer result message. We can't really rely on the
 *   pointer (will have to change for 64 bits) as the xfer id is 32 bits.
 *
 * @xfer_delayed_list:   List of transfers that need to be started
 *                       (with a workqueue, because they were
 *                       submitted from an atomic context).
 *
 * FIXME: this needs to be layered up: a wusbhc layer (for sharing
 *        commonalities with WHCI), a wa layer (for sharing
 *        commonalities with DWA-RC).
 */
struct wahc {
	struct usb_device *usb_dev;
	struct usb_interface *usb_iface;

	/* HC to deliver notifications */
	union {
		struct wusbhc *wusb;
		struct dwahc *dwa;
	};

	const struct usb_endpoint_descriptor *dto_epd, *dti_epd;
	const struct usb_wa_descriptor *wa_descr;

	struct urb *nep_urb;		/* Notification EndPoint [lockless] */
	struct edc nep_edc;
	void *nep_buffer;
	size_t nep_buffer_size;

	atomic_t notifs_queued;

	u16 rpipes;
	unsigned long *rpipe_bm;	/* rpipe usage bitmap */
	struct list_head rpipe_delayed_list;	/* delayed RPIPES. */
	spinlock_t rpipe_lock;	/* protect rpipe_bm and delayed list */
	struct mutex rpipe_mutex;	/* assigning resources to endpoints */

	/*
	 * dti_state is used to track the state of the dti_urb. When dti_state
	 * is WA_DTI_ISOC_PACKET_STATUS_PENDING, dti_isoc_xfer_in_progress and
	 * dti_isoc_xfer_seg identify which xfer the incoming isoc packet
	 * status refers to.
	 */
	enum wa_dti_state dti_state;
	u32 dti_isoc_xfer_in_progress;
	u8  dti_isoc_xfer_seg;
	struct urb *dti_urb;		/* URB for reading xfer results */
	/* URBs for reading data in */
	struct urb buf_in_urbs[WA_MAX_BUF_IN_URBS];
	int active_buf_in_urbs;		/* number of buf_in_urbs active. */
	struct edc dti_edc;		/* DTI error density counter */
	void *dti_buf;
	size_t dti_buf_size;

	unsigned long dto_in_use;	/* protect dto endoint serialization */

	s32 status;			/* For reading status */

	struct list_head xfer_list;
	struct list_head xfer_delayed_list;
	struct list_head xfer_errored_list;
	/*
	 * lock for the above xfer lists. Can be taken while a xfer->lock is
	 * held but not in the reverse order.
	 */
	spinlock_t xfer_list_lock;
	struct work_struct xfer_enqueue_work;
	struct work_struct xfer_error_work;
	atomic_t xfer_id_count;

	kernel_ulong_t quirks;
};

extern int wa_create(struct wahc *wa, struct usb_interface *iface,
	kernel_ulong_t);
extern void __wa_destroy(struct wahc *wa);
extern int wa_dti_start(struct wahc *wa);
void wa_reset_all(struct wahc *wa);

/* Miscellaneous constants */
enum {
	/** Max number of EPROTO errors we tolerate on the NEP in a
	 * period of time */
	HWAHC_EPROTO_MAX = 16,
	/** Period of time for EPROTO errors (in jiffies) */
	HWAHC_EPROTO_PERIOD = 4 * HZ,
};

/* Notification endpoint handling */
extern int wa_nep_create(struct wahc *, struct usb_interface *);
extern void wa_nep_destroy(struct wahc *);

/* (Re)submit the notification endpoint URB to listen for events. */
static inline int wa_nep_arm(struct wahc *wa, gfp_t gfp_mask)
{
	struct urb *urb = wa->nep_urb;
	urb->transfer_buffer = wa->nep_buffer;
	urb->transfer_buffer_length = wa->nep_buffer_size;
	return usb_submit_urb(urb, gfp_mask);
}

static inline void wa_nep_disarm(struct wahc *wa)
{
	usb_kill_urb(wa->nep_urb);
}

/* RPipes */
static inline void wa_rpipe_init(struct wahc *wa)
{
	INIT_LIST_HEAD(&wa->rpipe_delayed_list);
	spin_lock_init(&wa->rpipe_lock);
	mutex_init(&wa->rpipe_mutex);
}

/* Initialise all the lockless/locked sub-structures of a wahc. */
static inline void wa_init(struct wahc *wa)
{
	int index;

	edc_init(&wa->nep_edc);
	atomic_set(&wa->notifs_queued, 0);
	wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
	wa_rpipe_init(wa);
	edc_init(&wa->dti_edc);
	INIT_LIST_HEAD(&wa->xfer_list);
	INIT_LIST_HEAD(&wa->xfer_delayed_list);
	INIT_LIST_HEAD(&wa->xfer_errored_list);
	spin_lock_init(&wa->xfer_list_lock);
	INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
	INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
	wa->dto_in_use = 0;
	atomic_set(&wa->xfer_id_count, 1);
	/* init the buf in URBs */
	for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
		usb_init_urb(&(wa->buf_in_urbs[index]));
	wa->active_buf_in_urbs = 0;
}

/**
 * Destroy a pipe (when refcount drops to zero)
 *
 * Assumes it has been moved to the "QUIESCING" state.
 */
struct wa_xfer;
extern void rpipe_destroy(struct kref *_rpipe);
static inline
void __rpipe_get(struct wa_rpipe *rpipe)
{
	kref_get(&rpipe->refcnt);
}
extern int rpipe_get_by_ep(struct wahc *, struct usb_host_endpoint *,
			   struct urb *, gfp_t);
static inline void rpipe_put(struct wa_rpipe *rpipe)
{
	kref_put(&rpipe->refcnt, rpipe_destroy);
}
extern void rpipe_ep_disable(struct wahc *, struct usb_host_endpoint *);
extern void rpipe_clear_feature_stalled(struct wahc *,
			struct usb_host_endpoint *);
extern int wa_rpipes_create(struct wahc *);
extern void wa_rpipes_destroy(struct wahc *);
static inline void rpipe_avail_dec(struct wa_rpipe *rpipe)
{
	atomic_dec(&rpipe->segs_available);
}

/**
 * Returns true if the rpipe is ready to submit more segments.
 */
static inline int rpipe_avail_inc(struct wa_rpipe *rpipe)
{
	return atomic_inc_return(&rpipe->segs_available) > 0
		&& !list_empty(&rpipe->seg_list);
}

/* Transferring data */
extern int wa_urb_enqueue(struct wahc *, struct usb_host_endpoint *,
			  struct urb *, gfp_t);
extern int wa_urb_dequeue(struct wahc *, struct urb *, int);
extern void wa_handle_notif_xfer(struct wahc *, struct wa_notif_hdr *);

/* Misc
 *
 * FIXME: Refcounting for the actual @hwahc object is not correct; I
 *        mean, this should be refcounting on the HCD underneath, but
 *        it is not. In any case, the semantics for HCD refcounting
 *        are *weird*...on refcount reaching zero it just frees
 *        it...no RC specific function is called...unless I miss
 *        something.
 *
 * FIXME: has to go away in favour of a 'struct' hcd based solution
 */
static inline struct wahc *wa_get(struct wahc *wa)
{
	usb_get_intf(wa->usb_iface);
	return wa;
}

static inline void wa_put(struct wahc *wa)
{
	usb_put_intf(wa->usb_iface);
}

/* Issue a class-specific SET/CLEAR_FEATURE (op != 0 => set) on the WA
 * interface. */
static inline int __wa_feature(struct wahc *wa, unsigned op, u16 feature)
{
	return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
			op ? USB_REQ_SET_FEATURE : USB_REQ_CLEAR_FEATURE,
			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
			feature,
			wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
			NULL, 0, USB_CTRL_SET_TIMEOUT);
}

static inline int __wa_set_feature(struct wahc *wa, u16 feature)
{
	return  __wa_feature(wa, 1, feature);
}

static inline int __wa_clear_feature(struct wahc *wa, u16 feature)
{
	return __wa_feature(wa, 0, feature);
}

/**
 * Return the status of a Wire Adapter
 *
 * @wa:		Wire Adapter instance
 * @returns     < 0 errno code on error, or status bitmap as described
 *              in WUSB1.0[8.3.1.6].
 *
 * NOTE: need malloc, some arches don't take USB from the stack
 */
static inline
s32 __wa_get_status(struct wahc *wa)
{
	s32 result;
	result = usb_control_msg(
		wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
		USB_REQ_GET_STATUS,
		USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
		0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
		&wa->status, sizeof(wa->status), USB_CTRL_GET_TIMEOUT);
	if (result >= 0)
		result = wa->status;
	return result;
}

/**
 * Waits until the Wire Adapter's status matches @mask/@value
 *
 * @wa:		Wire Adapter instance.
 * @returns     < 0 errno code on error, otherwise status.
 *
 * Loop until the WAs status matches the mask and value (status & mask
 * == value). Timeout if it doesn't happen.
 *
 * FIXME: is there an official specification on how long status
 *        changes can take?
 */
static inline s32 __wa_wait_status(struct wahc *wa, u32 mask, u32 value)
{
	s32 result;
	unsigned loops = 10;	/* up to ~500 ms total (10 x 50 ms sleeps) */
	do {
		msleep(50);
		result = __wa_get_status(wa);
		if ((result & mask) == value)
			break;
		if (loops-- == 0) {
			result = -ETIMEDOUT;
			break;
		}
	} while (result >= 0);
	return result;
}

/** Command @hwahc to stop, @returns 0 if ok, < 0 errno code on error */
static inline int __wa_stop(struct wahc *wa)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;

	result = __wa_clear_feature(wa, WA_ENABLE);
	if (result < 0 && result != -ENODEV) {
		dev_err(dev, "error commanding HC to stop: %d\n", result);
		goto out;
	}

	result = __wa_wait_status(wa, WA_ENABLE, 0);
	if (result < 0 && result != -ENODEV)
		dev_err(dev, "error waiting for HC to stop: %d\n", result);
out:
	/* Stop is best-effort: errors are logged above but 0 is always
	 * returned so teardown can proceed. */
	return 0;
}

#endif /* #ifndef __HWAHC_INTERNAL_H__ */
{ "language": "C" }
/***************************************************************************//** * @file * @brief EFM32TG11B_TIMER_CC register and bit field definitions ******************************************************************************* * # License * <b>Copyright 2020 Silicon Laboratories Inc. www.silabs.com</b> ******************************************************************************* * * SPDX-License-Identifier: Zlib * * The licensor of this software is Silicon Laboratories Inc. * * This software is provided 'as-is', without any express or implied * warranty. In no event will the authors be held liable for any damages * arising from the use of this software. * * Permission is granted to anyone to use this software for any purpose, * including commercial applications, and to alter it and redistribute it * freely, subject to the following restrictions: * * 1. The origin of this software must not be misrepresented; you must not * claim that you wrote the original software. If you use this software * in a product, an acknowledgment in the product documentation would be * appreciated but is not required. * 2. Altered source versions must be plainly marked as such, and must not be * misrepresented as being the original software. * 3. This notice may not be removed or altered from any source distribution. * ******************************************************************************/ #if defined(__ICCARM__) #pragma system_include /* Treat file as system include file. */ #elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) #pragma clang system_header /* Treat file as system include file. 
*/
#endif

/***************************************************************************//**
 * @addtogroup Parts
 * @{
 ******************************************************************************/

/***************************************************************************//**
 * @brief TIMER_CC TIMER CC Register
 * @ingroup EFM32TG11B_TIMER
 *
 * Memory overlay for one TIMER Compare/Capture channel's register group.
 * Field order and widths mirror the hardware register map exactly — do not
 * reorder, rename, or repack.  The CMSIS qualifiers encode access rights:
 * __IOM = read/write (volatile), __IM = read-only (volatile).
 ******************************************************************************/
typedef struct {
  __IOM uint32_t CTRL; /**< CC Channel Control Register */
  __IOM uint32_t CCV;  /**< CC Channel Value Register */
  __IM uint32_t  CCVP; /**< CC Channel Value Peek Register (read-only) */
  __IOM uint32_t CCVB; /**< CC Channel Buffer Register */
} TIMER_CC_TypeDef;

/** @} End of group Parts */
{ "language": "C" }
/* * FreeRTOS Kernel V10.3.1 * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * http://www.FreeRTOS.org * http://aws.amazon.com/freertos * * 1 tab == 4 spaces! */ /* * Creates all the demo application tasks, then starts the scheduler. The WEB * documentation provides more details of the demo application tasks. * * In addition to the standard demo tasks there are two tasks defined within * this file: * * 1 - The check task * The 'check' task is responsible for ensuring that all the standard demo * tasks are executing as expected. It only executes every three seconds, but * has the highest priority within the system so is guaranteed to get execution * time. Any errors discovered by the check task are latched until the * processor is reset. At the end of each cycle the check task sends either * a pass or fail message to the 'print' task for display on the LCD. 
 *
 * 2 - The print task
 * The print task is the LCD 'gatekeeper'.  That is, it is the only task that
 * should access the LCD directly so is always guaranteed exclusive (and
 * therefore consistent) access.  The print task simply blocks on a queue
 * to wait for messages from other tasks wishing to display text on the LCD.
 * When a message arrives it displays its contents on the LCD then blocks to
 * wait again.
 */

/* ST includes. */
#include "lcd.h"

/* Kernel includes. */
#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

/* Demo application includes. */
#include "partest.h"
#include "flash.h"
#include "integer.h"
#include "blocktim.h"
#include "BlockQ.h"
#include "comtest2.h"
#include "dynamic.h"

/* Demo application task priorities.  The check task runs highest so it is
guaranteed execution time every cycle. */
#define mainCHECK_TASK_PRIORITY		( tskIDLE_PRIORITY + 4 )
#define mainBLOCK_Q_PRIORITY		( tskIDLE_PRIORITY + 2 )
#define mainLED_TASK_PRIORITY		( tskIDLE_PRIORITY + 1 )
#define mainCOM_TEST_PRIORITY		( tskIDLE_PRIORITY + 1 )
#define mainLCD_TASK_PRIORITY		( tskIDLE_PRIORITY + 1 )

/* How often should we check the other tasks?  (In ticks; assumes the tick
rate makes this roughly three seconds — see configTICK_RATE_HZ.) */
#define mainCHECK_TASK_CYCLE_TIME	( 3000 )

/* The maximum offset into the pass and fail strings sent to the LCD.  An
offset is used a simple method of using a different column each time a message
is written to the LCD. */
#define mainMAX_WRITE_COLUMN		( 14 )

/* Baud rate used by the comtest tasks. */
#define mainCOM_TEST_BAUD_RATE		( 19200 )

/* The LED used by the comtest tasks. See the comtest.c file for more
information. */
#define mainCOM_TEST_LED			( 3 )

/* The number of messages that can be queued for display on the LCD at any one
time. */
#define mainLCD_QUEUE_LENGTH		( 2 )

/* The time to wait when sending to mainLCD_QUEUE_LENGTH. */
#define mainNO_DELAY				( 0 )

/*-----------------------------------------------------------*/

/* The type that is posted to the LCD queue.  Note the queue copies the
struct itself (pointer + line number), not the string it points at, so the
pointed-to string must remain valid until displayed. */
typedef struct LCD_MESSAGE
{
	unsigned char *pucString;	/* Points to the string to be displayed. */
	unsigned char ucLine;		/* The line of the LCD that should be used. */
} LCDMessage;

/*-----------------------------------------------------------*/

/*
 * The task that executes at the highest priority and checks the operation of
 * all the other tasks in the system.  See the description at the top of the
 * file.
 */
static void vCheckTask( void *pvParameters );

/*
 * ST provided routine to configure the processor.
 */
static void prvSetupHardware(void);

/*
 * The only task that should access the LCD.  Other tasks wanting to write
 * to the LCD should send a message of type LCDMessage containing the
 * information to display to the print task.  The print task simply blocks
 * waiting for the arrival of such messages, displays the message, then blocks
 * again.
 */
static void vPrintTask( void *pvParameters );

/*-----------------------------------------------------------*/

/* The queue used to communicate with the LCD print task. */
static QueueHandle_t xLCDQueue;

/*-----------------------------------------------------------*/

/* Create all the demo application tasks, then start the scheduler. */
int main( void )
{
	/* Perform any hardware setup necessary. */
	prvSetupHardware();
	vParTestInitialise();

	/* Create the queue used to communicate with the LCD print task. */
	xLCDQueue = xQueueCreate( mainLCD_QUEUE_LENGTH, sizeof( LCDMessage ) );

	/* Create the standard demo application tasks.  See the WEB documentation
	for more information on these tasks. */
	vCreateBlockTimeTasks();
	vStartBlockingQueueTasks( mainBLOCK_Q_PRIORITY );
	vAltStartComTestTasks( mainCOM_TEST_PRIORITY, mainCOM_TEST_BAUD_RATE, mainCOM_TEST_LED );
	vStartDynamicPriorityTasks();
	vStartLEDFlashTasks( mainLED_TASK_PRIORITY );
	vStartIntegerMathTasks( tskIDLE_PRIORITY );

	/* Create the tasks defined within this file. */
	xTaskCreate( vPrintTask, "LCD", configMINIMAL_STACK_SIZE, NULL, mainLCD_TASK_PRIORITY, NULL );
	xTaskCreate( vCheckTask, "Check", configMINIMAL_STACK_SIZE, NULL, mainCHECK_TASK_PRIORITY, NULL );

	vTaskStartScheduler();

	/* Execution will only reach here if there was insufficient heap to
	start the scheduler. */
	return 0;
}
/*-----------------------------------------------------------*/

/* Periodic watchdog over the standard demo tasks.  Wakes every
mainCHECK_TASK_CYCLE_TIME ticks, polls each demo suite's "still running"
function, latches any failure in ulErrorDetected (it is static, so the FAIL
indication persists until reset), and posts PASS/FAIL to the LCD queue at a
shifting column offset. */
static void vCheckTask( void *pvParameters )
{
static unsigned long ulErrorDetected = pdFALSE;
TickType_t xLastExecutionTime;
/* NOTE(review): the offsets added below go up to mainMAX_WRITE_COLUMN (14),
so both strings must carry at least that much leading padding — the padding
may have been collapsed by whitespace mangling; confirm against the original
demo source. */
unsigned char *ucErrorMessage = ( unsigned char * )" FAIL";
unsigned char *ucSuccessMessage = ( unsigned char * )" PASS";
unsigned portBASE_TYPE uxColumn = mainMAX_WRITE_COLUMN;
LCDMessage xMessage;

	/* Initialise xLastExecutionTime so the first call to vTaskDelayUntil()
	works correctly. */
	xLastExecutionTime = xTaskGetTickCount();

	for( ;; )
	{
		/* Wait until it is time for the next cycle. */
		vTaskDelayUntil( &xLastExecutionTime, mainCHECK_TASK_CYCLE_TIME );

		/* Has an error been found in any of the standard demo tasks? */
		if( xAreIntegerMathsTaskStillRunning() != pdTRUE )
		{
			ulErrorDetected = pdTRUE;
		}

		if( xAreBlockTimeTestTasksStillRunning() != pdTRUE )
		{
			ulErrorDetected = pdTRUE;
		}

		if( xAreBlockingQueuesStillRunning() != pdTRUE )
		{
			ulErrorDetected = pdTRUE;
		}

		if( xAreComTestTasksStillRunning() != pdTRUE )
		{
			ulErrorDetected = pdTRUE;
		}

		if( xAreDynamicPriorityTasksStillRunning() != pdTRUE )
		{
			ulErrorDetected = pdTRUE;
		}

		/* Calculate the LCD line on which we would like the message to be
		displayed.  The column variable is used for convenience as it is
		incremented each cycle anyway — its low bit alternates the LCD line. */
		xMessage.ucLine = ( unsigned char ) ( uxColumn & 0x01 );

		/* The message displayed depends on whether an error was found or
		not.  Any discovered error is latched.  Here the column variable is
		used as an index into the text string as a simple way of moving the
		text from column to column. */
		if( ulErrorDetected == pdFALSE )
		{
			xMessage.pucString = ucSuccessMessage + uxColumn;
		}
		else
		{
			xMessage.pucString = ucErrorMessage + uxColumn;
		}

		/* Send the message to the print task for display.  mainNO_DELAY:
		if the queue is full the message is simply dropped this cycle. */
		xQueueSend( xLCDQueue, ( void * ) &xMessage, mainNO_DELAY );

		/* Make sure the message is printed in a different column the next
		time around. */
		uxColumn--;
		if( uxColumn == 0 )
		{
			uxColumn = mainMAX_WRITE_COLUMN;
		}
	}
}
/*-----------------------------------------------------------*/

/* LCD gatekeeper task: the only task that touches the LCD, guaranteeing
exclusive access.  Blocks indefinitely on xLCDQueue, then clears the display
and shows the received string on the requested line. */
static void vPrintTask( void *pvParameters )
{
LCDMessage xMessage;

	for( ;; )
	{
		/* Wait until a message arrives. */
		while( xQueueReceive( xLCDQueue, ( void * ) &xMessage, portMAX_DELAY ) != pdPASS );

		/* The message contains the text to display, and the line on which
		the text should be displayed. */
		LCD_Clear();
		LCD_DisplayString( xMessage.ucLine, xMessage.pucString, BlackText );
	}
}
/*-----------------------------------------------------------*/

/* ST provided routine: clocks, flash burst mode, GPIO voltage range,
peripheral clock gating, LCD splash screen and interrupt controller enable.
NOTE(review): if the 4 MHz oscillator fails to start the PLL/clock setup is
skipped but execution continues on the reset-default clock — presumably
acceptable for a demo; confirm intended behaviour. */
static void prvSetupHardware(void)
{
	ErrorStatus OSC4MStartUpStatus01;

	/* MRCC system reset */
	MRCC_DeInit();

	/* Wait for OSC4M start-up */
	OSC4MStartUpStatus01 = MRCC_WaitForOSC4MStartUp();

	if(OSC4MStartUpStatus01 == SUCCESS)
	{
		/* Set HCLK to 60MHz */
		MRCC_HCLKConfig(MRCC_CKSYS_Div1);

		/* Set CKTIM to 60MHz */
		MRCC_CKTIMConfig(MRCC_HCLK_Div1);

		/* Set PCLK to 30MHz */
		MRCC_PCLKConfig(MRCC_CKTIM_Div2);

		/* Enable Flash Burst mode */
		CFG_FLASHBurstConfig(CFG_FLASHBurst_Enable);

		/* Set CK_SYS to 60 MHz */
		MRCC_CKSYSConfig(MRCC_CKSYS_OSC4MPLL, MRCC_PLL_Mul_15);
	}

	/* GPIO pins optimized for 3V3 operation */
	MRCC_IOVoltageRangeConfig(MRCC_IOVoltageRange_3V3);

	/* GPIO clock source enable */
	MRCC_PeripheralClockConfig(MRCC_Peripheral_GPIO, ENABLE);

	/* EXTIT clock source enable */
	MRCC_PeripheralClockConfig(MRCC_Peripheral_EXTIT, ENABLE);

	/* TB clock source enable */
	MRCC_PeripheralClockConfig(MRCC_Peripheral_TB, ENABLE);

	/* Initialize the demonstration menu */
	LCD_Init();
	LCD_DisplayString(Line1, ( unsigned char * ) "www.FreeRTOS.org", BlackText);
	LCD_DisplayString(Line2, ( unsigned char * ) " STR750 Demo ", BlackText);

	EIC_IRQCmd(ENABLE);
}
/*-----------------------------------------------------------*/
{ "language": "C" }