650914.c
/*
* Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the OpenSSL license (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
#include <stdio.h>
#include "internal/cryptlib.h"
#include <openssl/buffer.h>
#include <openssl/bn.h>
#include <openssl/objects.h>
#include <openssl/x509.h>
#include <openssl/x509v3.h>
#include <openssl/rsa.h>
#include <openssl/dsa.h>
#ifndef OPENSSL_NO_STDIO
int X509_REQ_print_fp(FILE *fp, X509_REQ *x)
{
BIO *b;
int ret;
if ((b = BIO_new(BIO_s_file())) == NULL) {
X509err(X509_F_X509_REQ_PRINT_FP, ERR_R_BUF_LIB);
return 0;
}
BIO_set_fp(b, fp, BIO_NOCLOSE);
ret = X509_REQ_print(b, x);
BIO_free(b);
return ret;
}
#endif
int X509_REQ_print_ex(BIO *bp, X509_REQ *x, unsigned long nmflags,
unsigned long cflag)
{
long l;
int i;
EVP_PKEY *pkey;
STACK_OF(X509_EXTENSION) *exts;
char mlch = ' ';
int nmindent = 0;
if ((nmflags & XN_FLAG_SEP_MASK) == XN_FLAG_SEP_MULTILINE) {
mlch = '\n';
nmindent = 12;
}
if (nmflags == X509_FLAG_COMPAT)
nmindent = 16;
if (!(cflag & X509_FLAG_NO_HEADER)) {
if (BIO_write(bp, "Certificate Request:\n", 21) <= 0)
goto err;
if (BIO_write(bp, " Data:\n", 10) <= 0)
goto err;
}
if (!(cflag & X509_FLAG_NO_VERSION)) {
l = X509_REQ_get_version(x);
if (l >= 0 && l <= 2) {
if (BIO_printf(bp, "%8sVersion: %ld (0x%lx)\n", "", l + 1, (unsigned long)l) <= 0)
goto err;
} else {
if (BIO_printf(bp, "%8sVersion: Unknown (%ld)\n", "", l) <= 0)
goto err;
}
}
if (!(cflag & X509_FLAG_NO_SUBJECT)) {
if (BIO_printf(bp, " Subject:%c", mlch) <= 0)
goto err;
if (X509_NAME_print_ex(bp, X509_REQ_get_subject_name(x),
nmindent, nmflags) < 0)
goto err;
if (BIO_write(bp, "\n", 1) <= 0)
goto err;
}
if (!(cflag & X509_FLAG_NO_PUBKEY)) {
X509_PUBKEY *xpkey;
ASN1_OBJECT *koid;
if (BIO_write(bp, " Subject Public Key Info:\n", 33) <= 0)
goto err;
if (BIO_printf(bp, "%12sPublic Key Algorithm: ", "") <= 0)
goto err;
xpkey = X509_REQ_get_X509_PUBKEY(x);
X509_PUBKEY_get0_param(&koid, NULL, NULL, NULL, xpkey);
if (i2a_ASN1_OBJECT(bp, koid) <= 0)
goto err;
if (BIO_puts(bp, "\n") <= 0)
goto err;
pkey = X509_REQ_get0_pubkey(x);
if (pkey == NULL) {
BIO_printf(bp, "%12sUnable to load Public Key\n", "");
ERR_print_errors(bp);
} else {
EVP_PKEY_print_public(bp, pkey, 16, NULL);
}
}
if (!(cflag & X509_FLAG_NO_ATTRIBUTES)) {
/* attributes may not be present */
if (BIO_printf(bp, "%8sAttributes:\n", "") <= 0)
goto err;
if (X509_REQ_get_attr_count(x) == 0) {
if (BIO_printf(bp, "%12sa0:00\n", "") <= 0)
goto err;
} else {
for (i = 0; i < X509_REQ_get_attr_count(x); i++) {
ASN1_TYPE *at;
X509_ATTRIBUTE *a;
ASN1_BIT_STRING *bs = NULL;
ASN1_OBJECT *aobj;
int j, type = 0, count = 1, ii = 0;
a = X509_REQ_get_attr(x, i);
aobj = X509_ATTRIBUTE_get0_object(a);
if (X509_REQ_extension_nid(OBJ_obj2nid(aobj)))
continue;
if (BIO_printf(bp, "%12s", "") <= 0)
goto err;
if ((j = i2a_ASN1_OBJECT(bp, aobj)) > 0) {
ii = 0;
count = X509_ATTRIBUTE_count(a);
get_next:
at = X509_ATTRIBUTE_get0_type(a, ii);
type = at->type;
bs = at->value.asn1_string;
}
for (j = 25 - j; j > 0; j--)
if (BIO_write(bp, " ", 1) != 1)
goto err;
if (BIO_puts(bp, ":") <= 0)
goto err;
if ((type == V_ASN1_PRINTABLESTRING) ||
(type == V_ASN1_T61STRING) ||
(type == V_ASN1_UTF8STRING) ||
(type == V_ASN1_IA5STRING)) {
if (BIO_write(bp, (char *)bs->data, bs->length)
!= bs->length)
goto err;
BIO_puts(bp, "\n");
} else {
BIO_puts(bp, "unable to print attribute\n");
}
if (++ii < count)
goto get_next;
}
}
}
if (!(cflag & X509_FLAG_NO_EXTENSIONS)) {
exts = X509_REQ_get_extensions(x);
if (exts) {
BIO_printf(bp, "%8sRequested Extensions:\n", "");
for (i = 0; i < sk_X509_EXTENSION_num(exts); i++) {
ASN1_OBJECT *obj;
X509_EXTENSION *ex;
int critical;
ex = sk_X509_EXTENSION_value(exts, i);
if (BIO_printf(bp, "%12s", "") <= 0)
goto err;
obj = X509_EXTENSION_get_object(ex);
i2a_ASN1_OBJECT(bp, obj);
critical = X509_EXTENSION_get_critical(ex);
if (BIO_printf(bp, ": %s\n", critical ? "critical" : "") <= 0)
goto err;
if (!X509V3_EXT_print(bp, ex, cflag, 16)) {
BIO_printf(bp, "%16s", "");
ASN1_STRING_print(bp, X509_EXTENSION_get_data(ex));
}
if (BIO_write(bp, "\n", 1) <= 0)
goto err;
}
sk_X509_EXTENSION_pop_free(exts, X509_EXTENSION_free);
}
}
if (!(cflag & X509_FLAG_NO_SIGDUMP)) {
const X509_ALGOR *sig_alg;
const ASN1_BIT_STRING *sig;
X509_REQ_get0_signature(x, &sig, &sig_alg);
if (!X509_signature_print(bp, sig_alg, sig))
goto err;
}
return 1;
err:
X509err(X509_F_X509_REQ_PRINT_EX, ERR_R_BUF_LIB);
return 0;
}
int X509_REQ_print(BIO *bp, X509_REQ *x)
{
return X509_REQ_print_ex(bp, x, XN_FLAG_COMPAT, X509_FLAG_COMPAT);
}
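/*
* Illustrative usage sketch (editor's addition, not part of the original
* OpenSSL file): one way the printers above might be driven. The path
* "req.pem" and the helper name print_csr() are hypothetical;
* PEM_read_bio_X509_REQ() and BIO_new_file() come from <openssl/pem.h>
* and <openssl/bio.h>.
*/
#include <openssl/pem.h>

static int print_csr(const char *path)
{
    BIO *in, *out;
    X509_REQ *req;
    int ret = 0;

    if ((in = BIO_new_file(path, "r")) == NULL)
        return 0;
    /* Parse a PEM-encoded certificate request, e.g. "req.pem". */
    req = PEM_read_bio_X509_REQ(in, NULL, NULL, NULL);
    BIO_free(in);
    if (req == NULL)
        return 0;
    if ((out = BIO_new_fp(stdout, BIO_NOCLOSE)) != NULL) {
        /* Multi-line subject output; skip the signature dump. */
        ret = X509_REQ_print_ex(out, req, XN_FLAG_SEP_MULTILINE,
                                X509_FLAG_NO_SIGDUMP);
        BIO_free(out);
    }
    X509_REQ_free(req);
    return ret;
}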
376587.c
/* clonePos.c - create table for clonePos. */
/* Copyright (C) 2013 The Regents of the University of California
* See kent/LICENSE or http://genome.ucsc.edu/license/ for licensing information. */
#include "common.h"
#include "linefile.h"
#include "portable.h"
#include "hash.h"
#include "jksql.h"
#include "hdb.h"
#include "psl.h"
#include "glDbRep.h"
char *mapFile = "/projects/compbio/experiments/hg/clonemap.wu/jun15a.wu";
char *infFile = "/projects/cc/hg/gs.2/ffa/sequence.inf";
char *posName = "clonePos.tab";
char *aliName = "cloneAliPos.tab";
void usage()
/* Explain usage and exit. */
{
errAbort(
"clonePos - make clonePos and cloneAliPos tables\n"
"usage:\n"
" clonePos here\n"
"This will make clonePos.tab and cloneAliPos.tab in the current dir");
}
struct cloneInfo
/* Info on one clone. */
{
struct cloneInfo *next; /* Next in list. */
char *name; /* Name of clone. */
int size; /* Clone size. */
int phase; /* HTG Phase. */
struct clonePos *pos; /* Position in chromosome. */
struct clonePos *aliPos; /* Position of alignment in chromosome. */
};
struct clonePos
/* Position of a clone. */
{
struct clonePos *next; /* Next in list. */
struct cloneInfo *info; /* Info about clone. */
int start, end; /* Offset within contig. */
};
int cmpClonePos(const void *va, const void *vb)
/* Compare two clonePos by start position. */
{
const struct clonePos *a = *((struct clonePos **)va);
const struct clonePos *b = *((struct clonePos **)vb);
return a->start - b->start;
}
void readSeqInfo(char *fileName, struct hash **pHash, struct cloneInfo **pList)
/* Read info about clones. */
{
struct lineFile *lf;
struct hash *hash = newHash(16);
struct hashEl *hel;
struct cloneInfo *infoList = NULL, *info;
int lineSize, wordCount;
char *line, *words[16];
char *acc;
char c;
lf = lineFileOpen(fileName, TRUE);
if (!lineFileNext(lf, &line, &lineSize))
errAbort("Empty %s", fileName);
if (!startsWith("#Accession.ver", line))
errAbort("Unrecognized format on %s", fileName);
while (lineFileNext(lf, &line, &lineSize))
{
if (line[0] == '#')
continue;
wordCount = chopLine(line,words);
if (wordCount != 8)
errAbort("Expecting 8 words line %d of %s", lf->lineIx, lf->fileName);
AllocVar(info);
acc = words[0];
if (hashLookup(hash, acc) != NULL)
errAbort("Duplicate %s line %d of %s", acc, lf->lineIx, lf->fileName);
hel = hashAdd(hash, acc, info);
info->name = hel->name;
info->size = atoi(words[2]);
if (info->size == 0)
errAbort("Expecting clone size field 2 of line %d of %s",
lf->lineIx, lf->fileName);
c = words[3][0];
if (c == '0' || c == '1' || c == '2' || c == '3')
info->phase = atoi(words[3]);
else
errAbort("Expecting phase field 3 of line %d of %s",
lf->lineIx, lf->fileName);
slAddHead(&infoList, info);
}
lineFileClose(&lf);
slReverse(&infoList);
*pList = infoList;
*pHash = hash;
}
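/*
* Editor's note (inferred from readSeqInfo() above, not from external
* documentation): sequence.inf is expected to start with a
* "#Accession.ver" header line, followed by data lines of exactly 8
* whitespace-separated words, where word 0 is the accession, word 2 is
* the (nonzero) clone size, and word 3 is the HTG phase (0-3); the
* remaining words are not used here.
*/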
struct cloneInfo *findClone(struct hash *cloneHash, char *name)
/* Find named clone in hash table. */
{
struct hashEl *hel;
if ((hel = hashLookup(cloneHash, name)) == NULL)
errAbort("Clone %s is not in %s", name, infFile);
return hel->val;
}
void fragNameToCloneName(char *cloneName)
/* Chop off suffix to get clone name. */
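/* For example, a hypothetical fragment name "AC012345_7" becomes "AC012345". */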
{
char *s = strchr(cloneName, '_');
if (s != NULL)
*s = 0;
}
void writePosList(char *fileName, struct clonePos *posList, char *chromName)
/* Write out tab-delimited position list. */
{
struct clonePos *pos;
struct cloneInfo *info;
FILE *f = mustOpen(fileName, "w");
printf("Writing %d entries to %s\n", slCount(posList), fileName);
for (pos = posList; pos != NULL; pos = pos->next)
{
info = pos->info;
fprintf(f, "%s\t%d\t%d\t%s\t%d\t%d\n",
info->name, info->size, info->phase, chromName, pos->start, pos->end);
}
fclose(f);
}
void clonePosTab(char *fileName, struct hash *cloneHash)
/* Write out clonePos.tab. */
{
char query[256];
struct sqlResult *sr;
char **row;
struct clonePos *posList = NULL, *pos;
struct gl gl;
struct cloneInfo *info;
struct sqlConnection *conn = hAllocConn();
sqlSafef(query, sizeof query, "select * from chr18_gl");
sr = sqlGetResult(conn, query);
while ((row = sqlNextRow(sr)) != NULL)
{
glStaticLoad(row, &gl);
fragNameToCloneName(gl.frag);
info = findClone(cloneHash, gl.frag);
if ((pos = info->pos) == NULL)
{
AllocVar(pos);
pos->info = info;
info->pos = pos;
pos->start = gl.start;
pos->end = gl.end;
slAddHead(&posList, pos);
}
else
{
if (pos->start > gl.start)
pos->start = gl.start;
if (pos->end < gl.end)
pos->end = gl.end;
}
}
sqlFreeResult(&sr);
hFreeConn(&conn);
slSort(&posList, cmpClonePos);
writePosList(fileName, posList, "chr18");
}
void cloneAliPosTab(char *fileName, struct hash *cloneHash)
/* Write out cloneAliPos.tab. */
{
char query[256];
struct sqlResult *sr;
char **row;
struct clonePos *posList = NULL, *pos;
struct cloneInfo *info;
struct sqlConnection *conn = hAllocConn();
sqlSafef(query, sizeof query, "select * from chr18_frags");
sr = sqlGetResult(conn, query);
while ((row = sqlNextRow(sr)) != NULL)
{
struct psl *psl = pslLoad(row);
fragNameToCloneName(psl->qName);
info = findClone(cloneHash, psl->qName);
if ((pos = info->aliPos) == NULL)
{
AllocVar(pos);
pos->info = info;
info->aliPos = pos;
pos->start = psl->tStart;
pos->end = psl->tEnd;
slAddHead(&posList, pos);
}
else
{
if (pos->start > psl->tStart)
pos->start = psl->tStart;
if (pos->end < psl->tEnd)
pos->end = psl->tEnd;
}
pslFree(&psl);
}
sqlFreeResult(&sr);
hFreeConn(&conn);
slSort(&posList, cmpClonePos);
writePosList(fileName, posList, "chr18");
}
void clonePos()
/* create tables for clonePos. */
{
struct cloneInfo *infoList, *info;
struct clonePos *aliList, *posList, *pos;
struct hash *cloneHash;
uglyf("mysqlHost is %s\n", mysqlHost());
readSeqInfo(infFile, &cloneHash, &infoList);
clonePosTab(posName, cloneHash);
cloneAliPosTab(aliName, cloneHash);
}
int main(int argc, char *argv[])
{
if (argc != 2)
usage();
clonePos();
}
178959.c
/*
* Copyright (c) 1993, 1994, 1995, 1996, 1998
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that: (1) source code distributions
* retain the above copyright notice and this paragraph in its entirety, (2)
* distributions including binary code include the above copyright notice and
* this paragraph in its entirety in the documentation or other materials
* provided with the distribution, and (3) all advertising materials mentioning
* features or use of this software display the following acknowledgement:
* ``This product includes software developed by the University of California,
* Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
* the University nor the names of its contributors may be used to endorse
* or promote products derived from this software without specific prior
* written permission.
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <sys/param.h> /* optionally get BSD define */
#include <sys/socket.h>
#include <time.h>
/*
* <net/bpf.h> defines ioctls, but doesn't include <sys/ioccom.h>.
*
* We include <sys/ioctl.h> as it might be necessary to declare ioctl();
* at least on *BSD and macOS, it also defines various SIOC ioctls -
* we could include <sys/sockio.h>, but if we're already including
* <sys/ioctl.h>, which includes <sys/sockio.h> on those platforms,
* there's not much point in doing so.
*
* If we have <sys/ioccom.h>, we include it as well, to handle systems
* such as Solaris which don't arrange to include <sys/ioccom.h> if you
* include <sys/ioctl.h>
*/
#include <sys/ioctl.h>
#ifdef HAVE_SYS_IOCCOM_H
#include <sys/ioccom.h>
#endif
#include <sys/utsname.h>
#if defined(__FreeBSD__) && defined(SIOCIFCREATE2)
/*
* Add support for capturing on FreeBSD usbusN interfaces.
*/
static const char usbus_prefix[] = "usbus";
#define USBUS_PREFIX_LEN (sizeof(usbus_prefix) - 1)
#include <dirent.h>
#endif
#include <net/if.h>
#ifdef _AIX
/*
* Make "pcap.h" not include "pcap/bpf.h"; we are going to include the
* native OS version, as we need "struct bpf_config" from it.
*/
#define PCAP_DONT_INCLUDE_PCAP_BPF_H
#include <sys/types.h>
/*
* Prevent bpf.h from redefining the DLT_ values to their
* IFT_ values, as we're going to return the standard libpcap
* values, not IBM's non-standard IFT_ values.
*/
#undef _AIX
#include <net/bpf.h>
#define _AIX
/*
* If both BIOCROTZBUF and BPF_BUFMODE_ZBUF are defined, we have
* zero-copy BPF.
*/
#if defined(BIOCROTZBUF) && defined(BPF_BUFMODE_ZBUF)
#define HAVE_ZEROCOPY_BPF
#include <sys/mman.h>
#include <machine/atomic.h>
#endif
#include <net/if_types.h> /* for IFT_ values */
#include <sys/sysconfig.h>
#include <sys/device.h>
#include <sys/cfgodm.h>
#include <cf.h>
#ifdef __64BIT__
#define domakedev makedev64
#define getmajor major64
#define bpf_hdr bpf_hdr32
#else /* __64BIT__ */
#define domakedev makedev
#define getmajor major
#endif /* __64BIT__ */
#define BPF_NAME "bpf"
#define BPF_MINORS 4
#define DRIVER_PATH "/usr/lib/drivers"
#define BPF_NODE "/dev/bpf"
static int bpfloadedflag = 0;
static int odmlockid = 0;
static int bpf_load(char *errbuf);
#else /* _AIX */
#include <net/bpf.h>
#endif /* _AIX */
#include <ctype.h>
#include <fcntl.h>
#include <errno.h>
#include <netdb.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#ifdef SIOCGIFMEDIA
# include <net/if_media.h>
#endif
#include "pcap-int.h"
#ifdef HAVE_OS_PROTO_H
#include "os-proto.h"
#endif
/*
* Later versions of NetBSD stick padding in front of FDDI frames
* to align the IP header on a 4-byte boundary.
*/
#if defined(__NetBSD__) && __NetBSD_Version__ > 106000000
#define PCAP_FDDIPAD 3
#endif
/*
* Private data for capturing on BPF devices.
*/
struct pcap_bpf {
#ifdef HAVE_ZEROCOPY_BPF
/*
* Zero-copy read buffer -- for zero-copy BPF. 'buffer' above will
* alternate between these two actual mmap'd buffers as required.
* As there is a header on the front of the mmap'd buffer, only
* some of the buffer is exposed to libpcap as a whole via bufsize;
* zbufsize is the true size. zbuffer tracks the current zbuf
* associated with buffer so that it can be used to decide which
* buffer to read next.
*/
u_char *zbuf1, *zbuf2, *zbuffer;
u_int zbufsize;
u_int zerocopy;
u_int interrupted;
struct timespec firstsel;
/*
* If there's currently a buffer being actively processed, then it is
* referenced here; 'buffer' is also pointed at it, but offset by the
* size of the header.
*/
struct bpf_zbuf_header *bzh;
int nonblock; /* true if in nonblocking mode */
#endif /* HAVE_ZEROCOPY_BPF */
char *device; /* device name */
int filtering_in_kernel; /* using kernel filter */
int must_do_on_close; /* stuff we must do when we close */
};
/*
* Stuff to do when we close.
*/
#define MUST_CLEAR_RFMON 0x00000001 /* clear rfmon (monitor) mode */
#define MUST_DESTROY_USBUS 0x00000002 /* destroy usbusN interface */
#ifdef BIOCGDLTLIST
# if (defined(HAVE_NET_IF_MEDIA_H) && defined(IFM_IEEE80211)) && !defined(__APPLE__)
#define HAVE_BSD_IEEE80211
/*
* The ifm_ulist member of a struct ifmediareq is an int * on most systems,
* but it's a uint64_t on newer versions of OpenBSD.
*
* We check this by checking whether IFM_GMASK is defined and > 2^32-1.
*/
# if defined(IFM_GMASK) && IFM_GMASK > 0xFFFFFFFF
# define IFM_ULIST_TYPE uint64_t
# else
# define IFM_ULIST_TYPE int
# endif
# endif
# if defined(__APPLE__) || defined(HAVE_BSD_IEEE80211)
static int find_802_11(struct bpf_dltlist *);
# ifdef HAVE_BSD_IEEE80211
static int monitor_mode(pcap_t *, int);
# endif
# if defined(__APPLE__)
static void remove_non_802_11(pcap_t *);
static void remove_802_11(pcap_t *);
# endif
# endif /* defined(__APPLE__) || defined(HAVE_BSD_IEEE80211) */
#endif /* BIOCGDLTLIST */
#if defined(sun) && defined(LIFNAMSIZ) && defined(lifr_zoneid)
#include <zone.h>
#endif
/*
* We include the OS's <net/bpf.h>, not our "pcap/bpf.h", so we probably
* don't get DLT_DOCSIS defined.
*/
#ifndef DLT_DOCSIS
#define DLT_DOCSIS 143
#endif
/*
* In some versions of macOS, we might not even get any of the
* 802.11-plus-radio-header DLT_'s defined, even though some
* of them are used by various Airport drivers in those versions.
*/
#ifndef DLT_PRISM_HEADER
#define DLT_PRISM_HEADER 119
#endif
#ifndef DLT_AIRONET_HEADER
#define DLT_AIRONET_HEADER 120
#endif
#ifndef DLT_IEEE802_11_RADIO
#define DLT_IEEE802_11_RADIO 127
#endif
#ifndef DLT_IEEE802_11_RADIO_AVS
#define DLT_IEEE802_11_RADIO_AVS 163
#endif
static int pcap_can_set_rfmon_bpf(pcap_t *p);
static int pcap_activate_bpf(pcap_t *p);
static int pcap_setfilter_bpf(pcap_t *p, struct bpf_program *fp);
static int pcap_setdirection_bpf(pcap_t *, pcap_direction_t);
static int pcap_set_datalink_bpf(pcap_t *p, int dlt);
/*
* For zerocopy bpf, the setnonblock/getnonblock routines need to modify
* pb->nonblock so we don't call select(2) if the pcap handle is in non-
* blocking mode.
*/
static int
pcap_getnonblock_bpf(pcap_t *p)
{
#ifdef HAVE_ZEROCOPY_BPF
struct pcap_bpf *pb = p->priv;
if (pb->zerocopy)
return (pb->nonblock);
#endif
return (pcap_getnonblock_fd(p));
}
static int
pcap_setnonblock_bpf(pcap_t *p, int nonblock)
{
#ifdef HAVE_ZEROCOPY_BPF
struct pcap_bpf *pb = p->priv;
if (pb->zerocopy) {
pb->nonblock = nonblock;
return (0);
}
#endif
return (pcap_setnonblock_fd(p, nonblock));
}
#ifdef HAVE_ZEROCOPY_BPF
/*
* Zero-copy BPF buffer routines to check for and acknowledge BPF data in
* shared memory buffers.
*
* pcap_next_zbuf_shm(): Check for a newly available shared memory buffer,
* and set up p->buffer and cc to reflect one if available. Notice that if
* there was no prior buffer, we select zbuf1 as this will be the first
* buffer filled for a fresh BPF session.
*/
static int
pcap_next_zbuf_shm(pcap_t *p, int *cc)
{
struct pcap_bpf *pb = p->priv;
struct bpf_zbuf_header *bzh;
if (pb->zbuffer == pb->zbuf2 || pb->zbuffer == NULL) {
bzh = (struct bpf_zbuf_header *)pb->zbuf1;
if (bzh->bzh_user_gen !=
atomic_load_acq_int(&bzh->bzh_kernel_gen)) {
pb->bzh = bzh;
pb->zbuffer = (u_char *)pb->zbuf1;
p->buffer = pb->zbuffer + sizeof(*bzh);
*cc = bzh->bzh_kernel_len;
return (1);
}
} else if (pb->zbuffer == pb->zbuf1) {
bzh = (struct bpf_zbuf_header *)pb->zbuf2;
if (bzh->bzh_user_gen !=
atomic_load_acq_int(&bzh->bzh_kernel_gen)) {
pb->bzh = bzh;
pb->zbuffer = (u_char *)pb->zbuf2;
p->buffer = pb->zbuffer + sizeof(*bzh);
*cc = bzh->bzh_kernel_len;
return (1);
}
}
*cc = 0;
return (0);
}
/*
* pcap_next_zbuf() -- Similar to pcap_next_zbuf_shm(), except wait using
* select() for data or a timeout, and possibly force rotation of the buffer
* in the event we time out or are in immediate mode. Invoke the shared
* memory check before doing system calls in order to avoid doing avoidable
* work.
*/
static int
pcap_next_zbuf(pcap_t *p, int *cc)
{
struct pcap_bpf *pb = p->priv;
struct bpf_zbuf bz;
struct timeval tv;
struct timespec cur;
fd_set r_set;
int data, r;
int expire, tmout;
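/* Convert a struct timespec to whole milliseconds for the timeout math below. */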
#define TSTOMILLI(ts) (((ts)->tv_sec * 1000) + ((ts)->tv_nsec / 1000000))
/*
* Start out by seeing whether anything is waiting by checking the
* next shared memory buffer for data.
*/
data = pcap_next_zbuf_shm(p, cc);
if (data)
return (data);
/*
* If a previous sleep was interrupted due to signal delivery, make
* sure that the timeout gets adjusted accordingly. This requires
* that we analyze when the timeout should have expired, and
* subtract the current time from that. If, after this operation,
* our timeout is less than or equal to zero, handle it like a
* regular timeout.
*/
tmout = p->opt.timeout;
if (tmout)
(void) clock_gettime(CLOCK_MONOTONIC, &cur);
if (pb->interrupted && p->opt.timeout) {
expire = TSTOMILLI(&pb->firstsel) + p->opt.timeout;
tmout = expire - TSTOMILLI(&cur);
#undef TSTOMILLI
if (tmout <= 0) {
pb->interrupted = 0;
data = pcap_next_zbuf_shm(p, cc);
if (data)
return (data);
if (ioctl(p->fd, BIOCROTZBUF, &bz) < 0) {
pcap_fmt_errmsg_for_errno(p->errbuf,
PCAP_ERRBUF_SIZE, errno, "BIOCROTZBUF");
return (PCAP_ERROR);
}
return (pcap_next_zbuf_shm(p, cc));
}
}
/*
* No data in the buffer, so must use select() to wait for data or
* the next timeout. Note that we only call select if the handle
* is in blocking mode.
*/
if (!pb->nonblock) {
FD_ZERO(&r_set);
FD_SET(p->fd, &r_set);
if (tmout != 0) {
tv.tv_sec = tmout / 1000;
tv.tv_usec = (tmout * 1000) % 1000000;
}
r = select(p->fd + 1, &r_set, NULL, NULL,
p->opt.timeout != 0 ? &tv : NULL);
if (r < 0 && errno == EINTR) {
if (!pb->interrupted && p->opt.timeout) {
pb->interrupted = 1;
pb->firstsel = cur;
}
return (0);
} else if (r < 0) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "select");
return (PCAP_ERROR);
}
}
pb->interrupted = 0;
/*
* Check again for data, which may exist now that we've either been
* woken up as a result of data or timed out. Try the "there's data"
* case first since it doesn't require a system call.
*/
data = pcap_next_zbuf_shm(p, cc);
if (data)
return (data);
/*
* Try forcing a buffer rotation to dislodge timed out or immediate
* data.
*/
if (ioctl(p->fd, BIOCROTZBUF, &bz) < 0) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "BIOCROTZBUF");
return (PCAP_ERROR);
}
return (pcap_next_zbuf_shm(p, cc));
}
/*
* Notify kernel that we are done with the buffer. We don't reset zbuffer so
* that we know which buffer to use next time around.
*/
static int
pcap_ack_zbuf(pcap_t *p)
{
struct pcap_bpf *pb = p->priv;
atomic_store_rel_int(&pb->bzh->bzh_user_gen,
pb->bzh->bzh_kernel_gen);
pb->bzh = NULL;
p->buffer = NULL;
return (0);
}
#endif /* HAVE_ZEROCOPY_BPF */
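/*
* Editor's note on the intended call pattern for the zero-copy helpers
* above, as used by pcap_read_bpf() below: acknowledge any buffer still
* held via pcap_ack_zbuf(), then call pcap_next_zbuf() to obtain the next
* filled buffer (0 = no data yet, negative = error, 1 = p->buffer and the
* caller's cc now describe a readable buffer).
*/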
pcap_t *
pcap_create_interface(const char *device _U_, char *ebuf)
{
pcap_t *p;
p = pcap_create_common(ebuf, sizeof (struct pcap_bpf));
if (p == NULL)
return (NULL);
p->activate_op = pcap_activate_bpf;
p->can_set_rfmon_op = pcap_can_set_rfmon_bpf;
#ifdef BIOCSTSTAMP
/*
* We claim that we support microsecond and nanosecond time
* stamps.
*/
p->tstamp_precision_count = 2;
p->tstamp_precision_list = malloc(2 * sizeof(u_int));
if (p->tstamp_precision_list == NULL) {
pcap_fmt_errmsg_for_errno(ebuf, PCAP_ERRBUF_SIZE, errno,
"malloc");
free(p);
return (NULL);
}
p->tstamp_precision_list[0] = PCAP_TSTAMP_PRECISION_MICRO;
p->tstamp_precision_list[1] = PCAP_TSTAMP_PRECISION_NANO;
#endif /* BIOCSTSTAMP */
return (p);
}
/*
* On success, returns a file descriptor for a BPF device.
* On failure, returns a PCAP_ERROR_ value, and sets p->errbuf.
*/
static int
bpf_open(char *errbuf)
{
int fd = -1;
static const char cloning_device[] = "/dev/bpf";
int n = 0;
char device[sizeof "/dev/bpf0000000000"];
static int no_cloning_bpf = 0;
#ifdef _AIX
/*
* Load the bpf driver, if it isn't already loaded,
* and create the BPF device entries, if they don't
* already exist.
*/
if (bpf_load(errbuf) == PCAP_ERROR)
return (PCAP_ERROR);
#endif
/*
* First, unless we've already tried opening /dev/bpf and
* gotten ENOENT, try opening /dev/bpf.
* If it fails with ENOENT, remember that, so we don't try
* again, and try /dev/bpfN.
*/
if (!no_cloning_bpf &&
(fd = open(cloning_device, O_RDWR)) == -1 &&
((errno != EACCES && errno != ENOENT) ||
(fd = open(cloning_device, O_RDONLY)) == -1)) {
if (errno != ENOENT) {
if (errno == EACCES)
fd = PCAP_ERROR_PERM_DENIED;
else
fd = PCAP_ERROR;
pcap_fmt_errmsg_for_errno(errbuf, PCAP_ERRBUF_SIZE,
errno, "(cannot open device) %s", cloning_device);
return (fd);
}
no_cloning_bpf = 1;
}
if (no_cloning_bpf) {
/*
* We don't have /dev/bpf.
* Go through all the /dev/bpfN minors and find one
* that isn't in use.
*/
do {
(void)pcap_snprintf(device, sizeof(device), "/dev/bpf%d", n++);
/*
* Initially try a read/write open (to allow the inject
* method to work). If that fails due to permission
* issues, fall back to read-only. This allows a
* non-root user to be granted specific access to pcap
* capabilities via file permissions.
*
* XXX - we should have an API that has a flag that
* controls whether to open read-only or read-write,
* so that denial of permission to send (or inability
* to send, if sending packets isn't supported on
* the device in question) can be indicated at open
* time.
*/
fd = open(device, O_RDWR);
if (fd == -1 && errno == EACCES)
fd = open(device, O_RDONLY);
} while (fd < 0 && errno == EBUSY);
}
/*
* XXX better message for all minors used
*/
if (fd < 0) {
switch (errno) {
case ENOENT:
fd = PCAP_ERROR;
if (n == 1) {
/*
* /dev/bpf0 doesn't exist, which
* means we probably have no BPF
* devices.
*/
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE,
"(there are no BPF devices)");
} else {
/*
* We got EBUSY on at least one
* BPF device, so we have BPF
* devices, but all the ones
* that exist are busy.
*/
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE,
"(all BPF devices are busy)");
}
break;
case EACCES:
/*
* Got EACCES on the last device we tried,
* and EBUSY on all devices before that,
* if any.
*/
fd = PCAP_ERROR_PERM_DENIED;
pcap_fmt_errmsg_for_errno(errbuf, PCAP_ERRBUF_SIZE,
errno, "(cannot open BPF device) %s", device);
break;
default:
/*
* Some other problem.
*/
fd = PCAP_ERROR;
pcap_fmt_errmsg_for_errno(errbuf, PCAP_ERRBUF_SIZE,
errno, "(cannot open BPF device) %s", device);
break;
}
}
return (fd);
}
/*
* Open and bind to a device; used if we're not actually going to use
* the device, but are just testing whether it can be opened, or opening
* it to get information about it.
*
* Returns an error code on failure (always negative), and an FD for
* the now-bound BPF device on success (always non-negative).
*/
static int
bpf_open_and_bind(const char *name, char *errbuf)
{
int fd;
struct ifreq ifr;
/*
* First, open a BPF device.
*/
fd = bpf_open(errbuf);
if (fd < 0)
return (fd); /* fd is the appropriate error code */
/*
* Now bind to the device.
*/
(void)strncpy(ifr.ifr_name, name, sizeof(ifr.ifr_name));
if (ioctl(fd, BIOCSETIF, (caddr_t)&ifr) < 0) {
switch (errno) {
case ENXIO:
/*
* There's no such device.
*/
close(fd);
return (PCAP_ERROR_NO_SUCH_DEVICE);
case ENETDOWN:
/*
* Return a "network down" indication, so that
* the application can report that rather than
* saying we had a mysterious failure and
* suggest that they report a problem to the
* libpcap developers.
*/
close(fd);
return (PCAP_ERROR_IFACE_NOT_UP);
default:
pcap_fmt_errmsg_for_errno(errbuf, PCAP_ERRBUF_SIZE,
errno, "BIOCSETIF: %s", name);
close(fd);
return (PCAP_ERROR);
}
}
/*
* Success.
*/
return (fd);
}
#ifdef BIOCGDLTLIST
static int
get_dlt_list(int fd, int v, struct bpf_dltlist *bdlp, char *ebuf)
{
memset(bdlp, 0, sizeof(*bdlp));
if (ioctl(fd, BIOCGDLTLIST, (caddr_t)bdlp) == 0) {
u_int i;
int is_ethernet;
bdlp->bfl_list = (u_int *) malloc(sizeof(u_int) * (bdlp->bfl_len + 1));
if (bdlp->bfl_list == NULL) {
pcap_fmt_errmsg_for_errno(ebuf, PCAP_ERRBUF_SIZE,
errno, "malloc");
return (PCAP_ERROR);
}
if (ioctl(fd, BIOCGDLTLIST, (caddr_t)bdlp) < 0) {
pcap_fmt_errmsg_for_errno(ebuf, PCAP_ERRBUF_SIZE,
errno, "BIOCGDLTLIST");
free(bdlp->bfl_list);
return (PCAP_ERROR);
}
/*
* OK, for real Ethernet devices, add DLT_DOCSIS to the
* list, so that an application can let you choose it,
* in case you're capturing DOCSIS traffic that a Cisco
* Cable Modem Termination System is putting out onto
* an Ethernet (it doesn't put an Ethernet header onto
* the wire, it puts raw DOCSIS frames out on the wire
* inside the low-level Ethernet framing).
*
* A "real Ethernet device" is defined here as a device
* that has a link-layer type of DLT_EN10MB and that has
* no alternate link-layer types; that's done to exclude
* 802.11 interfaces (which might or might not be the
* right thing to do, but I suspect it is - Ethernet <->
* 802.11 bridges would probably badly mishandle frames
* that don't have Ethernet headers).
*
* On Solaris with BPF, Ethernet devices also offer
* DLT_IPNET, so, if DLT_IPNET is defined, we don't
* treat it as an indication that the device isn't an
* Ethernet.
*/
if (v == DLT_EN10MB) {
is_ethernet = 1;
for (i = 0; i < bdlp->bfl_len; i++) {
if (bdlp->bfl_list[i] != DLT_EN10MB
#ifdef DLT_IPNET
&& bdlp->bfl_list[i] != DLT_IPNET
#endif
) {
is_ethernet = 0;
break;
}
}
if (is_ethernet) {
/*
* We reserved one more slot at the end of
* the list.
*/
bdlp->bfl_list[bdlp->bfl_len] = DLT_DOCSIS;
bdlp->bfl_len++;
}
}
} else {
/*
* EINVAL just means "we don't support this ioctl on
* this device"; don't treat it as an error.
*/
if (errno != EINVAL) {
pcap_fmt_errmsg_for_errno(ebuf, PCAP_ERRBUF_SIZE,
errno, "BIOCGDLTLIST");
return (PCAP_ERROR);
}
}
return (0);
}
#endif
#if defined(__APPLE__)
static int
pcap_can_set_rfmon_bpf(pcap_t *p)
{
struct utsname osinfo;
struct ifreq ifr;
int fd;
#ifdef BIOCGDLTLIST
struct bpf_dltlist bdl;
#endif
/*
* The joys of monitor mode on Mac OS X/OS X/macOS.
*
* Prior to 10.4, it's not supported at all.
*
* In 10.4, if adapter enN supports monitor mode, there's a
* wltN adapter corresponding to it; you open it, instead of
* enN, to get monitor mode. You get whatever link-layer
* headers it supplies.
*
* In 10.5, and, we assume, later releases, if adapter enN
* supports monitor mode, it offers, among its selectable
* DLT_ values, values that let you get the 802.11 header;
* selecting one of those values puts the adapter into monitor
* mode (i.e., you can't get 802.11 headers except in monitor
* mode, and you can't get Ethernet headers in monitor mode).
*/
if (uname(&osinfo) == -1) {
/*
* Can't get the OS version; just say "no".
*/
return (0);
}
/*
* We assume osinfo.sysname is "Darwin", because
* __APPLE__ is defined. We just check the version.
*/
if (osinfo.release[0] < '8' && osinfo.release[1] == '.') {
/*
* 10.3 (Darwin 7.x) or earlier.
* Monitor mode not supported.
*/
return (0);
}
if (osinfo.release[0] == '8' && osinfo.release[1] == '.') {
/*
* 10.4 (Darwin 8.x). s/en/wlt/, and check
* whether the device exists.
*/
if (strncmp(p->opt.device, "en", 2) != 0) {
/*
* Not an enN device; no monitor mode.
*/
return (0);
}
fd = socket(AF_INET, SOCK_DGRAM, 0);
if (fd == -1) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "socket");
return (PCAP_ERROR);
}
pcap_strlcpy(ifr.ifr_name, "wlt", sizeof(ifr.ifr_name));
pcap_strlcat(ifr.ifr_name, p->opt.device + 2, sizeof(ifr.ifr_name));
if (ioctl(fd, SIOCGIFFLAGS, (char *)&ifr) < 0) {
/*
* No such device?
*/
close(fd);
return (0);
}
close(fd);
return (1);
}
#ifdef BIOCGDLTLIST
/*
* Everything else is 10.5 or later; for those,
* we just open the enN device, and check whether
* we have any 802.11 devices.
*
* First, open a BPF device.
*/
fd = bpf_open(p->errbuf);
if (fd < 0)
return (fd); /* fd is the appropriate error code */
/*
* Now bind to the device.
*/
(void)strncpy(ifr.ifr_name, p->opt.device, sizeof(ifr.ifr_name));
if (ioctl(fd, BIOCSETIF, (caddr_t)&ifr) < 0) {
switch (errno) {
case ENXIO:
/*
* There's no such device.
*/
close(fd);
return (PCAP_ERROR_NO_SUCH_DEVICE);
case ENETDOWN:
/*
* Return a "network down" indication, so that
* the application can report that rather than
* saying we had a mysterious failure and
* suggest that they report a problem to the
* libpcap developers.
*/
close(fd);
return (PCAP_ERROR_IFACE_NOT_UP);
default:
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "BIOCSETIF: %s", p->opt.device);
close(fd);
return (PCAP_ERROR);
}
}
/*
* We know the default link type -- now determine all the DLTs
* this interface supports. If this fails with EINVAL, it's
* not fatal; we just don't get to use the feature later.
* (We don't care about DLT_DOCSIS, so we pass DLT_NULL
* as the default DLT for this adapter.)
*/
if (get_dlt_list(fd, DLT_NULL, &bdl, p->errbuf) == PCAP_ERROR) {
close(fd);
return (PCAP_ERROR);
}
if (find_802_11(&bdl) != -1) {
/*
* We have an 802.11 DLT, so we can set monitor mode.
*/
free(bdl.bfl_list);
close(fd);
return (1);
}
free(bdl.bfl_list);
close(fd);
#endif /* BIOCGDLTLIST */
return (0);
}
#elif defined(HAVE_BSD_IEEE80211)
static int
pcap_can_set_rfmon_bpf(pcap_t *p)
{
int ret;
ret = monitor_mode(p, 0);
if (ret == PCAP_ERROR_RFMON_NOTSUP)
return (0); /* not an error, just a "can't do" */
if (ret == 0)
return (1); /* success */
return (ret);
}
#else
static int
pcap_can_set_rfmon_bpf(pcap_t *p _U_)
{
return (0);
}
#endif
static int
pcap_stats_bpf(pcap_t *p, struct pcap_stat *ps)
{
struct bpf_stat s;
/*
* "ps_recv" counts packets handed to the filter, not packets
* that passed the filter. This includes packets later dropped
* because we ran out of buffer space.
*
* "ps_drop" counts packets dropped inside the BPF device
* because we ran out of buffer space. It doesn't count
* packets dropped by the interface driver. It counts
* only packets that passed the filter.
*
* Both statistics include packets not yet read from the kernel
* by libpcap, and thus not yet seen by the application.
*/
if (ioctl(p->fd, BIOCGSTATS, (caddr_t)&s) < 0) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "BIOCGSTATS");
return (PCAP_ERROR);
}
ps->ps_recv = s.bs_recv;
ps->ps_drop = s.bs_drop;
ps->ps_ifdrop = 0;
return (0);
}
static int
pcap_read_bpf(pcap_t *p, int cnt, pcap_handler callback, u_char *user)
{
struct pcap_bpf *pb = p->priv;
int cc;
int n = 0;
register u_char *bp, *ep;
u_char *datap;
#ifdef PCAP_FDDIPAD
register u_int pad;
#endif
#ifdef HAVE_ZEROCOPY_BPF
int i;
#endif
again:
/*
* Has "pcap_breakloop()" been called?
*/
if (p->break_loop) {
/*
* Yes - clear the flag that indicates that it
* has, and return PCAP_ERROR_BREAK to indicate
* that we were told to break out of the loop.
*/
p->break_loop = 0;
return (PCAP_ERROR_BREAK);
}
cc = p->cc;
if (p->cc == 0) {
/*
* When reading without zero-copy from a file descriptor, we
* use a single buffer and return a length of data in the
* buffer. With zero-copy, we update the p->buffer pointer
* to point at whatever underlying buffer contains the next
* data and update cc to reflect the data found in the
* buffer.
*/
#ifdef HAVE_ZEROCOPY_BPF
if (pb->zerocopy) {
if (p->buffer != NULL)
pcap_ack_zbuf(p);
i = pcap_next_zbuf(p, &cc);
if (i == 0)
goto again;
if (i < 0)
return (PCAP_ERROR);
} else
#endif
{
cc = read(p->fd, p->buffer, p->bufsize);
}
if (cc < 0) {
/* Don't choke when we get ptraced */
switch (errno) {
case EINTR:
goto again;
#ifdef _AIX
case EFAULT:
/*
* Sigh. More AIX wonderfulness.
*
* For some unknown reason the uiomove()
* operation in the bpf kernel extension
* used to copy the buffer into user
* space sometimes returns EFAULT. I have
* no idea why this is the case given that
* a kernel debugger shows the user buffer
* is correct. This problem appears to
* be mostly mitigated by the memset of
* the buffer before it is first used.
* Very strange.... Shaun Clowes
*
* In any case this means that we shouldn't
* treat EFAULT as a fatal error; as we
* don't have an API for returning
* a "some packets were dropped since
* the last packet you saw" indication,
* we just ignore EFAULT and keep reading.
*/
goto again;
#endif
case EWOULDBLOCK:
return (0);
case ENXIO: /* FreeBSD, DragonFly BSD, and Darwin */
case EIO: /* OpenBSD */
/* NetBSD appears not to return an error in this case */
/*
* The device on which we're capturing
* went away.
*
* XXX - we should really return
* an appropriate error for that,
* but pcap_dispatch() etc. aren't
* documented as having error returns
* other than PCAP_ERROR or PCAP_ERROR_BREAK.
*/
pcap_snprintf(p->errbuf, PCAP_ERRBUF_SIZE,
"The interface disappeared");
return (PCAP_ERROR);
#if defined(sun) && !defined(BSD) && !defined(__svr4__) && !defined(__SVR4)
/*
* Due to a SunOS bug, after 2^31 bytes, the kernel
* file offset overflows and read fails with EINVAL.
* The lseek() to 0 will fix things.
*/
case EINVAL:
if (lseek(p->fd, 0L, SEEK_CUR) +
p->bufsize < 0) {
(void)lseek(p->fd, 0L, SEEK_SET);
goto again;
}
/* fall through */
#endif
}
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "read");
return (PCAP_ERROR);
}
bp = (u_char *)p->buffer;
} else
bp = p->bp;
/*
* Loop through each packet.
*/
#ifdef BIOCSTSTAMP
#define bhp ((struct bpf_xhdr *)bp)
#else
#define bhp ((struct bpf_hdr *)bp)
#endif
ep = bp + cc;
#ifdef PCAP_FDDIPAD
pad = p->fddipad;
#endif
while (bp < ep) {
register u_int caplen, hdrlen;
/*
* Has "pcap_breakloop()" been called?
* If so, return immediately - if we haven't read any
* packets, clear the flag and return PCAP_ERROR_BREAK
* to indicate that we were told to break out of the loop,
* otherwise leave the flag set, so that the *next* call
* will break out of the loop without having read any
* packets, and return the number of packets we've
* processed so far.
*/
if (p->break_loop) {
p->bp = bp;
p->cc = ep - bp;
/*
* ep is set based on the return value of read(),
* but read() from a BPF device doesn't necessarily
* return a value that's a multiple of the alignment
* value for BPF_WORDALIGN(). However, whenever we
* increment bp, we round up the increment value by
* a value rounded up by BPF_WORDALIGN(), so we
* could increment bp past ep after processing the
* last packet in the buffer.
*
* We treat ep < bp as an indication that this
* happened, and just set p->cc to 0.
*/
if (p->cc < 0)
p->cc = 0;
if (n == 0) {
p->break_loop = 0;
return (PCAP_ERROR_BREAK);
} else
return (n);
}
caplen = bhp->bh_caplen;
hdrlen = bhp->bh_hdrlen;
datap = bp + hdrlen;
/*
* Short-circuit evaluation: if using BPF filter
* in kernel, no need to do it now - we already know
* the packet passed the filter.
*
#ifdef PCAP_FDDIPAD
* Note: the filter code was generated assuming
* that p->fddipad was the amount of padding
* before the header, as that's what's required
* in the kernel, so we run the filter before
* skipping that padding.
#endif
*/
if (pb->filtering_in_kernel ||
bpf_filter(p->fcode.bf_insns, datap, bhp->bh_datalen, caplen)) {
struct pcap_pkthdr pkthdr;
#ifdef BIOCSTSTAMP
struct bintime bt;
bt.sec = bhp->bh_tstamp.bt_sec;
bt.frac = bhp->bh_tstamp.bt_frac;
if (p->opt.tstamp_precision == PCAP_TSTAMP_PRECISION_NANO) {
struct timespec ts;
bintime2timespec(&bt, &ts);
pkthdr.ts.tv_sec = ts.tv_sec;
pkthdr.ts.tv_usec = ts.tv_nsec;
} else {
struct timeval tv;
bintime2timeval(&bt, &tv);
pkthdr.ts.tv_sec = tv.tv_sec;
pkthdr.ts.tv_usec = tv.tv_usec;
}
#else
pkthdr.ts.tv_sec = bhp->bh_tstamp.tv_sec;
#ifdef _AIX
/*
* AIX's BPF returns seconds/nanoseconds time
* stamps, not seconds/microseconds time stamps.
*/
pkthdr.ts.tv_usec = bhp->bh_tstamp.tv_usec/1000;
#else
pkthdr.ts.tv_usec = bhp->bh_tstamp.tv_usec;
#endif
#endif /* BIOCSTSTAMP */
#ifdef PCAP_FDDIPAD
if (caplen > pad)
pkthdr.caplen = caplen - pad;
else
pkthdr.caplen = 0;
if (bhp->bh_datalen > pad)
pkthdr.len = bhp->bh_datalen - pad;
else
pkthdr.len = 0;
datap += pad;
#else
pkthdr.caplen = caplen;
pkthdr.len = bhp->bh_datalen;
#endif
(*callback)(user, &pkthdr, datap);
bp += BPF_WORDALIGN(caplen + hdrlen);
if (++n >= cnt && !PACKET_COUNT_IS_UNLIMITED(cnt)) {
p->bp = bp;
p->cc = ep - bp;
/*
* See comment above about p->cc < 0.
*/
if (p->cc < 0)
p->cc = 0;
return (n);
}
} else {
/*
* Skip this packet.
*/
bp += BPF_WORDALIGN(caplen + hdrlen);
}
}
#undef bhp
p->cc = 0;
return (n);
}
static int
pcap_inject_bpf(pcap_t *p, const void *buf, size_t size)
{
int ret;
ret = write(p->fd, buf, size);
#ifdef __APPLE__
if (ret == -1 && errno == EAFNOSUPPORT) {
/*
* In some versions of macOS, there's a bug wherein setting
* the BIOCSHDRCMPLT flag causes writes to fail; see, for
* example:
*
* http://cerberus.sourcefire.com/~jeff/archives/patches/macosx/BIOCSHDRCMPLT-10.3.3.patch
*
* So, if, on macOS, we get EAFNOSUPPORT from the write, we
* assume it's due to that bug, and turn off that flag
* and try again. If we succeed, it either means that
* somebody applied the fix from that URL, or other patches
* for that bug from
*
* http://cerberus.sourcefire.com/~jeff/archives/patches/macosx/
*
* and are running a Darwin kernel with those fixes, or
* that Apple fixed the problem in some macOS release.
*/
u_int spoof_eth_src = 0;
if (ioctl(p->fd, BIOCSHDRCMPLT, &spoof_eth_src) == -1) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "send: can't turn off BIOCSHDRCMPLT");
return (PCAP_ERROR);
}
/*
* Now try the write again.
*/
ret = write(p->fd, buf, size);
}
#endif /* __APPLE__ */
if (ret == -1) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "send");
return (PCAP_ERROR);
}
return (ret);
}
#ifdef _AIX
static int
bpf_odminit(char *errbuf)
{
char *errstr;
if (odm_initialize() == -1) {
if (odm_err_msg(odmerrno, &errstr) == -1)
errstr = "Unknown error";
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE,
"bpf_load: odm_initialize failed: %s",
errstr);
return (PCAP_ERROR);
}
if ((odmlockid = odm_lock("/etc/objrepos/config_lock", ODM_WAIT)) == -1) {
if (odm_err_msg(odmerrno, &errstr) == -1)
errstr = "Unknown error";
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE,
"bpf_load: odm_lock of /etc/objrepos/config_lock failed: %s",
errstr);
(void)odm_terminate();
return (PCAP_ERROR);
}
return (0);
}
static int
bpf_odmcleanup(char *errbuf)
{
char *errstr;
if (odm_unlock(odmlockid) == -1) {
if (errbuf != NULL) {
if (odm_err_msg(odmerrno, &errstr) == -1)
errstr = "Unknown error";
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE,
"bpf_load: odm_unlock failed: %s",
errstr);
}
return (PCAP_ERROR);
}
if (odm_terminate() == -1) {
if (errbuf != NULL) {
if (odm_err_msg(odmerrno, &errstr) == -1)
errstr = "Unknown error";
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE,
"bpf_load: odm_terminate failed: %s",
errstr);
}
return (PCAP_ERROR);
}
return (0);
}
static int
bpf_load(char *errbuf)
{
long major;
int *minors;
int numminors, i, rc;
char buf[1024];
struct stat sbuf;
struct bpf_config cfg_bpf;
struct cfg_load cfg_ld;
struct cfg_kmod cfg_km;
/*
* This is very very close to what happens in the real implementation
* but I've fixed some (unlikely) bug situations.
*/
if (bpfloadedflag)
return (0);
if (bpf_odminit(errbuf) == PCAP_ERROR)
return (PCAP_ERROR);
major = genmajor(BPF_NAME);
if (major == -1) {
pcap_fmt_errmsg_for_errno(errbuf, PCAP_ERRBUF_SIZE,
errno, "bpf_load: genmajor failed");
(void)bpf_odmcleanup(NULL);
return (PCAP_ERROR);
}
minors = getminor(major, &numminors, BPF_NAME);
if (!minors) {
minors = genminor("bpf", major, 0, BPF_MINORS, 1, 1);
if (!minors) {
pcap_fmt_errmsg_for_errno(errbuf, PCAP_ERRBUF_SIZE,
errno, "bpf_load: genminor failed");
(void)bpf_odmcleanup(NULL);
return (PCAP_ERROR);
}
}
if (bpf_odmcleanup(errbuf) == PCAP_ERROR)
return (PCAP_ERROR);
rc = stat(BPF_NODE "0", &sbuf);
if (rc == -1 && errno != ENOENT) {
pcap_fmt_errmsg_for_errno(errbuf, PCAP_ERRBUF_SIZE,
errno, "bpf_load: can't stat %s", BPF_NODE "0");
return (PCAP_ERROR);
}
if (rc == -1 || getmajor(sbuf.st_rdev) != major) {
for (i = 0; i < BPF_MINORS; i++) {
pcap_snprintf(buf, sizeof(buf), "%s%d", BPF_NODE, i);
unlink(buf);
if (mknod(buf, S_IRUSR | S_IFCHR, domakedev(major, i)) == -1) {
pcap_fmt_errmsg_for_errno(errbuf,
PCAP_ERRBUF_SIZE, errno,
"bpf_load: can't mknod %s", buf);
return (PCAP_ERROR);
}
}
}
/* Check if the driver is loaded */
memset(&cfg_ld, 0x0, sizeof(cfg_ld));
pcap_snprintf(buf, sizeof(buf), "%s/%s", DRIVER_PATH, BPF_NAME);
cfg_ld.path = buf;
if ((sysconfig(SYS_QUERYLOAD, (void *)&cfg_ld, sizeof(cfg_ld)) == -1) ||
(cfg_ld.kmid == 0)) {
/* Driver isn't loaded, load it now */
if (sysconfig(SYS_SINGLELOAD, (void *)&cfg_ld, sizeof(cfg_ld)) == -1) {
pcap_fmt_errmsg_for_errno(errbuf, PCAP_ERRBUF_SIZE,
errno, "bpf_load: could not load driver");
return (PCAP_ERROR);
}
}
/* Configure the driver */
cfg_km.cmd = CFG_INIT;
cfg_km.kmid = cfg_ld.kmid;
cfg_km.mdilen = sizeof(cfg_bpf);
cfg_km.mdiptr = (void *)&cfg_bpf;
for (i = 0; i < BPF_MINORS; i++) {
cfg_bpf.devno = domakedev(major, i);
if (sysconfig(SYS_CFGKMOD, (void *)&cfg_km, sizeof(cfg_km)) == -1) {
pcap_fmt_errmsg_for_errno(errbuf, PCAP_ERRBUF_SIZE,
errno, "bpf_load: could not configure driver");
return (PCAP_ERROR);
}
}
bpfloadedflag = 1;
return (0);
}
#endif
/*
* Undo any operations done when opening the device when necessary.
*/
static void
pcap_cleanup_bpf(pcap_t *p)
{
struct pcap_bpf *pb = p->priv;
#ifdef HAVE_BSD_IEEE80211
int sock;
struct ifmediareq req;
struct ifreq ifr;
#endif
if (pb->must_do_on_close != 0) {
/*
* There's something we have to do when closing this
* pcap_t.
*/
#ifdef HAVE_BSD_IEEE80211
if (pb->must_do_on_close & MUST_CLEAR_RFMON) {
/*
* We put the interface into rfmon mode;
* take it out of rfmon mode.
*
* XXX - if somebody else wants it in rfmon
* mode, this code cannot know that, so it'll take
* it out of rfmon mode.
*/
sock = socket(AF_INET, SOCK_DGRAM, 0);
if (sock == -1) {
fprintf(stderr,
"Can't restore interface flags (socket() failed: %s).\n"
"Please adjust manually.\n",
strerror(errno));
} else {
memset(&req, 0, sizeof(req));
strncpy(req.ifm_name, pb->device,
sizeof(req.ifm_name));
if (ioctl(sock, SIOCGIFMEDIA, &req) < 0) {
fprintf(stderr,
"Can't restore interface flags (SIOCGIFMEDIA failed: %s).\n"
"Please adjust manually.\n",
strerror(errno));
} else {
if (req.ifm_current & IFM_IEEE80211_MONITOR) {
/*
* Rfmon mode is currently on;
* turn it off.
*/
memset(&ifr, 0, sizeof(ifr));
(void)strncpy(ifr.ifr_name,
pb->device,
sizeof(ifr.ifr_name));
ifr.ifr_media =
req.ifm_current & ~IFM_IEEE80211_MONITOR;
if (ioctl(sock, SIOCSIFMEDIA,
&ifr) == -1) {
fprintf(stderr,
"Can't restore interface flags (SIOCSIFMEDIA failed: %s).\n"
"Please adjust manually.\n",
strerror(errno));
}
}
}
close(sock);
}
}
#endif /* HAVE_BSD_IEEE80211 */
#if defined(__FreeBSD__) && defined(SIOCIFCREATE2)
/*
* Attempt to destroy the usbusN interface that we created.
*/
if (pb->must_do_on_close & MUST_DESTROY_USBUS) {
if (if_nametoindex(pb->device) > 0) {
int s;
s = socket(AF_LOCAL, SOCK_DGRAM, 0);
if (s >= 0) {
pcap_strlcpy(ifr.ifr_name, pb->device,
sizeof(ifr.ifr_name));
ioctl(s, SIOCIFDESTROY, &ifr);
close(s);
}
}
}
#endif /* defined(__FreeBSD__) && defined(SIOCIFCREATE2) */
/*
* Take this pcap out of the list of pcaps for which we
* have to take the interface out of some mode.
*/
pcap_remove_from_pcaps_to_close(p);
pb->must_do_on_close = 0;
}
#ifdef HAVE_ZEROCOPY_BPF
if (pb->zerocopy) {
/*
* Delete the mappings. Note that p->buffer gets
* initialized to one of the mmapped regions in
* this case, so do not try and free it directly;
* null it out so that pcap_cleanup_live_common()
* doesn't try to free it.
*/
if (pb->zbuf1 != MAP_FAILED && pb->zbuf1 != NULL)
(void) munmap(pb->zbuf1, pb->zbufsize);
if (pb->zbuf2 != MAP_FAILED && pb->zbuf2 != NULL)
(void) munmap(pb->zbuf2, pb->zbufsize);
p->buffer = NULL;
}
#endif
if (pb->device != NULL) {
free(pb->device);
pb->device = NULL;
}
pcap_cleanup_live_common(p);
}
static int
check_setif_failure(pcap_t *p, int error)
{
#ifdef __APPLE__
int fd;
struct ifreq ifr;
int err;
#endif
if (error == ENXIO) {
/*
* No such device exists.
*/
#ifdef __APPLE__
if (p->opt.rfmon && strncmp(p->opt.device, "wlt", 3) == 0) {
/*
* Monitor mode was requested, and we're trying
* to open a "wltN" device. Assume that this
* is 10.4 and that we were asked to open an
* "enN" device; if that device exists, return
* "monitor mode not supported on the device".
*/
fd = socket(AF_INET, SOCK_DGRAM, 0);
if (fd != -1) {
pcap_strlcpy(ifr.ifr_name, "en",
sizeof(ifr.ifr_name));
pcap_strlcat(ifr.ifr_name, p->opt.device + 3,
sizeof(ifr.ifr_name));
if (ioctl(fd, SIOCGIFFLAGS, (char *)&ifr) < 0) {
/*
* We assume this failed because
* the underlying device doesn't
* exist.
*/
err = PCAP_ERROR_NO_SUCH_DEVICE;
pcap_fmt_errmsg_for_errno(p->errbuf,
PCAP_ERRBUF_SIZE, errno,
"SIOCGIFFLAGS on %s failed",
ifr.ifr_name);
} else {
/*
* The underlying "enN" device
* exists, but there's no
* corresponding "wltN" device;
* that means that the "enN"
* device doesn't support
* monitor mode, probably because
* it's an Ethernet device rather
* than a wireless device.
*/
err = PCAP_ERROR_RFMON_NOTSUP;
}
close(fd);
} else {
/*
* We can't find out whether there's
* an underlying "enN" device, so
* just report "no such device".
*/
err = PCAP_ERROR_NO_SUCH_DEVICE;
pcap_fmt_errmsg_for_errno(p->errbuf,
PCAP_ERRBUF_SIZE, errno,
"socket() failed");
}
return (err);
}
#endif
/*
* No such device.
*/
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "BIOCSETIF failed");
return (PCAP_ERROR_NO_SUCH_DEVICE);
} else if (errno == ENETDOWN) {
/*
* Return a "network down" indication, so that
* the application can report that rather than
* saying we had a mysterious failure and
* suggest that they report a problem to the
* libpcap developers.
*/
return (PCAP_ERROR_IFACE_NOT_UP);
} else {
/*
* Some other error; fill in the error string, and
* return PCAP_ERROR.
*/
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "BIOCSETIF: %s", p->opt.device);
return (PCAP_ERROR);
}
}
/*
* Default capture buffer size.
* 32K isn't very much for modern machines with fast networks; we
* pick .5M, as that's the maximum on at least some systems with BPF.
*
* However, on AIX 3.5, the larger buffer sizes caused unrecoverable
* read failures under stress, so we leave it as 32K; yet another
* place where AIX's BPF is broken.
*/
#ifdef _AIX
#define DEFAULT_BUFSIZE 32768
#else
#define DEFAULT_BUFSIZE 524288
#endif
static int
pcap_activate_bpf(pcap_t *p)
{
struct pcap_bpf *pb = p->priv;
int status = 0;
#ifdef HAVE_BSD_IEEE80211
int retv;
#endif
int fd;
#ifdef LIFNAMSIZ
char *zonesep;
struct lifreq ifr;
char *ifrname = ifr.lifr_name;
const size_t ifnamsiz = sizeof(ifr.lifr_name);
#else
struct ifreq ifr;
char *ifrname = ifr.ifr_name;
const size_t ifnamsiz = sizeof(ifr.ifr_name);
#endif
struct bpf_version bv;
#ifdef __APPLE__
int sockfd;
char *wltdev = NULL;
#endif
#ifdef BIOCGDLTLIST
struct bpf_dltlist bdl;
#if defined(__APPLE__) || defined(HAVE_BSD_IEEE80211)
int new_dlt;
#endif
#endif /* BIOCGDLTLIST */
#if defined(BIOCGHDRCMPLT) && defined(BIOCSHDRCMPLT)
u_int spoof_eth_src = 1;
#endif
u_int v;
struct bpf_insn total_insn;
struct bpf_program total_prog;
struct utsname osinfo;
int have_osinfo = 0;
#ifdef HAVE_ZEROCOPY_BPF
struct bpf_zbuf bz;
u_int bufmode, zbufmax;
#endif
fd = bpf_open(p->errbuf);
if (fd < 0) {
status = fd;
goto bad;
}
p->fd = fd;
if (ioctl(fd, BIOCVERSION, (caddr_t)&bv) < 0) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "BIOCVERSION");
status = PCAP_ERROR;
goto bad;
}
if (bv.bv_major != BPF_MAJOR_VERSION ||
bv.bv_minor < BPF_MINOR_VERSION) {
pcap_snprintf(p->errbuf, PCAP_ERRBUF_SIZE,
"kernel bpf filter out of date");
status = PCAP_ERROR;
goto bad;
}
/*
* Turn a negative snapshot value (invalid), a snapshot value of
* 0 (unspecified), or a value bigger than the normal maximum
* value, into the maximum allowed value.
*
* If some application really *needs* a bigger snapshot
* length, we should just increase MAXIMUM_SNAPLEN.
*/
if (p->snapshot <= 0 || p->snapshot > MAXIMUM_SNAPLEN)
p->snapshot = MAXIMUM_SNAPLEN;
#if defined(LIFNAMSIZ) && defined(ZONENAME_MAX) && defined(lifr_zoneid)
/*
* Retrieve the zoneid of the zone we are currently executing in.
*/
if ((ifr.lifr_zoneid = getzoneid()) == -1) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "getzoneid()");
status = PCAP_ERROR;
goto bad;
}
/*
* Check if the given source datalink name has a '/' separated
* zonename prefix string. The zonename prefixed source datalink can
* be used by pcap consumers in the Solaris global zone to capture
* traffic on datalinks in non-global zones. Non-global zones
* do not have access to datalinks outside of their own namespace.
*/
if ((zonesep = strchr(p->opt.device, '/')) != NULL) {
char path_zname[ZONENAME_MAX];
int znamelen;
char *lnamep;
if (ifr.lifr_zoneid != GLOBAL_ZONEID) {
pcap_snprintf(p->errbuf, PCAP_ERRBUF_SIZE,
"zonename/linkname only valid in global zone.");
status = PCAP_ERROR;
goto bad;
}
znamelen = zonesep - p->opt.device;
(void) pcap_strlcpy(path_zname, p->opt.device, znamelen + 1);
ifr.lifr_zoneid = getzoneidbyname(path_zname);
if (ifr.lifr_zoneid == -1) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "getzoneidbyname(%s)", path_zname);
status = PCAP_ERROR;
goto bad;
}
lnamep = strdup(zonesep + 1);
if (lnamep == NULL) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "strdup");
status = PCAP_ERROR;
goto bad;
}
free(p->opt.device);
p->opt.device = lnamep;
}
#endif
pb->device = strdup(p->opt.device);
if (pb->device == NULL) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "strdup");
status = PCAP_ERROR;
goto bad;
}
/*
* Attempt to find out the version of the OS on which we're running.
*/
if (uname(&osinfo) == 0)
have_osinfo = 1;
#ifdef __APPLE__
/*
* See comment in pcap_can_set_rfmon_bpf() for an explanation
* of why we check the version number.
*/
if (p->opt.rfmon) {
if (have_osinfo) {
/*
* We assume osinfo.sysname is "Darwin", because
* __APPLE__ is defined. We just check the version.
*/
if (osinfo.release[0] < '8' &&
osinfo.release[1] == '.') {
/*
* 10.3 (Darwin 7.x) or earlier.
*/
status = PCAP_ERROR_RFMON_NOTSUP;
goto bad;
}
if (osinfo.release[0] == '8' &&
osinfo.release[1] == '.') {
/*
* 10.4 (Darwin 8.x). s/en/wlt/
*/
if (strncmp(p->opt.device, "en", 2) != 0) {
/*
* Not an enN device; check
* whether the device even exists.
*/
sockfd = socket(AF_INET, SOCK_DGRAM, 0);
if (sockfd != -1) {
pcap_strlcpy(ifrname,
p->opt.device, ifnamsiz);
if (ioctl(sockfd, SIOCGIFFLAGS,
(char *)&ifr) < 0) {
/*
* We assume this
* failed because
* the underlying
* device doesn't
* exist.
*/
status = PCAP_ERROR_NO_SUCH_DEVICE;
pcap_fmt_errmsg_for_errno(p->errbuf,
PCAP_ERRBUF_SIZE,
errno,
"SIOCGIFFLAGS failed");
} else
status = PCAP_ERROR_RFMON_NOTSUP;
close(sockfd);
} else {
/*
* We can't find out whether
* the device exists, so just
* report "no such device".
*/
status = PCAP_ERROR_NO_SUCH_DEVICE;
pcap_fmt_errmsg_for_errno(p->errbuf,
PCAP_ERRBUF_SIZE, errno,
"socket() failed");
}
goto bad;
}
wltdev = malloc(strlen(p->opt.device) + 2);
if (wltdev == NULL) {
pcap_fmt_errmsg_for_errno(p->errbuf,
PCAP_ERRBUF_SIZE, errno,
"malloc");
status = PCAP_ERROR;
goto bad;
}
strcpy(wltdev, "wlt");
strcat(wltdev, p->opt.device + 2);
free(p->opt.device);
p->opt.device = wltdev;
}
/*
* Everything else is 10.5 or later; for those,
* we just open the enN device, and set the DLT.
*/
}
}
#endif /* __APPLE__ */
/*
* If this is FreeBSD, and the device name begins with "usbus",
* try to create the interface if it's not available.
*/
#if defined(__FreeBSD__) && defined(SIOCIFCREATE2)
if (strncmp(p->opt.device, usbus_prefix, USBUS_PREFIX_LEN) == 0) {
/*
* Do we already have an interface with that name?
*/
if (if_nametoindex(p->opt.device) == 0) {
/*
* No. We need to create it, and, if we
* succeed, remember that we should destroy
* it when the pcap_t is closed.
*/
int s;
/*
* Open a socket to use for ioctls to
* create the interface.
*/
s = socket(AF_LOCAL, SOCK_DGRAM, 0);
if (s < 0) {
pcap_fmt_errmsg_for_errno(p->errbuf,
PCAP_ERRBUF_SIZE, errno,
"Can't open socket");
status = PCAP_ERROR;
goto bad;
}
/*
* If we haven't already done so, arrange to have
* "pcap_close_all()" called when we exit.
*/
if (!pcap_do_addexit(p)) {
/*
* "atexit()" failed; don't create the
* interface, just give up.
*/
pcap_snprintf(p->errbuf, PCAP_ERRBUF_SIZE,
"atexit failed");
close(s);
status = PCAP_ERROR;
goto bad;
}
/*
* Create the interface.
*/
pcap_strlcpy(ifr.ifr_name, p->opt.device, sizeof(ifr.ifr_name));
if (ioctl(s, SIOCIFCREATE2, &ifr) < 0) {
if (errno == EINVAL) {
pcap_snprintf(p->errbuf, PCAP_ERRBUF_SIZE,
"Invalid USB bus interface %s",
p->opt.device);
} else {
pcap_fmt_errmsg_for_errno(p->errbuf,
PCAP_ERRBUF_SIZE, errno,
"Can't create interface for %s",
p->opt.device);
}
close(s);
status = PCAP_ERROR;
goto bad;
}
/*
* Make sure we clean this up when we close.
*/
pb->must_do_on_close |= MUST_DESTROY_USBUS;
/*
* Add this to the list of pcaps to close when we exit.
*/
pcap_add_to_pcaps_to_close(p);
}
}
#endif /* defined(__FreeBSD__) && defined(SIOCIFCREATE2) */
#ifdef HAVE_ZEROCOPY_BPF
/*
* If the BPF extension to set buffer mode is present, try setting
* the mode to zero-copy. If that fails, use regular buffering. If
* it succeeds but other setup fails, return an error to the user.
*/
bufmode = BPF_BUFMODE_ZBUF;
if (ioctl(fd, BIOCSETBUFMODE, (caddr_t)&bufmode) == 0) {
/*
* We have zerocopy BPF; use it.
*/
pb->zerocopy = 1;
/*
* How to pick a buffer size: first, query the maximum buffer
* size supported by zero-copy. This also lets us quickly
* determine whether the kernel generally supports zero-copy.
* Then, if a buffer size was specified, use that, otherwise
* query the default buffer size, which reflects kernel
* policy for a desired default. Round to the nearest page
* size.
*/
if (ioctl(fd, BIOCGETZMAX, (caddr_t)&zbufmax) < 0) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "BIOCGETZMAX");
status = PCAP_ERROR;
goto bad;
}
if (p->opt.buffer_size != 0) {
/*
* A buffer size was explicitly specified; use it.
*/
v = p->opt.buffer_size;
} else {
if ((ioctl(fd, BIOCGBLEN, (caddr_t)&v) < 0) ||
v < DEFAULT_BUFSIZE)
v = DEFAULT_BUFSIZE;
}
#ifndef roundup
#define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) /* to any y */
#endif
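/*
 * Worked example of the macro above (values are illustrative, not from
 * the original code): with v = 1000000 and a 4096-byte page size,
 * roundup(1000000, 4096) = ((1000000 + 4095) / 4096) * 4096
 *                        = 245 * 4096 = 1003520,
 * i.e. the smallest multiple of the page size that is >= v.
 */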
pb->zbufsize = roundup(v, getpagesize());
if (pb->zbufsize > zbufmax)
pb->zbufsize = zbufmax;
pb->zbuf1 = mmap(NULL, pb->zbufsize, PROT_READ | PROT_WRITE,
MAP_ANON, -1, 0);
pb->zbuf2 = mmap(NULL, pb->zbufsize, PROT_READ | PROT_WRITE,
MAP_ANON, -1, 0);
if (pb->zbuf1 == MAP_FAILED || pb->zbuf2 == MAP_FAILED) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "mmap");
status = PCAP_ERROR;
goto bad;
}
memset(&bz, 0, sizeof(bz)); /* bzero() deprecated, replaced with memset() */
bz.bz_bufa = pb->zbuf1;
bz.bz_bufb = pb->zbuf2;
bz.bz_buflen = pb->zbufsize;
if (ioctl(fd, BIOCSETZBUF, (caddr_t)&bz) < 0) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "BIOCSETZBUF");
status = PCAP_ERROR;
goto bad;
}
(void)strncpy(ifrname, p->opt.device, ifnamsiz);
if (ioctl(fd, BIOCSETIF, (caddr_t)&ifr) < 0) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "BIOCSETIF: %s", p->opt.device);
status = PCAP_ERROR;
goto bad;
}
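/*
 * Each zero-copy buffer begins with a struct bpf_zbuf_header, so the
 * space actually available for packet data is the buffer size minus
 * the size of that header.
 */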
v = pb->zbufsize - sizeof(struct bpf_zbuf_header);
} else
#endif
{
/*
* We don't have zerocopy BPF.
* Set the buffer size.
*/
if (p->opt.buffer_size != 0) {
/*
* A buffer size was explicitly specified; use it.
*/
if (ioctl(fd, BIOCSBLEN,
(caddr_t)&p->opt.buffer_size) < 0) {
pcap_fmt_errmsg_for_errno(p->errbuf,
PCAP_ERRBUF_SIZE, errno,
"BIOCSBLEN: %s", p->opt.device);
status = PCAP_ERROR;
goto bad;
}
/*
* Now bind to the device.
*/
(void)strncpy(ifrname, p->opt.device, ifnamsiz);
#ifdef BIOCSETLIF
if (ioctl(fd, BIOCSETLIF, (caddr_t)&ifr) < 0)
#else
if (ioctl(fd, BIOCSETIF, (caddr_t)&ifr) < 0)
#endif
{
status = check_setif_failure(p, errno);
goto bad;
}
} else {
/*
* No buffer size was explicitly specified.
*
* Try finding a good size for the buffer;
* DEFAULT_BUFSIZE may be too big, so keep
* cutting it in half until we find a size
* that works, or run out of sizes to try.
* If the default is larger, don't make it smaller.
*/
if ((ioctl(fd, BIOCGBLEN, (caddr_t)&v) < 0) ||
v < DEFAULT_BUFSIZE)
v = DEFAULT_BUFSIZE;
for ( ; v != 0; v >>= 1) {
/*
* Ignore the return value - this is because the
* call fails on BPF systems that don't have
* kernel malloc. And if the call fails, it's
* no big deal, we just continue to use the
* standard buffer size.
*/
(void) ioctl(fd, BIOCSBLEN, (caddr_t)&v);
(void)strncpy(ifrname, p->opt.device, ifnamsiz);
#ifdef BIOCSETLIF
if (ioctl(fd, BIOCSETLIF, (caddr_t)&ifr) >= 0)
#else
if (ioctl(fd, BIOCSETIF, (caddr_t)&ifr) >= 0)
#endif
break; /* that size worked; we're done */
if (errno != ENOBUFS) {
status = check_setif_failure(p, errno);
goto bad;
}
}
if (v == 0) {
pcap_snprintf(p->errbuf, PCAP_ERRBUF_SIZE,
"BIOCSBLEN: %s: No buffer size worked",
p->opt.device);
status = PCAP_ERROR;
goto bad;
}
}
}
/* Get the data link layer type. */
if (ioctl(fd, BIOCGDLT, (caddr_t)&v) < 0) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "BIOCGDLT");
status = PCAP_ERROR;
goto bad;
}
#ifdef _AIX
/*
* AIX's BPF returns IFF_ types, not DLT_ types, in BIOCGDLT.
*/
switch (v) {
case IFT_ETHER:
case IFT_ISO88023:
v = DLT_EN10MB;
break;
case IFT_FDDI:
v = DLT_FDDI;
break;
case IFT_ISO88025:
v = DLT_IEEE802;
break;
case IFT_LOOP:
v = DLT_NULL;
break;
default:
/*
* We don't know what to map this to yet.
*/
pcap_snprintf(p->errbuf, PCAP_ERRBUF_SIZE, "unknown interface type %u",
v);
status = PCAP_ERROR;
goto bad;
}
#endif
#if _BSDI_VERSION - 0 >= 199510
/* The SLIP and PPP link layer header changed in BSD/OS 2.1 */
switch (v) {
case DLT_SLIP:
v = DLT_SLIP_BSDOS;
break;
case DLT_PPP:
v = DLT_PPP_BSDOS;
break;
case 11: /*DLT_FR*/
v = DLT_FRELAY;
break;
case 12: /*DLT_C_HDLC*/
v = DLT_CHDLC;
break;
}
#endif
#ifdef BIOCGDLTLIST
/*
* We know the default link type -- now determine all the DLTs
* this interface supports. If this fails with EINVAL, it's
* not fatal; we just don't get to use the feature later.
*/
if (get_dlt_list(fd, v, &bdl, p->errbuf) == -1) {
status = PCAP_ERROR;
goto bad;
}
p->dlt_count = bdl.bfl_len;
p->dlt_list = bdl.bfl_list;
#ifdef __APPLE__
/*
* Monitor mode fun, continued.
*
* For 10.5 and, we're assuming, later releases, as noted above,
* 802.11 adapters that support monitor mode offer DLT_EN10MB,
* DLT_IEEE802_11, and possibly some 802.11-plus-radio-information
* DLT_ value. Choosing one of the 802.11 DLT_ values will turn
* monitor mode on.
*
* Therefore, if the user asked for monitor mode, we filter out
* the DLT_EN10MB value, as you can't get that in monitor mode,
* and, if the user didn't ask for monitor mode, we filter out
* the 802.11 DLT_ values, because selecting those will turn
* monitor mode on. Then, for monitor mode, if an 802.11-plus-
* radio DLT_ value is offered, we try to select that, otherwise
* we try to select DLT_IEEE802_11.
*/
if (have_osinfo) {
if (isdigit((unsigned)osinfo.release[0]) &&
(osinfo.release[0] == '9' ||
isdigit((unsigned)osinfo.release[1]))) {
/*
* 10.5 (Darwin 9.x), or later.
*/
new_dlt = find_802_11(&bdl);
if (new_dlt != -1) {
/*
* We have at least one 802.11 DLT_ value,
* so this is an 802.11 interface.
* new_dlt is the best of the 802.11
* DLT_ values in the list.
*/
if (p->opt.rfmon) {
/*
* Our caller wants monitor mode.
* Purge DLT_EN10MB from the list
* of link-layer types, as selecting
* it will keep monitor mode off.
*/
remove_non_802_11(p);
/*
* If the new mode we want isn't
* the default mode, attempt to
* select the new mode.
*/
if ((u_int)new_dlt != v) {
if (ioctl(p->fd, BIOCSDLT,
&new_dlt) != -1) {
/*
* We succeeded;
* make this the
* new DLT_ value.
*/
v = new_dlt;
}
}
} else {
/*
* Our caller doesn't want
* monitor mode. Unless this
* is being done by pcap_open_live(),
* purge the 802.11 link-layer types
* from the list, as selecting
* one of them will turn monitor
* mode on.
*/
if (!p->oldstyle)
remove_802_11(p);
}
} else {
if (p->opt.rfmon) {
/*
* The caller requested monitor
* mode, but we have no 802.11
* link-layer types, so they
* can't have it.
*/
status = PCAP_ERROR_RFMON_NOTSUP;
goto bad;
}
}
}
}
#elif defined(HAVE_BSD_IEEE80211)
/*
* *BSD with the new 802.11 ioctls.
* Do we want monitor mode?
*/
if (p->opt.rfmon) {
/*
* Try to put the interface into monitor mode.
*/
retv = monitor_mode(p, 1);
if (retv != 0) {
/*
* We failed.
*/
status = retv;
goto bad;
}
/*
* We're in monitor mode.
* Try to find the best 802.11 DLT_ value and, if we
* succeed, try to switch to that mode if we're not
* already in that mode.
*/
new_dlt = find_802_11(&bdl);
if (new_dlt != -1) {
/*
* We have at least one 802.11 DLT_ value.
* new_dlt is the best of the 802.11
* DLT_ values in the list.
*
* If the new mode we want isn't the default mode,
* attempt to select the new mode.
*/
if ((u_int)new_dlt != v) {
if (ioctl(p->fd, BIOCSDLT, &new_dlt) != -1) {
/*
* We succeeded; make this the
* new DLT_ value.
*/
v = new_dlt;
}
}
}
}
#endif /* various platforms */
#endif /* BIOCGDLTLIST */
/*
* If this is an Ethernet device, and we don't have a DLT_ list,
* give it a list with DLT_EN10MB and DLT_DOCSIS. (That'd give
* 802.11 interfaces DLT_DOCSIS, which isn't the right thing to
* do, but there's not much we can do about that without finding
* some other way of determining whether it's an Ethernet or 802.11
* device.)
*/
if (v == DLT_EN10MB && p->dlt_count == 0) {
p->dlt_list = (u_int *) malloc(sizeof(u_int) * 2);
/*
* If that fails, just leave the list empty.
*/
if (p->dlt_list != NULL) {
p->dlt_list[0] = DLT_EN10MB;
p->dlt_list[1] = DLT_DOCSIS;
p->dlt_count = 2;
}
}
#ifdef PCAP_FDDIPAD
if (v == DLT_FDDI)
p->fddipad = PCAP_FDDIPAD;
else
#endif
p->fddipad = 0;
p->linktype = v;
#if defined(BIOCGHDRCMPLT) && defined(BIOCSHDRCMPLT)
/*
* Do a BIOCSHDRCMPLT, if defined, to turn that flag on, so
* the link-layer source address isn't forcibly overwritten.
* (Should we ignore errors? Should we do this only if
* we're open for writing?)
*
* XXX - I seem to remember some packet-sending bug in some
* BSDs - check CVS log for "bpf.c"?
*/
if (ioctl(fd, BIOCSHDRCMPLT, &spoof_eth_src) == -1) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "BIOCSHDRCMPLT");
status = PCAP_ERROR;
goto bad;
}
#endif
/* set timeout */
#ifdef HAVE_ZEROCOPY_BPF
/*
* In zero-copy mode, we just use the timeout in select().
* XXX - what if we're in non-blocking mode and the *application*
* is using select() or poll() or kqueues or....?
*/
if (p->opt.timeout && !pb->zerocopy) {
#else
if (p->opt.timeout) {
#endif
/*
* XXX - is this seconds/nanoseconds in AIX?
* (Treating it as such doesn't fix the timeout
* problem described below.)
*
* XXX - Mac OS X 10.6 mishandles BIOCSRTIMEOUT in
* 64-bit userland - it takes, as an argument, a
* "struct BPF_TIMEVAL", which has 32-bit tv_sec
* and tv_usec, rather than a "struct timeval".
*
* If this platform defines "struct BPF_TIMEVAL",
* we check whether the structure size in BIOCSRTIMEOUT
* is that of a "struct timeval" and, if not, we use
* a "struct BPF_TIMEVAL" rather than a "struct timeval".
* (That way, if the bug is fixed in a future release,
* we will still do the right thing.)
*/
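/*
 * Illustrative example of the millisecond-to-timeval conversion done
 * below (the 1500 ms value is hypothetical): tv_sec = 1500 / 1000 = 1
 * and tv_usec = (1500 * 1000) % 1000000 = 500000.
 */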
struct timeval to;
#ifdef HAVE_STRUCT_BPF_TIMEVAL
struct BPF_TIMEVAL bpf_to;
if (IOCPARM_LEN(BIOCSRTIMEOUT) != sizeof(struct timeval)) {
bpf_to.tv_sec = p->opt.timeout / 1000;
bpf_to.tv_usec = (p->opt.timeout * 1000) % 1000000;
if (ioctl(p->fd, BIOCSRTIMEOUT, (caddr_t)&bpf_to) < 0) {
pcap_fmt_errmsg_for_errno(p->errbuf,
PCAP_ERRBUF_SIZE, errno, "BIOCSRTIMEOUT");
status = PCAP_ERROR;
goto bad;
}
} else {
#endif
to.tv_sec = p->opt.timeout / 1000;
to.tv_usec = (p->opt.timeout * 1000) % 1000000;
if (ioctl(p->fd, BIOCSRTIMEOUT, (caddr_t)&to) < 0) {
pcap_fmt_errmsg_for_errno(p->errbuf,
PCAP_ERRBUF_SIZE, errno, "BIOCSRTIMEOUT");
status = PCAP_ERROR;
goto bad;
}
#ifdef HAVE_STRUCT_BPF_TIMEVAL
}
#endif
}
#ifdef BIOCIMMEDIATE
/*
* Darren Reed notes that
*
* On AIX (4.2 at least), if BIOCIMMEDIATE is not set, the
* timeout appears to be ignored and it waits until the buffer
* is filled before returning. The result of not having it
* set is almost worse than useless if your BPF filter
* is reducing things to only a few packets (i.e. one every
* second or so).
*
* so we always turn BIOCIMMEDIATE mode on if this is AIX.
*
* For other platforms, we don't turn immediate mode on by default,
* as that would mean we get woken up for every packet, which
* probably isn't what you want for a packet sniffer.
*
* We set immediate mode if the caller requested it by calling
* pcap_set_immediate() before calling pcap_activate().
*/
#ifndef _AIX
if (p->opt.immediate) {
#endif /* _AIX */
v = 1;
if (ioctl(p->fd, BIOCIMMEDIATE, &v) < 0) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "BIOCIMMEDIATE");
status = PCAP_ERROR;
goto bad;
}
#ifndef _AIX
}
#endif /* _AIX */
#else /* BIOCIMMEDIATE */
if (p->opt.immediate) {
/*
* We don't support immediate mode. Fail.
*/
pcap_snprintf(p->errbuf, PCAP_ERRBUF_SIZE, "Immediate mode not supported");
status = PCAP_ERROR;
goto bad;
}
#endif /* BIOCIMMEDIATE */
if (p->opt.promisc) {
/* set promiscuous mode, just warn if it fails */
if (ioctl(p->fd, BIOCPROMISC, NULL) < 0) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "BIOCPROMISC");
status = PCAP_WARNING_PROMISC_NOTSUP;
}
}
#ifdef BIOCSTSTAMP
v = BPF_T_BINTIME;
if (ioctl(p->fd, BIOCSTSTAMP, &v) < 0) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "BIOCSTSTAMP");
status = PCAP_ERROR;
goto bad;
}
#endif /* BIOCSTSTAMP */
if (ioctl(fd, BIOCGBLEN, (caddr_t)&v) < 0) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "BIOCGBLEN");
status = PCAP_ERROR;
goto bad;
}
p->bufsize = v;
#ifdef HAVE_ZEROCOPY_BPF
if (!pb->zerocopy) {
#endif
p->buffer = malloc(p->bufsize);
if (p->buffer == NULL) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "malloc");
status = PCAP_ERROR;
goto bad;
}
#ifdef _AIX
/* For some strange reason this seems to prevent the EFAULT
* problems we have experienced from AIX BPF. */
memset(p->buffer, 0x0, p->bufsize);
#endif
#ifdef HAVE_ZEROCOPY_BPF
}
#endif
/*
* If there's no filter program installed, there's
* no indication to the kernel of what the snapshot
* length should be, so no snapshotting is done.
*
* Therefore, when we open the device, we install
* an "accept everything" filter with the specified
* snapshot length.
*/
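/*
 * The one-instruction program below is "BPF_RET | BPF_K" with k set to
 * the snapshot length, i.e. "accept every packet and deliver at most
 * p->snapshot bytes of it".
 */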
total_insn.code = (u_short)(BPF_RET | BPF_K);
total_insn.jt = 0;
total_insn.jf = 0;
total_insn.k = p->snapshot;
total_prog.bf_len = 1;
total_prog.bf_insns = &total_insn;
if (ioctl(p->fd, BIOCSETF, (caddr_t)&total_prog) < 0) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "BIOCSETF");
status = PCAP_ERROR;
goto bad;
}
/*
* On most BPF platforms, either you can do a "select()" or
* "poll()" on a BPF file descriptor and it works correctly,
* or you can do it and it will return "readable" if the
* hold buffer is full but not if the timeout expires *and*
* a non-blocking read will, if the hold buffer is empty
* but the store buffer isn't empty, rotate the buffers
* and return what packets are available.
*
* In the latter case, the fact that a non-blocking read
* will give you the available packets means you can work
* around the failure of "select()" and "poll()" to wake up
* and return "readable" when the timeout expires by using
* the timeout as the "select()" or "poll()" timeout, putting
* the BPF descriptor into non-blocking mode, and read from
* it regardless of whether "select()" reports it as readable
* or not.
*
* However, in FreeBSD 4.3 and 4.4, "select()" and "poll()"
* won't wake up and return "readable" if the timer expires
* and non-blocking reads return EWOULDBLOCK if the hold
* buffer is empty, even if the store buffer is non-empty.
*
* This means the workaround in question won't work.
*
* Therefore, on FreeBSD 4.3 and 4.4, we set "p->selectable_fd"
* to -1, which means "sorry, you can't use 'select()' or 'poll()'
* here". On all other BPF platforms, we set it to the FD for
* the BPF device; in NetBSD, OpenBSD, and Darwin, a non-blocking
* read will, if the hold buffer is empty and the store buffer
* isn't empty, rotate the buffers and return what packets are
* there (and in sufficiently recent versions of OpenBSD
* "select()" and "poll()" should work correctly).
*
* XXX - what about AIX?
*/
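/*
 * A minimal sketch of the workaround described above (illustrative
 * only; to_ms, buf, buflen and n are hypothetical names, and this is
 * not part of the original code): put the descriptor into non-blocking
 * mode, use the capture timeout as the select() timeout, and then read
 * regardless of whether select() reported the descriptor as readable:
 *
 *	struct timeval tv = { to_ms / 1000, (to_ms % 1000) * 1000 };
 *	fd_set rs;
 *	FD_ZERO(&rs);
 *	FD_SET(fd, &rs);
 *	(void)select(fd + 1, &rs, NULL, NULL, &tv);
 *	n = read(fd, buf, buflen);
 *
 * On the platforms described above (other than FreeBSD 4.3/4.4), the
 * read() may return buffered packets even if select() timed out.
 */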
p->selectable_fd = p->fd; /* assume select() works until we know otherwise */
if (have_osinfo) {
/*
* We can check what OS this is.
*/
if (strcmp(osinfo.sysname, "FreeBSD") == 0) {
if (strncmp(osinfo.release, "4.3-", 4) == 0 ||
strncmp(osinfo.release, "4.4-", 4) == 0)
p->selectable_fd = -1;
}
}
p->read_op = pcap_read_bpf;
p->inject_op = pcap_inject_bpf;
p->setfilter_op = pcap_setfilter_bpf;
p->setdirection_op = pcap_setdirection_bpf;
p->set_datalink_op = pcap_set_datalink_bpf;
p->getnonblock_op = pcap_getnonblock_bpf;
p->setnonblock_op = pcap_setnonblock_bpf;
p->stats_op = pcap_stats_bpf;
p->cleanup_op = pcap_cleanup_bpf;
return (status);
bad:
pcap_cleanup_bpf(p);
return (status);
}
/*
* Not all interfaces can be bound to by BPF, so try to bind to
* the specified interface; return 0 if we fail with
* PCAP_ERROR_NO_SUCH_DEVICE (which means we got an ENXIO when we tried
* to bind, which means this interface isn't in the list of interfaces
* attached to BPF) and 1 otherwise.
*/
static int
check_bpf_bindable(const char *name)
{
int fd;
char errbuf[PCAP_ERRBUF_SIZE];
/*
* On macOS, we don't do this check if the device name begins
* with "wlt"; at least some versions of macOS (actually, it
* was called "Mac OS X" then...) offer monitor mode capturing
* by having a separate "monitor mode" device for each wireless
* adapter, rather than by implementing the ioctls that
* {Free,Net,Open,DragonFly}BSD provide. Opening that device
* puts the adapter into monitor mode, which, at least for
* some adapters, causes them to deassociate from the network
* with which they're associated.
*
* Instead, we try to open the corresponding "en" device (so
* that we don't end up with, for users without sufficient
* privilege to open capture devices, a list of adapters that
* only includes the wlt devices).
*/
#ifdef __APPLE__
if (strncmp(name, "wlt", 3) == 0) {
char *en_name;
size_t en_name_len;
/*
* Try to allocate a buffer for the "en"
* device's name.
*/
en_name_len = strlen(name) - 1;
en_name = malloc(en_name_len + 1);
if (en_name == NULL) {
pcap_fmt_errmsg_for_errno(errbuf, PCAP_ERRBUF_SIZE,
errno, "malloc");
return (-1);
}
strcpy(en_name, "en");
strcat(en_name, name + 3);
fd = bpf_open_and_bind(en_name, errbuf);
free(en_name);
} else
#endif /* __APPLE */
fd = bpf_open_and_bind(name, errbuf);
if (fd < 0) {
/*
* Error - was it PCAP_ERROR_NO_SUCH_DEVICE?
*/
if (fd == PCAP_ERROR_NO_SUCH_DEVICE) {
/*
* Yes, so we can't bind to this because it's
* not something supported by BPF.
*/
return (0);
}
/*
* No, so we don't know whether it's supported or not;
* say it is, so that the user can at least try to
* open it and report the error (which is probably
* "you don't have permission to open BPF devices";
* reporting those interfaces means users will ask
* "why am I getting a permissions error when I try
* to capture" rather than "why am I not seeing any
* interfaces", making the underlying problem clearer).
*/
return (1);
}
/*
* Success.
*/
close(fd);
return (1);
}
#if defined(__FreeBSD__) && defined(SIOCIFCREATE2)
static int
get_usb_if_flags(const char *name _U_, bpf_u_int32 *flags _U_, char *errbuf _U_)
{
/*
* XXX - if there's a way to determine whether there's something
* plugged into a given USB bus, use that to determine whether
* this device is "connected" or not.
*/
return (0);
}
static int
finddevs_usb(pcap_if_list_t *devlistp, char *errbuf)
{
DIR *usbdir;
struct dirent *usbitem;
size_t name_max;
char *name;
/*
* We might have USB sniffing support, so try looking for USB
* interfaces.
*
* We want to report a usbusN device for each USB bus, but
* usbusN interfaces might, or might not, exist for them -
* we create one if there isn't already one.
*
* So, instead, we look in /dev/usb for all buses and create
* a "usbusN" device for each one.
*/
usbdir = opendir("/dev/usb");
if (usbdir == NULL) {
/*
* Just punt.
*/
return (0);
}
/*
* Leave enough room for a 32-bit (10-digit) bus number.
* Yes, that's overkill, but we won't be using
* the buffer very long.
*/
name_max = USBUS_PREFIX_LEN + 10 + 1;
name = malloc(name_max);
if (name == NULL) {
closedir(usbdir);
return (0);
}
while ((usbitem = readdir(usbdir)) != NULL) {
char *p;
size_t busnumlen;
if (strcmp(usbitem->d_name, ".") == 0 ||
strcmp(usbitem->d_name, "..") == 0) {
/*
* Ignore these.
*/
continue;
}
p = strchr(usbitem->d_name, '.');
if (p == NULL)
continue;
busnumlen = p - usbitem->d_name;
memcpy(name, usbus_prefix, USBUS_PREFIX_LEN);
memcpy(name + USBUS_PREFIX_LEN, usbitem->d_name, busnumlen);
*(name + USBUS_PREFIX_LEN + busnumlen) = '\0';
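/*
 * For example (hypothetical entry, assuming the usual "usbus" prefix):
 * a /dev/usb entry named "2.1" gives busnumlen 1, so the capture
 * device name constructed here is "usbus2".
 */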
/*
* There's an entry in this directory for every USB device,
* not for every bus; if there's more than one device on
* the bus, there'll be more than one entry for that bus,
* so we need to avoid adding multiple capture devices
* for each bus.
*/
if (find_or_add_dev(devlistp, name, PCAP_IF_UP,
get_usb_if_flags, NULL, errbuf) == NULL) {
free(name);
closedir(usbdir);
return (PCAP_ERROR);
}
}
free(name);
closedir(usbdir);
return (0);
}
#endif
/*
* Get additional flags for a device, using SIOCGIFMEDIA.
*/
#ifdef SIOCGIFMEDIA
static int
get_if_flags(const char *name, bpf_u_int32 *flags, char *errbuf)
{
int sock;
struct ifmediareq req;
sock = socket(AF_INET, SOCK_DGRAM, 0);
if (sock == -1) {
pcap_fmt_errmsg_for_errno(errbuf, PCAP_ERRBUF_SIZE, errno,
"Can't create socket to get media information for %s",
name);
return (-1);
}
memset(&req, 0, sizeof(req));
strncpy(req.ifm_name, name, sizeof(req.ifm_name));
if (ioctl(sock, SIOCGIFMEDIA, &req) < 0) {
if (errno == EOPNOTSUPP || errno == EINVAL || errno == ENOTTY ||
errno == ENODEV || errno == EPERM) {
/*
* Not supported, so we can't provide any
* additional information. Assume that
* this means that "connected" vs.
* "disconnected" doesn't apply.
*
* The ioctl routine for Apple's pktap devices,
* annoyingly, checks for "are you root?" before
* checking whether the ioctl is valid, so it
* returns EPERM, rather than ENOTSUP, for the
* invalid SIOCGIFMEDIA, unless you're root.
* So, just as we do for some ethtool ioctls
* on Linux, which makes the same mistake, we
* also treat EPERM as meaning "not supported".
*/
*flags |= PCAP_IF_CONNECTION_STATUS_NOT_APPLICABLE;
close(sock);
return (0);
}
pcap_fmt_errmsg_for_errno(errbuf, PCAP_ERRBUF_SIZE, errno,
"SIOCGIFMEDIA on %s failed", name);
close(sock);
return (-1);
}
close(sock);
/*
* OK, what type of network is this?
*/
switch (IFM_TYPE(req.ifm_active)) {
case IFM_IEEE80211:
/*
* Wireless.
*/
*flags |= PCAP_IF_WIRELESS;
break;
}
/*
* Do we know whether it's connected?
*/
if (req.ifm_status & IFM_AVALID) {
/*
* Yes.
*/
if (req.ifm_status & IFM_ACTIVE) {
/*
* It's connected.
*/
*flags |= PCAP_IF_CONNECTION_STATUS_CONNECTED;
} else {
/*
* It's disconnected.
*/
*flags |= PCAP_IF_CONNECTION_STATUS_DISCONNECTED;
}
}
return (0);
}
#else
static int
get_if_flags(const char *name _U_, bpf_u_int32 *flags _U_, char *errbuf _U_)
{
/*
* Nothing we can do other than mark loopback devices as "the
* connected/disconnected status doesn't apply".
*
* XXX - on Solaris, can we do what the dladm command does,
* i.e. get a connected/disconnected indication from a kstat?
* (Note that you can also get the link speed, and possibly
* other information, from a kstat as well.)
*/
if (*flags & PCAP_IF_LOOPBACK) {
/*
* Loopback devices aren't wireless, and "connected"/
* "disconnected" doesn't apply to them.
*/
*flags |= PCAP_IF_CONNECTION_STATUS_NOT_APPLICABLE;
return (0);
}
return (0);
}
#endif
int
pcap_platform_finddevs(pcap_if_list_t *devlistp, char *errbuf)
{
/*
* Get the list of regular interfaces first.
*/
if (pcap_findalldevs_interfaces(devlistp, errbuf, check_bpf_bindable,
get_if_flags) == -1)
return (-1); /* failure */
#if defined(__FreeBSD__) && defined(SIOCIFCREATE2)
if (finddevs_usb(devlistp, errbuf) == -1)
return (-1);
#endif
return (0);
}
#ifdef HAVE_BSD_IEEE80211
static int
monitor_mode(pcap_t *p, int set)
{
struct pcap_bpf *pb = p->priv;
int sock;
struct ifmediareq req;
IFM_ULIST_TYPE *media_list;
int i;
int can_do;
struct ifreq ifr;
sock = socket(AF_INET, SOCK_DGRAM, 0);
if (sock == -1) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "can't open socket");
return (PCAP_ERROR);
}
memset(&req, 0, sizeof req);
strncpy(req.ifm_name, p->opt.device, sizeof req.ifm_name);
/*
* Find out how many media types we have.
*/
if (ioctl(sock, SIOCGIFMEDIA, &req) < 0) {
/*
* Can't get the media types.
*/
switch (errno) {
case ENXIO:
/*
* There's no such device.
*/
close(sock);
return (PCAP_ERROR_NO_SUCH_DEVICE);
case EINVAL:
/*
* Interface doesn't support SIOC{G,S}IFMEDIA.
*/
close(sock);
return (PCAP_ERROR_RFMON_NOTSUP);
default:
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "SIOCGIFMEDIA");
close(sock);
return (PCAP_ERROR);
}
}
if (req.ifm_count == 0) {
/*
* No media types.
*/
close(sock);
return (PCAP_ERROR_RFMON_NOTSUP);
}
/*
* Allocate a buffer to hold all the media types, and
* get the media types.
*/
media_list = malloc(req.ifm_count * sizeof(*media_list));
if (media_list == NULL) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "malloc");
close(sock);
return (PCAP_ERROR);
}
req.ifm_ulist = media_list;
if (ioctl(sock, SIOCGIFMEDIA, &req) < 0) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "SIOCGIFMEDIA");
free(media_list);
close(sock);
return (PCAP_ERROR);
}
/*
* Look for an 802.11 "automatic" media type.
* We assume that all 802.11 adapters have that media type,
* and that it will carry the monitor mode supported flag.
*/
can_do = 0;
for (i = 0; i < req.ifm_count; i++) {
if (IFM_TYPE(media_list[i]) == IFM_IEEE80211
&& IFM_SUBTYPE(media_list[i]) == IFM_AUTO) {
/* OK, does it do monitor mode? */
if (media_list[i] & IFM_IEEE80211_MONITOR) {
can_do = 1;
break;
}
}
}
free(media_list);
if (!can_do) {
/*
* This adapter doesn't support monitor mode.
*/
close(sock);
return (PCAP_ERROR_RFMON_NOTSUP);
}
if (set) {
/*
* Don't just check whether we can enable monitor mode,
* do so, if it's not already enabled.
*/
if ((req.ifm_current & IFM_IEEE80211_MONITOR) == 0) {
/*
* Monitor mode isn't currently on, so turn it on,
* and remember that we should turn it off when the
* pcap_t is closed.
*/
/*
* If we haven't already done so, arrange to have
* "pcap_close_all()" called when we exit.
*/
if (!pcap_do_addexit(p)) {
/*
* "atexit()" failed; don't put the interface
* in monitor mode, just give up.
*/
close(sock);
return (PCAP_ERROR);
}
memset(&ifr, 0, sizeof(ifr));
(void)strncpy(ifr.ifr_name, p->opt.device,
sizeof(ifr.ifr_name));
ifr.ifr_media = req.ifm_current | IFM_IEEE80211_MONITOR;
if (ioctl(sock, SIOCSIFMEDIA, &ifr) == -1) {
pcap_fmt_errmsg_for_errno(p->errbuf,
PCAP_ERRBUF_SIZE, errno, "SIOCSIFMEDIA");
close(sock);
return (PCAP_ERROR);
}
pb->must_do_on_close |= MUST_CLEAR_RFMON;
/*
* Add this to the list of pcaps to close when we exit.
*/
pcap_add_to_pcaps_to_close(p);
}
}
return (0);
}
#endif /* HAVE_BSD_IEEE80211 */
#if defined(BIOCGDLTLIST) && (defined(__APPLE__) || defined(HAVE_BSD_IEEE80211))
/*
* Check whether we have any 802.11 link-layer types; return the best
* of the 802.11 link-layer types if we find one, and return -1
* otherwise.
*
* DLT_IEEE802_11_RADIO, with the radiotap header, is considered the
* best 802.11 link-layer type; any of the other 802.11-plus-radio
* headers are second-best; 802.11 with no radio information is
* the least good.
*/
static int
find_802_11(struct bpf_dltlist *bdlp)
{
int new_dlt;
u_int i;
/*
* Scan the list of DLT_ values, looking for 802.11 values,
* and, if we find any, choose the best of them.
*/
new_dlt = -1;
for (i = 0; i < bdlp->bfl_len; i++) {
switch (bdlp->bfl_list[i]) {
case DLT_IEEE802_11:
/*
* 802.11, but no radio.
*
* Offer this, and select it as the new mode
* unless we've already found an 802.11
* header with radio information.
*/
if (new_dlt == -1)
new_dlt = bdlp->bfl_list[i];
break;
#ifdef DLT_PRISM_HEADER
case DLT_PRISM_HEADER:
#endif
#ifdef DLT_AIRONET_HEADER
case DLT_AIRONET_HEADER:
#endif
case DLT_IEEE802_11_RADIO_AVS:
/*
* 802.11 with radio, but not radiotap.
*
* Offer this, and select it as the new mode
* unless we've already found the radiotap DLT_.
*/
if (new_dlt != DLT_IEEE802_11_RADIO)
new_dlt = bdlp->bfl_list[i];
break;
case DLT_IEEE802_11_RADIO:
/*
* 802.11 with radiotap.
*
* Offer this, and select it as the new mode.
*/
new_dlt = bdlp->bfl_list[i];
break;
default:
/*
* Not 802.11.
*/
break;
}
}
return (new_dlt);
}
#endif /* defined(BIOCGDLTLIST) && (defined(__APPLE__) || defined(HAVE_BSD_IEEE80211)) */
#if defined(__APPLE__) && defined(BIOCGDLTLIST)
/*
* Remove non-802.11 header types from the list of DLT_ values, as we're in
* monitor mode, and those header types aren't supported in monitor mode.
*/
static void
remove_non_802_11(pcap_t *p)
{
int i, j;
/*
* Scan the list of DLT_ values and discard non-802.11 ones.
*/
j = 0;
for (i = 0; i < p->dlt_count; i++) {
switch (p->dlt_list[i]) {
case DLT_EN10MB:
case DLT_RAW:
/*
* Not 802.11. Don't offer this one.
*/
continue;
default:
/*
* Just copy this mode over.
*/
break;
}
/*
* Copy this DLT_ value to its new position.
*/
p->dlt_list[j] = p->dlt_list[i];
j++;
}
/*
* Set the DLT_ count to the number of entries we copied.
*/
p->dlt_count = j;
}
/*
* Remove 802.11 link-layer types from the list of DLT_ values, as
* we're not in monitor mode, and those DLT_ values will switch us
* to monitor mode.
*/
static void
remove_802_11(pcap_t *p)
{
int i, j;
/*
* Scan the list of DLT_ values and discard 802.11 values.
*/
j = 0;
for (i = 0; i < p->dlt_count; i++) {
switch (p->dlt_list[i]) {
case DLT_IEEE802_11:
#ifdef DLT_PRISM_HEADER
case DLT_PRISM_HEADER:
#endif
#ifdef DLT_AIRONET_HEADER
case DLT_AIRONET_HEADER:
#endif
case DLT_IEEE802_11_RADIO:
case DLT_IEEE802_11_RADIO_AVS:
#ifdef DLT_PPI
case DLT_PPI:
#endif
/*
* 802.11. Don't offer this one.
*/
continue;
default:
/*
* Just copy this mode over.
*/
break;
}
/*
* Copy this DLT_ value to its new position.
*/
p->dlt_list[j] = p->dlt_list[i];
j++;
}
/*
* Set the DLT_ count to the number of entries we copied.
*/
p->dlt_count = j;
}
#endif /* defined(__APPLE__) && defined(BIOCGDLTLIST) */
static int
pcap_setfilter_bpf(pcap_t *p, struct bpf_program *fp)
{
struct pcap_bpf *pb = p->priv;
/*
* Free any user-mode filter we might happen to have installed.
*/
pcap_freecode(&p->fcode);
/*
* Try to install the kernel filter.
*/
if (ioctl(p->fd, BIOCSETF, (caddr_t)fp) == 0) {
/*
* It worked.
*/
pb->filtering_in_kernel = 1; /* filtering in the kernel */
/*
* Discard any previously-received packets, as they might
* have passed whatever filter was formerly in effect, but
* might not pass this filter (BIOCSETF discards packets
* buffered in the kernel, so you can lose packets in any
* case).
*/
p->cc = 0;
return (0);
}
/*
* We failed.
*
* If it failed with EINVAL, that's probably because the program
* is invalid or too big. Validate it ourselves; if we like it
* (we currently allow backward branches, to support protochain),
* run it in userland. (There's no notion of "too big" for
* userland.)
*
* Otherwise, just give up.
* XXX - if the copy of the program into the kernel failed,
* we will get EINVAL rather than, say, EFAULT on at least
* some kernels.
*/
if (errno != EINVAL) {
pcap_fmt_errmsg_for_errno(p->errbuf, PCAP_ERRBUF_SIZE,
errno, "BIOCSETF");
return (-1);
}
/*
* install_bpf_program() validates the program.
*
* XXX - what if we already have a filter in the kernel?
*/
if (install_bpf_program(p, fp) < 0)
return (-1);
pb->filtering_in_kernel = 0; /* filtering in userland */
return (0);
}
/*
* Set direction flag: which packets do we accept on a single
* forwarding device? IN, OUT, or both?
*/
#if defined(BIOCSDIRECTION)
static int
pcap_setdirection_bpf(pcap_t *p, pcap_direction_t d)
{
u_int direction;
direction = (d == PCAP_D_IN) ? BPF_D_IN :
((d == PCAP_D_OUT) ? BPF_D_OUT : BPF_D_INOUT);
if (ioctl(p->fd, BIOCSDIRECTION, &direction) == -1) {
pcap_fmt_errmsg_for_errno(p->errbuf, sizeof(p->errbuf),
errno, "Cannot set direction to %s",
(d == PCAP_D_IN) ? "PCAP_D_IN" :
((d == PCAP_D_OUT) ? "PCAP_D_OUT" : "PCAP_D_INOUT"));
return (-1);
}
return (0);
}
#elif defined(BIOCSSEESENT)
static int
pcap_setdirection_bpf(pcap_t *p, pcap_direction_t d)
{
u_int seesent;
/*
* We don't support PCAP_D_OUT.
*/
if (d == PCAP_D_OUT) {
pcap_snprintf(p->errbuf, sizeof(p->errbuf),
"Setting direction to PCAP_D_OUT is not supported on BPF");
return -1;
}
seesent = (d == PCAP_D_INOUT);
if (ioctl(p->fd, BIOCSSEESENT, &seesent) == -1) {
pcap_fmt_errmsg_for_errno(p->errbuf, sizeof(p->errbuf),
errno, "Cannot set direction to %s",
(d == PCAP_D_INOUT) ? "PCAP_D_INOUT" : "PCAP_D_IN");
return (-1);
}
return (0);
}
#else
static int
pcap_setdirection_bpf(pcap_t *p, pcap_direction_t d _U_)
{
(void) pcap_snprintf(p->errbuf, sizeof(p->errbuf),
"This system doesn't support BIOCSSEESENT, so the direction can't be set");
return (-1);
}
#endif
#ifdef BIOCSDLT
static int
pcap_set_datalink_bpf(pcap_t *p, int dlt)
{
if (ioctl(p->fd, BIOCSDLT, &dlt) == -1) {
pcap_fmt_errmsg_for_errno(p->errbuf, sizeof(p->errbuf),
errno, "Cannot set DLT %d", dlt);
return (-1);
}
return (0);
}
#else
static int
pcap_set_datalink_bpf(pcap_t *p _U_, int dlt _U_)
{
return (0);
}
#endif
/*
* Platform-specific information.
*/
const char *
pcap_lib_version(void)
{
#ifdef HAVE_ZEROCOPY_BPF
return (PCAP_VERSION_STRING " (with zerocopy support)");
#else
return (PCAP_VERSION_STRING);
#endif
}
|
397349.c | /*
ChibiOS/RT - Copyright (C) 2006-2014 Giovanni Di Sirio
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "ch.h"
#include "hal.h"
#define MMA8451_ADDR 0x1D
#define WHO_AM_I 0x0D
static bool i2cOk = false;
static THD_WORKING_AREA(waThread1, 64);
static THD_FUNCTION(Thread1, arg) {
(void)arg;
chRegSetThreadName("Blinker");
while (TRUE) {
if (i2cOk) {
palSetPad(IOPORT3, 3);
palTogglePad(IOPORT4, 4);
} else {
palSetPad(IOPORT4, 4);
palTogglePad(IOPORT3, 3);
}
chThdSleepMilliseconds(500);
}
return 0;
}
/*
* Application entry point.
*/
int main(void) {
uint8_t tx[1], rx[1];
/*
* System initializations.
* - HAL initialization, this also initializes the configured device drivers
* and performs the board-specific initializations.
* - Kernel initialization, the main() function becomes a thread and the
* RTOS is active.
*/
halInit();
chSysInit();
palSetPad(IOPORT3, 3);
palSetPad(IOPORT4, 4);
palSetPad(IOPORT1, 2);
i2cStart(&I2CD1, NULL);
chThdCreateStatic(waThread1, sizeof(waThread1), NORMALPRIO, Thread1, NULL);
while (1) {
tx[0] = WHO_AM_I;
i2cMasterTransmitTimeout(&I2CD1, MMA8451_ADDR, tx, 1, rx, 1, TIME_INFINITE);
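/* 0x1A is the WHO_AM_I device ID reported by the MMA8451Q accelerometer,
   so a matching reply means the sensor answered on the I2C bus. */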
i2cOk = (rx[0] == 0x1A);
chThdSleepMilliseconds(2000);
}
}
|
498442.c | /*
* linux/kernel/sys.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kmod.h>
#include <linux/perf_event.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/fs_struct.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/version.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>
#include <linux/binfmts.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>
#include <linux/uidgid.h>
#include <linux/cred.h>
#include <linux/kmsg_dump.h>
/* Move somewhere else to avoid recompiling? */
#include <generated/utsrelease.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a, b) (-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a, b) (-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a, b) (-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a, b) (-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a, b) (-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a, b) (-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a, b) (-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a, b) (-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a) (-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a) (-EINVAL)
#endif
#ifndef MPX_ENABLE_MANAGEMENT
# define MPX_ENABLE_MANAGEMENT() (-EINVAL)
#endif
#ifndef MPX_DISABLE_MANAGEMENT
# define MPX_DISABLE_MANAGEMENT() (-EINVAL)
#endif
#ifndef GET_FP_MODE
# define GET_FP_MODE(a) (-EINVAL)
#endif
#ifndef SET_FP_MODE
# define SET_FP_MODE(a,b) (-EINVAL)
#endif
/*
* this is where the system-wide overflow UID and GID are defined, for
* architectures that now have 32-bit UID/GID but didn't in the past
*/
int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;
EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
/*
* the same as above, but for filesystems which can only store a 16-bit
* UID and GID. as such, this is needed on all architectures
*/
int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;
EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);
/*
* Returns true if current's euid is same as p's uid or euid,
* or has CAP_SYS_NICE to p's user_ns.
*
* Called with rcu_read_lock, creds are safe
*/
static bool set_one_prio_perm(struct task_struct *p)
{
const struct cred *cred = current_cred(), *pcred = __task_cred(p);
if (uid_eq(pcred->uid, cred->euid) ||
uid_eq(pcred->euid, cred->euid))
return true;
if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
return true;
return false;
}
/*
* set the priority of a task
* - the caller must hold the RCU read lock
*/
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
int no_nice;
if (!set_one_prio_perm(p)) {
error = -EPERM;
goto out;
}
if (niceval < task_nice(p) && !can_nice(p, niceval)) {
error = -EACCES;
goto out;
}
no_nice = security_task_setnice(p, niceval);
if (no_nice) {
error = no_nice;
goto out;
}
if (error == -ESRCH)
error = 0;
set_user_nice(p, niceval);
out:
return error;
}
SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
{
struct task_struct *g, *p;
struct user_struct *user;
const struct cred *cred = current_cred();
int error = -EINVAL;
struct pid *pgrp;
kuid_t uid;
if (which > PRIO_USER || which < PRIO_PROCESS)
goto out;
/* normalize: avoid signed division (rounding problems) */
error = -ESRCH;
if (niceval < MIN_NICE)
niceval = MIN_NICE;
if (niceval > MAX_NICE)
niceval = MAX_NICE;
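/* MIN_NICE and MAX_NICE are -20 and 19; out-of-range values are clamped, not rejected. */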
rcu_read_lock();
read_lock(&tasklist_lock);
switch (which) {
case PRIO_PROCESS:
if (who)
p = find_task_by_vpid(who);
else
p = current;
if (p)
error = set_one_prio(p, niceval, error);
break;
case PRIO_PGRP:
if (who)
pgrp = find_vpid(who);
else
pgrp = task_pgrp(current);
do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
error = set_one_prio(p, niceval, error);
} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
break;
case PRIO_USER:
uid = make_kuid(cred->user_ns, who);
user = cred->user;
if (!who)
uid = cred->uid;
else if (!uid_eq(uid, cred->uid)) {
user = find_user(uid);
if (!user)
goto out_unlock; /* No processes for this user */
}
do_each_thread(g, p) {
if (uid_eq(task_uid(p), uid))
error = set_one_prio(p, niceval, error);
} while_each_thread(g, p);
if (!uid_eq(uid, cred->uid))
free_uid(user); /* For find_user() */
break;
}
out_unlock:
read_unlock(&tasklist_lock);
rcu_read_unlock();
out:
return error;
}
/*
* Ugh. To avoid negative return values, "getpriority()" will
* not return the normal nice-value, but a negated value that
* has been offset by 20 (ie it returns 40..1 instead of -20..19)
* to stay compatible.
*/
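/*
 * For example, a task at the minimum nice value of -20 is reported as
 * 40 and one at the maximum of 19 as 1; nice_to_rlimit() below performs
 * exactly that mapping.
 */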
SYSCALL_DEFINE2(getpriority, int, which, int, who)
{
struct task_struct *g, *p;
struct user_struct *user;
const struct cred *cred = current_cred();
long niceval, retval = -ESRCH;
struct pid *pgrp;
kuid_t uid;
if (which > PRIO_USER || which < PRIO_PROCESS)
return -EINVAL;
rcu_read_lock();
read_lock(&tasklist_lock);
switch (which) {
case PRIO_PROCESS:
if (who)
p = find_task_by_vpid(who);
else
p = current;
if (p) {
niceval = nice_to_rlimit(task_nice(p));
if (niceval > retval)
retval = niceval;
}
break;
case PRIO_PGRP:
if (who)
pgrp = find_vpid(who);
else
pgrp = task_pgrp(current);
do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
niceval = nice_to_rlimit(task_nice(p));
if (niceval > retval)
retval = niceval;
} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
break;
case PRIO_USER:
uid = make_kuid(cred->user_ns, who);
user = cred->user;
if (!who)
uid = cred->uid;
else if (!uid_eq(uid, cred->uid)) {
user = find_user(uid);
if (!user)
goto out_unlock; /* No processes for this user */
}
do_each_thread(g, p) {
if (uid_eq(task_uid(p), uid)) {
niceval = nice_to_rlimit(task_nice(p));
if (niceval > retval)
retval = niceval;
}
} while_each_thread(g, p);
if (!uid_eq(uid, cred->uid))
free_uid(user); /* for find_user() */
break;
}
out_unlock:
read_unlock(&tasklist_lock);
rcu_read_unlock();
return retval;
}
/*
* Unprivileged users may change the real gid to the effective gid
* or vice versa. (BSD-style)
*
* If you set the real gid at all, or set the effective gid to a value not
* equal to the real gid, then the saved gid is set to the new effective gid.
*
* This makes it possible for a setgid program to completely drop its
* privileges, which is often a useful assertion to make when you are doing
* a security audit over a program.
*
* The general idea is that a program which uses just setregid() will be
* 100% compatible with BSD. A program which uses just setgid() will be
* 100% compatible with POSIX with saved IDs.
*
* SMP: There are no races, the GIDs are checked only by filesystem
* operations (as far as semantic preservation is concerned).
*/
#ifdef CONFIG_MULTIUSER
SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
struct user_namespace *ns = current_user_ns();
const struct cred *old;
struct cred *new;
int retval;
kgid_t krgid, kegid;
krgid = make_kgid(ns, rgid);
kegid = make_kgid(ns, egid);
if ((rgid != (gid_t) -1) && !gid_valid(krgid))
return -EINVAL;
if ((egid != (gid_t) -1) && !gid_valid(kegid))
return -EINVAL;
new = prepare_creds();
if (!new)
return -ENOMEM;
old = current_cred();
retval = -EPERM;
if (rgid != (gid_t) -1) {
if (gid_eq(old->gid, krgid) ||
gid_eq(old->egid, krgid) ||
ns_capable(old->user_ns, CAP_SETGID))
new->gid = krgid;
else
goto error;
}
if (egid != (gid_t) -1) {
if (gid_eq(old->gid, kegid) ||
gid_eq(old->egid, kegid) ||
gid_eq(old->sgid, kegid) ||
ns_capable(old->user_ns, CAP_SETGID))
new->egid = kegid;
else
goto error;
}
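/*
 * As described in the comment above setregid(): setting the real gid,
 * or setting the effective gid to a value other than the old real gid,
 * also updates the saved gid; the fs gid always follows the effective
 * gid.
 */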
if (rgid != (gid_t) -1 ||
(egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
new->sgid = new->egid;
new->fsgid = new->egid;
return commit_creds(new);
error:
abort_creds(new);
return retval;
}
/*
* setgid() is implemented like SysV w/ SAVED_IDS
*
* SMP: Same implicit races as above.
*/
SYSCALL_DEFINE1(setgid, gid_t, gid)
{
struct user_namespace *ns = current_user_ns();
const struct cred *old;
struct cred *new;
int retval;
kgid_t kgid;
kgid = make_kgid(ns, gid);
if (!gid_valid(kgid))
return -EINVAL;
new = prepare_creds();
if (!new)
return -ENOMEM;
old = current_cred();
retval = -EPERM;
if (ns_capable(old->user_ns, CAP_SETGID))
new->gid = new->egid = new->sgid = new->fsgid = kgid;
else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
new->egid = new->fsgid = kgid;
else
goto error;
return commit_creds(new);
error:
abort_creds(new);
return retval;
}
/*
* change the user struct in a credentials set to match the new UID
*/
static int set_user(struct cred *new)
{
struct user_struct *new_user;
new_user = alloc_uid(new->uid);
if (!new_user)
return -EAGAIN;
/*
* We don't fail in case of NPROC limit excess here because too many
* poorly written programs don't check set*uid() return code, assuming
* it never fails if called by root. We may still enforce NPROC limit
* for programs doing set*uid()+execve() by harmlessly deferring the
* failure to the execve() stage.
*/
if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
new_user != INIT_USER)
current->flags |= PF_NPROC_EXCEEDED;
else
current->flags &= ~PF_NPROC_EXCEEDED;
free_uid(new->user);
new->user = new_user;
return 0;
}
/*
* Unprivileged users may change the real uid to the effective uid
* or vice versa. (BSD-style)
*
* If you set the real uid at all, or set the effective uid to a value not
* equal to the real uid, then the saved uid is set to the new effective uid.
*
* This makes it possible for a setuid program to completely drop its
* privileges, which is often a useful assertion to make when you are doing
* a security audit over a program.
*
* The general idea is that a program which uses just setreuid() will be
* 100% compatible with BSD. A program which uses just setuid() will be
* 100% compatible with POSIX with saved IDs.
*/
SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
struct user_namespace *ns = current_user_ns();
const struct cred *old;
struct cred *new;
int retval;
kuid_t kruid, keuid;
kruid = make_kuid(ns, ruid);
keuid = make_kuid(ns, euid);
if ((ruid != (uid_t) -1) && !uid_valid(kruid))
return -EINVAL;
if ((euid != (uid_t) -1) && !uid_valid(keuid))
return -EINVAL;
new = prepare_creds();
if (!new)
return -ENOMEM;
old = current_cred();
retval = -EPERM;
if (ruid != (uid_t) -1) {
new->uid = kruid;
if (!uid_eq(old->uid, kruid) &&
!uid_eq(old->euid, kruid) &&
!ns_capable(old->user_ns, CAP_SETUID))
goto error;
}
if (euid != (uid_t) -1) {
new->euid = keuid;
if (!uid_eq(old->uid, keuid) &&
!uid_eq(old->euid, keuid) &&
!uid_eq(old->suid, keuid) &&
!ns_capable(old->user_ns, CAP_SETUID))
goto error;
}
if (!uid_eq(new->uid, old->uid)) {
retval = set_user(new);
if (retval < 0)
goto error;
}
if (ruid != (uid_t) -1 ||
(euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
new->suid = new->euid;
new->fsuid = new->euid;
retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
if (retval < 0)
goto error;
return commit_creds(new);
error:
abort_creds(new);
return retval;
}
/*
* setuid() is implemented like SysV with SAVED_IDS
*
* Note that SAVED_IDS is deficient in that a setuid root program
* like sendmail, for example, cannot set its uid to be a normal
* user and then switch back, because if you're root, setuid() sets
* the saved uid too. If you don't like this, blame the bright people
* in the POSIX committee and/or USG. Note that the BSD-style setreuid()
* will allow a root program to temporarily drop privileges and be able to
* regain them by swapping the real and effective uid.
*/
SYSCALL_DEFINE1(setuid, uid_t, uid)
{
struct user_namespace *ns = current_user_ns();
const struct cred *old;
struct cred *new;
int retval;
kuid_t kuid;
kuid = make_kuid(ns, uid);
if (!uid_valid(kuid))
return -EINVAL;
new = prepare_creds();
if (!new)
return -ENOMEM;
old = current_cred();
retval = -EPERM;
if (ns_capable(old->user_ns, CAP_SETUID)) {
new->suid = new->uid = kuid;
if (!uid_eq(kuid, old->uid)) {
retval = set_user(new);
if (retval < 0)
goto error;
}
} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
goto error;
}
new->fsuid = new->euid = kuid;
retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
if (retval < 0)
goto error;
return commit_creds(new);
error:
abort_creds(new);
return retval;
}
/*
* This function implements a generic ability to update ruid, euid,
* and suid. This allows you to implement the 4.4 compatible seteuid().
*/
SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
struct user_namespace *ns = current_user_ns();
const struct cred *old;
struct cred *new;
int retval;
kuid_t kruid, keuid, ksuid;
kruid = make_kuid(ns, ruid);
keuid = make_kuid(ns, euid);
ksuid = make_kuid(ns, suid);
if ((ruid != (uid_t) -1) && !uid_valid(kruid))
return -EINVAL;
if ((euid != (uid_t) -1) && !uid_valid(keuid))
return -EINVAL;
if ((suid != (uid_t) -1) && !uid_valid(ksuid))
return -EINVAL;
new = prepare_creds();
if (!new)
return -ENOMEM;
old = current_cred();
retval = -EPERM;
if (!ns_capable(old->user_ns, CAP_SETUID)) {
if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
!uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
goto error;
if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
!uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
goto error;
if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
!uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
goto error;
}
if (ruid != (uid_t) -1) {
new->uid = kruid;
if (!uid_eq(kruid, old->uid)) {
retval = set_user(new);
if (retval < 0)
goto error;
}
}
if (euid != (uid_t) -1)
new->euid = keuid;
if (suid != (uid_t) -1)
new->suid = ksuid;
new->fsuid = new->euid;
retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
if (retval < 0)
goto error;
return commit_creds(new);
error:
abort_creds(new);
return retval;
}
SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
{
const struct cred *cred = current_cred();
int retval;
uid_t ruid, euid, suid;
ruid = from_kuid_munged(cred->user_ns, cred->uid);
euid = from_kuid_munged(cred->user_ns, cred->euid);
suid = from_kuid_munged(cred->user_ns, cred->suid);
retval = put_user(ruid, ruidp);
if (!retval) {
retval = put_user(euid, euidp);
if (!retval)
return put_user(suid, suidp);
}
return retval;
}
/*
* Same as above, but for rgid, egid, sgid.
*/
SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
struct user_namespace *ns = current_user_ns();
const struct cred *old;
struct cred *new;
int retval;
kgid_t krgid, kegid, ksgid;
krgid = make_kgid(ns, rgid);
kegid = make_kgid(ns, egid);
ksgid = make_kgid(ns, sgid);
if ((rgid != (gid_t) -1) && !gid_valid(krgid))
return -EINVAL;
if ((egid != (gid_t) -1) && !gid_valid(kegid))
return -EINVAL;
if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
return -EINVAL;
new = prepare_creds();
if (!new)
return -ENOMEM;
old = current_cred();
retval = -EPERM;
if (!ns_capable(old->user_ns, CAP_SETGID)) {
if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
!gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
goto error;
if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
!gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
goto error;
if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
!gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
goto error;
}
if (rgid != (gid_t) -1)
new->gid = krgid;
if (egid != (gid_t) -1)
new->egid = kegid;
if (sgid != (gid_t) -1)
new->sgid = ksgid;
new->fsgid = new->egid;
return commit_creds(new);
error:
abort_creds(new);
return retval;
}
SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
{
const struct cred *cred = current_cred();
int retval;
gid_t rgid, egid, sgid;
rgid = from_kgid_munged(cred->user_ns, cred->gid);
egid = from_kgid_munged(cred->user_ns, cred->egid);
sgid = from_kgid_munged(cred->user_ns, cred->sgid);
retval = put_user(rgid, rgidp);
if (!retval) {
retval = put_user(egid, egidp);
if (!retval)
retval = put_user(sgid, sgidp);
}
return retval;
}
/*
* "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
* is used for "access()" and for the NFS daemon (letting nfsd stay at
* whatever uid it wants to). It normally shadows "euid", except when
* explicitly set by setfsuid() or for access..
*/
SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
const struct cred *old;
struct cred *new;
uid_t old_fsuid;
kuid_t kuid;
old = current_cred();
old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
kuid = make_kuid(old->user_ns, uid);
if (!uid_valid(kuid))
return old_fsuid;
new = prepare_creds();
if (!new)
return old_fsuid;
if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) ||
uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
ns_capable(old->user_ns, CAP_SETUID)) {
if (!uid_eq(kuid, old->fsuid)) {
new->fsuid = kuid;
if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
goto change_okay;
}
}
abort_creds(new);
return old_fsuid;
change_okay:
commit_creds(new);
return old_fsuid;
}
/*
* Same as setfsuid(), but for the filesystem gid. ("Samma på svenska..")
*/
SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
const struct cred *old;
struct cred *new;
gid_t old_fsgid;
kgid_t kgid;
old = current_cred();
old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);
kgid = make_kgid(old->user_ns, gid);
if (!gid_valid(kgid))
return old_fsgid;
new = prepare_creds();
if (!new)
return old_fsgid;
if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
ns_capable(old->user_ns, CAP_SETGID)) {
if (!gid_eq(kgid, old->fsgid)) {
new->fsgid = kgid;
goto change_okay;
}
}
abort_creds(new);
return old_fsgid;
change_okay:
commit_creds(new);
return old_fsgid;
}
#endif /* CONFIG_MULTIUSER */
/**
* sys_getpid - return the thread group id of the current process
*
* Note, despite the name, this returns the tgid not the pid. The tgid and
* the pid are identical unless CLONE_THREAD was specified on clone() in
* which case the tgid is the same in all threads of the same group.
*
* This is SMP safe as current->tgid does not change.
*/
SYSCALL_DEFINE0(getpid)
{
return task_tgid_vnr(current);
}
/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
return task_pid_vnr(current);
}
/*
* Accessing ->real_parent is not SMP-safe, it could
* change from under us. However, we can use a stale
* value of ->real_parent under rcu_read_lock(), see
* release_task()->call_rcu(delayed_put_task_struct).
*/
SYSCALL_DEFINE0(getppid)
{
int pid;
rcu_read_lock();
pid = task_tgid_vnr(rcu_dereference(current->real_parent));
rcu_read_unlock();
return pid;
}
SYSCALL_DEFINE0(getuid)
{
/* Only we change this so SMP safe */
return from_kuid_munged(current_user_ns(), current_uid());
}
SYSCALL_DEFINE0(geteuid)
{
/* Only we change this so SMP safe */
return from_kuid_munged(current_user_ns(), current_euid());
}
SYSCALL_DEFINE0(getgid)
{
/* Only we change this so SMP safe */
return from_kgid_munged(current_user_ns(), current_gid());
}
SYSCALL_DEFINE0(getegid)
{
/* Only we change this so SMP safe */
return from_kgid_munged(current_user_ns(), current_egid());
}
void do_sys_times(struct tms *tms)
{
cputime_t tgutime, tgstime, cutime, cstime;
thread_group_cputime_adjusted(current, &tgutime, &tgstime);
cutime = current->signal->cutime;
cstime = current->signal->cstime;
tms->tms_utime = cputime_to_clock_t(tgutime);
tms->tms_stime = cputime_to_clock_t(tgstime);
tms->tms_cutime = cputime_to_clock_t(cutime);
tms->tms_cstime = cputime_to_clock_t(cstime);
}
SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
if (tbuf) {
struct tms tmp;
do_sys_times(&tmp);
if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
return -EFAULT;
}
force_successful_syscall_return();
return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
/*
* This needs some heavy checking ...
* I just haven't the stomach for it. I also don't fully
* understand sessions/pgrp etc. Let somebody who does explain it.
*
* OK, I think I have the protection semantics right.... this is really
* only important on a multi-user system anyway, to make sure one user
* can't send a signal to a process owned by another. -TYT, 12/12/91
*
* !PF_FORKNOEXEC check to conform completely to POSIX.
*/
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
struct task_struct *p;
struct task_struct *group_leader = current->group_leader;
struct pid *pgrp;
int err;
if (!pid)
pid = task_pid_vnr(group_leader);
if (!pgid)
pgid = pid;
if (pgid < 0)
return -EINVAL;
rcu_read_lock();
/* From this point forward we keep holding onto the tasklist lock
* so that our parent does not change from under us. -DaveM
*/
write_lock_irq(&tasklist_lock);
err = -ESRCH;
p = find_task_by_vpid(pid);
if (!p)
goto out;
err = -EINVAL;
if (!thread_group_leader(p))
goto out;
if (same_thread_group(p->real_parent, group_leader)) {
err = -EPERM;
if (task_session(p) != task_session(group_leader))
goto out;
err = -EACCES;
if (!(p->flags & PF_FORKNOEXEC))
goto out;
} else {
err = -ESRCH;
if (p != group_leader)
goto out;
}
err = -EPERM;
if (p->signal->leader)
goto out;
pgrp = task_pid(p);
if (pgid != pid) {
struct task_struct *g;
pgrp = find_vpid(pgid);
g = pid_task(pgrp, PIDTYPE_PGID);
if (!g || task_session(g) != task_session(group_leader))
goto out;
}
err = security_task_setpgid(p, pgid);
if (err)
goto out;
if (task_pgrp(p) != pgrp)
change_pid(p, PIDTYPE_PGID, pgrp);
err = 0;
out:
/* All paths lead to here, thus we are safe. -DaveM */
write_unlock_irq(&tasklist_lock);
rcu_read_unlock();
return err;
}
SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
struct task_struct *p;
struct pid *grp;
int retval;
rcu_read_lock();
if (!pid)
grp = task_pgrp(current);
else {
retval = -ESRCH;
p = find_task_by_vpid(pid);
if (!p)
goto out;
grp = task_pgrp(p);
if (!grp)
goto out;
retval = security_task_getpgid(p);
if (retval)
goto out;
}
retval = pid_vnr(grp);
out:
rcu_read_unlock();
return retval;
}
#ifdef __ARCH_WANT_SYS_GETPGRP
SYSCALL_DEFINE0(getpgrp)
{
return sys_getpgid(0);
}
#endif
SYSCALL_DEFINE1(getsid, pid_t, pid)
{
struct task_struct *p;
struct pid *sid;
int retval;
rcu_read_lock();
if (!pid)
sid = task_session(current);
else {
retval = -ESRCH;
p = find_task_by_vpid(pid);
if (!p)
goto out;
sid = task_session(p);
if (!sid)
goto out;
retval = security_task_getsid(p);
if (retval)
goto out;
}
retval = pid_vnr(sid);
out:
rcu_read_unlock();
return retval;
}
static void set_special_pids(struct pid *pid)
{
struct task_struct *curr = current->group_leader;
if (task_session(curr) != pid)
change_pid(curr, PIDTYPE_SID, pid);
if (task_pgrp(curr) != pid)
change_pid(curr, PIDTYPE_PGID, pid);
}
SYSCALL_DEFINE0(setsid)
{
struct task_struct *group_leader = current->group_leader;
struct pid *sid = task_pid(group_leader);
pid_t session = pid_vnr(sid);
int err = -EPERM;
write_lock_irq(&tasklist_lock);
/* Fail if I am already a session leader */
if (group_leader->signal->leader)
goto out;
/* Fail if a process group id already exists that equals the
* proposed session id.
*/
if (pid_task(sid, PIDTYPE_PGID))
goto out;
group_leader->signal->leader = 1;
set_special_pids(sid);
proc_clear_tty(group_leader);
err = session;
out:
write_unlock_irq(&tasklist_lock);
if (err > 0) {
proc_sid_connector(group_leader);
sched_autogroup_create_attach(group_leader);
}
return err;
}
DECLARE_RWSEM(uts_sem);
#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
(personality(current->personality) == PER_LINUX32 && \
copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
sizeof(COMPAT_UTS_MACHINE)))
#else
#define override_architecture(name) 0
#endif
/*
* Work around broken programs that cannot handle "Linux 3.0".
* Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
* And we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60.
*/
static int override_release(char __user *release, size_t len)
{
int ret = 0;
if (current->personality & UNAME26) {
const char *rest = UTS_RELEASE;
char buf[65] = { 0 };
int ndots = 0;
unsigned v;
size_t copy;
while (*rest) {
if (*rest == '.' && ++ndots >= 3)
break;
if (!isdigit(*rest) && *rest != '.')
break;
rest++;
}
v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
copy = clamp_t(size_t, len, 1, sizeof(buf));
copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
ret = copy_to_user(release, buf, copy + 1);
}
return ret;
}
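/*
 * For example, on a kernel built from a 4.4 series tree with the UNAME26
 * personality set, v above becomes 4 + 60 = 64 and a release string such
 * as "4.4.0-rc5" is reported as "2.6.64-rc5" (the scan stops at the first
 * non-digit/non-dot character, so any local suffix is preserved).
 */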
SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
int errno = 0;
down_read(&uts_sem);
if (copy_to_user(name, utsname(), sizeof *name))
errno = -EFAULT;
up_read(&uts_sem);
if (!errno && override_release(name->release, sizeof(name->release)))
errno = -EFAULT;
if (!errno && override_architecture(name))
errno = -EFAULT;
return errno;
}
#ifdef __ARCH_WANT_SYS_OLD_UNAME
/*
* Old cruft
*/
SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
{
int error = 0;
if (!name)
return -EFAULT;
down_read(&uts_sem);
if (copy_to_user(name, utsname(), sizeof(*name)))
error = -EFAULT;
up_read(&uts_sem);
if (!error && override_release(name->release, sizeof(name->release)))
error = -EFAULT;
if (!error && override_architecture(name))
error = -EFAULT;
return error;
}
SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
int error;
if (!name)
return -EFAULT;
if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
return -EFAULT;
down_read(&uts_sem);
error = __copy_to_user(&name->sysname, &utsname()->sysname,
__OLD_UTS_LEN);
error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
error |= __copy_to_user(&name->nodename, &utsname()->nodename,
__OLD_UTS_LEN);
error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
error |= __copy_to_user(&name->release, &utsname()->release,
__OLD_UTS_LEN);
error |= __put_user(0, name->release + __OLD_UTS_LEN);
error |= __copy_to_user(&name->version, &utsname()->version,
__OLD_UTS_LEN);
error |= __put_user(0, name->version + __OLD_UTS_LEN);
error |= __copy_to_user(&name->machine, &utsname()->machine,
__OLD_UTS_LEN);
error |= __put_user(0, name->machine + __OLD_UTS_LEN);
up_read(&uts_sem);
if (!error && override_architecture(name))
error = -EFAULT;
if (!error && override_release(name->release, sizeof(name->release)))
error = -EFAULT;
return error ? -EFAULT : 0;
}
#endif
SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
{
int errno;
char tmp[__NEW_UTS_LEN];
if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
return -EPERM;
if (len < 0 || len > __NEW_UTS_LEN)
return -EINVAL;
down_write(&uts_sem);
errno = -EFAULT;
if (!copy_from_user(tmp, name, len)) {
struct new_utsname *u = utsname();
memcpy(u->nodename, tmp, len);
memset(u->nodename + len, 0, sizeof(u->nodename) - len);
errno = 0;
uts_proc_notify(UTS_PROC_HOSTNAME);
}
up_write(&uts_sem);
return errno;
}
#ifdef __ARCH_WANT_SYS_GETHOSTNAME
SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
int i, errno;
struct new_utsname *u;
if (len < 0)
return -EINVAL;
down_read(&uts_sem);
u = utsname();
i = 1 + strlen(u->nodename);
if (i > len)
i = len;
errno = 0;
if (copy_to_user(name, u->nodename, i))
errno = -EFAULT;
up_read(&uts_sem);
return errno;
}
#endif
/*
* Only setdomainname; getdomainname can be implemented by calling
* uname()
*/
SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
{
int errno;
char tmp[__NEW_UTS_LEN];
if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
return -EPERM;
if (len < 0 || len > __NEW_UTS_LEN)
return -EINVAL;
down_write(&uts_sem);
errno = -EFAULT;
if (!copy_from_user(tmp, name, len)) {
struct new_utsname *u = utsname();
memcpy(u->domainname, tmp, len);
memset(u->domainname + len, 0, sizeof(u->domainname) - len);
errno = 0;
uts_proc_notify(UTS_PROC_DOMAINNAME);
}
up_write(&uts_sem);
return errno;
}
SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
struct rlimit value;
int ret;
ret = do_prlimit(current, resource, NULL, &value);
if (!ret)
ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
return ret;
}
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
/*
* Back compatibility for getrlimit. Needed for some apps.
*/
SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
struct rlimit __user *, rlim)
{
struct rlimit x;
if (resource >= RLIM_NLIMITS)
return -EINVAL;
task_lock(current->group_leader);
x = current->signal->rlim[resource];
task_unlock(current->group_leader);
if (x.rlim_cur > 0x7FFFFFFF)
x.rlim_cur = 0x7FFFFFFF;
if (x.rlim_max > 0x7FFFFFFF)
x.rlim_max = 0x7FFFFFFF;
return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}
#endif
static inline bool rlim64_is_infinity(__u64 rlim64)
{
#if BITS_PER_LONG < 64
return rlim64 >= ULONG_MAX;
#else
return rlim64 == RLIM64_INFINITY;
#endif
}
static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
{
if (rlim->rlim_cur == RLIM_INFINITY)
rlim64->rlim_cur = RLIM64_INFINITY;
else
rlim64->rlim_cur = rlim->rlim_cur;
if (rlim->rlim_max == RLIM_INFINITY)
rlim64->rlim_max = RLIM64_INFINITY;
else
rlim64->rlim_max = rlim->rlim_max;
}
static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
{
if (rlim64_is_infinity(rlim64->rlim_cur))
rlim->rlim_cur = RLIM_INFINITY;
else
rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
if (rlim64_is_infinity(rlim64->rlim_max))
rlim->rlim_max = RLIM_INFINITY;
else
rlim->rlim_max = (unsigned long)rlim64->rlim_max;
}
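/*
 * Note that on a 32-bit kernel (BITS_PER_LONG < 64) rlim64_is_infinity()
 * treats any 64-bit value that does not fit in an unsigned long as
 * infinite, so a prlimit64() caller passing e.g. 1ULL << 40 ends up with
 * RLIM_INFINITY in the in-kernel struct rlimit rather than a truncated
 * value.
 */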
/* make sure you are allowed to change @tsk limits before calling this */
int do_prlimit(struct task_struct *tsk, unsigned int resource,
struct rlimit *new_rlim, struct rlimit *old_rlim)
{
struct rlimit *rlim;
int retval = 0;
if (resource >= RLIM_NLIMITS)
return -EINVAL;
if (new_rlim) {
if (new_rlim->rlim_cur > new_rlim->rlim_max)
return -EINVAL;
if (resource == RLIMIT_NOFILE &&
new_rlim->rlim_max > sysctl_nr_open)
return -EPERM;
}
/* protect tsk->signal and tsk->sighand from disappearing */
read_lock(&tasklist_lock);
if (!tsk->sighand) {
retval = -ESRCH;
goto out;
}
rlim = tsk->signal->rlim + resource;
task_lock(tsk->group_leader);
if (new_rlim) {
/* Keep the capable check against init_user_ns until
cgroups can contain all limits */
if (new_rlim->rlim_max > rlim->rlim_max &&
!capable(CAP_SYS_RESOURCE))
retval = -EPERM;
if (!retval)
retval = security_task_setrlimit(tsk->group_leader,
resource, new_rlim);
if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
/*
* The caller is asking for an immediate RLIMIT_CPU
* expiry. But we use the zero value to mean "it was
* never set". So let's cheat and make it one second
* instead
*/
new_rlim->rlim_cur = 1;
}
}
if (!retval) {
if (old_rlim)
*old_rlim = *rlim;
if (new_rlim)
*rlim = *new_rlim;
}
task_unlock(tsk->group_leader);
/*
* RLIMIT_CPU handling. Note that the kernel fails to return an error
* code if it rejected the user's attempt to set RLIMIT_CPU. This is a
* very long-standing error, and fixing it now risks breakage of
* applications, so we live with it
*/
if (!retval && new_rlim && resource == RLIMIT_CPU &&
new_rlim->rlim_cur != RLIM_INFINITY)
update_rlimit_cpu(tsk, new_rlim->rlim_cur);
out:
read_unlock(&tasklist_lock);
return retval;
}
/* rcu lock must be held */
static int check_prlimit_permission(struct task_struct *task)
{
const struct cred *cred = current_cred(), *tcred;
if (current == task)
return 0;
tcred = __task_cred(task);
if (uid_eq(cred->uid, tcred->euid) &&
uid_eq(cred->uid, tcred->suid) &&
uid_eq(cred->uid, tcred->uid) &&
gid_eq(cred->gid, tcred->egid) &&
gid_eq(cred->gid, tcred->sgid) &&
gid_eq(cred->gid, tcred->gid))
return 0;
if (ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
return 0;
return -EPERM;
}
SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
const struct rlimit64 __user *, new_rlim,
struct rlimit64 __user *, old_rlim)
{
struct rlimit64 old64, new64;
struct rlimit old, new;
struct task_struct *tsk;
int ret;
if (new_rlim) {
if (copy_from_user(&new64, new_rlim, sizeof(new64)))
return -EFAULT;
rlim64_to_rlim(&new64, &new);
}
rcu_read_lock();
tsk = pid ? find_task_by_vpid(pid) : current;
if (!tsk) {
rcu_read_unlock();
return -ESRCH;
}
ret = check_prlimit_permission(tsk);
if (ret) {
rcu_read_unlock();
return ret;
}
get_task_struct(tsk);
rcu_read_unlock();
ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
old_rlim ? &old : NULL);
if (!ret && old_rlim) {
rlim_to_rlim64(&old, &old64);
if (copy_to_user(old_rlim, &old64, sizeof(old64)))
ret = -EFAULT;
}
put_task_struct(tsk);
return ret;
}
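/*
 * A typical userspace call reaching this entry point (via the glibc
 * prlimit() wrapper; "pid" and the limit values are illustrative):
 *
 *	struct rlimit new = { .rlim_cur = 4096, .rlim_max = 8192 };
 *	struct rlimit old;
 *	prlimit(pid, RLIMIT_NOFILE, &new, &old);
 *
 * pid == 0 targets the calling process; operating on another task requires
 * the credential match or CAP_SYS_RESOURCE enforced in
 * check_prlimit_permission() above.
 */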
SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
struct rlimit new_rlim;
if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
return -EFAULT;
return do_prlimit(current, resource, &new_rlim, NULL);
}
/*
* It would make sense to put struct rusage in the task_struct,
* except that would make the task_struct be *really big*. After
* task_struct gets moved into malloc'ed memory, it would
* make sense to do this. It will make moving the rest of the information
* a lot simpler! (Which we're not doing right now because we're not
* measuring them yet).
*
* When sampling multiple threads for RUSAGE_SELF, under SMP we might have
* races with threads incrementing their own counters. But since word
* reads are atomic, we either get new values or old values and we don't
* care which for the sums. We always take the siglock to protect reading
* the c* fields from p->signal from races with exit.c updating those
* fields when reaping, so a sample either gets all the additions of a
* given child after it's reaped, or none so this sample is before reaping.
*
* Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded and
 * non-current multithreaded.  Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
* single threaded, as no one else can take our signal_struct away, no one
* else can reap the children to update signal->c* counters, and no one else
* can race with the signal-> fields. If we do not take any lock, the
* signal-> fields could be read out of order while another thread was just
* exiting. So we should place a read memory barrier when we avoid the lock.
* On the writer side, write memory barrier is implied in __exit_signal
* as __exit_signal releases the siglock spinlock after updating the signal->
* fields. But we don't do this yet to keep things simple.
*
*/
static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
{
r->ru_nvcsw += t->nvcsw;
r->ru_nivcsw += t->nivcsw;
r->ru_minflt += t->min_flt;
r->ru_majflt += t->maj_flt;
r->ru_inblock += task_io_get_inblock(t);
r->ru_oublock += task_io_get_oublock(t);
}
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
struct task_struct *t;
unsigned long flags;
cputime_t tgutime, tgstime, utime, stime;
unsigned long maxrss = 0;
memset((char *)r, 0, sizeof (*r));
utime = stime = 0;
if (who == RUSAGE_THREAD) {
task_cputime_adjusted(current, &utime, &stime);
accumulate_thread_rusage(p, r);
maxrss = p->signal->maxrss;
goto out;
}
if (!lock_task_sighand(p, &flags))
return;
switch (who) {
case RUSAGE_BOTH:
case RUSAGE_CHILDREN:
utime = p->signal->cutime;
stime = p->signal->cstime;
r->ru_nvcsw = p->signal->cnvcsw;
r->ru_nivcsw = p->signal->cnivcsw;
r->ru_minflt = p->signal->cmin_flt;
r->ru_majflt = p->signal->cmaj_flt;
r->ru_inblock = p->signal->cinblock;
r->ru_oublock = p->signal->coublock;
maxrss = p->signal->cmaxrss;
if (who == RUSAGE_CHILDREN)
break;
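/* fall through: for RUSAGE_BOTH the SELF counters below are added in as well */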
case RUSAGE_SELF:
thread_group_cputime_adjusted(p, &tgutime, &tgstime);
utime += tgutime;
stime += tgstime;
r->ru_nvcsw += p->signal->nvcsw;
r->ru_nivcsw += p->signal->nivcsw;
r->ru_minflt += p->signal->min_flt;
r->ru_majflt += p->signal->maj_flt;
r->ru_inblock += p->signal->inblock;
r->ru_oublock += p->signal->oublock;
if (maxrss < p->signal->maxrss)
maxrss = p->signal->maxrss;
t = p;
do {
accumulate_thread_rusage(t, r);
} while_each_thread(p, t);
break;
default:
BUG();
}
unlock_task_sighand(p, &flags);
out:
cputime_to_timeval(utime, &r->ru_utime);
cputime_to_timeval(stime, &r->ru_stime);
if (who != RUSAGE_CHILDREN) {
struct mm_struct *mm = get_task_mm(p);
if (mm) {
setmax_mm_hiwater_rss(&maxrss, mm);
mmput(mm);
}
}
r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
}
int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
struct rusage r;
k_getrusage(p, who, &r);
return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}
SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
{
if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
who != RUSAGE_THREAD)
return -EINVAL;
return getrusage(current, who, ru);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
{
struct rusage r;
if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
who != RUSAGE_THREAD)
return -EINVAL;
k_getrusage(current, who, &r);
return put_compat_rusage(&r, ru);
}
#endif
SYSCALL_DEFINE1(umask, int, mask)
{
mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
return mask;
}
static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
struct fd exe;
struct file *old_exe, *exe_file;
struct inode *inode;
int err;
exe = fdget(fd);
if (!exe.file)
return -EBADF;
inode = file_inode(exe.file);
/*
 * Because the original mm->exe_file points to an executable file, make
 * sure that this one is executable as well, to avoid breaking the
 * overall picture.
*/
err = -EACCES;
if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
goto exit;
err = inode_permission(inode, MAY_EXEC);
if (err)
goto exit;
/*
* Forbid mm->exe_file change if old file still mapped.
*/
exe_file = get_mm_exe_file(mm);
err = -EBUSY;
if (exe_file) {
struct vm_area_struct *vma;
down_read(&mm->mmap_sem);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (!vma->vm_file)
continue;
if (path_equal(&vma->vm_file->f_path,
&exe_file->f_path))
goto exit_err;
}
up_read(&mm->mmap_sem);
fput(exe_file);
}
/*
* The symlink can be changed only once, just to disallow arbitrary
* transitions malicious software might bring in. This means one
 * could take a snapshot of all running processes and monitor
* /proc/pid/exe changes to notice unusual activity if needed.
*/
err = -EPERM;
if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags))
goto exit;
err = 0;
/* set the new file, lockless */
get_file(exe.file);
old_exe = xchg(&mm->exe_file, exe.file);
if (old_exe)
fput(old_exe);
exit:
fdput(exe);
return err;
exit_err:
up_read(&mm->mmap_sem);
fput(exe_file);
goto exit;
}
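/*
 * This helper backs prctl(PR_SET_MM, PR_SET_MM_EXE_FILE, fd, 0, 0) -- see
 * prctl_set_mm() below -- where fd is an open descriptor for the new
 * executable.  Because of the MMF_EXE_FILE_CHANGED test above, only one
 * successful change per mm is allowed; later attempts fail with -EPERM.
 */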
/*
* WARNING: we don't require any capability here so be very careful
* in what is allowed for modification from userspace.
*/
static int validate_prctl_map(struct prctl_mm_map *prctl_map)
{
unsigned long mmap_max_addr = TASK_SIZE;
struct mm_struct *mm = current->mm;
int error = -EINVAL, i;
static const unsigned char offsets[] = {
offsetof(struct prctl_mm_map, start_code),
offsetof(struct prctl_mm_map, end_code),
offsetof(struct prctl_mm_map, start_data),
offsetof(struct prctl_mm_map, end_data),
offsetof(struct prctl_mm_map, start_brk),
offsetof(struct prctl_mm_map, brk),
offsetof(struct prctl_mm_map, start_stack),
offsetof(struct prctl_mm_map, arg_start),
offsetof(struct prctl_mm_map, arg_end),
offsetof(struct prctl_mm_map, env_start),
offsetof(struct prctl_mm_map, env_end),
};
/*
* Make sure the members are not somewhere outside
 * of the allowed address space.
*/
for (i = 0; i < ARRAY_SIZE(offsets); i++) {
u64 val = *(u64 *)((char *)prctl_map + offsets[i]);
if ((unsigned long)val >= mmap_max_addr ||
(unsigned long)val < mmap_min_addr)
goto out;
}
/*
* Make sure the pairs are ordered.
*/
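/*
 * Each __prctl_check_order() below evaluates to 0 when the ordering holds
 * and to -EINVAL otherwise, so OR-ing the results leaves error == 0 only
 * if every check passed.
 */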
#define __prctl_check_order(__m1, __op, __m2) \
((unsigned long)prctl_map->__m1 __op \
(unsigned long)prctl_map->__m2) ? 0 : -EINVAL
error = __prctl_check_order(start_code, <, end_code);
error |= __prctl_check_order(start_data, <, end_data);
error |= __prctl_check_order(start_brk, <=, brk);
error |= __prctl_check_order(arg_start, <=, arg_end);
error |= __prctl_check_order(env_start, <=, env_end);
if (error)
goto out;
#undef __prctl_check_order
error = -EINVAL;
/*
* @brk should be after @end_data in traditional maps.
*/
if (prctl_map->start_brk <= prctl_map->end_data ||
prctl_map->brk <= prctl_map->end_data)
goto out;
/*
 * Nor should we allow overriding the limits if they are set.
*/
if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
prctl_map->start_brk, prctl_map->end_data,
prctl_map->start_data))
goto out;
/*
* Someone is trying to cheat the auxv vector.
*/
if (prctl_map->auxv_size) {
if (!prctl_map->auxv || prctl_map->auxv_size > sizeof(mm->saved_auxv))
goto out;
}
/*
* Finally, make sure the caller has the rights to
* change /proc/pid/exe link: only local root should
* be allowed to.
*/
if (prctl_map->exe_fd != (u32)-1) {
struct user_namespace *ns = current_user_ns();
const struct cred *cred = current_cred();
if (!uid_eq(cred->uid, make_kuid(ns, 0)) ||
!gid_eq(cred->gid, make_kgid(ns, 0)))
goto out;
}
error = 0;
out:
return error;
}
#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
{
struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
unsigned long user_auxv[AT_VECTOR_SIZE];
struct mm_struct *mm = current->mm;
int error;
BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);
if (opt == PR_SET_MM_MAP_SIZE)
return put_user((unsigned int)sizeof(prctl_map),
(unsigned int __user *)addr);
if (data_size != sizeof(prctl_map))
return -EINVAL;
if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
return -EFAULT;
error = validate_prctl_map(&prctl_map);
if (error)
return error;
if (prctl_map.auxv_size) {
memset(user_auxv, 0, sizeof(user_auxv));
if (copy_from_user(user_auxv,
(const void __user *)prctl_map.auxv,
prctl_map.auxv_size))
return -EFAULT;
/* Last entry must be AT_NULL as specification requires */
user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
}
if (prctl_map.exe_fd != (u32)-1)
error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
down_read(&mm->mmap_sem);
if (error)
goto out;
/*
* We don't validate if these members are pointing to
 * real present VMAs because the application may have the corresponding
 * VMAs already unmapped and the kernel uses these members for statistics
 * output in procfs mostly, except
 *
 *   - @start_brk/@brk which are used in do_brk but the kernel looks up
 *     VMAs when updating these members, so anything wrong written
 *     here causes the kernel to swear at the userspace program but won't
 *     lead to any problem in the kernel itself
*/
mm->start_code = prctl_map.start_code;
mm->end_code = prctl_map.end_code;
mm->start_data = prctl_map.start_data;
mm->end_data = prctl_map.end_data;
mm->start_brk = prctl_map.start_brk;
mm->brk = prctl_map.brk;
mm->start_stack = prctl_map.start_stack;
mm->arg_start = prctl_map.arg_start;
mm->arg_end = prctl_map.arg_end;
mm->env_start = prctl_map.env_start;
mm->env_end = prctl_map.env_end;
/*
 * Note this update of @saved_auxv is lockless, thus
 * if someone reads this member in procfs while we're
 * updating -- it may get partly updated results. It's a
 * known and acceptable trade-off: we leave it as is so as
 * not to introduce additional locks here, which would make
 * the kernel more complex.
*/
if (prctl_map.auxv_size)
memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
error = 0;
out:
up_read(&mm->mmap_sem);
return error;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
unsigned long len)
{
/*
* This doesn't move the auxiliary vector itself since it's pinned to
* mm_struct, but it permits filling the vector with new values. It's
* up to the caller to provide sane values here, otherwise userspace
* tools which use this vector might be unhappy.
*/
unsigned long user_auxv[AT_VECTOR_SIZE];
if (len > sizeof(user_auxv))
return -EINVAL;
if (copy_from_user(user_auxv, (const void __user *)addr, len))
return -EFAULT;
/* Make sure the last entry is always AT_NULL */
user_auxv[AT_VECTOR_SIZE - 2] = 0;
user_auxv[AT_VECTOR_SIZE - 1] = 0;
BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
task_lock(current);
memcpy(mm->saved_auxv, user_auxv, len);
task_unlock(current);
return 0;
}
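/*
 * Reached via prctl(PR_SET_MM, PR_SET_MM_AUXV, (unsigned long)vec, len, 0):
 * "vec" points at the caller's auxv image and "len" may not exceed
 * sizeof(mm->saved_auxv).  CAP_SYS_RESOURCE is checked in prctl_set_mm()
 * before we get here, and the last two slots are forced to AT_NULL no
 * matter what was copied in.
 */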
static int prctl_set_mm(int opt, unsigned long addr,
unsigned long arg4, unsigned long arg5)
{
struct mm_struct *mm = current->mm;
struct prctl_mm_map prctl_map;
struct vm_area_struct *vma;
int error;
if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
opt != PR_SET_MM_MAP &&
opt != PR_SET_MM_MAP_SIZE)))
return -EINVAL;
#ifdef CONFIG_CHECKPOINT_RESTORE
if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
#endif
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
if (opt == PR_SET_MM_EXE_FILE)
return prctl_set_mm_exe_file(mm, (unsigned int)addr);
if (opt == PR_SET_MM_AUXV)
return prctl_set_auxv(mm, addr, arg4);
if (addr >= TASK_SIZE || addr < mmap_min_addr)
return -EINVAL;
error = -EINVAL;
down_read(&mm->mmap_sem);
vma = find_vma(mm, addr);
prctl_map.start_code = mm->start_code;
prctl_map.end_code = mm->end_code;
prctl_map.start_data = mm->start_data;
prctl_map.end_data = mm->end_data;
prctl_map.start_brk = mm->start_brk;
prctl_map.brk = mm->brk;
prctl_map.start_stack = mm->start_stack;
prctl_map.arg_start = mm->arg_start;
prctl_map.arg_end = mm->arg_end;
prctl_map.env_start = mm->env_start;
prctl_map.env_end = mm->env_end;
prctl_map.auxv = NULL;
prctl_map.auxv_size = 0;
prctl_map.exe_fd = -1;
switch (opt) {
case PR_SET_MM_START_CODE:
prctl_map.start_code = addr;
break;
case PR_SET_MM_END_CODE:
prctl_map.end_code = addr;
break;
case PR_SET_MM_START_DATA:
prctl_map.start_data = addr;
break;
case PR_SET_MM_END_DATA:
prctl_map.end_data = addr;
break;
case PR_SET_MM_START_STACK:
prctl_map.start_stack = addr;
break;
case PR_SET_MM_START_BRK:
prctl_map.start_brk = addr;
break;
case PR_SET_MM_BRK:
prctl_map.brk = addr;
break;
case PR_SET_MM_ARG_START:
prctl_map.arg_start = addr;
break;
case PR_SET_MM_ARG_END:
prctl_map.arg_end = addr;
break;
case PR_SET_MM_ENV_START:
prctl_map.env_start = addr;
break;
case PR_SET_MM_ENV_END:
prctl_map.env_end = addr;
break;
default:
goto out;
}
error = validate_prctl_map(&prctl_map);
if (error)
goto out;
switch (opt) {
/*
* If command line arguments and environment
* are placed somewhere else on stack, we can
 * set them up here, ARG_START/END to set up
 * command line arguments and ENV_START/END
* for environment.
*/
case PR_SET_MM_START_STACK:
case PR_SET_MM_ARG_START:
case PR_SET_MM_ARG_END:
case PR_SET_MM_ENV_START:
case PR_SET_MM_ENV_END:
if (!vma) {
error = -EFAULT;
goto out;
}
}
mm->start_code = prctl_map.start_code;
mm->end_code = prctl_map.end_code;
mm->start_data = prctl_map.start_data;
mm->end_data = prctl_map.end_data;
mm->start_brk = prctl_map.start_brk;
mm->brk = prctl_map.brk;
mm->start_stack = prctl_map.start_stack;
mm->arg_start = prctl_map.arg_start;
mm->arg_end = prctl_map.arg_end;
mm->env_start = prctl_map.env_start;
mm->env_end = prctl_map.env_end;
error = 0;
out:
up_read(&mm->mmap_sem);
return error;
}
#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
return put_user(me->clear_child_tid, tid_addr);
}
#else
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
return -EINVAL;
}
#endif
SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
unsigned long, arg4, unsigned long, arg5)
{
struct task_struct *me = current;
unsigned char comm[sizeof(me->comm)];
long error;
error = security_task_prctl(option, arg2, arg3, arg4, arg5);
if (error != -ENOSYS)
return error;
error = 0;
switch (option) {
case PR_SET_PDEATHSIG:
if (!valid_signal(arg2)) {
error = -EINVAL;
break;
}
me->pdeath_signal = arg2;
break;
case PR_GET_PDEATHSIG:
error = put_user(me->pdeath_signal, (int __user *)arg2);
break;
case PR_GET_DUMPABLE:
error = get_dumpable(me->mm);
break;
case PR_SET_DUMPABLE:
if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
error = -EINVAL;
break;
}
set_dumpable(me->mm, arg2);
break;
case PR_SET_UNALIGN:
error = SET_UNALIGN_CTL(me, arg2);
break;
case PR_GET_UNALIGN:
error = GET_UNALIGN_CTL(me, arg2);
break;
case PR_SET_FPEMU:
error = SET_FPEMU_CTL(me, arg2);
break;
case PR_GET_FPEMU:
error = GET_FPEMU_CTL(me, arg2);
break;
case PR_SET_FPEXC:
error = SET_FPEXC_CTL(me, arg2);
break;
case PR_GET_FPEXC:
error = GET_FPEXC_CTL(me, arg2);
break;
case PR_GET_TIMING:
error = PR_TIMING_STATISTICAL;
break;
case PR_SET_TIMING:
if (arg2 != PR_TIMING_STATISTICAL)
error = -EINVAL;
break;
case PR_SET_NAME:
comm[sizeof(me->comm) - 1] = 0;
if (strncpy_from_user(comm, (char __user *)arg2,
sizeof(me->comm) - 1) < 0)
return -EFAULT;
set_task_comm(me, comm);
proc_comm_connector(me);
break;
case PR_GET_NAME:
get_task_comm(comm, me);
if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
return -EFAULT;
break;
case PR_GET_ENDIAN:
error = GET_ENDIAN(me, arg2);
break;
case PR_SET_ENDIAN:
error = SET_ENDIAN(me, arg2);
break;
case PR_GET_SECCOMP:
error = prctl_get_seccomp();
break;
case PR_SET_SECCOMP:
error = prctl_set_seccomp(arg2, (char __user *)arg3);
break;
case PR_GET_TSC:
error = GET_TSC_CTL(arg2);
break;
case PR_SET_TSC:
error = SET_TSC_CTL(arg2);
break;
case PR_TASK_PERF_EVENTS_DISABLE:
error = perf_event_task_disable();
break;
case PR_TASK_PERF_EVENTS_ENABLE:
error = perf_event_task_enable();
break;
case PR_GET_TIMERSLACK:
error = current->timer_slack_ns;
break;
case PR_SET_TIMERSLACK:
if (arg2 <= 0)
current->timer_slack_ns =
current->default_timer_slack_ns;
else
current->timer_slack_ns = arg2;
break;
case PR_MCE_KILL:
if (arg4 | arg5)
return -EINVAL;
switch (arg2) {
case PR_MCE_KILL_CLEAR:
if (arg3 != 0)
return -EINVAL;
current->flags &= ~PF_MCE_PROCESS;
break;
case PR_MCE_KILL_SET:
current->flags |= PF_MCE_PROCESS;
if (arg3 == PR_MCE_KILL_EARLY)
current->flags |= PF_MCE_EARLY;
else if (arg3 == PR_MCE_KILL_LATE)
current->flags &= ~PF_MCE_EARLY;
else if (arg3 == PR_MCE_KILL_DEFAULT)
current->flags &=
~(PF_MCE_EARLY|PF_MCE_PROCESS);
else
return -EINVAL;
break;
default:
return -EINVAL;
}
break;
case PR_MCE_KILL_GET:
if (arg2 | arg3 | arg4 | arg5)
return -EINVAL;
if (current->flags & PF_MCE_PROCESS)
error = (current->flags & PF_MCE_EARLY) ?
PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
else
error = PR_MCE_KILL_DEFAULT;
break;
case PR_SET_MM:
error = prctl_set_mm(arg2, arg3, arg4, arg5);
break;
case PR_GET_TID_ADDRESS:
error = prctl_get_tid_address(me, (int __user **)arg2);
break;
case PR_SET_CHILD_SUBREAPER:
me->signal->is_child_subreaper = !!arg2;
break;
case PR_GET_CHILD_SUBREAPER:
error = put_user(me->signal->is_child_subreaper,
(int __user *)arg2);
break;
case PR_SET_NO_NEW_PRIVS:
if (arg2 != 1 || arg3 || arg4 || arg5)
return -EINVAL;
task_set_no_new_privs(current);
break;
case PR_GET_NO_NEW_PRIVS:
if (arg2 || arg3 || arg4 || arg5)
return -EINVAL;
return task_no_new_privs(current) ? 1 : 0;
case PR_GET_THP_DISABLE:
if (arg2 || arg3 || arg4 || arg5)
return -EINVAL;
error = !!(me->mm->def_flags & VM_NOHUGEPAGE);
break;
case PR_SET_THP_DISABLE:
if (arg3 || arg4 || arg5)
return -EINVAL;
down_write(&me->mm->mmap_sem);
if (arg2)
me->mm->def_flags |= VM_NOHUGEPAGE;
else
me->mm->def_flags &= ~VM_NOHUGEPAGE;
up_write(&me->mm->mmap_sem);
break;
case PR_MPX_ENABLE_MANAGEMENT:
if (arg2 || arg3 || arg4 || arg5)
return -EINVAL;
error = MPX_ENABLE_MANAGEMENT();
break;
case PR_MPX_DISABLE_MANAGEMENT:
if (arg2 || arg3 || arg4 || arg5)
return -EINVAL;
error = MPX_DISABLE_MANAGEMENT();
break;
case PR_SET_FP_MODE:
error = SET_FP_MODE(me, arg2);
break;
case PR_GET_FP_MODE:
error = GET_FP_MODE(me);
break;
default:
error = -EINVAL;
break;
}
return error;
}
SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
struct getcpu_cache __user *, unused)
{
int err = 0;
int cpu = raw_smp_processor_id();
if (cpup)
err |= put_user(cpu, cpup);
if (nodep)
err |= put_user(cpu_to_node(cpu), nodep);
return err ? -EFAULT : 0;
}
/**
* do_sysinfo - fill in sysinfo struct
* @info: pointer to buffer to fill
*/
static int do_sysinfo(struct sysinfo *info)
{
unsigned long mem_total, sav_total;
unsigned int mem_unit, bitcount;
struct timespec tp;
memset(info, 0, sizeof(struct sysinfo));
get_monotonic_boottime(&tp);
info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
info->procs = nr_threads;
si_meminfo(info);
si_swapinfo(info);
/*
* If the sum of all the available memory (i.e. ram + swap)
* is less than can be stored in a 32 bit unsigned long then
* we can be binary compatible with 2.2.x kernels. If not,
* well, in that case 2.2.x was broken anyways...
*
* -Erik Andersen <andersee@debian.org>
*/
mem_total = info->totalram + info->totalswap;
if (mem_total < info->totalram || mem_total < info->totalswap)
goto out;
bitcount = 0;
mem_unit = info->mem_unit;
while (mem_unit > 1) {
bitcount++;
mem_unit >>= 1;
sav_total = mem_total;
mem_total <<= 1;
if (mem_total < sav_total)
goto out;
}
/*
* If mem_total did not overflow, multiply all memory values by
* info->mem_unit and set it to 1. This leaves things compatible
* with 2.2.x, and also retains compatibility with earlier 2.4.x
* kernels...
*/
info->mem_unit = 1;
info->totalram <<= bitcount;
info->freeram <<= bitcount;
info->sharedram <<= bitcount;
info->bufferram <<= bitcount;
info->totalswap <<= bitcount;
info->freeswap <<= bitcount;
info->totalhigh <<= bitcount;
info->freehigh <<= bitcount;
out:
return 0;
}
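/*
 * Worked example: with 4 KiB pages si_meminfo() sets mem_unit to 4096.  If
 * totalram + totalswap expressed in bytes still fits in an unsigned long,
 * the loop above ends with bitcount == 12, every mem field is shifted left
 * by 12 (i.e. converted to bytes) and mem_unit becomes 1; otherwise the
 * values are left in 4 KiB units with mem_unit unchanged.
 */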
SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
struct sysinfo val;
do_sysinfo(&val);
if (copy_to_user(info, &val, sizeof(struct sysinfo)))
return -EFAULT;
return 0;
}
#ifdef CONFIG_COMPAT
struct compat_sysinfo {
s32 uptime;
u32 loads[3];
u32 totalram;
u32 freeram;
u32 sharedram;
u32 bufferram;
u32 totalswap;
u32 freeswap;
u16 procs;
u16 pad;
u32 totalhigh;
u32 freehigh;
u32 mem_unit;
char _f[20-2*sizeof(u32)-sizeof(int)];
};
COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
{
struct sysinfo s;
do_sysinfo(&s);
/* Check to see if any memory value is too large for 32-bit and scale
* down if needed
*/
if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
int bitcount = 0;
while (s.mem_unit < PAGE_SIZE) {
s.mem_unit <<= 1;
bitcount++;
}
s.totalram >>= bitcount;
s.freeram >>= bitcount;
s.sharedram >>= bitcount;
s.bufferram >>= bitcount;
s.totalswap >>= bitcount;
s.freeswap >>= bitcount;
s.totalhigh >>= bitcount;
s.freehigh >>= bitcount;
}
if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
__put_user(s.uptime, &info->uptime) ||
__put_user(s.loads[0], &info->loads[0]) ||
__put_user(s.loads[1], &info->loads[1]) ||
__put_user(s.loads[2], &info->loads[2]) ||
__put_user(s.totalram, &info->totalram) ||
__put_user(s.freeram, &info->freeram) ||
__put_user(s.sharedram, &info->sharedram) ||
__put_user(s.bufferram, &info->bufferram) ||
__put_user(s.totalswap, &info->totalswap) ||
__put_user(s.freeswap, &info->freeswap) ||
__put_user(s.procs, &info->procs) ||
__put_user(s.totalhigh, &info->totalhigh) ||
__put_user(s.freehigh, &info->freehigh) ||
__put_user(s.mem_unit, &info->mem_unit))
return -EFAULT;
return 0;
}
#endif /* CONFIG_COMPAT */
|
111093.c | /*
* X.509 certificate writing
*
* Copyright (C) 2006-2015, ARM Limited, All Rights Reserved
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of mbed TLS (https://tls.mbed.org)
*/
/*
* References:
* - certificates: RFC 5280, updated by RFC 6818
* - CSRs: PKCS#10 v1.7 aka RFC 2986
* - attributes: PKCS#9 v2.0 aka RFC 2985
*/
#if !defined(MBEDTLS_CONFIG_FILE)
#include "config.h"
#else
#include MBEDTLS_CONFIG_FILE
#endif
#if defined(MBEDTLS_X509_CRT_WRITE_C)
#include "x509_crt.h"
#include "oid.h"
#include "asn1write.h"
#include "sha1.h"
#include <string.h>
#if defined(MBEDTLS_PEM_WRITE_C)
#include "pem.h"
#endif /* MBEDTLS_PEM_WRITE_C */
/* Implementation that should never be optimized out by the compiler */
static void mbedtls_zeroize( void *v, size_t n ) {
volatile unsigned char *p = v; while( n-- ) *p++ = 0;
}
void mbedtls_x509write_crt_init( mbedtls_x509write_cert *ctx )
{
memset( ctx, 0, sizeof(mbedtls_x509write_cert) );
mbedtls_mpi_init( &ctx->serial );
ctx->version = MBEDTLS_X509_CRT_VERSION_3;
}
void mbedtls_x509write_crt_free( mbedtls_x509write_cert *ctx )
{
mbedtls_mpi_free( &ctx->serial );
mbedtls_asn1_free_named_data_list( &ctx->subject );
mbedtls_asn1_free_named_data_list( &ctx->issuer );
mbedtls_asn1_free_named_data_list( &ctx->extensions );
mbedtls_zeroize( ctx, sizeof(mbedtls_x509write_cert) );
}
void mbedtls_x509write_crt_set_version( mbedtls_x509write_cert *ctx, int version )
{
ctx->version = version;
}
void mbedtls_x509write_crt_set_md_alg( mbedtls_x509write_cert *ctx, mbedtls_md_type_t md_alg )
{
ctx->md_alg = md_alg;
}
void mbedtls_x509write_crt_set_subject_key( mbedtls_x509write_cert *ctx, mbedtls_pk_context *key )
{
ctx->subject_key = key;
}
void mbedtls_x509write_crt_set_issuer_key( mbedtls_x509write_cert *ctx, mbedtls_pk_context *key )
{
ctx->issuer_key = key;
}
int mbedtls_x509write_crt_set_subject_name( mbedtls_x509write_cert *ctx,
const char *subject_name )
{
return mbedtls_x509_string_to_names( &ctx->subject, subject_name );
}
int mbedtls_x509write_crt_set_issuer_name( mbedtls_x509write_cert *ctx,
const char *issuer_name )
{
return mbedtls_x509_string_to_names( &ctx->issuer, issuer_name );
}
int mbedtls_x509write_crt_set_serial( mbedtls_x509write_cert *ctx, const mbedtls_mpi *serial )
{
int ret;
if( ( ret = mbedtls_mpi_copy( &ctx->serial, serial ) ) != 0 )
return( ret );
return( 0 );
}
int mbedtls_x509write_crt_set_validity( mbedtls_x509write_cert *ctx, const char *not_before,
const char *not_after )
{
if( strlen( not_before ) != MBEDTLS_X509_RFC5280_UTC_TIME_LEN - 1 ||
strlen( not_after ) != MBEDTLS_X509_RFC5280_UTC_TIME_LEN - 1 )
{
return( MBEDTLS_ERR_X509_BAD_INPUT_DATA );
}
strncpy( ctx->not_before, not_before, MBEDTLS_X509_RFC5280_UTC_TIME_LEN );
strncpy( ctx->not_after , not_after , MBEDTLS_X509_RFC5280_UTC_TIME_LEN );
ctx->not_before[MBEDTLS_X509_RFC5280_UTC_TIME_LEN - 1] = 'Z';
ctx->not_after[MBEDTLS_X509_RFC5280_UTC_TIME_LEN - 1] = 'Z';
return( 0 );
}
int mbedtls_x509write_crt_set_extension( mbedtls_x509write_cert *ctx,
const char *oid, size_t oid_len,
int critical,
const unsigned char *val, size_t val_len )
{
return mbedtls_x509_set_extension( &ctx->extensions, oid, oid_len,
critical, val, val_len );
}
int mbedtls_x509write_crt_set_basic_constraints( mbedtls_x509write_cert *ctx,
int is_ca, int max_pathlen )
{
int ret;
unsigned char buf[9];
unsigned char *c = buf + sizeof(buf);
size_t len = 0;
memset( buf, 0, sizeof(buf) );
if( is_ca && max_pathlen > 127 )
return( MBEDTLS_ERR_X509_BAD_INPUT_DATA );
if( is_ca )
{
if( max_pathlen >= 0 )
{
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_int( &c, buf, max_pathlen ) );
}
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_bool( &c, buf, 1 ) );
}
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_len( &c, buf, len ) );
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_tag( &c, buf, MBEDTLS_ASN1_CONSTRUCTED |
MBEDTLS_ASN1_SEQUENCE ) );
return mbedtls_x509write_crt_set_extension( ctx, MBEDTLS_OID_BASIC_CONSTRAINTS,
MBEDTLS_OID_SIZE( MBEDTLS_OID_BASIC_CONSTRAINTS ),
0, buf + sizeof(buf) - len, len );
}
#if defined(MBEDTLS_SHA1_C)
int mbedtls_x509write_crt_set_subject_key_identifier( mbedtls_x509write_cert *ctx )
{
int ret;
unsigned char buf[MBEDTLS_MPI_MAX_SIZE * 2 + 20]; /* tag, length + 2xMPI */
unsigned char *c = buf + sizeof(buf);
size_t len = 0;
memset( buf, 0, sizeof(buf) );
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_pk_write_pubkey( &c, buf, ctx->subject_key ) );
mbedtls_sha1( buf + sizeof(buf) - len, len, buf + sizeof(buf) - 20 );
c = buf + sizeof(buf) - 20;
len = 20;
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_len( &c, buf, len ) );
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_tag( &c, buf, MBEDTLS_ASN1_OCTET_STRING ) );
return mbedtls_x509write_crt_set_extension( ctx, MBEDTLS_OID_SUBJECT_KEY_IDENTIFIER,
MBEDTLS_OID_SIZE( MBEDTLS_OID_SUBJECT_KEY_IDENTIFIER ),
0, buf + sizeof(buf) - len, len );
}
int mbedtls_x509write_crt_set_authority_key_identifier( mbedtls_x509write_cert *ctx )
{
int ret;
unsigned char buf[MBEDTLS_MPI_MAX_SIZE * 2 + 20]; /* tag, length + 2xMPI */
unsigned char *c = buf + sizeof(buf);
size_t len = 0;
memset( buf, 0, sizeof(buf) );
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_pk_write_pubkey( &c, buf, ctx->issuer_key ) );
mbedtls_sha1( buf + sizeof(buf) - len, len, buf + sizeof(buf) - 20 );
c = buf + sizeof(buf) - 20;
len = 20;
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_len( &c, buf, len ) );
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_tag( &c, buf, MBEDTLS_ASN1_CONTEXT_SPECIFIC | 0 ) );
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_len( &c, buf, len ) );
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_tag( &c, buf, MBEDTLS_ASN1_CONSTRUCTED |
MBEDTLS_ASN1_SEQUENCE ) );
return mbedtls_x509write_crt_set_extension( ctx, MBEDTLS_OID_AUTHORITY_KEY_IDENTIFIER,
MBEDTLS_OID_SIZE( MBEDTLS_OID_AUTHORITY_KEY_IDENTIFIER ),
0, buf + sizeof(buf) - len, len );
}
#endif /* MBEDTLS_SHA1_C */
int mbedtls_x509write_crt_set_key_usage( mbedtls_x509write_cert *ctx,
unsigned int key_usage )
{
unsigned char buf[4], ku;
unsigned char *c;
int ret;
/* We currently only support 7 bits, from 0x80 to 0x02 */
if( ( key_usage & ~0xfe ) != 0 )
return( MBEDTLS_ERR_X509_FEATURE_UNAVAILABLE );
c = buf + 4;
ku = (unsigned char) key_usage;
if( ( ret = mbedtls_asn1_write_bitstring( &c, buf, &ku, 7 ) ) != 4 )
return( ret );
ret = mbedtls_x509write_crt_set_extension( ctx, MBEDTLS_OID_KEY_USAGE,
MBEDTLS_OID_SIZE( MBEDTLS_OID_KEY_USAGE ),
1, buf, 4 );
if( ret != 0 )
return( ret );
return( 0 );
}
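/*
 * For instance, a CA certificate would typically pass
 * MBEDTLS_X509_KU_KEY_CERT_SIGN | MBEDTLS_X509_KU_CRL_SIGN here; any bit
 * outside the supported 0x80..0x02 range is rejected with
 * MBEDTLS_ERR_X509_FEATURE_UNAVAILABLE above.
 */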
int mbedtls_x509write_crt_set_ns_cert_type( mbedtls_x509write_cert *ctx,
unsigned char ns_cert_type )
{
unsigned char buf[4];
unsigned char *c;
int ret;
c = buf + 4;
if( ( ret = mbedtls_asn1_write_bitstring( &c, buf, &ns_cert_type, 8 ) ) != 4 )
return( ret );
ret = mbedtls_x509write_crt_set_extension( ctx, MBEDTLS_OID_NS_CERT_TYPE,
MBEDTLS_OID_SIZE( MBEDTLS_OID_NS_CERT_TYPE ),
0, buf, 4 );
if( ret != 0 )
return( ret );
return( 0 );
}
static int x509_write_time( unsigned char **p, unsigned char *start,
const char *time, size_t size )
{
int ret;
size_t len = 0;
/*
* write MBEDTLS_ASN1_UTC_TIME if year < 2050 (2 bytes shorter)
*/
if( time[0] == '2' && time[1] == '0' && time[2] < '5' )
{
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_raw_buffer( p, start,
(const unsigned char *) time + 2,
size - 2 ) );
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_len( p, start, len ) );
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_tag( p, start, MBEDTLS_ASN1_UTC_TIME ) );
}
else
{
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_raw_buffer( p, start,
(const unsigned char *) time,
size ) );
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_len( p, start, len ) );
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_tag( p, start, MBEDTLS_ASN1_GENERALIZED_TIME ) );
}
return( (int) len );
}
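/*
 * Example: a notBefore of "20250101000000Z" passes the check above and is
 * written as the 13-byte UTCTime "250101000000Z", while "20500101000000Z"
 * takes the else branch and is written as a full GeneralizedTime, matching
 * the RFC 5280 rule that dates in 2050 or later must use GeneralizedTime.
 */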
int mbedtls_x509write_crt_der( mbedtls_x509write_cert *ctx, unsigned char *buf, size_t size,
int (*f_rng)(void *, unsigned char *, size_t),
void *p_rng )
{
int ret;
const char *sig_oid;
size_t sig_oid_len = 0;
unsigned char *c, *c2;
unsigned char hash[64];
unsigned char sig[MBEDTLS_MPI_MAX_SIZE];
unsigned char tmp_buf[2048];
size_t sub_len = 0, pub_len = 0, sig_and_oid_len = 0, sig_len;
size_t len = 0;
mbedtls_pk_type_t pk_alg;
/*
* Prepare data to be signed in tmp_buf
*/
c = tmp_buf + sizeof( tmp_buf );
/* Signature algorithm needed in TBS, and later for actual signature */
pk_alg = mbedtls_pk_get_type( ctx->issuer_key );
if( pk_alg == MBEDTLS_PK_ECKEY )
pk_alg = MBEDTLS_PK_ECDSA;
if( ( ret = mbedtls_oid_get_oid_by_sig_alg( pk_alg, ctx->md_alg,
&sig_oid, &sig_oid_len ) ) != 0 )
{
return( ret );
}
/*
* Extensions ::= SEQUENCE SIZE (1..MAX) OF Extension
*/
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_x509_write_extensions( &c, tmp_buf, ctx->extensions ) );
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_len( &c, tmp_buf, len ) );
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_tag( &c, tmp_buf, MBEDTLS_ASN1_CONSTRUCTED |
MBEDTLS_ASN1_SEQUENCE ) );
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_len( &c, tmp_buf, len ) );
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_tag( &c, tmp_buf, MBEDTLS_ASN1_CONTEXT_SPECIFIC |
MBEDTLS_ASN1_CONSTRUCTED | 3 ) );
/*
* SubjectPublicKeyInfo
*/
MBEDTLS_ASN1_CHK_ADD( pub_len, mbedtls_pk_write_pubkey_der( ctx->subject_key,
tmp_buf, c - tmp_buf ) );
c -= pub_len;
len += pub_len;
/*
* Subject ::= Name
*/
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_x509_write_names( &c, tmp_buf, ctx->subject ) );
/*
* Validity ::= SEQUENCE {
* notBefore Time,
* notAfter Time }
*/
sub_len = 0;
MBEDTLS_ASN1_CHK_ADD( sub_len, x509_write_time( &c, tmp_buf, ctx->not_after,
MBEDTLS_X509_RFC5280_UTC_TIME_LEN ) );
MBEDTLS_ASN1_CHK_ADD( sub_len, x509_write_time( &c, tmp_buf, ctx->not_before,
MBEDTLS_X509_RFC5280_UTC_TIME_LEN ) );
len += sub_len;
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_len( &c, tmp_buf, sub_len ) );
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_tag( &c, tmp_buf, MBEDTLS_ASN1_CONSTRUCTED |
MBEDTLS_ASN1_SEQUENCE ) );
/*
* Issuer ::= Name
*/
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_x509_write_names( &c, tmp_buf, ctx->issuer ) );
/*
* Signature ::= AlgorithmIdentifier
*/
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_algorithm_identifier( &c, tmp_buf,
sig_oid, strlen( sig_oid ), 0 ) );
/*
* Serial ::= INTEGER
*/
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_mpi( &c, tmp_buf, &ctx->serial ) );
/*
* Version ::= INTEGER { v1(0), v2(1), v3(2) }
*/
sub_len = 0;
MBEDTLS_ASN1_CHK_ADD( sub_len, mbedtls_asn1_write_int( &c, tmp_buf, ctx->version ) );
len += sub_len;
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_len( &c, tmp_buf, sub_len ) );
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_tag( &c, tmp_buf, MBEDTLS_ASN1_CONTEXT_SPECIFIC |
MBEDTLS_ASN1_CONSTRUCTED | 0 ) );
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_len( &c, tmp_buf, len ) );
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_tag( &c, tmp_buf, MBEDTLS_ASN1_CONSTRUCTED |
MBEDTLS_ASN1_SEQUENCE ) );
/*
* Make signature
*/
mbedtls_md( mbedtls_md_info_from_type( ctx->md_alg ), c, len, hash );
if( ( ret = mbedtls_pk_sign( ctx->issuer_key, ctx->md_alg, hash, 0, sig, &sig_len,
f_rng, p_rng ) ) != 0 )
{
return( ret );
}
/*
* Write data to output buffer
*/
c2 = buf + size;
MBEDTLS_ASN1_CHK_ADD( sig_and_oid_len, mbedtls_x509_write_sig( &c2, buf,
sig_oid, sig_oid_len, sig, sig_len ) );
c2 -= len;
memcpy( c2, c, len );
len += sig_and_oid_len;
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_len( &c2, buf, len ) );
MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_tag( &c2, buf, MBEDTLS_ASN1_CONSTRUCTED |
MBEDTLS_ASN1_SEQUENCE ) );
return( (int) len );
}
#define PEM_BEGIN_CRT "-----BEGIN CERTIFICATE-----\n"
#define PEM_END_CRT "-----END CERTIFICATE-----\n"
#if defined(MBEDTLS_PEM_WRITE_C)
int mbedtls_x509write_crt_pem( mbedtls_x509write_cert *crt, unsigned char *buf, size_t size,
int (*f_rng)(void *, unsigned char *, size_t),
void *p_rng )
{
int ret;
unsigned char output_buf[4096];
size_t olen = 0;
if( ( ret = mbedtls_x509write_crt_der( crt, output_buf, sizeof(output_buf),
f_rng, p_rng ) ) < 0 )
{
return( ret );
}
if( ( ret = mbedtls_pem_write_buffer( PEM_BEGIN_CRT, PEM_END_CRT,
output_buf + sizeof(output_buf) - ret,
ret, buf, size, &olen ) ) != 0 )
{
return( ret );
}
return( 0 );
}
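/*
 * Minimal usage sketch (subject_pk, issuer_pk, serial, ctr_drbg and the
 * output buffer are placeholders assumed to have been set up elsewhere with
 * the usual mbedtls_pk_ / mbedtls_mpi_ / mbedtls_ctr_drbg_ APIs):
 *
 *	mbedtls_x509write_cert crt;
 *	mbedtls_x509write_crt_init( &crt );
 *	mbedtls_x509write_crt_set_md_alg( &crt, MBEDTLS_MD_SHA256 );
 *	mbedtls_x509write_crt_set_subject_key( &crt, &subject_pk );
 *	mbedtls_x509write_crt_set_issuer_key( &crt, &issuer_pk );
 *	mbedtls_x509write_crt_set_subject_name( &crt, "CN=example,O=test" );
 *	mbedtls_x509write_crt_set_issuer_name( &crt, "CN=example,O=test" );
 *	mbedtls_x509write_crt_set_serial( &crt, &serial );
 *	mbedtls_x509write_crt_set_validity( &crt, "20250101000000",
 *	                                    "20350101000000" );
 *	ret = mbedtls_x509write_crt_pem( &crt, buf, sizeof( buf ),
 *	                                 mbedtls_ctr_drbg_random, &ctr_drbg );
 *	mbedtls_x509write_crt_free( &crt );
 */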
#endif /* MBEDTLS_PEM_WRITE_C */
#endif /* MBEDTLS_X509_CRT_WRITE_C */
|
591437.c |
/*
* Unix SMB/Netbios implementation.
* Version 1.9.
* RPC Pipe client / server routines
* Copyright (C) Andrew Tridgell 1992-1997,
* Copyright (C) Luke Kenneth Casson Leighton 1996-1997,
* Copyright (C) Paul Ashton 1997.
* Copyright (C) Jeremy Allison 1998.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "includes.h"
#include "nterr.h"
extern int DEBUGLEVEL;
extern DOM_SID global_sam_sid;
extern fstring global_myworkgroup;
extern pstring global_myname;
/***************************************************************************
lsa_reply_open_policy2
***************************************************************************/
static BOOL lsa_reply_open_policy2(prs_struct *rdata)
{
int i;
LSA_R_OPEN_POL2 r_o;
ZERO_STRUCT(r_o);
/* set up the LSA QUERY INFO response */
for (i = 4; i < POL_HND_SIZE; i++)
r_o.pol.data[i] = i;
r_o.status = 0x0;
/* store the response in the SMB stream */
if(!lsa_io_r_open_pol2("", &r_o, rdata, 0)) {
DEBUG(0,("lsa_reply_open_policy2: unable to marshall LSA_R_OPEN_POL2.\n"));
return False;
}
return True;
}
/***************************************************************************
lsa_reply_open_policy
***************************************************************************/
static BOOL lsa_reply_open_policy(prs_struct *rdata)
{
int i;
LSA_R_OPEN_POL r_o;
ZERO_STRUCT(r_o);
/* set up the LSA QUERY INFO response */
for (i = 4; i < POL_HND_SIZE; i++)
r_o.pol.data[i] = i;
r_o.status = 0x0;
/* store the response in the SMB stream */
if(!lsa_io_r_open_pol("", &r_o, rdata, 0)) {
DEBUG(0,("lsa_reply_open_policy: unable to marshall LSA_R_OPEN_POL.\n"));
return False;
}
return True;
}
/***************************************************************************
Init dom_query
***************************************************************************/
static void init_dom_query(DOM_QUERY *d_q, char *dom_name, DOM_SID *dom_sid)
{
fstring sid_str;
int domlen = strlen(dom_name);
*sid_str = '\0';
d_q->uni_dom_max_len = domlen * 2;
d_q->uni_dom_str_len = domlen * 2;
d_q->buffer_dom_name = domlen != 0 ? 1 : 0; /* domain buffer pointer */
d_q->buffer_dom_sid = dom_sid != NULL ? 1 : 0; /* domain sid pointer */
/* this string is supposed to be character short */
init_unistr2(&d_q->uni_domain_name, dom_name, domlen);
if(dom_sid) {
sid_to_string(sid_str, dom_sid);
init_dom_sid2(&d_q->dom_sid, dom_sid);
}
}
/***************************************************************************
lsa_reply_enum_trust_dom
***************************************************************************/
static void lsa_reply_enum_trust_dom(LSA_Q_ENUM_TRUST_DOM *q_e,
prs_struct *rdata,
uint32 enum_context, char *dom_name, DOM_SID *dom_sid)
{
LSA_R_ENUM_TRUST_DOM r_e;
ZERO_STRUCT(r_e);
/* set up the LSA QUERY INFO response */
init_r_enum_trust_dom(&r_e, enum_context, dom_name, dom_sid,
dom_name != NULL ? 0x0 : 0x80000000 | NT_STATUS_UNABLE_TO_FREE_VM);
/* store the response in the SMB stream */
lsa_io_r_enum_trust_dom("", &r_e, rdata, 0);
}
/***************************************************************************
lsa_reply_query_info
***************************************************************************/
static BOOL lsa_reply_query_info(LSA_Q_QUERY_INFO *q_q, prs_struct *rdata,
char *dom_name, DOM_SID *dom_sid, uint32 status_code)
{
LSA_R_QUERY_INFO r_q;
ZERO_STRUCT(r_q);
/* set up the LSA QUERY INFO response */
if(status_code == 0) {
r_q.undoc_buffer = 0x22000000; /* bizarre */
r_q.info_class = q_q->info_class;
init_dom_query(&r_q.dom.id5, dom_name, dom_sid);
}
r_q.status = status_code;
/* store the response in the SMB stream */
if(!lsa_io_r_query("", &r_q, rdata, 0)) {
DEBUG(0,("lsa_reply_query_info: failed to marshall LSA_R_QUERY_INFO.\n"));
return False;
}
return True;
}
/***************************************************************************
init_dom_ref - adds a domain if it's not already in, returns the index.
***************************************************************************/
static int init_dom_ref(DOM_R_REF *ref, char *dom_name, DOM_SID *dom_sid)
{
int num = 0;
int len;
if (dom_name != NULL) {
for (num = 0; num < ref->num_ref_doms_1; num++) {
fstring domname;
fstrcpy(domname, dos_unistr2_to_str(&ref->ref_dom[num].uni_dom_name));
if (strequal(domname, dom_name))
return num;
}
} else {
num = ref->num_ref_doms_1;
}
if (num >= MAX_REF_DOMAINS) {
/* index not found, already at maximum domain limit */
return -1;
}
ref->num_ref_doms_1 = num+1;
ref->ptr_ref_dom = 1;
ref->max_entries = MAX_REF_DOMAINS;
ref->num_ref_doms_2 = num+1;
len = (dom_name != NULL) ? strlen(dom_name) : 0;
if(dom_name != NULL && len == 0)
len = 1;
init_uni_hdr(&ref->hdr_ref_dom[num].hdr_dom_name, len);
ref->hdr_ref_dom[num].ptr_dom_sid = dom_sid != NULL ? 1 : 0;
init_unistr2(&ref->ref_dom[num].uni_dom_name, dom_name, len);
init_dom_sid2(&ref->ref_dom[num].ref_dom, dom_sid );
return num;
}
/***************************************************************************
init_lsa_rid2s
***************************************************************************/
static void init_lsa_rid2s(DOM_R_REF *ref, DOM_RID2 *rid2,
int num_entries, UNISTR2 name[MAX_LOOKUP_SIDS],
uint32 *mapped_count)
{
int i;
int total = 0;
*mapped_count = 0;
SMB_ASSERT(num_entries <= MAX_LOOKUP_SIDS);
for (i = 0; i < num_entries; i++) {
BOOL status = False;
DOM_SID dom_sid;
DOM_SID sid;
uint32 rid = 0xffffffff;
int dom_idx = -1;
pstring full_name;
fstring dom_name;
fstring user;
uint8 sid_name_use = SID_NAME_UNKNOWN;
pstrcpy(full_name, dos_unistr2_to_str(&name[i]));
/*
* Try and split the name into a DOMAIN and
* user component.
*/
split_domain_name(full_name, dom_name, user);
/*
* We only do anything with this name if we
* can map the Domain into a SID we know.
*/
if (map_domain_name_to_sid(&dom_sid, dom_name)) {
dom_idx = init_dom_ref(ref, dom_name, &dom_sid);
if (lookup_local_name(dom_name, user, &sid, &sid_name_use) && sid_split_rid(&sid, &rid))
status = True;
}
if (status)
(*mapped_count)++;
else {
dom_idx = -1;
rid = 0xffffffff;
sid_name_use = SID_NAME_UNKNOWN;
}
init_dom_rid2(&rid2[total], rid, sid_name_use, dom_idx);
total++;
}
}
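/*
 * Example: a request for "MYDOM\jdoe" is split above into dom_name "MYDOM"
 * and user "jdoe"; if MYDOM maps to a known domain SID the domain is added
 * to (or found in) the referenced-domain list and the user's SID is split
 * into that domain SID plus a RID, which is what the DOM_RID2 entry
 * carries.  Unresolvable names keep rid 0xffffffff and SID_NAME_UNKNOWN.
 */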
/***************************************************************************
init_reply_lookup_names
***************************************************************************/
static void init_reply_lookup_names(LSA_R_LOOKUP_NAMES *r_l,
DOM_R_REF *ref, uint32 num_entries,
DOM_RID2 *rid2, uint32 mapped_count)
{
r_l->ptr_dom_ref = 1;
r_l->dom_ref = ref;
r_l->num_entries = num_entries;
r_l->ptr_entries = 1;
r_l->num_entries2 = num_entries;
r_l->dom_rid = rid2;
r_l->mapped_count = mapped_count;
if (mapped_count == 0)
r_l->status = 0xC0000000 | NT_STATUS_NONE_MAPPED;
else
r_l->status = 0x0;
}
/***************************************************************************
Init lsa_trans_names.
***************************************************************************/
static void init_lsa_trans_names(DOM_R_REF *ref, LSA_TRANS_NAME_ENUM *trn,
int num_entries, DOM_SID2 sid[MAX_LOOKUP_SIDS], uint32 *mapped_count)
{
extern DOM_SID global_sid_S_1_5_0x20; /* BUILTIN sid. */
int i;
int total = 0;
*mapped_count = 0;
SMB_ASSERT(num_entries <= MAX_LOOKUP_SIDS);
for (i = 0; i < num_entries; i++) {
BOOL status = False;
DOM_SID find_sid = sid[i].sid;
uint32 rid = 0xffffffff;
int dom_idx = -1;
fstring name;
fstring dom_name;
uint8 sid_name_use = 0;
memset(dom_name, '\0', sizeof(dom_name));
memset(name, '\0', sizeof(name));
/*
* First, check to see if the SID is one of the well
* known ones (this includes our own domain SID).
* Next, check if the domain prefix is one of the
* well known ones. If so and the domain prefix was
* either BUILTIN or our own global sid, then lookup
* the RID as a user or group id and translate to
* a name.
*/
if (map_domain_sid_to_name(&find_sid, dom_name)) {
sid_name_use = SID_NAME_DOMAIN;
} else if (sid_split_rid(&find_sid, &rid) && map_domain_sid_to_name(&find_sid, dom_name)) {
if (sid_equal(&find_sid, &global_sam_sid) ||
sid_equal(&find_sid, &global_sid_S_1_5_0x20)) {
status = lookup_local_rid(rid, name, &sid_name_use);
} else {
status = lookup_known_rid(&find_sid, rid, name, &sid_name_use);
}
}
DEBUG(10,("init_lsa_trans_names: adding domain '%s' sid %s to referenced list.\n",
dom_name, name ));
dom_idx = init_dom_ref(ref, dom_name, &find_sid);
if(!status) {
slprintf(name, sizeof(name)-1, "unix.%08x", rid);
sid_name_use = SID_NAME_UNKNOWN;
}
DEBUG(10,("init_lsa_trans_names: added user '%s\\%s' to referenced list.\n", dom_name, name ));
(*mapped_count)++;
init_lsa_trans_name(&trn->name[total], &trn->uni_name[total],
sid_name_use, name, dom_idx);
total++;
}
trn->num_entries = total;
trn->ptr_trans_names = 1;
trn->num_entries2 = total;
}
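/*
 * Example: a SID that matches one of the well-known domain SIDs resolves
 * directly with SID_NAME_DOMAIN; a SID of the form <domain sid>-<rid> has
 * the RID split off and, when the prefix is BUILTIN or our own SAM SID, is
 * looked up as a local user/group, otherwise via lookup_known_rid().  SIDs
 * that still cannot be resolved get the placeholder name "unix.%08x" with
 * SID_NAME_UNKNOWN.
 */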
/***************************************************************************
Init_reply_lookup_sids.
***************************************************************************/
static void init_reply_lookup_sids(LSA_R_LOOKUP_SIDS *r_l,
DOM_R_REF *ref, LSA_TRANS_NAME_ENUM *names,
uint32 mapped_count)
{
r_l->ptr_dom_ref = 1;
r_l->dom_ref = ref;
r_l->names = names;
r_l->mapped_count = mapped_count;
if (mapped_count == 0)
r_l->status = 0xC0000000 | NT_STATUS_NONE_MAPPED;
else
r_l->status = 0x0;
}
/***************************************************************************
lsa_reply_lookup_sids
***************************************************************************/
static BOOL lsa_reply_lookup_sids(prs_struct *rdata, DOM_SID2 *sid, int num_entries)
{
LSA_R_LOOKUP_SIDS r_l;
DOM_R_REF ref;
LSA_TRANS_NAME_ENUM names;
uint32 mapped_count = 0;
ZERO_STRUCT(r_l);
ZERO_STRUCT(ref);
ZERO_STRUCT(names);
/* set up the LSA Lookup SIDs response */
init_lsa_trans_names(&ref, &names, num_entries, sid, &mapped_count);
init_reply_lookup_sids(&r_l, &ref, &names, mapped_count);
/* store the response in the SMB stream */
if(!lsa_io_r_lookup_sids("", &r_l, rdata, 0)) {
DEBUG(0,("lsa_reply_lookup_sids: Failed to marshall LSA_R_LOOKUP_SIDS.\n"));
return False;
}
return True;
}
/***************************************************************************
lsa_reply_lookup_names
***************************************************************************/
static BOOL lsa_reply_lookup_names(prs_struct *rdata,
UNISTR2 names[MAX_LOOKUP_SIDS], int num_entries)
{
LSA_R_LOOKUP_NAMES r_l;
DOM_R_REF ref;
DOM_RID2 rids[MAX_LOOKUP_SIDS];
uint32 mapped_count = 0;
ZERO_STRUCT(r_l);
ZERO_STRUCT(ref);
ZERO_ARRAY(rids);
/* set up the LSA Lookup RIDs response */
init_lsa_rid2s(&ref, rids, num_entries, names, &mapped_count);
init_reply_lookup_names(&r_l, &ref, num_entries, rids, mapped_count);
/* store the response in the SMB stream */
if(!lsa_io_r_lookup_names("", &r_l, rdata, 0)) {
DEBUG(0,("lsa_reply_lookup_names: Failed to marshall LSA_R_LOOKUP_NAMES.\n"));
return False;
}
return True;
}
/***************************************************************************
api_lsa_open_policy2
***************************************************************************/
static BOOL api_lsa_open_policy2( uint16 vuid, prs_struct *data,
prs_struct *rdata )
{
LSA_Q_OPEN_POL2 q_o;
ZERO_STRUCT(q_o);
/* grab the server, object attributes and desired access flag...*/
if(!lsa_io_q_open_pol2("", &q_o, data, 0)) {
DEBUG(0,("api_lsa_open_policy2: unable to unmarshall LSA_Q_OPEN_POL2.\n"));
return False;
}
/* lkclXXXX having decoded it, ignore all fields in the open policy! */
/* return a 20 byte policy handle */
if(!lsa_reply_open_policy2(rdata))
return False;
return True;
}
/***************************************************************************
api_lsa_open_policy
***************************************************************************/
static BOOL api_lsa_open_policy( uint16 vuid, prs_struct *data,
prs_struct *rdata )
{
LSA_Q_OPEN_POL q_o;
ZERO_STRUCT(q_o);
/* grab the server, object attributes and desired access flag...*/
if(!lsa_io_q_open_pol("", &q_o, data, 0)) {
DEBUG(0,("api_lsa_open_policy: unable to unmarshall LSA_Q_OPEN_POL.\n"));
return False;
}
/* lkclXXXX having decoded it, ignore all fields in the open policy! */
/* return a 20 byte policy handle */
if(!lsa_reply_open_policy(rdata))
return False;
return True;
}
/***************************************************************************
api_lsa_enum_trust_dom
***************************************************************************/
static BOOL api_lsa_enum_trust_dom( uint16 vuid, prs_struct *data,
prs_struct *rdata )
{
LSA_Q_ENUM_TRUST_DOM q_e;
ZERO_STRUCT(q_e);
/* grab the enum trust domain context etc. */
if(!lsa_io_q_enum_trust_dom("", &q_e, data, 0))
return False;
/* construct reply. return status is always 0x0 */
lsa_reply_enum_trust_dom(&q_e, rdata, 0, NULL, NULL);
return True;
}
/***************************************************************************
api_lsa_query_info
***************************************************************************/
static BOOL api_lsa_query_info( uint16 vuid, prs_struct *data,
prs_struct *rdata )
{
LSA_Q_QUERY_INFO q_i;
fstring name;
DOM_SID *sid = NULL;
uint32 status_code = 0;
memset(name, 0, sizeof(name));
ZERO_STRUCT(q_i);
/* grab the info class and policy handle */
if(!lsa_io_q_query("", &q_i, data, 0)) {
DEBUG(0,("api_lsa_query_info: failed to unmarshall LSA_Q_QUERY_INFO.\n"));
return False;
}
switch (q_i.info_class) {
case 0x03:
if(lp_domain_logons()) {
fstrcpy(name, global_myworkgroup);
sid = &global_sam_sid;
} else {
*name = '\0';
}
break;
case 0x05:
fstrcpy(name, global_myname);
sid = &global_sam_sid;
break;
default:
DEBUG(3,("api_lsa_query_info: unknown info level in Lsa Query: %d\n", q_i.info_class));
status_code = (NT_STATUS_INVALID_INFO_CLASS | 0xC0000000);
break;
}
/* construct reply. return status is always 0x0 */
if(!lsa_reply_query_info(&q_i, rdata, name, sid, status_code))
return False;
return True;
}
/***************************************************************************
api_lsa_lookup_sids
***************************************************************************/
static BOOL api_lsa_lookup_sids( uint16 vuid, prs_struct *data, prs_struct *rdata )
{
LSA_Q_LOOKUP_SIDS q_l;
ZERO_STRUCT(q_l);
/* grab the info class and policy handle */
if(!lsa_io_q_lookup_sids("", &q_l, data, 0)) {
DEBUG(0,("api_lsa_lookup_sids: failed to unmarshall LSA_Q_LOOKUP_SIDS.\n"));
return False;
}
/* construct reply. return status is always 0x0 */
if(!lsa_reply_lookup_sids(rdata, q_l.sids.sid, q_l.sids.num_entries))
return False;
return True;
}
/***************************************************************************
api_lsa_lookup_names
***************************************************************************/
static BOOL api_lsa_lookup_names( uint16 vuid, prs_struct *data, prs_struct *rdata )
{
LSA_Q_LOOKUP_NAMES q_l;
ZERO_STRUCT(q_l);
/* grab the info class and policy handle */
if(!lsa_io_q_lookup_names("", &q_l, data, 0)) {
DEBUG(0,("api_lsa_lookup_names: failed to unmarshall LSA_Q_LOOKUP_NAMES.\n"));
return False;
}
SMB_ASSERT_ARRAY(q_l.uni_name, q_l.num_entries);
return lsa_reply_lookup_names(rdata, q_l.uni_name, q_l.num_entries);
}
/***************************************************************************
api_lsa_close
***************************************************************************/
static BOOL api_lsa_close( uint16 vuid, prs_struct *data,
prs_struct *rdata)
{
LSA_R_CLOSE r_c;
ZERO_STRUCT(r_c);
/* store the response in the SMB stream */
if (!lsa_io_r_close("", &r_c, rdata, 0)) {
DEBUG(0,("api_lsa_close: lsa_io_r_close failed.\n"));
return False;
}
return True;
}
/***************************************************************************
api_lsa_open_secret
***************************************************************************/
static BOOL api_lsa_open_secret( uint16 vuid, prs_struct *data,
prs_struct *rdata)
{
/* XXXX this is NOT good */
size_t i;
uint32 dummy = 0;
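	/* Reply with four zero uint32s (an empty response body) followed by a "name not found" status, as secrets are not supported. */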
	for(i = 0; i < 4; i++) {
		if(!prs_uint32("api_lsa_open_secret", rdata, 1, &dummy)) {
DEBUG(0,("api_lsa_open_secret: prs_uint32 %d failed.\n",
(int)i ));
return False;
}
}
dummy = 0xC0000000 | NT_STATUS_OBJECT_NAME_NOT_FOUND;
if(!prs_uint32("api_lsa_close", rdata, 1, &dummy)) {
DEBUG(0,("api_lsa_open_secret: prs_uint32 status failed.\n"));
return False;
}
return True;
}
/***************************************************************************
\PIPE\ntlsa commands
***************************************************************************/
static struct api_struct api_lsa_cmds[] =
{
{ "LSA_OPENPOLICY2" , LSA_OPENPOLICY2 , api_lsa_open_policy2 },
{ "LSA_OPENPOLICY" , LSA_OPENPOLICY , api_lsa_open_policy },
{ "LSA_QUERYINFOPOLICY" , LSA_QUERYINFOPOLICY , api_lsa_query_info },
{ "LSA_ENUMTRUSTDOM" , LSA_ENUMTRUSTDOM , api_lsa_enum_trust_dom },
{ "LSA_CLOSE" , LSA_CLOSE , api_lsa_close },
{ "LSA_OPENSECRET" , LSA_OPENSECRET , api_lsa_open_secret },
{ "LSA_LOOKUPSIDS" , LSA_LOOKUPSIDS , api_lsa_lookup_sids },
{ "LSA_LOOKUPNAMES" , LSA_LOOKUPNAMES , api_lsa_lookup_names },
{ NULL , 0 , NULL }
};
/***************************************************************************
 api_ntlsa_rpc
***************************************************************************/
BOOL api_ntlsa_rpc(pipes_struct *p, prs_struct *data)
{
return api_rpcTNP(p, "api_ntlsa_rpc", api_lsa_cmds, data);
}
|
823709.c | #include "AST/AstDumper.h"
#include "AST/BinaryOperator.h"
#include "AST/CompoundStatement.h"
#include "AST/ConstantValue.h"
#include "AST/FunctionInvocation.h"
#include "AST/UnaryOperator.h"
#include "AST/VariableReference.h"
#include "AST/assignment.h"
#include "AST/declaration.h"
#include "AST/for.h"
#include "AST/function.h"
#include "AST/if.h"
#include "AST/print.h"
#include "AST/program.h"
#include "AST/read.h"
#include "AST/return.h"
#include "AST/variable.h"
#include "AST/while.h"
#include "OoUtils.h"
#include "core/utils.h"
#include <stdio.h>
#include <stdlib.h>
typedef struct __AstDumper {
ANONYMOUS_AST_NODE_VISITOR;
uint32_t indentation;
} AstDumper;
// FIXME: remove this line if you choose to use visitor pattern with this template
#ifdef I_WANT_TO_USE_VISITOR_PATTERN
static const uint32_t kIndentationStride = 2u;
static void outputIndentationSpace(const uint32_t indentation) {
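  // "%*s" with an empty string argument prints 'indentation' space characters.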
fprintf(stdout, "%*s", indentation, "");
}
static void incrementIndentation(AstDumper *ad) {
ad->indentation += kIndentationStride;
}
static void decrementIndentation(AstDumper *ad) {
ad->indentation -= kIndentationStride;
}
static void dumpProgramNode(AstDumper *self, ProgramNode *program_node) {
outputIndentationSpace(self->indentation);
fprintf(stdout, "program <line: %u, col: %u> %s %s\n",
gAstNode.getLocation(program_node)->line,
gAstNode.getLocation(program_node)->col,
gProgramNode.getName(program_node),
"void");
incrementIndentation(self);
gProgramNode.visitChildNodes(program_node, self);
decrementIndentation(self);
}
static void dumpDeclNode(AstDumper *self, DeclNode *decl_node) {
outputIndentationSpace(self->indentation);
fprintf(stdout, "declaration <line: %u, col: %u>\n",
gAstNode.getLocation(decl_node)->line,
gAstNode.getLocation(decl_node)->col);
incrementIndentation(self);
gDeclNode.visitChildNodes(decl_node, self);
decrementIndentation(self);
}
static void dumpVariableNode(AstDumper *self, VariableNode *var_node) {
outputIndentationSpace(self->indentation);
// TODO: name, type
fprintf(stdout, "variable <line: %u, col: %u> %s %s\n",
gAstNode.getLocation(var_node)->line,
gAstNode.getLocation(var_node)->col,
"TODO",
"TODO");
incrementIndentation(self);
gVariableNode.visitChildNodes(var_node, self);
decrementIndentation(self);
}
static void dumpConstantValueNode(AstDumper *self,
ConstantValueNode *constant_value_node) {
outputIndentationSpace(self->indentation);
// TODO: string of constant value
fprintf(stdout, "constant <line: %u, col: %u> %s\n",
gAstNode.getLocation(constant_value_node)->line,
gAstNode.getLocation(constant_value_node)->col,
"TODO");
}
static void dumpFunctionNode(AstDumper *self, FunctionNode *func_node) {
outputIndentationSpace(self->indentation);
// TODO: name, prototype string
fprintf(stdout, "function declaration <line: %u, col: %u> %s %s\n",
gAstNode.getLocation(func_node)->line,
gAstNode.getLocation(func_node)->col,
"TODO",
"TODO");
incrementIndentation(self);
gFunctionNode.visitChildNodes(func_node, self);
decrementIndentation(self);
}
static void
dumpCompoundStatementNode(AstDumper *self,
CompoundStatementNode *compound_statement_node) {
outputIndentationSpace(self->indentation);
fprintf(stdout, "compound statement <line: %u, col: %u>\n",
gAstNode.getLocation(compound_statement_node)->line,
gAstNode.getLocation(compound_statement_node)->col);
incrementIndentation(self);
gCompoundStatementNode.visitChildNodes(compound_statement_node, self);
decrementIndentation(self);
}
static void dumpPrintNode(AstDumper *self, PrintNode *print_node) {
outputIndentationSpace(self->indentation);
fprintf(stdout, "print statement <line: %u, col: %u>\n",
gAstNode.getLocation(print_node)->line,
gAstNode.getLocation(print_node)->col);
incrementIndentation(self);
gPrintNode.visitChildNodes(print_node, self);
decrementIndentation(self);
}
static void dumpBinaryOperatorNode(AstDumper *self,
BinaryOperatorNode *bin_op_node) {
outputIndentationSpace(self->indentation);
// TODO: operator string
fprintf(stdout, "binary operator <line: %u, col: %u> %s\n",
gAstNode.getLocation(bin_op_node)->line,
gAstNode.getLocation(bin_op_node)->col,
"TODO");
incrementIndentation(self);
gBinaryOperatorNode.visitChildNodes(bin_op_node, self);
decrementIndentation(self);
}
static void dumpUnaryOperatorNode(AstDumper *self,
UnaryOperatorNode *un_op_node) {
outputIndentationSpace(self->indentation);
// TODO: operator string
fprintf(stdout, "unary operator <line: %u, col: %u> %s\n",
gAstNode.getLocation(un_op_node)->line,
gAstNode.getLocation(un_op_node)->col,
"TODO");
incrementIndentation(self);
gUnaryOperatorNode.visitChildNodes(un_op_node, self);
decrementIndentation(self);
}
static void
dumpFunctionInvocationNode(AstDumper *self,
FunctionInvocationNode *func_invocation_node) {
outputIndentationSpace(self->indentation);
// TODO: function name
fprintf(stdout, "function invocation <line: %u, col: %u> %s\n",
gAstNode.getLocation(func_invocation_node)->line,
gAstNode.getLocation(func_invocation_node)->col,
"TODO");
incrementIndentation(self);
gFunctionInvocationNode.visitChildNodes(func_invocation_node, self);
decrementIndentation(self);
}
static void dumpVariableReferenceNode(AstDumper *self,
VariableReferenceNode *var_ref_node) {
outputIndentationSpace(self->indentation);
// TODO: variable name
fprintf(stdout, "variable reference <line: %u, col: %u> %s\n",
gAstNode.getLocation(var_ref_node)->line,
gAstNode.getLocation(var_ref_node)->col,
"TODO");
incrementIndentation(self);
gVariableReferenceNode.visitChildNodes(var_ref_node, self);
decrementIndentation(self);
}
static void dumpAssignmentNode(AstDumper *self,
AssignmentNode *assignment_node) {
outputIndentationSpace(self->indentation);
fprintf(stdout, "assignment statement <line: %u, col: %u>\n",
gAstNode.getLocation(assignment_node)->line,
gAstNode.getLocation(assignment_node)->col);
incrementIndentation(self);
gAssignmentNode.visitChildNodes(assignment_node, self);
decrementIndentation(self);
}
static void dumpReadNode(AstDumper *self, ReadNode *read_node) {
outputIndentationSpace(self->indentation);
fprintf(stdout, "read statement <line: %u, col: %u>\n",
gAstNode.getLocation(read_node)->line,
gAstNode.getLocation(read_node)->col);
incrementIndentation(self);
gReadNode.visitChildNodes(read_node, self);
decrementIndentation(self);
}
static void dumpIfNode(AstDumper *self, IfNode *if_node) {
outputIndentationSpace(self->indentation);
fprintf(stdout, "if statement <line: %u, col: %u>\n",
gAstNode.getLocation(if_node)->line,
gAstNode.getLocation(if_node)->col);
incrementIndentation(self);
gIfNode.visitChildNodes(if_node, self);
decrementIndentation(self);
}
static void dumpWhileNode(AstDumper *self, WhileNode *while_node) {
outputIndentationSpace(self->indentation);
fprintf(stdout, "while statement <line: %u, col: %u>\n",
gAstNode.getLocation(while_node)->line,
gAstNode.getLocation(while_node)->col);
incrementIndentation(self);
gWhileNode.visitChildNodes(while_node, self);
decrementIndentation(self);
}
static void dumpForNode(AstDumper *self, ForNode *for_node) {
outputIndentationSpace(self->indentation);
fprintf(stdout, "for statement <line: %u, col: %u>\n",
gAstNode.getLocation(for_node)->line,
gAstNode.getLocation(for_node)->col);
incrementIndentation(self);
gForNode.visitChildNodes(for_node, self);
decrementIndentation(self);
}
static void dumpReturnNode(AstDumper *self, ReturnNode *return_node) {
outputIndentationSpace(self->indentation);
fprintf(stdout, "return statement <line: %u, col: %u>\n",
gAstNode.getLocation(return_node)->line,
gAstNode.getLocation(return_node)->col);
incrementIndentation(self);
gReturnNode.visitChildNodes(return_node, self);
decrementIndentation(self);
}
static void construct(AstDumper *self) {
MEMBER_FUNCTION_PROLOGUE;
CONSTRUCT_VISITOR(self, dump);
self->indentation = 0u;
}
static void destruct(AstDumper *self) {}
static AstDumper *newAstDumper(void) {
AstDumper *visitor = (AstDumper *)myMalloc(sizeof(AstDumper));
gAstDumper.construct(visitor);
return visitor;
}
static void freeAstDumper(AstDumper *self) { free(self); }
const IAstDumper gAstDumper = {
.construct = construct,
.destruct = destruct,
.operatorNew = newAstDumper,
.operatorDelete = freeAstDumper,
.visitProgramNode = (VisitProgramNodeFPtr)dumpProgramNode,
.visitDeclNode = (VisitDeclNodeFPtr)dumpDeclNode,
.visitVariableNode = (VisitVariableNodeFPtr)dumpVariableNode,
.visitConstantValueNode = (VisitConstantValueNodeFPtr)dumpConstantValueNode,
.visitFunctionNode = (VisitFunctionNodeFPtr)dumpFunctionNode,
.visitCompoundStatementNode =
(VisitCompoundStatementNodeFPtr)dumpCompoundStatementNode,
.visitPrintNode = (VisitPrintNodeFPtr)dumpPrintNode,
.visitBinaryOperatorNode =
(VisitBinaryOperatorNodeFPtr)dumpBinaryOperatorNode,
.visitUnaryOperatorNode = (VisitUnaryOperatorNodeFPtr)dumpUnaryOperatorNode,
.visitFunctionInvocationNode =
(VisitFunctionInvocationNodeFPtr)dumpFunctionInvocationNode,
.visitVariableReferenceNode =
(VisitVariableReferenceNodeFPtr)dumpVariableReferenceNode,
.visitAssignmentNode = (VisitAssignmentNodeFPtr)dumpAssignmentNode,
.visitReadNode = (VisitReadNodeFPtr)dumpReadNode,
.visitIfNode = (VisitIfNodeFPtr)dumpIfNode,
.visitWhileNode = (VisitWhileNodeFPtr)dumpWhileNode,
.visitForNode = (VisitForNodeFPtr)dumpForNode,
.visitReturnNode = (VisitReturnNodeFPtr)dumpReturnNode};
// FIXME: remove this line if you choose to use visitor pattern with this template
#endif
|
759961.c | /*
* This file is part of the MicroPython project, http://micropython.org/
*
* The MIT License (MIT)
*
* SPDX-FileCopyrightText: Copyright (c) 2016 Damien P. George
* Copyright (c) 2019 Artur Pacholec
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "shared-bindings/microcontroller/Pin.h"
#include "shared-bindings/microcontroller/__init__.h"
#include "shared-bindings/busio/UART.h"
#include "mpconfigport.h"
#include "lib/utils/interrupt_char.h"
#include "supervisor/shared/tick.h"
#include "py/gc.h"
#include "py/mperrno.h"
#include "py/runtime.h"
#include "py/stream.h"
#include "periph.h"
#include "fsl_lpuart.h"
// arrays use 0 based numbering: UART1 is stored at index 0
#define MAX_UART 8
STATIC bool reserved_uart[MAX_UART];
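// LPUART source clock: the USB1 PLL output divided by 6, then by the configured UART clock divider (+1).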
#define UART_CLOCK_FREQ ((CLOCK_GetPllFreq(kCLOCK_PllUsb1) / 6U) / (CLOCK_GetDiv(kCLOCK_UartDiv) + 1U))
static void config_periph_pin(const mcu_periph_obj_t *periph) {
IOMUXC_SetPinMux(
periph->pin->mux_reg, periph->mux_mode,
periph->input_reg, periph->input_idx,
0,
0);
IOMUXC_SetPinConfig(0, 0, 0, 0,
periph->pin->cfg_reg,
IOMUXC_SW_PAD_CTL_PAD_HYS(0)
| IOMUXC_SW_PAD_CTL_PAD_PUS(1)
| IOMUXC_SW_PAD_CTL_PAD_PUE(1)
| IOMUXC_SW_PAD_CTL_PAD_PKE(1)
| IOMUXC_SW_PAD_CTL_PAD_ODE(0)
| IOMUXC_SW_PAD_CTL_PAD_SPEED(1)
| IOMUXC_SW_PAD_CTL_PAD_DSE(6)
| IOMUXC_SW_PAD_CTL_PAD_SRE(0));
}
void LPUART_UserCallback(LPUART_Type *base, lpuart_handle_t *handle, status_t status, void *user_data) {
busio_uart_obj_t *self = (busio_uart_obj_t *)user_data;
if (status == kStatus_LPUART_RxIdle) {
self->rx_ongoing = false;
}
}
void uart_reset(void) {
for (uint i = 0; i < MP_ARRAY_SIZE(mcu_uart_banks); i++) {
reserved_uart[i] = false;
LPUART_Deinit(mcu_uart_banks[i]);
}
}
void common_hal_busio_uart_construct(busio_uart_obj_t *self,
const mcu_pin_obj_t *tx, const mcu_pin_obj_t *rx,
const mcu_pin_obj_t *rts, const mcu_pin_obj_t *cts,
const mcu_pin_obj_t *rs485_dir, bool rs485_invert,
uint32_t baudrate, uint8_t bits, busio_uart_parity_t parity, uint8_t stop,
mp_float_t timeout, uint16_t receiver_buffer_size, byte *receiver_buffer,
bool sigint_enabled) {
self->baudrate = baudrate;
self->character_bits = bits;
self->timeout_ms = timeout * 1000;
if (self->character_bits != 7 && self->character_bits != 8) {
mp_raise_ValueError(translate("Invalid word/bit length"));
}
// We are transmitting one direction if one pin is NULL and the other isn't.
bool is_onedirection = (rx == NULL) != (tx == NULL);
bool uart_taken = false;
const uint32_t rx_count = MP_ARRAY_SIZE(mcu_uart_rx_list);
const uint32_t tx_count = MP_ARRAY_SIZE(mcu_uart_tx_list);
// RX loop handles rx only, or both rx and tx
if (rx != NULL) {
for (uint32_t i = 0; i < rx_count; ++i) {
if (mcu_uart_rx_list[i].pin != rx) {
continue;
}
// If TX is on, keep looking, else stop
if (tx != NULL) {
for (uint32_t j = 0; j < tx_count; ++j) {
if (mcu_uart_tx_list[j].pin != tx ||
mcu_uart_tx_list[j].bank_idx != mcu_uart_rx_list[i].bank_idx) {
continue;
}
// If UART is taken, break (pins never have >1 periph)
if (reserved_uart[mcu_uart_rx_list[i].bank_idx - 1]) {
uart_taken = true;
break;
}
self->rx = &mcu_uart_rx_list[i];
self->tx = &mcu_uart_tx_list[j];
break;
}
if (self->tx != NULL || uart_taken) {
break;
}
} else {
if (reserved_uart[mcu_uart_rx_list[i].bank_idx - 1]) {
uart_taken = true;
break;
}
self->rx = &mcu_uart_rx_list[i];
}
}
} else if (tx != NULL) {
// TX only case
for (uint32_t i = 0; i < tx_count; ++i) {
if (mcu_uart_tx_list[i].pin != tx) {
continue;
}
if (reserved_uart[mcu_uart_tx_list[i].bank_idx - 1]) {
uart_taken = true;
break;
}
self->tx = &mcu_uart_tx_list[i];
break;
}
} else {
mp_raise_ValueError(translate("Supply at least one UART pin"));
}
if (rx && !self->rx) {
mp_raise_ValueError_varg(translate("Invalid %q pin"), MP_QSTR_RX);
}
if (tx && !self->tx) {
mp_raise_ValueError_varg(translate("Invalid %q pin"), MP_QSTR_TX);
}
if (uart_taken) {
mp_raise_ValueError(translate("Hardware in use, try alternative pins"));
}
if (is_onedirection && ((rts != NULL) || (cts != NULL))) {
mp_raise_ValueError(translate("Both RX and TX required for flow control"));
}
// Filter for sane settings for RS485
if (rs485_dir != NULL) {
if ((rts != NULL) || (cts != NULL)) {
mp_raise_ValueError(translate("Cannot specify RTS or CTS in RS485 mode"));
}
// For IMXRT the RTS pin is used for RS485 direction
rts = rs485_dir;
} else {
if (rs485_invert) {
mp_raise_ValueError(translate("RS485 inversion specified when not in RS485 mode"));
}
}
// Now check for RTS/CTS (or overloaded RS485 direction) pin(s)
const uint32_t rts_count = MP_ARRAY_SIZE(mcu_uart_rts_list);
const uint32_t cts_count = MP_ARRAY_SIZE(mcu_uart_cts_list);
if (rts != NULL) {
for (uint32_t i = 0; i < rts_count; ++i) {
if (mcu_uart_rts_list[i].bank_idx == self->rx->bank_idx) {
if (mcu_uart_rts_list[i].pin == rts) {
self->rts = &mcu_uart_rts_list[i];
break;
}
}
}
if (self->rts == NULL) {
mp_raise_ValueError_varg(translate("Invalid %q pin"), MP_QSTR_RTS);
}
}
if (cts != NULL) {
for (uint32_t i = 0; i < cts_count; ++i) {
if (mcu_uart_cts_list[i].bank_idx == self->rx->bank_idx) {
if (mcu_uart_cts_list[i].pin == cts) {
self->cts = &mcu_uart_cts_list[i];
break;
}
}
}
if (self->cts == NULL) {
mp_raise_ValueError_varg(translate("Invalid %q pin"), MP_QSTR_CTS);
}
}
if (self->rx) {
self->uart = mcu_uart_banks[self->rx->bank_idx - 1];
} else {
assert(self->tx);
self->uart = mcu_uart_banks[self->tx->bank_idx - 1];
}
assert(self->uart);
if (self->rx) {
config_periph_pin(self->rx);
}
if (self->tx) {
config_periph_pin(self->tx);
}
if (self->rts) {
config_periph_pin(self->rts);
}
if (self->cts) {
config_periph_pin(self->cts);
}
lpuart_config_t config = { 0 };
LPUART_GetDefaultConfig(&config);
config.dataBitsCount = self->character_bits == 8 ? kLPUART_EightDataBits : kLPUART_SevenDataBits;
config.baudRate_Bps = self->baudrate;
config.enableTx = self->tx != NULL;
config.enableRx = self->rx != NULL;
config.enableRxRTS = self->rts != NULL;
config.enableTxCTS = self->cts != NULL;
if (self->rts != NULL) {
claim_pin(self->rts->pin);
}
if (self->cts != NULL) {
claim_pin(self->cts->pin);
}
LPUART_Init(self->uart, &config, UART_CLOCK_FREQ);
    // Now that the UART is initialized, set up the RS485 direction pin;
    // ..unfortunately this isn't done by the driver library.
uint32_t modir = (self->uart->MODIR) & ~(LPUART_MODIR_TXRTSPOL_MASK | LPUART_MODIR_TXRTSE_MASK);
if (rs485_dir != NULL) {
modir |= LPUART_MODIR_TXRTSE_MASK;
if (rs485_invert) {
modir |= LPUART_MODIR_TXRTSPOL_MASK;
}
}
self->uart->MODIR = modir;
if (self->tx != NULL) {
claim_pin(self->tx->pin);
}
if (self->rx != NULL) {
// The LPUART ring buffer wastes one byte to distinguish between full and empty.
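        // (e.g. a request for 64 bytes of buffering allocates 65, so head == tail can always mean "empty").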
self->ringbuf = gc_alloc(receiver_buffer_size + 1, false, true /*long-lived*/);
if (!self->ringbuf) {
LPUART_Deinit(self->uart);
mp_raise_msg(&mp_type_MemoryError, translate("Failed to allocate RX buffer"));
}
LPUART_TransferCreateHandle(self->uart, &self->handle, LPUART_UserCallback, self);
// Pass actual allocated size; the LPUART routines are cognizant that
// the capacity is one less than the size.
LPUART_TransferStartRingBuffer(self->uart, &self->handle, self->ringbuf, receiver_buffer_size + 1);
claim_pin(self->rx->pin);
}
}
bool common_hal_busio_uart_deinited(busio_uart_obj_t *self) {
return self->rx == NULL && self->tx == NULL;
}
void common_hal_busio_uart_deinit(busio_uart_obj_t *self) {
if (common_hal_busio_uart_deinited(self)) {
return;
}
if (self->rx) {
reserved_uart[self->rx->bank_idx - 1] = false;
} else {
reserved_uart[self->tx->bank_idx - 1] = false;
}
LPUART_Deinit(self->uart);
gc_free(self->ringbuf);
    if (self->rx) {
        common_hal_reset_pin(self->rx->pin);
    }
    if (self->tx) {
        common_hal_reset_pin(self->tx->pin);
    }
self->rx = NULL;
self->tx = NULL;
}
// Read characters.
size_t common_hal_busio_uart_read(busio_uart_obj_t *self, uint8_t *data, size_t len, int *errcode) {
if (self->rx == NULL) {
mp_raise_ValueError(translate("No RX pin"));
}
if (len == 0) {
// Nothing to read.
return 0;
}
lpuart_transfer_t xfer = {
.data = data,
.dataSize = len,
};
self->rx_ongoing = true;
LPUART_TransferReceiveNonBlocking(self->uart, &self->handle, &xfer, NULL);
uint64_t start_ticks = supervisor_ticks_ms64();
// Wait for all bytes received or timeout
while (self->rx_ongoing && (supervisor_ticks_ms64() - start_ticks < self->timeout_ms)) {
RUN_BACKGROUND_TASKS;
// Allow user to break out of a timeout with a KeyboardInterrupt.
if (mp_hal_is_interrupted()) {
break;
}
}
// if we timed out, stop the transfer
if (self->rx_ongoing) {
LPUART_TransferAbortReceive(self->uart, &self->handle);
}
// No data left, we got it all
if (self->handle.rxData == NULL) {
return len;
}
    // The only place we can reliably tell how many bytes were received is the handle's
    // current write pointer (rxData): the abort clears rxDataSize, and reading it before
    // the abort would race with the interrupt handler.
return self->handle.rxData - data;
}
// Write characters.
size_t common_hal_busio_uart_write(busio_uart_obj_t *self, const uint8_t *data, size_t len, int *errcode) {
if (self->tx == NULL) {
mp_raise_ValueError(translate("No TX pin"));
}
LPUART_WriteBlocking(self->uart, data, len);
return len;
}
uint32_t common_hal_busio_uart_get_baudrate(busio_uart_obj_t *self) {
return self->baudrate;
}
void common_hal_busio_uart_set_baudrate(busio_uart_obj_t *self, uint32_t baudrate) {
if (LPUART_SetBaudRate(self->uart, baudrate, UART_CLOCK_FREQ) == kStatus_Success) {
self->baudrate = baudrate;
}
}
mp_float_t common_hal_busio_uart_get_timeout(busio_uart_obj_t *self) {
return (mp_float_t)(self->timeout_ms / 1000.0f);
}
void common_hal_busio_uart_set_timeout(busio_uart_obj_t *self, mp_float_t timeout) {
self->timeout_ms = timeout * 1000;
}
uint32_t common_hal_busio_uart_rx_characters_available(busio_uart_obj_t *self) {
return LPUART_TransferGetRxRingBufferLength(self->uart, &self->handle);
}
void common_hal_busio_uart_clear_rx_buffer(busio_uart_obj_t *self) {
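    // Collapse the ring buffer indices so it reads as empty; any buffered bytes are simply discarded.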
self->handle.rxRingBufferHead = self->handle.rxRingBufferTail;
}
bool common_hal_busio_uart_ready_to_tx(busio_uart_obj_t *self) {
if (self->tx == NULL) {
return false;
}
return LPUART_GetStatusFlags(self->uart) & kLPUART_TxDataRegEmptyFlag;
}
|
638356.c | /*
* Copyright (c) 2004
* Bill Paul <wpaul@windriver.com>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD: src/sys/dev/vge/if_vge.c,v 1.24 2006/02/14 12:44:56 glebius Exp $
*/
/*
* VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
*
* Written by Bill Paul <wpaul@windriver.com>
* Senior Networking Software Engineer
* Wind River Systems
*/
/*
* The VIA Networking VT6122 is a 32bit, 33/66Mhz PCI device that
* combines a tri-speed ethernet MAC and PHY, with the following
* features:
*
* o Jumbo frame support up to 16K
* o Transmit and receive flow control
* o IPv4 checksum offload
* o VLAN tag insertion and stripping
* o TCP large send
* o 64-bit multicast hash table filter
* o 64 entry CAM filter
* o 16K RX FIFO and 48K TX FIFO memory
* o Interrupt moderation
*
* The VT6122 supports up to four transmit DMA queues. The descriptors
* in the transmit ring can address up to 7 data fragments; frames which
* span more than 7 data buffers must be coalesced, but in general the
* BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
* long. The receive descriptors address only a single buffer.
*
* There are two peculiar design issues with the VT6122. One is that
* receive data buffers must be aligned on a 32-bit boundary. This is
* not a problem where the VT6122 is used as a LOM device in x86-based
* systems, but on architectures that generate unaligned access traps, we
* have to do some copying.
*
* The other issue has to do with the way 64-bit addresses are handled.
* The DMA descriptors only allow you to specify 48 bits of addressing
* information. The remaining 16 bits are specified using one of the
* I/O registers. If you only have a 32-bit system, then this isn't
* an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you have to make sure your network data buffers reside
* in the same 48-bit 'segment.'
*
* Special thanks to Ryan Fu at VIA Networking for providing documentation
* and sample NICs for testing.
*/
#include "opt_ifpoll.h"
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/serialize.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/interrupt.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/bpf.h>
#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>
#include "miibus_if.h"
#include <dev/netif/vge/if_vgereg.h>
#include <dev/netif/vge/if_vgevar.h>
#define VGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
/*
* Various supported device vendors/types and their names.
*/
static const struct vge_type vge_devs[] = {
{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT612X,
"VIA Networking Gigabit Ethernet" },
{ 0, 0, NULL }
};
static int vge_probe (device_t);
static int vge_attach (device_t);
static int vge_detach (device_t);
static int vge_encap (struct vge_softc *, struct mbuf *, int);
static void vge_dma_map_addr (void *, bus_dma_segment_t *, int, int);
static void vge_dma_map_rx_desc (void *, bus_dma_segment_t *, int,
bus_size_t, int);
static void vge_dma_map_tx_desc (void *, bus_dma_segment_t *, int,
bus_size_t, int);
static int vge_dma_alloc (device_t);
static void vge_dma_free (struct vge_softc *);
static int vge_newbuf (struct vge_softc *, int, struct mbuf *);
static int vge_rx_list_init (struct vge_softc *);
static int vge_tx_list_init (struct vge_softc *);
#ifdef VGE_FIXUP_RX
static __inline void vge_fixup_rx
(struct mbuf *);
#endif
static void vge_rxeof (struct vge_softc *, int);
static void vge_txeof (struct vge_softc *);
static void vge_intr (void *);
static void vge_tick (struct vge_softc *);
static void vge_start (struct ifnet *, struct ifaltq_subque *);
static int vge_ioctl (struct ifnet *, u_long, caddr_t,
struct ucred *);
static void vge_init (void *);
static void vge_stop (struct vge_softc *);
static void vge_watchdog (struct ifnet *);
static int vge_suspend (device_t);
static int vge_resume (device_t);
static void vge_shutdown (device_t);
static int vge_ifmedia_upd (struct ifnet *);
static void vge_ifmedia_sts (struct ifnet *, struct ifmediareq *);
#ifdef VGE_EEPROM
static void vge_eeprom_getword (struct vge_softc *, int, u_int16_t *);
#endif
static void vge_read_eeprom (struct vge_softc *, uint8_t *, int, int, int);
static void vge_miipoll_start (struct vge_softc *);
static void vge_miipoll_stop (struct vge_softc *);
static int vge_miibus_readreg (device_t, int, int);
static int vge_miibus_writereg (device_t, int, int, int);
static void vge_miibus_statchg (device_t);
static void vge_cam_clear (struct vge_softc *);
static int vge_cam_set (struct vge_softc *, uint8_t *);
static void vge_setmulti (struct vge_softc *);
static void vge_reset (struct vge_softc *);
#ifdef IFPOLL_ENABLE
static void vge_npoll(struct ifnet *, struct ifpoll_info *);
static void vge_npoll_compat(struct ifnet *, void *, int);
static void vge_disable_intr(struct vge_softc *);
#endif
static void vge_enable_intr(struct vge_softc *, uint32_t);
#define VGE_PCI_LOIO 0x10
#define VGE_PCI_LOMEM 0x14
static device_method_t vge_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, vge_probe),
DEVMETHOD(device_attach, vge_attach),
DEVMETHOD(device_detach, vge_detach),
DEVMETHOD(device_suspend, vge_suspend),
DEVMETHOD(device_resume, vge_resume),
DEVMETHOD(device_shutdown, vge_shutdown),
/* bus interface */
DEVMETHOD(bus_print_child, bus_generic_print_child),
DEVMETHOD(bus_driver_added, bus_generic_driver_added),
/* MII interface */
DEVMETHOD(miibus_readreg, vge_miibus_readreg),
DEVMETHOD(miibus_writereg, vge_miibus_writereg),
DEVMETHOD(miibus_statchg, vge_miibus_statchg),
DEVMETHOD_END
};
static driver_t vge_driver = {
"vge",
vge_methods,
sizeof(struct vge_softc)
};
static devclass_t vge_devclass;
DECLARE_DUMMY_MODULE(if_vge);
MODULE_DEPEND(if_vge, miibus, 1, 1, 1);
DRIVER_MODULE(if_vge, pci, vge_driver, vge_devclass, NULL, NULL);
DRIVER_MODULE(if_vge, cardbus, vge_driver, vge_devclass, NULL, NULL);
DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, NULL, NULL);
#ifdef VGE_EEPROM
/*
* Read a word of data stored in the EEPROM at address 'addr.'
*/
static void
vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest)
{
uint16_t word = 0;
int i;
/*
* Enter EEPROM embedded programming mode. In order to
* access the EEPROM at all, we first have to set the
* EELOAD bit in the CHIPCFG2 register.
*/
CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
/* Select the address of the word we want to read */
CSR_WRITE_1(sc, VGE_EEADDR, addr);
/* Issue read command */
CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);
/* Wait for the done bit to be set. */
for (i = 0; i < VGE_TIMEOUT; i++) {
if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
break;
}
if (i == VGE_TIMEOUT) {
device_printf(sc->vge_dev, "EEPROM read timed out\n");
*dest = 0;
return;
}
/* Read the result */
word = CSR_READ_2(sc, VGE_EERDDAT);
/* Turn off EEPROM access mode. */
CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
*dest = word;
}
#endif
/*
* Read a sequence of words from the EEPROM.
*/
static void
vge_read_eeprom(struct vge_softc *sc, uint8_t *dest, int off, int cnt, int swap)
{
int i;
#ifdef VGE_EEPROM
uint16_t word = 0, *ptr;
for (i = 0; i < cnt; i++) {
vge_eeprom_getword(sc, off + i, &word);
ptr = (uint16_t *)(dest + (i * 2));
if (swap)
*ptr = ntohs(word);
else
*ptr = word;
}
#else
for (i = 0; i < ETHER_ADDR_LEN; i++)
dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
#endif
}
static void
vge_miipoll_stop(struct vge_softc *sc)
{
int i;
CSR_WRITE_1(sc, VGE_MIICMD, 0);
for (i = 0; i < VGE_TIMEOUT; i++) {
DELAY(1);
if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
break;
}
if (i == VGE_TIMEOUT)
if_printf(&sc->arpcom.ac_if, "failed to idle MII autopoll\n");
}
static void
vge_miipoll_start(struct vge_softc *sc)
{
int i;
/* First, make sure we're idle. */
CSR_WRITE_1(sc, VGE_MIICMD, 0);
CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);
for (i = 0; i < VGE_TIMEOUT; i++) {
DELAY(1);
if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
break;
}
if (i == VGE_TIMEOUT) {
if_printf(&sc->arpcom.ac_if, "failed to idle MII autopoll\n");
return;
}
/* Now enable auto poll mode. */
CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);
/* And make sure it started. */
for (i = 0; i < VGE_TIMEOUT; i++) {
DELAY(1);
if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
break;
}
if (i == VGE_TIMEOUT)
if_printf(&sc->arpcom.ac_if, "failed to start MII autopoll\n");
}
static int
vge_miibus_readreg(device_t dev, int phy, int reg)
{
struct vge_softc *sc;
int i;
uint16_t rval = 0;
sc = device_get_softc(dev);
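	/* Only the PHY address programmed into VGE_MIICFG is serviced; ignore any other address. */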
if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
return(0);
vge_miipoll_stop(sc);
/* Specify the register we want to read. */
CSR_WRITE_1(sc, VGE_MIIADDR, reg);
/* Issue read command. */
CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);
/* Wait for the read command bit to self-clear. */
for (i = 0; i < VGE_TIMEOUT; i++) {
DELAY(1);
if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
break;
}
if (i == VGE_TIMEOUT)
if_printf(&sc->arpcom.ac_if, "MII read timed out\n");
else
rval = CSR_READ_2(sc, VGE_MIIDATA);
vge_miipoll_start(sc);
return (rval);
}
static int
vge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
struct vge_softc *sc;
int i, rval = 0;
sc = device_get_softc(dev);
if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
return(0);
vge_miipoll_stop(sc);
/* Specify the register we want to write. */
CSR_WRITE_1(sc, VGE_MIIADDR, reg);
/* Specify the data we want to write. */
CSR_WRITE_2(sc, VGE_MIIDATA, data);
/* Issue write command. */
CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
/* Wait for the write command bit to self-clear. */
for (i = 0; i < VGE_TIMEOUT; i++) {
DELAY(1);
if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
break;
}
if (i == VGE_TIMEOUT) {
if_printf(&sc->arpcom.ac_if, "MII write timed out\n");
rval = EIO;
}
vge_miipoll_start(sc);
return (rval);
}
static void
vge_cam_clear(struct vge_softc *sc)
{
int i;
/*
* Turn off all the mask bits. This tells the chip
* that none of the entries in the CAM filter are valid.
	 * Desired entries will be enabled as we fill the filter in.
*/
CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
for (i = 0; i < 8; i++)
CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
/* Clear the VLAN filter too. */
CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
for (i = 0; i < 8; i++)
CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
CSR_WRITE_1(sc, VGE_CAMADDR, 0);
CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
sc->vge_camidx = 0;
}
static int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
int i, error = 0;
if (sc->vge_camidx == VGE_CAM_MAXADDRS)
return(ENOSPC);
/* Select the CAM data page. */
CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);
/* Set the filter entry we want to update and enable writing. */
CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);
/* Write the address to the CAM registers */
for (i = 0; i < ETHER_ADDR_LEN; i++)
CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
/* Issue a write command. */
CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
	/* Wait for it to clear. */
for (i = 0; i < VGE_TIMEOUT; i++) {
DELAY(1);
if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
break;
}
if (i == VGE_TIMEOUT) {
if_printf(&sc->arpcom.ac_if, "setting CAM filter failed\n");
error = EIO;
goto fail;
}
/* Select the CAM mask page. */
CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
/* Set the mask bit that enables this filter. */
CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
1<<(sc->vge_camidx & 7));
sc->vge_camidx++;
fail:
/* Turn off access to CAM. */
CSR_WRITE_1(sc, VGE_CAMADDR, 0);
CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
return (error);
}
/*
* Program the multicast filter. We use the 64-entry CAM filter
 * for perfect filtering. If there are more than 64 multicast addresses,
 * we use the hash filter instead.
*/
static void
vge_setmulti(struct vge_softc *sc)
{
struct ifnet *ifp = &sc->arpcom.ac_if;
int error = 0;
struct ifmultiaddr *ifma;
uint32_t h, hashes[2] = { 0, 0 };
/* First, zot all the multicast entries. */
vge_cam_clear(sc);
CSR_WRITE_4(sc, VGE_MAR0, 0);
CSR_WRITE_4(sc, VGE_MAR1, 0);
/*
* If the user wants allmulti or promisc mode, enable reception
* of all multicast frames.
*/
if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
return;
}
/* Now program new ones */
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
continue;
error = vge_cam_set(sc,
LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
if (error)
break;
}
/* If there were too many addresses, use the hash filter. */
if (error) {
vge_cam_clear(sc);
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
continue;
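			/* The top 6 bits of the CRC select one of 64 hash bits, split across the MAR0/MAR1 registers. */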
h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
if (h < 32)
hashes[0] |= (1 << h);
else
hashes[1] |= (1 << (h - 32));
}
CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
}
}
static void
vge_reset(struct vge_softc *sc)
{
int i;
CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
for (i = 0; i < VGE_TIMEOUT; i++) {
DELAY(5);
if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
break;
}
if (i == VGE_TIMEOUT) {
if_printf(&sc->arpcom.ac_if, "soft reset timed out");
CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
DELAY(2000);
}
DELAY(5000);
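	/* Reload the chip's EEPROM contents (station address etc.) and wait for the autoload to finish. */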
CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
for (i = 0; i < VGE_TIMEOUT; i++) {
DELAY(5);
if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
break;
}
if (i == VGE_TIMEOUT) {
if_printf(&sc->arpcom.ac_if, "EEPROM reload timed out\n");
return;
}
CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
}
/*
* Probe for a VIA gigabit chip. Check the PCI vendor and device
* IDs against our list and return a device name if we find a match.
*/
static int
vge_probe(device_t dev)
{
const struct vge_type *t;
uint16_t did, vid;
did = pci_get_device(dev);
vid = pci_get_vendor(dev);
for (t = vge_devs; t->vge_name != NULL; ++t) {
if (vid == t->vge_vid && did == t->vge_did) {
device_set_desc(dev, t->vge_name);
return 0;
}
}
return (ENXIO);
}
static void
vge_dma_map_rx_desc(void *arg, bus_dma_segment_t *segs, int nseg,
bus_size_t mapsize, int error)
{
struct vge_dmaload_arg *ctx;
struct vge_rx_desc *d = NULL;
if (error)
return;
ctx = arg;
/* Signal error to caller if there's too many segments */
if (nseg > ctx->vge_maxsegs) {
ctx->vge_maxsegs = 0;
return;
}
/*
* Map the segment array into descriptors.
*/
d = &ctx->sc->vge_ldata.vge_rx_list[ctx->vge_idx];
/* If this descriptor is still owned by the chip, bail. */
if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
if_printf(&ctx->sc->arpcom.ac_if,
"tried to map busy descriptor\n");
ctx->vge_maxsegs = 0;
return;
}
d->vge_buflen = htole16(VGE_BUFLEN(segs[0].ds_len) | VGE_RXDESC_I);
d->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
d->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
d->vge_sts = 0;
d->vge_ctl = 0;
ctx->vge_maxsegs = 1;
}
static void
vge_dma_map_tx_desc(void *arg, bus_dma_segment_t *segs, int nseg,
bus_size_t mapsize, int error)
{
struct vge_dmaload_arg *ctx;
struct vge_tx_desc *d = NULL;
struct vge_tx_frag *f;
int i = 0;
if (error)
return;
ctx = arg;
/* Signal error to caller if there's too many segments */
if (nseg > ctx->vge_maxsegs) {
ctx->vge_maxsegs = 0;
return;
}
/* Map the segment array into descriptors. */
d = &ctx->sc->vge_ldata.vge_tx_list[ctx->vge_idx];
/* If this descriptor is still owned by the chip, bail. */
if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) {
ctx->vge_maxsegs = 0;
return;
}
for (i = 0; i < nseg; i++) {
f = &d->vge_frag[i];
f->vge_buflen = htole16(VGE_BUFLEN(segs[i].ds_len));
f->vge_addrlo = htole32(VGE_ADDR_LO(segs[i].ds_addr));
f->vge_addrhi = htole16(VGE_ADDR_HI(segs[i].ds_addr) & 0xFFFF);
}
/* Argh. This chip does not autopad short frames */
if (ctx->vge_m0->m_pkthdr.len < VGE_MIN_FRAMELEN) {
f = &d->vge_frag[i];
f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN -
ctx->vge_m0->m_pkthdr.len));
f->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
f->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
ctx->vge_m0->m_pkthdr.len = VGE_MIN_FRAMELEN;
i++;
}
/*
* When telling the chip how many segments there are, we
* must use nsegs + 1 instead of just nsegs. Darned if I
* know why.
*/
i++;
d->vge_sts = ctx->vge_m0->m_pkthdr.len << 16;
d->vge_ctl = ctx->vge_flags|(i << 28)|VGE_TD_LS_NORM;
if (ctx->vge_m0->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN)
d->vge_ctl |= VGE_TDCTL_JUMBO;
ctx->vge_maxsegs = nseg;
}
/*
* Map a single buffer address.
*/
static void
vge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
if (error)
return;
KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
*((bus_addr_t *)arg) = segs->ds_addr;
}
static int
vge_dma_alloc(device_t dev)
{
struct vge_softc *sc = device_get_softc(dev);
int error, nseg, i, tx_pos = 0, rx_pos = 0;
/*
* Allocate the parent bus DMA tag appropriate for PCI.
*/
#define VGE_NSEG_NEW 32
error = bus_dma_tag_create(NULL, /* parent */
1, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
MAXBSIZE, VGE_NSEG_NEW, /* maxsize, nsegments */
BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
BUS_DMA_ALLOCNOW, /* flags */
&sc->vge_parent_tag);
if (error) {
device_printf(dev, "can't create parent dma tag\n");
return error;
}
/*
* Allocate map for RX mbufs.
*/
nseg = 32;
error = bus_dma_tag_create(sc->vge_parent_tag, ETHER_ALIGN, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
NULL, NULL,
MCLBYTES * nseg, nseg, MCLBYTES,
BUS_DMA_ALLOCNOW, &sc->vge_ldata.vge_mtag);
if (error) {
device_printf(dev, "could not allocate mbuf dma tag\n");
return error;
}
/*
* Allocate map for TX descriptor list.
*/
error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
NULL, NULL,
VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ,
BUS_DMA_ALLOCNOW,
&sc->vge_ldata.vge_tx_list_tag);
if (error) {
device_printf(dev, "could not allocate tx list dma tag\n");
return error;
}
/* Allocate DMA'able memory for the TX ring */
error = bus_dmamem_alloc(sc->vge_ldata.vge_tx_list_tag,
(void **)&sc->vge_ldata.vge_tx_list,
BUS_DMA_WAITOK | BUS_DMA_ZERO,
&sc->vge_ldata.vge_tx_list_map);
if (error) {
device_printf(dev, "could not allocate tx list dma memory\n");
return error;
}
/* Load the map for the TX ring. */
error = bus_dmamap_load(sc->vge_ldata.vge_tx_list_tag,
sc->vge_ldata.vge_tx_list_map,
sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ,
vge_dma_map_addr,
&sc->vge_ldata.vge_tx_list_addr,
BUS_DMA_WAITOK);
if (error) {
device_printf(dev, "could not load tx list\n");
bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
sc->vge_ldata.vge_tx_list,
sc->vge_ldata.vge_tx_list_map);
sc->vge_ldata.vge_tx_list = NULL;
return error;
}
/* Create DMA maps for TX buffers */
for (i = 0; i < VGE_TX_DESC_CNT; i++) {
error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
&sc->vge_ldata.vge_tx_dmamap[i]);
if (error) {
device_printf(dev, "can't create DMA map for TX\n");
tx_pos = i;
goto map_fail;
}
}
tx_pos = VGE_TX_DESC_CNT;
/*
* Allocate map for RX descriptor list.
*/
error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
NULL, NULL,
				   VGE_RX_LIST_SZ, 1, VGE_RX_LIST_SZ,
BUS_DMA_ALLOCNOW,
&sc->vge_ldata.vge_rx_list_tag);
if (error) {
device_printf(dev, "could not allocate rx list dma tag\n");
return error;
}
/* Allocate DMA'able memory for the RX ring */
error = bus_dmamem_alloc(sc->vge_ldata.vge_rx_list_tag,
(void **)&sc->vge_ldata.vge_rx_list,
BUS_DMA_WAITOK | BUS_DMA_ZERO,
&sc->vge_ldata.vge_rx_list_map);
if (error) {
device_printf(dev, "could not allocate rx list dma memory\n");
return error;
}
/* Load the map for the RX ring. */
error = bus_dmamap_load(sc->vge_ldata.vge_rx_list_tag,
sc->vge_ldata.vge_rx_list_map,
				 sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ,
vge_dma_map_addr,
&sc->vge_ldata.vge_rx_list_addr,
BUS_DMA_WAITOK);
if (error) {
device_printf(dev, "could not load rx list\n");
bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
sc->vge_ldata.vge_rx_list,
sc->vge_ldata.vge_rx_list_map);
sc->vge_ldata.vge_rx_list = NULL;
return error;
}
/* Create DMA maps for RX buffers */
for (i = 0; i < VGE_RX_DESC_CNT; i++) {
error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
&sc->vge_ldata.vge_rx_dmamap[i]);
if (error) {
device_printf(dev, "can't create DMA map for RX\n");
rx_pos = i;
goto map_fail;
}
}
return (0);
map_fail:
for (i = 0; i < tx_pos; ++i) {
error = bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
sc->vge_ldata.vge_tx_dmamap[i]);
}
for (i = 0; i < rx_pos; ++i) {
error = bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
sc->vge_ldata.vge_rx_dmamap[i]);
}
bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
sc->vge_ldata.vge_mtag = NULL;
return error;
}
static void
vge_dma_free(struct vge_softc *sc)
{
/* Unload and free the RX DMA ring memory and map */
if (sc->vge_ldata.vge_rx_list_tag) {
bus_dmamap_unload(sc->vge_ldata.vge_rx_list_tag,
sc->vge_ldata.vge_rx_list_map);
bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
sc->vge_ldata.vge_rx_list,
sc->vge_ldata.vge_rx_list_map);
}
if (sc->vge_ldata.vge_rx_list_tag)
bus_dma_tag_destroy(sc->vge_ldata.vge_rx_list_tag);
/* Unload and free the TX DMA ring memory and map */
if (sc->vge_ldata.vge_tx_list_tag) {
bus_dmamap_unload(sc->vge_ldata.vge_tx_list_tag,
sc->vge_ldata.vge_tx_list_map);
bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
sc->vge_ldata.vge_tx_list,
sc->vge_ldata.vge_tx_list_map);
}
if (sc->vge_ldata.vge_tx_list_tag)
bus_dma_tag_destroy(sc->vge_ldata.vge_tx_list_tag);
/* Destroy all the RX and TX buffer maps */
if (sc->vge_ldata.vge_mtag) {
int i;
for (i = 0; i < VGE_TX_DESC_CNT; i++) {
bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
sc->vge_ldata.vge_tx_dmamap[i]);
}
for (i = 0; i < VGE_RX_DESC_CNT; i++) {
bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
sc->vge_ldata.vge_rx_dmamap[i]);
}
bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
}
if (sc->vge_parent_tag)
bus_dma_tag_destroy(sc->vge_parent_tag);
}
/*
* Attach the interface. Allocate softc structures, do ifmedia
* setup and ethernet/BPF attach.
*/
static int
vge_attach(device_t dev)
{
uint8_t eaddr[ETHER_ADDR_LEN];
struct vge_softc *sc;
struct ifnet *ifp;
int error = 0;
sc = device_get_softc(dev);
ifp = &sc->arpcom.ac_if;
/* Initialize if_xname early, so if_printf() can be used */
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
/*
* Map control/status registers.
*/
pci_enable_busmaster(dev);
sc->vge_res_rid = VGE_PCI_LOMEM;
sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&sc->vge_res_rid, RF_ACTIVE);
if (sc->vge_res == NULL) {
device_printf(dev, "couldn't map ports/memory\n");
return ENXIO;
}
sc->vge_btag = rman_get_bustag(sc->vge_res);
sc->vge_bhandle = rman_get_bushandle(sc->vge_res);
/* Allocate interrupt */
sc->vge_irq_rid = 0;
sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->vge_irq_rid,
RF_SHAREABLE | RF_ACTIVE);
if (sc->vge_irq == NULL) {
device_printf(dev, "couldn't map interrupt\n");
error = ENXIO;
goto fail;
}
/* Reset the adapter. */
vge_reset(sc);
/*
* Get station address from the EEPROM.
*/
vge_read_eeprom(sc, eaddr, VGE_EE_EADDR, 3, 0);
/* Allocate DMA related stuffs */
error = vge_dma_alloc(dev);
if (error)
goto fail;
/* Do MII setup */
error = mii_phy_probe(dev, &sc->vge_miibus, vge_ifmedia_upd,
vge_ifmedia_sts);
if (error) {
device_printf(dev, "MII without any phy!\n");
goto fail;
}
ifp->if_softc = sc;
ifp->if_mtu = ETHERMTU;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_init = vge_init;
ifp->if_start = vge_start;
ifp->if_watchdog = vge_watchdog;
ifp->if_ioctl = vge_ioctl;
#ifdef IFPOLL_ENABLE
ifp->if_npoll = vge_npoll;
#endif
ifp->if_hwassist = VGE_CSUM_FEATURES;
ifp->if_capabilities = IFCAP_VLAN_MTU |
IFCAP_HWCSUM |
IFCAP_VLAN_HWTAGGING;
ifp->if_capenable = ifp->if_capabilities;
ifq_set_maxlen(&ifp->if_snd, VGE_IFQ_MAXLEN);
ifq_set_ready(&ifp->if_snd);
/*
* Call MI attach routine.
*/
ether_ifattach(ifp, eaddr, NULL);
ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->vge_irq));
#ifdef IFPOLL_ENABLE
ifpoll_compat_setup(&sc->vge_npoll, NULL, NULL, device_get_unit(dev),
ifp->if_serializer);
#endif
/* Hook interrupt last to avoid having to lock softc */
error = bus_setup_intr(dev, sc->vge_irq, INTR_MPSAFE, vge_intr, sc,
&sc->vge_intrhand, ifp->if_serializer);
if (error) {
device_printf(dev, "couldn't set up irq\n");
ether_ifdetach(ifp);
goto fail;
}
return 0;
fail:
vge_detach(dev);
return error;
}
/*
* Shutdown hardware and free up resources. This can be called any
* time after the mutex has been initialized. It is called in both
* the error case in attach and the normal detach case so it needs
* to be careful about only freeing resources that have actually been
* allocated.
*/
static int
vge_detach(device_t dev)
{
struct vge_softc *sc = device_get_softc(dev);
struct ifnet *ifp = &sc->arpcom.ac_if;
/* These should only be active if attach succeeded */
if (device_is_attached(dev)) {
lwkt_serialize_enter(ifp->if_serializer);
vge_stop(sc);
bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
/*
* Force off the IFF_UP flag here, in case someone
* still had a BPF descriptor attached to this
* interface. If they do, ether_ifattach() will cause
* the BPF code to try and clear the promisc mode
* flag, which will bubble down to vge_ioctl(),
* which will try to call vge_init() again. This will
* turn the NIC back on and restart the MII ticker,
* which will panic the system when the kernel tries
* to invoke the vge_tick() function that isn't there
* anymore.
*/
ifp->if_flags &= ~IFF_UP;
lwkt_serialize_exit(ifp->if_serializer);
ether_ifdetach(ifp);
}
if (sc->vge_miibus)
device_delete_child(dev, sc->vge_miibus);
bus_generic_detach(dev);
if (sc->vge_irq) {
bus_release_resource(dev, SYS_RES_IRQ, sc->vge_irq_rid,
sc->vge_irq);
}
if (sc->vge_res) {
bus_release_resource(dev, SYS_RES_MEMORY, sc->vge_res_rid,
sc->vge_res);
}
vge_dma_free(sc);
return (0);
}
static int
vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
{
struct vge_dmaload_arg arg;
struct mbuf *n = NULL;
int i, error;
if (m == NULL) {
n = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
if (n == NULL)
return (ENOBUFS);
m = n;
} else {
m->m_data = m->m_ext.ext_buf;
}
#ifdef VGE_FIXUP_RX
/*
* This is part of an evil trick to deal with non-x86 platforms.
* The VIA chip requires RX buffers to be aligned on 32-bit
* boundaries, but that will hose non-x86 machines. To get around
* this, we leave some empty space at the start of each buffer
* and for non-x86 hosts, we copy the buffer back two bytes
* to achieve word alignment. This is slightly more efficient
* than allocating a new buffer, copying the contents, and
* discarding the old buffer.
*/
m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
m_adj(m, VGE_ETHER_ALIGN);
#else
m->m_len = m->m_pkthdr.len = MCLBYTES;
#endif
arg.sc = sc;
arg.vge_idx = idx;
arg.vge_maxsegs = 1;
arg.vge_flags = 0;
error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag,
sc->vge_ldata.vge_rx_dmamap[idx], m,
vge_dma_map_rx_desc, &arg, BUS_DMA_NOWAIT);
if (error || arg.vge_maxsegs != 1) {
if (n != NULL)
m_freem(n);
return (ENOMEM);
}
/*
* Note: the manual fails to document the fact that for
 * proper operation, the driver needs to replenish the RX
* DMA ring 4 descriptors at a time (rather than one at a
* time, like most chips). We can allocate the new buffers
* but we should not set the OWN bits until we're ready
* to hand back 4 of them in one shot.
*/
#define VGE_RXCHUNK 4
sc->vge_rx_consumed++;
if (sc->vge_rx_consumed == VGE_RXCHUNK) {
for (i = idx; i != idx - sc->vge_rx_consumed; i--) {
sc->vge_ldata.vge_rx_list[i].vge_sts |=
htole32(VGE_RDSTS_OWN);
}
sc->vge_rx_consumed = 0;
}
sc->vge_ldata.vge_rx_mbuf[idx] = m;
bus_dmamap_sync(sc->vge_ldata.vge_mtag,
sc->vge_ldata.vge_rx_dmamap[idx], BUS_DMASYNC_PREREAD);
return (0);
}
static int
vge_tx_list_init(struct vge_softc *sc)
{
bzero ((char *)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
bzero ((char *)&sc->vge_ldata.vge_tx_mbuf,
(VGE_TX_DESC_CNT * sizeof(struct mbuf *)));
bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_PREWRITE);
sc->vge_ldata.vge_tx_prodidx = 0;
sc->vge_ldata.vge_tx_considx = 0;
sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;
return (0);
}
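/*
* Zero the RX descriptor ring and load every slot with a fresh mbuf cluster.
*/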
static int
vge_rx_list_init(struct vge_softc *sc)
{
int i;
bzero(sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
bzero(&sc->vge_ldata.vge_rx_mbuf,
VGE_RX_DESC_CNT * sizeof(struct mbuf *));
sc->vge_rx_consumed = 0;
for (i = 0; i < VGE_RX_DESC_CNT; i++) {
if (vge_newbuf(sc, i, NULL) == ENOBUFS)
return (ENOBUFS);
}
/* Flush the RX descriptors */
bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
sc->vge_ldata.vge_rx_list_map,
BUS_DMASYNC_PREWRITE);
sc->vge_ldata.vge_rx_prodidx = 0;
sc->vge_rx_consumed = 0;
sc->vge_head = sc->vge_tail = NULL;
return (0);
}
#ifdef VGE_FIXUP_RX
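/*
* Copy the received frame back two bytes so that the payload ends up
* 32-bit aligned on strict-alignment hosts (see the comment in vge_newbuf()).
*/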
static __inline void
vge_fixup_rx(struct mbuf *m)
{
uint16_t *src, *dst;
int i;
src = mtod(m, uint16_t *);
dst = src - 1;
for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
*dst++ = *src++;
m->m_data -= ETHER_ALIGN;
}
#endif
/*
* RX handler. We support the reception of jumbo frames that have
* been fragmented across multiple 2K mbuf cluster buffers.
*/
static void
vge_rxeof(struct vge_softc *sc, int count)
{
struct ifnet *ifp = &sc->arpcom.ac_if;
struct mbuf *m;
int i, total_len, lim = 0;
struct vge_rx_desc *cur_rx;
uint32_t rxstat, rxctl;
ASSERT_SERIALIZED(ifp->if_serializer);
i = sc->vge_ldata.vge_rx_prodidx;
/* Invalidate the descriptor memory */
bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
sc->vge_ldata.vge_rx_list_map, BUS_DMASYNC_POSTREAD);
while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {
#ifdef IFPOLL_ENABLE
if (count >= 0 && count-- == 0)
break;
#endif
cur_rx = &sc->vge_ldata.vge_rx_list[i];
m = sc->vge_ldata.vge_rx_mbuf[i];
total_len = VGE_RXBYTES(cur_rx);
rxstat = le32toh(cur_rx->vge_sts);
rxctl = le32toh(cur_rx->vge_ctl);
/* Invalidate the RX mbuf and unload its map */
bus_dmamap_sync(sc->vge_ldata.vge_mtag,
sc->vge_ldata.vge_rx_dmamap[i],
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->vge_ldata.vge_mtag,
sc->vge_ldata.vge_rx_dmamap[i]);
/*
* If the 'start of frame' bit is set, this indicates
* either the first fragment in a multi-fragment receive,
* or an intermediate fragment. Either way, we want to
* accumulate the buffers.
*/
if (rxstat & VGE_RXPKT_SOF) {
m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
if (sc->vge_head == NULL) {
sc->vge_head = sc->vge_tail = m;
} else {
m->m_flags &= ~M_PKTHDR;
sc->vge_tail->m_next = m;
sc->vge_tail = m;
}
vge_newbuf(sc, i, NULL);
VGE_RX_DESC_INC(i);
continue;
}
/*
* Bad/error frames will have the RXOK bit cleared.
* However, there's one error case we want to allow:
* if a VLAN tagged frame arrives and the chip can't
* match it against the CAM filter, it considers this
* a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
* We don't want to drop the frame though: our VLAN
* filtering is done in software.
*/
if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM) &&
!(rxstat & VGE_RDSTS_CSUMERR)) {
IFNET_STAT_INC(ifp, ierrors, 1);
/*
* If this is part of a multi-fragment packet,
* discard all the pieces.
*/
if (sc->vge_head != NULL) {
m_freem(sc->vge_head);
sc->vge_head = sc->vge_tail = NULL;
}
vge_newbuf(sc, i, m);
VGE_RX_DESC_INC(i);
continue;
}
/*
* If allocating a replacement mbuf fails,
* reload the current one.
*/
if (vge_newbuf(sc, i, NULL)) {
IFNET_STAT_INC(ifp, ierrors, 1);
if (sc->vge_head != NULL) {
m_freem(sc->vge_head);
sc->vge_head = sc->vge_tail = NULL;
}
vge_newbuf(sc, i, m);
VGE_RX_DESC_INC(i);
continue;
}
VGE_RX_DESC_INC(i);
if (sc->vge_head != NULL) {
m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
/*
* Special case: if there are 4 bytes or less
* in this buffer, the mbuf can be discarded:
* the last 4 bytes are the CRC, which we don't
* care about anyway.
*/
if (m->m_len <= ETHER_CRC_LEN) {
sc->vge_tail->m_len -=
(ETHER_CRC_LEN - m->m_len);
m_freem(m);
} else {
m->m_len -= ETHER_CRC_LEN;
m->m_flags &= ~M_PKTHDR;
sc->vge_tail->m_next = m;
}
m = sc->vge_head;
sc->vge_head = sc->vge_tail = NULL;
m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
} else {
m->m_pkthdr.len = m->m_len =
(total_len - ETHER_CRC_LEN);
}
#ifdef VGE_FIXUP_RX
vge_fixup_rx(m);
#endif
IFNET_STAT_INC(ifp, ipackets, 1);
m->m_pkthdr.rcvif = ifp;
/* Do RX checksumming if enabled */
if (ifp->if_capenable & IFCAP_RXCSUM) {
/* Check IP header checksum */
if (rxctl & VGE_RDCTL_IPPKT)
m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
if (rxctl & VGE_RDCTL_IPCSUMOK)
m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
/* Check TCP/UDP checksum */
if (rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT) &&
rxctl & VGE_RDCTL_PROTOCSUMOK) {
m->m_pkthdr.csum_flags |=
CSUM_DATA_VALID|CSUM_PSEUDO_HDR|
CSUM_FRAG_NOT_CHECKED;
m->m_pkthdr.csum_data = 0xffff;
}
}
if (rxstat & VGE_RDSTS_VTAG) {
m->m_flags |= M_VLANTAG;
m->m_pkthdr.ether_vlantag =
ntohs((rxctl & VGE_RDCTL_VLANID));
}
ifp->if_input(ifp, m);
lim++;
if (lim == VGE_RX_DESC_CNT)
break;
}
/* Flush the RX DMA ring */
bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
sc->vge_ldata.vge_rx_list_map,
BUS_DMASYNC_PREWRITE);
sc->vge_ldata.vge_rx_prodidx = i;
CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
}
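/*
* TX completion handler: reclaim descriptors the chip has finished with,
* free the associated mbufs and update the transmit statistics.
*/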
static void
vge_txeof(struct vge_softc *sc)
{
struct ifnet *ifp = &sc->arpcom.ac_if;
uint32_t txstat;
int idx;
idx = sc->vge_ldata.vge_tx_considx;
/* Invalidate the TX descriptor list */
bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_POSTREAD);
while (idx != sc->vge_ldata.vge_tx_prodidx) {
txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
if (txstat & VGE_TDSTS_OWN)
break;
m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
bus_dmamap_unload(sc->vge_ldata.vge_mtag,
sc->vge_ldata.vge_tx_dmamap[idx]);
if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
IFNET_STAT_INC(ifp, collisions, 1);
if (txstat & VGE_TDSTS_TXERR)
IFNET_STAT_INC(ifp, oerrors, 1);
else
IFNET_STAT_INC(ifp, opackets, 1);
sc->vge_ldata.vge_tx_free++;
VGE_TX_DESC_INC(idx);
}
/* No changes made to the TX ring, so no flush needed */
if (idx != sc->vge_ldata.vge_tx_considx) {
sc->vge_ldata.vge_tx_considx = idx;
ifq_clr_oactive(&ifp->if_snd);
ifp->if_timer = 0;
}
/*
* If not all descriptors have been reaped yet,
* reload the timer so that we will eventually get another
* interrupt that will cause us to re-enter this routine.
* This is done in case the transmitter has gone idle.
*/
if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT)
CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
}
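/*
* Periodic MII tick: track link state changes and kick the transmit
* queue when the link comes back up.
*/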
static void
vge_tick(struct vge_softc *sc)
{
struct ifnet *ifp = &sc->arpcom.ac_if;
struct mii_data *mii;
mii = device_get_softc(sc->vge_miibus);
mii_tick(mii);
if (sc->vge_link) {
if (!(mii->mii_media_status & IFM_ACTIVE))
sc->vge_link = 0;
} else {
if (mii->mii_media_status & IFM_ACTIVE &&
IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
sc->vge_link = 1;
if (!ifq_is_empty(&ifp->if_snd))
if_devstart(ifp);
}
}
}
#ifdef IFPOLL_ENABLE
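/*
* Polling-mode RX/TX handler, invoked by the ifpoll framework in place
* of the interrupt handler when polling is enabled.
*/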
static void
vge_npoll_compat(struct ifnet *ifp, void *arg __unused, int count)
{
struct vge_softc *sc = ifp->if_softc;
ASSERT_SERIALIZED(ifp->if_serializer);
vge_rxeof(sc, count);
vge_txeof(sc);
if (!ifq_is_empty(&ifp->if_snd))
if_devstart(ifp);
/* XXX copy & paste from vge_intr */
if (sc->vge_npoll.ifpc_stcount-- == 0) {
uint32_t status;
sc->vge_npoll.ifpc_stcount = sc->vge_npoll.ifpc_stfrac;
status = CSR_READ_4(sc, VGE_ISR);
if (status == 0xffffffff)
return;
if (status)
CSR_WRITE_4(sc, VGE_ISR, status);
if (status & (VGE_ISR_TXDMA_STALL |
VGE_ISR_RXDMA_STALL))
vge_init(sc);
if (status & (VGE_ISR_RXOFLOW | VGE_ISR_RXNODESC)) {
IFNET_STAT_INC(ifp, ierrors, 1);
CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
}
}
}
static void
vge_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
struct vge_softc *sc = ifp->if_softc;
ASSERT_SERIALIZED(ifp->if_serializer);
if (info != NULL) {
int cpuid = sc->vge_npoll.ifpc_cpuid;
info->ifpi_rx[cpuid].poll_func = vge_npoll_compat;
info->ifpi_rx[cpuid].arg = NULL;
info->ifpi_rx[cpuid].serializer = ifp->if_serializer;
if (ifp->if_flags & IFF_RUNNING)
vge_disable_intr(sc);
ifq_set_cpuid(&ifp->if_snd, cpuid);
} else {
if (ifp->if_flags & IFF_RUNNING)
vge_enable_intr(sc, 0xffffffff);
ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->vge_irq));
}
}
#endif /* IFPOLL_ENABLE */
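/*
* Interrupt handler: service RX/TX completions and error conditions
* until the interrupt status register drains.
*/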
static void
vge_intr(void *arg)
{
struct vge_softc *sc = arg;
struct ifnet *ifp = &sc->arpcom.ac_if;
uint32_t status;
if (sc->suspended || !(ifp->if_flags & IFF_UP))
return;
/* Disable interrupts */
CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
for (;;) {
status = CSR_READ_4(sc, VGE_ISR);
/* If the card has gone away, the read returns 0xffffffff. */
if (status == 0xFFFFFFFF)
break;
if (status)
CSR_WRITE_4(sc, VGE_ISR, status);
if ((status & VGE_INTRS) == 0)
break;
if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
vge_rxeof(sc, -1);
if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
vge_rxeof(sc, -1);
IFNET_STAT_INC(ifp, ierrors, 1);
CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
}
if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
vge_txeof(sc);
if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
vge_init(sc);
if (status & VGE_ISR_LINKSTS)
vge_tick(sc);
}
/* Re-enable interrupts */
CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
if (!ifq_is_empty(&ifp->if_snd))
if_devstart(ifp);
}
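/*
* Map an outgoing mbuf chain into the TX descriptor at 'idx',
* defragmenting it into a single buffer if it needs more than
* VGE_TX_FRAGS DMA segments.
*/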
static int
vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
{
struct vge_dmaload_arg arg;
bus_dmamap_t map;
int error;
arg.vge_flags = 0;
if (m_head->m_pkthdr.csum_flags & CSUM_IP)
arg.vge_flags |= VGE_TDCTL_IPCSUM;
if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
arg.vge_flags |= VGE_TDCTL_TCPCSUM;
if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
arg.vge_flags |= VGE_TDCTL_UDPCSUM;
arg.sc = sc;
arg.vge_idx = idx;
arg.vge_m0 = m_head;
arg.vge_maxsegs = VGE_TX_FRAGS;
map = sc->vge_ldata.vge_tx_dmamap[idx];
error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map, m_head,
vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT);
if (error && error != EFBIG) {
if_printf(&sc->arpcom.ac_if, "can't map mbuf (error %d)\n",
error);
goto fail;
}
/* Too many segments to map, coalesce into a single mbuf */
if (error || arg.vge_maxsegs == 0) {
struct mbuf *m_new;
m_new = m_defrag(m_head, MB_DONTWAIT);
if (m_new == NULL) {
error = ENOBUFS;
goto fail;
} else {
m_head = m_new;
}
arg.sc = sc;
arg.vge_m0 = m_head;
arg.vge_idx = idx;
arg.vge_maxsegs = 1;
error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map,
m_head, vge_dma_map_tx_desc, &arg,
BUS_DMA_NOWAIT);
if (error) {
if_printf(&sc->arpcom.ac_if,
"can't map mbuf (error %d)\n", error);
goto fail;
}
}
sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
sc->vge_ldata.vge_tx_free--;
/*
* Set up hardware VLAN tagging.
*/
if (m_head->m_flags & M_VLANTAG) {
sc->vge_ldata.vge_tx_list[idx].vge_ctl |=
htole32(htons(m_head->m_pkthdr.ether_vlantag) |
VGE_TDCTL_VTAG);
}
sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN);
return (0);
fail:
m_freem(m_head);
return error;
}
/*
* Main transmit routine.
*/
static void
vge_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
struct vge_softc *sc = ifp->if_softc;
struct mbuf *m_head = NULL;
int idx, pidx = 0;
ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
ASSERT_SERIALIZED(ifp->if_serializer);
if (!sc->vge_link) {
ifq_purge(&ifp->if_snd);
return;
}
if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
return;
idx = sc->vge_ldata.vge_tx_prodidx;
pidx = idx - 1;
if (pidx < 0)
pidx = VGE_TX_DESC_CNT - 1;
while (sc->vge_ldata.vge_tx_mbuf[idx] == NULL) {
if (sc->vge_ldata.vge_tx_free <= 2) {
ifq_set_oactive(&ifp->if_snd);
break;
}
m_head = ifq_dequeue(&ifp->if_snd);
if (m_head == NULL)
break;
if (vge_encap(sc, m_head, idx)) {
/* If vge_encap() failed, it will free m_head for us */
ifq_set_oactive(&ifp->if_snd);
break;
}
sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
htole16(VGE_TXDESC_Q);
pidx = idx;
VGE_TX_DESC_INC(idx);
/*
* If there's a BPF listener, bounce a copy of this frame
* to him.
*/
ETHER_BPF_MTAP(ifp, m_head);
}
if (idx == sc->vge_ldata.vge_tx_prodidx)
return;
/* Flush the TX descriptors */
bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
sc->vge_ldata.vge_tx_list_map,
BUS_DMASYNC_PREWRITE);
/* Issue a transmit command. */
CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
sc->vge_ldata.vge_tx_prodidx = idx;
/*
* Use the countdown timer for interrupt moderation.
* 'TX done' interrupts are disabled. Instead, we reset the
* countdown timer, which will begin counting until it hits
* the value in the SSTIMER register, and then trigger an
* interrupt. Each time we set the TIMER0_ENABLE bit,
* the timer count is reloaded. Only when the transmitter
* is idle will the timer hit 0 and an interrupt fire.
*/
CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
/*
* Set a timeout in case the chip goes out to lunch.
*/
ifp->if_timer = 5;
}
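/*
* (Re)initialize the hardware: program the descriptor rings, receive
* filter and MAC, then start the chip.
*/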
static void
vge_init(void *xsc)
{
struct vge_softc *sc = xsc;
struct ifnet *ifp = &sc->arpcom.ac_if;
struct mii_data *mii;
int i;
ASSERT_SERIALIZED(ifp->if_serializer);
mii = device_get_softc(sc->vge_miibus);
/*
* Cancel pending I/O and free all RX/TX buffers.
*/
vge_stop(sc);
vge_reset(sc);
/*
* Initialize the RX and TX descriptors and mbufs.
*/
vge_rx_list_init(sc);
vge_tx_list_init(sc);
/* Set our station address */
for (i = 0; i < ETHER_ADDR_LEN; i++)
CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(ifp)[i]);
/*
* Set receive FIFO threshold. Also allow transmission and
* reception of VLAN tagged frames.
*/
CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);
/* Set DMA burst length */
CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
/* Set collision backoff algorithm */
CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
/* Disable LPSEL field in priority resolution */
CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
/*
* Load the addresses of the DMA queues into the chip.
* Note that we only use one transmit queue.
*/
CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_addr));
CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_addr));
CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
/* Enable and wake up the RX descriptor queue */
CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
/* Enable the TX descriptor queue */
CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
/* Set up the receive filter -- allow large frames for VLANs. */
CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);
/* If we want promiscuous mode, set the allframes bit. */
if (ifp->if_flags & IFF_PROMISC)
CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
/* Set capture broadcast bit to capture broadcast frames. */
if (ifp->if_flags & IFF_BROADCAST)
CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
/* Set multicast bit to capture multicast frames. */
if (ifp->if_flags & IFF_MULTICAST)
CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
/* Init the cam filter. */
vge_cam_clear(sc);
/* Init the multicast filter. */
vge_setmulti(sc);
/* Enable flow control */
CSR_WRITE_1(sc, VGE_CRS2, 0x8B);
/* Enable jumbo frame reception (if desired) */
/* Start the MAC. */
CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
CSR_WRITE_1(sc, VGE_CRS0,
VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
/*
* Configure one-shot timer for microsecond
* resolution and load it for 400 usecs.
*/
CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
CSR_WRITE_2(sc, VGE_SSTIMER, 400);
/*
* Configure interrupt moderation for receive. Enable
* the holdoff counter and load it, and set the RX
* suppression count to the number of descriptors we
* want to allow before triggering an interrupt.
* The holdoff timer is in units of 20 usecs.
*/
#ifdef notyet
CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
/* Select the interrupt holdoff timer page. */
CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */
/* Enable use of the holdoff timer. */
CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);
/* Select the RX suppression threshold page. */
CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */
/* Restore the page select bits. */
CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
#endif
#ifdef IFPOLL_ENABLE
/* Disable intr if polling(4) is enabled */
if (ifp->if_flags & IFF_NPOLLING)
vge_disable_intr(sc);
else
#endif
vge_enable_intr(sc, 0);
mii_mediachg(mii);
ifp->if_flags |= IFF_RUNNING;
ifq_clr_oactive(&ifp->if_snd);
sc->vge_if_flags = 0;
sc->vge_link = 0;
}
/*
* Set media options.
*/
static int
vge_ifmedia_upd(struct ifnet *ifp)
{
struct vge_softc *sc = ifp->if_softc;
struct mii_data *mii = device_get_softc(sc->vge_miibus);
mii_mediachg(mii);
return (0);
}
/*
* Report current media status.
*/
static void
vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
struct vge_softc *sc = ifp->if_softc;
struct mii_data *mii = device_get_softc(sc->vge_miibus);
mii_pollstat(mii);
ifmr->ifm_active = mii->mii_media_active;
ifmr->ifm_status = mii->mii_media_status;
}
static void
vge_miibus_statchg(device_t dev)
{
struct vge_softc *sc;
struct mii_data *mii;
struct ifmedia_entry *ife;
sc = device_get_softc(dev);
mii = device_get_softc(sc->vge_miibus);
ife = mii->mii_media.ifm_cur;
/*
* If the user manually selects a media mode, we need to turn
* on the forced MAC mode bit in the DIAGCTL register. If the
* user happens to choose a full duplex mode, we also need to
* set the 'force full duplex' bit. This applies only to
* 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
* mode is disabled, and in 1000baseT mode, full duplex is
* always implied, so we turn on the forced mode bit but leave
* the FDX bit cleared.
*/
switch (IFM_SUBTYPE(ife->ifm_media)) {
case IFM_AUTO:
CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
break;
case IFM_1000_T:
CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
break;
case IFM_100_TX:
case IFM_10_T:
CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
if ((ife->ifm_media & IFM_GMASK) == IFM_FDX)
CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
else
CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
break;
default:
device_printf(dev, "unknown media type: %x\n",
IFM_SUBTYPE(ife->ifm_media));
break;
}
}
static int
vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
struct vge_softc *sc = ifp->if_softc;
struct ifreq *ifr = (struct ifreq *)data;
struct mii_data *mii;
int error = 0;
switch (command) {
case SIOCSIFMTU:
if (ifr->ifr_mtu > VGE_JUMBO_MTU)
error = EINVAL;
else
ifp->if_mtu = ifr->ifr_mtu;
break;
case SIOCSIFFLAGS:
if (ifp->if_flags & IFF_UP) {
if ((ifp->if_flags & IFF_RUNNING) &&
(ifp->if_flags & IFF_PROMISC) &&
!(sc->vge_if_flags & IFF_PROMISC)) {
CSR_SETBIT_1(sc, VGE_RXCTL,
VGE_RXCTL_RX_PROMISC);
vge_setmulti(sc);
} else if ((ifp->if_flags & IFF_RUNNING) &&
!(ifp->if_flags & IFF_PROMISC) &&
(sc->vge_if_flags & IFF_PROMISC)) {
CSR_CLRBIT_1(sc, VGE_RXCTL,
VGE_RXCTL_RX_PROMISC);
vge_setmulti(sc);
} else {
vge_init(sc);
}
} else {
if (ifp->if_flags & IFF_RUNNING)
vge_stop(sc);
}
sc->vge_if_flags = ifp->if_flags;
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
vge_setmulti(sc);
break;
case SIOCGIFMEDIA:
case SIOCSIFMEDIA:
mii = device_get_softc(sc->vge_miibus);
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
break;
case SIOCSIFCAP:
{
uint32_t mask = ifr->ifr_reqcap ^ ifp->if_capenable;
if (mask & IFCAP_HWCSUM) {
ifp->if_capenable |= ifr->ifr_reqcap & (IFCAP_HWCSUM);
if (ifp->if_capenable & IFCAP_TXCSUM)
ifp->if_hwassist = VGE_CSUM_FEATURES;
else
ifp->if_hwassist = 0;
if (ifp->if_flags & IFF_RUNNING)
vge_init(sc);
}
}
break;
default:
error = ether_ioctl(ifp, command, data);
break;
}
return (error);
}
static void
vge_watchdog(struct ifnet *ifp)
{
struct vge_softc *sc = ifp->if_softc;
if_printf(ifp, "watchdog timeout\n");
IFNET_STAT_INC(ifp, oerrors, 1);
vge_txeof(sc);
vge_rxeof(sc, -1);
vge_init(sc);
}
/*
* Stop the adapter and free any mbufs allocated to the
* RX and TX lists.
*/
static void
vge_stop(struct vge_softc *sc)
{
struct ifnet *ifp = &sc->arpcom.ac_if;
int i;
ASSERT_SERIALIZED(ifp->if_serializer);
ifp->if_timer = 0;
ifp->if_flags &= ~IFF_RUNNING;
ifq_clr_oactive(&ifp->if_snd);
CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);
if (sc->vge_head != NULL) {
m_freem(sc->vge_head);
sc->vge_head = sc->vge_tail = NULL;
}
/* Free the TX list buffers. */
for (i = 0; i < VGE_TX_DESC_CNT; i++) {
if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
bus_dmamap_unload(sc->vge_ldata.vge_mtag,
sc->vge_ldata.vge_tx_dmamap[i]);
m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
sc->vge_ldata.vge_tx_mbuf[i] = NULL;
}
}
/* Free the RX list buffers. */
for (i = 0; i < VGE_RX_DESC_CNT; i++) {
if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
bus_dmamap_unload(sc->vge_ldata.vge_mtag,
sc->vge_ldata.vge_rx_dmamap[i]);
m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
sc->vge_ldata.vge_rx_mbuf[i] = NULL;
}
}
}
/*
* Device suspend routine. Stop the interface and save some PCI
* settings in case the BIOS doesn't restore them properly on
* resume.
*/
static int
vge_suspend(device_t dev)
{
struct vge_softc *sc = device_get_softc(dev);
struct ifnet *ifp = &sc->arpcom.ac_if;
lwkt_serialize_enter(ifp->if_serializer);
vge_stop(sc);
sc->suspended = 1;
lwkt_serialize_exit(ifp->if_serializer);
return (0);
}
/*
* Device resume routine. Restore some PCI settings in case the BIOS
* doesn't, re-enable busmastering, and restart the interface if
* appropriate.
*/
static int
vge_resume(device_t dev)
{
struct vge_softc *sc = device_get_softc(dev);
struct ifnet *ifp = &sc->arpcom.ac_if;
/* re-enable busmastering */
pci_enable_busmaster(dev);
pci_enable_io(dev, SYS_RES_MEMORY);
lwkt_serialize_enter(ifp->if_serializer);
/* reinitialize interface if necessary */
if (ifp->if_flags & IFF_UP)
vge_init(sc);
sc->suspended = 0;
lwkt_serialize_exit(ifp->if_serializer);
return (0);
}
/*
* Stop all chip I/O so that the kernel's probe routines don't
* get confused by errant DMAs when rebooting.
*/
static void
vge_shutdown(device_t dev)
{
struct vge_softc *sc = device_get_softc(dev);
struct ifnet *ifp = &sc->arpcom.ac_if;
lwkt_serialize_enter(ifp->if_serializer);
vge_stop(sc);
lwkt_serialize_exit(ifp->if_serializer);
}
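/*
* Unmask all interrupt sources, acknowledge the bits in 'isr' and
* globally enable interrupts.
*/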
static void
vge_enable_intr(struct vge_softc *sc, uint32_t isr)
{
CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
CSR_WRITE_4(sc, VGE_ISR, isr);
CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
}
#ifdef IFPOLL_ENABLE
static void
vge_disable_intr(struct vge_softc *sc)
{
CSR_WRITE_4(sc, VGE_IMR, 0);
CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
sc->vge_npoll.ifpc_stcount = 0;
}
#endif /* IFPOLL_ENABLE */
|
339883.c | /***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 2014 - 2020, Steve Holme, <steve_holme@hotmail.com>.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at https://curl.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
#include "test.h"
#include "memdebug.h"
/*
* This is the list of basic details you need to tweak to get things right.
*/
#define TO "<recipient@example.com>"
#define FROM "<sender@example.com>"
static const char *payload_text[] = {
"From: different\r\n",
"To: another\r\n",
"\r\n",
"\r\n",
".\r\n",
".\r\n",
"\r\n",
".\r\n",
"\r\n",
"body",
NULL
};
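/*
* Note: the bare "." and blank lines above are intentional; the payload
* presumably exercises libcurl's dot-stuffing of SMTP message bodies.
*/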
struct upload_status {
int lines_read;
};
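/* Feed the payload to libcurl one line per callback invocation. */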
static size_t read_callback(void *ptr, size_t size, size_t nmemb, void *userp)
{
struct upload_status *upload_ctx = (struct upload_status *)userp;
const char *data;
if((size == 0) || (nmemb == 0) || ((size*nmemb) < 1)) {
return 0;
}
data = payload_text[upload_ctx->lines_read];
if(data) {
size_t len = strlen(data);
memcpy(ptr, data, len);
upload_ctx->lines_read++;
return len;
}
return 0;
}
int test(char *URL)
{
CURLcode res;
CURL *curl;
struct curl_slist *rcpt_list = NULL;
struct upload_status upload_ctx = {0};
if(curl_global_init(CURL_GLOBAL_ALL) != CURLE_OK) {
fprintf(stderr, "curl_global_init() failed\n");
return TEST_ERR_MAJOR_BAD;
}
curl = curl_easy_init();
if(!curl) {
fprintf(stderr, "curl_easy_init() failed\n");
curl_global_cleanup();
return TEST_ERR_MAJOR_BAD;
}
rcpt_list = curl_slist_append(rcpt_list, TO);
/* more addresses can be added here
rcpt_list = curl_slist_append(rcpt_list, "<others@example.com>");
*/
test_setopt(curl, CURLOPT_URL, URL);
test_setopt(curl, CURLOPT_UPLOAD, 1L);
test_setopt(curl, CURLOPT_READFUNCTION, read_callback);
test_setopt(curl, CURLOPT_READDATA, &upload_ctx);
test_setopt(curl, CURLOPT_MAIL_FROM, FROM);
test_setopt(curl, CURLOPT_MAIL_RCPT, rcpt_list);
test_setopt(curl, CURLOPT_VERBOSE, 1L);
res = curl_easy_perform(curl);
test_cleanup:
curl_slist_free_all(rcpt_list);
curl_easy_cleanup(curl);
curl_global_cleanup();
return (int)res;
}
|
478104.c | /*
Copyright (C) 2015-2017 Alexander Borisov
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Author: lex.borisov@gmail.com (Alexander Borisov)
*/
#include "myhtml/tokenizer_script.h"
size_t myhtml_tokenizer_state_script_data(myhtml_tree_t* tree, myhtml_token_node_t* token_node, const char* html, size_t html_offset, size_t html_size)
{
while (html_offset < html_size)
{
if(html[html_offset] == '<') {
token_node->element_begin = (tree->global_offset + html_offset);
html_offset++;
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_LESS_THAN_SIGN;
break;
}
html_offset++;
}
return html_offset;
}
size_t myhtml_tokenizer_state_script_data_less_than_sign(myhtml_tree_t* tree, myhtml_token_node_t* token_node, const char* html, size_t html_offset, size_t html_size)
{
if(html[html_offset] == '/')
{
html_offset++;
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_END_TAG_OPEN;
}
else if(html[html_offset] == '!')
{
html_offset++;
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPE_START;
}
else {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA;
}
return html_offset;
}
size_t myhtml_tokenizer_state_script_data_escape_start(myhtml_tree_t* tree, myhtml_token_node_t* token_node, const char* html, size_t html_offset, size_t html_size)
{
if(html[html_offset] == '-') {
html_offset++;
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPE_START_DASH;
}
else {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA;
}
return html_offset;
}
size_t myhtml_tokenizer_state_script_data_escape_start_dash(myhtml_tree_t* tree, myhtml_token_node_t* token_node, const char* html, size_t html_offset, size_t html_size)
{
if(html[html_offset] == '-') {
html_offset++;
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED_DASH_DASH;
}
else {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA;
}
return html_offset;
}
size_t myhtml_tokenizer_state_script_data_end_tag_open(myhtml_tree_t* tree, myhtml_token_node_t* token_node, const char* html, size_t html_offset, size_t html_size)
{
if(myhtml_ascii_char_cmp(html[html_offset])) {
token_node->str.length = (html_offset + tree->global_offset);
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_END_TAG_NAME;
}
else {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA;
}
return html_offset;
}
size_t myhtml_tokenizer_state_script_data_end_tag_name(myhtml_tree_t* tree, myhtml_token_node_t* token_node, const char* html, size_t html_offset, size_t html_size)
{
while(html_offset < html_size)
{
if(myhtml_whithspace(html[html_offset], ==, ||))
{
if((html_offset - token_node->str.length) != 6) {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA;
html_offset++;
break;
}
size_t tmp_size = token_node->str.length;
const char *tem_name = myhtml_tree_incomming_buffer_make_data(tree, tmp_size, 6);
if(mycore_strncasecmp(tem_name, "script", 6) == 0)
{
token_node = myhtml_tokenizer_queue_create_text_node_if_need(tree, token_node, html, ((html_offset + tree->global_offset) - 8), MyHTML_TOKEN_TYPE_SCRIPT);
if(token_node == NULL) {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_PARSE_ERROR_STOP;
return 0;
}
token_node->raw_begin = tmp_size;
token_node->raw_length = 6;
token_node->tag_id = MyHTML_TAG_SCRIPT;
token_node->type = MyHTML_TOKEN_TYPE_CLOSE;
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_BEFORE_ATTRIBUTE_NAME;
}
else {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA;
}
html_offset++;
break;
}
else if(html[html_offset] == '/')
{
if((html_offset - token_node->str.length) != 6) {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA;
html_offset++;
break;
}
size_t tmp_size = token_node->str.length;
const char *tem_name = myhtml_tree_incomming_buffer_make_data(tree, tmp_size, 6);
if(mycore_strncasecmp(tem_name, "script", 6) == 0)
{
token_node = myhtml_tokenizer_queue_create_text_node_if_need(tree, token_node, html, ((html_offset + tree->global_offset) - 8), MyHTML_TOKEN_TYPE_SCRIPT);
if(token_node == NULL) {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_PARSE_ERROR_STOP;
return 0;
}
token_node->raw_begin = tmp_size;
token_node->raw_length = 6;
token_node->tag_id = MyHTML_TAG_SCRIPT;
token_node->type = MyHTML_TOKEN_TYPE_CLOSE|MyHTML_TOKEN_TYPE_CLOSE_SELF;
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_BEFORE_ATTRIBUTE_NAME;
}
else {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA;
}
html_offset++;
break;
}
else if(html[html_offset] == '>')
{
if((html_offset - token_node->str.length) != 6) {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA;
html_offset++;
break;
}
size_t tmp_size = token_node->str.length;
const char *tem_name = myhtml_tree_incomming_buffer_make_data(tree, tmp_size, 6);
if(mycore_strncasecmp(tem_name, "script", 6) == 0)
{
token_node = myhtml_tokenizer_queue_create_text_node_if_need(tree, token_node, html, ((html_offset + tree->global_offset) - 8), MyHTML_TOKEN_TYPE_SCRIPT);
if(token_node == NULL) {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_PARSE_ERROR_STOP;
return 0;
}
token_node->raw_begin = tmp_size;
token_node->raw_length = 6;
token_node->tag_id = MyHTML_TAG_SCRIPT;
token_node->type = MyHTML_TOKEN_TYPE_CLOSE;
html_offset++;
token_node->element_length = (tree->global_offset + html_offset) - token_node->element_begin;
if(myhtml_queue_add(tree, html_offset, token_node) != MyHTML_STATUS_OK) {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_PARSE_ERROR_STOP;
return 0;
}
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_DATA;
}
else {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA;
html_offset++;
}
break;
}
else if(myhtml_ascii_char_unless_cmp(html[html_offset]))
{
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA;
break;
}
html_offset++;
}
return html_offset;
}
size_t myhtml_tokenizer_state_script_data_escaped_dash_dash(myhtml_tree_t* tree, myhtml_token_node_t* token_node, const char* html, size_t html_offset, size_t html_size)
{
if(html[html_offset] == '-') {
html_offset++;
return html_offset;
}
if(html[html_offset] == '<') {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED_LESS_THAN_SIGN;
}
else if(html[html_offset] == '>') {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA;
}
else {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED;
}
html_offset++;
return html_offset;
}
size_t myhtml_tokenizer_state_script_data_escaped_less_than_sign(myhtml_tree_t* tree, myhtml_token_node_t* token_node, const char* html, size_t html_offset, size_t html_size)
{
if(html[html_offset] == '/') {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED_END_TAG_OPEN;
html_offset++;
}
else if(myhtml_ascii_char_cmp(html[html_offset])) {
token_node->str.length = (html_offset + tree->global_offset);
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_DOUBLE_ESCAPE_START;
}
else {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED;
}
return html_offset;
}
size_t myhtml_tokenizer_state_script_data_escaped_end_tag_open(myhtml_tree_t* tree, myhtml_token_node_t* token_node, const char* html, size_t html_offset, size_t html_size)
{
if(myhtml_ascii_char_cmp(html[html_offset])) {
token_node->str.length = (html_offset + tree->global_offset);
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED_END_TAG_NAME;
}
else {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED;
}
return html_offset;
}
size_t myhtml_tokenizer_state_script_data_escaped_end_tag_name(myhtml_tree_t* tree, myhtml_token_node_t* token_node, const char* html, size_t html_offset, size_t html_size)
{
while(html_offset < html_size)
{
if(myhtml_whithspace(html[html_offset], ==, ||))
{
if((html_offset - token_node->str.length) != 6) {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED;
html_offset++;
break;
}
size_t tmp_size = token_node->str.length;
const char *tem_name = myhtml_tree_incomming_buffer_make_data(tree, tmp_size, 6);
if(mycore_strncasecmp(tem_name, "script", 6) == 0)
{
token_node = myhtml_tokenizer_queue_create_text_node_if_need(tree, token_node, html, ((html_offset + tree->global_offset) - 8), MyHTML_TOKEN_TYPE_SCRIPT);
if(token_node == NULL) {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_PARSE_ERROR_STOP;
return 0;
}
token_node->raw_begin = tmp_size;
token_node->raw_length = 6;
token_node->tag_id = MyHTML_TAG_SCRIPT;
token_node->type = MyHTML_TOKEN_TYPE_CLOSE;
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_BEFORE_ATTRIBUTE_NAME;
}
else {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED;
}
html_offset++;
break;
}
else if(html[html_offset] == '/')
{
if((html_offset - token_node->str.length) != 6) {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED;
html_offset++;
break;
}
size_t tmp_size = token_node->str.length;
const char *tem_name = myhtml_tree_incomming_buffer_make_data(tree, tmp_size, 6);
if(mycore_strncasecmp(tem_name, "script", 6) == 0)
{
token_node = myhtml_tokenizer_queue_create_text_node_if_need(tree, token_node, html, ((html_offset + tree->global_offset) - 8), MyHTML_TOKEN_TYPE_SCRIPT);
if(token_node == NULL) {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_PARSE_ERROR_STOP;
return 0;
}
token_node->raw_begin = tmp_size;
token_node->raw_length = 6;
token_node->tag_id = MyHTML_TAG_SCRIPT;
token_node->type = MyHTML_TOKEN_TYPE_CLOSE|MyHTML_TOKEN_TYPE_CLOSE_SELF;
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_BEFORE_ATTRIBUTE_NAME;
}
else {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED;
}
html_offset++;
break;
}
else if(html[html_offset] == '>')
{
if((html_offset - token_node->str.length) != 6) {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED;
html_offset++;
break;
}
size_t tmp_size = token_node->str.length;
const char *tem_name = myhtml_tree_incomming_buffer_make_data(tree, tmp_size, 6);
if(mycore_strncasecmp(tem_name, "script", 6) == 0)
{
token_node = myhtml_tokenizer_queue_create_text_node_if_need(tree, token_node, html, ((html_offset + tree->global_offset) - 8), MyHTML_TOKEN_TYPE_SCRIPT);
if(token_node == NULL) {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_PARSE_ERROR_STOP;
return 0;
}
token_node->raw_begin = tmp_size;
token_node->raw_length = 6;
token_node->tag_id = MyHTML_TAG_SCRIPT;
token_node->type = MyHTML_TOKEN_TYPE_CLOSE;
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_DATA;
html_offset++;
token_node->element_length = (tree->global_offset + html_offset) - token_node->element_begin;
if(myhtml_queue_add(tree, html_offset, token_node) != MyHTML_STATUS_OK) {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_PARSE_ERROR_STOP;
return 0;
}
}
else {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED;
html_offset++;
}
break;
}
else if(myhtml_ascii_char_unless_cmp(html[html_offset]))
{
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED;
break;
}
html_offset++;
}
return html_offset;
}
size_t myhtml_tokenizer_state_script_data_escaped(myhtml_tree_t* tree, myhtml_token_node_t* token_node, const char* html, size_t html_offset, size_t html_size)
{
while(html_offset < html_size)
{
if(html[html_offset] == '-')
{
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED_DASH;
html_offset++;
break;
}
else if(html[html_offset] == '<')
{
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED_LESS_THAN_SIGN;
html_offset++;
break;
}
html_offset++;
}
return html_offset;
}
size_t myhtml_tokenizer_state_script_data_escaped_dash(myhtml_tree_t* tree, myhtml_token_node_t* token_node, const char* html, size_t html_offset, size_t html_size)
{
if(html[html_offset] == '-') {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED_DASH_DASH;
html_offset++;
}
else if(html[html_offset] == '<') {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED_LESS_THAN_SIGN;
}
else if(html[html_offset] == '\0') {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED;
}
else {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED;
html_offset++;
}
return html_offset;
}
size_t myhtml_tokenizer_state_script_data_double_escape_start(myhtml_tree_t* tree, myhtml_token_node_t* token_node, const char* html, size_t html_offset, size_t html_size)
{
while(html_offset < html_size)
{
if(myhtml_whithspace(html[html_offset], ==, ||) || html[html_offset] == '/' || html[html_offset] == '>')
{
if((html_offset - token_node->str.length) != 6) {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED;
html_offset++;
break;
}
size_t tmp_size = token_node->str.length;
const char *tem_name = myhtml_tree_incomming_buffer_make_data(tree, tmp_size, 6);
if(mycore_strncasecmp(tem_name, "script", 6) == 0) {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_DOUBLE_ESCAPED;
}
else {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED;
}
html_offset++;
break;
}
else if(myhtml_ascii_char_unless_cmp(html[html_offset]))
{
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED;
break;
}
html_offset++;
}
return html_offset;
}
size_t myhtml_tokenizer_state_script_data_double_escaped(myhtml_tree_t* tree, myhtml_token_node_t* token_node, const char* html, size_t html_offset, size_t html_size)
{
while(html_offset < html_size)
{
if(html[html_offset] == '-')
{
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_DOUBLE_ESCAPED_DASH;
html_offset++;
break;
}
else if(html[html_offset] == '<')
{
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_DOUBLE_ESCAPED_LESS_THAN_SIGN;
html_offset++;
break;
}
html_offset++;
}
return html_offset;
}
size_t myhtml_tokenizer_state_script_data_double_escaped_dash(myhtml_tree_t* tree, myhtml_token_node_t* token_node, const char* html, size_t html_offset, size_t html_size)
{
if(html[html_offset] == '-')
{
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_DOUBLE_ESCAPED_DASH_DASH;
}
else if(html[html_offset] == '<')
{
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_DOUBLE_ESCAPED_LESS_THAN_SIGN;
}
else {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_DOUBLE_ESCAPED;
}
html_offset++;
return html_offset;
}
size_t myhtml_tokenizer_state_script_data_double_escaped_dash_dash(myhtml_tree_t* tree, myhtml_token_node_t* token_node, const char* html, size_t html_offset, size_t html_size)
{
if(html[html_offset] == '-') {
html_offset++;
return html_offset;
}
if(html[html_offset] == '<')
{
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_DOUBLE_ESCAPED_LESS_THAN_SIGN;
}
else if(html[html_offset] == '>')
{
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA;
}
else {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_DOUBLE_ESCAPED;
}
html_offset++;
return html_offset;
}
size_t myhtml_tokenizer_state_script_data_double_escaped_less_than_sign(myhtml_tree_t* tree, myhtml_token_node_t* token_node, const char* html, size_t html_offset, size_t html_size)
{
if(html[html_offset] == '/') {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_DOUBLE_ESCAPE_END;
html_offset++;
token_node->str.length = (html_offset + tree->global_offset);
}
else {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_DOUBLE_ESCAPED;
}
return html_offset;
}
size_t myhtml_tokenizer_state_script_data_double_escape_end(myhtml_tree_t* tree, myhtml_token_node_t* token_node, const char* html, size_t html_offset, size_t html_size)
{
while(html_offset < html_size)
{
if(myhtml_whithspace(html[html_offset], ==, ||) || html[html_offset] == '/' || html[html_offset] == '>')
{
if((html_offset - token_node->str.length) != 6) {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_DOUBLE_ESCAPED;
html_offset++;
break;
}
size_t tmp_size = token_node->str.length;
const char *tem_name = myhtml_tree_incomming_buffer_make_data(tree, tmp_size, 6);
if(mycore_strncasecmp(tem_name, "script", 6) == 0) {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_ESCAPED;
}
else {
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_DOUBLE_ESCAPED;
}
html_offset++;
break;
}
else if(myhtml_ascii_char_unless_cmp(html[html_offset]))
{
myhtml_tokenizer_state_set(tree) = MyHTML_TOKENIZER_STATE_SCRIPT_DATA_DOUBLE_ESCAPED;
break;
}
html_offset++;
}
return html_offset;
}
|
350149.c | /*
* (C) Copyright 2009-2012
* Jens Scharsig <esw@bus-elekronik.de>
* BuS Elektronik GmbH & Co. KG
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <config.h>
#include <common.h>
#include <linux/sizes.h>
#include <asm/io.h>
#include <asm/gpio.h>
#include <asm/arch/hardware.h>
#include <asm/arch/clk.h>
#include <asm/arch/at91_matrix.h>
#include <asm/arch/at91sam9_smc.h>
#include <asm/arch/at91_pmc.h>
#include <asm/arch/at91_pio.h>
#include <asm/arch/at91sam9263.h>
#include <asm/arch/gpio.h>
#include <asm/arch/at91_common.h>
#include <lcd.h>
#include <i2c.h>
#include <atmel_lcdc.h>
#if defined(CONFIG_RESET_PHY_R) && defined(CONFIG_MACB)
#include <net.h>
#endif
#include <netdev.h>
DECLARE_GLOBAL_DATA_PTR;
#ifdef CONFIG_CMD_NAND
static void vl_ma2sc_nand_hw_init(void)
{
unsigned long csa;
at91_smc_t *smc = (at91_smc_t *) ATMEL_BASE_SMC0;
at91_matrix_t *matrix = (at91_matrix_t *) ATMEL_BASE_MATRIX;
at91_pmc_t *pmc = (at91_pmc_t *) ATMEL_BASE_PMC;
at91_set_pio_output(AT91_PIO_PORTA, 13, 1); /* CAN_TX -> H */
at91_set_pio_output(AT91_PIO_PORTA, 12, 1); /* CAN_STB -> H */
at91_set_pio_output(AT91_PIO_PORTA, 11, 1); /* CAN_EN -> H */
/* Enable CS3 */
csa = readl(&matrix->csa[0]) | AT91_MATRIX_CSA_EBI_CS3A;
writel(csa, &matrix->csa[0]);
/* Configure SMC CS3 for NAND/SmartMedia */
writel(AT91_SMC_SETUP_NWE(1) | AT91_SMC_SETUP_NCS_WR(0) |
AT91_SMC_SETUP_NRD(1) | AT91_SMC_SETUP_NCS_RD(0),
&smc->cs[3].setup);
writel(AT91_SMC_PULSE_NWE(3) | AT91_SMC_PULSE_NCS_WR(3) |
AT91_SMC_PULSE_NRD(3) | AT91_SMC_PULSE_NCS_RD(3),
&smc->cs[3].pulse);
writel(AT91_SMC_CYCLE_NWE(5) | AT91_SMC_CYCLE_NRD(5),
&smc->cs[3].cycle);
writel(AT91_SMC_MODE_RM_NRD | AT91_SMC_MODE_WM_NWE |
AT91_SMC_MODE_DBW_8 |
AT91_SMC_MODE_TDF_CYCLE(2),
&smc->cs[3].mode);
writel((1 << ATMEL_ID_PIOB) | (1 << ATMEL_ID_PIOCDE),
&pmc->pcer);
/* Configure RDY/BSY */
#ifdef CONFIG_SYS_NAND_READY_PIN
gpio_direction_input(CONFIG_SYS_NAND_READY_PIN);
#endif
/* Enable NandFlash */
gpio_direction_output(CONFIG_SYS_NAND_ENABLE_PIN, 1);
}
#endif
#ifdef CONFIG_MACB
static void vl_ma2sc_macb_hw_init(void)
{
at91_pmc_t *pmc = (at91_pmc_t *) ATMEL_BASE_PMC;
/* Enable clock */
writel(1 << ATMEL_ID_EMAC, &pmc->pcer);
at91_phy_reset();
at91_macb_hw_init();
}
#endif
#ifdef CONFIG_LCD
vidinfo_t panel_info = {
.vl_col = 320,
.vl_row = 240,
.vl_clk = 6500000,
.vl_sync = ATMEL_LCDC_INVDVAL_INVERTED |
ATMEL_LCDC_INVLINE_INVERTED |
ATMEL_LCDC_INVVD_INVERTED |
ATMEL_LCDC_INVFRAME_INVERTED,
.vl_bpix = (ATMEL_LCDC_PIXELSIZE_8 >> 5),
.vl_tft = 1,
.vl_hsync_len = 5, /* Horiz Sync Pulse Width */
.vl_left_margin = 68, /* horiz back porch */
.vl_right_margin = 20, /* horiz front porch */
.vl_vsync_len = 2, /* vert Sync Pulse Width */
.vl_upper_margin = 18, /* vert back porch */
.vl_lower_margin = 4, /* vert front porch */
.mmio = ATMEL_BASE_LCDC,
};
void lcd_enable(void)
{
}
void lcd_disable(void)
{
}
static void vl_ma2sc_lcd_hw_init(void)
{
at91_pmc_t *pmc = (at91_pmc_t *) ATMEL_BASE_PMC;
at91_set_a_periph(AT91_PIO_PORTC, 0, 0); /* LCDVSYNC */
at91_set_a_periph(AT91_PIO_PORTC, 1, 0); /* LCDHSYNC */
at91_set_a_periph(AT91_PIO_PORTC, 2, 0); /* LCDDOTCK */
at91_set_a_periph(AT91_PIO_PORTC, 3, 0); /* LCDDEN */
at91_set_b_periph(AT91_PIO_PORTB, 9, 0); /* LCDCC */
at91_set_a_periph(AT91_PIO_PORTC, 4, 0); /* LCDD0 */
at91_set_a_periph(AT91_PIO_PORTC, 5, 0); /* LCDD1 */
at91_set_a_periph(AT91_PIO_PORTC, 6, 0); /* LCDD2 */
at91_set_a_periph(AT91_PIO_PORTC, 7, 0); /* LCDD3 */
at91_set_a_periph(AT91_PIO_PORTC, 8, 0); /* LCDD4 */
at91_set_a_periph(AT91_PIO_PORTC, 9, 0); /* LCDD5 */
at91_set_a_periph(AT91_PIO_PORTC, 10, 0); /* LCDD6 */
at91_set_a_periph(AT91_PIO_PORTC, 11, 0); /* LCDD7 */
at91_set_a_periph(AT91_PIO_PORTC, 13, 0); /* LCDD9 */
at91_set_a_periph(AT91_PIO_PORTC, 14, 0); /* LCDD10 */
at91_set_a_periph(AT91_PIO_PORTC, 15, 0); /* LCDD11 */
at91_set_a_periph(AT91_PIO_PORTC, 16, 0); /* LCDD12 */
at91_set_b_periph(AT91_PIO_PORTC, 12, 0); /* LCDD13 */
at91_set_a_periph(AT91_PIO_PORTC, 18, 0); /* LCDD14 */
at91_set_a_periph(AT91_PIO_PORTC, 19, 0); /* LCDD15 */
at91_set_a_periph(AT91_PIO_PORTC, 20, 0); /* LCDD16 */
at91_set_a_periph(AT91_PIO_PORTC, 21, 0); /* LCDD17 */
at91_set_a_periph(AT91_PIO_PORTC, 22, 0); /* LCDD18 */
at91_set_a_periph(AT91_PIO_PORTC, 23, 0); /* LCDD19 */
at91_set_a_periph(AT91_PIO_PORTC, 24, 0); /* LCDD20 */
at91_set_b_periph(AT91_PIO_PORTC, 17, 0); /* LCDD21 */
at91_set_a_periph(AT91_PIO_PORTC, 26, 0); /* LCDD22 */
at91_set_a_periph(AT91_PIO_PORTC, 27, 0); /* LCDD23 */
at91_set_pio_output(AT91_PIO_PORTE, 0, 0); /* LCD QXH */
at91_set_pio_output(AT91_PIO_PORTE, 2, 0); /* LCD SHUT */
at91_set_pio_output(AT91_PIO_PORTE, 3, 1); /* LCD TopBottom */
at91_set_pio_output(AT91_PIO_PORTE, 4, 0); /* LCD REV */
at91_set_pio_output(AT91_PIO_PORTE, 5, 1); /* LCD RightLeft */
at91_set_pio_output(AT91_PIO_PORTE, 6, 0); /* LCD Color Mode CM */
at91_set_pio_output(AT91_PIO_PORTE, 7, 0); /* LCD BGR */
at91_set_pio_output(AT91_PIO_PORTB, 9, 0); /* LCD CC */
writel(1 << ATMEL_ID_LCDC, &pmc->pcer);
gd->fb_base = ATMEL_BASE_SRAM0;
}
#endif /* Config LCD */
#ifdef CONFIG_BOARD_EARLY_INIT_F
int board_early_init_f(void)
{
struct at91_pmc *pmc = (struct at91_pmc *)ATMEL_BASE_PMC;
/* Enable clocks for all PIOs */
writel((1 << ATMEL_ID_PIOA) | (1 << ATMEL_ID_PIOB) |
(1 << ATMEL_ID_PIOCDE),
&pmc->pcer);
at91_seriald_hw_init();
return 0;
}
#endif
int board_init(void)
{
at91_smc_t *smc = (at91_smc_t *) ATMEL_BASE_SMC0;
at91_pio_t *pio = (at91_pio_t *) ATMEL_BASE_PIO;
u32 pin;
pin = 0x1F000001;
writel(pin, &pio->pioa.idr);
writel(pin, &pio->pioa.pudr);
writel(pin, &pio->pioa.per);
writel(pin, &pio->pioa.oer);
writel(pin, &pio->pioa.sodr);
writel((1 << 25), &pio->pioa.codr);
pin = 0x1F000100;
writel(pin, &pio->piob.idr);
writel(pin, &pio->piob.pudr);
writel(pin, &pio->piob.per);
writel(pin, &pio->piob.oer);
writel(pin, &pio->piob.codr);
writel((1 << 24), &pio->piob.sodr);
pin = 0x40000000; /* Pullup DRxD enable */
writel(pin, &pio->pioc.puer);
pin = 0x0000000F; /* HW version as input */
writel(pin, &pio->piod.idr);
writel(pin, &pio->piod.puer);
writel(pin, &pio->piod.per);
writel(pin, &pio->piod.odr);
writel(pin, &pio->piod.owdr);
gd->bd->bi_arch_number = MACH_TYPE_VL_MA2SC;
/* address of boot parameters */
gd->bd->bi_boot_params = CONFIG_SYS_SDRAM_BASE + 0x100;
writel(CONFIG_SYS_SMC0_MODE0_VAL, &smc->cs[0].setup);
writel(CONFIG_SYS_SMC0_CYCLE0_VAL, &smc->cs[0].cycle);
writel(CONFIG_SYS_SMC0_PULSE0_VAL, &smc->cs[0].pulse);
writel(CONFIG_SYS_SMC0_SETUP0_VAL, &smc->cs[0].setup);
#ifdef CONFIG_CMD_NAND
vl_ma2sc_nand_hw_init();
#endif
#ifdef CONFIG_MACB
vl_ma2sc_macb_hw_init();
#endif
#ifdef CONFIG_USB_OHCI_NEW
at91_uhp_hw_init();
#endif
#ifdef CONFIG_LCD
vl_ma2sc_lcd_hw_init();
#endif
return 0;
}
#ifdef CONFIG_MISC_INIT_R
int misc_init_r(void)
{
uchar buffer[8];
at91_pio_t *pio = (at91_pio_t *) ATMEL_BASE_PIO;
u32 pin;
buffer[0] = 0x04;
buffer[1] = 0x00;
if (i2c_write(0x68, 0x0E, 1, buffer, 2) != 0)
puts("error reseting rtc clock\n\0");
/* read hardware version */
pin = (readl(&pio->piod.pdsr) & 0x0F) + 0x44;
printf("Board: revision %c\n", pin);
buffer[0] = pin;
buffer[1] = 0;
setenv("revision", (char *) buffer);
pin = 0x40000000; /* Pullup DRxD enable */
writel(pin, &pio->pioc.puer);
return 0;
}
#endif
int dram_init(void)
{
gd->ram_size = get_ram_size((long *) CONFIG_SYS_SDRAM_BASE,
CONFIG_SYS_SDRAM_SIZE);
return 0;
}
#ifdef CONFIG_RESET_PHY_R
void reset_phy(void)
{
#ifdef CONFIG_MACB
/*
* Initialize ethernet HW addr prior to starting Linux,
* needed for nfsroot
*/
eth_init(gd->bd);
#endif
}
#endif
int board_eth_init(bd_t *bis)
{
int rc = 0;
#ifdef CONFIG_MACB
rc = macb_eth_initialize(0, (void *) ATMEL_BASE_EMAC, 0x01);
#endif
return rc;
}
#ifdef CONFIG_SYS_I2C_SOFT
void i2c_init_board(void)
{
u32 pin;
at91_pmc_t *pmc = (at91_pmc_t *) ATMEL_BASE_PMC;
at91_pio_t *pio = (at91_pio_t *) ATMEL_BASE_PIO;
u8 sda = (1<<4);
u8 scl = (1<<5);
writel(1 << ATMEL_ID_PIOB, &pmc->pcer);
pin = sda | scl;
writel(pin, &pio->piob.idr); /* Disable interrupt */
writel(pin, &pio->piob.pudr);
writel(pin, &pio->piob.per);
writel(pin, &pio->piob.oer);
writel(pin, &pio->piob.sodr);
}
#endif
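/* Service the (presumably external) watchdog by toggling PA0. */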
void watchdog_reset(void)
{
at91_pio_t *pio = (at91_pio_t *) ATMEL_BASE_PIO;
u32 pin = 0x1; /* PA0 */
if ((readl(&pio->pioa.odsr) & pin) > 0)
writel(pin, &pio->pioa.codr);
else
writel(pin, &pio->pioa.sodr);
}
void enable_caches(void)
{
#ifndef CONFIG_SYS_DCACHE_OFF
dcache_enable();
#endif
}
/*---------------------------------------------------------------------------*/
int do_ledtest(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
int rcode = 1;
int row;
int col;
u32 pinz;
u32 pins;
at91_pio_t *pio = (at91_pio_t *) ATMEL_BASE_PIO;
at91_set_pio_output(AT91_PIO_PORTB, 8, 0); /* LCD DIM */
pins = 0x1F000000;
writel(pins, &pio->pioa.idr);
writel(pins, &pio->pioa.pudr);
writel(pins, &pio->pioa.per);
writel(pins, &pio->pioa.oer);
writel(pins, &pio->pioa.sodr);
pinz = 0x1F000000;
writel(pinz, &pio->piob.idr);
writel(pinz, &pio->piob.pudr);
writel(pinz, &pio->piob.per);
writel(pinz, &pio->piob.oer);
writel(pinz, &pio->piob.sodr);
for (row = 0; row < 5; row++) {
for (col = 0; col < 5; col++) {
writel((0x01000000 << col), &pio->piob.sodr);
writel((0x01000000 << row), &pio->pioa.codr);
printf("LED Test %d x %d\n", row, col);
udelay(1000000);
writel(pinz, &pio->piob.codr);
writel(pins, &pio->pioa.sodr);
}
}
return rcode;
}
void poweroff(void)
{
watchdog_reset();
at91_set_pio_output(AT91_PIO_PORTA, 13, 1); /* CAN_TX -> H */
udelay(100);
at91_set_pio_output(AT91_PIO_PORTA, 12, 0); /* CAN_STB -> L */
udelay(100);
at91_set_pio_output(AT91_PIO_PORTA, 11, 0); /* CAN_EN -> L */
udelay(100);
while (1)
watchdog_reset();
}
int do_poweroff(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
int rcode = 1;
poweroff();
return rcode;
}
int do_beep(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
int i;
u32 freq;
u32 durate;
int rcode = 1;
freq = 1000;
durate = 2;
switch (argc) {
case 3:
durate = simple_strtoul(argv[2], NULL, 10);
case 2:
freq = simple_strtoul(argv[1], NULL, 10);
case 1:
break;
default:
cmd_usage(cmdtp);
rcode = 1;
break;
}
durate = durate * freq;
freq = 500000 / freq;
for (i = 0; i < durate; i++) {
at91_set_pio_output(AT91_PIO_PORTB, 29, 1); /* Sound On*/
udelay(freq);
at91_set_pio_output(AT91_PIO_PORTB, 29, 0); /* Sound Off*/
udelay(freq);
}
at91_set_pio_output(AT91_PIO_PORTB, 29, 0); /* Sound Off*/
return rcode;
}
int do_keytest(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
int rcode = 1;
int row;
u32 col;
u32 pinz;
u32 pins;
at91_pio_t *pio = (at91_pio_t *) ATMEL_BASE_PIO;
at91_pmc_t *pmc = (at91_pmc_t *) ATMEL_BASE_PMC;
writel((1 << ATMEL_ID_PIOA), &pmc->pcer);
pins = 0x001F0000;
writel(pins, &pio->pioa.idr);
writel(pins, &pio->pioa.pudr);
writel(pins, &pio->pioa.per);
writel(pins, &pio->pioa.odr);
pinz = 0x000F0000;
writel(pinz, &pio->piob.idr);
writel(pinz, &pio->piob.pudr);
writel(pinz, &pio->piob.per);
writel(pinz, &pio->piob.oer);
writel(pinz, &pio->piob.codr);
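/*
* Scan the key matrix: drive each row line on PIOB in turn and sample
* the four column inputs on PIOA, building a 16-bit bitmap of pressed
* keys (the row/column assignment is assumed from the pin masks above).
*/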
while (1) {
col = 0;
for (row = 0; row < 4; row++) {
writel((0x00010000 << row), &pio->piob.sodr);
udelay(10000);
col <<= 4;
col |= ((readl(&pio->pioa.pdsr) >> 16) & 0xF) ^ 0xF ;
writel(pinz, &pio->piob.codr);
}
printf("Matix: ");
for (row = 0; row < 16; row++) {
printf("%1.1d", col & 1);
col >>= 1;
}
printf(" SP %d\r ",
1 ^ (1 & (readl(&pio->piob.pdsr) >> 20)));
if ((1 & (readl(&pio->pioa.pdsr) >> 1)) == 0) {
/* SHUTDOWN */
row = 0;
while (row < 1000) {
if ((1 & (readl(&pio->pioa.pdsr) >> 1)) == 0)
row++;
udelay(100);
}
udelay(100000);
row = 0;
while (row < 1000) {
if ((1 & (readl(&pio->pioa.pdsr) >> 1)) > 0) {
row++;
udelay(1000);
}
}
poweroff();
while (1)
;
}
}
return rcode;
}
/*****************************************************************************/
U_BOOT_CMD(
ledtest, 1, 0, do_ledtest,
"test ledmatrix",
"\n"
);
U_BOOT_CMD(
keytest, 1, 0, do_keytest,
"test keymatix and special keys, poweroff on pressing ON key",
"\n"
);
U_BOOT_CMD(
poweroff, 1, 0, do_poweroff,
"power off",
"\n"
);
U_BOOT_CMD(
beep, 3, 0, do_beep,
"[freq [duration]]",
"freq frequence of beep\nduration duration of beep\n"
);
/*****************************************************************************/
|
571171.c | /* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE392_Failure_To_Report_Error_In_Status_Code__fail_to_set_error_code_14.c
Label Definition File: CWE392_Failure_To_Report_Error_In_Status_Code.label.xml
Template File: point-flaw-14.tmpl.c
*/
/*
* @description
* CWE: 392 Failure to Report Error in Status Code
* Sinks: fail_to_set_error_code
* GoodSink: Properly set the error code when an error occurs
* BadSink : Fail to set the error code when an error occurs
* Flow Variant: 14 Control flow: if(global_five==5) and if(global_five!=5)
*
* */
#include "std_testcase.h"
enum
{
E_OK = 0,
E_FILE_NOT_OPENED = 1,
E_OTHER = 2
};
#define INPUT_SZ 10
static int helper_bad(int *error_code)
{
FILE *file;
*error_code = E_OK;
file = fopen("myfile.txt", "r+");
/* FLAW: This function returns failure, but does not set the error code appropriately */
if (file == NULL) return 0;
fclose(file);
return 1;
}
static int helper_good(int *error_code)
{
FILE *file;
*error_code = E_OK;
file = fopen("myfile.txt", "r+");
/* FIX: Set the error code properly when returning failure */
if (file == NULL)
{
*error_code = E_FILE_NOT_OPENED;
return 0;
}
fclose(file);
return 1;
}
#ifndef OMITBAD
void CWE392_Failure_To_Report_Error_In_Status_Code__fail_to_set_error_code_14_bad()
{
if(global_five==5)
{
{
int error_code;
if (helper_bad(&error_code))
{
printLine("Success");
}
else
{
printLine("Failure");
}
if (error_code == E_OK)
{
printLine("Other success");
}
}
}
else
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
{
int error_code;
if (helper_good(&error_code))
{
printLine("Success");
}
else
{
printLine("Failure");
}
if (error_code == E_OK)
{
printLine("Other success");
}
}
}
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* good1() uses if(global_five!=5) instead of if(global_five==5) */
static void good1()
{
if(global_five!=5)
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
{
int error_code;
if (helper_bad(&error_code))
{
printLine("Success");
}
else
{
printLine("Failure");
}
if (error_code == E_OK)
{
printLine("Other success");
}
}
}
else
{
{
int error_code;
if (helper_good(&error_code))
{
printLine("Success");
}
else
{
printLine("Failure");
}
if (error_code == E_OK)
{
printLine("Other success");
}
}
}
}
/* good2() reverses the bodies in the if statement */
static void good2()
{
if(global_five==5)
{
{
int error_code;
if (helper_good(&error_code))
{
printLine("Success");
}
else
{
printLine("Failure");
}
if (error_code == E_OK)
{
printLine("Other success");
}
}
}
else
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
{
int error_code;
if (helper_bad(&error_code))
{
printLine("Success");
}
else
{
printLine("Failure");
}
if (error_code == E_OK)
{
printLine("Other success");
}
}
}
}
void CWE392_Failure_To_Report_Error_In_Status_Code__fail_to_set_error_code_14_good()
{
good1();
good2();
}
#endif /* OMITGOOD */
/* Below is the main(). It is only used when building this testcase on
its own for testing or for building a binary to use in testing binary
analysis tools. It is not used when compiling all the testcases as one
application, which is how source code analysis tools are tested. */
#ifdef INCLUDEMAIN
int main(int argc, char * argv[])
{
/* seed randomness */
srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
printLine("Calling good()...");
CWE392_Failure_To_Report_Error_In_Status_Code__fail_to_set_error_code_14_good();
printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
printLine("Calling bad()...");
CWE392_Failure_To_Report_Error_In_Status_Code__fail_to_set_error_code_14_bad();
printLine("Finished bad()");
#endif /* OMITBAD */
return 0;
}
#endif
|
556637.c | #include "libft.h"
#define ABS(v) ((v) < 0 ? -(v) : (v)) /* true absolute value, so '0' + ABS(n % 10) is always a digit character */
static void aux_dix_int(long n, char *ans, int *p)
{
if (n < -9 || 9 < n)
aux_dix_int(n / 10, ans, p);
ans[(*p)++] = '0' + ABS(n % 10);
}
static void aux_dix(size_t n, char *ans, int *p)
{
if (9 < n)
aux_dix(n / 10, ans, p);
ans[(*p)++] = '0' + ABS(n % 10);
}
static void aux(size_t n, unsigned int b, char *ans, int *p)
{
const char *base = "0123456789abcdef";
if (n >= b)
aux(n / b, b, ans, p);
ans[(*p)++] = base[n % b];
}
char *ft_itoa_base_ulongmax(size_t value, int base)
{
char *ans;
int p;
if (base < 2 || 16 < base || !(ans = (char *)malloc(sizeof(char) * 65))) /* up to 64 base-2 digits + NUL */
return (NULL);
p = 0;
if (base == 10 && value < 4294967296)
aux_dix_int(value, ans, &p);
else if (base == 10 && value > 4294967296)
aux_dix((size_t)value, ans, &p);
else
aux((size_t)value, (unsigned int)base, ans, &p);
ans[p] = '\0';
return (ans);
}
|
21647.c | #include "lstm_layer.h"
#include "connected_layer.h"
#include "utils.h"
#include "cuda.h"
#include "blas.h"
#include "gemm.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
static void increment_layer(layer *l, int steps) {
int num = l->outputs * l->batch * steps;
l->output += num;
l->delta += num;
l->x += num;
l->x_norm += num;
#ifdef GPU
l->output_gpu += num;
l->delta_gpu += num;
l->x_gpu += num;
l->x_norm_gpu += num;
#endif
}
layer make_lstm_layer(int batch, int inputs, int outputs, int steps,
int batch_normalize, int adam) {
fprintf(stderr, "LSTM Layer: %d inputs, %d outputs\n", inputs, outputs);
batch = batch / steps;
layer l = { 0 };
l.batch = batch;
l.type = LSTM;
l.steps = steps;
l.inputs = inputs;
l.uf = malloc(sizeof(layer));
fprintf(stderr, "\t\t");
*(l.uf) = make_connected_layer(batch * steps, inputs, outputs, LINEAR,
batch_normalize, adam);
l.uf->batch = batch;
l.ui = malloc(sizeof(layer));
fprintf(stderr, "\t\t");
*(l.ui) = make_connected_layer(batch * steps, inputs, outputs, LINEAR,
batch_normalize, adam);
l.ui->batch = batch;
l.ug = malloc(sizeof(layer));
fprintf(stderr, "\t\t");
*(l.ug) = make_connected_layer(batch * steps, inputs, outputs, LINEAR,
batch_normalize, adam);
l.ug->batch = batch;
l.uo = malloc(sizeof(layer));
fprintf(stderr, "\t\t");
*(l.uo) = make_connected_layer(batch * steps, inputs, outputs, LINEAR,
batch_normalize, adam);
l.uo->batch = batch;
l.wf = malloc(sizeof(layer));
fprintf(stderr, "\t\t");
*(l.wf) = make_connected_layer(batch * steps, outputs, outputs, LINEAR,
batch_normalize, adam);
l.wf->batch = batch;
l.wi = malloc(sizeof(layer));
fprintf(stderr, "\t\t");
*(l.wi) = make_connected_layer(batch * steps, outputs, outputs, LINEAR,
batch_normalize, adam);
l.wi->batch = batch;
l.wg = malloc(sizeof(layer));
fprintf(stderr, "\t\t");
*(l.wg) = make_connected_layer(batch * steps, outputs, outputs, LINEAR,
batch_normalize, adam);
l.wg->batch = batch;
l.wo = malloc(sizeof(layer));
fprintf(stderr, "\t\t");
*(l.wo) = make_connected_layer(batch * steps, outputs, outputs, LINEAR,
batch_normalize, adam);
l.wo->batch = batch;
l.batch_normalize = batch_normalize;
l.outputs = outputs;
l.output = calloc(outputs * batch * steps, sizeof(real_t));
l.state = calloc(outputs * batch, sizeof(real_t));
l.forward = forward_lstm_layer;
l.backward = backward_lstm_layer; /* wire up the CPU backward pass defined below */
l.update = update_lstm_layer;
l.prev_state_cpu = calloc(batch * outputs, sizeof(real_t));
l.prev_cell_cpu = calloc(batch * outputs, sizeof(real_t));
l.cell_cpu = calloc(batch * outputs * steps, sizeof(real_t));
l.f_cpu = calloc(batch * outputs, sizeof(real_t));
l.i_cpu = calloc(batch * outputs, sizeof(real_t));
l.g_cpu = calloc(batch * outputs, sizeof(real_t));
l.o_cpu = calloc(batch * outputs, sizeof(real_t));
l.c_cpu = calloc(batch * outputs, sizeof(real_t));
l.h_cpu = calloc(batch * outputs, sizeof(real_t));
l.temp_cpu = calloc(batch * outputs, sizeof(real_t));
l.temp2_cpu = calloc(batch * outputs, sizeof(real_t));
l.temp3_cpu = calloc(batch * outputs, sizeof(real_t));
l.dc_cpu = calloc(batch * outputs, sizeof(real_t));
l.dh_cpu = calloc(batch * outputs, sizeof(real_t));
#ifdef GPU
l.forward_gpu = forward_lstm_layer_gpu;
l.backward_gpu = backward_lstm_layer_gpu;
l.update_gpu = update_lstm_layer_gpu;
l.output_gpu = cuda_make_array(0, batch * outputs * steps);
l.delta_gpu = cuda_make_array(0, batch * l.outputs * steps);
l.prev_state_gpu = cuda_make_array(0, batch * outputs);
l.prev_cell_gpu = cuda_make_array(0, batch * outputs);
l.cell_gpu = cuda_make_array(0, batch * outputs * steps);
l.f_gpu = cuda_make_array(0, batch * outputs);
l.i_gpu = cuda_make_array(0, batch * outputs);
l.g_gpu = cuda_make_array(0, batch * outputs);
l.o_gpu = cuda_make_array(0, batch * outputs);
l.c_gpu = cuda_make_array(0, batch * outputs);
l.h_gpu = cuda_make_array(0, batch * outputs);
l.temp_gpu = cuda_make_array(0, batch * outputs);
l.temp2_gpu = cuda_make_array(0, batch * outputs);
l.temp3_gpu = cuda_make_array(0, batch * outputs);
l.dc_gpu = cuda_make_array(0, batch * outputs);
l.dh_gpu = cuda_make_array(0, batch * outputs);
#ifdef CUDNN
cudnnSetTensor4dDescriptor(l.wf->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.wf->out_c, l.wf->out_h, l.wf->out_w);
cudnnSetTensor4dDescriptor(l.wi->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.wi->out_c, l.wi->out_h, l.wi->out_w);
cudnnSetTensor4dDescriptor(l.wg->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.wg->out_c, l.wg->out_h, l.wg->out_w);
cudnnSetTensor4dDescriptor(l.wo->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.wo->out_c, l.wo->out_h, l.wo->out_w);
cudnnSetTensor4dDescriptor(l.uf->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.uf->out_c, l.uf->out_h, l.uf->out_w);
cudnnSetTensor4dDescriptor(l.ui->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.ui->out_c, l.ui->out_h, l.ui->out_w);
cudnnSetTensor4dDescriptor(l.ug->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.ug->out_c, l.ug->out_h, l.ug->out_w);
cudnnSetTensor4dDescriptor(l.uo->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.uo->out_c, l.uo->out_h, l.uo->out_w);
#endif
#endif
return l;
}
void update_lstm_layer(layer l, update_args a) {
update_connected_layer(*(l.wf), a);
update_connected_layer(*(l.wi), a);
update_connected_layer(*(l.wg), a);
update_connected_layer(*(l.wo), a);
update_connected_layer(*(l.uf), a);
update_connected_layer(*(l.ui), a);
update_connected_layer(*(l.ug), a);
update_connected_layer(*(l.uo), a);
}
void forward_lstm_layer(layer l, network state) {
network s = { 0 };
s.train = state.train;
int i;
layer wf = *(l.wf);
layer wi = *(l.wi);
layer wg = *(l.wg);
layer wo = *(l.wo);
layer uf = *(l.uf);
layer ui = *(l.ui);
layer ug = *(l.ug);
layer uo = *(l.uo);
fill_cpu(l.outputs * l.batch * l.steps, 0, wf.delta, 1);
fill_cpu(l.outputs * l.batch * l.steps, 0, wi.delta, 1);
fill_cpu(l.outputs * l.batch * l.steps, 0, wg.delta, 1);
fill_cpu(l.outputs * l.batch * l.steps, 0, wo.delta, 1);
fill_cpu(l.outputs * l.batch * l.steps, 0, uf.delta, 1);
fill_cpu(l.outputs * l.batch * l.steps, 0, ui.delta, 1);
fill_cpu(l.outputs * l.batch * l.steps, 0, ug.delta, 1);
fill_cpu(l.outputs * l.batch * l.steps, 0, uo.delta, 1);
if (state.train) {
fill_cpu(l.outputs * l.batch * l.steps, 0, l.delta, 1);
}
for (i = 0; i < l.steps; ++i) {
s.input = l.h_cpu;
forward_connected_layer(wf, s);
forward_connected_layer(wi, s);
forward_connected_layer(wg, s);
forward_connected_layer(wo, s);
s.input = state.input;
forward_connected_layer(uf, s);
forward_connected_layer(ui, s);
forward_connected_layer(ug, s);
forward_connected_layer(uo, s);
copy_cpu(l.outputs * l.batch, wf.output, 1, l.f_cpu, 1);
axpy_cpu(l.outputs * l.batch, 1, uf.output, 1, l.f_cpu, 1);
copy_cpu(l.outputs * l.batch, wi.output, 1, l.i_cpu, 1);
axpy_cpu(l.outputs * l.batch, 1, ui.output, 1, l.i_cpu, 1);
copy_cpu(l.outputs * l.batch, wg.output, 1, l.g_cpu, 1);
axpy_cpu(l.outputs * l.batch, 1, ug.output, 1, l.g_cpu, 1);
copy_cpu(l.outputs * l.batch, wo.output, 1, l.o_cpu, 1);
axpy_cpu(l.outputs * l.batch, 1, uo.output, 1, l.o_cpu, 1);
activate_array(l.f_cpu, l.outputs * l.batch, LOGISTIC);
activate_array(l.i_cpu, l.outputs * l.batch, LOGISTIC);
activate_array(l.g_cpu, l.outputs * l.batch, TANH);
activate_array(l.o_cpu, l.outputs * l.batch, LOGISTIC);
copy_cpu(l.outputs * l.batch, l.i_cpu, 1, l.temp_cpu, 1);
mul_cpu(l.outputs * l.batch, l.g_cpu, 1, l.temp_cpu, 1);
mul_cpu(l.outputs * l.batch, l.f_cpu, 1, l.c_cpu, 1);
axpy_cpu(l.outputs * l.batch, 1, l.temp_cpu, 1, l.c_cpu, 1);
copy_cpu(l.outputs * l.batch, l.c_cpu, 1, l.h_cpu, 1);
activate_array(l.h_cpu, l.outputs * l.batch, TANH);
mul_cpu(l.outputs * l.batch, l.o_cpu, 1, l.h_cpu, 1);
copy_cpu(l.outputs * l.batch, l.c_cpu, 1, l.cell_cpu, 1);
copy_cpu(l.outputs * l.batch, l.h_cpu, 1, l.output, 1);
state.input += l.inputs * l.batch;
l.output += l.outputs * l.batch;
l.cell_cpu += l.outputs * l.batch;
increment_layer(&wf, 1);
increment_layer(&wi, 1);
increment_layer(&wg, 1);
increment_layer(&wo, 1);
increment_layer(&uf, 1);
increment_layer(&ui, 1);
increment_layer(&ug, 1);
increment_layer(&uo, 1);
}
}
void backward_lstm_layer(layer l, network state) {
network s = { 0 };
s.train = state.train;
int i;
layer wf = *(l.wf);
layer wi = *(l.wi);
layer wg = *(l.wg);
layer wo = *(l.wo);
layer uf = *(l.uf);
layer ui = *(l.ui);
layer ug = *(l.ug);
layer uo = *(l.uo);
increment_layer(&wf, l.steps - 1);
increment_layer(&wi, l.steps - 1);
increment_layer(&wg, l.steps - 1);
increment_layer(&wo, l.steps - 1);
increment_layer(&uf, l.steps - 1);
increment_layer(&ui, l.steps - 1);
increment_layer(&ug, l.steps - 1);
increment_layer(&uo, l.steps - 1);
state.input += l.inputs * l.batch * (l.steps - 1);
if (state.delta)
state.delta += l.inputs * l.batch * (l.steps - 1);
l.output += l.outputs * l.batch * (l.steps - 1);
l.cell_cpu += l.outputs * l.batch * (l.steps - 1);
l.delta += l.outputs * l.batch * (l.steps - 1);
for (i = l.steps - 1; i >= 0; --i) {
if (i != 0)
copy_cpu(l.outputs * l.batch, l.cell_cpu - l.outputs * l.batch, 1,
l.prev_cell_cpu, 1);
copy_cpu(l.outputs * l.batch, l.cell_cpu, 1, l.c_cpu, 1);
if (i != 0)
copy_cpu(l.outputs * l.batch, l.output - l.outputs * l.batch, 1,
l.prev_state_cpu, 1);
copy_cpu(l.outputs * l.batch, l.output, 1, l.h_cpu, 1);
l.dh_cpu = (i == 0) ? 0 : l.delta - l.outputs * l.batch;
copy_cpu(l.outputs * l.batch, wf.output, 1, l.f_cpu, 1);
axpy_cpu(l.outputs * l.batch, 1, uf.output, 1, l.f_cpu, 1);
copy_cpu(l.outputs * l.batch, wi.output, 1, l.i_cpu, 1);
axpy_cpu(l.outputs * l.batch, 1, ui.output, 1, l.i_cpu, 1);
copy_cpu(l.outputs * l.batch, wg.output, 1, l.g_cpu, 1);
axpy_cpu(l.outputs * l.batch, 1, ug.output, 1, l.g_cpu, 1);
copy_cpu(l.outputs * l.batch, wo.output, 1, l.o_cpu, 1);
axpy_cpu(l.outputs * l.batch, 1, uo.output, 1, l.o_cpu, 1);
activate_array(l.f_cpu, l.outputs * l.batch, LOGISTIC);
activate_array(l.i_cpu, l.outputs * l.batch, LOGISTIC);
activate_array(l.g_cpu, l.outputs * l.batch, TANH);
activate_array(l.o_cpu, l.outputs * l.batch, LOGISTIC);
copy_cpu(l.outputs * l.batch, l.delta, 1, l.temp3_cpu, 1);
copy_cpu(l.outputs * l.batch, l.c_cpu, 1, l.temp_cpu, 1);
activate_array(l.temp_cpu, l.outputs * l.batch, TANH);
copy_cpu(l.outputs * l.batch, l.temp3_cpu, 1, l.temp2_cpu, 1);
mul_cpu(l.outputs * l.batch, l.o_cpu, 1, l.temp2_cpu, 1);
gradient_array(l.temp_cpu, l.outputs * l.batch, TANH, l.temp2_cpu);
axpy_cpu(l.outputs * l.batch, 1, l.dc_cpu, 1, l.temp2_cpu, 1);
copy_cpu(l.outputs * l.batch, l.c_cpu, 1, l.temp_cpu, 1);
activate_array(l.temp_cpu, l.outputs * l.batch, TANH);
mul_cpu(l.outputs * l.batch, l.temp3_cpu, 1, l.temp_cpu, 1);
gradient_array(l.o_cpu, l.outputs * l.batch, LOGISTIC, l.temp_cpu);
copy_cpu(l.outputs * l.batch, l.temp_cpu, 1, wo.delta, 1);
s.input = l.prev_state_cpu;
s.delta = l.dh_cpu;
backward_connected_layer(wo, s);
copy_cpu(l.outputs * l.batch, l.temp_cpu, 1, uo.delta, 1);
s.input = state.input;
s.delta = state.delta;
backward_connected_layer(uo, s);
copy_cpu(l.outputs * l.batch, l.temp2_cpu, 1, l.temp_cpu, 1);
mul_cpu(l.outputs * l.batch, l.i_cpu, 1, l.temp_cpu, 1);
gradient_array(l.g_cpu, l.outputs * l.batch, TANH, l.temp_cpu);
copy_cpu(l.outputs * l.batch, l.temp_cpu, 1, wg.delta, 1);
s.input = l.prev_state_cpu;
s.delta = l.dh_cpu;
backward_connected_layer(wg, s);
copy_cpu(l.outputs * l.batch, l.temp_cpu, 1, ug.delta, 1);
s.input = state.input;
s.delta = state.delta;
backward_connected_layer(ug, s);
copy_cpu(l.outputs * l.batch, l.temp2_cpu, 1, l.temp_cpu, 1);
mul_cpu(l.outputs * l.batch, l.g_cpu, 1, l.temp_cpu, 1);
gradient_array(l.i_cpu, l.outputs * l.batch, LOGISTIC, l.temp_cpu);
copy_cpu(l.outputs * l.batch, l.temp_cpu, 1, wi.delta, 1);
s.input = l.prev_state_cpu;
s.delta = l.dh_cpu;
backward_connected_layer(wi, s);
copy_cpu(l.outputs * l.batch, l.temp_cpu, 1, ui.delta, 1);
s.input = state.input;
s.delta = state.delta;
backward_connected_layer(ui, s);
copy_cpu(l.outputs * l.batch, l.temp2_cpu, 1, l.temp_cpu, 1);
mul_cpu(l.outputs * l.batch, l.prev_cell_cpu, 1, l.temp_cpu, 1);
gradient_array(l.f_cpu, l.outputs * l.batch, LOGISTIC, l.temp_cpu);
copy_cpu(l.outputs * l.batch, l.temp_cpu, 1, wf.delta, 1);
s.input = l.prev_state_cpu;
s.delta = l.dh_cpu;
backward_connected_layer(wf, s);
copy_cpu(l.outputs * l.batch, l.temp_cpu, 1, uf.delta, 1);
s.input = state.input;
s.delta = state.delta;
backward_connected_layer(uf, s);
copy_cpu(l.outputs * l.batch, l.temp2_cpu, 1, l.temp_cpu, 1);
mul_cpu(l.outputs * l.batch, l.f_cpu, 1, l.temp_cpu, 1);
copy_cpu(l.outputs * l.batch, l.temp_cpu, 1, l.dc_cpu, 1);
state.input -= l.inputs * l.batch;
if (state.delta)
state.delta -= l.inputs * l.batch;
l.output -= l.outputs * l.batch;
l.cell_cpu -= l.outputs * l.batch;
l.delta -= l.outputs * l.batch;
increment_layer(&wf, -1);
increment_layer(&wi, -1);
increment_layer(&wg, -1);
increment_layer(&wo, -1);
increment_layer(&uf, -1);
increment_layer(&ui, -1);
increment_layer(&ug, -1);
increment_layer(&uo, -1);
}
}
#ifdef GPU
void update_lstm_layer_gpu(layer l, update_args a, cudaStream_t st) {
update_connected_layer_gpu(*(l.wf), a, st);
update_connected_layer_gpu(*(l.wi), a, st);
update_connected_layer_gpu(*(l.wg), a, st);
update_connected_layer_gpu(*(l.wo), a, st);
update_connected_layer_gpu(*(l.uf), a, st);
update_connected_layer_gpu(*(l.ui), a, st);
update_connected_layer_gpu(*(l.ug), a, st);
update_connected_layer_gpu(*(l.uo), a, st);
}
void forward_lstm_layer_gpu(layer l, network state) {
network s = { 0 };
s.train = state.train;
int i;
layer wf = *(l.wf);
layer wi = *(l.wi);
layer wg = *(l.wg);
layer wo = *(l.wo);
layer uf = *(l.uf);
layer ui = *(l.ui);
layer ug = *(l.ug);
layer uo = *(l.uo);
fill_gpu(l.outputs * l.batch * l.steps, 0, wf.delta_gpu, 1, state.st);
fill_gpu(l.outputs * l.batch * l.steps, 0, wi.delta_gpu, 1, state.st);
fill_gpu(l.outputs * l.batch * l.steps, 0, wg.delta_gpu, 1, state.st);
fill_gpu(l.outputs * l.batch * l.steps, 0, wo.delta_gpu, 1, state.st);
fill_gpu(l.outputs * l.batch * l.steps, 0, uf.delta_gpu, 1, state.st);
fill_gpu(l.outputs * l.batch * l.steps, 0, ui.delta_gpu, 1, state.st);
fill_gpu(l.outputs * l.batch * l.steps, 0, ug.delta_gpu, 1, state.st);
fill_gpu(l.outputs * l.batch * l.steps, 0, uo.delta_gpu, 1, state.st);
if (state.train) {
fill_gpu(l.outputs * l.batch * l.steps, 0, l.delta_gpu, 1, state.st);
}
for (i = 0; i < l.steps; ++i) {
s.input_gpu = l.h_gpu;
forward_connected_layer_gpu(wf, s);
forward_connected_layer_gpu(wi, s);
forward_connected_layer_gpu(wg, s);
forward_connected_layer_gpu(wo, s);
s.input_gpu = state.input_gpu;
forward_connected_layer_gpu(uf, s);
forward_connected_layer_gpu(ui, s);
forward_connected_layer_gpu(ug, s);
forward_connected_layer_gpu(uo, s);
copy_gpu(l.outputs * l.batch, wf.output_gpu, 1, l.f_gpu, 1, state.st);
axpy_gpu(l.outputs * l.batch, 1, uf.output_gpu, 1, l.f_gpu, 1, state.st);
copy_gpu(l.outputs * l.batch, wi.output_gpu, 1, l.i_gpu, 1, state.st);
axpy_gpu(l.outputs * l.batch, 1, ui.output_gpu, 1, l.i_gpu, 1, state.st);
copy_gpu(l.outputs * l.batch, wg.output_gpu, 1, l.g_gpu, 1, state.st);
axpy_gpu(l.outputs * l.batch, 1, ug.output_gpu, 1, l.g_gpu, 1, state.st);
copy_gpu(l.outputs * l.batch, wo.output_gpu, 1, l.o_gpu, 1, state.st);
axpy_gpu(l.outputs * l.batch, 1, uo.output_gpu, 1, l.o_gpu, 1, state.st);
activate_array_gpu(l.f_gpu, l.outputs * l.batch, LOGISTIC, state.st);
activate_array_gpu(l.i_gpu, l.outputs * l.batch, LOGISTIC, state.st);
activate_array_gpu(l.g_gpu, l.outputs * l.batch, TANH, state.st);
activate_array_gpu(l.o_gpu, l.outputs * l.batch, LOGISTIC, state.st);
copy_gpu(l.outputs * l.batch, l.i_gpu, 1, l.temp_gpu, 1, state.st);
mul_gpu(l.outputs * l.batch, l.g_gpu, 1, l.temp_gpu, 1, state.st);
mul_gpu(l.outputs * l.batch, l.f_gpu, 1, l.c_gpu, 1, state.st);
axpy_gpu(l.outputs * l.batch, 1, l.temp_gpu, 1, l.c_gpu, 1, state.st);
copy_gpu(l.outputs * l.batch, l.c_gpu, 1, l.h_gpu, 1, state.st);
activate_array_gpu(l.h_gpu, l.outputs * l.batch, TANH, state.st);
mul_gpu(l.outputs * l.batch, l.o_gpu, 1, l.h_gpu, 1, state.st);
copy_gpu(l.outputs * l.batch, l.c_gpu, 1, l.cell_gpu, 1, state.st);
copy_gpu(l.outputs * l.batch, l.h_gpu, 1, l.output_gpu, 1, state.st);
state.input_gpu += l.inputs * l.batch;
l.output_gpu += l.outputs * l.batch;
l.cell_gpu += l.outputs * l.batch;
increment_layer(&wf, 1);
increment_layer(&wi, 1);
increment_layer(&wg, 1);
increment_layer(&wo, 1);
increment_layer(&uf, 1);
increment_layer(&ui, 1);
increment_layer(&ug, 1);
increment_layer(&uo, 1);
}
}
void backward_lstm_layer_gpu(layer l, network state) {
network s = { 0 };
s.train = state.train;
int i;
layer wf = *(l.wf);
layer wi = *(l.wi);
layer wg = *(l.wg);
layer wo = *(l.wo);
layer uf = *(l.uf);
layer ui = *(l.ui);
layer ug = *(l.ug);
layer uo = *(l.uo);
increment_layer(&wf, l.steps - 1);
increment_layer(&wi, l.steps - 1);
increment_layer(&wg, l.steps - 1);
increment_layer(&wo, l.steps - 1);
increment_layer(&uf, l.steps - 1);
increment_layer(&ui, l.steps - 1);
increment_layer(&ug, l.steps - 1);
increment_layer(&uo, l.steps - 1);
state.input_gpu += l.inputs * l.batch * (l.steps - 1);
if (state.delta_gpu)
state.delta_gpu += l.inputs * l.batch * (l.steps - 1);
l.output_gpu += l.outputs * l.batch * (l.steps - 1);
l.cell_gpu += l.outputs * l.batch * (l.steps - 1);
l.delta_gpu += l.outputs * l.batch * (l.steps - 1);
for (i = l.steps - 1; i >= 0; --i) {
if (i != 0)
copy_gpu(l.outputs * l.batch, l.cell_gpu - l.outputs * l.batch, 1,
l.prev_cell_gpu, 1, state.st);
copy_gpu(l.outputs * l.batch, l.cell_gpu, 1, l.c_gpu, 1, state.st);
if (i != 0)
copy_gpu(l.outputs * l.batch, l.output_gpu - l.outputs * l.batch, 1,
l.prev_state_gpu, 1, state.st);
copy_gpu(l.outputs * l.batch, l.output_gpu, 1, l.h_gpu, 1, state.st);
l.dh_gpu = (i == 0) ? 0 : l.delta_gpu - l.outputs * l.batch;
copy_gpu(l.outputs * l.batch, wf.output_gpu, 1, l.f_gpu, 1, state.st);
axpy_gpu(l.outputs * l.batch, 1, uf.output_gpu, 1, l.f_gpu, 1, state.st);
copy_gpu(l.outputs * l.batch, wi.output_gpu, 1, l.i_gpu, 1, state.st);
axpy_gpu(l.outputs * l.batch, 1, ui.output_gpu, 1, l.i_gpu, 1, state.st);
copy_gpu(l.outputs * l.batch, wg.output_gpu, 1, l.g_gpu, 1, state.st);
axpy_gpu(l.outputs * l.batch, 1, ug.output_gpu, 1, l.g_gpu, 1, state.st);
copy_gpu(l.outputs * l.batch, wo.output_gpu, 1, l.o_gpu, 1, state.st);
axpy_gpu(l.outputs * l.batch, 1, uo.output_gpu, 1, l.o_gpu, 1, state.st);
activate_array_gpu(l.f_gpu, l.outputs * l.batch, LOGISTIC, state.st);
activate_array_gpu(l.i_gpu, l.outputs * l.batch, LOGISTIC, state.st);
activate_array_gpu(l.g_gpu, l.outputs * l.batch, TANH, state.st);
activate_array_gpu(l.o_gpu, l.outputs * l.batch, LOGISTIC, state.st);
copy_gpu(l.outputs * l.batch, l.delta_gpu, 1, l.temp3_gpu, 1, state.st);
copy_gpu(l.outputs * l.batch, l.c_gpu, 1, l.temp_gpu, 1, state.st);
activate_array_gpu(l.temp_gpu, l.outputs * l.batch, TANH, state.st);
copy_gpu(l.outputs * l.batch, l.temp3_gpu, 1, l.temp2_gpu, 1, state.st);
mul_gpu(l.outputs * l.batch, l.o_gpu, 1, l.temp2_gpu, 1, state.st);
gradient_array_gpu(l.temp_gpu, l.outputs * l.batch, TANH, l.temp2_gpu, state.st);
axpy_gpu(l.outputs * l.batch, 1, l.dc_gpu, 1, l.temp2_gpu, 1, state.st);
copy_gpu(l.outputs * l.batch, l.c_gpu, 1, l.temp_gpu, 1, state.st);
activate_array_gpu(l.temp_gpu, l.outputs * l.batch, TANH, state.st);
mul_gpu(l.outputs * l.batch, l.temp3_gpu, 1, l.temp_gpu, 1, state.st);
gradient_array_gpu(l.o_gpu, l.outputs * l.batch, LOGISTIC, l.temp_gpu, state.st);
copy_gpu(l.outputs * l.batch, l.temp_gpu, 1, wo.delta_gpu, 1, state.st);
s.input_gpu = l.prev_state_gpu;
s.delta_gpu = l.dh_gpu;
backward_connected_layer_gpu(wo, s);
copy_gpu(l.outputs * l.batch, l.temp_gpu, 1, uo.delta_gpu, 1, state.st);
s.input_gpu = state.input_gpu;
s.delta_gpu = state.delta_gpu;
backward_connected_layer_gpu(uo, s);
copy_gpu(l.outputs * l.batch, l.temp2_gpu, 1, l.temp_gpu, 1, state.st);
mul_gpu(l.outputs * l.batch, l.i_gpu, 1, l.temp_gpu, 1, state.st);
gradient_array_gpu(l.g_gpu, l.outputs * l.batch, TANH, l.temp_gpu, state.st);
copy_gpu(l.outputs * l.batch, l.temp_gpu, 1, wg.delta_gpu, 1, state.st);
s.input_gpu = l.prev_state_gpu;
s.delta_gpu = l.dh_gpu;
backward_connected_layer_gpu(wg, s);
copy_gpu(l.outputs * l.batch, l.temp_gpu, 1, ug.delta_gpu, 1, state.st);
s.input_gpu = state.input_gpu;
s.delta_gpu = state.delta_gpu;
backward_connected_layer_gpu(ug, s);
copy_gpu(l.outputs * l.batch, l.temp2_gpu, 1, l.temp_gpu, 1, state.st);
mul_gpu(l.outputs * l.batch, l.g_gpu, 1, l.temp_gpu, 1, state.st);
gradient_array_gpu(l.i_gpu, l.outputs * l.batch, LOGISTIC, l.temp_gpu, state.st);
copy_gpu(l.outputs * l.batch, l.temp_gpu, 1, wi.delta_gpu, 1, state.st);
s.input_gpu = l.prev_state_gpu;
s.delta_gpu = l.dh_gpu;
backward_connected_layer_gpu(wi, s);
copy_gpu(l.outputs * l.batch, l.temp_gpu, 1, ui.delta_gpu, 1, state.st);
s.input_gpu = state.input_gpu;
s.delta_gpu = state.delta_gpu;
backward_connected_layer_gpu(ui, s);
copy_gpu(l.outputs * l.batch, l.temp2_gpu, 1, l.temp_gpu, 1, state.st);
mul_gpu(l.outputs * l.batch, l.prev_cell_gpu, 1, l.temp_gpu, 1, state.st);
gradient_array_gpu(l.f_gpu, l.outputs * l.batch, LOGISTIC, l.temp_gpu, state.st);
copy_gpu(l.outputs * l.batch, l.temp_gpu, 1, wf.delta_gpu, 1, state.st);
s.input_gpu = l.prev_state_gpu;
s.delta_gpu = l.dh_gpu;
backward_connected_layer_gpu(wf, s);
copy_gpu(l.outputs * l.batch, l.temp_gpu, 1, uf.delta_gpu, 1, state.st);
s.input_gpu = state.input_gpu;
s.delta_gpu = state.delta_gpu;
backward_connected_layer_gpu(uf, s);
copy_gpu(l.outputs * l.batch, l.temp2_gpu, 1, l.temp_gpu, 1, state.st);
mul_gpu(l.outputs * l.batch, l.f_gpu, 1, l.temp_gpu, 1, state.st);
copy_gpu(l.outputs * l.batch, l.temp_gpu, 1, l.dc_gpu, 1, state.st);
state.input_gpu -= l.inputs * l.batch;
if (state.delta_gpu)
state.delta_gpu -= l.inputs * l.batch;
l.output_gpu -= l.outputs * l.batch;
l.cell_gpu -= l.outputs * l.batch;
l.delta_gpu -= l.outputs * l.batch;
increment_layer(&wf, -1);
increment_layer(&wi, -1);
increment_layer(&wg, -1);
increment_layer(&wo, -1);
increment_layer(&uf, -1);
increment_layer(&ui, -1);
increment_layer(&ug, -1);
increment_layer(&uo, -1);
}
}
#endif
|
822481.c | /* An include literally copies and pastes the contents of the indicated file.
* A .h file is called a "header" file for the code.
* Usually, its primary purpose is to specify type signatures so all code below
* knows what the correct types of all functions and variables are even before
* their definitions appear. This prevents ordering issues during compilation.
*/
#include "factorial.h"
/* Definition for "factorial"
* Note: the type must match the type specified within the header file above
*/
int factorial(int n) {
/* By convention in C (and as required before C99), all local variables are
* declared at the top of each function. This includes all iteration dummy
* variables. */
int fac = n;
if ( n==0 ){
return 1;
}
while (n > 1) {
/* Note: The shorthand syntax
* var-- reads the contents of an integer variable, then decrements
* --var decrements the contents of an integer variable, then reads it
*/
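/* Worked example (added for illustration): if n is 5 at this point, the
* "n--" below leaves n == 4 and the next line computes fac = 5 * 4; the
* combined form "fac *= --n;" would give the same result in one statement. */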
n--;
// a *= b is equivalent to a = a * b
fac *= n;
}
/* The type of the returned value must match the type indicated before the
* function name above (here "int")
*/
return fac;
}
|
716465.c | /* packet-jpeg.c
*
* Routines for RFC 2435 JPEG dissection
*
* Copyright 2006
* Erwin Rol <erwin@erwinrol.com>
* Copyright 2001,
* Francisco Javier Cabello Torres, <fjcabello@vtools.es>
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <gerald@wireshark.org>
* Copyright 1998 Gerald Combs
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#define NEW_PROTO_TREE_API
#include "config.h"
#include <epan/packet.h>
#include <epan/rtp_pt.h>
#include "packet-ber.h"
void proto_register_jpeg(void);
void proto_reg_handoff_jpeg(void);
static dissector_handle_t jpeg_handle;
static header_field_info *hfi_jpeg = NULL;
#define JPEG_HFI_INIT HFI_INIT(proto_jpeg)
/* JPEG header fields */
static header_field_info hfi_rtp_jpeg_main_hdr JPEG_HFI_INIT = {
"Main Header",
"jpeg.main_hdr",
FT_NONE, BASE_NONE, NULL, 0,
NULL, HFILL
};
static header_field_info hfi_rtp_jpeg_main_hdr_ts JPEG_HFI_INIT = {
"Type Specific",
"jpeg.main_hdr.ts",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL
};
static header_field_info hfi_rtp_jpeg_main_hdr_offs JPEG_HFI_INIT = {
"Fragment Offset",
"jpeg.main_hdr.offset",
FT_UINT24, BASE_DEC, NULL, 0,
NULL, HFILL
};
static header_field_info hfi_rtp_jpeg_main_hdr_type JPEG_HFI_INIT = {
"Type",
"jpeg.main_hdr.type",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL
};
static header_field_info hfi_rtp_jpeg_main_hdr_q JPEG_HFI_INIT = {
"Q",
"jpeg.main_hdr.q",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL
};
static header_field_info hfi_rtp_jpeg_main_hdr_width JPEG_HFI_INIT = {
"Width",
"jpeg.main_hdr.width",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL
};
static header_field_info hfi_rtp_jpeg_main_hdr_height JPEG_HFI_INIT = {
"Height",
"jpeg.main_hdr.height",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL
};
static header_field_info hfi_rtp_jpeg_restart_hdr JPEG_HFI_INIT = {
"Restart Marker Header",
"jpeg.restart_hdr",
FT_NONE, BASE_NONE, NULL, 0,
NULL, HFILL
};
static header_field_info hfi_rtp_jpeg_restart_hdr_interval JPEG_HFI_INIT = {
"Restart Interval",
"jpeg.restart_hdr.interval",
FT_UINT16, BASE_DEC, NULL, 0,
NULL, HFILL
};
static header_field_info hfi_rtp_jpeg_restart_hdr_f JPEG_HFI_INIT = {
"F",
"jpeg.restart_hdr.f",
FT_UINT16, BASE_DEC, NULL, 0x8000,
NULL, HFILL
};
static header_field_info hfi_rtp_jpeg_restart_hdr_l JPEG_HFI_INIT = {
"L",
"jpeg.restart_hdr.l",
FT_UINT16, BASE_DEC, NULL, 0x4000,
NULL, HFILL
};
static header_field_info hfi_rtp_jpeg_restart_hdr_count JPEG_HFI_INIT = {
"Restart Count",
"jpeg.restart_hdr.count",
FT_UINT16, BASE_DEC, NULL, 0x3FFF,
NULL, HFILL
};
static header_field_info hfi_rtp_jpeg_qtable_hdr JPEG_HFI_INIT = {
"Quantization Table Header",
"jpeg.qtable_hdr",
FT_NONE, BASE_NONE, NULL, 0,
NULL, HFILL
};
static header_field_info hfi_rtp_jpeg_qtable_hdr_mbz JPEG_HFI_INIT = {
"MBZ",
"jpeg.qtable_hdr.mbz",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL
};
static header_field_info hfi_rtp_jpeg_qtable_hdr_prec JPEG_HFI_INIT = {
"Precision",
"jpeg.qtable_hdr.precision",
FT_UINT8, BASE_DEC, NULL, 0,
NULL, HFILL
};
static header_field_info hfi_rtp_jpeg_qtable_hdr_length JPEG_HFI_INIT = {
"Length",
"jpeg.qtable_hdr.length",
FT_UINT16, BASE_DEC, NULL, 0,
NULL, HFILL
};
static header_field_info hfi_rtp_jpeg_qtable_hdr_data JPEG_HFI_INIT = {
"Quantization Table Data",
"jpeg.qtable_hdr.data",
FT_BYTES, BASE_NONE, NULL, 0,
NULL, HFILL
};
static header_field_info hfi_rtp_jpeg_payload JPEG_HFI_INIT = {
"Payload",
"jpeg.payload",
FT_BYTES, BASE_NONE, NULL, 0,
NULL, HFILL
};
/* JPEG fields defining a sub tree */
static gint ett_jpeg = -1;
static int
dissect_jpeg( tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data _U_ )
{
proto_item *ti = NULL;
proto_tree *jpeg_tree = NULL;
proto_tree *main_hdr_tree = NULL;
proto_tree *restart_hdr_tree = NULL;
proto_tree *qtable_hdr_tree = NULL;
guint32 fragment_offset = 0;
guint16 len = 0;
guint8 type = 0;
guint8 q = 0;
gint h = 0;
gint w = 0;
unsigned int offset = 0;
col_set_str(pinfo->cinfo, COL_PROTOCOL, "JPEG");
col_set_str(pinfo->cinfo, COL_INFO, "JPEG message");
if ( tree ) {
ti = proto_tree_add_item( tree, hfi_jpeg, tvb, offset, -1, ENC_NA );
jpeg_tree = proto_item_add_subtree( ti, ett_jpeg );
ti = proto_tree_add_item(jpeg_tree, &hfi_rtp_jpeg_main_hdr, tvb, offset, 8, ENC_NA);
main_hdr_tree = proto_item_add_subtree(ti, ett_jpeg);
proto_tree_add_item(main_hdr_tree, &hfi_rtp_jpeg_main_hdr_ts, tvb, offset, 1, ENC_BIG_ENDIAN);
offset += 1;
proto_tree_add_item(main_hdr_tree, &hfi_rtp_jpeg_main_hdr_offs, tvb, offset, 3, ENC_BIG_ENDIAN);
fragment_offset = tvb_get_ntoh24(tvb, offset);
offset += 3;
proto_tree_add_item(main_hdr_tree, &hfi_rtp_jpeg_main_hdr_type, tvb, offset, 1, ENC_BIG_ENDIAN);
type = tvb_get_guint8(tvb, offset);
offset += 1;
proto_tree_add_item(main_hdr_tree, &hfi_rtp_jpeg_main_hdr_q, tvb, offset, 1, ENC_BIG_ENDIAN);
q = tvb_get_guint8(tvb, offset);
offset += 1;
w = tvb_get_guint8(tvb, offset) * 8;
proto_tree_add_uint(main_hdr_tree, &hfi_rtp_jpeg_main_hdr_width, tvb, offset, 1, w);
offset += 1;
h = tvb_get_guint8(tvb, offset) * 8;
proto_tree_add_uint(main_hdr_tree, &hfi_rtp_jpeg_main_hdr_height, tvb, offset, 1, h);
offset += 1;
if (type >= 64 && type <= 127) {
ti = proto_tree_add_item(jpeg_tree, &hfi_rtp_jpeg_restart_hdr, tvb, offset, 4, ENC_NA);
restart_hdr_tree = proto_item_add_subtree(ti, ett_jpeg);
proto_tree_add_item(restart_hdr_tree, &hfi_rtp_jpeg_restart_hdr_interval, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
proto_tree_add_item(restart_hdr_tree, &hfi_rtp_jpeg_restart_hdr_f, tvb, offset, 2, ENC_BIG_ENDIAN);
proto_tree_add_item(restart_hdr_tree, &hfi_rtp_jpeg_restart_hdr_l, tvb, offset, 2, ENC_BIG_ENDIAN);
proto_tree_add_item(restart_hdr_tree, &hfi_rtp_jpeg_restart_hdr_count, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
}
if (q >= 128 && fragment_offset == 0) {
ti = proto_tree_add_item(jpeg_tree, &hfi_rtp_jpeg_qtable_hdr, tvb, offset, -1, ENC_NA);
qtable_hdr_tree = proto_item_add_subtree(ti, ett_jpeg);
proto_tree_add_item(qtable_hdr_tree, &hfi_rtp_jpeg_qtable_hdr_mbz, tvb, offset, 1, ENC_BIG_ENDIAN);
offset += 1;
proto_tree_add_item(qtable_hdr_tree, &hfi_rtp_jpeg_qtable_hdr_prec, tvb, offset, 1, ENC_BIG_ENDIAN);
offset += 1;
proto_tree_add_item(qtable_hdr_tree, &hfi_rtp_jpeg_qtable_hdr_length, tvb, offset, 2, ENC_BIG_ENDIAN);
len = tvb_get_ntohs(tvb, offset);
offset += 2;
if (len > 0) {
proto_tree_add_item(qtable_hdr_tree, &hfi_rtp_jpeg_qtable_hdr_data, tvb, offset, len, ENC_NA);
offset += len;
}
proto_item_set_len(ti, len + 4);
}
/* The rest of the packet is the JPEG data */
proto_tree_add_item( jpeg_tree, &hfi_rtp_jpeg_payload, tvb, offset, -1, ENC_NA );
}
return tvb_captured_length(tvb);
}
void
proto_register_jpeg(void)
{
#ifndef HAVE_HFI_SECTION_INIT
static header_field_info *hfi[] =
{
&hfi_rtp_jpeg_main_hdr,
&hfi_rtp_jpeg_main_hdr_ts,
&hfi_rtp_jpeg_main_hdr_offs,
&hfi_rtp_jpeg_main_hdr_type,
&hfi_rtp_jpeg_main_hdr_q,
&hfi_rtp_jpeg_main_hdr_width,
&hfi_rtp_jpeg_main_hdr_height,
&hfi_rtp_jpeg_restart_hdr,
&hfi_rtp_jpeg_restart_hdr_interval,
&hfi_rtp_jpeg_restart_hdr_f,
&hfi_rtp_jpeg_restart_hdr_l,
&hfi_rtp_jpeg_restart_hdr_count,
&hfi_rtp_jpeg_qtable_hdr,
&hfi_rtp_jpeg_qtable_hdr_mbz,
&hfi_rtp_jpeg_qtable_hdr_prec,
&hfi_rtp_jpeg_qtable_hdr_length,
&hfi_rtp_jpeg_qtable_hdr_data,
&hfi_rtp_jpeg_payload,
};
#endif
static gint *ett[] =
{
&ett_jpeg,
};
int proto_jpeg;
proto_jpeg = proto_register_protocol("RFC 2435 JPEG","JPEG","jpeg");
hfi_jpeg = proto_registrar_get_nth(proto_jpeg);
proto_register_fields(proto_jpeg, hfi, array_length(hfi));
proto_register_subtree_array(ett, array_length(ett));
jpeg_handle = create_dissector_handle(dissect_jpeg, proto_jpeg);
/* RFC 2798 */
register_ber_oid_dissector_handle("0.9.2342.19200300.100.1.60", jpeg_handle, proto_jpeg, "jpegPhoto");
}
void
proto_reg_handoff_jpeg(void)
{
dissector_add_uint("rtp.pt", PT_JPEG, jpeg_handle);
}
/*
* Editor modelines - http://www.wireshark.org/tools/modelines.html
*
* Local variables:
* c-basic-offset: 8
* tab-width: 8
* indent-tabs-mode: t
* End:
*
* vi: set shiftwidth=8 tabstop=8 noexpandtab:
* :indentSize=8:tabSize=8:noTabs=false:
*/
|
731323.c | /*
SDL - Simple DirectMedia Layer
Copyright (C) 1997-2009 Sam Lantinga
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Sam Lantinga
slouken@libsdl.org
*/
#include "SDL_config.h"
/* Dummy SDL video driver implementation; this is just enough to make an
* SDL-based application THINK it's got a working video driver, for
* applications that call SDL_Init(SDL_INIT_VIDEO) when they don't need it,
* and also for use as a collection of stubs when porting SDL to a new
* platform for which you haven't yet written a valid video driver.
*
* This is also a great way to determine bottlenecks: if you think that SDL
* is a performance problem for a given platform, enable this driver, and
* then see if your application runs faster without video overhead.
*
* Initial work by Ryan C. Gordon (icculus@icculus.org). A good portion
* of this was cut-and-pasted from Stephane Peter's work in the AAlib
* SDL video driver. Renamed to "DUMMY" by Sam Lantinga.
*/
#include "SDL_video.h"
#include "SDL_mouse.h"
#include "../SDL_sysvideo.h"
#include "../SDL_pixels_c.h"
#include "../../events/SDL_events_c.h"
#include "SDL_nullvideo.h"
#include "SDL_nullevents_c.h"
#include "SDL_nullmouse_c.h"
#define DUMMYVID_DRIVER_NAME "dummy"
/* Initialization/Query functions */
static int DUMMY_VideoInit(_THIS, SDL_PixelFormat *vformat);
static SDL_Rect **DUMMY_ListModes(_THIS, SDL_PixelFormat *format, Uint32 flags);
static SDL_Surface *DUMMY_SetVideoMode(_THIS, SDL_Surface *current, int width, int height, int bpp, Uint32 flags);
static int DUMMY_SetColors(_THIS, int firstcolor, int ncolors, SDL_Color *colors);
static void DUMMY_VideoQuit(_THIS);
/* Hardware surface functions */
static int DUMMY_AllocHWSurface(_THIS, SDL_Surface *surface);
static int DUMMY_LockHWSurface(_THIS, SDL_Surface *surface);
static void DUMMY_UnlockHWSurface(_THIS, SDL_Surface *surface);
static void DUMMY_FreeHWSurface(_THIS, SDL_Surface *surface);
/* etc. */
static void DUMMY_UpdateRects(_THIS, int numrects, SDL_Rect *rects);
/* DUMMY driver bootstrap functions */
static int DUMMY_Available(void)
{
const char *envr = SDL_getenv("SDL_VIDEODRIVER");
if ((envr) && (SDL_strcmp(envr, DUMMYVID_DRIVER_NAME) == 0)) {
return(1);
}
return(0);
}
static void DUMMY_DeleteDevice(SDL_VideoDevice *device)
{
SDL_free(device->hidden);
SDL_free(device);
}
static SDL_VideoDevice *DUMMY_CreateDevice(int devindex)
{
SDL_VideoDevice *device;
/* Initialize all variables that we clean on shutdown */
device = (SDL_VideoDevice *)SDL_malloc(sizeof(SDL_VideoDevice));
if ( device ) {
SDL_memset(device, 0, (sizeof *device));
device->hidden = (struct SDL_PrivateVideoData *)
SDL_malloc((sizeof *device->hidden));
}
if ( (device == NULL) || (device->hidden == NULL) ) {
SDL_OutOfMemory();
if ( device ) {
SDL_free(device);
}
return(0);
}
SDL_memset(device->hidden, 0, (sizeof *device->hidden));
/* Set the function pointers */
device->VideoInit = DUMMY_VideoInit;
device->ListModes = DUMMY_ListModes;
device->SetVideoMode = DUMMY_SetVideoMode;
device->CreateYUVOverlay = NULL;
device->SetColors = DUMMY_SetColors;
device->UpdateRects = DUMMY_UpdateRects;
device->VideoQuit = DUMMY_VideoQuit;
device->AllocHWSurface = DUMMY_AllocHWSurface;
device->CheckHWBlit = NULL;
device->FillHWRect = NULL;
device->SetHWColorKey = NULL;
device->SetHWAlpha = NULL;
device->LockHWSurface = DUMMY_LockHWSurface;
device->UnlockHWSurface = DUMMY_UnlockHWSurface;
device->FlipHWSurface = NULL;
device->FreeHWSurface = DUMMY_FreeHWSurface;
device->SetCaption = NULL;
device->SetIcon = NULL;
device->IconifyWindow = NULL;
device->GrabInput = NULL;
device->GetWMInfo = NULL;
device->InitOSKeymap = DUMMY_InitOSKeymap;
device->PumpEvents = DUMMY_PumpEvents;
device->free = DUMMY_DeleteDevice;
return device;
}
VideoBootStrap DUMMY_bootstrap = {
DUMMYVID_DRIVER_NAME, "SDL dummy video driver",
DUMMY_Available, DUMMY_CreateDevice
};
int DUMMY_VideoInit(_THIS, SDL_PixelFormat *vformat)
{
/*
fprintf(stderr, "WARNING: You are using the SDL dummy video driver!\n");
*/
/* Determine the screen depth (use default 8-bit depth) */
/* we change this during the SDL_SetVideoMode implementation... */
vformat->BitsPerPixel = 8;
vformat->BytesPerPixel = 1;
/* We're done! */
return(0);
}
SDL_Rect **DUMMY_ListModes(_THIS, SDL_PixelFormat *format, Uint32 flags)
{
return (SDL_Rect **) -1;
}
SDL_Surface *DUMMY_SetVideoMode(_THIS, SDL_Surface *current,
int width, int height, int bpp, Uint32 flags)
{
if ( this->hidden->buffer ) {
SDL_free( this->hidden->buffer );
}
this->hidden->buffer = SDL_malloc(width * height * (bpp / 8));
if ( ! this->hidden->buffer ) {
SDL_SetError("Couldn't allocate buffer for requested mode");
return(NULL);
}
/* printf("Setting mode %dx%d\n", width, height); */
SDL_memset(this->hidden->buffer, 0, width * height * (bpp / 8));
/* Allocate the new pixel format for the screen */
if ( ! SDL_ReallocFormat(current, bpp, 0, 0, 0, 0) ) {
SDL_free(this->hidden->buffer);
this->hidden->buffer = NULL;
SDL_SetError("Couldn't allocate new pixel format for requested mode");
return(NULL);
}
/* Set up the new mode framebuffer */
current->flags = flags & SDL_FULLSCREEN;
this->hidden->w = current->w = width;
this->hidden->h = current->h = height;
current->pitch = current->w * (bpp / 8);
current->pixels = this->hidden->buffer;
/* We're done */
return(current);
}
/* We don't actually allow hardware surfaces other than the main one */
static int DUMMY_AllocHWSurface(_THIS, SDL_Surface *surface)
{
return(-1);
}
static void DUMMY_FreeHWSurface(_THIS, SDL_Surface *surface)
{
return;
}
/* We need to wait for vertical retrace on page flipped displays */
static int DUMMY_LockHWSurface(_THIS, SDL_Surface *surface)
{
return(0);
}
static void DUMMY_UnlockHWSurface(_THIS, SDL_Surface *surface)
{
return;
}
static void DUMMY_UpdateRects(_THIS, int numrects, SDL_Rect *rects)
{
/* do nothing. */
}
int DUMMY_SetColors(_THIS, int firstcolor, int ncolors, SDL_Color *colors)
{
/* do nothing of note. */
return(1);
}
/* Note: If we are terminated, this could be called in the middle of
another SDL video routine -- notably UpdateRects.
*/
void DUMMY_VideoQuit(_THIS)
{
if (this->screen->pixels != NULL)
{
SDL_free(this->screen->pixels);
this->screen->pixels = NULL;
}
}
|
441486.c | #include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <3ds.h>
#include "uitask.h"
#include "listtitles.h"
#include "../resources.h"
#include "../../core/core.h"
static Result task_populate_titles_add_ctr(populate_titles_data* data, FS_MediaType mediaType, u64 titleId) {
Result res = 0;
AM_TitleEntry entry;
if(R_SUCCEEDED(res = AM_GetTitleInfo(mediaType, 1, &titleId, &entry))) {
list_item* item = (list_item*) calloc(1, sizeof(list_item));
if(item != NULL) {
title_info* titleInfo = (title_info*) calloc(1, sizeof(title_info));
if(titleInfo != NULL) {
titleInfo->mediaType = mediaType;
titleInfo->titleId = titleId;
AM_GetTitleProductCode(mediaType, titleId, titleInfo->productCode);
titleInfo->version = entry.version;
titleInfo->installedSize = entry.size;
titleInfo->twl = false;
titleInfo->hasMeta = false;
static const u32 filePath[5] = {0x00000000, 0x00000000, 0x00000002, 0x6E6F6369, 0x00000000};
u32 archivePath[4] = {(u32) (titleId & 0xFFFFFFFF), (u32) ((titleId >> 32) & 0xFFFFFFFF), mediaType, 0x00000000};
Handle fileHandle;
if(R_SUCCEEDED(FSUSER_OpenFileDirectly(&fileHandle, ARCHIVE_SAVEDATA_AND_CONTENT,
fs_make_path_binary(archivePath, sizeof(archivePath)),
fs_make_path_binary(filePath, sizeof(filePath)), FS_OPEN_READ, 0))) {
SMDH* smdh = (SMDH*) calloc(1, sizeof(SMDH));
if(smdh != NULL) {
u32 bytesRead = 0;
if(R_SUCCEEDED(FSFILE_Read(fileHandle, &bytesRead, 0, smdh, sizeof(SMDH))) && bytesRead == sizeof(SMDH)) {
if(smdh->magic[0] == 'S' && smdh->magic[1] == 'M' && smdh->magic[2] == 'D' && smdh->magic[3] == 'H') {
titleInfo->hasMeta = true;
SMDH_title* smdhTitle = smdh_select_title(smdh);
utf16_to_utf8((uint8_t*) item->name, smdhTitle->shortDescription, LIST_ITEM_NAME_MAX - 1);
utf16_to_utf8((uint8_t*) titleInfo->meta.shortDescription, smdhTitle->shortDescription, sizeof(titleInfo->meta.shortDescription) - 1);
utf16_to_utf8((uint8_t*) titleInfo->meta.longDescription, smdhTitle->longDescription, sizeof(titleInfo->meta.longDescription) - 1);
utf16_to_utf8((uint8_t*) titleInfo->meta.publisher, smdhTitle->publisher, sizeof(titleInfo->meta.publisher) - 1);
titleInfo->meta.region = smdh->region;
titleInfo->meta.texture = screen_allocate_free_texture();
screen_load_texture_tiled(titleInfo->meta.texture, smdh->largeIcon, sizeof(smdh->largeIcon), 48, 48, GPU_RGB565, false);
}
}
free(smdh);
}
FSFILE_Close(fileHandle);
}
if(string_is_empty(item->name)) {
snprintf(item->name, LIST_ITEM_NAME_MAX, "%016llX", titleId);
}
if(mediaType == MEDIATYPE_NAND) {
item->color = COLOR_NAND;
} else if(mediaType == MEDIATYPE_SD) {
item->color = COLOR_SD;
} else if(mediaType == MEDIATYPE_GAME_CARD) {
item->color = COLOR_GAME_CARD;
}
item->data = titleInfo;
linked_list_add_sorted(data->items, item, data->userData, data->compare);
} else {
free(item);
res = R_APP_OUT_OF_MEMORY;
}
} else {
res = R_APP_OUT_OF_MEMORY;
}
}
return res;
}
static Result task_populate_titles_add_twl(populate_titles_data* data, FS_MediaType mediaType, u64 titleId) {
Result res = 0;
u64 realTitleId = 0;
char productCode[0x10] = {'\0'};
u16 version = 0;
u64 installedSize = 0;
u8 header[0x3B4] = {0};
Result headerRes = FSUSER_GetLegacyRomHeader(mediaType, titleId, header);
AM_TitleEntry entry;
if(R_SUCCEEDED(res = AM_GetTitleInfo(mediaType, 1, &titleId, &entry))) {
realTitleId = titleId;
AM_GetTitleProductCode(mediaType, titleId, productCode);
version = entry.version;
installedSize = entry.size;
} else if(R_SUCCEEDED(res = headerRes)) {
memcpy(&realTitleId, &header[0x230], sizeof(realTitleId));
memcpy(productCode, header, 0xC);
version = header[0x01E];
u32 size = 0;
if((header[0x012] & 0x2) != 0) {
memcpy(&size, &header[0x210], sizeof(size));
} else {
memcpy(&size, &header[0x080], sizeof(size));
}
installedSize = size;
}
if(R_SUCCEEDED(res)) {
list_item* item = (list_item*) calloc(1, sizeof(list_item));
if(item != NULL) {
title_info* titleInfo = (title_info*) calloc(1, sizeof(title_info));
if(titleInfo != NULL) {
titleInfo->mediaType = mediaType;
titleInfo->titleId = realTitleId;
string_copy(titleInfo->productCode, productCode, sizeof(titleInfo->productCode));
titleInfo->version = version;
titleInfo->installedSize = installedSize;
titleInfo->twl = true;
titleInfo->hasMeta = false;
BNR* bnr = (BNR*) calloc(1, sizeof(BNR));
if(bnr != NULL) {
if(R_SUCCEEDED(FSUSER_GetLegacyBannerData(mediaType, titleId, (u8*) bnr))) {
titleInfo->hasMeta = true;
char title[0x100] = {'\0'};
utf16_to_utf8((uint8_t*) title, bnr_select_title(bnr), sizeof(title) - 1);
if(strchr(title, '\n') == NULL) {
string_copy(item->name, title, sizeof(item->name));
string_copy(titleInfo->meta.shortDescription, title, sizeof(titleInfo->meta.shortDescription));
} else {
char* destinations[] = {titleInfo->meta.shortDescription, titleInfo->meta.longDescription, titleInfo->meta.publisher};
u32 destinationLens[] = {sizeof(titleInfo->meta.shortDescription), sizeof(titleInfo->meta.longDescription), sizeof(titleInfo->meta.publisher)};
int currDest = 0;
char* last = title;
char* curr = NULL;
while(currDest < 3 && (curr = strchr(last, '\n')) != NULL) {
u32 copyLen = curr - last + 1;
if(copyLen > destinationLens[currDest]) {
copyLen = destinationLens[currDest];
}
string_copy(destinations[currDest++], last, copyLen);
last = curr + 1;
*curr = ' ';
}
string_copy(item->name, title, last - title);
if(currDest < 3) {
string_copy(destinations[currDest], last, destinationLens[currDest]);
}
}
u8 icon[32 * 32 * 2];
for(u32 x = 0; x < 32; x++) {
for(u32 y = 0; y < 32; y++) {
u32 srcPos = (((y >> 3) * 4 + (x >> 3)) * 8 + (y & 7)) * 4 + ((x & 7) >> 1);
u32 srcShift = (x & 1) * 4;
u16 srcPx = bnr->mainIconPalette[(bnr->mainIconBitmap[srcPos] >> srcShift) & 0xF];
u8 r = (u8) (srcPx & 0x1F);
u8 g = (u8) ((srcPx >> 5) & 0x1F);
u8 b = (u8) ((srcPx >> 10) & 0x1F);
u16 reversedPx = (u16) ((r << 11) | (g << 6) | (b << 1) | 1);
u32 dstPos = (y * 32 + x) * 2;
icon[dstPos + 0] = (u8) (reversedPx & 0xFF);
icon[dstPos + 1] = (u8) ((reversedPx >> 8) & 0xFF);
}
}
if(R_SUCCEEDED(headerRes)) {
memcpy(&titleInfo->meta.region, &header[0x1B0], sizeof(titleInfo->meta.region));
} else {
titleInfo->meta.region = 0;
}
titleInfo->meta.texture = screen_allocate_free_texture();
screen_load_texture_untiled(titleInfo->meta.texture, icon, sizeof(icon), 32, 32, GPU_RGBA5551, false);
}
free(bnr);
}
if(string_is_empty(item->name)) {
snprintf(item->name, LIST_ITEM_NAME_MAX, "%016llX", realTitleId);
}
item->color = COLOR_DS_TITLE;
item->data = titleInfo;
linked_list_add_sorted(data->items, item, data->userData, data->compare);
} else {
free(item);
res = R_APP_OUT_OF_MEMORY;
}
} else {
res = R_APP_OUT_OF_MEMORY;
}
}
return res;
}
static int task_populate_titles_compare_ids(const void* e1, const void* e2) {
u64 id1 = *(u64*) e1;
u64 id2 = *(u64*) e2;
return id1 > id2 ? 1 : id1 < id2 ? -1 : 0;
}
static Result task_populate_titles_from(populate_titles_data* data, FS_MediaType mediaType, bool useDSiWare) {
bool inserted;
FS_CardType type;
if(mediaType == MEDIATYPE_GAME_CARD && (R_FAILED(FSUSER_CardSlotIsInserted(&inserted)) || !inserted || R_FAILED(FSUSER_GetCardType(&type)))) {
return 0;
}
Result res = 0;
if(mediaType != MEDIATYPE_GAME_CARD || type == CARD_CTR) {
u32 titleCount = 0;
if(R_SUCCEEDED(res = AM_GetTitleCount(mediaType, &titleCount))) {
u64* titleIds = (u64*) calloc(titleCount, sizeof(u64));
if(titleIds != NULL) {
if(R_SUCCEEDED(res = AM_GetTitleList(&titleCount, mediaType, titleCount, titleIds))) {
qsort(titleIds, titleCount, sizeof(u64), task_populate_titles_compare_ids);
for(u32 i = 0; i < titleCount && R_SUCCEEDED(res); i++) {
svcWaitSynchronization(task_get_pause_event(), U64_MAX);
if(task_is_quit_all() || svcWaitSynchronization(data->cancelEvent, 0) == 0) {
break;
}
if(data->filter == NULL || data->filter(data->userData, titleIds[i], mediaType)) {
bool dsiWare = ((titleIds[i] >> 32) & 0x8000) != 0;
if(dsiWare != useDSiWare) {
continue;
}
res = dsiWare ? task_populate_titles_add_twl(data, mediaType, titleIds[i]) : task_populate_titles_add_ctr(data, mediaType, titleIds[i]);
}
}
}
free(titleIds);
} else {
res = R_APP_OUT_OF_MEMORY;
}
}
} else {
res = task_populate_titles_add_twl(data, mediaType, 0);
}
return res;
}
static void task_populate_titles_thread(void* arg) {
populate_titles_data* data = (populate_titles_data*) arg;
Result res = 0;
if(R_SUCCEEDED(res = task_populate_titles_from(data, MEDIATYPE_GAME_CARD, false))) {
if(R_SUCCEEDED(res = task_populate_titles_from(data, MEDIATYPE_SD, false))) {
if(R_SUCCEEDED(res = task_populate_titles_from(data, MEDIATYPE_NAND, false))) {
res = task_populate_titles_from(data, MEDIATYPE_NAND, true);
}
}
}
svcCloseHandle(data->cancelEvent);
data->result = res;
data->finished = true;
}
void task_free_title(list_item* item) {
if(item == NULL) {
return;
}
if(item->data != NULL) {
title_info* titleInfo = (title_info*) item->data;
if(titleInfo->hasMeta) {
screen_unload_texture(titleInfo->meta.texture);
}
free(item->data);
}
free(item);
}
void task_clear_titles(linked_list* items) {
if(items == NULL) {
return;
}
linked_list_iter iter;
linked_list_iterate(items, &iter);
while(linked_list_iter_has_next(&iter)) {
list_item* item = (list_item*) linked_list_iter_next(&iter);
linked_list_iter_remove(&iter);
task_free_title(item);
}
}
Result task_populate_titles(populate_titles_data* data) {
if(data == NULL || data->items == NULL) {
return R_APP_INVALID_ARGUMENT;
}
task_clear_titles(data->items);
data->finished = false;
data->result = 0;
data->cancelEvent = 0;
Result res = 0;
if(R_SUCCEEDED(res = svcCreateEvent(&data->cancelEvent, RESET_STICKY))) {
if(threadCreate(task_populate_titles_thread, data, 0x10000, 0x19, 1, true) == NULL) {
res = R_APP_THREAD_CREATE_FAILED;
}
}
if(R_FAILED(res)) {
data->finished = true;
if(data->cancelEvent != 0) {
svcCloseHandle(data->cancelEvent);
data->cancelEvent = 0;
}
}
return res;
}
|
167815.c | /* { dg-do preprocess } */
/* { dg-options "-std=c99 -Wnormalized=id" { target c } } */
/* { dg-options "-Wnormalized=id" { target c++ } } */
\u00AA
\u00B7
\u0F43 /* { dg-warning "not in NFC" } */
a\u05B8\u05B9\u05B9\u05BBb
a\u05BB\u05B9\u05B8\u05B9b /* { dg-warning "not in NFC" } */
\u09CB
\u09C7\u09BE /* { dg-warning "not in NFC" } */
\u0B4B
\u0B47\u0B3E /* { dg-warning "not in NFC" } */
\u0BCA
\u0BC6\u0BBE /* { dg-warning "not in NFC" } */
\u0BCB
\u0BC7\u0BBE /* { dg-warning "not in NFC" } */
\u0CCA
\u0CC6\u0CC2 /* { dg-warning "not in NFC" } */
\u0D4A
\u0D46\u0D3E /* { dg-warning "not in NFC" } */
\u0D4B
\u0D47\u0D3E /* { dg-warning "not in NFC" } */
K
\u212A
\u03AC
\u1F71 /* { dg-warning "not in NFC" } */
\uAC00
\u1100\u1161
\uAC01
\u1100\u1161\u11A8
\uAC00\u11A8
|
912176.c | /*
* linux/mm/swapfile.c
*
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
* Swap reorganised 29.12.95, Stephen Tweedie
*/
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/shmem_fs.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/writeback.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/memcontrol.h>
#include <linux/poll.h>
#include <linux/oom.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>
#include <linux/export.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/swapops.h>
#include <linux/page_cgroup.h>
static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
unsigned char);
static void free_swap_count_continuations(struct swap_info_struct *);
static sector_t map_swap_entry(swp_entry_t, struct block_device**);
DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
atomic_long_t nr_swap_pages;
/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
long total_swap_pages;
static int least_priority;
static atomic_t highest_priority_index = ATOMIC_INIT(-1);
static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
static const char Unused_offset[] = "Unused swap offset entry ";
struct swap_list_t swap_list = {-1, -1};
struct swap_info_struct *swap_info[MAX_SWAPFILES];
static DEFINE_MUTEX(swapon_mutex);
static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
/* Activity counter to indicate that a swapon or swapoff has occurred */
static atomic_t proc_poll_event = ATOMIC_INIT(0);
static inline unsigned char swap_count(unsigned char ent)
{
return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */
}
/* returns 1 if swap entry is freed */
static int
__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
{
swp_entry_t entry = swp_entry(si->type, offset);
struct page *page;
int ret = 0;
page = find_get_page(swap_address_space(entry), entry.val);
if (!page)
return 0;
/*
* This function is called from scan_swap_map() and it's called
* by vmscan.c at reclaiming pages. So, we hold a lock on a page, here.
* We have to use trylock for avoiding deadlock. This is a special
* case and you should use try_to_free_swap() with explicit lock_page()
* in usual operations.
*/
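/*
* Illustrative sketch of that "usual" pattern (not part of this function):
* ordinary callers take the page lock unconditionally,
*
*     lock_page(page);
*     ret = try_to_free_swap(page);
*     unlock_page(page);
*
* whereas reclaim context must use trylock_page() below to avoid deadlock.
*/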
if (trylock_page(page)) {
ret = try_to_free_swap(page);
unlock_page(page);
}
page_cache_release(page);
return ret;
}
/*
* swapon tells the device that all the old swap contents can be discarded,
* to allow the swap device to optimize its wear-levelling.
*/
static int discard_swap(struct swap_info_struct *si)
{
struct swap_extent *se;
sector_t start_block;
sector_t nr_blocks;
int err = 0;
/* Do not discard the swap header page! */
se = &si->first_swap_extent;
start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
if (nr_blocks) {
err = blkdev_issue_discard(si->bdev, start_block,
nr_blocks, GFP_KERNEL, 0);
if (err)
return err;
cond_resched();
}
list_for_each_entry(se, &si->first_swap_extent.list, list) {
start_block = se->start_block << (PAGE_SHIFT - 9);
nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
err = blkdev_issue_discard(si->bdev, start_block,
nr_blocks, GFP_KERNEL, 0);
if (err)
break;
cond_resched();
}
return err; /* That will often be -EOPNOTSUPP */
}
/*
* swap allocation tells the device that a cluster of swap can now be discarded,
* to allow the swap device to optimize its wear-levelling.
*/
static void discard_swap_cluster(struct swap_info_struct *si,
pgoff_t start_page, pgoff_t nr_pages)
{
struct swap_extent *se = si->curr_swap_extent;
int found_extent = 0;
while (nr_pages) {
struct list_head *lh;
if (se->start_page <= start_page &&
start_page < se->start_page + se->nr_pages) {
pgoff_t offset = start_page - se->start_page;
sector_t start_block = se->start_block + offset;
sector_t nr_blocks = se->nr_pages - offset;
if (nr_blocks > nr_pages)
nr_blocks = nr_pages;
start_page += nr_blocks;
nr_pages -= nr_blocks;
if (!found_extent++)
si->curr_swap_extent = se;
start_block <<= PAGE_SHIFT - 9;
nr_blocks <<= PAGE_SHIFT - 9;
if (blkdev_issue_discard(si->bdev, start_block,
nr_blocks, GFP_NOIO, 0))
break;
}
lh = se->list.next;
se = list_entry(lh, struct swap_extent, list);
}
}
#define SWAPFILE_CLUSTER 256
#define LATENCY_LIMIT 256
static inline void cluster_set_flag(struct swap_cluster_info *info,
unsigned int flag)
{
info->flags = flag;
}
static inline unsigned int cluster_count(struct swap_cluster_info *info)
{
return info->data;
}
static inline void cluster_set_count(struct swap_cluster_info *info,
unsigned int c)
{
info->data = c;
}
static inline void cluster_set_count_flag(struct swap_cluster_info *info,
unsigned int c, unsigned int f)
{
info->flags = f;
info->data = c;
}
static inline unsigned int cluster_next(struct swap_cluster_info *info)
{
return info->data;
}
static inline void cluster_set_next(struct swap_cluster_info *info,
unsigned int n)
{
info->data = n;
}
static inline void cluster_set_next_flag(struct swap_cluster_info *info,
unsigned int n, unsigned int f)
{
info->flags = f;
info->data = n;
}
static inline bool cluster_is_free(struct swap_cluster_info *info)
{
return info->flags & CLUSTER_FLAG_FREE;
}
static inline bool cluster_is_null(struct swap_cluster_info *info)
{
return info->flags & CLUSTER_FLAG_NEXT_NULL;
}
static inline void cluster_set_null(struct swap_cluster_info *info)
{
info->flags = CLUSTER_FLAG_NEXT_NULL;
info->data = 0;
}
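/*
 * The free and discard cluster lists are singly linked through the "data"
 * field of each swap_cluster_info, with head and tail sentinels kept in the
 * swap_info_struct (free_cluster_head/tail, discard_cluster_head/tail).  An
 * empty list is marked with CLUSTER_FLAG_NEXT_NULL via cluster_set_null().
 */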
/* Add a cluster to discard list and schedule it to do discard */
static void swap_cluster_schedule_discard(struct swap_info_struct *si,
unsigned int idx)
{
	/*
	 * If scan_swap_map() can't find a free cluster, it will check
	 * si->swap_map directly. To make sure the cluster being discarded
	 * isn't taken by scan_swap_map(), mark the swap entries bad
	 * (occupied). They will be cleared after the discard completes.
	 */
memset(si->swap_map + idx * SWAPFILE_CLUSTER,
SWAP_MAP_BAD, SWAPFILE_CLUSTER);
if (cluster_is_null(&si->discard_cluster_head)) {
cluster_set_next_flag(&si->discard_cluster_head,
idx, 0);
cluster_set_next_flag(&si->discard_cluster_tail,
idx, 0);
} else {
unsigned int tail = cluster_next(&si->discard_cluster_tail);
cluster_set_next(&si->cluster_info[tail], idx);
cluster_set_next_flag(&si->discard_cluster_tail,
idx, 0);
}
schedule_work(&si->discard_work);
}
/*
 * Do the scheduled discards. After a cluster discard finishes, the cluster
 * is added to the free cluster list. The caller must hold si->lock.
 */
static void swap_do_scheduled_discard(struct swap_info_struct *si)
{
struct swap_cluster_info *info;
unsigned int idx;
info = si->cluster_info;
while (!cluster_is_null(&si->discard_cluster_head)) {
idx = cluster_next(&si->discard_cluster_head);
cluster_set_next_flag(&si->discard_cluster_head,
cluster_next(&info[idx]), 0);
if (cluster_next(&si->discard_cluster_tail) == idx) {
cluster_set_null(&si->discard_cluster_head);
cluster_set_null(&si->discard_cluster_tail);
}
spin_unlock(&si->lock);
discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
SWAPFILE_CLUSTER);
spin_lock(&si->lock);
cluster_set_flag(&info[idx], CLUSTER_FLAG_FREE);
if (cluster_is_null(&si->free_cluster_head)) {
cluster_set_next_flag(&si->free_cluster_head,
idx, 0);
cluster_set_next_flag(&si->free_cluster_tail,
idx, 0);
} else {
unsigned int tail;
tail = cluster_next(&si->free_cluster_tail);
cluster_set_next(&info[tail], idx);
cluster_set_next_flag(&si->free_cluster_tail,
idx, 0);
}
memset(si->swap_map + idx * SWAPFILE_CLUSTER,
0, SWAPFILE_CLUSTER);
}
}
static void swap_discard_work(struct work_struct *work)
{
struct swap_info_struct *si;
si = container_of(work, struct swap_info_struct, discard_work);
spin_lock(&si->lock);
swap_do_scheduled_discard(si);
spin_unlock(&si->lock);
}
/*
 * The cluster corresponding to page_nr will be used. The cluster will be
 * removed from the free cluster list and its usage counter will be increased.
 */
static void inc_cluster_info_page(struct swap_info_struct *p,
struct swap_cluster_info *cluster_info, unsigned long page_nr)
{
unsigned long idx = page_nr / SWAPFILE_CLUSTER;
if (!cluster_info)
return;
if (cluster_is_free(&cluster_info[idx])) {
VM_BUG_ON(cluster_next(&p->free_cluster_head) != idx);
cluster_set_next_flag(&p->free_cluster_head,
cluster_next(&cluster_info[idx]), 0);
if (cluster_next(&p->free_cluster_tail) == idx) {
cluster_set_null(&p->free_cluster_tail);
cluster_set_null(&p->free_cluster_head);
}
cluster_set_count_flag(&cluster_info[idx], 0, 0);
}
VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
cluster_set_count(&cluster_info[idx],
cluster_count(&cluster_info[idx]) + 1);
}
/*
 * Decrease the usage count of the cluster corresponding to page_nr. If the
 * usage counter becomes 0, meaning no page in the cluster is in use, we can
 * optionally discard the cluster and add it to the free cluster list.
 */
static void dec_cluster_info_page(struct swap_info_struct *p,
struct swap_cluster_info *cluster_info, unsigned long page_nr)
{
unsigned long idx = page_nr / SWAPFILE_CLUSTER;
if (!cluster_info)
return;
VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
cluster_set_count(&cluster_info[idx],
cluster_count(&cluster_info[idx]) - 1);
if (cluster_count(&cluster_info[idx]) == 0) {
		/*
		 * If the swap is discardable, schedule a discard of the
		 * cluster instead of freeing it immediately. The cluster
		 * will be freed after the discard.
		 */
if ((p->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
(SWP_WRITEOK | SWP_PAGE_DISCARD)) {
swap_cluster_schedule_discard(p, idx);
return;
}
cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
if (cluster_is_null(&p->free_cluster_head)) {
cluster_set_next_flag(&p->free_cluster_head, idx, 0);
cluster_set_next_flag(&p->free_cluster_tail, idx, 0);
} else {
unsigned int tail = cluster_next(&p->free_cluster_tail);
cluster_set_next(&cluster_info[tail], idx);
cluster_set_next_flag(&p->free_cluster_tail, idx, 0);
}
}
}
/*
 * It's possible for scan_swap_map() to use a free cluster in the middle of
 * the free cluster list. Avoid such abuse to prevent list corruption.
 */
static bool
scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
unsigned long offset)
{
struct percpu_cluster *percpu_cluster;
bool conflict;
offset /= SWAPFILE_CLUSTER;
conflict = !cluster_is_null(&si->free_cluster_head) &&
offset != cluster_next(&si->free_cluster_head) &&
cluster_is_free(&si->cluster_info[offset]);
if (!conflict)
return false;
percpu_cluster = this_cpu_ptr(si->percpu_cluster);
cluster_set_null(&percpu_cluster->index);
return true;
}
/*
 * Try to get a swap entry from the current CPU's swap entry pool (a cluster).
 * This might involve allocating a new cluster for the current CPU too.
 */
static void scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
unsigned long *offset, unsigned long *scan_base)
{
struct percpu_cluster *cluster;
bool found_free;
unsigned long tmp;
new_cluster:
cluster = this_cpu_ptr(si->percpu_cluster);
if (cluster_is_null(&cluster->index)) {
if (!cluster_is_null(&si->free_cluster_head)) {
cluster->index = si->free_cluster_head;
cluster->next = cluster_next(&cluster->index) *
SWAPFILE_CLUSTER;
} else if (!cluster_is_null(&si->discard_cluster_head)) {
			/*
			 * We have no free cluster, but some clusters are being
			 * discarded; do the discard now and reclaim them.
			 */
swap_do_scheduled_discard(si);
*scan_base = *offset = si->cluster_next;
goto new_cluster;
} else
return;
}
found_free = false;
	/*
	 * Other CPUs can use our cluster if they can't find a free one, so
	 * check whether there is still a free entry in this cluster.
	 */
tmp = cluster->next;
while (tmp < si->max && tmp < (cluster_next(&cluster->index) + 1) *
SWAPFILE_CLUSTER) {
if (!si->swap_map[tmp]) {
found_free = true;
break;
}
tmp++;
}
if (!found_free) {
cluster_set_null(&cluster->index);
goto new_cluster;
}
cluster->next = tmp + 1;
*offset = tmp;
*scan_base = tmp;
}
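/*
 * Allocate one free swap slot on @si and mark it with @usage in the swap
 * map.  Returns the slot offset, or 0 on failure (offset 0 is never handed
 * out: it always holds the swap header).  Called and returns with si->lock
 * held, though the lock is dropped around the slow scanning paths.
 */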
static unsigned long scan_swap_map(struct swap_info_struct *si,
unsigned char usage)
{
unsigned long offset;
unsigned long scan_base;
unsigned long last_in_cluster = 0;
int latency_ration = LATENCY_LIMIT;
/*
* We try to cluster swap pages by allocating them sequentially
* in swap. Once we've allocated SWAPFILE_CLUSTER pages this
* way, however, we resort to first-free allocation, starting
* a new cluster. This prevents us from scattering swap pages
* all over the entire swap partition, so that we reduce
* overall disk seek times between swap pages. -- sct
* But we do now try to find an empty cluster. -Andrea
* And we let swap pages go all over an SSD partition. Hugh
*/
si->flags += SWP_SCANNING;
scan_base = offset = si->cluster_next;
/* SSD algorithm */
if (si->cluster_info) {
scan_swap_map_try_ssd_cluster(si, &offset, &scan_base);
goto checks;
}
if (unlikely(!si->cluster_nr--)) {
if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
si->cluster_nr = SWAPFILE_CLUSTER - 1;
goto checks;
}
spin_unlock(&si->lock);
/*
* If seek is expensive, start searching for new cluster from
* start of partition, to minimize the span of allocated swap.
* But if seek is cheap, search from our current position, so
* that swap is allocated from all over the partition: if the
* Flash Translation Layer only remaps within limited zones,
* we don't want to wear out the first zone too quickly.
*/
if (!(si->flags & SWP_SOLIDSTATE))
scan_base = offset = si->lowest_bit;
last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
/* Locate the first empty (unaligned) cluster */
for (; last_in_cluster <= si->highest_bit; offset++) {
if (si->swap_map[offset])
last_in_cluster = offset + SWAPFILE_CLUSTER;
else if (offset == last_in_cluster) {
spin_lock(&si->lock);
offset -= SWAPFILE_CLUSTER - 1;
si->cluster_next = offset;
si->cluster_nr = SWAPFILE_CLUSTER - 1;
goto checks;
}
if (unlikely(--latency_ration < 0)) {
cond_resched();
latency_ration = LATENCY_LIMIT;
}
}
offset = si->lowest_bit;
last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
/* Locate the first empty (unaligned) cluster */
for (; last_in_cluster < scan_base; offset++) {
if (si->swap_map[offset])
last_in_cluster = offset + SWAPFILE_CLUSTER;
else if (offset == last_in_cluster) {
spin_lock(&si->lock);
offset -= SWAPFILE_CLUSTER - 1;
si->cluster_next = offset;
si->cluster_nr = SWAPFILE_CLUSTER - 1;
goto checks;
}
if (unlikely(--latency_ration < 0)) {
cond_resched();
latency_ration = LATENCY_LIMIT;
}
}
offset = scan_base;
spin_lock(&si->lock);
si->cluster_nr = SWAPFILE_CLUSTER - 1;
}
checks:
if (si->cluster_info) {
while (scan_swap_map_ssd_cluster_conflict(si, offset))
scan_swap_map_try_ssd_cluster(si, &offset, &scan_base);
}
if (!(si->flags & SWP_WRITEOK))
goto no_page;
if (!si->highest_bit)
goto no_page;
if (offset > si->highest_bit)
scan_base = offset = si->lowest_bit;
/* reuse swap entry of cache-only swap if not busy. */
if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
int swap_was_freed;
spin_unlock(&si->lock);
swap_was_freed = __try_to_reclaim_swap(si, offset);
spin_lock(&si->lock);
/* entry was freed successfully, try to use this again */
if (swap_was_freed)
goto checks;
goto scan; /* check next one */
}
if (si->swap_map[offset])
goto scan;
if (offset == si->lowest_bit)
si->lowest_bit++;
if (offset == si->highest_bit)
si->highest_bit--;
si->inuse_pages++;
if (si->inuse_pages == si->pages) {
si->lowest_bit = si->max;
si->highest_bit = 0;
}
si->swap_map[offset] = usage;
inc_cluster_info_page(si, si->cluster_info, offset);
si->cluster_next = offset + 1;
si->flags -= SWP_SCANNING;
return offset;
scan:
spin_unlock(&si->lock);
while (++offset <= si->highest_bit) {
if (!si->swap_map[offset]) {
spin_lock(&si->lock);
goto checks;
}
if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
spin_lock(&si->lock);
goto checks;
}
if (unlikely(--latency_ration < 0)) {
cond_resched();
latency_ration = LATENCY_LIMIT;
}
}
offset = si->lowest_bit;
while (++offset < scan_base) {
if (!si->swap_map[offset]) {
spin_lock(&si->lock);
goto checks;
}
if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
spin_lock(&si->lock);
goto checks;
}
if (unlikely(--latency_ration < 0)) {
cond_resched();
latency_ration = LATENCY_LIMIT;
}
}
spin_lock(&si->lock);
no_page:
si->flags -= SWP_SCANNING;
return 0;
}
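/*
 * Allocate a swap entry (with the swap-cache reference already taken) from
 * the highest priority swap device that still has free space.  Devices of
 * equal priority are used round-robin via swap_list.next; the "wrapped"
 * counter bounds the walk to at most two passes over the list.
 */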
swp_entry_t get_swap_page(void)
{
struct swap_info_struct *si;
pgoff_t offset;
int type, next;
int wrapped = 0;
int hp_index;
spin_lock(&swap_lock);
if (atomic_long_read(&nr_swap_pages) <= 0)
goto noswap;
atomic_long_dec(&nr_swap_pages);
for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
hp_index = atomic_xchg(&highest_priority_index, -1);
		/*
		 * highest_priority_index records the current highest priority
		 * swap type which just freed swap entries. If its priority is
		 * higher than that of the swap_list.next swap type, we use it.
		 * It isn't protected by swap_lock, so it can be an invalid
		 * value if the corresponding swap type has been swapped off.
		 * We double check the flags here. It's even possible the swap
		 * type was swapped off and on again and its priority changed.
		 * In such a rare case, a low priority swap type might be used,
		 * but eventually the high priority swap type will be used
		 * after several rounds of swapping.
		 */
if (hp_index != -1 && hp_index != type &&
swap_info[type]->prio < swap_info[hp_index]->prio &&
(swap_info[hp_index]->flags & SWP_WRITEOK)) {
type = hp_index;
swap_list.next = type;
}
si = swap_info[type];
next = si->next;
if (next < 0 ||
(!wrapped && si->prio != swap_info[next]->prio)) {
next = swap_list.head;
wrapped++;
}
spin_lock(&si->lock);
if (!si->highest_bit) {
spin_unlock(&si->lock);
continue;
}
if (!(si->flags & SWP_WRITEOK)) {
spin_unlock(&si->lock);
continue;
}
swap_list.next = next;
spin_unlock(&swap_lock);
/* This is called for allocating swap entry for cache */
offset = scan_swap_map(si, SWAP_HAS_CACHE);
spin_unlock(&si->lock);
if (offset)
return swp_entry(type, offset);
spin_lock(&swap_lock);
next = swap_list.next;
}
atomic_long_inc(&nr_swap_pages);
noswap:
spin_unlock(&swap_lock);
return (swp_entry_t) {0};
}
/* The only caller of this function is now the suspend routine */
swp_entry_t get_swap_page_of_type(int type)
{
struct swap_info_struct *si;
pgoff_t offset;
si = swap_info[type];
spin_lock(&si->lock);
if (si && (si->flags & SWP_WRITEOK)) {
atomic_long_dec(&nr_swap_pages);
/* This is called for allocating swap entry, not cache */
offset = scan_swap_map(si, 1);
if (offset) {
spin_unlock(&si->lock);
return swp_entry(type, offset);
}
atomic_long_inc(&nr_swap_pages);
}
spin_unlock(&si->lock);
return (swp_entry_t) {0};
}
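/*
 * Look up and sanity-check the swap device backing @entry.  On success the
 * swap_info_struct is returned with its lock held; on any failure a message
 * is printed and NULL is returned.
 */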
static struct swap_info_struct *swap_info_get(swp_entry_t entry)
{
struct swap_info_struct *p;
unsigned long offset, type;
if (!entry.val)
goto out;
type = swp_type(entry);
if (type >= nr_swapfiles)
goto bad_nofile;
p = swap_info[type];
if (!(p->flags & SWP_USED))
goto bad_device;
offset = swp_offset(entry);
if (offset >= p->max)
goto bad_offset;
if (!p->swap_map[offset])
goto bad_free;
spin_lock(&p->lock);
return p;
bad_free:
pr_err("swap_free: %s%08lx\n", Unused_offset, entry.val);
goto out;
bad_offset:
pr_err("swap_free: %s%08lx\n", Bad_offset, entry.val);
goto out;
bad_device:
pr_err("swap_free: %s%08lx\n", Unused_file, entry.val);
goto out;
bad_nofile:
pr_err("swap_free: %s%08lx\n", Bad_file, entry.val);
out:
return NULL;
}
/*
 * This swap type just freed a swap entry; check whether it is the highest
 * priority swap type that recently freed one. get_swap_page() uses
 * highest_priority_index to find the highest priority swap type. The
 * swap_info_struct.lock can't protect us if there are multiple swap types
 * active, so we use atomic_cmpxchg.
 */
static void set_highest_priority_index(int type)
{
int old_hp_index, new_hp_index;
do {
old_hp_index = atomic_read(&highest_priority_index);
if (old_hp_index != -1 &&
swap_info[old_hp_index]->prio >= swap_info[type]->prio)
break;
new_hp_index = type;
} while (atomic_cmpxchg(&highest_priority_index,
old_hp_index, new_hp_index) != old_hp_index);
}
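/*
 * Drop one reference of kind @usage (a map count, or SWAP_HAS_CACHE for the
 * swap cache) from the entry and return the remaining swap_map value.  When
 * that value reaches zero the slot is truly free: the cluster counter is
 * decremented, the bitmap bounds and priority index are updated, and the
 * block device is notified via swap_slot_free_notify if it asked for it.
 * Called with p->lock held.
 */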
static unsigned char swap_entry_free(struct swap_info_struct *p,
swp_entry_t entry, unsigned char usage)
{
unsigned long offset = swp_offset(entry);
unsigned char count;
unsigned char has_cache;
count = p->swap_map[offset];
has_cache = count & SWAP_HAS_CACHE;
count &= ~SWAP_HAS_CACHE;
if (usage == SWAP_HAS_CACHE) {
VM_BUG_ON(!has_cache);
has_cache = 0;
} else if (count == SWAP_MAP_SHMEM) {
/*
* Or we could insist on shmem.c using a special
* swap_shmem_free() and free_shmem_swap_and_cache()...
*/
count = 0;
} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
if (count == COUNT_CONTINUED) {
if (swap_count_continued(p, offset, count))
count = SWAP_MAP_MAX | COUNT_CONTINUED;
else
count = SWAP_MAP_MAX;
} else
count--;
}
if (!count)
mem_cgroup_uncharge_swap(entry);
usage = count | has_cache;
p->swap_map[offset] = usage;
/* free if no reference */
if (!usage) {
dec_cluster_info_page(p, p->cluster_info, offset);
if (offset < p->lowest_bit)
p->lowest_bit = offset;
if (offset > p->highest_bit)
p->highest_bit = offset;
set_highest_priority_index(p->type);
atomic_long_inc(&nr_swap_pages);
p->inuse_pages--;
frontswap_invalidate_page(p->type, offset);
if (p->flags & SWP_BLKDEV) {
struct gendisk *disk = p->bdev->bd_disk;
if (disk->fops->swap_slot_free_notify)
disk->fops->swap_slot_free_notify(p->bdev,
offset);
}
}
return usage;
}
/*
* Caller has made sure that the swap device corresponding to entry
* is still around or has not been recycled.
*/
void swap_free(swp_entry_t entry)
{
struct swap_info_struct *p;
p = swap_info_get(entry);
if (p) {
swap_entry_free(p, entry, 1);
spin_unlock(&p->lock);
}
}
/*
 * Called after dropping swapcache to decrease the refcount of the swap entry.
 */
void swapcache_free(swp_entry_t entry, struct page *page)
{
struct swap_info_struct *p;
unsigned char count;
p = swap_info_get(entry);
if (p) {
count = swap_entry_free(p, entry, SWAP_HAS_CACHE);
if (page)
mem_cgroup_uncharge_swapcache(page, entry, count != 0);
spin_unlock(&p->lock);
}
}
/*
* How many references to page are currently swapped out?
* This does not give an exact answer when swap count is continued,
* but does include the high COUNT_CONTINUED flag to allow for that.
*/
int page_swapcount(struct page *page)
{
int count = 0;
struct swap_info_struct *p;
swp_entry_t entry;
entry.val = page_private(page);
p = swap_info_get(entry);
if (p) {
count = swap_count(p->swap_map[swp_offset(entry)]);
spin_unlock(&p->lock);
}
return count;
}
/*
* We can write to an anon page without COW if there are no other references
* to it. And as a side-effect, free up its swap: because the old content
* on disk will never be read, and seeking back there to write new content
* later would only waste time away from clustering.
*/
int reuse_swap_page(struct page *page)
{
int count;
VM_BUG_ON(!PageLocked(page));
if (unlikely(PageKsm(page)))
return 0;
count = page_mapcount(page);
if (count <= 1 && PageSwapCache(page)) {
count += page_swapcount(page);
if (count == 1 && !PageWriteback(page)) {
delete_from_swap_cache(page);
SetPageDirty(page);
}
}
return count <= 1;
}
/*
* If swap is getting full, or if there are no more mappings of this page,
* then try_to_free_swap is called to free its swap space.
*/
int try_to_free_swap(struct page *page)
{
VM_BUG_ON(!PageLocked(page));
if (!PageSwapCache(page))
return 0;
if (PageWriteback(page))
return 0;
if (page_swapcount(page))
return 0;
/*
* Once hibernation has begun to create its image of memory,
* there's a danger that one of the calls to try_to_free_swap()
* - most probably a call from __try_to_reclaim_swap() while
* hibernation is allocating its own swap pages for the image,
* but conceivably even a call from memory reclaim - will free
* the swap from a page which has already been recorded in the
* image as a clean swapcache page, and then reuse its swap for
* another page of the image. On waking from hibernation, the
* original page might be freed under memory pressure, then
* later read back in from swap, now with the wrong data.
*
* Hibernation suspends storage while it is writing the image
* to disk so check that here.
*/
if (pm_suspended_storage())
return 0;
delete_from_swap_cache(page);
SetPageDirty(page);
return 1;
}
/*
* Free the swap entry like above, but also try to
* free the page cache entry if it is the last user.
*/
int free_swap_and_cache(swp_entry_t entry)
{
struct swap_info_struct *p;
struct page *page = NULL;
if (non_swap_entry(entry))
return 1;
p = swap_info_get(entry);
if (p) {
if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
page = find_get_page(swap_address_space(entry),
entry.val);
if (page && !trylock_page(page)) {
page_cache_release(page);
page = NULL;
}
}
spin_unlock(&p->lock);
}
if (page) {
/*
* Not mapped elsewhere, or swap space full? Free it!
* Also recheck PageSwapCache now page is locked (above).
*/
if (PageSwapCache(page) && !PageWriteback(page) &&
(!page_mapped(page) || vm_swap_full())) {
delete_from_swap_cache(page);
SetPageDirty(page);
}
unlock_page(page);
page_cache_release(page);
}
return p != NULL;
}
#ifdef CONFIG_HIBERNATION
/*
 * Find the swap type that corresponds to the given device (if any).
*
* @offset - number of the PAGE_SIZE-sized block of the device, starting
* from 0, in which the swap header is expected to be located.
*
* This is needed for the suspend to disk (aka swsusp).
*/
int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
{
struct block_device *bdev = NULL;
int type;
if (device)
bdev = bdget(device);
spin_lock(&swap_lock);
for (type = 0; type < nr_swapfiles; type++) {
struct swap_info_struct *sis = swap_info[type];
if (!(sis->flags & SWP_WRITEOK))
continue;
if (!bdev) {
if (bdev_p)
*bdev_p = bdgrab(sis->bdev);
spin_unlock(&swap_lock);
return type;
}
if (bdev == sis->bdev) {
struct swap_extent *se = &sis->first_swap_extent;
if (se->start_block == offset) {
if (bdev_p)
*bdev_p = bdgrab(sis->bdev);
spin_unlock(&swap_lock);
bdput(bdev);
return type;
}
}
}
spin_unlock(&swap_lock);
if (bdev)
bdput(bdev);
return -ENODEV;
}
/*
 * Get the (PAGE_SIZE) block corresponding to the given offset on the swapdev
 * corresponding to the given index in swap_info (swap type).
 */
*/
sector_t swapdev_block(int type, pgoff_t offset)
{
struct block_device *bdev;
if ((unsigned int)type >= nr_swapfiles)
return 0;
if (!(swap_info[type]->flags & SWP_WRITEOK))
return 0;
return map_swap_entry(swp_entry(type, offset), &bdev);
}
/*
* Return either the total number of swap pages of given type, or the number
* of free pages of that type (depending on @free)
*
* This is needed for software suspend
*/
unsigned int count_swap_pages(int type, int free)
{
unsigned int n = 0;
spin_lock(&swap_lock);
if ((unsigned int)type < nr_swapfiles) {
struct swap_info_struct *sis = swap_info[type];
spin_lock(&sis->lock);
if (sis->flags & SWP_WRITEOK) {
n = sis->pages;
if (free)
n -= sis->inuse_pages;
}
spin_unlock(&sis->lock);
}
spin_unlock(&swap_lock);
return n;
}
#endif /* CONFIG_HIBERNATION */
static inline int maybe_same_pte(pte_t pte, pte_t swp_pte)
{
#ifdef CONFIG_MEM_SOFT_DIRTY
	/*
	 * When the pte keeps the soft dirty bit, the pte generated
	 * from the swap entry does not have it, but it is still the
	 * same pte from a logical point of view.
	 */
pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte);
return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty);
#else
return pte_same(pte, swp_pte);
#endif
}
/*
* No need to decide whether this PTE shares the swap entry with others,
* just let do_wp_page work it out if a write is requested later - to
* force COW, vm_page_prot omits write permission from any private vma.
*/
static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, swp_entry_t entry, struct page *page)
{
struct page *swapcache;
struct mem_cgroup *memcg;
spinlock_t *ptl;
pte_t *pte;
int ret = 1;
swapcache = page;
page = ksm_might_need_to_copy(page, vma, addr);
if (unlikely(!page))
return -ENOMEM;
if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
GFP_KERNEL, &memcg)) {
ret = -ENOMEM;
goto out_nolock;
}
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) {
mem_cgroup_cancel_charge_swapin(memcg);
ret = 0;
goto out;
}
dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
get_page(page);
set_pte_at(vma->vm_mm, addr, pte,
pte_mkold(mk_pte(page, vma->vm_page_prot)));
if (page == swapcache)
page_add_anon_rmap(page, vma, addr);
else /* ksm created a completely new copy */
page_add_new_anon_rmap(page, vma, addr);
mem_cgroup_commit_charge_swapin(page, memcg);
swap_free(entry);
/*
* Move the page to the active list so it is not
* immediately swapped out again after swapon.
*/
activate_page(page);
out:
pte_unmap_unlock(pte, ptl);
out_nolock:
if (page != swapcache) {
unlock_page(page);
put_page(page);
}
return ret;
}
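/*
 * The helpers below walk the page tables in the usual pgd -> pud -> pmd ->
 * pte order, looking for ptes that match the swap entry being unused and
 * handing each match to unuse_pte() to replace it with the in-memory page.
 */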
static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
swp_entry_t entry, struct page *page)
{
pte_t swp_pte = swp_entry_to_pte(entry);
pte_t *pte;
int ret = 0;
/*
* We don't actually need pte lock while scanning for swp_pte: since
* we hold page lock and mmap_sem, swp_pte cannot be inserted into the
* page table while we're scanning; though it could get zapped, and on
* some architectures (e.g. x86_32 with PAE) we might catch a glimpse
* of unmatched parts which look like swp_pte, so unuse_pte must
* recheck under pte lock. Scanning without pte lock lets it be
* preemptable whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
*/
pte = pte_offset_map(pmd, addr);
do {
/*
* swapoff spends a _lot_ of time in this loop!
* Test inline before going to call unuse_pte.
*/
if (unlikely(maybe_same_pte(*pte, swp_pte))) {
pte_unmap(pte);
ret = unuse_pte(vma, pmd, addr, entry, page);
if (ret)
goto out;
pte = pte_offset_map(pmd, addr);
}
} while (pte++, addr += PAGE_SIZE, addr != end);
pte_unmap(pte - 1);
out:
return ret;
}
static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
unsigned long addr, unsigned long end,
swp_entry_t entry, struct page *page)
{
pmd_t *pmd;
unsigned long next;
int ret;
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret)
return ret;
} while (pmd++, addr = next, addr != end);
return 0;
}
static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
unsigned long addr, unsigned long end,
swp_entry_t entry, struct page *page)
{
pud_t *pud;
unsigned long next;
int ret;
pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
continue;
ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
if (ret)
return ret;
} while (pud++, addr = next, addr != end);
return 0;
}
static int unuse_vma(struct vm_area_struct *vma,
swp_entry_t entry, struct page *page)
{
pgd_t *pgd;
unsigned long addr, end, next;
int ret;
if (page_anon_vma(page)) {
addr = page_address_in_vma(page, vma);
if (addr == -EFAULT)
return 0;
else
end = addr + PAGE_SIZE;
} else {
addr = vma->vm_start;
end = vma->vm_end;
}
pgd = pgd_offset(vma->vm_mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
if (ret)
return ret;
} while (pgd++, addr = next, addr != end);
return 0;
}
static int unuse_mm(struct mm_struct *mm,
swp_entry_t entry, struct page *page)
{
struct vm_area_struct *vma;
int ret = 0;
if (!down_read_trylock(&mm->mmap_sem)) {
/*
* Activate page so shrink_inactive_list is unlikely to unmap
* its ptes while lock is dropped, so swapoff can make progress.
*/
activate_page(page);
unlock_page(page);
down_read(&mm->mmap_sem);
lock_page(page);
}
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
break;
}
up_read(&mm->mmap_sem);
return (ret < 0)? ret: 0;
}
/*
* Scan swap_map (or frontswap_map if frontswap parameter is true)
* from current position to next entry still in use.
* Recycle to start on reaching the end, returning 0 when empty.
*/
static unsigned int find_next_to_unuse(struct swap_info_struct *si,
unsigned int prev, bool frontswap)
{
unsigned int max = si->max;
unsigned int i = prev;
unsigned char count;
/*
* No need for swap_lock here: we're just looking
* for whether an entry is in use, not modifying it; false
* hits are okay, and sys_swapoff() has already prevented new
* allocations from this area (while holding swap_lock).
*/
for (;;) {
if (++i >= max) {
if (!prev) {
i = 0;
break;
}
/*
* No entries in use at top of swap_map,
* loop back to start and recheck there.
*/
max = prev + 1;
prev = 0;
i = 1;
}
if (frontswap) {
if (frontswap_test(si, i))
break;
else
continue;
}
count = ACCESS_ONCE(si->swap_map[i]);
if (count && swap_count(count) != SWAP_MAP_BAD)
break;
}
return i;
}
/*
* We completely avoid races by reading each swap page in advance,
 * and then searching for the processes using it. All the necessary
* page table adjustments can then be made atomically.
*
* if the boolean frontswap is true, only unuse pages_to_unuse pages;
* pages_to_unuse==0 means all pages; ignored if frontswap is false
*/
int try_to_unuse(unsigned int type, bool frontswap,
unsigned long pages_to_unuse)
{
struct swap_info_struct *si = swap_info[type];
struct mm_struct *start_mm;
volatile unsigned char *swap_map; /* swap_map is accessed without
* locking. Mark it as volatile
* to prevent compiler doing
* something odd.
*/
unsigned char swcount;
struct page *page;
swp_entry_t entry;
unsigned int i = 0;
int retval = 0;
/*
* When searching mms for an entry, a good strategy is to
* start at the first mm we freed the previous entry from
* (though actually we don't notice whether we or coincidence
* freed the entry). Initialize this start_mm with a hold.
*
* A simpler strategy would be to start at the last mm we
* freed the previous entry from; but that would take less
* advantage of mmlist ordering, which clusters forked mms
* together, child after parent. If we race with dup_mmap(), we
* prefer to resolve parent before child, lest we miss entries
* duplicated after we scanned child: using last mm would invert
* that.
*/
start_mm = &init_mm;
atomic_inc(&init_mm.mm_users);
/*
* Keep on scanning until all entries have gone. Usually,
* one pass through swap_map is enough, but not necessarily:
* there are races when an instance of an entry might be missed.
*/
while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
if (signal_pending(current)) {
retval = -EINTR;
break;
}
/*
* Get a page for the entry, using the existing swap
* cache page if there is one. Otherwise, get a clean
* page and read the swap into it.
*/
swap_map = &si->swap_map[i];
entry = swp_entry(type, i);
page = read_swap_cache_async(entry,
GFP_HIGHUSER_MOVABLE, NULL, 0);
if (!page) {
/*
* Either swap_duplicate() failed because entry
* has been freed independently, and will not be
* reused since sys_swapoff() already disabled
* allocation from here, or alloc_page() failed.
*/
swcount = *swap_map;
			/*
			 * We don't hold the lock here, so the swap entry could
			 * be SWAP_MAP_BAD (when the cluster is being
			 * discarded). Instead of failing out, we can just skip
			 * the swap entry because swapoff will wait for the
			 * discard to finish anyway.
			 */
if (!swcount || swcount == SWAP_MAP_BAD)
continue;
retval = -ENOMEM;
break;
}
/*
* Don't hold on to start_mm if it looks like exiting.
*/
if (atomic_read(&start_mm->mm_users) == 1) {
mmput(start_mm);
start_mm = &init_mm;
atomic_inc(&init_mm.mm_users);
}
/*
* Wait for and lock page. When do_swap_page races with
* try_to_unuse, do_swap_page can handle the fault much
* faster than try_to_unuse can locate the entry. This
* apparently redundant "wait_on_page_locked" lets try_to_unuse
* defer to do_swap_page in such a case - in some tests,
* do_swap_page and try_to_unuse repeatedly compete.
*/
wait_on_page_locked(page);
wait_on_page_writeback(page);
lock_page(page);
wait_on_page_writeback(page);
/*
* Remove all references to entry.
*/
swcount = *swap_map;
if (swap_count(swcount) == SWAP_MAP_SHMEM) {
retval = shmem_unuse(entry, page);
/* page has already been unlocked and released */
if (retval < 0)
break;
continue;
}
if (swap_count(swcount) && start_mm != &init_mm)
retval = unuse_mm(start_mm, entry, page);
if (swap_count(*swap_map)) {
int set_start_mm = (*swap_map >= swcount);
struct list_head *p = &start_mm->mmlist;
struct mm_struct *new_start_mm = start_mm;
struct mm_struct *prev_mm = start_mm;
struct mm_struct *mm;
atomic_inc(&new_start_mm->mm_users);
atomic_inc(&prev_mm->mm_users);
spin_lock(&mmlist_lock);
while (swap_count(*swap_map) && !retval &&
(p = p->next) != &start_mm->mmlist) {
mm = list_entry(p, struct mm_struct, mmlist);
if (!atomic_inc_not_zero(&mm->mm_users))
continue;
spin_unlock(&mmlist_lock);
mmput(prev_mm);
prev_mm = mm;
cond_resched();
swcount = *swap_map;
if (!swap_count(swcount)) /* any usage ? */
;
else if (mm == &init_mm)
set_start_mm = 1;
else
retval = unuse_mm(mm, entry, page);
if (set_start_mm && *swap_map < swcount) {
mmput(new_start_mm);
atomic_inc(&mm->mm_users);
new_start_mm = mm;
set_start_mm = 0;
}
spin_lock(&mmlist_lock);
}
spin_unlock(&mmlist_lock);
mmput(prev_mm);
mmput(start_mm);
start_mm = new_start_mm;
}
if (retval) {
unlock_page(page);
page_cache_release(page);
break;
}
/*
* If a reference remains (rare), we would like to leave
* the page in the swap cache; but try_to_unmap could
* then re-duplicate the entry once we drop page lock,
* so we might loop indefinitely; also, that page could
* not be swapped out to other storage meanwhile. So:
* delete from cache even if there's another reference,
* after ensuring that the data has been saved to disk -
* since if the reference remains (rarer), it will be
* read from disk into another page. Splitting into two
* pages would be incorrect if swap supported "shared
* private" pages, but they are handled by tmpfs files.
*
* Given how unuse_vma() targets one particular offset
* in an anon_vma, once the anon_vma has been determined,
* this splitting happens to be just what is needed to
* handle where KSM pages have been swapped out: re-reading
* is unnecessarily slow, but we can fix that later on.
*/
if (swap_count(*swap_map) &&
PageDirty(page) && PageSwapCache(page)) {
struct writeback_control wbc = {
.sync_mode = WB_SYNC_NONE,
};
swap_writepage(page, &wbc);
lock_page(page);
wait_on_page_writeback(page);
}
/*
* It is conceivable that a racing task removed this page from
* swap cache just before we acquired the page lock at the top,
* or while we dropped it in unuse_mm(). The page might even
* be back in swap cache on another swap area: that we must not
* delete, since it may not have been written out to swap yet.
*/
if (PageSwapCache(page) &&
likely(page_private(page) == entry.val))
delete_from_swap_cache(page);
		/*
		 * Since we could skip searching mms once the swap count went
		 * to 1, we did not mark any present ptes as dirty: we must
		 * mark the page dirty so shrink_page_list will preserve it.
		 */
SetPageDirty(page);
unlock_page(page);
page_cache_release(page);
/*
* Make sure that we aren't completely killing
* interactive performance.
*/
cond_resched();
if (frontswap && pages_to_unuse > 0) {
if (!--pages_to_unuse)
break;
}
}
mmput(start_mm);
return retval;
}
/*
* After a successful try_to_unuse, if no swap is now in use, we know
* we can empty the mmlist. swap_lock must be held on entry and exit.
* Note that mmlist_lock nests inside swap_lock, and an mm must be
* added to the mmlist just after page_duplicate - before would be racy.
*/
static void drain_mmlist(void)
{
struct list_head *p, *next;
unsigned int type;
for (type = 0; type < nr_swapfiles; type++)
if (swap_info[type]->inuse_pages)
return;
spin_lock(&mmlist_lock);
list_for_each_safe(p, next, &init_mm.mmlist)
list_del_init(p);
spin_unlock(&mmlist_lock);
}
/*
* Use this swapdev's extent info to locate the (PAGE_SIZE) block which
* corresponds to page offset for the specified swap entry.
 * Note that the return type of this function is sector_t, but it returns the
 * page offset into the bdev, not the sector offset.
*/
static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
{
struct swap_info_struct *sis;
struct swap_extent *start_se;
struct swap_extent *se;
pgoff_t offset;
sis = swap_info[swp_type(entry)];
*bdev = sis->bdev;
offset = swp_offset(entry);
start_se = sis->curr_swap_extent;
se = start_se;
for ( ; ; ) {
struct list_head *lh;
if (se->start_page <= offset &&
offset < (se->start_page + se->nr_pages)) {
return se->start_block + (offset - se->start_page);
}
lh = se->list.next;
se = list_entry(lh, struct swap_extent, list);
sis->curr_swap_extent = se;
BUG_ON(se == start_se); /* It *must* be present */
}
}
/*
* Returns the page offset into bdev for the specified page's swap entry.
*/
sector_t map_swap_page(struct page *page, struct block_device **bdev)
{
swp_entry_t entry;
entry.val = page_private(page);
return map_swap_entry(entry, bdev);
}
/*
* Free all of a swapdev's extent information
*/
static void destroy_swap_extents(struct swap_info_struct *sis)
{
while (!list_empty(&sis->first_swap_extent.list)) {
struct swap_extent *se;
se = list_entry(sis->first_swap_extent.list.next,
struct swap_extent, list);
list_del(&se->list);
kfree(se);
}
if (sis->flags & SWP_FILE) {
struct file *swap_file = sis->swap_file;
struct address_space *mapping = swap_file->f_mapping;
sis->flags &= ~SWP_FILE;
mapping->a_ops->swap_deactivate(swap_file);
}
}
/*
* Add a block range (and the corresponding page range) into this swapdev's
* extent list. The extent list is kept sorted in page order.
*
* This function rather assumes that it is called in ascending page order.
*/
int
add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
unsigned long nr_pages, sector_t start_block)
{
struct swap_extent *se;
struct swap_extent *new_se;
struct list_head *lh;
if (start_page == 0) {
se = &sis->first_swap_extent;
sis->curr_swap_extent = se;
se->start_page = 0;
se->nr_pages = nr_pages;
se->start_block = start_block;
return 1;
} else {
lh = sis->first_swap_extent.list.prev; /* Highest extent */
se = list_entry(lh, struct swap_extent, list);
BUG_ON(se->start_page + se->nr_pages != start_page);
if (se->start_block + se->nr_pages == start_block) {
/* Merge it */
se->nr_pages += nr_pages;
return 0;
}
}
/*
* No merge. Insert a new extent, preserving ordering.
*/
new_se = kmalloc(sizeof(*se), GFP_KERNEL);
if (new_se == NULL)
return -ENOMEM;
new_se->start_page = start_page;
new_se->nr_pages = nr_pages;
new_se->start_block = start_block;
list_add_tail(&new_se->list, &sis->first_swap_extent.list);
return 1;
}
/*
* A `swap extent' is a simple thing which maps a contiguous range of pages
* onto a contiguous range of disk blocks. An ordered list of swap extents
* is built at swapon time and is then used at swap_writepage/swap_readpage
* time for locating where on disk a page belongs.
*
* If the swapfile is an S_ISBLK block device, a single extent is installed.
* This is done so that the main operating code can treat S_ISBLK and S_ISREG
* swap files identically.
*
* Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
* extent list operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK
* swapfiles are handled *identically* after swapon time.
*
* For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
* and will parse them into an ordered extent list, in PAGE_SIZE chunks. If
* some stray blocks are found which do not fall within the PAGE_SIZE alignment
* requirements, they are simply tossed out - we will never use those blocks
* for swapping.
*
* For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon. This
* prevents root from shooting her foot off by ftruncating an in-use swapfile,
* which will scribble on the fs.
*
* The amount of disk space which a single swap extent represents varies.
* Typically it is in the 1-4 megabyte range. So we can have hundreds of
* extents in the list. To avoid much list walking, we cache the previous
* search location in `curr_swap_extent', and start new searches from there.
* This is extremely effective. The average number of iterations in
* map_swap_page() has been measured at about 0.3 per page. - akpm.
*/
static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
{
struct file *swap_file = sis->swap_file;
struct address_space *mapping = swap_file->f_mapping;
struct inode *inode = mapping->host;
int ret;
if (S_ISBLK(inode->i_mode)) {
ret = add_swap_extent(sis, 0, sis->max, 0);
*span = sis->pages;
return ret;
}
if (mapping->a_ops->swap_activate) {
ret = mapping->a_ops->swap_activate(sis, swap_file, span);
if (!ret) {
sis->flags |= SWP_FILE;
ret = add_swap_extent(sis, 0, sis->max, 0);
*span = sis->pages;
}
return ret;
}
return generic_swapfile_activate(sis, swap_file, span);
}
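/*
 * Insert @p into the swap rotation: account its pages, mark it writable and
 * link it into swap_list, which is kept sorted by descending priority (a
 * negative prio means "use last", handed out from least_priority).
 */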
static void _enable_swap_info(struct swap_info_struct *p, int prio,
unsigned char *swap_map,
struct swap_cluster_info *cluster_info)
{
int i, prev;
if (prio >= 0)
p->prio = prio;
else
p->prio = --least_priority;
p->swap_map = swap_map;
p->cluster_info = cluster_info;
p->flags |= SWP_WRITEOK;
atomic_long_add(p->pages, &nr_swap_pages);
total_swap_pages += p->pages;
/* insert swap space into swap_list: */
prev = -1;
for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
if (p->prio >= swap_info[i]->prio)
break;
prev = i;
}
p->next = i;
if (prev < 0)
swap_list.head = swap_list.next = p->type;
else
swap_info[prev]->next = p->type;
}
static void enable_swap_info(struct swap_info_struct *p, int prio,
unsigned char *swap_map,
struct swap_cluster_info *cluster_info,
unsigned long *frontswap_map)
{
frontswap_init(p->type, frontswap_map);
spin_lock(&swap_lock);
spin_lock(&p->lock);
_enable_swap_info(p, prio, swap_map, cluster_info);
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
}
static void reinsert_swap_info(struct swap_info_struct *p)
{
spin_lock(&swap_lock);
spin_lock(&p->lock);
_enable_swap_info(p, p->prio, p->swap_map, p->cluster_info);
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
}
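/*
 * swapoff: find the swap_info_struct backing the given file, unlink it from
 * swap_list and clear SWP_WRITEOK so no new allocations land on it, then
 * call try_to_unuse() to pull every entry still in use back into memory.
 * Only once that succeeds are the swap map, cluster info, frontswap map and
 * the backing file/bdev released; on failure the device is re-inserted.
 */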
SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
{
struct swap_info_struct *p = NULL;
unsigned char *swap_map;
struct swap_cluster_info *cluster_info;
unsigned long *frontswap_map;
struct file *swap_file, *victim;
struct address_space *mapping;
struct inode *inode;
struct filename *pathname;
int i, type, prev;
int err;
unsigned int old_block_size;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
BUG_ON(!current->mm);
pathname = getname(specialfile);
if (IS_ERR(pathname))
return PTR_ERR(pathname);
victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
err = PTR_ERR(victim);
if (IS_ERR(victim))
goto out;
mapping = victim->f_mapping;
prev = -1;
spin_lock(&swap_lock);
for (type = swap_list.head; type >= 0; type = swap_info[type]->next) {
p = swap_info[type];
if (p->flags & SWP_WRITEOK) {
if (p->swap_file->f_mapping == mapping)
break;
}
prev = type;
}
if (type < 0) {
err = -EINVAL;
spin_unlock(&swap_lock);
goto out_dput;
}
if (!security_vm_enough_memory_mm(current->mm, p->pages))
vm_unacct_memory(p->pages);
else {
err = -ENOMEM;
spin_unlock(&swap_lock);
goto out_dput;
}
if (prev < 0)
swap_list.head = p->next;
else
swap_info[prev]->next = p->next;
if (type == swap_list.next) {
/* just pick something that's safe... */
swap_list.next = swap_list.head;
}
spin_lock(&p->lock);
if (p->prio < 0) {
for (i = p->next; i >= 0; i = swap_info[i]->next)
swap_info[i]->prio = p->prio--;
least_priority++;
}
atomic_long_sub(p->pages, &nr_swap_pages);
total_swap_pages -= p->pages;
p->flags &= ~SWP_WRITEOK;
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
set_current_oom_origin();
err = try_to_unuse(type, false, 0); /* force all pages to be unused */
clear_current_oom_origin();
if (err) {
/* re-insert swap space back into swap_list */
reinsert_swap_info(p);
goto out_dput;
}
flush_work(&p->discard_work);
destroy_swap_extents(p);
if (p->flags & SWP_CONTINUED)
free_swap_count_continuations(p);
mutex_lock(&swapon_mutex);
spin_lock(&swap_lock);
spin_lock(&p->lock);
drain_mmlist();
/* wait for anyone still in scan_swap_map */
p->highest_bit = 0; /* cuts scans short */
while (p->flags >= SWP_SCANNING) {
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
schedule_timeout_uninterruptible(1);
spin_lock(&swap_lock);
spin_lock(&p->lock);
}
swap_file = p->swap_file;
old_block_size = p->old_block_size;
p->swap_file = NULL;
p->max = 0;
swap_map = p->swap_map;
p->swap_map = NULL;
cluster_info = p->cluster_info;
p->cluster_info = NULL;
frontswap_map = frontswap_map_get(p);
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
frontswap_invalidate_area(type);
frontswap_map_set(p, NULL);
mutex_unlock(&swapon_mutex);
free_percpu(p->percpu_cluster);
p->percpu_cluster = NULL;
vfree(swap_map);
vfree(cluster_info);
vfree(frontswap_map);
/* Destroy swap account information */
swap_cgroup_swapoff(type);
inode = mapping->host;
if (S_ISBLK(inode->i_mode)) {
struct block_device *bdev = I_BDEV(inode);
set_blocksize(bdev, old_block_size);
blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
} else {
mutex_lock(&inode->i_mutex);
inode->i_flags &= ~S_SWAPFILE;
mutex_unlock(&inode->i_mutex);
}
filp_close(swap_file, NULL);
/*
* Clear the SWP_USED flag after all resources are freed so that swapon
* can reuse this swap_info in alloc_swap_info() safely. It is ok to
* not hold p->lock after we cleared its SWP_WRITEOK.
*/
spin_lock(&swap_lock);
p->flags = 0;
spin_unlock(&swap_lock);
err = 0;
atomic_inc(&proc_poll_event);
wake_up_interruptible(&proc_poll_wait);
out_dput:
filp_close(victim, NULL);
out:
putname(pathname);
return err;
}
#ifdef CONFIG_PROC_FS
static unsigned swaps_poll(struct file *file, poll_table *wait)
{
struct seq_file *seq = file->private_data;
poll_wait(file, &proc_poll_wait, wait);
if (seq->poll_event != atomic_read(&proc_poll_event)) {
seq->poll_event = atomic_read(&proc_poll_event);
return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
}
return POLLIN | POLLRDNORM;
}
/* iterator */
static void *swap_start(struct seq_file *swap, loff_t *pos)
{
struct swap_info_struct *si;
int type;
loff_t l = *pos;
mutex_lock(&swapon_mutex);
if (!l)
return SEQ_START_TOKEN;
for (type = 0; type < nr_swapfiles; type++) {
smp_rmb(); /* read nr_swapfiles before swap_info[type] */
si = swap_info[type];
if (!(si->flags & SWP_USED) || !si->swap_map)
continue;
if (!--l)
return si;
}
return NULL;
}
static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
{
struct swap_info_struct *si = v;
int type;
if (v == SEQ_START_TOKEN)
type = 0;
else
type = si->type + 1;
for (; type < nr_swapfiles; type++) {
smp_rmb(); /* read nr_swapfiles before swap_info[type] */
si = swap_info[type];
if (!(si->flags & SWP_USED) || !si->swap_map)
continue;
++*pos;
return si;
}
return NULL;
}
static void swap_stop(struct seq_file *swap, void *v)
{
mutex_unlock(&swapon_mutex);
}
static int swap_show(struct seq_file *swap, void *v)
{
struct swap_info_struct *si = v;
struct file *file;
int len;
if (si == SEQ_START_TOKEN) {
		seq_puts(swap, "Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
return 0;
}
file = si->swap_file;
len = seq_path(swap, &file->f_path, " \t\n\\");
seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
len < 40 ? 40 - len : 1, " ",
S_ISBLK(file_inode(file)->i_mode) ?
"partition" : "file\t",
si->pages << (PAGE_SHIFT - 10),
si->inuse_pages << (PAGE_SHIFT - 10),
si->prio);
return 0;
}
static const struct seq_operations swaps_op = {
.start = swap_start,
.next = swap_next,
.stop = swap_stop,
.show = swap_show
};
static int swaps_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
int ret;
ret = seq_open(file, &swaps_op);
if (ret)
return ret;
seq = file->private_data;
seq->poll_event = atomic_read(&proc_poll_event);
return 0;
}
static const struct file_operations proc_swaps_operations = {
.open = swaps_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
.poll = swaps_poll,
};
static int __init procswaps_init(void)
{
proc_create("swaps", 0, NULL, &proc_swaps_operations);
return 0;
}
__initcall(procswaps_init);
#endif /* CONFIG_PROC_FS */
#ifdef MAX_SWAPFILES_CHECK
static int __init max_swapfiles_check(void)
{
MAX_SWAPFILES_CHECK();
return 0;
}
late_initcall(max_swapfiles_check);
#endif
static struct swap_info_struct *alloc_swap_info(void)
{
struct swap_info_struct *p;
unsigned int type;
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return ERR_PTR(-ENOMEM);
spin_lock(&swap_lock);
for (type = 0; type < nr_swapfiles; type++) {
if (!(swap_info[type]->flags & SWP_USED))
break;
}
if (type >= MAX_SWAPFILES) {
spin_unlock(&swap_lock);
kfree(p);
return ERR_PTR(-EPERM);
}
if (type >= nr_swapfiles) {
p->type = type;
swap_info[type] = p;
/*
* Write swap_info[type] before nr_swapfiles, in case a
* racing procfs swap_start() or swap_next() is reading them.
* (We never shrink nr_swapfiles, we never free this entry.)
*/
smp_wmb();
nr_swapfiles++;
} else {
kfree(p);
p = swap_info[type];
/*
* Do not memset this entry: a racing procfs swap_next()
* would be relying on p->type to remain valid.
*/
}
INIT_LIST_HEAD(&p->first_swap_extent.list);
p->flags = SWP_USED;
p->next = -1;
spin_unlock(&swap_lock);
spin_lock_init(&p->lock);
return p;
}
static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
{
int error;
if (S_ISBLK(inode->i_mode)) {
p->bdev = bdgrab(I_BDEV(inode));
error = blkdev_get(p->bdev,
FMODE_READ | FMODE_WRITE | FMODE_EXCL,
sys_swapon);
if (error < 0) {
p->bdev = NULL;
return -EINVAL;
}
p->old_block_size = block_size(p->bdev);
error = set_blocksize(p->bdev, PAGE_SIZE);
if (error < 0)
return error;
p->flags |= SWP_BLKDEV;
} else if (S_ISREG(inode->i_mode)) {
p->bdev = inode->i_sb->s_bdev;
mutex_lock(&inode->i_mutex);
if (IS_SWAPFILE(inode))
return -EBUSY;
} else
return -EINVAL;
return 0;
}
static unsigned long read_swap_header(struct swap_info_struct *p,
union swap_header *swap_header,
struct inode *inode)
{
int i;
unsigned long maxpages;
unsigned long swapfilepages;
unsigned long last_page;
if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
pr_err("Unable to find swap-space signature\n");
return 0;
}
	/* swap partition endianness hack... */
if (swab32(swap_header->info.version) == 1) {
swab32s(&swap_header->info.version);
swab32s(&swap_header->info.last_page);
swab32s(&swap_header->info.nr_badpages);
for (i = 0; i < swap_header->info.nr_badpages; i++)
swab32s(&swap_header->info.badpages[i]);
}
/* Check the swap header's sub-version */
if (swap_header->info.version != 1) {
pr_warn("Unable to handle swap header version %d\n",
swap_header->info.version);
return 0;
}
p->lowest_bit = 1;
p->cluster_next = 1;
p->cluster_nr = 0;
/*
* Find out how many pages are allowed for a single swap
* device. There are two limiting factors: 1) the number
* of bits for the swap offset in the swp_entry_t type, and
* 2) the number of bits in the swap pte as defined by the
* different architectures. In order to find the
* largest possible bit mask, a swap entry with swap type 0
* and swap offset ~0UL is created, encoded to a swap pte,
* decoded to a swp_entry_t again, and finally the swap
* offset is extracted. This will mask all the bits from
* the initial ~0UL mask that can't be encoded in either
* the swp_entry_t or the architecture definition of a
* swap pte.
*/
maxpages = swp_offset(pte_to_swp_entry(
swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
last_page = swap_header->info.last_page;
if (last_page > maxpages) {
pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
maxpages << (PAGE_SHIFT - 10),
last_page << (PAGE_SHIFT - 10));
}
if (maxpages > last_page) {
maxpages = last_page + 1;
/* p->max is an unsigned int: don't overflow it */
if ((unsigned int)maxpages == 0)
maxpages = UINT_MAX;
}
p->highest_bit = maxpages - 1;
if (!maxpages)
return 0;
swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
if (swapfilepages && maxpages > swapfilepages) {
pr_warn("Swap area shorter than signature indicates\n");
return 0;
}
if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
return 0;
if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
return 0;
return maxpages;
}
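/*
 * Build the in-memory state for a new swap area: mark the header page and
 * any listed bad pages as SWAP_MAP_BAD, account them against the cluster
 * counters, set up the on-disk extent list, and (for SSDs) thread every
 * still-empty cluster onto the free cluster list.  Returns the number of
 * extents, or a negative errno.
 */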
static int setup_swap_map_and_extents(struct swap_info_struct *p,
union swap_header *swap_header,
unsigned char *swap_map,
struct swap_cluster_info *cluster_info,
unsigned long maxpages,
sector_t *span)
{
int i;
unsigned int nr_good_pages;
int nr_extents;
unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
unsigned long idx = p->cluster_next / SWAPFILE_CLUSTER;
nr_good_pages = maxpages - 1; /* omit header page */
cluster_set_null(&p->free_cluster_head);
cluster_set_null(&p->free_cluster_tail);
cluster_set_null(&p->discard_cluster_head);
cluster_set_null(&p->discard_cluster_tail);
for (i = 0; i < swap_header->info.nr_badpages; i++) {
unsigned int page_nr = swap_header->info.badpages[i];
if (page_nr == 0 || page_nr > swap_header->info.last_page)
return -EINVAL;
if (page_nr < maxpages) {
swap_map[page_nr] = SWAP_MAP_BAD;
nr_good_pages--;
/*
* Haven't marked the cluster free yet, no list
* operation involved
*/
inc_cluster_info_page(p, cluster_info, page_nr);
}
}
/* Haven't marked the cluster free yet, no list operation involved */
for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
inc_cluster_info_page(p, cluster_info, i);
if (nr_good_pages) {
swap_map[0] = SWAP_MAP_BAD;
		/*
		 * Haven't marked the cluster free yet, no list
		 * operation involved
		 */
inc_cluster_info_page(p, cluster_info, 0);
p->max = maxpages;
p->pages = nr_good_pages;
nr_extents = setup_swap_extents(p, span);
if (nr_extents < 0)
return nr_extents;
nr_good_pages = p->pages;
}
if (!nr_good_pages) {
pr_warn("Empty swap-file\n");
return -EINVAL;
}
if (!cluster_info)
return nr_extents;
for (i = 0; i < nr_clusters; i++) {
if (!cluster_count(&cluster_info[idx])) {
cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
if (cluster_is_null(&p->free_cluster_head)) {
cluster_set_next_flag(&p->free_cluster_head,
idx, 0);
cluster_set_next_flag(&p->free_cluster_tail,
idx, 0);
} else {
unsigned int tail;
tail = cluster_next(&p->free_cluster_tail);
cluster_set_next(&cluster_info[tail], idx);
cluster_set_next_flag(&p->free_cluster_tail,
idx, 0);
}
}
idx++;
if (idx == nr_clusters)
idx = 0;
}
return nr_extents;
}
/*
 * Helper for sys_swapon to determine whether a given swap
 * backing device queue supports DISCARD operations.
 */
static bool swap_discardable(struct swap_info_struct *si)
{
struct request_queue *q = bdev_get_queue(si->bdev);
if (!q || !blk_queue_discard(q))
return false;
return true;
}
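/*
 * swapon: claim the file or block device, validate the swap header, build
 * the swap map, cluster info and extents, optionally issue a swapon-time
 * discard, and finally publish the device via enable_swap_info() so that
 * get_swap_page() can start allocating from it.
 */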
SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
{
struct swap_info_struct *p;
struct filename *name;
struct file *swap_file = NULL;
struct address_space *mapping;
int i;
int prio;
int error;
union swap_header *swap_header;
int nr_extents;
sector_t span;
unsigned long maxpages;
unsigned char *swap_map = NULL;
struct swap_cluster_info *cluster_info = NULL;
unsigned long *frontswap_map = NULL;
struct page *page = NULL;
struct inode *inode = NULL;
if (swap_flags & ~SWAP_FLAGS_VALID)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
p = alloc_swap_info();
if (IS_ERR(p))
return PTR_ERR(p);
INIT_WORK(&p->discard_work, swap_discard_work);
name = getname(specialfile);
if (IS_ERR(name)) {
error = PTR_ERR(name);
name = NULL;
goto bad_swap;
}
swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0);
if (IS_ERR(swap_file)) {
error = PTR_ERR(swap_file);
swap_file = NULL;
goto bad_swap;
}
p->swap_file = swap_file;
mapping = swap_file->f_mapping;
for (i = 0; i < nr_swapfiles; i++) {
struct swap_info_struct *q = swap_info[i];
if (q == p || !q->swap_file)
continue;
if (mapping == q->swap_file->f_mapping) {
error = -EBUSY;
goto bad_swap;
}
}
inode = mapping->host;
	/* If S_ISREG(inode->i_mode), claim_swapfile() takes mutex_lock(&inode->i_mutex) */
error = claim_swapfile(p, inode);
if (unlikely(error))
goto bad_swap;
/*
* Read the swap header.
*/
if (!mapping->a_ops->readpage) {
error = -EINVAL;
goto bad_swap;
}
page = read_mapping_page(mapping, 0, swap_file);
if (IS_ERR(page)) {
error = PTR_ERR(page);
goto bad_swap;
}
swap_header = kmap(page);
maxpages = read_swap_header(p, swap_header, inode);
if (unlikely(!maxpages)) {
error = -EINVAL;
goto bad_swap;
}
/* OK, set up the swap map and apply the bad block list */
swap_map = vzalloc(maxpages);
if (!swap_map) {
error = -ENOMEM;
goto bad_swap;
}
if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
p->flags |= SWP_SOLIDSTATE;
		/*
		 * select a random position to start with to help
		 * wear-levelling of the SSD
		 */
p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
cluster_info = vzalloc(DIV_ROUND_UP(maxpages,
SWAPFILE_CLUSTER) * sizeof(*cluster_info));
if (!cluster_info) {
error = -ENOMEM;
goto bad_swap;
}
p->percpu_cluster = alloc_percpu(struct percpu_cluster);
if (!p->percpu_cluster) {
error = -ENOMEM;
goto bad_swap;
}
for_each_possible_cpu(i) {
struct percpu_cluster *cluster;
cluster = per_cpu_ptr(p->percpu_cluster, i);
cluster_set_null(&cluster->index);
}
}
error = swap_cgroup_swapon(p->type, maxpages);
if (error)
goto bad_swap;
nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
cluster_info, maxpages, &span);
if (unlikely(nr_extents < 0)) {
error = nr_extents;
goto bad_swap;
}
/* frontswap enabled? set up bit-per-page map for frontswap */
if (frontswap_enabled)
frontswap_map = vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long));
	if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
/*
* When discard is enabled for swap with no particular
* policy flagged, we set all swap discard flags here in
* order to sustain backward compatibility with older
* swapon(8) releases.
*/
p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
SWP_PAGE_DISCARD);
/*
* By flagging sys_swapon, a sysadmin can tell us to
* either do single-time area discards only, or to just
* perform discards for released swap page-clusters.
* Now it's time to adjust the p->flags accordingly.
*/
if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
p->flags &= ~SWP_PAGE_DISCARD;
else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
p->flags &= ~SWP_AREA_DISCARD;
/* issue a swapon-time discard if it's still required */
if (p->flags & SWP_AREA_DISCARD) {
int err = discard_swap(p);
if (unlikely(err))
pr_err("swapon: discard_swap(%p): %d\n",
p, err);
}
}
mutex_lock(&swapon_mutex);
prio = -1;
if (swap_flags & SWAP_FLAG_PREFER)
prio =
(swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
pr_info("Adding %uk swap on %s. "
"Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
(p->flags & SWP_SOLIDSTATE) ? "SS" : "",
(p->flags & SWP_DISCARDABLE) ? "D" : "",
(p->flags & SWP_AREA_DISCARD) ? "s" : "",
(p->flags & SWP_PAGE_DISCARD) ? "c" : "",
(frontswap_map) ? "FS" : "");
mutex_unlock(&swapon_mutex);
atomic_inc(&proc_poll_event);
wake_up_interruptible(&proc_poll_wait);
if (S_ISREG(inode->i_mode))
inode->i_flags |= S_SWAPFILE;
error = 0;
goto out;
bad_swap:
free_percpu(p->percpu_cluster);
p->percpu_cluster = NULL;
if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
set_blocksize(p->bdev, p->old_block_size);
blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}
destroy_swap_extents(p);
swap_cgroup_swapoff(p->type);
spin_lock(&swap_lock);
p->swap_file = NULL;
p->flags = 0;
spin_unlock(&swap_lock);
vfree(swap_map);
vfree(cluster_info);
if (swap_file) {
if (inode && S_ISREG(inode->i_mode)) {
mutex_unlock(&inode->i_mutex);
inode = NULL;
}
filp_close(swap_file, NULL);
}
out:
if (page && !IS_ERR(page)) {
kunmap(page);
page_cache_release(page);
}
if (name)
putname(name);
if (inode && S_ISREG(inode->i_mode))
mutex_unlock(&inode->i_mutex);
return error;
}
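/*
 * Fill in swap totals for sysinfo(2).  Areas that are in the middle of
 * being swapped off (SWP_USED but not SWP_WRITEOK) still hold in-use
 * pages, so count those back into both the free and total figures.
 */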
void si_swapinfo(struct sysinfo *val)
{
unsigned int type;
unsigned long nr_to_be_unused = 0;
spin_lock(&swap_lock);
for (type = 0; type < nr_swapfiles; type++) {
struct swap_info_struct *si = swap_info[type];
if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
nr_to_be_unused += si->inuse_pages;
}
val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
val->totalswap = total_swap_pages + nr_to_be_unused;
spin_unlock(&swap_lock);
}
/*
* Verify that a swap entry is valid and increment its swap map count.
*
* Returns error code in following case.
* - success -> 0
* - swp_entry is invalid -> EINVAL
* - swp_entry is migration entry -> EINVAL
* - swap-cache reference is requested but there is already one. -> EEXIST
* - swap-cache reference is requested but the entry is not used. -> ENOENT
* - swap-mapped reference requested but needs continued swap count. -> ENOMEM
*/
static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
{
struct swap_info_struct *p;
unsigned long offset, type;
unsigned char count;
unsigned char has_cache;
int err = -EINVAL;
if (non_swap_entry(entry))
goto out;
type = swp_type(entry);
if (type >= nr_swapfiles)
goto bad_file;
p = swap_info[type];
offset = swp_offset(entry);
spin_lock(&p->lock);
if (unlikely(offset >= p->max))
goto unlock_out;
count = p->swap_map[offset];
/*
* swapin_readahead() doesn't check if a swap entry is valid, so the
* swap entry could be SWAP_MAP_BAD. Check here with lock held.
*/
if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
err = -ENOENT;
goto unlock_out;
}
has_cache = count & SWAP_HAS_CACHE;
count &= ~SWAP_HAS_CACHE;
err = 0;
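/*
 * Two kinds of reference: SWAP_HAS_CACHE marks a swap-cache reference,
 * anything else bumps the map count, spilling into a continuation page
 * once the low "digit" would pass SWAP_MAP_MAX.
 */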
if (usage == SWAP_HAS_CACHE) {
/* set SWAP_HAS_CACHE if there is no cache and entry is used */
if (!has_cache && count)
has_cache = SWAP_HAS_CACHE;
else if (has_cache) /* someone else added cache */
err = -EEXIST;
else /* no users remaining */
err = -ENOENT;
} else if (count || has_cache) {
if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
count += usage;
else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
err = -EINVAL;
else if (swap_count_continued(p, offset, count))
count = COUNT_CONTINUED;
else
err = -ENOMEM;
} else
err = -ENOENT; /* unused swap entry */
p->swap_map[offset] = count | has_cache;
unlock_out:
spin_unlock(&p->lock);
out:
return err;
bad_file:
pr_err("swap_dup: %s%08lx\n", Bad_file, entry.val);
goto out;
}
/*
* Help swapoff by noting that swap entry belongs to shmem/tmpfs
* (in which case its reference count is never incremented).
*/
void swap_shmem_alloc(swp_entry_t entry)
{
__swap_duplicate(entry, SWAP_MAP_SHMEM);
}
/*
* Increase reference count of swap entry by 1.
* Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
* but could not be atomically allocated. Returns 0, just as if it succeeded,
* if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
* might occur if a page table entry has got corrupted.
*/
int swap_duplicate(swp_entry_t entry)
{
int err = 0;
while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
err = add_swap_count_continuation(entry, GFP_ATOMIC);
return err;
}
/*
* @entry: swap entry for which we allocate swap cache.
*
* Called when allocating swap cache for existing swap entry,
* This can return error codes. Returns 0 at success.
* -EBUSY means there is a swap cache.
* Note: return code is different from swap_duplicate().
*/
int swapcache_prepare(swp_entry_t entry)
{
return __swap_duplicate(entry, SWAP_HAS_CACHE);
}
struct swap_info_struct *page_swap_info(struct page *page)
{
swp_entry_t swap = { .val = page_private(page) };
BUG_ON(!PageSwapCache(page));
return swap_info[swp_type(swap)];
}
/*
* out-of-line __page_file_ methods to avoid include hell.
*/
struct address_space *__page_file_mapping(struct page *page)
{
VM_BUG_ON(!PageSwapCache(page));
return page_swap_info(page)->swap_file->f_mapping;
}
EXPORT_SYMBOL_GPL(__page_file_mapping);
pgoff_t __page_file_index(struct page *page)
{
swp_entry_t swap = { .val = page_private(page) };
VM_BUG_ON(!PageSwapCache(page));
return swp_offset(swap);
}
EXPORT_SYMBOL_GPL(__page_file_index);
/*
* add_swap_count_continuation - called when a swap count is duplicated
* beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
* page of the original vmalloc'ed swap_map, to hold the continuation count
* (for that entry and for its neighbouring PAGE_SIZE swap entries). Called
* again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
*
* These continuation pages are seldom referenced: the common paths all work
* on the original swap_map, only referring to a continuation page when the
* low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
*
* add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
* page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
* can be called after dropping locks.
*/
int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
{
struct swap_info_struct *si;
struct page *head;
struct page *page;
struct page *list_page;
pgoff_t offset;
unsigned char count;
/*
* When debugging, it's easier to use __GFP_ZERO here; but it's better
* for latency not to zero a page while GFP_ATOMIC and holding locks.
*/
page = alloc_page(gfp_mask | __GFP_HIGHMEM);
si = swap_info_get(entry);
if (!si) {
/*
* An acceptable race has occurred since the failing
* __swap_duplicate(): the swap entry has been freed,
* perhaps even the whole swap_map cleared for swapoff.
*/
goto outer;
}
offset = swp_offset(entry);
count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
/*
* The higher the swap count, the more likely it is that tasks
* will race to add swap count continuation: we need to avoid
* over-provisioning.
*/
goto out;
}
if (!page) {
spin_unlock(&si->lock);
return -ENOMEM;
}
/*
* We are fortunate that although vmalloc_to_page uses pte_offset_map,
* no architecture is using highmem pages for kernel page tables: so it
* will not corrupt the GFP_ATOMIC caller's atomic page table kmaps.
*/
head = vmalloc_to_page(si->swap_map + offset);
offset &= ~PAGE_MASK;
/*
* Page allocation does not initialize the page's lru field,
* but it does always reset its private field.
*/
if (!page_private(head)) {
BUG_ON(count & COUNT_CONTINUED);
INIT_LIST_HEAD(&head->lru);
set_page_private(head, SWP_CONTINUED);
si->flags |= SWP_CONTINUED;
}
list_for_each_entry(list_page, &head->lru, lru) {
unsigned char *map;
/*
* If the previous map said no continuation, but we've found
* a continuation page, free our allocation and use this one.
*/
if (!(count & COUNT_CONTINUED))
goto out;
map = kmap_atomic(list_page) + offset;
count = *map;
kunmap_atomic(map);
/*
* If this continuation count now has some space in it,
* free our allocation and use this one.
*/
if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
goto out;
}
list_add_tail(&page->lru, &head->lru);
page = NULL; /* now it's attached, don't free it */
out:
spin_unlock(&si->lock);
outer:
if (page)
__free_page(page);
return 0;
}
/*
* swap_count_continued - when the original swap_map count is incremented
* from SWAP_MAP_MAX, check if there is already a continuation page to carry
* into, carry if so, or else fail until a new continuation page is allocated;
* when the original swap_map count is decremented from 0 with continuation,
* borrow from the continuation and report whether it still holds more.
* Called while __swap_duplicate() or swap_entry_free() holds swap_lock.
*/
static bool swap_count_continued(struct swap_info_struct *si,
pgoff_t offset, unsigned char count)
{
struct page *head;
struct page *page;
unsigned char *map;
head = vmalloc_to_page(si->swap_map + offset);
if (page_private(head) != SWP_CONTINUED) {
BUG_ON(count & COUNT_CONTINUED);
return false; /* need to add count continuation */
}
offset &= ~PAGE_MASK;
page = list_entry(head->lru.next, struct page, lru);
map = kmap_atomic(page) + offset;
if (count == SWAP_MAP_MAX) /* initial increment from swap_map */
goto init_map; /* jump over SWAP_CONT_MAX checks */
if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
/*
* Think of how you add 1 to 999
*/
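/* Carry: skip continuation "digits" that are already maxed out. */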
while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
kunmap_atomic(map);
page = list_entry(page->lru.next, struct page, lru);
BUG_ON(page == head);
map = kmap_atomic(page) + offset;
}
if (*map == SWAP_CONT_MAX) {
kunmap_atomic(map);
page = list_entry(page->lru.next, struct page, lru);
if (page == head)
return false; /* add count continuation */
map = kmap_atomic(page) + offset;
init_map: *map = 0; /* we didn't zero the page */
}
*map += 1;
kunmap_atomic(map);
page = list_entry(page->lru.prev, struct page, lru);
while (page != head) {
map = kmap_atomic(page) + offset;
*map = COUNT_CONTINUED;
kunmap_atomic(map);
page = list_entry(page->lru.prev, struct page, lru);
}
return true; /* incremented */
} else { /* decrementing */
/*
* Think of how you subtract 1 from 1000
*/
BUG_ON(count != COUNT_CONTINUED);
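/* Borrow: skip continuation "digits" that are already zero to find one to decrement. */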
while (*map == COUNT_CONTINUED) {
kunmap_atomic(map);
page = list_entry(page->lru.next, struct page, lru);
BUG_ON(page == head);
map = kmap_atomic(page) + offset;
}
BUG_ON(*map == 0);
*map -= 1;
if (*map == 0)
count = 0;
kunmap_atomic(map);
page = list_entry(page->lru.prev, struct page, lru);
while (page != head) {
map = kmap_atomic(page) + offset;
*map = SWAP_CONT_MAX | count;
count = COUNT_CONTINUED;
kunmap_atomic(map);
page = list_entry(page->lru.prev, struct page, lru);
}
return count == COUNT_CONTINUED;
}
}
/*
* free_swap_count_continuations - swapoff free all the continuation pages
* appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
*/
static void free_swap_count_continuations(struct swap_info_struct *si)
{
pgoff_t offset;
for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
struct page *head;
head = vmalloc_to_page(si->swap_map + offset);
if (page_private(head)) {
struct list_head *this, *next;
list_for_each_safe(this, next, &head->lru) {
struct page *page;
page = list_entry(this, struct page, lru);
list_del(this);
__free_page(page);
}
}
}
}
|
626192.c | /*
* MessagePack for C unpacking routine
*
* Copyright (C) 2008-2009 FURUHASHI Sadayuki
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*/
#include "msgpack/unpack.h"
#include "msgpack/unpack_define.h"
#include "msgpack/util.h"
#include <stdlib.h>
#include <string.h>
#ifdef _msgpack_atomic_counter_header
#include _msgpack_atomic_counter_header
#endif
typedef struct {
msgpack_zone* z;
bool referenced;
} unpack_user;
#define msgpack_unpack_struct(name) \
struct template ## name
#define msgpack_unpack_func(ret, name) \
ret template ## name
#define msgpack_unpack_callback(name) \
template_callback ## name
#define msgpack_unpack_object msgpack_object
#define msgpack_unpack_user unpack_user
struct template_context;
typedef struct template_context template_context;
static void template_init(template_context* ctx);
static msgpack_object template_data(template_context* ctx);
static int template_execute(
template_context* ctx, const char* data, size_t len, size_t* off);
static inline msgpack_object template_callback_root(unpack_user* u)
{
msgpack_object o;
MSGPACK_UNUSED(u);
o.type = MSGPACK_OBJECT_NIL;
return o;
}
static inline int template_callback_uint8(unpack_user* u, uint8_t d, msgpack_object* o)
{
MSGPACK_UNUSED(u);
o->type = MSGPACK_OBJECT_POSITIVE_INTEGER;
o->via.u64 = d;
return 0;
}
static inline int template_callback_uint16(unpack_user* u, uint16_t d, msgpack_object* o)
{
MSGPACK_UNUSED(u);
o->type = MSGPACK_OBJECT_POSITIVE_INTEGER;
o->via.u64 = d;
return 0;
}
static inline int template_callback_uint32(unpack_user* u, uint32_t d, msgpack_object* o)
{
MSGPACK_UNUSED(u);
o->type = MSGPACK_OBJECT_POSITIVE_INTEGER;
o->via.u64 = d;
return 0;
}
static inline int template_callback_uint64(unpack_user* u, uint64_t d, msgpack_object* o)
{
MSGPACK_UNUSED(u);
o->type = MSGPACK_OBJECT_POSITIVE_INTEGER;
o->via.u64 = d;
return 0;
}
static inline int template_callback_int8(unpack_user* u, int8_t d, msgpack_object* o)
{
MSGPACK_UNUSED(u);
if(d >= 0) {
o->type = MSGPACK_OBJECT_POSITIVE_INTEGER;
o->via.u64 = (uint64_t)d;
return 0;
}
else {
o->type = MSGPACK_OBJECT_NEGATIVE_INTEGER;
o->via.i64 = d;
return 0;
}
}
static inline int template_callback_int16(unpack_user* u, int16_t d, msgpack_object* o)
{
MSGPACK_UNUSED(u);
if(d >= 0) {
o->type = MSGPACK_OBJECT_POSITIVE_INTEGER;
o->via.u64 = (uint64_t)d;
return 0;
}
else {
o->type = MSGPACK_OBJECT_NEGATIVE_INTEGER;
o->via.i64 = d;
return 0;
}
}
static inline int template_callback_int32(unpack_user* u, int32_t d, msgpack_object* o)
{
MSGPACK_UNUSED(u);
if(d >= 0) {
o->type = MSGPACK_OBJECT_POSITIVE_INTEGER;
o->via.u64 = (uint64_t)d;
return 0;
}
else {
o->type = MSGPACK_OBJECT_NEGATIVE_INTEGER;
o->via.i64 = d;
return 0;
}
}
static inline int template_callback_int64(unpack_user* u, int64_t d, msgpack_object* o)
{
MSGPACK_UNUSED(u);
if(d >= 0) {
o->type = MSGPACK_OBJECT_POSITIVE_INTEGER;
o->via.u64 = (uint64_t)d;
return 0;
}
else {
o->type = MSGPACK_OBJECT_NEGATIVE_INTEGER;
o->via.i64 = d;
return 0;
}
}
static inline int template_callback_float(unpack_user* u, float d, msgpack_object* o)
{
MSGPACK_UNUSED(u);
o->type = MSGPACK_OBJECT_FLOAT32;
o->via.f64 = d;
return 0;
}
static inline int template_callback_double(unpack_user* u, double d, msgpack_object* o)
{
MSGPACK_UNUSED(u);
o->type = MSGPACK_OBJECT_FLOAT64;
o->via.f64 = d;
return 0;
}
static inline int template_callback_nil(unpack_user* u, msgpack_object* o)
{
MSGPACK_UNUSED(u);
o->type = MSGPACK_OBJECT_NIL;
return 0;
}
static inline int template_callback_true(unpack_user* u, msgpack_object* o)
{
MSGPACK_UNUSED(u);
o->type = MSGPACK_OBJECT_BOOLEAN;
o->via.boolean = true;
return 0;
}
static inline int template_callback_false(unpack_user* u, msgpack_object* o)
{
MSGPACK_UNUSED(u);
o->type = MSGPACK_OBJECT_BOOLEAN;
o->via.boolean = false;
return 0;
}
static inline int template_callback_array(unpack_user* u, unsigned int n, msgpack_object* o)
{
o->type = MSGPACK_OBJECT_ARRAY;
o->via.array.size = 0;
o->via.array.ptr = (msgpack_object*)msgpack_zone_malloc(u->z, n*sizeof(msgpack_object));
if(o->via.array.ptr == NULL) { return -1; }
return 0;
}
static inline int template_callback_array_item(unpack_user* u, msgpack_object* c, msgpack_object o)
{
MSGPACK_UNUSED(u);
#if defined(__GNUC__) && !defined(__clang__)
memcpy(&c->via.array.ptr[c->via.array.size], &o, sizeof(msgpack_object));
#else /* __GNUC__ && !__clang__ */
c->via.array.ptr[c->via.array.size] = o;
#endif /* __GNUC__ && !__clang__ */
++c->via.array.size;
return 0;
}
static inline int template_callback_map(unpack_user* u, unsigned int n, msgpack_object* o)
{
o->type = MSGPACK_OBJECT_MAP;
o->via.map.size = 0;
o->via.map.ptr = (msgpack_object_kv*)msgpack_zone_malloc(u->z, n*sizeof(msgpack_object_kv));
if(o->via.map.ptr == NULL) { return -1; }
return 0;
}
static inline int template_callback_map_item(unpack_user* u, msgpack_object* c, msgpack_object k, msgpack_object v)
{
MSGPACK_UNUSED(u);
#if defined(__GNUC__) && !defined(__clang__)
memcpy(&c->via.map.ptr[c->via.map.size].key, &k, sizeof(msgpack_object));
memcpy(&c->via.map.ptr[c->via.map.size].val, &v, sizeof(msgpack_object));
#else /* __GNUC__ && !__clang__ */
c->via.map.ptr[c->via.map.size].key = k;
c->via.map.ptr[c->via.map.size].val = v;
#endif /* __GNUC__ && !__clang__ */
++c->via.map.size;
return 0;
}
static inline int template_callback_str(unpack_user* u, const char* b, const char* p, unsigned int l, msgpack_object* o)
{
MSGPACK_UNUSED(b);
o->type = MSGPACK_OBJECT_STR;
o->via.str.ptr = p;
o->via.str.size = l;
u->referenced = true;
return 0;
}
static inline int template_callback_bin(unpack_user* u, const char* b, const char* p, unsigned int l, msgpack_object* o)
{
MSGPACK_UNUSED(b);
o->type = MSGPACK_OBJECT_BIN;
o->via.bin.ptr = p;
o->via.bin.size = l;
u->referenced = true;
return 0;
}
static inline int template_callback_ext(unpack_user* u, const char* b, const char* p, unsigned int l, msgpack_object* o)
{
MSGPACK_UNUSED(b);
o->type = MSGPACK_OBJECT_EXT;
o->via.ext.type = *p;
o->via.ext.ptr = p + 1;
o->via.ext.size = l - 1;
u->referenced = true;
return 0;
}
#include "msgpack/unpack_template.h"
#define CTX_CAST(m) ((template_context*)(m))
#define CTX_REFERENCED(mpac) CTX_CAST((mpac)->ctx)->user.referenced
#define COUNTER_SIZE (sizeof(_msgpack_atomic_counter_t))
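/*
 * The first COUNTER_SIZE bytes of the unpacker buffer hold an atomic
 * reference count.  Zero-copy STR/BIN/EXT objects point straight into the
 * buffer, so the count keeps it alive until every referencing zone is freed.
 */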
static inline void init_count(void* buffer)
{
*(volatile _msgpack_atomic_counter_t*)buffer = 1;
}
static inline void decr_count(void* buffer)
{
// atomic if(--*(_msgpack_atomic_counter_t*)buffer == 0) { free(buffer); }
if(_msgpack_sync_decr_and_fetch((volatile _msgpack_atomic_counter_t*)buffer) == 0) {
free(buffer);
}
}
static inline void incr_count(void* buffer)
{
// atomic ++*(_msgpack_atomic_counter_t*)buffer;
_msgpack_sync_incr_and_fetch((volatile _msgpack_atomic_counter_t*)buffer);
}
static inline _msgpack_atomic_counter_t get_count(void* buffer)
{
return *(volatile _msgpack_atomic_counter_t*)buffer;
}
bool msgpack_unpacker_init(msgpack_unpacker* mpac, size_t initial_buffer_size)
{
char* buffer;
void* ctx;
msgpack_zone* z;
if(initial_buffer_size < COUNTER_SIZE) {
initial_buffer_size = COUNTER_SIZE;
}
buffer = (char*)malloc(initial_buffer_size);
if(buffer == NULL) {
return false;
}
ctx = malloc(sizeof(template_context));
if(ctx == NULL) {
free(buffer);
return false;
}
z = msgpack_zone_new(MSGPACK_ZONE_CHUNK_SIZE);
if(z == NULL) {
free(ctx);
free(buffer);
return false;
}
mpac->buffer = buffer;
mpac->used = COUNTER_SIZE;
mpac->free = initial_buffer_size - mpac->used;
mpac->off = COUNTER_SIZE;
mpac->parsed = 0;
mpac->initial_buffer_size = initial_buffer_size;
mpac->z = z;
mpac->ctx = ctx;
init_count(mpac->buffer);
template_init(CTX_CAST(mpac->ctx));
CTX_CAST(mpac->ctx)->user.z = mpac->z;
CTX_CAST(mpac->ctx)->user.referenced = false;
return true;
}
void msgpack_unpacker_destroy(msgpack_unpacker* mpac)
{
msgpack_zone_free(mpac->z);
free(mpac->ctx);
decr_count(mpac->buffer);
}
msgpack_unpacker* msgpack_unpacker_new(size_t initial_buffer_size)
{
msgpack_unpacker* mpac = (msgpack_unpacker*)malloc(sizeof(msgpack_unpacker));
if(mpac == NULL) {
return NULL;
}
if(!msgpack_unpacker_init(mpac, initial_buffer_size)) {
free(mpac);
return NULL;
}
return mpac;
}
void msgpack_unpacker_free(msgpack_unpacker* mpac)
{
msgpack_unpacker_destroy(mpac);
free(mpac);
}
bool msgpack_unpacker_expand_buffer(msgpack_unpacker* mpac, size_t size)
{
if(mpac->used == mpac->off && get_count(mpac->buffer) == 1
&& !CTX_REFERENCED(mpac)) {
// rewind buffer
mpac->free += mpac->used - COUNTER_SIZE;
mpac->used = COUNTER_SIZE;
mpac->off = COUNTER_SIZE;
if(mpac->free >= size) {
return true;
}
}
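/*
 * If the parse offset is still at the start of the data area, grow the
 * buffer in place with realloc; otherwise allocate a fresh counted buffer
 * and copy only the unparsed tail across.
 */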
if(mpac->off == COUNTER_SIZE) {
char* tmp;
size_t next_size = (mpac->used + mpac->free) * 2; // include COUNTER_SIZE
while(next_size < size + mpac->used) {
size_t tmp_next_size = next_size * 2;
if (tmp_next_size <= next_size) {
next_size = size + mpac->used;
break;
}
next_size = tmp_next_size;
}
tmp = (char*)realloc(mpac->buffer, next_size);
if(tmp == NULL) {
return false;
}
mpac->buffer = tmp;
mpac->free = next_size - mpac->used;
} else {
char* tmp;
size_t next_size = mpac->initial_buffer_size; // include COUNTER_SIZE
size_t not_parsed = mpac->used - mpac->off;
while(next_size < size + not_parsed + COUNTER_SIZE) {
size_t tmp_next_size = next_size * 2;
if (tmp_next_size <= next_size) {
next_size = size + not_parsed + COUNTER_SIZE;
break;
}
next_size = tmp_next_size;
}
tmp = (char*)malloc(next_size);
if(tmp == NULL) {
return false;
}
init_count(tmp);
memcpy(tmp+COUNTER_SIZE, mpac->buffer+mpac->off, not_parsed);
if(CTX_REFERENCED(mpac)) {
if(!msgpack_zone_push_finalizer(mpac->z, decr_count, mpac->buffer)) {
free(tmp);
return false;
}
CTX_REFERENCED(mpac) = false;
} else {
decr_count(mpac->buffer);
}
mpac->buffer = tmp;
mpac->used = not_parsed + COUNTER_SIZE;
mpac->free = next_size - mpac->used;
mpac->off = COUNTER_SIZE;
}
return true;
}
int msgpack_unpacker_execute(msgpack_unpacker* mpac)
{
size_t off = mpac->off;
int ret = template_execute(CTX_CAST(mpac->ctx),
mpac->buffer, mpac->used, &mpac->off);
if(mpac->off > off) {
mpac->parsed += mpac->off - off;
}
return ret;
}
msgpack_object msgpack_unpacker_data(msgpack_unpacker* mpac)
{
return template_data(CTX_CAST(mpac->ctx));
}
msgpack_zone* msgpack_unpacker_release_zone(msgpack_unpacker* mpac)
{
msgpack_zone* r;
msgpack_zone* old;
if(!msgpack_unpacker_flush_zone(mpac)) {
return NULL;
}
r = msgpack_zone_new(MSGPACK_ZONE_CHUNK_SIZE);
if(r == NULL) {
return NULL;
}
old = mpac->z;
mpac->z = r;
CTX_CAST(mpac->ctx)->user.z = mpac->z;
return old;
}
void msgpack_unpacker_reset_zone(msgpack_unpacker* mpac)
{
msgpack_zone_clear(mpac->z);
}
bool msgpack_unpacker_flush_zone(msgpack_unpacker* mpac)
{
if(CTX_REFERENCED(mpac)) {
if(!msgpack_zone_push_finalizer(mpac->z, decr_count, mpac->buffer)) {
return false;
}
CTX_REFERENCED(mpac) = false;
incr_count(mpac->buffer);
}
return true;
}
void msgpack_unpacker_reset(msgpack_unpacker* mpac)
{
template_init(CTX_CAST(mpac->ctx));
// don't reset referenced flag
mpac->parsed = 0;
}
static inline msgpack_unpack_return unpacker_next(msgpack_unpacker* mpac,
msgpack_unpacked* result)
{
int ret;
msgpack_unpacked_destroy(result);
ret = msgpack_unpacker_execute(mpac);
if(ret < 0) {
result->zone = NULL;
memset(&result->data, 0, sizeof(msgpack_object));
return MSGPACK_UNPACK_PARSE_ERROR;
}
if(ret == 0) {
return MSGPACK_UNPACK_CONTINUE;
}
result->zone = msgpack_unpacker_release_zone(mpac);
result->data = msgpack_unpacker_data(mpac);
return MSGPACK_UNPACK_SUCCESS;
}
msgpack_unpack_return msgpack_unpacker_next(msgpack_unpacker* mpac,
msgpack_unpacked* result)
{
int ret;
ret = unpacker_next(mpac, result);
if (ret == MSGPACK_UNPACK_SUCCESS) {
msgpack_unpacker_reset(mpac);
}
return ret;
}
msgpack_unpack_return
msgpack_unpacker_next_with_size(msgpack_unpacker* mpac,
msgpack_unpacked* result, size_t *p_bytes)
{
int ret;
ret = unpacker_next(mpac, result);
if (ret == MSGPACK_UNPACK_SUCCESS || ret == MSGPACK_UNPACK_CONTINUE) {
*p_bytes = mpac->parsed;
}
if (ret == MSGPACK_UNPACK_SUCCESS) {
msgpack_unpacker_reset(mpac);
}
return ret;
}
msgpack_unpack_return
msgpack_unpack(const char* data, size_t len, size_t* off,
msgpack_zone* result_zone, msgpack_object* result)
{
size_t noff = 0;
if(off != NULL) { noff = *off; }
if(len <= noff) {
// FIXME
return MSGPACK_UNPACK_CONTINUE;
}
else {
int e;
template_context ctx;
template_init(&ctx);
ctx.user.z = result_zone;
ctx.user.referenced = false;
e = template_execute(&ctx, data, len, &noff);
if(e < 0) {
return MSGPACK_UNPACK_PARSE_ERROR;
}
if(off != NULL) { *off = noff; }
if(e == 0) {
return MSGPACK_UNPACK_CONTINUE;
}
*result = template_data(&ctx);
if(noff < len) {
return MSGPACK_UNPACK_EXTRA_BYTES;
}
return MSGPACK_UNPACK_SUCCESS;
}
}
msgpack_unpack_return
msgpack_unpack_next(msgpack_unpacked* result,
const char* data, size_t len, size_t* off)
{
size_t noff = 0;
msgpack_unpacked_destroy(result);
if(off != NULL) { noff = *off; }
if(len <= noff) {
return MSGPACK_UNPACK_CONTINUE;
}
if (!result->zone) {
result->zone = msgpack_zone_new(MSGPACK_ZONE_CHUNK_SIZE);
}
if (!result->zone) {
return MSGPACK_UNPACK_NOMEM_ERROR;
}
else {
int e;
template_context ctx;
template_init(&ctx);
ctx.user.z = result->zone;
ctx.user.referenced = false;
e = template_execute(&ctx, data, len, &noff);
if(e < 0) {
msgpack_zone_free(result->zone);
result->zone = NULL;
return MSGPACK_UNPACK_PARSE_ERROR;
}
if(e == 0) {
return MSGPACK_UNPACK_CONTINUE;
}
if(off != NULL) { *off = noff; }
result->data = template_data(&ctx);
return MSGPACK_UNPACK_SUCCESS;
}
}
#if defined(MSGPACK_OLD_COMPILER_BUS_ERROR_WORKAROUND)
// FIXME: Dirty hack to avoid a bus error caused by OS X's old gcc.
static void dummy_function_to_avoid_bus_error()
{
}
#endif
|
388205.c | /* Copyright 2021 KiwiKey
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include QMK_KEYBOARD_H
// Defines names for use in layer keycodes and the keymap
enum layer_names {
_BASE,
_FN
};
const uint16_t PROGMEM keymaps[][MATRIX_ROWS][MATRIX_COLS] = {
[_BASE] = LAYOUT_alice_split_bs(
KC_ESC, KC_GRV, KC_1, KC_2, KC_3, KC_4, KC_5, KC_6, KC_7, KC_8, KC_9, KC_0, KC_MINS, KC_EQL, KC_BSPC, KC_DEL,
KC_MUTE, KC_TAB, KC_Q, KC_W, KC_E, KC_R, KC_T, KC_Y, KC_U, KC_I, KC_O, KC_P, KC_LBRC, KC_RBRC, KC_BSLS,
KC_VOLD, KC_CAPS, KC_A, KC_S, KC_D, KC_F, KC_G, KC_H, KC_J, KC_K, KC_L, KC_SCLN, KC_QUOT, KC_ENT,
KC_VOLU, KC_LSFT, KC_Z, KC_X, KC_C, KC_V, KC_B, KC_B, KC_N, KC_M, KC_COMM, KC_DOT, KC_SLSH, KC_RSFT, KC_RGUI,
KC_LCTL, KC_LALT, KC_SPC, MO(_FN), KC_SPC, KC_MENU, KC_RCTRL
),
[_FN] = LAYOUT_alice_split_bs(
_______, _______, KC_F1, KC_F2, KC_F3, KC_F4, KC_F5, KC_F6, KC_F7, KC_F8, KC_F9, KC_F10, KC_F11, KC_F12, _______, KC_DEL,
_______, RGB_TOG, RGB_MOD, RGB_RMOD,_______, _______, _______, BL_TOGG, BL_STEP, BL_BRTG, KC_UP, _______, _______, _______, _______,
_______, _______, RGB_HUD, RGB_HUI, _______, _______, _______, _______, _______, KC_LEFT, KC_DOWN, KC_RGHT, _______, _______,
RESET, _______, RGB_VAD, RGB_VAI, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______,
_______, _______, _______, _______, _______, _______, _______
)
};
|
129699.c | /*----------------------------------------------------------------------------*/
/* Xymon monitor library. */
/* */
/* This is a library module, part of libxymon. */
/* It contains routines for handling header- and footer-files. */
/* */
/* Copyright (C) 2002-2011 Henrik Storner <henrik@storner.dk> */
/* */
/* This program is released under the GNU General Public License (GPL), */
/* version 2. See the file "COPYING" for details. */
/* */
/*----------------------------------------------------------------------------*/
static char rcsid[] = "$Id$";
#include <sys/types.h>
#include <sys/stat.h>
#include <limits.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <pcre.h>
#include "libxymon.h"
#include "version.h"
/* Stuff for headfoot - variables we can set dynamically */
static char *hostenv_hikey = NULL;
static char *hostenv_host = NULL;
static char *hostenv_ip = NULL;
static char *hostenv_svc = NULL;
static char *hostenv_color = NULL;
static char *hostenv_pagepath = NULL;
static time_t hostenv_reportstart = 0;
static time_t hostenv_reportend = 0;
static char *hostenv_repwarn = NULL;
static char *hostenv_reppanic = NULL;
static time_t hostenv_snapshot = 0;
static char *hostenv_logtime = NULL;
static char *hostenv_templatedir = NULL;
static int hostenv_refresh = 60;
static char *statusboard = NULL;
static char *scheduleboard = NULL;
static char *hostpattern_text = NULL;
static pcre *hostpattern = NULL;
static char *pagepattern_text = NULL;
static pcre *pagepattern = NULL;
static char *ippattern_text = NULL;
static pcre *ippattern = NULL;
static RbtHandle hostnames;
static RbtHandle testnames;
typedef struct treerec_t {
char *name;
int flag;
} treerec_t;
static int backdays = 0, backhours = 0, backmins = 0, backsecs = 0;
static char hostenv_eventtimestart[20];
static char hostenv_eventtimeend[20];
typedef struct listrec_t {
char *name, *val, *extra;
int selected;
struct listrec_t *next;
} listrec_t;
typedef struct listpool_t {
char *name;
struct listrec_t *listhead, *listtail;
struct listpool_t *next;
} listpool_t;
static listpool_t *listpoolhead = NULL;
typedef struct bodystorage_t {
char *id;
strbuffer_t *txt;
} bodystorage_t;
static void clearflags(RbtHandle tree)
{
RbtIterator handle;
treerec_t *rec;
if (!tree) return;
for (handle = rbtBegin(tree); (handle != rbtEnd(tree)); handle = rbtNext(tree, handle)) {
rec = (treerec_t *)gettreeitem(tree, handle);
rec->flag = 0;
}
}
void sethostenv(char *host, char *ip, char *svc, char *color, char *hikey)
{
if (hostenv_hikey) xfree(hostenv_hikey);
if (hostenv_host) xfree(hostenv_host);
if (hostenv_ip) xfree(hostenv_ip);
if (hostenv_svc) xfree(hostenv_svc);
if (hostenv_color) xfree(hostenv_color);
hostenv_hikey = (hikey ? strdup(htmlquoted(hikey)) : NULL);
hostenv_host = strdup(htmlquoted(host));
hostenv_ip = strdup(htmlquoted(ip));
hostenv_svc = strdup(htmlquoted(svc));
hostenv_color = strdup(color);
}
void sethostenv_report(time_t reportstart, time_t reportend, double repwarn, double reppanic)
{
if (hostenv_repwarn == NULL) hostenv_repwarn = malloc(10);
if (hostenv_reppanic == NULL) hostenv_reppanic = malloc(10);
hostenv_reportstart = reportstart;
hostenv_reportend = reportend;
sprintf(hostenv_repwarn, "%.2f", repwarn);
sprintf(hostenv_reppanic, "%.2f", reppanic);
}
void sethostenv_snapshot(time_t snapshot)
{
hostenv_snapshot = snapshot;
}
void sethostenv_histlog(char *histtime)
{
if (hostenv_logtime) xfree(hostenv_logtime);
hostenv_logtime = strdup(histtime);
}
void sethostenv_template(char *dir)
{
if (hostenv_templatedir) xfree(hostenv_templatedir);
hostenv_templatedir = strdup(dir);
}
void sethostenv_refresh(int n)
{
hostenv_refresh = n;
}
void sethostenv_pagepath(char *s)
{
if (!s) return;
if (hostenv_pagepath) xfree(hostenv_pagepath);
hostenv_pagepath = strdup(s);
}
void sethostenv_filter(char *hostptn, char *pageptn, char *ipptn)
{
const char *errmsg;
int errofs;
if (hostpattern_text) xfree(hostpattern_text);
if (hostpattern) { pcre_free(hostpattern); hostpattern = NULL; }
if (pagepattern_text) xfree(pagepattern_text);
if (pagepattern) { pcre_free(pagepattern); pagepattern = NULL; }
if (ippattern_text) xfree(ippattern_text);
if (ippattern) { pcre_free(ippattern); ippattern = NULL; }
/* Setup the pattern to match names against */
if (hostptn) {
hostpattern_text = strdup(hostptn);
hostpattern = pcre_compile(hostptn, PCRE_CASELESS, &errmsg, &errofs, NULL);
}
if (pageptn) {
pagepattern_text = strdup(pageptn);
pagepattern = pcre_compile(pageptn, PCRE_CASELESS, &errmsg, &errofs, NULL);
}
if (ipptn) {
ippattern_text = strdup(ipptn);
ippattern = pcre_compile(ipptn, PCRE_CASELESS, &errmsg, &errofs, NULL);
}
}
static listpool_t *find_listpool(char *listname)
{
listpool_t *pool = NULL;
if (!listname) listname = "";
for (pool = listpoolhead; (pool && strcmp(pool->name, listname)); pool = pool->next);
if (!pool) {
pool = (listpool_t *)calloc(1, sizeof(listpool_t));
pool->name = strdup(listname);
pool->next = listpoolhead;
listpoolhead = pool;
}
return pool;
}
void sethostenv_clearlist(char *listname)
{
listpool_t *pool = NULL;
listrec_t *zombie;
pool = find_listpool(listname);
while (pool->listhead) {
zombie = pool->listhead;
pool->listhead = pool->listhead->next;
xfree(zombie->name); xfree(zombie->val); xfree(zombie);
}
}
void sethostenv_addtolist(char *listname, char *name, char *val, char *extra, int selected)
{
listpool_t *pool = NULL;
listrec_t *newitem = (listrec_t *)calloc(1, sizeof(listrec_t));
pool = find_listpool(listname);
newitem->name = strdup(name);
newitem->val = strdup(val);
newitem->extra = (extra ? strdup(extra) : NULL);
newitem->selected = selected;
if (pool->listtail) {
pool->listtail->next = newitem;
pool->listtail = newitem;
}
else {
pool->listhead = pool->listtail = newitem;
}
}
static int critackttprio = 0;
static char *critackttgroup = NULL;
static char *critackttextra = NULL;
static char *ackinfourl = NULL;
static char *critackdocurl = NULL;
void sethostenv_critack(int prio, char *ttgroup, char *ttextra, char *infourl, char *docurl)
{
critackttprio = prio;
if (critackttgroup) xfree(critackttgroup); critackttgroup = strdup((ttgroup && *ttgroup) ? ttgroup : " ");
if (critackttextra) xfree(critackttextra); critackttextra = strdup((ttextra && *ttextra) ? ttextra : " ");
if (ackinfourl) xfree(ackinfourl); ackinfourl = strdup(infourl);
if (critackdocurl) xfree(critackdocurl); critackdocurl = strdup((docurl && *docurl) ? docurl : "");
}
static char *criteditupdinfo = NULL;
static int criteditprio = -1;
static char *criteditgroup = NULL;
static time_t criteditstarttime = 0;
static time_t criteditendtime = 0;
static char *criteditextra = NULL;
static char *criteditslawkdays = NULL;
static char *criteditslastart = NULL;
static char *criteditslaend = NULL;
static char **criteditclonelist = NULL;
static int criteditclonesize = 0;
void sethostenv_critedit(char *updinfo, int prio, char *group, time_t starttime, time_t endtime, char *crittime, char *extra)
{
char *p;
if (criteditupdinfo) xfree(criteditupdinfo);
criteditupdinfo = strdup(updinfo);
criteditprio = prio;
criteditstarttime = starttime;
criteditendtime = endtime;
if (criteditgroup) xfree(criteditgroup);
criteditgroup = strdup(group ? group : "");
if (criteditextra) xfree(criteditextra);
criteditextra = strdup(extra ? extra : "");
if (criteditslawkdays) xfree(criteditslawkdays);
criteditslawkdays = criteditslastart = criteditslaend = NULL;
if (crittime) {
criteditslawkdays = strdup(crittime);
p = strchr(criteditslawkdays, ':');
if (p) {
*p = '\0';
criteditslastart = p+1;
p = strchr(criteditslastart, ':');
if (p) {
*p = '\0';
criteditslaend = p+1;
}
}
if (criteditslawkdays && (!criteditslastart || !criteditslaend)) {
xfree(criteditslawkdays);
criteditslawkdays = criteditslastart = criteditslaend = NULL;
}
}
}
void sethostenv_critclonelist_clear(void)
{
int i;
if (criteditclonelist) {
for (i=0; (criteditclonelist[i]); i++) xfree(criteditclonelist[i]);
xfree(criteditclonelist);
}
criteditclonelist = malloc(sizeof(char *));
criteditclonelist[0] = NULL;
criteditclonesize = 0;
}
void sethostenv_critclonelist_add(char *hostname)
{
char *p;
criteditclonelist = (char **)realloc(criteditclonelist, (criteditclonesize + 2)*sizeof(char *));
criteditclonelist[criteditclonesize] = strdup(hostname);
p = criteditclonelist[criteditclonesize];
criteditclonelist[++criteditclonesize] = NULL;
p += (strlen(p) - 1);
if (*p == '=') *p = '\0';
}
void sethostenv_backsecs(int seconds)
{
backdays = seconds / 86400; seconds -= backdays*86400;
backhours = seconds / 3600; seconds -= backhours*3600;
backmins = seconds / 60; seconds -= backmins*60;
backsecs = seconds;
}
void sethostenv_eventtime(time_t starttime, time_t endtime)
{
*hostenv_eventtimestart = *hostenv_eventtimeend = '\0';
if (starttime) strftime(hostenv_eventtimestart, sizeof(hostenv_eventtimestart), "%Y/%m/%d@%H:%M:%S", localtime(&starttime));
if (endtime) strftime(hostenv_eventtimeend, sizeof(hostenv_eventtimeend), "%Y/%m/%d@%H:%M:%S", localtime(&endtime));
}
char *wkdayselect(char wkday, char *valtxt, int isdefault)
{
static char result[100];
char *selstr;
if (!criteditslawkdays) {
if (isdefault) selstr = "SELECTED";
else selstr = "";
}
else {
if (strchr(criteditslawkdays, wkday)) selstr = "SELECTED";
else selstr = "";
}
sprintf(result, "<option value=\"%c\" %s>%s</option>\n", wkday, selstr, valtxt);
return result;
}
static void *wanted_host(char *hostname)
{
void *hinfo = hostinfo(hostname);
int result, ovector[30];
if (!hinfo) return NULL;
if (hostpattern) {
result = pcre_exec(hostpattern, NULL, hostname, strlen(hostname), 0, 0,
ovector, (sizeof(ovector)/sizeof(int)));
if (result < 0) return NULL;
}
if (pagepattern && hinfo) {
char *pname = xmh_item(hinfo, XMH_PAGEPATH);
result = pcre_exec(pagepattern, NULL, pname, strlen(pname), 0, 0,
ovector, (sizeof(ovector)/sizeof(int)));
if (result < 0) return NULL;
}
if (ippattern && hinfo) {
char *hostip = xmh_item(hinfo, XMH_IP);
result = pcre_exec(ippattern, NULL, hostip, strlen(hostip), 0, 0,
ovector, (sizeof(ovector)/sizeof(int)));
if (result < 0) return NULL;
}
return hinfo;
}
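/*
 * Fetch the status board and the pending-schedule listing from xymond once,
 * building the hostnames/testnames trees used by the HOSTLIST, TESTLIST,
 * DISABLELIST and SCHEDULELIST template tokens below.
 */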
static void fetch_board(void)
{
static int haveboard = 0;
char *walk, *eoln;
sendreturn_t *sres;
if (haveboard) return;
sres = newsendreturnbuf(1, NULL);
if (sendmessage("xymondboard fields=hostname,testname,disabletime,dismsg",
NULL, XYMON_TIMEOUT, sres) != XYMONSEND_OK) {
freesendreturnbuf(sres);
return;
}
haveboard = 1;
statusboard = getsendreturnstr(sres, 1);
freesendreturnbuf(sres);
hostnames = rbtNew(name_compare);
testnames = rbtNew(name_compare);
walk = statusboard;
while (walk) {
eoln = strchr(walk, '\n'); if (eoln) *eoln = '\0';
if (strlen(walk) && (strncmp(walk, "summary|", 8) != 0)) {
char *buf, *hname = NULL, *tname = NULL;
treerec_t *newrec;
buf = strdup(walk);
hname = gettok(buf, "|");
if (hname && wanted_host(hname) && hostinfo(hname)) {
newrec = (treerec_t *)malloc(sizeof(treerec_t));
newrec->name = strdup(hname);
newrec->flag = 0;
rbtInsert(hostnames, newrec->name, newrec);
tname = gettok(NULL, "|");
if (tname) {
newrec = (treerec_t *)malloc(sizeof(treerec_t));
newrec->name = strdup(tname);
newrec->flag = 0;
rbtInsert(testnames, strdup(tname), newrec);
}
}
xfree(buf);
}
if (eoln) {
*eoln = '\n';
walk = eoln + 1;
}
else
walk = NULL;
}
sres = newsendreturnbuf(1, NULL);
if (sendmessage("schedule", NULL, XYMON_TIMEOUT, sres) != XYMONSEND_OK) {
freesendreturnbuf(sres);
return;
}
scheduleboard = getsendreturnstr(sres, 1);
freesendreturnbuf(sres);
}
static char *eventreport_timestring(time_t timestamp)
{
static char result[20];
strftime(result, sizeof(result), "%Y/%m/%d@%H:%M:%S", localtime(&timestamp));
return result;
}
static void build_pagepath_dropdown(FILE *output)
{
RbtHandle ptree;
void *hwalk;
RbtIterator handle;
ptree = rbtNew(string_compare);
for (hwalk = first_host(); (hwalk); hwalk = next_host(hwalk, 0)) {
char *path = xmh_item(hwalk, XMH_PAGEPATH);
char *ptext;
handle = rbtFind(ptree, path);
if (handle != rbtEnd(ptree)) continue;
ptext = xmh_item(hwalk, XMH_PAGEPATHTITLE);
rbtInsert(ptree, ptext, path);
}
for (handle = rbtBegin(ptree); (handle != rbtEnd(ptree)); handle = rbtNext(ptree, handle)) {
char *path, *ptext;
rbtKeyValue(ptree, handle, (void **)&ptext, (void **)&path);
fprintf(output, "<option value=\"%s\">%s</option>\n", path, ptext);
}
rbtDelete(ptree);
}
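/*
 * Return the body text for an id tag: the value of the matching environment
 * variable (or, with a "file:" prefix, the file it names), with any $ENVVAR
 * references expanded.  Parsed results are cached per tag.
 */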
char *xymonbody(char *id)
{
static RbtHandle bodystorage;
static int firsttime = 1;
RbtIterator handle;
bodystorage_t *bodyelement;
strbuffer_t *rawdata, *parseddata;
char *envstart, *envend, *outpos;
char *idtag, *idval;
int idtaglen;
if (firsttime) {
bodystorage = rbtNew(string_compare);
firsttime = 0;
}
idtaglen = strspn(id, "ABCDEFGHIJKLMNOPQRSTUVWXYZ");
idtag = (char *)malloc(idtaglen + 1);
strncpy(idtag, id, idtaglen);
*(idtag+idtaglen) = '\0';
handle = rbtFind(bodystorage, idtag);
if (handle != rbtEnd(bodystorage)) {
bodyelement = (bodystorage_t *)gettreeitem(bodystorage, handle);
xfree(idtag);
return STRBUF(bodyelement->txt);
}
rawdata = newstrbuffer(0);
idval = xgetenv(idtag);
if (idval == NULL) return "";
if (strncmp(idval, "file:", 5) == 0) {
FILE *fd;
strbuffer_t *inbuf = newstrbuffer(0);
fd = stackfopen(idval+5, "r", NULL);
if (fd != NULL) {
while (stackfgets(inbuf, NULL)) addtostrbuffer(rawdata, inbuf);
stackfclose(fd);
}
freestrbuffer(inbuf);
}
else {
addtobuffer(rawdata, idval);
}
/* Output the body data, but expand any environment variables along the way */
parseddata = newstrbuffer(0);
outpos = STRBUF(rawdata);
while (*outpos) {
envstart = strchr(outpos, '$');
if (envstart) {
char savechar;
char *envval = NULL;
*envstart = '\0';
addtobuffer(parseddata, outpos);
envstart++;
envend = envstart + strspn(envstart, "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789");
savechar = *envend; *envend = '\0';
if (*envstart) envval = xgetenv(envstart);
*envend = savechar;
outpos = envend;
if (envval) {
addtobuffer(parseddata, envval);
}
else {
addtobuffer(parseddata, "$");
addtobuffer(parseddata, envstart);
}
}
else {
addtobuffer(parseddata, outpos);
outpos += strlen(outpos);
}
}
freestrbuffer(rawdata);
bodyelement = (bodystorage_t *)calloc(1, sizeof(bodystorage_t));
bodyelement->id = idtag;
bodyelement->txt = parseddata;
rbtInsert(bodystorage, bodyelement->id, bodyelement);
return STRBUF(bodyelement->txt);
}
typedef struct distest_t {
char *name;
char *cause;
time_t until;
struct distest_t *next;
} distest_t;
typedef struct dishost_t {
char *name;
struct distest_t *tests;
struct dishost_t *next;
} dishost_t;
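/*
 * Expand a header/footer template: literal text is copied through, and each
 * &TOKEN (upper-case letters, digits and underscore) is replaced with
 * dynamic content built from the sethostenv_* state above.
 */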
void output_parsed(FILE *output, char *templatedata, int bgcolor, time_t selectedtime)
{
char *t_start, *t_next;
char savechar;
time_t now = getcurrenttime(NULL);
time_t yesterday = getcurrenttime(NULL) - 86400;
struct tm *nowtm;
for (t_start = templatedata, t_next = strchr(t_start, '&'); (t_next); ) {
/* Copy from t_start to t_next unchanged */
*t_next = '\0'; t_next++;
fprintf(output, "%s", t_start);
/* Find token */
t_start = t_next;
/* Don't include lower-case letters - reserve those for e.g. "&nbsp;" */
t_next += strspn(t_next, "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_");
savechar = *t_next; *t_next = '\0';
if ((strcmp(t_start, "XYMWEBDATE") == 0) || (strcmp(t_start, "BBDATE") == 0)) {
char *datefmt = xgetenv("XYMONDATEFORMAT");
char datestr[100];
MEMDEFINE(datestr);
/*
* If no XYMONDATEFORMAT setting, use a format string that
* produces output similar to that from ctime()
*/
if (datefmt == NULL) datefmt = "%a %b %d %H:%M:%S %Y\n";
if (hostenv_reportstart != 0) {
char starttime[20], endtime[20];
MEMDEFINE(starttime); MEMDEFINE(endtime);
strftime(starttime, sizeof(starttime), "%b %d %Y", localtime(&hostenv_reportstart));
strftime(endtime, sizeof(endtime), "%b %d %Y", localtime(&hostenv_reportend));
if (strcmp(starttime, endtime) == 0)
fprintf(output, "%s", starttime);
else
fprintf(output, "%s - %s", starttime, endtime);
MEMUNDEFINE(starttime); MEMUNDEFINE(endtime);
}
else if (hostenv_snapshot != 0) {
strftime(datestr, sizeof(datestr), datefmt, localtime(&hostenv_snapshot));
fprintf(output, "%s", datestr);
}
else {
strftime(datestr, sizeof(datestr), datefmt, localtime(&now));
fprintf(output, "%s", datestr);
}
MEMUNDEFINE(datestr);
}
else if ((strcmp(t_start, "XYMWEBBACKGROUND") == 0) || (strcmp(t_start, "BBBACKGROUND") == 0)) {
fprintf(output, "%s", colorname(bgcolor));
}
else if ((strcmp(t_start, "XYMWEBCOLOR") == 0) || (strcmp(t_start, "BBCOLOR") == 0))
fprintf(output, "%s", hostenv_color);
else if ((strcmp(t_start, "XYMWEBSVC") == 0) || (strcmp(t_start, "BBSVC") == 0))
fprintf(output, "%s", hostenv_svc);
else if ((strcmp(t_start, "XYMWEBHOST") == 0) || (strcmp(t_start, "BBHOST") == 0))
fprintf(output, "%s", hostenv_host);
else if ((strcmp(t_start, "XYMWEBHIKEY") == 0) || (strcmp(t_start, "BBHIKEY") == 0))
fprintf(output, "%s", (hostenv_hikey ? hostenv_hikey : hostenv_host));
else if ((strcmp(t_start, "XYMWEBIP") == 0) || (strcmp(t_start, "BBIP") == 0))
fprintf(output, "%s", hostenv_ip);
else if ((strcmp(t_start, "XYMWEBIPNAME") == 0) || (strcmp(t_start, "BBIPNAME") == 0)) {
if (strcmp(hostenv_ip, "0.0.0.0") == 0) fprintf(output, "%s", hostenv_host);
else fprintf(output, "%s", hostenv_ip);
}
else if ((strcmp(t_start, "XYMONREPWARN") == 0) || (strcmp(t_start, "BBREPWARN") == 0))
fprintf(output, "%s", hostenv_repwarn);
else if ((strcmp(t_start, "XYMONREPPANIC") == 0) || (strcmp(t_start, "BBREPPANIC") == 0))
fprintf(output, "%s", hostenv_reppanic);
else if (strcmp(t_start, "LOGTIME") == 0) fprintf(output, "%s", (hostenv_logtime ? hostenv_logtime : ""));
else if ((strcmp(t_start, "XYMWEBREFRESH") == 0) || (strcmp(t_start, "BBREFRESH") == 0))
fprintf(output, "%d", hostenv_refresh);
else if ((strcmp(t_start, "XYMWEBPAGEPATH") == 0) || (strcmp(t_start, "BBPAGEPATH") == 0))
fprintf(output, "%s", (hostenv_pagepath ? hostenv_pagepath : ""));
else if (strcmp(t_start, "REPMONLIST") == 0) {
int i;
struct tm monthtm;
char mname[20];
char *selstr;
MEMDEFINE(mname);
nowtm = localtime(&selectedtime);
for (i=1; (i <= 12); i++) {
if (i == (nowtm->tm_mon + 1)) selstr = "SELECTED"; else selstr = "";
monthtm.tm_mon = (i-1); monthtm.tm_mday = 1; monthtm.tm_year = nowtm->tm_year;
monthtm.tm_hour = monthtm.tm_min = monthtm.tm_sec = monthtm.tm_isdst = 0;
strftime(mname, sizeof(mname)-1, "%B", &monthtm);
fprintf(output, "<OPTION VALUE=\"%d\" %s>%s\n", i, selstr, mname);
}
MEMUNDEFINE(mname);
}
else if (strcmp(t_start, "MONLIST") == 0) {
int i;
struct tm monthtm;
char mname[20];
MEMDEFINE(mname);
nowtm = localtime(&selectedtime);
for (i=1; (i <= 12); i++) {
monthtm.tm_mon = (i-1); monthtm.tm_mday = 1; monthtm.tm_year = nowtm->tm_year;
monthtm.tm_hour = monthtm.tm_min = monthtm.tm_sec = monthtm.tm_isdst = 0;
strftime(mname, sizeof(mname)-1, "%B", &monthtm);
fprintf(output, "<OPTION VALUE=\"%d\">%s\n", i, mname);
}
MEMUNDEFINE(mname);
}
else if (strcmp(t_start, "REPWEEKLIST") == 0) {
int i;
char weekstr[5];
int weeknum;
char *selstr;
nowtm = localtime(&selectedtime);
strftime(weekstr, sizeof(weekstr)-1, "%V", nowtm); weeknum = atoi(weekstr);
for (i=1; (i <= 53); i++) {
if (i == weeknum) selstr = "SELECTED"; else selstr = "";
fprintf(output, "<OPTION VALUE=\"%d\" %s>%d\n", i, selstr, i);
}
}
else if (strcmp(t_start, "REPDAYLIST") == 0) {
int i;
char *selstr;
nowtm = localtime(&selectedtime);
for (i=1; (i <= 31); i++) {
if (i == nowtm->tm_mday) selstr = "SELECTED"; else selstr = "";
fprintf(output, "<OPTION VALUE=\"%d\" %s>%d\n", i, selstr, i);
}
}
else if (strcmp(t_start, "DAYLIST") == 0) {
int i;
nowtm = localtime(&selectedtime);
for (i=1; (i <= 31); i++) {
fprintf(output, "<OPTION VALUE=\"%d\">%d\n", i, i);
}
}
else if (strcmp(t_start, "REPYEARLIST") == 0) {
int i;
char *selstr;
int beginyear, endyear;
nowtm = localtime(&selectedtime);
beginyear = nowtm->tm_year + 1900 - 5;
endyear = nowtm->tm_year + 1900;
for (i=beginyear; (i <= endyear); i++) {
if (i == (nowtm->tm_year + 1900)) selstr = "SELECTED"; else selstr = "";
fprintf(output, "<OPTION VALUE=\"%d\" %s>%d\n", i, selstr, i);
}
}
else if (strcmp(t_start, "FUTUREYEARLIST") == 0) {
int i;
char *selstr;
int beginyear, endyear;
nowtm = localtime(&selectedtime);
beginyear = nowtm->tm_year + 1900;
endyear = nowtm->tm_year + 1900 + 5;
for (i=beginyear; (i <= endyear); i++) {
if (i == (nowtm->tm_year + 1900)) selstr = "SELECTED"; else selstr = "";
fprintf(output, "<OPTION VALUE=\"%d\" %s>%d\n", i, selstr, i);
}
}
else if (strcmp(t_start, "YEARLIST") == 0) {
int i;
int beginyear, endyear;
nowtm = localtime(&selectedtime);
beginyear = nowtm->tm_year + 1900;
endyear = nowtm->tm_year + 1900 + 5;
for (i=beginyear; (i <= endyear); i++) {
fprintf(output, "<OPTION VALUE=\"%d\">%d\n", i, i);
}
}
else if (strcmp(t_start, "REPHOURLIST") == 0) {
int i;
struct tm *nowtm = localtime(&yesterday);
char *selstr;
for (i=0; (i <= 24); i++) {
if (i == nowtm->tm_hour) selstr = "SELECTED"; else selstr = "";
fprintf(output, "<OPTION VALUE=\"%d\" %s>%d\n", i, selstr, i);
}
}
else if (strcmp(t_start, "HOURLIST") == 0) {
int i;
for (i=0; (i <= 24); i++) {
fprintf(output, "<OPTION VALUE=\"%d\">%d\n", i, i);
}
}
else if (strcmp(t_start, "REPMINLIST") == 0) {
int i;
struct tm *nowtm = localtime(&yesterday);
char *selstr;
for (i=0; (i <= 59); i++) {
if (i == nowtm->tm_min) selstr = "SELECTED"; else selstr = "";
fprintf(output, "<OPTION VALUE=\"%02d\" %s>%02d\n", i, selstr, i);
}
}
else if (strcmp(t_start, "MINLIST") == 0) {
int i;
for (i=0; (i <= 59); i++) {
fprintf(output, "<OPTION VALUE=\"%02d\">%02d\n", i, i);
}
}
else if (strcmp(t_start, "REPSECLIST") == 0) {
int i;
char *selstr;
for (i=0; (i <= 59); i++) {
if (i == 0) selstr = "SELECTED"; else selstr = "";
fprintf(output, "<OPTION VALUE=\"%02d\" %s>%02d\n", i, selstr, i);
}
}
else if (strcmp(t_start, "HOSTFILTER") == 0) {
if (hostpattern_text) fprintf(output, "%s", hostpattern_text);
}
else if (strcmp(t_start, "PAGEFILTER") == 0) {
if (pagepattern_text) fprintf(output, "%s", pagepattern_text);
}
else if (strcmp(t_start, "IPFILTER") == 0) {
if (ippattern_text) fprintf(output, "%s", ippattern_text);
}
else if (strcmp(t_start, "HOSTLIST") == 0) {
RbtIterator handle;
treerec_t *rec;
fetch_board();
for (handle = rbtBegin(hostnames); (handle != rbtEnd(hostnames)); handle = rbtNext(hostnames, handle)) {
rec = (treerec_t *)gettreeitem(hostnames, handle);
if (wanted_host(rec->name)) {
fprintf(output, "<OPTION VALUE=\"%s\">%s</OPTION>\n", rec->name, rec->name);
}
}
}
else if (strcmp(t_start, "JSHOSTLIST") == 0) {
RbtIterator handle;
fetch_board();
clearflags(testnames);
fprintf(output, "var hosts = new Array();\n");
fprintf(output, "hosts[\"ALL\"] = [ \"ALL\"");
for (handle = rbtBegin(testnames); (handle != rbtEnd(testnames)); handle = rbtNext(testnames, handle)) {
treerec_t *rec = gettreeitem(testnames, handle);
fprintf(output, ", \"%s\"", rec->name);
}
fprintf(output, " ];\n");
for (handle = rbtBegin(hostnames); (handle != rbtEnd(hostnames)); handle = rbtNext(hostnames, handle)) {
treerec_t *hrec = gettreeitem(hostnames, handle);
if (wanted_host(hrec->name)) {
RbtIterator thandle;
treerec_t *trec;
char *bwalk, *tname, *p;
char *key = (char *)malloc(strlen(hrec->name) + 3);
/* Setup the search key and find the first occurrence. */
sprintf(key, "\n%s|", hrec->name);
if (strncmp(statusboard, (key+1), strlen(key+1)) == 0)
bwalk = statusboard;
else {
bwalk = strstr(statusboard, key);
if (bwalk) bwalk++;
}
while (bwalk) {
tname = bwalk + strlen(key+1);
p = strchr(tname, '|'); if (p) *p = '\0';
if ( (strcmp(tname, xgetenv("INFOCOLUMN")) != 0) &&
(strcmp(tname, xgetenv("TRENDSCOLUMN")) != 0) ) {
thandle = rbtFind(testnames, tname);
if (thandle != rbtEnd(testnames)) {
trec = (treerec_t *)gettreeitem(testnames, thandle);
trec->flag = 1;
}
}
if (p) *p = '|';
bwalk = strstr(tname, key); if (bwalk) bwalk++;
}
fprintf(output, "hosts[\"%s\"] = [ \"ALL\"", hrec->name);
for (thandle = rbtBegin(testnames); (thandle != rbtEnd(testnames)); thandle = rbtNext(testnames, thandle)) {
trec = (treerec_t *)gettreeitem(testnames, thandle);
if (trec->flag == 0) continue;
trec->flag = 0;
fprintf(output, ", \"%s\"", trec->name);
}
fprintf(output, " ];\n");
}
}
}
else if (strcmp(t_start, "TESTLIST") == 0) {
RbtIterator handle;
treerec_t *rec;
fetch_board();
for (handle = rbtBegin(testnames); (handle != rbtEnd(testnames)); handle = rbtNext(testnames, handle)) {
rec = (treerec_t *)gettreeitem(testnames, handle);
fprintf(output, "<OPTION VALUE=\"%s\">%s</OPTION>\n", rec->name, rec->name);
}
}
else if (strcmp(t_start, "DISABLELIST") == 0) {
char *walk, *eoln;
dishost_t *dhosts = NULL, *hwalk, *hprev;
distest_t *twalk;
fetch_board();
clearflags(testnames);
walk = statusboard;
while (walk) {
eoln = strchr(walk, '\n'); if (eoln) *eoln = '\0';
if (*walk) {
char *buf, *hname, *tname, *dismsg, *p;
time_t distime;
RbtIterator thandle;
treerec_t *rec;
buf = strdup(walk);
hname = tname = dismsg = NULL; distime = 0;
hname = gettok(buf, "|");
if (hname) tname = gettok(NULL, "|");
if (tname) { p = gettok(NULL, "|"); if (p) distime = atol(p); }
if (distime) dismsg = gettok(NULL, "|\n");
if (hname && tname && (distime != 0) && dismsg && wanted_host(hname)) {
nldecode(dismsg);
hwalk = dhosts; hprev = NULL;
while (hwalk && (strcasecmp(hname, hwalk->name) > 0)) {
hprev = hwalk;
hwalk = hwalk->next;
}
if (!hwalk || (strcasecmp(hname, hwalk->name) != 0)) {
dishost_t *newitem = (dishost_t *) malloc(sizeof(dishost_t));
newitem->name = strdup(hname);
newitem->tests = NULL;
newitem->next = hwalk;
if (!hprev)
dhosts = newitem;
else
hprev->next = newitem;
hwalk = newitem;
}
twalk = (distest_t *) malloc(sizeof(distest_t));
twalk->name = strdup(tname);
twalk->cause = strdup(dismsg);
twalk->until = distime;
twalk->next = hwalk->tests;
hwalk->tests = twalk;
thandle = rbtFind(testnames, tname);
if (thandle != rbtEnd(testnames)) {
rec = gettreeitem(testnames, thandle);
rec->flag = 1;
}
}
xfree(buf);
}
if (eoln) {
*eoln = '\n';
walk = eoln+1;
}
else {
walk = NULL;
}
}
if (dhosts) {
/* Insert the "All hosts" record first. */
hwalk = (dishost_t *)calloc(1, sizeof(dishost_t));
hwalk->next = dhosts;
dhosts = hwalk;
for (hwalk = dhosts; (hwalk); hwalk = hwalk->next) {
fprintf(output, "<TR>");
fprintf(output, "<TD>");
fprintf(output,"<form method=\"post\" action=\"%s/enadis.sh\">\n",
xgetenv("SECURECGIBINURL"));
fprintf(output, "<table summary=\"%s disabled tests\" width=\"100%%\">\n",
(hwalk->name ? hwalk->name : ""));
fprintf(output, "<tr>\n");
fprintf(output, "<TH COLSPAN=3><I>%s</I></TH>",
(hwalk->name ? hwalk->name : "All hosts"));
fprintf(output, "</tr>\n");
fprintf(output, "<tr>\n");
fprintf(output, "<td>\n");
if (hwalk->name) {
fprintf(output, "<input name=\"hostname\" type=hidden value=\"%s\">\n",
hwalk->name);
fprintf(output, "<textarea name=\"%s causes\" rows=\"8\" cols=\"50\" readonly style=\"font-size: 10pt\">\n", hwalk->name);
for (twalk = hwalk->tests; (twalk); twalk = twalk->next) {
char *msg = twalk->cause;
msg += strspn(msg, "0123456789 ");
fprintf(output, "%s\n%s\nUntil: %s\n---------------------\n",
twalk->name, msg,
(twalk->until == -1) ? "OK" : ctime(&twalk->until));
}
fprintf(output, "</textarea>\n");
}
else {
dishost_t *hw2;
fprintf(output, "<select multiple size=8 name=\"hostname\">\n");
for (hw2 = hwalk->next; (hw2); hw2 = hw2->next)
fprintf(output, "<option value=\"%s\">%s</option>\n",
hw2->name, hw2->name);
fprintf(output, "</select>\n");
}
fprintf(output, "</td>\n");
fprintf(output, "<td align=center>\n");
fprintf(output, "<select multiple size=8 name=\"enabletest\">\n");
fprintf(output, "<option value=\"*\" selected>ALL</option>\n");
if (hwalk->tests) {
for (twalk = hwalk->tests; (twalk); twalk = twalk->next) {
fprintf(output, "<option value=\"%s\">%s</option>\n",
twalk->name, twalk->name);
}
}
else {
RbtIterator tidx;
treerec_t *rec;
for (tidx = rbtBegin(testnames); (tidx != rbtEnd(testnames)); tidx = rbtNext(testnames, tidx)) {
rec = gettreeitem(testnames, tidx);
if (rec->flag == 0) continue;
fprintf(output, "<option value=\"%s\">%s</option>\n",
rec->name, rec->name);
}
}
fprintf(output, "</select>\n");
fprintf(output, "</td>\n");
fprintf(output, "<td align=center>\n");
fprintf(output, "<input name=\"go\" type=submit value=\"Enable\">\n");
fprintf(output, "</td>\n");
fprintf(output, "</tr>\n");
fprintf(output, "</table>\n");
fprintf(output, "</form>\n");
fprintf(output, "</td>\n");
fprintf(output, "</TR>\n");
}
}
else {
fprintf(output, "<tr><th align=center colspan=3><i>No tests disabled</i></th></tr>\n");
}
}
else if (strcmp(t_start, "SCHEDULELIST") == 0) {
char *walk, *eoln;
int gotany = 0;
fetch_board();
walk = scheduleboard;
while (walk) {
eoln = strchr(walk, '\n'); if (eoln) *eoln = '\0';
if (*walk) {
int id = 0;
time_t executiontime = 0;
char *sender = NULL, *cmd = NULL, *buf, *p, *eoln;
buf = strdup(walk);
p = gettok(buf, "|");
if (p) { id = atoi(p); p = gettok(NULL, "|"); }
if (p) { executiontime = atoi(p); p = gettok(NULL, "|"); }
if (p) { sender = p; p = gettok(NULL, "|"); }
if (p) { cmd = p; }
if (id && executiontime && sender && cmd) {
gotany = 1;
nldecode(cmd);
fprintf(output, "<TR>\n");
fprintf(output, "<TD>%s</TD>\n", ctime(&executiontime));
fprintf(output, "<TD>");
p = cmd;
while ((eoln = strchr(p, '\n')) != NULL) {
*eoln = '\0';
fprintf(output, "%s<BR>", p);
p = (eoln + 1);
}
fprintf(output, "</TD>\n");
fprintf(output, "<td>\n");
fprintf(output, "<form method=\"post\" action=\"%s/enadis.sh\">\n",
xgetenv("SECURECGIBINURL"));
fprintf(output, "<input name=canceljob type=hidden value=\"%d\">\n",
id);
fprintf(output, "<input name=go type=submit value=\"Cancel\">\n");
fprintf(output, "</form></td>\n");
fprintf(output, "</TR>\n");
}
xfree(buf);
}
if (eoln) {
*eoln = '\n';
walk = eoln+1;
}
else {
walk = NULL;
}
}
if (!gotany) {
fprintf(output, "<tr><th align=center colspan=3><i>No tasks scheduled</i></th></tr>\n");
}
}
else if (strncmp(t_start, "GENERICLIST", strlen("GENERICLIST")) == 0) {
listpool_t *pool = find_listpool(t_start + strlen("GENERICLIST"));
listrec_t *walk;
for (walk = pool->listhead; (walk); walk = walk->next)
fprintf(output, "<OPTION VALUE=\"%s\" %s %s>%s</OPTION>\n",
walk->val, (walk->selected ? "SELECTED" : ""), (walk->extra ? walk->extra : ""),
walk->name);
}
else if (strcmp(t_start, "CRITACKTTPRIO") == 0) fprintf(output, "%d", critackttprio);
else if (strcmp(t_start, "CRITACKTTGROUP") == 0) fprintf(output, "%s", critackttgroup);
else if (strcmp(t_start, "CRITACKTTEXTRA") == 0) fprintf(output, "%s", critackttextra);
else if (strcmp(t_start, "CRITACKINFOURL") == 0) fprintf(output, "%s", ackinfourl);
else if (strcmp(t_start, "CRITACKDOCURL") == 0) fprintf(output, "%s", critackdocurl);
else if (strcmp(t_start, "CRITEDITUPDINFO") == 0) {
fprintf(output, "%s", criteditupdinfo);
}
else if (strcmp(t_start, "CRITEDITPRIOLIST") == 0) {
int i;
char *selstr;
for (i=1; (i <= 3); i++) {
selstr = ((i == criteditprio) ? "SELECTED" : "");
fprintf(output, "<option value=\"%d\" %s>%d</option>\n", i, selstr, i);
}
}
else if (strcmp(t_start, "CRITEDITCLONELIST") == 0) {
int i;
for (i=0; (criteditclonelist[i]); i++)
fprintf(output, "<option value=\"%s\">%s</option>\n",
criteditclonelist[i], criteditclonelist[i]);
}
else if (strcmp(t_start, "CRITEDITGROUP") == 0) {
fprintf(output, "%s", criteditgroup);
}
else if (strcmp(t_start, "CRITEDITEXTRA") == 0) {
fprintf(output, "%s", criteditextra);
}
else if (strcmp(t_start, "CRITEDITWKDAYS") == 0) {
fprintf(output, "%s", wkdayselect('*', "All days", 1));
fprintf(output, "%s", wkdayselect('W', "Mon-Fri", 0));
fprintf(output, "%s", wkdayselect('1', "Monday", 0));
fprintf(output, "%s", wkdayselect('2', "Tuesday", 0));
fprintf(output, "%s", wkdayselect('3', "Wednesday", 0));
fprintf(output, "%s", wkdayselect('4', "Thursday", 0));
fprintf(output, "%s", wkdayselect('5', "Friday", 0));
fprintf(output, "%s", wkdayselect('6', "Saturday", 0));
fprintf(output, "%s", wkdayselect('0', "Sunday", 0));
}
else if (strcmp(t_start, "CRITEDITSTART") == 0) {
int i, curr;
char *selstr;
curr = (criteditslastart ? (atoi(criteditslastart) / 100) : 0);
for (i=0; (i <= 23); i++) {
selstr = ((i == curr) ? "SELECTED" : "");
fprintf(output, "<option value=\"%02i00\" %s>%02i:00</option>\n", i, selstr, i);
}
}
else if (strcmp(t_start, "CRITEDITEND") == 0) {
int i, curr;
char *selstr;
curr = (criteditslaend ? (atoi(criteditslaend) / 100) : 24);
for (i=1; (i <= 24); i++) {
selstr = ((i == curr) ? "SELECTED" : "");
fprintf(output, "<option value=\"%02i00\" %s>%02i:00</option>\n", i, selstr, i);
}
}
else if (strncmp(t_start, "CRITEDITDAYLIST", 13) == 0) {
time_t t = ((*(t_start+13) == '1') ? criteditstarttime : criteditendtime);
char *defstr = ((*(t_start+13) == '1') ? "Now" : "Never");
int i;
char *selstr;
struct tm *tm;
tm = localtime(&t);
selstr = ((t == 0) ? "SELECTED" : "");
fprintf(output, "<option value=\"0\" %s>%s</option>\n", selstr, defstr);
for (i=1; (i <= 31); i++) {
selstr = ( (t && (tm->tm_mday == i)) ? "SELECTED" : "");
fprintf(output, "<option value=\"%d\" %s>%d</option>\n", i, selstr, i);
}
}
else if (strncmp(t_start, "CRITEDITMONLIST", 13) == 0) {
time_t t = ((*(t_start+13) == '1') ? criteditstarttime : criteditendtime);
char *defstr = ((*(t_start+13) == '1') ? "Now" : "Never");
int i;
char *selstr;
struct tm tm;
time_t now;
struct tm nowtm;
struct tm monthtm;
char mname[20];
memcpy(&tm, localtime(&t), sizeof(tm));
now = getcurrenttime(NULL);
memcpy(&nowtm, localtime(&now), sizeof(tm));
selstr = ((t == 0) ? "SELECTED" : "");
fprintf(output, "<option value=\"0\" %s>%s</option>\n", selstr, defstr);
for (i=1; (i <= 12); i++) {
selstr = ( (t && (tm.tm_mon == (i -1))) ? "SELECTED" : "");
monthtm.tm_mon = (i-1); monthtm.tm_mday = 1; monthtm.tm_year = nowtm.tm_year;
monthtm.tm_hour = monthtm.tm_min = monthtm.tm_sec = monthtm.tm_isdst = 0;
strftime(mname, sizeof(mname)-1, "%B", &monthtm);
fprintf(output, "<OPTION VALUE=\"%d\" %s>%s</option>\n", i, selstr, mname);
}
}
else if (strncmp(t_start, "CRITEDITYEARLIST", 14) == 0) {
time_t t = ((*(t_start+14) == '1') ? criteditstarttime : criteditendtime);
char *defstr = ((*(t_start+14) == '1') ? "Now" : "Never");
int i;
char *selstr;
struct tm tm;
time_t now;
struct tm nowtm;
int beginyear, endyear;
memcpy(&tm, localtime(&t), sizeof(tm));
now = getcurrenttime(NULL);
memcpy(&nowtm, localtime(&now), sizeof(tm));
beginyear = nowtm.tm_year + 1900;
endyear = nowtm.tm_year + 1900 + 5;
selstr = ((t == 0) ? "SELECTED" : "");
fprintf(output, "<option value=\"0\" %s>%s</option>\n", selstr, defstr);
for (i=beginyear; (i <= endyear); i++) {
selstr = ( (t && (tm.tm_year == (i - 1900))) ? "SELECTED" : "");
fprintf(output, "<OPTION VALUE=\"%d\" %s>%d</option>\n", i, selstr, i);
}
}
else if (hostenv_hikey && ( (strncmp(t_start, "XMH_", 4) == 0) || (strncmp(t_start, "BBH_", 4) == 0) )) {
void *hinfo = hostinfo(hostenv_hikey);
if (hinfo) {
char *s;
if (strncmp(t_start, "BBH_", 4) == 0) memmove(t_start, "XMH_", 4); /* For compatibility */
s = xmh_item_byname(hinfo, t_start);
if (!s) {
fprintf(output, "&%s", t_start);
}
else {
fprintf(output, "%s", s);
}
}
}
else if (strncmp(t_start, "BACKDAYS", 8) == 0) {
fprintf(output, "%d", backdays);
}
else if (strncmp(t_start, "BACKHOURS", 9) == 0) {
fprintf(output, "%d", backhours);
}
else if (strncmp(t_start, "BACKMINS", 8) == 0) {
fprintf(output, "%d", backmins);
}
else if (strncmp(t_start, "BACKSECS", 8) == 0) {
fprintf(output, "%d", backsecs);
}
else if (strncmp(t_start, "EVENTLASTMONTHBEGIN", 19) == 0) {
time_t t = getcurrenttime(NULL);
struct tm *tm = localtime(&t);
tm->tm_mon -= 1;
tm->tm_mday = 1;
tm->tm_hour = tm->tm_min = tm->tm_sec = 0;
tm->tm_isdst = -1;
t = mktime(tm);
fprintf(output, "%s", eventreport_timestring(t));
}
else if (strncmp(t_start, "EVENTCURRMONTHBEGIN", 19) == 0) {
time_t t = getcurrenttime(NULL);
struct tm *tm = localtime(&t);
tm->tm_mday = 1;
tm->tm_hour = tm->tm_min = tm->tm_sec = 0;
tm->tm_isdst = -1;
t = mktime(tm);
fprintf(output, "%s", eventreport_timestring(t));
}
else if (strncmp(t_start, "EVENTLASTWEEKBEGIN", 18) == 0) {
time_t t = getcurrenttime(NULL);
struct tm *tm = localtime(&t);
int weekstart = atoi(xgetenv("WEEKSTART"));
if (tm->tm_wday == weekstart) { /* Do nothing */ }
else if (tm->tm_wday > weekstart) tm->tm_mday -= (tm->tm_wday - weekstart);
else tm->tm_mday += (weekstart - tm->tm_wday) - 7;
tm->tm_mday -= 7;
tm->tm_hour = tm->tm_min = tm->tm_sec = 0;
tm->tm_isdst = -1;
t = mktime(tm);
fprintf(output, "%s", eventreport_timestring(t));
}
else if (strncmp(t_start, "EVENTCURRWEEKBEGIN", 18) == 0) {
time_t t = getcurrenttime(NULL);
struct tm *tm = localtime(&t);
int weekstart = atoi(xgetenv("WEEKSTART"));
if (tm->tm_wday == weekstart) { /* Do nothing */ }
else if (tm->tm_wday > weekstart) tm->tm_mday -= (tm->tm_wday - weekstart);
else tm->tm_mday += (weekstart - tm->tm_wday) - 7;
tm->tm_hour = tm->tm_min = tm->tm_sec = 0;
tm->tm_isdst = -1;
t = mktime(tm);
fprintf(output, "%s", eventreport_timestring(t));
}
else if (strncmp(t_start, "EVENTLASTYEARBEGIN", 18) == 0) {
time_t t = getcurrenttime(NULL);
struct tm *tm = localtime(&t);
tm->tm_year -= 1;
tm->tm_mon = 0;
tm->tm_mday = 1;
tm->tm_hour = tm->tm_min = tm->tm_sec = 0;
tm->tm_isdst = -1;
t = mktime(tm);
fprintf(output, "%s", eventreport_timestring(t));
}
else if (strncmp(t_start, "EVENTCURRYEARBEGIN", 18) == 0) {
time_t t = getcurrenttime(NULL);
struct tm *tm = localtime(&t);
tm->tm_mon = 0;
tm->tm_mday = 1;
tm->tm_hour = tm->tm_min = tm->tm_sec = 0;
tm->tm_isdst = -1;
t = mktime(tm);
fprintf(output, "%s", eventreport_timestring(t));
}
else if (strncmp(t_start, "EVENTYESTERDAY", 14) == 0) {
time_t t = getcurrenttime(NULL);
struct tm *tm = localtime(&t);
tm->tm_mday -= 1;
tm->tm_hour = tm->tm_min = tm->tm_sec = 0;
tm->tm_isdst = -1;
t = mktime(tm);
fprintf(output, "%s", eventreport_timestring(t));
}
else if (strncmp(t_start, "EVENTTODAY", 10) == 0) {
time_t t = getcurrenttime(NULL);
struct tm *tm = localtime(&t);
tm->tm_hour = tm->tm_min = tm->tm_sec = 0;
tm->tm_isdst = -1;
t = mktime(tm);
fprintf(output, "%s", eventreport_timestring(t));
}
else if (strncmp(t_start, "EVENTNOW", 8) == 0) {
time_t t = getcurrenttime(NULL);
fprintf(output, "%s", eventreport_timestring(t));
}
else if (strncmp(t_start, "PAGEPATH_DROPDOWN", 17) == 0) {
build_pagepath_dropdown(output);
}
else if (strncmp(t_start, "EVENTSTARTTIME", 8) == 0) {
fprintf(output, "%s", hostenv_eventtimestart);
}
else if (strncmp(t_start, "EVENTENDTIME", 8) == 0) {
fprintf(output, "%s", hostenv_eventtimeend);
}
else if (strncmp(t_start, "XYMONBODY", 9) == 0) {
char *bodytext = xymonbody(t_start);
fprintf(output, "%s", bodytext);
}
else if (*t_start && (savechar == ';')) {
/* A "&xxx;" is probably an HTML escape - output unchanged. */
fprintf(output, "&%s", t_start);
}
else if (*t_start && (strncmp(t_start, "SELECT_", 7) == 0)) {
/*
* Special for getting the SELECTED tag into list boxes.
* Cannot use xgetenv because it complains about undefined
* environment variables.
*/
char *val = getenv(t_start);
fprintf(output, "%s", (val ? val : ""));
}
else if (strlen(t_start) && xgetenv(t_start)) {
fprintf(output, "%s", xgetenv(t_start));
}
else fprintf(output, "&%s", t_start); /* No substitution - copy all unchanged. */
*t_next = savechar; t_start = t_next; t_next = strchr(t_start, '&');
}
/* Remainder of file */
fprintf(output, "%s", t_start);
}
void headfoot(FILE *output, char *template, char *pagepath, char *head_or_foot, int bgcolor)
{
int fd;
char filename[PATH_MAX];
char *bulletinfile;
struct stat st;
char *templatedata;
char *hfpath;
int have_pagepath = (hostenv_pagepath != NULL);
MEMDEFINE(filename);
if (xgetenv("XYMONDREL") == NULL) {
char *xymondrel = (char *)malloc(12+strlen(VERSION));
sprintf(xymondrel, "XYMONDREL=%s", VERSION);
putenv(xymondrel);
}
/*
* "pagepath" is the relative path for this page, e.g.
* - for the top-level page it is ""
* - for a page, it is "pagename/"
* - for a subpage, it is "pagename/subpagename/"
*
* We allow page-specific header/footer files named PAGE_header or PAGE_SUBPAGE_header,
* so we need to scan for an existing file - starting with the
* most detailed one, and working up towards the standard "web/template_TYPE" file.
*/
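/*
 * Illustrative example of the lookup order below: with pagepath
 * "pagename/subpagename/" and head_or_foot "header" we would try
 * "pagename_subpagename_header", then "pagename_header" (in the template
 * directory or $XYMONHOME/web/), and finally fall back to "<template>_header".
 */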
hfpath = strdup(pagepath);
/* Trim off excess trailing slashes */
if (*hfpath) {
while (*hfpath && (*(hfpath + strlen(hfpath) - 1) == '/')) *(hfpath + strlen(hfpath) - 1) = '\0';
}
fd = -1;
if (!have_pagepath) hostenv_pagepath = strdup(hfpath);
while ((fd == -1) && strlen(hfpath)) {
char *p;
char *elemstart;
if (hostenv_templatedir) {
sprintf(filename, "%s/", hostenv_templatedir);
}
else {
sprintf(filename, "%s/web/", xgetenv("XYMONHOME"));
}
p = strchr(hfpath, '/'); elemstart = hfpath;
while (p) {
*p = '\0';
strcat(filename, elemstart);
strcat(filename, "_");
*p = '/';
p++;
elemstart = p; p = strchr(elemstart, '/');
}
strcat(filename, elemstart);
strcat(filename, "_");
strcat(filename, head_or_foot);
dbgprintf("Trying header/footer file '%s'\n", filename);
fd = open(filename, O_RDONLY);
if (fd == -1) {
p = strrchr(hfpath, '/');
if (p == NULL) p = hfpath;
*p = '\0';
}
}
xfree(hfpath);
if (fd == -1) {
/* Fall back to default head/foot file. */
if (hostenv_templatedir) {
sprintf(filename, "%s/%s_%s", hostenv_templatedir, template, head_or_foot);
}
else {
sprintf(filename, "%s/web/%s_%s", xgetenv("XYMONHOME"), template, head_or_foot);
}
dbgprintf("Trying header/footer file '%s'\n", filename);
fd = open(filename, O_RDONLY);
}
if (fd != -1) {
fstat(fd, &st);
templatedata = (char *) malloc(st.st_size + 1);
read(fd, templatedata, st.st_size);
templatedata[st.st_size] = '\0';
close(fd);
output_parsed(output, templatedata, bgcolor, getcurrenttime(NULL));
xfree(templatedata);
}
else {
fprintf(output, "<HTML><BODY> \n <HR size=4> \n <BR>%s is either missing or invalid, please create this file with your custom header<BR> \n<HR size=4>", htmlquoted(filename));
}
/* Check for bulletin files */
bulletinfile = (char *)malloc(strlen(xgetenv("XYMONHOME")) + strlen("/web/bulletin_") + strlen(head_or_foot)+1);
sprintf(bulletinfile, "%s/web/bulletin_%s", xgetenv("XYMONHOME"), head_or_foot);
fd = open(bulletinfile, O_RDONLY);
if (fd != -1) {
fstat(fd, &st);
templatedata = (char *) malloc(st.st_size + 1);
read(fd, templatedata, st.st_size);
templatedata[st.st_size] = '\0';
close(fd);
output_parsed(output, templatedata, bgcolor, getcurrenttime(NULL));
xfree(templatedata);
}
if (!have_pagepath) {
xfree(hostenv_pagepath); hostenv_pagepath = NULL;
}
xfree(bulletinfile);
MEMUNDEFINE(filename);
}
void showform(FILE *output, char *headertemplate, char *formtemplate, int color, time_t seltime,
char *pretext, char *posttext)
{
/* Present the query form */
int formfile;
char formfn[PATH_MAX];
sprintf(formfn, "%s/web/%s", xgetenv("XYMONHOME"), formtemplate);
formfile = open(formfn, O_RDONLY);
if (formfile >= 0) {
char *inbuf;
struct stat st;
fstat(formfile, &st);
inbuf = (char *) malloc(st.st_size + 1);
read(formfile, inbuf, st.st_size);
inbuf[st.st_size] = '\0';
close(formfile);
if (headertemplate) headfoot(output, headertemplate, (hostenv_pagepath ? hostenv_pagepath : ""), "header", color);
if (pretext) fprintf(output, "%s", pretext);
output_parsed(output, inbuf, color, seltime);
if (posttext) fprintf(output, "%s", posttext);
if (headertemplate) headfoot(output, headertemplate, (hostenv_pagepath ? hostenv_pagepath : ""), "footer", color);
xfree(inbuf);
}
}
|
935630.c | #include<stdio.h>
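/*
 * Reads up to 10 integers, looks a value up with linear search, then
 * bubble-sorts the array and repeats the lookup with binary search,
 * reporting the number of comparisons each method used.
 */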
void bubble_sort(int a[10],int n)
{
int i,j,temp;
for(i=0;i<n-1;i++)
{
for(j=0;j<n-i-1;j++)
{
if(a[j]>a[j+1])
{
temp=a[j];
a[j]=a[j+1];
a[j+1]=temp;
}
}
}
}
int main()
{
int array[10],i,n,data,found=0;
printf("\nEnter the size of the array : ");
scanf("%d",&n);
printf("\nEnter the elements of the array : \n");
for(i=0;i<n;i++)
{
printf("Enter the element %d : ",i+1);
scanf("%d",&array[i]);
}
printf("\nEnter the data to be search for : ");
scanf("%d",&data);
for(i=0;i<n;i++)
{
if(array[i]==data)
{
printf("\nIn Linear Searching :\nThe data is found at index : %d \nNumber of comparisons made : %d",i,i+1);
found=1;
break;
}
}
if(found==0)
{
printf("\nElement not found.....!");
return 0;
}
bubble_sort(array,n);
int beg,end,mid,counter=0;
beg=0;
end=n-1;
mid=(beg+end)/2;
while(beg<=end)
{
if(array[mid]==data)
{
printf("\nIn Binary searching :\nData is found at index : %d\nNumber of comparisons made : %d",mid,counter);
break;
}
else if(array[mid]>data)
{
end=mid-1;
mid=(beg+end)/2;
counter++;
}
else
{
beg=mid+1;
mid=(beg+end)/2;
counter++;
}
}
return 0;
} |
281270.c | /**
* (C) Copyright 2016 Intel Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
* The Government's rights to use, modify, reproduce, release, perform, display,
* or disclose this software are subject to the terms of the Apache License as
* provided in Contract No. B609815.
* Any reproduction of computer software, computer software documentation, or
* portions thereof marked with this legend must also reproduce the markings.
*/
/**
* dc_pool/ds_pool: RPC Protocol Serialization Functions
*/
#define DD_SUBSYS DD_FAC(pool)
#include <daos/rpc.h>
#include "rpc.h"
struct crt_msg_field *pool_create_in_fields[] = {
&CMF_UUID, /* op.uuid */
&CMF_UUID, /* op.hdl */
&CMF_UINT32, /* uid */
&CMF_UINT32, /* gid */
&CMF_UINT32, /* mode */
&CMF_UINT32, /* ntgts */
&DMF_UUID_ARRAY, /* tgt_uuids */
&CMF_RANK_LIST, /* tgt_ranks */
&CMF_UINT32, /* ndomains */
&CMF_UINT32, /* padding */
&DMF_UINT32_ARRAY /* domains */
};
struct crt_msg_field *pool_create_out_fields[] = {
&CMF_INT, /* op.rc */
&CMF_UINT32, /* op.map_version (unused) */
&DMF_RSVC_HINT /* op.hint */
};
struct crt_msg_field *pool_connect_in_fields[] = {
&CMF_UUID, /* op.uuid */
&CMF_UUID, /* op.handle */
&CMF_UINT32, /* uid */
&CMF_UINT32, /* gid */
&CMF_UINT64, /* capas */
&CMF_BULK /* map_bulk */
};
struct crt_msg_field *pool_connect_out_fields[] = {
&CMF_INT, /* op.rc */
&CMF_UINT32, /* op.map_version */
&DMF_RSVC_HINT, /* op.hint */
&CMF_UINT32, /* mode */
&CMF_UINT32 /* map_buf_size */
};
struct crt_msg_field *pool_disconnect_in_fields[] = {
&CMF_UUID, /* op.uuid */
&CMF_UUID /* op.handle */
};
struct crt_msg_field *pool_disconnect_out_fields[] = {
&CMF_INT, /* op.rc */
&CMF_UINT32, /* op.map_version */
&DMF_RSVC_HINT /* op.hint */
};
struct crt_msg_field *pool_query_in_fields[] = {
&CMF_UUID, /* op.uuid */
&CMF_UUID, /* op.handle */
&CMF_BULK /* map_bulk */
};
struct crt_msg_field *pool_query_out_fields[] = {
&CMF_INT, /* op.rc */
&CMF_UINT32, /* op.map_version */
&DMF_RSVC_HINT, /* op.hint */
&CMF_UINT32, /* mode */
&CMF_UINT32, /* map_buf_size */
&CMF_UINT32, /* rebuild_st.version */
&CMF_UINT32, /* rebuild_st.pad_32 */
&CMF_INT, /* rebuild_st.errno */
&CMF_INT, /* rebuild_st.done */
&CMF_UINT64, /* rebuild_st.obj_nr */
&CMF_UINT64, /* rebuild_st.rec_nr */
};
struct crt_msg_field *pool_tgt_update_in_fields[] = {
&CMF_UUID, /* op.uuid */
&CMF_UUID, /* op.handle */
&CMF_RANK_LIST /* targets */
};
struct crt_msg_field *pool_tgt_update_out_fields[] = {
&CMF_INT, /* op.rc */
&CMF_UINT32, /* op.map_version */
&DMF_RSVC_HINT, /* op.hint */
&CMF_RANK_LIST /* targets */
};
struct crt_msg_field *pool_evict_in_fields[] = {
&CMF_UUID, /* op.uuid */
&CMF_UUID /* op.handle */
};
struct crt_msg_field *pool_evict_out_fields[] = {
&CMF_INT, /* op.rc */
&CMF_UINT32, /* op.map_version */
&DMF_RSVC_HINT /* op.hint */
};
struct crt_msg_field *pool_svc_stop_in_fields[] = {
&CMF_UUID, /* op.uuid */
&CMF_UUID /* op.handle */
};
struct crt_msg_field *pool_svc_stop_out_fields[] = {
&CMF_INT, /* op.rc */
&CMF_UINT32, /* op.map_version */
&DMF_RSVC_HINT /* op.hint */
};
struct crt_msg_field *pool_tgt_connect_in_fields[] = {
&CMF_UUID, /* pool */
&CMF_UUID, /* pool_hdl */
&CMF_UINT64, /* capas */
&CMF_UINT32 /* pool_map_version */
};
struct crt_msg_field *pool_tgt_connect_out_fields[] = {
&CMF_INT /* rc */
};
struct crt_msg_field *pool_tgt_disconnect_in_fields[] = {
&CMF_UUID, /* pool */
&DMF_UUID_ARRAY /* hdls */
};
struct crt_msg_field *pool_tgt_disconnect_out_fields[] = {
&CMF_INT /* rc */
};
struct crt_msg_field *pool_tgt_update_map_in_fields[] = {
&CMF_UUID, /* pool */
&CMF_UINT32 /* map_version */
};
struct crt_msg_field *pool_tgt_update_map_out_fields[] = {
&CMF_INT /* rc */
};
struct crt_req_format DQF_POOL_CREATE =
DEFINE_CRT_REQ_FMT("POOL_CREATE", pool_create_in_fields,
pool_create_out_fields);
struct crt_req_format DQF_POOL_CONNECT =
DEFINE_CRT_REQ_FMT("POOL_CONNECT", pool_connect_in_fields,
pool_connect_out_fields);
struct crt_req_format DQF_POOL_DISCONNECT =
DEFINE_CRT_REQ_FMT("POOL_DISCONNECT", pool_disconnect_in_fields,
pool_disconnect_out_fields);
struct crt_req_format DQF_POOL_QUERY =
DEFINE_CRT_REQ_FMT("POOL_QUERY", pool_query_in_fields,
pool_query_out_fields);
struct crt_req_format DQF_POOL_EXCLUDE =
DEFINE_CRT_REQ_FMT("POOL_EXCLUDE", pool_tgt_update_in_fields,
pool_tgt_update_out_fields);
struct crt_req_format DQF_POOL_EXCLUDE_OUT =
DEFINE_CRT_REQ_FMT("POOL_EXCLUDE_OUT", pool_tgt_update_in_fields,
pool_tgt_update_out_fields);
struct crt_req_format DQF_POOL_ADD =
DEFINE_CRT_REQ_FMT("POOL_ADD", pool_tgt_update_in_fields,
pool_tgt_update_out_fields);
struct crt_req_format DQF_POOL_EVICT =
DEFINE_CRT_REQ_FMT("POOL_EVICT", pool_evict_in_fields,
pool_evict_out_fields);
struct crt_req_format DQF_POOL_SVC_STOP =
DEFINE_CRT_REQ_FMT("POOL_SVC_STOP", pool_svc_stop_in_fields,
pool_svc_stop_out_fields);
struct crt_req_format DQF_POOL_TGT_CONNECT =
DEFINE_CRT_REQ_FMT("POOL_TGT_CONNECT", pool_tgt_connect_in_fields,
pool_tgt_connect_out_fields);
struct crt_req_format DQF_POOL_TGT_DISCONNECT =
DEFINE_CRT_REQ_FMT("POOL_TGT_DISCONNECT", pool_tgt_disconnect_in_fields,
pool_tgt_disconnect_out_fields);
struct crt_req_format DQF_POOL_TGT_UPDATE_MAP =
DEFINE_CRT_REQ_FMT("POOL_TGT_UPDATE_MAP", pool_tgt_update_map_in_fields,
pool_tgt_update_map_out_fields);
int
pool_req_create(crt_context_t crt_ctx, crt_endpoint_t *tgt_ep,
crt_opcode_t opc, crt_rpc_t **req)
{
crt_opcode_t opcode;
opcode = DAOS_RPC_OPCODE(opc, DAOS_POOL_MODULE, 1);
return crt_req_create(crt_ctx, tgt_ep, opcode, req);
}
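/*
 * Usage sketch (illustrative only): a client picks one of the opcodes
 * registered below, e.g. pool_req_create(ctx, &ep, POOL_CONNECT, &rpc),
 * and the helper tags it with DAOS_RPC_OPCODE(opc, DAOS_POOL_MODULE, 1)
 * before handing it to crt_req_create().
 */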
struct daos_rpc pool_rpcs[] = {
{
.dr_name = "POOL_CREATE",
.dr_opc = POOL_CREATE,
.dr_ver = 1,
.dr_flags = 0,
.dr_req_fmt = &DQF_POOL_CREATE,
}, {
.dr_name = "POOL_CONNECT",
.dr_opc = POOL_CONNECT,
.dr_ver = 1,
.dr_flags = 0,
.dr_req_fmt = &DQF_POOL_CONNECT,
}, {
.dr_name = "POOL_DISCONNECT",
.dr_opc = POOL_DISCONNECT,
.dr_ver = 1,
.dr_flags = 0,
.dr_req_fmt = &DQF_POOL_DISCONNECT,
}, {
.dr_name = "POOL_QUERY",
.dr_opc = POOL_QUERY,
.dr_ver = 1,
.dr_flags = 0,
.dr_req_fmt = &DQF_POOL_QUERY
}, {
.dr_name = "POOL_EXCLUDE",
.dr_opc = POOL_EXCLUDE,
.dr_ver = 1,
.dr_flags = 0,
.dr_req_fmt = &DQF_POOL_EXCLUDE
}, {
.dr_name = "POOL_EVICT",
.dr_opc = POOL_EVICT,
.dr_ver = 1,
.dr_flags = 0,
.dr_req_fmt = &DQF_POOL_EVICT
}, {
.dr_name = "POOL_ADD",
.dr_opc = POOL_ADD,
.dr_ver = 1,
.dr_flags = 0,
.dr_req_fmt = &DQF_POOL_ADD,
}, {
.dr_name = "POOL_EXCLUDE_OUT",
.dr_opc = POOL_EXCLUDE_OUT,
.dr_ver = 1,
.dr_flags = 0,
.dr_req_fmt = &DQF_POOL_EXCLUDE_OUT,
}, {
.dr_name = "POOL_SVC_STOP",
.dr_opc = POOL_SVC_STOP,
.dr_ver = 1,
.dr_flags = 0,
.dr_req_fmt = &DQF_POOL_SVC_STOP,
}, {
.dr_opc = 0
}
};
struct daos_rpc pool_srv_rpcs[] = {
{
.dr_name = "POOL_TGT_CONNECT",
.dr_opc = POOL_TGT_CONNECT,
.dr_ver = 1,
.dr_flags = 0,
.dr_req_fmt = &DQF_POOL_TGT_CONNECT
}, {
.dr_name = "POOL_TGT_DISCONNECT",
.dr_opc = POOL_TGT_DISCONNECT,
.dr_ver = 1,
.dr_flags = 0,
.dr_req_fmt = &DQF_POOL_TGT_DISCONNECT
}, {
.dr_name = "POOL_TGT_UPDATE_MAP",
.dr_opc = POOL_TGT_UPDATE_MAP,
.dr_ver = 1,
.dr_flags = 0,
.dr_req_fmt = &DQF_POOL_TGT_UPDATE_MAP
}, {
.dr_opc = 0
}
};
|
128511.c | #include "alloca.h"
#include "flann.h"
#include "math.h"
#include "memory.h"
#include "pthread.h"
#include "stdarg.h"
#include "stdint.h"
#include "stdio.h"
#include "stdlib.h"
#include "unistd.h"
#include "_survey_v2.h"
/*
Survey
This code's job is to measure how "isolated" each peptide is
with the goal of quickly prediciting how well a given
label/protease scheme will perform.
*/
void dump_dytpeps(SurveyV2Context *ctx) {
Index last_pep_i = 0xFFFFFFFFFFFFFFFF;
for(Index i = 0; i < ctx->dytpeps.n_rows; i++) {
tab_var(DytPepRec, dytpep, &ctx->dytpeps, i);
tab_var(Index, mlpep_i, &ctx->dyt_i_to_mlpep_i, dytpep->dyt_i);
if(last_pep_i != dytpep->pep_i) {
trace("pep_i%lu\n", dytpep->pep_i);
last_pep_i = dytpep->pep_i;
}
// if(*mlpep_i != dytpep->pep_i) {
trace(" dyt_i:%-4lu n_reads:%-8lu mlpep_i:%-4lu ", dytpep->dyt_i, dytpep->n_reads, *mlpep_i);
tab_var(DytType, dyt, &ctx->dytmat, dytpep->dyt_i);
for(Index k = 0; k < ctx->dytmat.n_bytes_per_row; k++) {
trace("%d ", dyt[k]);
}
trace("\n");
//}
}
}
void dump_row_of_dytmat(Tab *dytmat, int row, char *prefix) {
tab_var(DytType, dyt, dytmat, row);
for(Index k = 0; k < dytmat->n_bytes_per_row; k++) {
printf("%s%d ", prefix, dyt[k]);
}
printf("\n");
}
HashKey pep_i_to_hash_key(Index pep_i) { return (pep_i + 1); }
Index hash_key_pep_i(HashKey pep_i) { return (pep_i - 1); }
int show_debug = 0;
void context_pep_measure_isolation(SurveyV2Context *ctx, Index pep_i) {
/*
Terminology:
dyt: Dyttrack
ml: Most Likely
mic: Most In Contention
pep: Peptide
nn: nearest neighbor
local_dyts: The dyetracks that are associated with pep_i
(ie "local" because they pertain only to the input parameter)
global_dyts: The set of all dyetracks in the simulation.
ml_pep: "Most Likely Peptide" That is the peptide with the most reads
from any given dyetrack
self-dyetrack: a dyetrack that has THIS peptide as its ML-pep
foreign-dyetrack: a dyetrack that has SOME OTHER peptide as its ML-pep
isolation: A metric of how well separated this peptide is
from other peptides. This is a relative metric, not
an actual distance (ie this is NOT a Euclidiean distance)
contention: The inverse of isolation. A large number means the
peptide is LESS isolated.
This function analyzes the "isolation" of a single peptide.
It has access to:
* The dytpeps which is a table w/ columns: (dyt_i, pep_i, n_reads)
That is, each peptide has a list of all dyetracks it can
create and how many reads that peptide generated for each
dyetrack.
* All the dyetracks
We seek features for this peptide:
* A measure of "p_correct"
* Which OTHER peptide is the most contentious with this peptide?
(Of the peptides that cause problems, which is the worst?)
Algorithm:
For this_peptide...
for dyt in this_peptide's_dyetracks...
for neighbor in neighbors_of(dyt)...
dist = distance from dyt to neighbor
p_not_pred = distance_function(dist)
The distance_function is one that goes from
0.0 (close by) and asymptotically approaches 1.0
as it gets further away. It can be thought of
as "probability that this neighbor does NOT predict
the peptide"
BUT -- the dyt might itself be foreign... so in
that case the logic reverses...
it's the "probability that this neighbor rescues
the predicition of this peptide."
if dyt is local:
p_correct = product( all p_not_pred )
else if dyt is foreign
p_correct = 1 - product( all p_not_pred )
Each dyt accounts for n_reads / n_total_reads of this peptide's reads,
so we weight each dyt's p_correct by the fraction of reads
that the dyt contributes to this peptide.
Meanwhile, compute the contention metric for the ml-peptide
for each dyetrack.
*/
if(show_debug) {
trace("pep_i=%ld\n", pep_i);
}
Size n_global_dyts = ctx->n_dyts;
int n_neighbors = ctx->n_neighbors;
int dyt_row_n_bytes = ctx->dytmat.n_bytes_per_row;
int n_dyt_cols = ctx->n_dyt_cols;
ensure(n_dyt_cols > 0, "no n_dyt_cols");
// SETUP a local table for the dytpeps OF THIS peptide by using the
// pep_i_to_dytpep_row_i table to get the start and stop range.
tab_var(Index, dytpeps_offset_start_of_this_pep, &ctx->pep_i_to_dytpep_row_i, pep_i);
tab_var(Index, dytpeps_offset_start_of_next_pep, &ctx->pep_i_to_dytpep_row_i, pep_i + 1);
int _n_local_dyts = *dytpeps_offset_start_of_next_pep - *dytpeps_offset_start_of_this_pep;
ensure(
_n_local_dyts > 0,
"no dyts pep_i=%ld (this=%ld next=%ld)",
pep_i,
*dytpeps_offset_start_of_this_pep,
*dytpeps_offset_start_of_next_pep);
Index n_local_dyts = (Index)_n_local_dyts;
// Using the pep_i_to_dytpep_row_i we now have the range of the dytpeps and we
// can create a table subset (which is just a view into the table)
Tab dytpeps = tab_subset(&ctx->dytpeps, *dytpeps_offset_start_of_this_pep, n_local_dyts);
// We need a contiguous dytmat to feed to the FLANN function so we have
// to copy each referenced dytmat from the global ctx->dytmat into a local copy.
// ALLOC a dytmat for all of the dyts of this peptide
DytType *local_dytmat_buffer = (DytType *)alloca(n_local_dyts * n_dyt_cols * sizeof(DytType));
memset(local_dytmat_buffer, 0, n_local_dyts * n_dyt_cols * sizeof(DytType));
Tab local_dytmat = tab_by_n_rows(local_dytmat_buffer, n_local_dyts, n_dyt_cols * sizeof(DytType), TAB_NOT_GROWABLE);
// LOAD the local dytmat table by copying rows from the global dytmat
// using the dyt_iz referenced in the dytpeps table.
for(Index i = 0; i < n_local_dyts; i++) {
tab_var(DytPepRec, dytpep_row, &dytpeps, i);
tab_var(DytType, src, &ctx->dytmat, dytpep_row->dyt_i);
tab_var(DytType, dst, &local_dytmat, i);
memcpy(dst, src, dyt_row_n_bytes);
}
// Now local_dytmat is a contiguous "local" set of the dytmats that
// are generated by this pep_i. It must be contiguous so that FLANN
// can operate on it in one fast call.
// FLANN needs output buffers to write what it found as the closest neighbors and their distances.
// ALLOC space for those table on the stack because they shouldn't be too large.
Size nn_dyt_iz_row_n_bytes = n_neighbors * sizeof(int);
Size nn_dists_row_n_bytes = n_neighbors * sizeof(Float32);
int *nn_dyt_iz_buf = (int *)alloca(n_local_dyts * nn_dyt_iz_row_n_bytes);
Float32 *nn_dists_buf = (Float32 *)alloca(n_local_dyts * nn_dists_row_n_bytes);
memset(nn_dyt_iz_buf, 0, n_local_dyts * nn_dyt_iz_row_n_bytes);
memset(nn_dists_buf, 0, n_local_dyts * nn_dists_row_n_bytes);
Tab nn_dyt_iz = tab_by_n_rows(nn_dyt_iz_buf, n_local_dyts, nn_dyt_iz_row_n_bytes, TAB_NOT_GROWABLE);
Tab nn_dists = tab_by_n_rows(nn_dists_buf, n_local_dyts, nn_dists_row_n_bytes, TAB_NOT_GROWABLE);
// FETCH a batch of neighbors from FLANN in one call against the GLOBAL index of dyttracks
int ret = flann_find_nearest_neighbors_index_byte(
ctx->flann_index_id,
tab_ptr(DytType, &local_dytmat, 0),
n_local_dyts,
nn_dyt_iz_buf,
nn_dists_buf,
n_neighbors,
ctx->flann_params);
ensure(ret == 0, "flann returned error code");
// At this point FLANN has found neighbors (and their distances) for each local dyttrack
// and put the results into:
// Tab nn_dyt_iz contains the GLOBAL dyt_i index for each neighbor
// Tab nn_dists contains the distance
Size n_neighbors_u = (Size)n_neighbors;
Size n_reads_total = 0;
#define N_PEP_HASH_RECS (128)
Hash contention_by_pep_i = hash_init(alloca(sizeof(HashRec) * N_PEP_HASH_RECS), N_PEP_HASH_RECS);
Index mlpep_i = 0;
IsolationType isolation_sum = (IsolationType)0.0;
for(Index dyt_i = 0; dyt_i < n_local_dyts; dyt_i++) {
// Reminder: dytpeps is the LOCAL dytpeps for pep_i only
tab_var(DytPepRec, dytpep_row, &dytpeps, dyt_i);
Index mlpep_i_for_this_dyt_i = tab_get(Index, &ctx->dyt_i_to_mlpep_i, dytpep_row->dyt_i);
// Get pointers to the nearest neighbor (nn) records (closest neighbor dyt_i and
// distance) that FLANN returned to us for this dyt_i
tab_var(int, nn_dyt_row_i, &nn_dyt_iz, dyt_i);
tab_var(float, nn_dists_row_i, &nn_dists, dyt_i);
int is_local = mlpep_i_for_this_dyt_i == pep_i;
if(show_debug) {
trace(
" dyt_i:%-4lu n_reads:%-8lu mlpep_i:%-4lu is_local:%1d ",
dytpep_row->dyt_i,
dytpep_row->n_reads,
mlpep_i_for_this_dyt_i,
is_local);
// DRAW the dyetrack
tab_var(DytType, dyt, &ctx->dytmat, dytpep_row->dyt_i);
for(Index k = 0; k < ctx->dytmat.n_bytes_per_row; k++) {
trace("%d ", dyt[k]);
}
trace("\n");
}
Index nn_i = 0;
Index global_dyt_i_of_nn_i = 0;
Float32 p_product = 1.0f;
for(nn_i = 0; nn_i < n_neighbors_u; nn_i++) {
global_dyt_i_of_nn_i = nn_dyt_row_i[nn_i];
ensure_only_in_debug(
0 <= (int)global_dyt_i_of_nn_i && (int)global_dyt_i_of_nn_i < (int)n_global_dyts,
"Illegal dyt in nn lookup: %ld %ld",
global_dyt_i_of_nn_i,
n_global_dyts);
if(dytpep_row->dyt_i == global_dyt_i_of_nn_i) {
// Do not compare a dyttrack to itself, it will always be zero
continue;
}
// LOOKUP the ml-pep for this dyt_i.
// Remember, we must use the global_dyt_i_of_nn_i not the local_dyt_i
mlpep_i = tab_get(Index, &ctx->dyt_i_to_mlpep_i, global_dyt_i_of_nn_i);
ensure_only_in_debug(
0 <= mlpep_i && mlpep_i < ctx->n_peps, "mlpep_i out of bounds %ld %ld", mlpep_i, ctx->n_peps);
IsolationType distance = (IsolationType)nn_dists_row_i[nn_i];
// Note, FLANN's distances are Manhattan distances (ie sum of the differences
// of each dimension). I think this is good enough but it might be worth
// some sort of sweep over distance metrics.
// This magic number needs to be parameter swept
// And the function itself is just a guess. There are other
// distance functions that might be better?
Float32 k = 0.8f;
Float32 p_func = 1.0f - expf(-k * distance);
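// Rough shape of this curve with k = 0.8 (approximate values):
// distance 0.5 -> p_func ~ 0.33, distance 2 -> ~ 0.80, distance 5 -> ~ 0.98.
// So a very close neighbor multiplies a small factor into p_product (a likely
// theft or rescue), while a distant neighbor contributes a factor near 1.0
// and barely changes the outcome.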
int mode = 0;
if(!is_local && mlpep_i == pep_i) {
// A dyt that is foreign but has a neighbor that is local.
// This is a potential rescue, we product this into
// the "p of not rescue"
p_product *= p_func;
mode = 1;
}
if(is_local && mlpep_i != pep_i) {
// A dyt that is local has a neighbor that is foreign.
// This is a potential thief, we product this into
// the "p of not stealing"
p_product *= p_func;
mode = 2;
}
// Debugging
if(show_debug && mode != 0) {
trace(
" nn dyt_i:%-4lu mlpep_i:%-4lu dist_to_nn:%7.1f p_func:%7.5f mode:%1d dyt_of_nn:",
global_dyt_i_of_nn_i,
mlpep_i,
distance,
p_func,
mode);
// Dyetrack
tab_var(DytType, dyt, &ctx->dytmat, global_dyt_i_of_nn_i);
for(Index k = 0; k < ctx->dytmat.n_bytes_per_row; k++) {
trace("%d ", dyt[k]);
}
trace("\n");
}
}
Float32 p_correct;
if(is_local) {
// A local dyt has some fraction of reads that are "stolen" by neighbors
// What the p(no theft)?
// p(no theft) = p(nn_0_did_not_steal) * p(nn_1_did_not_steal) ...
// p(no theft) = p_product
p_correct = p_product;
} else {
// A foreign dyt has some fraction of reads that are "rescued" by neighbors
// What the p(any neighbor rescues)?
// p(at least one rescue) = 1 - p(no rescue)
// p(no rescue) = p(nn_0_did_not_rescue) * p(nn_1_did_not_rescue) ...
// p(rescue) = 1 - p_product
p_correct = 1.0 - p_product;
}
IsolationType isolation = dytpep_row->n_reads * p_correct;
isolation_sum += isolation;
if(show_debug) {
trace(" p_correct=%7.5f isolation=%7.1f \n", p_correct, isolation);
}
if(!is_local) {
// This is really just kind of a guess at which peptide
// is the worst offender...
// Accumulate its contention into the hash of foreign peps.
// p_product here is the probability that it wasn't
// rescued, so we're going to assume that if it wasn't
// rescued then it was assigned to the mlpep.
// This is not exactly right as some of those neighbors
// might have assigned to OTHER peptides.
IsolationType contention = dytpep_row->n_reads * p_product;
HashKey pep_hash_key = pep_i_to_hash_key(mlpep_i_for_this_dyt_i);
HashRec *by_pep_i_rec = hash_get(contention_by_pep_i, pep_hash_key);
if(by_pep_i_rec == (HashRec *)0) {
// hash full!
ensure(0, "contention_by_pep_i hash table full");
} else if(by_pep_i_rec->key == 0) {
// New record
by_pep_i_rec->key = pep_hash_key;
by_pep_i_rec->contention_val = contention;
} else {
// Existing record
by_pep_i_rec->contention_val += contention;
}
}
n_reads_total += dytpep_row->n_reads;
}
Float32 p_correct;
if(n_reads_total > 0) {
p_correct = isolation_sum / n_reads_total;
} else {
p_correct = 0;
}
// FIND the most in-contention peptide -- the one with the highest contention
IsolationType most_contentious = (IsolationType)0.0;
Index most_contentious_pep_i = 0;
for(Index i = 0; i < contention_by_pep_i.n_max_recs; i++) {
Index pep_i_from_hash = hash_key_pep_i(contention_by_pep_i.recs[i].key);
IsolationType contention_from_hash = contention_by_pep_i.recs[i].contention_val;
if(contention_from_hash > most_contentious) {
most_contentious = contention_from_hash;
most_contentious_pep_i = pep_i_from_hash;
}
}
if(show_debug) {
trace(
"\n iso_sum=%7.5f reads=%-7lu p_correct=%7.5f most_contentious_pep_i=%ld\n\n",
isolation_sum,
n_reads_total,
p_correct,
most_contentious_pep_i);
}
// RECORD the results into the output tables
tab_set(&ctx->output_pep_i_to_isolation_metric, pep_i, &p_correct);
tab_set(&ctx->output_pep_i_to_mic_pep_i, pep_i, &most_contentious_pep_i);
}
Index context_work_orders_pop(SurveyV2Context *ctx) {
// TODO: This could be dried with similar sim_v2 code
// (but remember they refer to differnte SurveyV2FastContext structs)
// NOTE: This return +1! So that 0 can be reserved.
if(ctx->n_threads > 1) {
pthread_mutex_lock(ctx->work_order_lock);
}
Index i = ctx->next_pep_i;
ctx->next_pep_i++;
if(ctx->n_threads > 1) {
pthread_mutex_unlock(ctx->work_order_lock);
}
if(i < ctx->n_peps) {
return i + 1;
}
return 0;
}
void *context_work_orders_worker(void *_ctx) {
// The worker thread. Pops off which pep to work on next
// continues until there are no more work orders.
SurveyV2Context *ctx = (SurveyV2Context *)_ctx;
while(1) {
Index pep_i_plus_1 = context_work_orders_pop(ctx);
if(pep_i_plus_1 == 0) {
break;
}
Index pep_i = pep_i_plus_1 - 1;
context_pep_measure_isolation(ctx, pep_i);
if(pep_i % 100 == 0) {
//ctx->progress_fn(pep_i, ctx->n_peps, 0);
}
}
//ctx->progress_fn(ctx->n_peps, ctx->n_peps, 0);
return (void *)0;
}
void context_start(SurveyV2Context *ctx) {
// dump_dytpeps(ctx);
// Initialize mutex and start the worker thread(s).
ctx->next_pep_i = 0;
// TODO: DRY with similar code in nn_v2
ensure(
ctx->n_neighbors <= ctx->dytmat.n_rows,
"FLANN does not support requesting more neihbors than there are data points");
// CLEAR internally controlled elements
ctx->flann_params = &DEFAULT_FLANN_PARAMETERS;
ctx->flann_index_id = 0;
ctx->flann_params->cores = ctx->n_flann_cores;
// CREATE the ANN index
// TODO: DRY with NN
float speedup = 0.0f;
ctx->flann_index_id = flann_build_index_byte(
tab_ptr(DytType, &ctx->dytmat, 0), ctx->dytmat.n_rows, ctx->n_dyt_cols, &speedup, ctx->flann_params);
// 10/5/2020 DHW changed this to parallelize at the flann level rather than the survey level (n_threads for survey
// was already set to 1)
context_work_orders_worker(ctx);
/*
// START threads
// TODO: If threading is enabled, the ctx->work_order_lock will need to be malloc'd and free'd
pthread_t ids[256];
ensure(0 < ctx->n_threads && ctx->n_threads < 256, "Invalid n_threads");
if(ctx->n_threads > 1) {
int ret = pthread_mutex_init(ctx->work_order_lock, NULL);
ensure(ret == 0, "pthread lock create failed");
}
for(Index i=0; i<ctx->n_threads; i++) {
int ret = pthread_create(&ids[i], NULL, context_work_orders_worker, ctx);
ensure(ret == 0, "Thread not created.");
}
for(Index i=0; i<ctx->n_threads; i++) {
pthread_join(ids[i], NULL);
}
*/
if(show_debug) {
trace("\nSUMMARY\n");
for(Index pep_i = 0; pep_i < ctx->n_peps; pep_i++) {
IsolationType isolation = tab_get(IsolationType, &ctx->output_pep_i_to_isolation_metric, pep_i);
Index mic_pep_i = tab_get(Index, &ctx->output_pep_i_to_mic_pep_i, pep_i);
trace(" pep:%-4lu p_correct:%7.4f mic_pep:%-4lu\n", pep_i, isolation, mic_pep_i);
}
}
}
|
486686.c | /*-------------------------------------------------------------------------
*
* ruleutils.c
* Functions to convert stored expressions/querytrees back to
* source text
*
* Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/backend/utils/adt/ruleutils.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>
#include "access/amapi.h"
#include "access/htup_details.h"
#include "access/relation.h"
#include "access/sysattr.h"
#include "access/table.h"
#include "catalog/dependency.h"
#include "catalog/indexing.h"
#include "catalog/pg_aggregate.h"
#include "catalog/pg_am.h"
#include "catalog/pg_authid.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_depend.h"
#include "catalog/pg_language.h"
#include "catalog/pg_opclass.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_partitioned_table.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_statistic_ext.h"
#include "catalog/pg_trigger.h"
#include "catalog/pg_type.h"
#include "commands/defrem.h"
#include "commands/tablespace.h"
#include "common/keywords.h"
#include "executor/spi.h"
#include "funcapi.h"
#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "nodes/pathnodes.h"
#include "optimizer/optimizer.h"
#include "parser/parse_agg.h"
#include "parser/parse_func.h"
#include "parser/parse_node.h"
#include "parser/parse_oper.h"
#include "parser/parser.h"
#include "parser/parsetree.h"
#include "rewrite/rewriteHandler.h"
#include "rewrite/rewriteManip.h"
#include "rewrite/rewriteSupport.h"
#include "utils/array.h"
#include "utils/builtins.h"
#include "utils/fmgroids.h"
#include "utils/guc.h"
#include "utils/hsearch.h"
#include "utils/lsyscache.h"
#include "utils/partcache.h"
#include "utils/rel.h"
#include "utils/ruleutils.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
#include "utils/typcache.h"
#include "utils/varlena.h"
#include "utils/xml.h"
/* ----------
* Pretty formatting constants
* ----------
*/
/* Indent counts */
#define PRETTYINDENT_STD 8
#define PRETTYINDENT_JOIN 4
#define PRETTYINDENT_VAR 4
#define PRETTYINDENT_LIMIT 40 /* wrap limit */
/* Pretty flags */
#define PRETTYFLAG_PAREN 0x0001
#define PRETTYFLAG_INDENT 0x0002
#define PRETTYFLAG_SCHEMA 0x0004
/* Default line length for pretty-print wrapping: 0 means wrap always */
#define WRAP_COLUMN_DEFAULT 0
/* macros to test if pretty action needed */
#define PRETTY_PAREN(context) ((context)->prettyFlags & PRETTYFLAG_PAREN)
#define PRETTY_INDENT(context) ((context)->prettyFlags & PRETTYFLAG_INDENT)
#define PRETTY_SCHEMA(context) ((context)->prettyFlags & PRETTYFLAG_SCHEMA)
/* ----------
* Local data types
* ----------
*/
/* Context info needed for invoking a recursive querytree display routine */
typedef struct
{
StringInfo buf; /* output buffer to append to */
List *namespaces; /* List of deparse_namespace nodes */
List *windowClause; /* Current query level's WINDOW clause */
List *windowTList; /* targetlist for resolving WINDOW clause */
int prettyFlags; /* enabling of pretty-print functions */
int wrapColumn; /* max line length, or -1 for no limit */
int indentLevel; /* current indent level for pretty-print */
bool varprefix; /* true to print prefixes on Vars */
ParseExprKind special_exprkind; /* set only for exprkinds needing special
* handling */
Bitmapset *appendparents; /* if not null, map child Vars of these relids
* back to the parent rel */
} deparse_context;
/*
* Each level of query context around a subtree needs a level of Var namespace.
* A Var having varlevelsup=N refers to the N'th item (counting from 0) in
* the current context's namespaces list.
*
* rtable is the list of actual RTEs from the Query or PlannedStmt.
* rtable_names holds the alias name to be used for each RTE (either a C
* string, or NULL for nameless RTEs such as unnamed joins).
* rtable_columns holds the column alias names to be used for each RTE.
*
* subplans is a list of Plan trees for SubPlans and CTEs (it's only used
* in the PlannedStmt case).
* ctes is a list of CommonTableExpr nodes (only used in the Query case).
* appendrels, if not null (it's only used in the PlannedStmt case), is an
* array of AppendRelInfo nodes, indexed by child relid. We use that to map
* child-table Vars to their inheritance parents.
*
* In some cases we need to make names of merged JOIN USING columns unique
* across the whole query, not only per-RTE. If so, unique_using is true
* and using_names is a list of C strings representing names already assigned
* to USING columns.
*
* When deparsing plan trees, there is always just a single item in the
* deparse_namespace list (since a plan tree never contains Vars with
* varlevelsup > 0). We store the Plan node that is the immediate
* parent of the expression to be deparsed, as well as a list of that
* Plan's ancestors. In addition, we store its outer and inner subplan nodes,
* as well as their targetlists, and the index tlist if the current plan node
* might contain INDEX_VAR Vars. (These fields could be derived on-the-fly
* from the current Plan node, but it seems notationally clearer to set them
* up as separate fields.)
*/
typedef struct
{
List *rtable; /* List of RangeTblEntry nodes */
List *rtable_names; /* Parallel list of names for RTEs */
List *rtable_columns; /* Parallel list of deparse_columns structs */
List *subplans; /* List of Plan trees for SubPlans */
List *ctes; /* List of CommonTableExpr nodes */
AppendRelInfo **appendrels; /* Array of AppendRelInfo nodes, or NULL */
/* Workspace for column alias assignment: */
bool unique_using; /* Are we making USING names globally unique */
List *using_names; /* List of assigned names for USING columns */
/* Remaining fields are used only when deparsing a Plan tree: */
Plan *plan; /* immediate parent of current expression */
List *ancestors; /* ancestors of plan */
Plan *outer_plan; /* outer subnode, or NULL if none */
Plan *inner_plan; /* inner subnode, or NULL if none */
List *outer_tlist; /* referent for OUTER_VAR Vars */
List *inner_tlist; /* referent for INNER_VAR Vars */
List *index_tlist; /* referent for INDEX_VAR Vars */
} deparse_namespace;
/*
* Per-relation data about column alias names.
*
* Selecting aliases is unreasonably complicated because of the need to dump
* rules/views whose underlying tables may have had columns added, deleted, or
* renamed since the query was parsed. We must nonetheless print the rule/view
* in a form that can be reloaded and will produce the same results as before.
*
* For each RTE used in the query, we must assign column aliases that are
* unique within that RTE. SQL does not require this of the original query,
* but due to factors such as *-expansion we need to be able to uniquely
* reference every column in a decompiled query. As long as we qualify all
* column references, per-RTE uniqueness is sufficient for that.
*
* However, we can't ensure per-column name uniqueness for unnamed join RTEs,
* since they just inherit column names from their input RTEs, and we can't
* rename the columns at the join level. Most of the time this isn't an issue
* because we don't need to reference the join's output columns as such; we
* can reference the input columns instead. That approach can fail for merged
* JOIN USING columns, however, so when we have one of those in an unnamed
* join, we have to make that column's alias globally unique across the whole
* query to ensure it can be referenced unambiguously.
*
* Another problem is that a JOIN USING clause requires the columns to be
* merged to have the same aliases in both input RTEs, and that no other
* columns in those RTEs or their children conflict with the USING names.
* To handle that, we do USING-column alias assignment in a recursive
* traversal of the query's jointree. When descending through a JOIN with
* USING, we preassign the USING column names to the child columns, overriding
* other rules for column alias assignment. We also mark each RTE with a list
* of all USING column names selected for joins containing that RTE, so that
* when we assign other columns' aliases later, we can avoid conflicts.
*
* Another problem is that if a JOIN's input tables have had columns added or
* deleted since the query was parsed, we must generate a column alias list
* for the join that matches the current set of input columns --- otherwise, a
* change in the number of columns in the left input would throw off matching
* of aliases to columns of the right input. Thus, positions in the printable
* column alias list are not necessarily one-for-one with varattnos of the
* JOIN, so we need a separate new_colnames[] array for printing purposes.
*/
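/*
 * For example, if a rule's base table originally had columns (a, b, c), "b"
 * was dropped and "d" added later, then colnames is {"a", NULL, "c"} (still
 * indexable by the parsed varattnos) while new_colnames is {"a", "c", "d"}
 * with is_new_col = {false, false, true}.
 */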
typedef struct
{
/*
* colnames is an array containing column aliases to use for columns that
* existed when the query was parsed. Dropped columns have NULL entries.
* This array can be directly indexed by varattno to get a Var's name.
*
* Non-NULL entries are guaranteed unique within the RTE, *except* when
* this is for an unnamed JOIN RTE. In that case we merely copy up names
* from the two input RTEs.
*
* During the recursive descent in set_using_names(), forcible assignment
* of a child RTE's column name is represented by pre-setting that element
* of the child's colnames array. So at that stage, NULL entries in this
* array just mean that no name has been preassigned, not necessarily that
* the column is dropped.
*/
int num_cols; /* length of colnames[] array */
char **colnames; /* array of C strings and NULLs */
/*
* new_colnames is an array containing column aliases to use for columns
* that would exist if the query was re-parsed against the current
* definitions of its base tables. This is what to print as the column
* alias list for the RTE. This array does not include dropped columns,
* but it will include columns added since original parsing. Indexes in
* it therefore have little to do with current varattno values. As above,
* entries are unique unless this is for an unnamed JOIN RTE. (In such an
* RTE, we never actually print this array, but we must compute it anyway
* for possible use in computing column names of upper joins.) The
* parallel array is_new_col marks which of these columns are new since
* original parsing. Entries with is_new_col false must match the
* non-NULL colnames entries one-for-one.
*/
int num_new_cols; /* length of new_colnames[] array */
char **new_colnames; /* array of C strings */
bool *is_new_col; /* array of bool flags */
/* This flag tells whether we should actually print a column alias list */
bool printaliases;
/* This list has all names used as USING names in joins above this RTE */
List *parentUsing; /* names assigned to parent merged columns */
/*
* If this struct is for a JOIN RTE, we fill these fields during the
* set_using_names() pass to describe its relationship to its child RTEs.
*
* leftattnos and rightattnos are arrays with one entry per existing
* output column of the join (hence, indexable by join varattno). For a
* simple reference to a column of the left child, leftattnos[i] is the
* child RTE's attno and rightattnos[i] is zero; and conversely for a
* column of the right child. But for merged columns produced by JOIN
* USING/NATURAL JOIN, both leftattnos[i] and rightattnos[i] are nonzero.
* Note that a simple reference might be to a child RTE column that's been
* dropped; but that's OK since the column could not be used in the query.
*
* If it's a JOIN USING, usingNames holds the alias names selected for the
* merged columns (these might be different from the original USING list,
* if we had to modify names to achieve uniqueness).
*/
int leftrti; /* rangetable index of left child */
int rightrti; /* rangetable index of right child */
int *leftattnos; /* left-child varattnos of join cols, or 0 */
int *rightattnos; /* right-child varattnos of join cols, or 0 */
List *usingNames; /* names assigned to merged columns */
} deparse_columns;
/* This macro is analogous to rt_fetch(), but for deparse_columns structs */
#define deparse_columns_fetch(rangetable_index, dpns) \
((deparse_columns *) list_nth((dpns)->rtable_columns, (rangetable_index)-1))
/*
* Entry in set_rtable_names' hash table
*/
typedef struct
{
char name[NAMEDATALEN]; /* Hash key --- must be first */
int counter; /* Largest addition used so far for name */
} NameHashEntry;
/* Callback signature for resolve_special_varno() */
typedef void (*rsv_callback) (Node *node, deparse_context *context,
void *callback_arg);
/* ----------
* Global data
* ----------
*/
static SPIPlanPtr plan_getrulebyoid = NULL;
static const char *query_getrulebyoid = "SELECT * FROM pg_catalog.pg_rewrite WHERE oid = $1";
static SPIPlanPtr plan_getviewrule = NULL;
static const char *query_getviewrule = "SELECT * FROM pg_catalog.pg_rewrite WHERE ev_class = $1 AND rulename = $2";
/* GUC parameters */
bool quote_all_identifiers = false;
/* ----------
* Local functions
*
* Most of these functions used to use fixed-size buffers to build their
* results. Now, they take an (already initialized) StringInfo object
* as a parameter, and append their text output to its contents.
* ----------
*/
static char *deparse_expression_pretty(Node *expr, List *dpcontext,
bool forceprefix, bool showimplicit,
int prettyFlags, int startIndent);
static char *pg_get_viewdef_worker(Oid viewoid,
int prettyFlags, int wrapColumn);
static char *pg_get_triggerdef_worker(Oid trigid, bool pretty);
static int decompile_column_index_array(Datum column_index_array, Oid relId,
StringInfo buf);
static char *pg_get_ruledef_worker(Oid ruleoid, int prettyFlags);
static char *pg_get_indexdef_worker(Oid indexrelid, int colno,
const Oid *excludeOps,
bool attrsOnly, bool keysOnly,
bool showTblSpc, bool inherits,
int prettyFlags, bool missing_ok);
static char *pg_get_statisticsobj_worker(Oid statextid, bool missing_ok);
static char *pg_get_partkeydef_worker(Oid relid, int prettyFlags,
bool attrsOnly, bool missing_ok);
static char *pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
int prettyFlags, bool missing_ok);
static text *pg_get_expr_worker(text *expr, Oid relid, const char *relname,
int prettyFlags);
static int print_function_arguments(StringInfo buf, HeapTuple proctup,
bool print_table_args, bool print_defaults);
static void print_function_rettype(StringInfo buf, HeapTuple proctup);
static void print_function_trftypes(StringInfo buf, HeapTuple proctup);
static void set_rtable_names(deparse_namespace *dpns, List *parent_namespaces,
Bitmapset *rels_used);
static void set_deparse_for_query(deparse_namespace *dpns, Query *query,
List *parent_namespaces);
static void set_simple_column_names(deparse_namespace *dpns);
static bool has_dangerous_join_using(deparse_namespace *dpns, Node *jtnode);
static void set_using_names(deparse_namespace *dpns, Node *jtnode,
List *parentUsing);
static void set_relation_column_names(deparse_namespace *dpns,
RangeTblEntry *rte,
deparse_columns *colinfo);
static void set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
deparse_columns *colinfo);
static bool colname_is_unique(const char *colname, deparse_namespace *dpns,
deparse_columns *colinfo);
static char *make_colname_unique(char *colname, deparse_namespace *dpns,
deparse_columns *colinfo);
static void expand_colnames_array_to(deparse_columns *colinfo, int n);
static void identify_join_columns(JoinExpr *j, RangeTblEntry *jrte,
deparse_columns *colinfo);
static char *get_rtable_name(int rtindex, deparse_context *context);
static void set_deparse_plan(deparse_namespace *dpns, Plan *plan);
static void push_child_plan(deparse_namespace *dpns, Plan *plan,
deparse_namespace *save_dpns);
static void pop_child_plan(deparse_namespace *dpns,
deparse_namespace *save_dpns);
static void push_ancestor_plan(deparse_namespace *dpns, ListCell *ancestor_cell,
deparse_namespace *save_dpns);
static void pop_ancestor_plan(deparse_namespace *dpns,
deparse_namespace *save_dpns);
static void make_ruledef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc,
int prettyFlags);
static void make_viewdef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc,
int prettyFlags, int wrapColumn);
static void get_query_def(Query *query, StringInfo buf, List *parentnamespace,
TupleDesc resultDesc,
int prettyFlags, int wrapColumn, int startIndent);
static void get_values_def(List *values_lists, deparse_context *context);
static void get_with_clause(Query *query, deparse_context *context);
static void get_select_query_def(Query *query, deparse_context *context,
TupleDesc resultDesc);
static void get_insert_query_def(Query *query, deparse_context *context);
static void get_update_query_def(Query *query, deparse_context *context);
static void get_update_query_targetlist_def(Query *query, List *targetList,
deparse_context *context,
RangeTblEntry *rte);
static void get_delete_query_def(Query *query, deparse_context *context);
static void get_utility_query_def(Query *query, deparse_context *context);
static void get_basic_select_query(Query *query, deparse_context *context,
TupleDesc resultDesc);
static void get_target_list(List *targetList, deparse_context *context,
TupleDesc resultDesc);
static void get_setop_query(Node *setOp, Query *query,
deparse_context *context,
TupleDesc resultDesc);
static Node *get_rule_sortgroupclause(Index ref, List *tlist,
bool force_colno,
deparse_context *context);
static void get_rule_groupingset(GroupingSet *gset, List *targetlist,
bool omit_parens, deparse_context *context);
static void get_rule_orderby(List *orderList, List *targetList,
bool force_colno, deparse_context *context);
static void get_rule_windowclause(Query *query, deparse_context *context);
static void get_rule_windowspec(WindowClause *wc, List *targetList,
deparse_context *context);
static char *get_variable(Var *var, int levelsup, bool istoplevel,
deparse_context *context);
static void get_special_variable(Node *node, deparse_context *context,
void *callback_arg);
static void resolve_special_varno(Node *node, deparse_context *context,
rsv_callback callback, void *callback_arg);
static Node *find_param_referent(Param *param, deparse_context *context,
deparse_namespace **dpns_p, ListCell **ancestor_cell_p);
static void get_parameter(Param *param, deparse_context *context);
static const char *get_simple_binary_op_name(OpExpr *expr);
static bool isSimpleNode(Node *node, Node *parentNode, int prettyFlags);
static void appendContextKeyword(deparse_context *context, const char *str,
int indentBefore, int indentAfter, int indentPlus);
static void removeStringInfoSpaces(StringInfo str);
static void get_rule_expr(Node *node, deparse_context *context,
bool showimplicit);
static void get_rule_expr_toplevel(Node *node, deparse_context *context,
bool showimplicit);
static void get_rule_expr_funccall(Node *node, deparse_context *context,
bool showimplicit);
static bool looks_like_function(Node *node);
static void get_oper_expr(OpExpr *expr, deparse_context *context);
static void get_func_expr(FuncExpr *expr, deparse_context *context,
bool showimplicit);
static void get_agg_expr(Aggref *aggref, deparse_context *context,
Aggref *original_aggref);
static void get_agg_combine_expr(Node *node, deparse_context *context,
void *callback_arg);
static void get_windowfunc_expr(WindowFunc *wfunc, deparse_context *context);
static void get_coercion_expr(Node *arg, deparse_context *context,
Oid resulttype, int32 resulttypmod,
Node *parentNode);
static void get_const_expr(Const *constval, deparse_context *context,
int showtype);
static void get_const_collation(Const *constval, deparse_context *context);
static void simple_quote_literal(StringInfo buf, const char *val);
static void get_sublink_expr(SubLink *sublink, deparse_context *context);
static void get_tablefunc(TableFunc *tf, deparse_context *context,
bool showimplicit);
static void get_from_clause(Query *query, const char *prefix,
deparse_context *context);
static void get_from_clause_item(Node *jtnode, Query *query,
deparse_context *context);
static void get_column_alias_list(deparse_columns *colinfo,
deparse_context *context);
static void get_from_clause_coldeflist(RangeTblFunction *rtfunc,
deparse_columns *colinfo,
deparse_context *context);
static void get_tablesample_def(TableSampleClause *tablesample,
deparse_context *context);
static void get_opclass_name(Oid opclass, Oid actual_datatype,
StringInfo buf);
static Node *processIndirection(Node *node, deparse_context *context);
static void printSubscripts(SubscriptingRef *sbsref, deparse_context *context);
static char *get_relation_name(Oid relid);
static char *generate_relation_name(Oid relid, List *namespaces);
static char *generate_qualified_relation_name(Oid relid);
static char *generate_function_name(Oid funcid, int nargs,
List *argnames, Oid *argtypes,
bool has_variadic, bool *use_variadic_p,
ParseExprKind special_exprkind);
static char *generate_operator_name(Oid operid, Oid arg1, Oid arg2);
static void add_cast_to(StringInfo buf, Oid typid);
static char *generate_qualified_type_name(Oid typid);
static text *string_to_text(char *str);
static char *flatten_reloptions(Oid relid);
static void get_reloptions(StringInfo buf, Datum reloptions);
#define only_marker(rte) ((rte)->inh ? "" : "ONLY ")
/* ----------
* pg_get_ruledef - Do it all and return a text
* that could be used as a statement
* to recreate the rule
* ----------
*/
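/*
 * Illustrative SQL usage (example only; 'myrule' is a hypothetical rule
 * name).  The by-OID form below is typically invoked as
 *
 *		SELECT pg_get_ruledef(oid) FROM pg_rewrite WHERE rulename = 'myrule';
 *
 * As the worker below shows, NULL is returned when no matching pg_rewrite
 * tuple is found.
 */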
Datum
pg_get_ruledef(PG_FUNCTION_ARGS)
{
Oid ruleoid = PG_GETARG_OID(0);
int prettyFlags;
char *res;
prettyFlags = PRETTYFLAG_INDENT;
res = pg_get_ruledef_worker(ruleoid, prettyFlags);
if (res == NULL)
PG_RETURN_NULL();
PG_RETURN_TEXT_P(string_to_text(res));
}
Datum
pg_get_ruledef_ext(PG_FUNCTION_ARGS)
{
Oid ruleoid = PG_GETARG_OID(0);
bool pretty = PG_GETARG_BOOL(1);
int prettyFlags;
char *res;
prettyFlags = pretty ? (PRETTYFLAG_PAREN | PRETTYFLAG_INDENT | PRETTYFLAG_SCHEMA) : PRETTYFLAG_INDENT;
res = pg_get_ruledef_worker(ruleoid, prettyFlags);
if (res == NULL)
PG_RETURN_NULL();
PG_RETURN_TEXT_P(string_to_text(res));
}
static char *
pg_get_ruledef_worker(Oid ruleoid, int prettyFlags)
{
Datum args[1];
char nulls[1];
int spirc;
HeapTuple ruletup;
TupleDesc rulettc;
StringInfoData buf;
/*
* Do this first so that string is alloc'd in outer context not SPI's.
*/
initStringInfo(&buf);
/*
* Connect to SPI manager
*/
if (SPI_connect() != SPI_OK_CONNECT)
elog(ERROR, "SPI_connect failed");
/*
	 * On the first call, prepare the plan to look up pg_rewrite.  We read
	 * pg_rewrite through the SPI manager instead of using the syscache so
	 * that read access on pg_rewrite is permission-checked.
*/
if (plan_getrulebyoid == NULL)
{
Oid argtypes[1];
SPIPlanPtr plan;
argtypes[0] = OIDOID;
plan = SPI_prepare(query_getrulebyoid, 1, argtypes);
if (plan == NULL)
elog(ERROR, "SPI_prepare failed for \"%s\"", query_getrulebyoid);
SPI_keepplan(plan);
plan_getrulebyoid = plan;
}
/*
* Get the pg_rewrite tuple for this rule
*/
args[0] = ObjectIdGetDatum(ruleoid);
nulls[0] = ' ';
spirc = SPI_execute_plan(plan_getrulebyoid, args, nulls, true, 0);
if (spirc != SPI_OK_SELECT)
elog(ERROR, "failed to get pg_rewrite tuple for rule %u", ruleoid);
if (SPI_processed != 1)
{
/*
		 * There is no tuple data available here; just keep the output buffer
		 * empty.
*/
}
else
{
/*
* Get the rule's definition and put it into executor's memory
*/
ruletup = SPI_tuptable->vals[0];
rulettc = SPI_tuptable->tupdesc;
make_ruledef(&buf, ruletup, rulettc, prettyFlags);
}
/*
* Disconnect from SPI manager
*/
if (SPI_finish() != SPI_OK_FINISH)
elog(ERROR, "SPI_finish failed");
if (buf.len == 0)
return NULL;
return buf.data;
}
/* ----------
* pg_get_viewdef - Mainly the same thing, but we
* only return the SELECT part of a view
* ----------
*/
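/*
 * Illustrative SQL usage (example only): the pretty-printed definition of a
 * system view can be fetched as
 *
 *		SELECT pg_get_viewdef('pg_stat_activity'::regclass, true);
 *
 * The boolean second argument selects pretty printing in the _ext variant;
 * the _wrap variant takes a target line width instead.
 */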
Datum
pg_get_viewdef(PG_FUNCTION_ARGS)
{
/* By OID */
Oid viewoid = PG_GETARG_OID(0);
int prettyFlags;
char *res;
prettyFlags = PRETTYFLAG_INDENT;
res = pg_get_viewdef_worker(viewoid, prettyFlags, WRAP_COLUMN_DEFAULT);
if (res == NULL)
PG_RETURN_NULL();
PG_RETURN_TEXT_P(string_to_text(res));
}
Datum
pg_get_viewdef_ext(PG_FUNCTION_ARGS)
{
/* By OID */
Oid viewoid = PG_GETARG_OID(0);
bool pretty = PG_GETARG_BOOL(1);
int prettyFlags;
char *res;
prettyFlags = pretty ? (PRETTYFLAG_PAREN | PRETTYFLAG_INDENT | PRETTYFLAG_SCHEMA) : PRETTYFLAG_INDENT;
res = pg_get_viewdef_worker(viewoid, prettyFlags, WRAP_COLUMN_DEFAULT);
if (res == NULL)
PG_RETURN_NULL();
PG_RETURN_TEXT_P(string_to_text(res));
}
Datum
pg_get_viewdef_wrap(PG_FUNCTION_ARGS)
{
/* By OID */
Oid viewoid = PG_GETARG_OID(0);
int wrap = PG_GETARG_INT32(1);
int prettyFlags;
char *res;
/* calling this implies we want pretty printing */
prettyFlags = PRETTYFLAG_PAREN | PRETTYFLAG_INDENT | PRETTYFLAG_SCHEMA;
res = pg_get_viewdef_worker(viewoid, prettyFlags, wrap);
if (res == NULL)
PG_RETURN_NULL();
PG_RETURN_TEXT_P(string_to_text(res));
}
Datum
pg_get_viewdef_name(PG_FUNCTION_ARGS)
{
/* By qualified name */
text *viewname = PG_GETARG_TEXT_PP(0);
int prettyFlags;
RangeVar *viewrel;
Oid viewoid;
char *res;
prettyFlags = PRETTYFLAG_INDENT;
/* Look up view name. Can't lock it - we might not have privileges. */
viewrel = makeRangeVarFromNameList(textToQualifiedNameList(viewname));
viewoid = RangeVarGetRelid(viewrel, NoLock, false);
res = pg_get_viewdef_worker(viewoid, prettyFlags, WRAP_COLUMN_DEFAULT);
if (res == NULL)
PG_RETURN_NULL();
PG_RETURN_TEXT_P(string_to_text(res));
}
Datum
pg_get_viewdef_name_ext(PG_FUNCTION_ARGS)
{
/* By qualified name */
text *viewname = PG_GETARG_TEXT_PP(0);
bool pretty = PG_GETARG_BOOL(1);
int prettyFlags;
RangeVar *viewrel;
Oid viewoid;
char *res;
prettyFlags = pretty ? (PRETTYFLAG_PAREN | PRETTYFLAG_INDENT | PRETTYFLAG_SCHEMA) : PRETTYFLAG_INDENT;
/* Look up view name. Can't lock it - we might not have privileges. */
viewrel = makeRangeVarFromNameList(textToQualifiedNameList(viewname));
viewoid = RangeVarGetRelid(viewrel, NoLock, false);
res = pg_get_viewdef_worker(viewoid, prettyFlags, WRAP_COLUMN_DEFAULT);
if (res == NULL)
PG_RETURN_NULL();
PG_RETURN_TEXT_P(string_to_text(res));
}
/*
* Common code for by-OID and by-name variants of pg_get_viewdef
*/
static char *
pg_get_viewdef_worker(Oid viewoid, int prettyFlags, int wrapColumn)
{
Datum args[2];
char nulls[2];
int spirc;
HeapTuple ruletup;
TupleDesc rulettc;
StringInfoData buf;
/*
* Do this first so that string is alloc'd in outer context not SPI's.
*/
initStringInfo(&buf);
/*
* Connect to SPI manager
*/
if (SPI_connect() != SPI_OK_CONNECT)
elog(ERROR, "SPI_connect failed");
/*
	 * On the first call, prepare the plan to look up pg_rewrite.  We read
	 * pg_rewrite through the SPI manager instead of using the syscache so
	 * that read access on pg_rewrite is permission-checked.
*/
if (plan_getviewrule == NULL)
{
Oid argtypes[2];
SPIPlanPtr plan;
argtypes[0] = OIDOID;
argtypes[1] = NAMEOID;
plan = SPI_prepare(query_getviewrule, 2, argtypes);
if (plan == NULL)
elog(ERROR, "SPI_prepare failed for \"%s\"", query_getviewrule);
SPI_keepplan(plan);
plan_getviewrule = plan;
}
/*
* Get the pg_rewrite tuple for the view's SELECT rule
*/
args[0] = ObjectIdGetDatum(viewoid);
args[1] = DirectFunctionCall1(namein, CStringGetDatum(ViewSelectRuleName));
nulls[0] = ' ';
nulls[1] = ' ';
spirc = SPI_execute_plan(plan_getviewrule, args, nulls, true, 0);
if (spirc != SPI_OK_SELECT)
elog(ERROR, "failed to get pg_rewrite tuple for view %u", viewoid);
if (SPI_processed != 1)
{
/*
		 * There is no tuple data available here; just keep the output buffer
		 * empty.
*/
}
else
{
/*
* Get the rule's definition and put it into executor's memory
*/
ruletup = SPI_tuptable->vals[0];
rulettc = SPI_tuptable->tupdesc;
make_viewdef(&buf, ruletup, rulettc, prettyFlags, wrapColumn);
}
/*
* Disconnect from SPI manager
*/
if (SPI_finish() != SPI_OK_FINISH)
elog(ERROR, "SPI_finish failed");
if (buf.len == 0)
return NULL;
return buf.data;
}
/* ----------
* pg_get_triggerdef - Get the definition of a trigger
* ----------
*/
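/*
 * Illustrative SQL usage (example only; 'mytrigger' is a hypothetical
 * trigger name):
 *
 *		SELECT pg_get_triggerdef(oid, true)
 *		FROM pg_trigger WHERE tgname = 'mytrigger';
 *
 * The result is a complete CREATE TRIGGER statement, without a trailing
 * semicolon.
 */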
Datum
pg_get_triggerdef(PG_FUNCTION_ARGS)
{
Oid trigid = PG_GETARG_OID(0);
char *res;
res = pg_get_triggerdef_worker(trigid, false);
if (res == NULL)
PG_RETURN_NULL();
PG_RETURN_TEXT_P(string_to_text(res));
}
Datum
pg_get_triggerdef_ext(PG_FUNCTION_ARGS)
{
Oid trigid = PG_GETARG_OID(0);
bool pretty = PG_GETARG_BOOL(1);
char *res;
res = pg_get_triggerdef_worker(trigid, pretty);
if (res == NULL)
PG_RETURN_NULL();
PG_RETURN_TEXT_P(string_to_text(res));
}
static char *
pg_get_triggerdef_worker(Oid trigid, bool pretty)
{
HeapTuple ht_trig;
Form_pg_trigger trigrec;
StringInfoData buf;
Relation tgrel;
ScanKeyData skey[1];
SysScanDesc tgscan;
int findx = 0;
char *tgname;
char *tgoldtable;
char *tgnewtable;
Datum value;
bool isnull;
/*
* Fetch the pg_trigger tuple by the Oid of the trigger
*/
tgrel = table_open(TriggerRelationId, AccessShareLock);
ScanKeyInit(&skey[0],
Anum_pg_trigger_oid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(trigid));
tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
NULL, 1, skey);
ht_trig = systable_getnext(tgscan);
if (!HeapTupleIsValid(ht_trig))
{
systable_endscan(tgscan);
table_close(tgrel, AccessShareLock);
return NULL;
}
trigrec = (Form_pg_trigger) GETSTRUCT(ht_trig);
/*
* Start the trigger definition. Note that the trigger's name should never
* be schema-qualified, but the trigger rel's name may be.
*/
initStringInfo(&buf);
tgname = NameStr(trigrec->tgname);
appendStringInfo(&buf, "CREATE %sTRIGGER %s ",
OidIsValid(trigrec->tgconstraint) ? "CONSTRAINT " : "",
quote_identifier(tgname));
if (TRIGGER_FOR_BEFORE(trigrec->tgtype))
appendStringInfoString(&buf, "BEFORE");
else if (TRIGGER_FOR_AFTER(trigrec->tgtype))
appendStringInfoString(&buf, "AFTER");
else if (TRIGGER_FOR_INSTEAD(trigrec->tgtype))
appendStringInfoString(&buf, "INSTEAD OF");
else
elog(ERROR, "unexpected tgtype value: %d", trigrec->tgtype);
if (TRIGGER_FOR_INSERT(trigrec->tgtype))
{
appendStringInfoString(&buf, " INSERT");
findx++;
}
if (TRIGGER_FOR_DELETE(trigrec->tgtype))
{
if (findx > 0)
appendStringInfoString(&buf, " OR DELETE");
else
appendStringInfoString(&buf, " DELETE");
findx++;
}
if (TRIGGER_FOR_UPDATE(trigrec->tgtype))
{
if (findx > 0)
appendStringInfoString(&buf, " OR UPDATE");
else
appendStringInfoString(&buf, " UPDATE");
findx++;
/* tgattr is first var-width field, so OK to access directly */
if (trigrec->tgattr.dim1 > 0)
{
int i;
appendStringInfoString(&buf, " OF ");
for (i = 0; i < trigrec->tgattr.dim1; i++)
{
char *attname;
if (i > 0)
appendStringInfoString(&buf, ", ");
attname = get_attname(trigrec->tgrelid,
trigrec->tgattr.values[i], false);
appendStringInfoString(&buf, quote_identifier(attname));
}
}
}
if (TRIGGER_FOR_TRUNCATE(trigrec->tgtype))
{
if (findx > 0)
appendStringInfoString(&buf, " OR TRUNCATE");
else
appendStringInfoString(&buf, " TRUNCATE");
findx++;
}
/*
* In non-pretty mode, always schema-qualify the target table name for
* safety. In pretty mode, schema-qualify only if not visible.
*/
appendStringInfo(&buf, " ON %s ",
pretty ?
generate_relation_name(trigrec->tgrelid, NIL) :
generate_qualified_relation_name(trigrec->tgrelid));
if (OidIsValid(trigrec->tgconstraint))
{
if (OidIsValid(trigrec->tgconstrrelid))
appendStringInfo(&buf, "FROM %s ",
generate_relation_name(trigrec->tgconstrrelid, NIL));
if (!trigrec->tgdeferrable)
appendStringInfoString(&buf, "NOT ");
appendStringInfoString(&buf, "DEFERRABLE INITIALLY ");
if (trigrec->tginitdeferred)
appendStringInfoString(&buf, "DEFERRED ");
else
appendStringInfoString(&buf, "IMMEDIATE ");
}
value = fastgetattr(ht_trig, Anum_pg_trigger_tgoldtable,
tgrel->rd_att, &isnull);
if (!isnull)
tgoldtable = NameStr(*DatumGetName(value));
else
tgoldtable = NULL;
value = fastgetattr(ht_trig, Anum_pg_trigger_tgnewtable,
tgrel->rd_att, &isnull);
if (!isnull)
tgnewtable = NameStr(*DatumGetName(value));
else
tgnewtable = NULL;
if (tgoldtable != NULL || tgnewtable != NULL)
{
appendStringInfoString(&buf, "REFERENCING ");
if (tgoldtable != NULL)
appendStringInfo(&buf, "OLD TABLE AS %s ",
quote_identifier(tgoldtable));
if (tgnewtable != NULL)
appendStringInfo(&buf, "NEW TABLE AS %s ",
quote_identifier(tgnewtable));
}
if (TRIGGER_FOR_ROW(trigrec->tgtype))
appendStringInfoString(&buf, "FOR EACH ROW ");
else
appendStringInfoString(&buf, "FOR EACH STATEMENT ");
/* If the trigger has a WHEN qualification, add that */
value = fastgetattr(ht_trig, Anum_pg_trigger_tgqual,
tgrel->rd_att, &isnull);
if (!isnull)
{
Node *qual;
char relkind;
deparse_context context;
deparse_namespace dpns;
RangeTblEntry *oldrte;
RangeTblEntry *newrte;
appendStringInfoString(&buf, "WHEN (");
qual = stringToNode(TextDatumGetCString(value));
relkind = get_rel_relkind(trigrec->tgrelid);
/* Build minimal OLD and NEW RTEs for the rel */
oldrte = makeNode(RangeTblEntry);
oldrte->rtekind = RTE_RELATION;
oldrte->relid = trigrec->tgrelid;
oldrte->relkind = relkind;
oldrte->rellockmode = AccessShareLock;
oldrte->alias = makeAlias("old", NIL);
oldrte->eref = oldrte->alias;
oldrte->lateral = false;
oldrte->inh = false;
oldrte->inFromCl = true;
newrte = makeNode(RangeTblEntry);
newrte->rtekind = RTE_RELATION;
newrte->relid = trigrec->tgrelid;
newrte->relkind = relkind;
newrte->rellockmode = AccessShareLock;
newrte->alias = makeAlias("new", NIL);
newrte->eref = newrte->alias;
newrte->lateral = false;
newrte->inh = false;
newrte->inFromCl = true;
/* Build two-element rtable */
memset(&dpns, 0, sizeof(dpns));
dpns.rtable = list_make2(oldrte, newrte);
dpns.subplans = NIL;
dpns.ctes = NIL;
dpns.appendrels = NULL;
set_rtable_names(&dpns, NIL, NULL);
set_simple_column_names(&dpns);
/* Set up context with one-deep namespace stack */
context.buf = &buf;
context.namespaces = list_make1(&dpns);
context.windowClause = NIL;
context.windowTList = NIL;
context.varprefix = true;
context.prettyFlags = pretty ? (PRETTYFLAG_PAREN | PRETTYFLAG_INDENT | PRETTYFLAG_SCHEMA) : PRETTYFLAG_INDENT;
context.wrapColumn = WRAP_COLUMN_DEFAULT;
context.indentLevel = PRETTYINDENT_STD;
context.special_exprkind = EXPR_KIND_NONE;
context.appendparents = NULL;
get_rule_expr(qual, &context, false);
appendStringInfoString(&buf, ") ");
}
appendStringInfo(&buf, "EXECUTE FUNCTION %s(",
generate_function_name(trigrec->tgfoid, 0,
NIL, NULL,
false, NULL, EXPR_KIND_NONE));
if (trigrec->tgnargs > 0)
{
char *p;
int i;
value = fastgetattr(ht_trig, Anum_pg_trigger_tgargs,
tgrel->rd_att, &isnull);
if (isnull)
elog(ERROR, "tgargs is null for trigger %u", trigid);
p = (char *) VARDATA_ANY(DatumGetByteaPP(value));
for (i = 0; i < trigrec->tgnargs; i++)
{
if (i > 0)
appendStringInfoString(&buf, ", ");
simple_quote_literal(&buf, p);
/* advance p to next string embedded in tgargs */
while (*p)
p++;
p++;
}
}
/* We deliberately do not put semi-colon at end */
appendStringInfoChar(&buf, ')');
/* Clean up */
systable_endscan(tgscan);
table_close(tgrel, AccessShareLock);
return buf.data;
}
/* ----------
* pg_get_indexdef - Get the definition of an index
*
* In the extended version, there is a colno argument as well as pretty bool.
* if colno == 0, we want a complete index definition.
* if colno > 0, we only want the Nth index key's variable or expression.
*
* Note that the SQL-function versions of this omit any info about the
* index tablespace; this is intentional because pg_dump wants it that way.
* However pg_get_indexdef_string() includes the index tablespace.
* ----------
*/
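/*
 * Illustrative SQL usage (examples only):
 *
 *		SELECT pg_get_indexdef('pg_class_oid_index'::regclass);
 *		SELECT pg_get_indexdef('pg_class_oid_index'::regclass, 1, true);
 *
 * The first form returns the full CREATE INDEX statement; the second returns
 * just the first key column, pretty-printed, per the colno rules above.
 */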
Datum
pg_get_indexdef(PG_FUNCTION_ARGS)
{
Oid indexrelid = PG_GETARG_OID(0);
int prettyFlags;
char *res;
prettyFlags = PRETTYFLAG_INDENT;
res = pg_get_indexdef_worker(indexrelid, 0, NULL,
false, false,
false, false,
prettyFlags, true);
if (res == NULL)
PG_RETURN_NULL();
PG_RETURN_TEXT_P(string_to_text(res));
}
Datum
pg_get_indexdef_ext(PG_FUNCTION_ARGS)
{
Oid indexrelid = PG_GETARG_OID(0);
int32 colno = PG_GETARG_INT32(1);
bool pretty = PG_GETARG_BOOL(2);
int prettyFlags;
char *res;
prettyFlags = pretty ? (PRETTYFLAG_PAREN | PRETTYFLAG_INDENT | PRETTYFLAG_SCHEMA) : PRETTYFLAG_INDENT;
res = pg_get_indexdef_worker(indexrelid, colno, NULL,
colno != 0, false,
false, false,
prettyFlags, true);
if (res == NULL)
PG_RETURN_NULL();
PG_RETURN_TEXT_P(string_to_text(res));
}
/*
* Internal version for use by ALTER TABLE.
* Includes a tablespace clause in the result.
* Returns a palloc'd C string; no pretty-printing.
*/
char *
pg_get_indexdef_string(Oid indexrelid)
{
return pg_get_indexdef_worker(indexrelid, 0, NULL,
false, false,
true, true,
0, false);
}
/* Internal version that just reports the key-column definitions */
char *
pg_get_indexdef_columns(Oid indexrelid, bool pretty)
{
int prettyFlags;
prettyFlags = pretty ? (PRETTYFLAG_PAREN | PRETTYFLAG_INDENT | PRETTYFLAG_SCHEMA) : PRETTYFLAG_INDENT;
return pg_get_indexdef_worker(indexrelid, 0, NULL,
true, true,
false, false,
prettyFlags, false);
}
/*
* Internal workhorse to decompile an index definition.
*
* This is now used for exclusion constraints as well: if excludeOps is not
* NULL then it points to an array of exclusion operator OIDs.
*/
static char *
pg_get_indexdef_worker(Oid indexrelid, int colno,
const Oid *excludeOps,
bool attrsOnly, bool keysOnly,
bool showTblSpc, bool inherits,
int prettyFlags, bool missing_ok)
{
/* might want a separate isConstraint parameter later */
bool isConstraint = (excludeOps != NULL);
HeapTuple ht_idx;
HeapTuple ht_idxrel;
HeapTuple ht_am;
Form_pg_index idxrec;
Form_pg_class idxrelrec;
Form_pg_am amrec;
IndexAmRoutine *amroutine;
List *indexprs;
ListCell *indexpr_item;
List *context;
Oid indrelid;
int keyno;
Datum indcollDatum;
Datum indclassDatum;
Datum indoptionDatum;
bool isnull;
oidvector *indcollation;
oidvector *indclass;
int2vector *indoption;
StringInfoData buf;
char *str;
char *sep;
/*
* Fetch the pg_index tuple by the Oid of the index
*/
ht_idx = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexrelid));
if (!HeapTupleIsValid(ht_idx))
{
if (missing_ok)
return NULL;
elog(ERROR, "cache lookup failed for index %u", indexrelid);
}
idxrec = (Form_pg_index) GETSTRUCT(ht_idx);
indrelid = idxrec->indrelid;
Assert(indexrelid == idxrec->indexrelid);
/* Must get indcollation, indclass, and indoption the hard way */
indcollDatum = SysCacheGetAttr(INDEXRELID, ht_idx,
Anum_pg_index_indcollation, &isnull);
Assert(!isnull);
indcollation = (oidvector *) DatumGetPointer(indcollDatum);
indclassDatum = SysCacheGetAttr(INDEXRELID, ht_idx,
Anum_pg_index_indclass, &isnull);
Assert(!isnull);
indclass = (oidvector *) DatumGetPointer(indclassDatum);
indoptionDatum = SysCacheGetAttr(INDEXRELID, ht_idx,
Anum_pg_index_indoption, &isnull);
Assert(!isnull);
indoption = (int2vector *) DatumGetPointer(indoptionDatum);
/*
* Fetch the pg_class tuple of the index relation
*/
ht_idxrel = SearchSysCache1(RELOID, ObjectIdGetDatum(indexrelid));
if (!HeapTupleIsValid(ht_idxrel))
elog(ERROR, "cache lookup failed for relation %u", indexrelid);
idxrelrec = (Form_pg_class) GETSTRUCT(ht_idxrel);
/*
* Fetch the pg_am tuple of the index' access method
*/
ht_am = SearchSysCache1(AMOID, ObjectIdGetDatum(idxrelrec->relam));
if (!HeapTupleIsValid(ht_am))
elog(ERROR, "cache lookup failed for access method %u",
idxrelrec->relam);
amrec = (Form_pg_am) GETSTRUCT(ht_am);
/* Fetch the index AM's API struct */
amroutine = GetIndexAmRoutine(amrec->amhandler);
/*
* Get the index expressions, if any. (NOTE: we do not use the relcache
* versions of the expressions and predicate, because we want to display
* non-const-folded expressions.)
*/
if (!heap_attisnull(ht_idx, Anum_pg_index_indexprs, NULL))
{
Datum exprsDatum;
bool isnull;
char *exprsString;
exprsDatum = SysCacheGetAttr(INDEXRELID, ht_idx,
Anum_pg_index_indexprs, &isnull);
Assert(!isnull);
exprsString = TextDatumGetCString(exprsDatum);
indexprs = (List *) stringToNode(exprsString);
pfree(exprsString);
}
else
indexprs = NIL;
indexpr_item = list_head(indexprs);
context = deparse_context_for(get_relation_name(indrelid), indrelid);
/*
* Start the index definition. Note that the index's name should never be
* schema-qualified, but the indexed rel's name may be.
*/
initStringInfo(&buf);
if (!attrsOnly)
{
if (!isConstraint)
appendStringInfo(&buf, "CREATE %sINDEX %s ON %s%s USING %s (",
idxrec->indisunique ? "UNIQUE " : "",
quote_identifier(NameStr(idxrelrec->relname)),
idxrelrec->relkind == RELKIND_PARTITIONED_INDEX
&& !inherits ? "ONLY " : "",
(prettyFlags & PRETTYFLAG_SCHEMA) ?
generate_relation_name(indrelid, NIL) :
generate_qualified_relation_name(indrelid),
quote_identifier(NameStr(amrec->amname)));
else /* currently, must be EXCLUDE constraint */
appendStringInfo(&buf, "EXCLUDE USING %s (",
quote_identifier(NameStr(amrec->amname)));
}
/*
* Report the indexed attributes
*/
sep = "";
for (keyno = 0; keyno < idxrec->indnatts; keyno++)
{
AttrNumber attnum = idxrec->indkey.values[keyno];
Oid keycoltype;
Oid keycolcollation;
/*
* Ignore non-key attributes if told to.
*/
if (keysOnly && keyno >= idxrec->indnkeyatts)
break;
/* Otherwise, print INCLUDE to divide key and non-key attrs. */
if (!colno && keyno == idxrec->indnkeyatts)
{
appendStringInfoString(&buf, ") INCLUDE (");
sep = "";
}
if (!colno)
appendStringInfoString(&buf, sep);
sep = ", ";
if (attnum != 0)
{
/* Simple index column */
char *attname;
int32 keycoltypmod;
attname = get_attname(indrelid, attnum, false);
if (!colno || colno == keyno + 1)
appendStringInfoString(&buf, quote_identifier(attname));
get_atttypetypmodcoll(indrelid, attnum,
&keycoltype, &keycoltypmod,
&keycolcollation);
}
else
{
/* expressional index */
Node *indexkey;
if (indexpr_item == NULL)
elog(ERROR, "too few entries in indexprs list");
indexkey = (Node *) lfirst(indexpr_item);
indexpr_item = lnext(indexprs, indexpr_item);
/* Deparse */
str = deparse_expression_pretty(indexkey, context, false, false,
prettyFlags, 0);
if (!colno || colno == keyno + 1)
{
/* Need parens if it's not a bare function call */
if (looks_like_function(indexkey))
appendStringInfoString(&buf, str);
else
appendStringInfo(&buf, "(%s)", str);
}
keycoltype = exprType(indexkey);
keycolcollation = exprCollation(indexkey);
}
/* Print additional decoration for (selected) key columns */
if (!attrsOnly && keyno < idxrec->indnkeyatts &&
(!colno || colno == keyno + 1))
{
int16 opt = indoption->values[keyno];
Oid indcoll = indcollation->values[keyno];
Datum attoptions = get_attoptions(indexrelid, keyno + 1);
bool has_options = attoptions != (Datum) 0;
/* Add collation, if not default for column */
if (OidIsValid(indcoll) && indcoll != keycolcollation)
appendStringInfo(&buf, " COLLATE %s",
generate_collation_name((indcoll)));
/* Add the operator class name, if not default */
get_opclass_name(indclass->values[keyno],
has_options ? InvalidOid : keycoltype, &buf);
if (has_options)
{
appendStringInfoString(&buf, " (");
get_reloptions(&buf, attoptions);
appendStringInfoChar(&buf, ')');
}
/* Add options if relevant */
if (amroutine->amcanorder)
{
/* if it supports sort ordering, report DESC and NULLS opts */
if (opt & INDOPTION_DESC)
{
appendStringInfoString(&buf, " DESC");
/* NULLS FIRST is the default in this case */
if (!(opt & INDOPTION_NULLS_FIRST))
appendStringInfoString(&buf, " NULLS LAST");
}
else
{
if (opt & INDOPTION_NULLS_FIRST)
appendStringInfoString(&buf, " NULLS FIRST");
}
}
/* Add the exclusion operator if relevant */
if (excludeOps != NULL)
appendStringInfo(&buf, " WITH %s",
generate_operator_name(excludeOps[keyno],
keycoltype,
keycoltype));
}
}
if (!attrsOnly)
{
appendStringInfoChar(&buf, ')');
/*
* If it has options, append "WITH (options)"
*/
str = flatten_reloptions(indexrelid);
if (str)
{
appendStringInfo(&buf, " WITH (%s)", str);
pfree(str);
}
/*
* Print tablespace, but only if requested
*/
if (showTblSpc)
{
Oid tblspc;
tblspc = get_rel_tablespace(indexrelid);
if (OidIsValid(tblspc))
{
if (isConstraint)
appendStringInfoString(&buf, " USING INDEX");
appendStringInfo(&buf, " TABLESPACE %s",
quote_identifier(get_tablespace_name(tblspc)));
}
}
/*
* If it's a partial index, decompile and append the predicate
*/
if (!heap_attisnull(ht_idx, Anum_pg_index_indpred, NULL))
{
Node *node;
Datum predDatum;
bool isnull;
char *predString;
/* Convert text string to node tree */
predDatum = SysCacheGetAttr(INDEXRELID, ht_idx,
Anum_pg_index_indpred, &isnull);
Assert(!isnull);
predString = TextDatumGetCString(predDatum);
node = (Node *) stringToNode(predString);
pfree(predString);
/* Deparse */
str = deparse_expression_pretty(node, context, false, false,
prettyFlags, 0);
if (isConstraint)
appendStringInfo(&buf, " WHERE (%s)", str);
else
appendStringInfo(&buf, " WHERE %s", str);
}
}
/* Clean up */
ReleaseSysCache(ht_idx);
ReleaseSysCache(ht_idxrel);
ReleaseSysCache(ht_am);
return buf.data;
}
/*
* pg_get_statisticsobjdef
* Get the definition of an extended statistics object
*/
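/*
 * Illustrative SQL usage (example only; 'mystats' is a hypothetical
 * statistics object name):
 *
 *		SELECT pg_get_statisticsobjdef(oid)
 *		FROM pg_statistic_ext WHERE stxname = 'mystats';
 */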
Datum
pg_get_statisticsobjdef(PG_FUNCTION_ARGS)
{
Oid statextid = PG_GETARG_OID(0);
char *res;
res = pg_get_statisticsobj_worker(statextid, true);
if (res == NULL)
PG_RETURN_NULL();
PG_RETURN_TEXT_P(string_to_text(res));
}
/*
* Internal workhorse to decompile an extended statistics object.
*/
static char *
pg_get_statisticsobj_worker(Oid statextid, bool missing_ok)
{
Form_pg_statistic_ext statextrec;
HeapTuple statexttup;
StringInfoData buf;
int colno;
char *nsp;
ArrayType *arr;
char *enabled;
Datum datum;
bool isnull;
bool ndistinct_enabled;
bool dependencies_enabled;
bool mcv_enabled;
int i;
statexttup = SearchSysCache1(STATEXTOID, ObjectIdGetDatum(statextid));
if (!HeapTupleIsValid(statexttup))
{
if (missing_ok)
return NULL;
elog(ERROR, "cache lookup failed for statistics object %u", statextid);
}
statextrec = (Form_pg_statistic_ext) GETSTRUCT(statexttup);
initStringInfo(&buf);
nsp = get_namespace_name(statextrec->stxnamespace);
appendStringInfo(&buf, "CREATE STATISTICS %s",
quote_qualified_identifier(nsp,
NameStr(statextrec->stxname)));
/*
* Decode the stxkind column so that we know which stats types to print.
*/
datum = SysCacheGetAttr(STATEXTOID, statexttup,
Anum_pg_statistic_ext_stxkind, &isnull);
Assert(!isnull);
arr = DatumGetArrayTypeP(datum);
if (ARR_NDIM(arr) != 1 ||
ARR_HASNULL(arr) ||
ARR_ELEMTYPE(arr) != CHAROID)
elog(ERROR, "stxkind is not a 1-D char array");
enabled = (char *) ARR_DATA_PTR(arr);
ndistinct_enabled = false;
dependencies_enabled = false;
mcv_enabled = false;
for (i = 0; i < ARR_DIMS(arr)[0]; i++)
{
if (enabled[i] == STATS_EXT_NDISTINCT)
ndistinct_enabled = true;
if (enabled[i] == STATS_EXT_DEPENDENCIES)
dependencies_enabled = true;
if (enabled[i] == STATS_EXT_MCV)
mcv_enabled = true;
}
/*
* If any option is disabled, then we'll need to append the types clause
* to show which options are enabled. We omit the types clause on purpose
* when all options are enabled, so a pg_dump/pg_restore will create all
* statistics types on a newer postgres version, if the statistics had all
* options enabled on the original version.
*/
if (!ndistinct_enabled || !dependencies_enabled || !mcv_enabled)
{
bool gotone = false;
appendStringInfoString(&buf, " (");
if (ndistinct_enabled)
{
appendStringInfoString(&buf, "ndistinct");
gotone = true;
}
if (dependencies_enabled)
{
appendStringInfo(&buf, "%sdependencies", gotone ? ", " : "");
gotone = true;
}
if (mcv_enabled)
appendStringInfo(&buf, "%smcv", gotone ? ", " : "");
appendStringInfoChar(&buf, ')');
}
appendStringInfoString(&buf, " ON ");
for (colno = 0; colno < statextrec->stxkeys.dim1; colno++)
{
AttrNumber attnum = statextrec->stxkeys.values[colno];
char *attname;
if (colno > 0)
appendStringInfoString(&buf, ", ");
attname = get_attname(statextrec->stxrelid, attnum, false);
appendStringInfoString(&buf, quote_identifier(attname));
}
appendStringInfo(&buf, " FROM %s",
generate_relation_name(statextrec->stxrelid, NIL));
ReleaseSysCache(statexttup);
return buf.data;
}
/*
* pg_get_partkeydef
*
* Returns the partition key specification, ie, the following:
*
* PARTITION BY { RANGE | LIST | HASH } (column opt_collation opt_opclass [, ...])
*/
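/*
 * Illustrative SQL usage (example only; 'measurement' is a hypothetical
 * partitioned table):
 *
 *		SELECT pg_get_partkeydef('measurement'::regclass);
 *
 * which might yield, e.g., "RANGE (logdate)".
 */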
Datum
pg_get_partkeydef(PG_FUNCTION_ARGS)
{
Oid relid = PG_GETARG_OID(0);
char *res;
res = pg_get_partkeydef_worker(relid, PRETTYFLAG_INDENT, false, true);
if (res == NULL)
PG_RETURN_NULL();
PG_RETURN_TEXT_P(string_to_text(res));
}
/* Internal version that just reports the column definitions */
char *
pg_get_partkeydef_columns(Oid relid, bool pretty)
{
int prettyFlags;
prettyFlags = pretty ? (PRETTYFLAG_PAREN | PRETTYFLAG_INDENT | PRETTYFLAG_SCHEMA) : PRETTYFLAG_INDENT;
return pg_get_partkeydef_worker(relid, prettyFlags, true, false);
}
/*
* Internal workhorse to decompile a partition key definition.
*/
static char *
pg_get_partkeydef_worker(Oid relid, int prettyFlags,
bool attrsOnly, bool missing_ok)
{
Form_pg_partitioned_table form;
HeapTuple tuple;
oidvector *partclass;
oidvector *partcollation;
List *partexprs;
ListCell *partexpr_item;
List *context;
Datum datum;
bool isnull;
StringInfoData buf;
int keyno;
char *str;
char *sep;
tuple = SearchSysCache1(PARTRELID, ObjectIdGetDatum(relid));
if (!HeapTupleIsValid(tuple))
{
if (missing_ok)
return NULL;
elog(ERROR, "cache lookup failed for partition key of %u", relid);
}
form = (Form_pg_partitioned_table) GETSTRUCT(tuple);
Assert(form->partrelid == relid);
/* Must get partclass and partcollation the hard way */
datum = SysCacheGetAttr(PARTRELID, tuple,
Anum_pg_partitioned_table_partclass, &isnull);
Assert(!isnull);
partclass = (oidvector *) DatumGetPointer(datum);
datum = SysCacheGetAttr(PARTRELID, tuple,
Anum_pg_partitioned_table_partcollation, &isnull);
Assert(!isnull);
partcollation = (oidvector *) DatumGetPointer(datum);
/*
* Get the expressions, if any. (NOTE: we do not use the relcache
* versions of the expressions, because we want to display
* non-const-folded expressions.)
*/
if (!heap_attisnull(tuple, Anum_pg_partitioned_table_partexprs, NULL))
{
Datum exprsDatum;
bool isnull;
char *exprsString;
exprsDatum = SysCacheGetAttr(PARTRELID, tuple,
Anum_pg_partitioned_table_partexprs, &isnull);
Assert(!isnull);
exprsString = TextDatumGetCString(exprsDatum);
partexprs = (List *) stringToNode(exprsString);
if (!IsA(partexprs, List))
elog(ERROR, "unexpected node type found in partexprs: %d",
(int) nodeTag(partexprs));
pfree(exprsString);
}
else
partexprs = NIL;
partexpr_item = list_head(partexprs);
context = deparse_context_for(get_relation_name(relid), relid);
initStringInfo(&buf);
switch (form->partstrat)
{
case PARTITION_STRATEGY_HASH:
if (!attrsOnly)
appendStringInfoString(&buf, "HASH");
break;
case PARTITION_STRATEGY_LIST:
if (!attrsOnly)
appendStringInfoString(&buf, "LIST");
break;
case PARTITION_STRATEGY_RANGE:
if (!attrsOnly)
appendStringInfoString(&buf, "RANGE");
break;
default:
elog(ERROR, "unexpected partition strategy: %d",
(int) form->partstrat);
}
if (!attrsOnly)
appendStringInfoString(&buf, " (");
sep = "";
for (keyno = 0; keyno < form->partnatts; keyno++)
{
AttrNumber attnum = form->partattrs.values[keyno];
Oid keycoltype;
Oid keycolcollation;
Oid partcoll;
appendStringInfoString(&buf, sep);
sep = ", ";
if (attnum != 0)
{
/* Simple attribute reference */
char *attname;
int32 keycoltypmod;
attname = get_attname(relid, attnum, false);
appendStringInfoString(&buf, quote_identifier(attname));
get_atttypetypmodcoll(relid, attnum,
&keycoltype, &keycoltypmod,
&keycolcollation);
}
else
{
/* Expression */
Node *partkey;
if (partexpr_item == NULL)
elog(ERROR, "too few entries in partexprs list");
partkey = (Node *) lfirst(partexpr_item);
partexpr_item = lnext(partexprs, partexpr_item);
/* Deparse */
str = deparse_expression_pretty(partkey, context, false, false,
prettyFlags, 0);
/* Need parens if it's not a bare function call */
if (looks_like_function(partkey))
appendStringInfoString(&buf, str);
else
appendStringInfo(&buf, "(%s)", str);
keycoltype = exprType(partkey);
keycolcollation = exprCollation(partkey);
}
/* Add collation, if not default for column */
partcoll = partcollation->values[keyno];
if (!attrsOnly && OidIsValid(partcoll) && partcoll != keycolcollation)
appendStringInfo(&buf, " COLLATE %s",
generate_collation_name((partcoll)));
/* Add the operator class name, if not default */
if (!attrsOnly)
get_opclass_name(partclass->values[keyno], keycoltype, &buf);
}
if (!attrsOnly)
appendStringInfoChar(&buf, ')');
/* Clean up */
ReleaseSysCache(tuple);
return buf.data;
}
/*
* pg_get_partition_constraintdef
*
* Returns partition constraint expression as a string for the input relation
*/
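/*
 * Illustrative SQL usage (example only; the partition name is hypothetical):
 *
 *		SELECT pg_get_partition_constraintdef('measurement_y2020'::regclass);
 *
 * NULL is returned for relations that have no partition constraint.
 */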
Datum
pg_get_partition_constraintdef(PG_FUNCTION_ARGS)
{
Oid relationId = PG_GETARG_OID(0);
Expr *constr_expr;
int prettyFlags;
List *context;
char *consrc;
constr_expr = get_partition_qual_relid(relationId);
/* Quick exit if no partition constraint */
if (constr_expr == NULL)
PG_RETURN_NULL();
/*
* Deparse and return the constraint expression.
*/
prettyFlags = PRETTYFLAG_INDENT;
context = deparse_context_for(get_relation_name(relationId), relationId);
consrc = deparse_expression_pretty((Node *) constr_expr, context, false,
false, prettyFlags, 0);
PG_RETURN_TEXT_P(string_to_text(consrc));
}
/*
* pg_get_partconstrdef_string
*
* Returns the partition constraint as a C-string for the input relation, with
* the given alias. No pretty-printing.
*/
char *
pg_get_partconstrdef_string(Oid partitionId, char *aliasname)
{
Expr *constr_expr;
List *context;
constr_expr = get_partition_qual_relid(partitionId);
context = deparse_context_for(aliasname, partitionId);
return deparse_expression((Node *) constr_expr, context, true, false);
}
/*
* pg_get_constraintdef
*
* Returns the definition for the constraint, ie, everything that needs to
* appear after "ALTER TABLE ... ADD CONSTRAINT <constraintname>".
*/
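/*
 * Illustrative SQL usage (example only; 'mytable_pkey' is a hypothetical
 * constraint name):
 *
 *		SELECT pg_get_constraintdef(oid, true)
 *		FROM pg_constraint WHERE conname = 'mytable_pkey';
 */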
Datum
pg_get_constraintdef(PG_FUNCTION_ARGS)
{
Oid constraintId = PG_GETARG_OID(0);
int prettyFlags;
char *res;
prettyFlags = PRETTYFLAG_INDENT;
res = pg_get_constraintdef_worker(constraintId, false, prettyFlags, true);
if (res == NULL)
PG_RETURN_NULL();
PG_RETURN_TEXT_P(string_to_text(res));
}
Datum
pg_get_constraintdef_ext(PG_FUNCTION_ARGS)
{
Oid constraintId = PG_GETARG_OID(0);
bool pretty = PG_GETARG_BOOL(1);
int prettyFlags;
char *res;
prettyFlags = pretty ? (PRETTYFLAG_PAREN | PRETTYFLAG_INDENT | PRETTYFLAG_SCHEMA) : PRETTYFLAG_INDENT;
res = pg_get_constraintdef_worker(constraintId, false, prettyFlags, true);
if (res == NULL)
PG_RETURN_NULL();
PG_RETURN_TEXT_P(string_to_text(res));
}
/*
* Internal version that returns a full ALTER TABLE ... ADD CONSTRAINT command
*/
char *
pg_get_constraintdef_command(Oid constraintId)
{
return pg_get_constraintdef_worker(constraintId, true, 0, false);
}
/*
* As of 9.4, we now use an MVCC snapshot for this.
*/
static char *
pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
int prettyFlags, bool missing_ok)
{
HeapTuple tup;
Form_pg_constraint conForm;
StringInfoData buf;
SysScanDesc scandesc;
ScanKeyData scankey[1];
Snapshot snapshot = RegisterSnapshot(GetTransactionSnapshot());
Relation relation = table_open(ConstraintRelationId, AccessShareLock);
ScanKeyInit(&scankey[0],
Anum_pg_constraint_oid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(constraintId));
scandesc = systable_beginscan(relation,
ConstraintOidIndexId,
true,
snapshot,
1,
scankey);
/*
* We later use the tuple with SysCacheGetAttr() as if we had obtained it
* via SearchSysCache, which works fine.
*/
tup = systable_getnext(scandesc);
UnregisterSnapshot(snapshot);
if (!HeapTupleIsValid(tup))
{
if (missing_ok)
{
systable_endscan(scandesc);
table_close(relation, AccessShareLock);
return NULL;
}
elog(ERROR, "could not find tuple for constraint %u", constraintId);
}
conForm = (Form_pg_constraint) GETSTRUCT(tup);
initStringInfo(&buf);
if (fullCommand)
{
if (OidIsValid(conForm->conrelid))
{
/*
* Currently, callers want ALTER TABLE (without ONLY) for CHECK
* constraints, and other types of constraints don't inherit
* anyway so it doesn't matter whether we say ONLY or not. Someday
* we might need to let callers specify whether to put ONLY in the
* command.
*/
appendStringInfo(&buf, "ALTER TABLE %s ADD CONSTRAINT %s ",
generate_qualified_relation_name(conForm->conrelid),
quote_identifier(NameStr(conForm->conname)));
}
else
{
/* Must be a domain constraint */
Assert(OidIsValid(conForm->contypid));
appendStringInfo(&buf, "ALTER DOMAIN %s ADD CONSTRAINT %s ",
generate_qualified_type_name(conForm->contypid),
quote_identifier(NameStr(conForm->conname)));
}
}
switch (conForm->contype)
{
case CONSTRAINT_FOREIGN:
{
Datum val;
bool isnull;
const char *string;
/* Start off the constraint definition */
appendStringInfoString(&buf, "FOREIGN KEY (");
/* Fetch and build referencing-column list */
val = SysCacheGetAttr(CONSTROID, tup,
Anum_pg_constraint_conkey, &isnull);
if (isnull)
elog(ERROR, "null conkey for constraint %u",
constraintId);
decompile_column_index_array(val, conForm->conrelid, &buf);
/* add foreign relation name */
appendStringInfo(&buf, ") REFERENCES %s(",
generate_relation_name(conForm->confrelid,
NIL));
/* Fetch and build referenced-column list */
val = SysCacheGetAttr(CONSTROID, tup,
Anum_pg_constraint_confkey, &isnull);
if (isnull)
elog(ERROR, "null confkey for constraint %u",
constraintId);
decompile_column_index_array(val, conForm->confrelid, &buf);
appendStringInfoChar(&buf, ')');
/* Add match type */
switch (conForm->confmatchtype)
{
case FKCONSTR_MATCH_FULL:
string = " MATCH FULL";
break;
case FKCONSTR_MATCH_PARTIAL:
string = " MATCH PARTIAL";
break;
case FKCONSTR_MATCH_SIMPLE:
string = "";
break;
default:
elog(ERROR, "unrecognized confmatchtype: %d",
conForm->confmatchtype);
string = ""; /* keep compiler quiet */
break;
}
appendStringInfoString(&buf, string);
/* Add ON UPDATE and ON DELETE clauses, if needed */
switch (conForm->confupdtype)
{
case FKCONSTR_ACTION_NOACTION:
string = NULL; /* suppress default */
break;
case FKCONSTR_ACTION_RESTRICT:
string = "RESTRICT";
break;
case FKCONSTR_ACTION_CASCADE:
string = "CASCADE";
break;
case FKCONSTR_ACTION_SETNULL:
string = "SET NULL";
break;
case FKCONSTR_ACTION_SETDEFAULT:
string = "SET DEFAULT";
break;
default:
elog(ERROR, "unrecognized confupdtype: %d",
conForm->confupdtype);
string = NULL; /* keep compiler quiet */
break;
}
if (string)
appendStringInfo(&buf, " ON UPDATE %s", string);
switch (conForm->confdeltype)
{
case FKCONSTR_ACTION_NOACTION:
string = NULL; /* suppress default */
break;
case FKCONSTR_ACTION_RESTRICT:
string = "RESTRICT";
break;
case FKCONSTR_ACTION_CASCADE:
string = "CASCADE";
break;
case FKCONSTR_ACTION_SETNULL:
string = "SET NULL";
break;
case FKCONSTR_ACTION_SETDEFAULT:
string = "SET DEFAULT";
break;
default:
elog(ERROR, "unrecognized confdeltype: %d",
conForm->confdeltype);
string = NULL; /* keep compiler quiet */
break;
}
if (string)
appendStringInfo(&buf, " ON DELETE %s", string);
break;
}
case CONSTRAINT_PRIMARY:
case CONSTRAINT_UNIQUE:
{
Datum val;
bool isnull;
Oid indexId;
int keyatts;
HeapTuple indtup;
/* Start off the constraint definition */
if (conForm->contype == CONSTRAINT_PRIMARY)
appendStringInfoString(&buf, "PRIMARY KEY (");
else
appendStringInfoString(&buf, "UNIQUE (");
/* Fetch and build target column list */
val = SysCacheGetAttr(CONSTROID, tup,
Anum_pg_constraint_conkey, &isnull);
if (isnull)
elog(ERROR, "null conkey for constraint %u",
constraintId);
keyatts = decompile_column_index_array(val, conForm->conrelid, &buf);
appendStringInfoChar(&buf, ')');
indexId = get_constraint_index(constraintId);
/* Build including column list (from pg_index.indkeys) */
indtup = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexId));
if (!HeapTupleIsValid(indtup))
elog(ERROR, "cache lookup failed for index %u", indexId);
val = SysCacheGetAttr(INDEXRELID, indtup,
Anum_pg_index_indnatts, &isnull);
if (isnull)
elog(ERROR, "null indnatts for index %u", indexId);
if (DatumGetInt32(val) > keyatts)
{
Datum cols;
Datum *keys;
int nKeys;
int j;
appendStringInfoString(&buf, " INCLUDE (");
cols = SysCacheGetAttr(INDEXRELID, indtup,
Anum_pg_index_indkey, &isnull);
if (isnull)
elog(ERROR, "null indkey for index %u", indexId);
deconstruct_array(DatumGetArrayTypeP(cols),
INT2OID, 2, true, TYPALIGN_SHORT,
&keys, NULL, &nKeys);
for (j = keyatts; j < nKeys; j++)
{
char *colName;
colName = get_attname(conForm->conrelid,
DatumGetInt16(keys[j]), false);
if (j > keyatts)
appendStringInfoString(&buf, ", ");
appendStringInfoString(&buf, quote_identifier(colName));
}
appendStringInfoChar(&buf, ')');
}
ReleaseSysCache(indtup);
/* XXX why do we only print these bits if fullCommand? */
if (fullCommand && OidIsValid(indexId))
{
char *options = flatten_reloptions(indexId);
Oid tblspc;
if (options)
{
appendStringInfo(&buf, " WITH (%s)", options);
pfree(options);
}
/*
* Print the tablespace, unless it's the database default.
* This is to help ALTER TABLE usage of this facility,
* which needs this behavior to recreate exact catalog
* state.
*/
tblspc = get_rel_tablespace(indexId);
if (OidIsValid(tblspc))
appendStringInfo(&buf, " USING INDEX TABLESPACE %s",
quote_identifier(get_tablespace_name(tblspc)));
}
break;
}
case CONSTRAINT_CHECK:
{
Datum val;
bool isnull;
char *conbin;
char *consrc;
Node *expr;
List *context;
/* Fetch constraint expression in parsetree form */
val = SysCacheGetAttr(CONSTROID, tup,
Anum_pg_constraint_conbin, &isnull);
if (isnull)
elog(ERROR, "null conbin for constraint %u",
constraintId);
conbin = TextDatumGetCString(val);
expr = stringToNode(conbin);
/* Set up deparsing context for Var nodes in constraint */
if (conForm->conrelid != InvalidOid)
{
/* relation constraint */
context = deparse_context_for(get_relation_name(conForm->conrelid),
conForm->conrelid);
}
else
{
/* domain constraint --- can't have Vars */
context = NIL;
}
consrc = deparse_expression_pretty(expr, context, false, false,
prettyFlags, 0);
/*
* Now emit the constraint definition, adding NO INHERIT if
* necessary.
*
* There are cases where the constraint expression will be
* fully parenthesized and we don't need the outer parens ...
* but there are other cases where we do need 'em. Be
* conservative for now.
*
* Note that simply checking for leading '(' and trailing ')'
* would NOT be good enough, consider "(x > 0) AND (y > 0)".
*/
appendStringInfo(&buf, "CHECK (%s)%s",
consrc,
conForm->connoinherit ? " NO INHERIT" : "");
break;
}
case CONSTRAINT_TRIGGER:
/*
* There isn't an ALTER TABLE syntax for creating a user-defined
* constraint trigger, but it seems better to print something than
* throw an error; if we throw error then this function couldn't
* safely be applied to all rows of pg_constraint.
*/
appendStringInfoString(&buf, "TRIGGER");
break;
case CONSTRAINT_EXCLUSION:
{
Oid indexOid = conForm->conindid;
Datum val;
bool isnull;
Datum *elems;
int nElems;
int i;
Oid *operators;
/* Extract operator OIDs from the pg_constraint tuple */
val = SysCacheGetAttr(CONSTROID, tup,
Anum_pg_constraint_conexclop,
&isnull);
if (isnull)
elog(ERROR, "null conexclop for constraint %u",
constraintId);
deconstruct_array(DatumGetArrayTypeP(val),
OIDOID, sizeof(Oid), true, TYPALIGN_INT,
&elems, NULL, &nElems);
operators = (Oid *) palloc(nElems * sizeof(Oid));
for (i = 0; i < nElems; i++)
operators[i] = DatumGetObjectId(elems[i]);
/* pg_get_indexdef_worker does the rest */
/* suppress tablespace because pg_dump wants it that way */
appendStringInfoString(&buf,
pg_get_indexdef_worker(indexOid,
0,
operators,
false,
false,
false,
false,
prettyFlags,
false));
break;
}
default:
elog(ERROR, "invalid constraint type \"%c\"", conForm->contype);
break;
}
if (conForm->condeferrable)
appendStringInfoString(&buf, " DEFERRABLE");
if (conForm->condeferred)
appendStringInfoString(&buf, " INITIALLY DEFERRED");
if (!conForm->convalidated)
appendStringInfoString(&buf, " NOT VALID");
/* Cleanup */
systable_endscan(scandesc);
table_close(relation, AccessShareLock);
return buf.data;
}
/*
* Convert an int16[] Datum into a comma-separated list of column names
* for the indicated relation; append the list to buf. Returns the number
* of keys.
*/
static int
decompile_column_index_array(Datum column_index_array, Oid relId,
StringInfo buf)
{
Datum *keys;
int nKeys;
int j;
/* Extract data from array of int16 */
deconstruct_array(DatumGetArrayTypeP(column_index_array),
INT2OID, 2, true, TYPALIGN_SHORT,
&keys, NULL, &nKeys);
for (j = 0; j < nKeys; j++)
{
char *colName;
colName = get_attname(relId, DatumGetInt16(keys[j]), false);
if (j == 0)
appendStringInfoString(buf, quote_identifier(colName));
else
appendStringInfo(buf, ", %s", quote_identifier(colName));
}
return nKeys;
}
/* ----------
* pg_get_expr - Decompile an expression tree
*
* Input: an expression tree in nodeToString form, and a relation OID
*
* Output: reverse-listed expression
*
* Currently, the expression can only refer to a single relation, namely
* the one specified by the second parameter. This is sufficient for
* partial indexes, column default expressions, etc. We also support
* Var-free expressions, for which the OID can be InvalidOid.
* ----------
*/
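/*
 * Illustrative SQL usage (example only): column default expressions stored
 * in pg_attrdef can be decompiled with
 *
 *		SELECT pg_get_expr(adbin, adrelid) FROM pg_attrdef;
 */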
Datum
pg_get_expr(PG_FUNCTION_ARGS)
{
text *expr = PG_GETARG_TEXT_PP(0);
Oid relid = PG_GETARG_OID(1);
int prettyFlags;
char *relname;
prettyFlags = PRETTYFLAG_INDENT;
if (OidIsValid(relid))
{
/* Get the name for the relation */
relname = get_rel_name(relid);
/*
* If the OID isn't actually valid, don't throw an error, just return
* NULL. This is a bit questionable, but it's what we've done
* historically, and it can help avoid unwanted failures when
* examining catalog entries for just-deleted relations.
*/
if (relname == NULL)
PG_RETURN_NULL();
}
else
relname = NULL;
PG_RETURN_TEXT_P(pg_get_expr_worker(expr, relid, relname, prettyFlags));
}
Datum
pg_get_expr_ext(PG_FUNCTION_ARGS)
{
text *expr = PG_GETARG_TEXT_PP(0);
Oid relid = PG_GETARG_OID(1);
bool pretty = PG_GETARG_BOOL(2);
int prettyFlags;
char *relname;
prettyFlags = pretty ? (PRETTYFLAG_PAREN | PRETTYFLAG_INDENT | PRETTYFLAG_SCHEMA) : PRETTYFLAG_INDENT;
if (OidIsValid(relid))
{
/* Get the name for the relation */
relname = get_rel_name(relid);
/* See notes above */
if (relname == NULL)
PG_RETURN_NULL();
}
else
relname = NULL;
PG_RETURN_TEXT_P(pg_get_expr_worker(expr, relid, relname, prettyFlags));
}
static text *
pg_get_expr_worker(text *expr, Oid relid, const char *relname, int prettyFlags)
{
Node *node;
List *context;
char *exprstr;
char *str;
/* Convert input TEXT object to C string */
exprstr = text_to_cstring(expr);
/* Convert expression to node tree */
node = (Node *) stringToNode(exprstr);
pfree(exprstr);
/* Prepare deparse context if needed */
if (OidIsValid(relid))
context = deparse_context_for(relname, relid);
else
context = NIL;
/* Deparse */
str = deparse_expression_pretty(node, context, false, false,
prettyFlags, 0);
return string_to_text(str);
}
/* ----------
 * pg_get_userbyid	- Get a user name by roleid, falling back to
 *					  'unknown (OID=n)' if the role cannot be found
* ----------
*/
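/*
 * Illustrative SQL usage (example only; 'mytable' is hypothetical):
 *
 *		SELECT pg_get_userbyid(relowner) FROM pg_class WHERE relname = 'mytable';
 */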
Datum
pg_get_userbyid(PG_FUNCTION_ARGS)
{
Oid roleid = PG_GETARG_OID(0);
Name result;
HeapTuple roletup;
Form_pg_authid role_rec;
/*
* Allocate space for the result
*/
result = (Name) palloc(NAMEDATALEN);
memset(NameStr(*result), 0, NAMEDATALEN);
/*
* Get the pg_authid entry and print the result
*/
roletup = SearchSysCache1(AUTHOID, ObjectIdGetDatum(roleid));
if (HeapTupleIsValid(roletup))
{
role_rec = (Form_pg_authid) GETSTRUCT(roletup);
StrNCpy(NameStr(*result), NameStr(role_rec->rolname), NAMEDATALEN);
ReleaseSysCache(roletup);
}
else
sprintf(NameStr(*result), "unknown (OID=%u)", roleid);
PG_RETURN_NAME(result);
}
/*
* pg_get_serial_sequence
* Get the name of the sequence used by an identity or serial column,
* formatted suitably for passing to setval, nextval or currval.
* First parameter is not treated as double-quoted, second parameter
* is --- see documentation for reason.
*/
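/*
 * Illustrative SQL usage (example only; table and column are hypothetical):
 *
 *		SELECT pg_get_serial_sequence('mytable', 'id');
 *
 * might return, e.g., 'public.mytable_id_seq', or NULL if no owned sequence
 * is found.
 */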
Datum
pg_get_serial_sequence(PG_FUNCTION_ARGS)
{
text *tablename = PG_GETARG_TEXT_PP(0);
text *columnname = PG_GETARG_TEXT_PP(1);
RangeVar *tablerv;
Oid tableOid;
char *column;
AttrNumber attnum;
Oid sequenceId = InvalidOid;
Relation depRel;
ScanKeyData key[3];
SysScanDesc scan;
HeapTuple tup;
/* Look up table name. Can't lock it - we might not have privileges. */
tablerv = makeRangeVarFromNameList(textToQualifiedNameList(tablename));
tableOid = RangeVarGetRelid(tablerv, NoLock, false);
/* Get the number of the column */
column = text_to_cstring(columnname);
attnum = get_attnum(tableOid, column);
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
errmsg("column \"%s\" of relation \"%s\" does not exist",
column, tablerv->relname)));
/* Search the dependency table for the dependent sequence */
depRel = table_open(DependRelationId, AccessShareLock);
ScanKeyInit(&key[0],
Anum_pg_depend_refclassid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(RelationRelationId));
ScanKeyInit(&key[1],
Anum_pg_depend_refobjid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(tableOid));
ScanKeyInit(&key[2],
Anum_pg_depend_refobjsubid,
BTEqualStrategyNumber, F_INT4EQ,
Int32GetDatum(attnum));
scan = systable_beginscan(depRel, DependReferenceIndexId, true,
NULL, 3, key);
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup);
/*
* Look for an auto dependency (serial column) or internal dependency
* (identity column) of a sequence on a column. (We need the relkind
* test because indexes can also have auto dependencies on columns.)
*/
if (deprec->classid == RelationRelationId &&
deprec->objsubid == 0 &&
(deprec->deptype == DEPENDENCY_AUTO ||
deprec->deptype == DEPENDENCY_INTERNAL) &&
get_rel_relkind(deprec->objid) == RELKIND_SEQUENCE)
{
sequenceId = deprec->objid;
break;
}
}
systable_endscan(scan);
table_close(depRel, AccessShareLock);
if (OidIsValid(sequenceId))
{
char *result;
result = generate_qualified_relation_name(sequenceId);
PG_RETURN_TEXT_P(string_to_text(result));
}
PG_RETURN_NULL();
}
/*
* pg_get_functiondef
* Returns the complete "CREATE OR REPLACE FUNCTION ..." statement for
* the specified function.
*
* Note: if you change the output format of this function, be careful not
* to break psql's rules (in \ef and \sf) for identifying the start of the
* function body. To wit: the function body starts on a line that begins
* with "AS ", and no preceding line will look like that.
*/
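/*
 * Illustrative SQL usage (example only; 'myfunc(integer)' is a hypothetical
 * user-defined function signature):
 *
 *		SELECT pg_get_functiondef('myfunc(integer)'::regprocedure);
 */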
Datum
pg_get_functiondef(PG_FUNCTION_ARGS)
{
Oid funcid = PG_GETARG_OID(0);
StringInfoData buf;
StringInfoData dq;
HeapTuple proctup;
Form_pg_proc proc;
bool isfunction;
Datum tmp;
bool isnull;
const char *prosrc;
const char *name;
const char *nsp;
float4 procost;
int oldlen;
initStringInfo(&buf);
/* Look up the function */
proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
if (!HeapTupleIsValid(proctup))
PG_RETURN_NULL();
proc = (Form_pg_proc) GETSTRUCT(proctup);
name = NameStr(proc->proname);
if (proc->prokind == PROKIND_AGGREGATE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is an aggregate function", name)));
isfunction = (proc->prokind != PROKIND_PROCEDURE);
/*
* We always qualify the function name, to ensure the right function gets
* replaced.
*/
nsp = get_namespace_name(proc->pronamespace);
appendStringInfo(&buf, "CREATE OR REPLACE %s %s(",
isfunction ? "FUNCTION" : "PROCEDURE",
quote_qualified_identifier(nsp, name));
(void) print_function_arguments(&buf, proctup, false, true);
appendStringInfoString(&buf, ")\n");
if (isfunction)
{
appendStringInfoString(&buf, " RETURNS ");
print_function_rettype(&buf, proctup);
appendStringInfoChar(&buf, '\n');
}
print_function_trftypes(&buf, proctup);
appendStringInfo(&buf, " LANGUAGE %s\n",
quote_identifier(get_language_name(proc->prolang, false)));
/* Emit some miscellaneous options on one line */
oldlen = buf.len;
if (proc->prokind == PROKIND_WINDOW)
appendStringInfoString(&buf, " WINDOW");
switch (proc->provolatile)
{
case PROVOLATILE_IMMUTABLE:
appendStringInfoString(&buf, " IMMUTABLE");
break;
case PROVOLATILE_STABLE:
appendStringInfoString(&buf, " STABLE");
break;
case PROVOLATILE_VOLATILE:
break;
}
switch (proc->proparallel)
{
case PROPARALLEL_SAFE:
appendStringInfoString(&buf, " PARALLEL SAFE");
break;
case PROPARALLEL_RESTRICTED:
appendStringInfoString(&buf, " PARALLEL RESTRICTED");
break;
case PROPARALLEL_UNSAFE:
break;
}
if (proc->proisstrict)
appendStringInfoString(&buf, " STRICT");
if (proc->prosecdef)
appendStringInfoString(&buf, " SECURITY DEFINER");
if (proc->proleakproof)
appendStringInfoString(&buf, " LEAKPROOF");
/* This code for the default cost and rows should match functioncmds.c */
if (proc->prolang == INTERNALlanguageId ||
proc->prolang == ClanguageId)
procost = 1;
else
procost = 100;
if (proc->procost != procost)
appendStringInfo(&buf, " COST %g", proc->procost);
if (proc->prorows > 0 && proc->prorows != 1000)
appendStringInfo(&buf, " ROWS %g", proc->prorows);
if (proc->prosupport)
{
Oid argtypes[1];
/*
* We should qualify the support function's name if it wouldn't be
* resolved by lookup in the current search path.
*/
argtypes[0] = INTERNALOID;
appendStringInfo(&buf, " SUPPORT %s",
generate_function_name(proc->prosupport, 1,
NIL, argtypes,
false, NULL, EXPR_KIND_NONE));
}
if (oldlen != buf.len)
appendStringInfoChar(&buf, '\n');
/* Emit any proconfig options, one per line */
tmp = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_proconfig, &isnull);
if (!isnull)
{
ArrayType *a = DatumGetArrayTypeP(tmp);
int i;
Assert(ARR_ELEMTYPE(a) == TEXTOID);
Assert(ARR_NDIM(a) == 1);
Assert(ARR_LBOUND(a)[0] == 1);
for (i = 1; i <= ARR_DIMS(a)[0]; i++)
{
Datum d;
d = array_ref(a, 1, &i,
-1 /* varlenarray */ ,
-1 /* TEXT's typlen */ ,
false /* TEXT's typbyval */ ,
TYPALIGN_INT /* TEXT's typalign */ ,
&isnull);
if (!isnull)
{
char *configitem = TextDatumGetCString(d);
char *pos;
pos = strchr(configitem, '=');
if (pos == NULL)
continue;
*pos++ = '\0';
appendStringInfo(&buf, " SET %s TO ",
quote_identifier(configitem));
/*
* Variables that are marked GUC_LIST_QUOTE were already fully
* quoted by flatten_set_variable_args() before they were put
* into the proconfig array. However, because the quoting
* rules used there aren't exactly like SQL's, we have to
* break the list value apart and then quote the elements as
* string literals. (The elements may be double-quoted as-is,
* but we can't just feed them to the SQL parser; it would do
* the wrong thing with elements that are zero-length or
* longer than NAMEDATALEN.)
*
* Variables that are not so marked should just be emitted as
* simple string literals. If the variable is not known to
* guc.c, we'll do that; this makes it unsafe to use
* GUC_LIST_QUOTE for extension variables.
*/
if (GetConfigOptionFlags(configitem, true) & GUC_LIST_QUOTE)
{
List *namelist;
ListCell *lc;
/* Parse string into list of identifiers */
if (!SplitGUCList(pos, ',', &namelist))
{
/* this shouldn't fail really */
elog(ERROR, "invalid list syntax in proconfig item");
}
foreach(lc, namelist)
{
char *curname = (char *) lfirst(lc);
simple_quote_literal(&buf, curname);
if (lnext(namelist, lc))
appendStringInfoString(&buf, ", ");
}
}
else
simple_quote_literal(&buf, pos);
appendStringInfoChar(&buf, '\n');
}
}
}
/* And finally the function definition ... */
appendStringInfoString(&buf, "AS ");
tmp = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_probin, &isnull);
if (!isnull)
{
simple_quote_literal(&buf, TextDatumGetCString(tmp));
appendStringInfoString(&buf, ", "); /* assume prosrc isn't null */
}
tmp = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_prosrc, &isnull);
if (isnull)
elog(ERROR, "null prosrc");
prosrc = TextDatumGetCString(tmp);
/*
* We always use dollar quoting. Figure out a suitable delimiter.
*
	 * Since the user is likely to be editing the function body string, we
	 * shouldn't use a short delimiter that could easily collide with text in
	 * the body.  Hence prefer "$function$"/"$procedure$", but extend if needed.
*/
initStringInfo(&dq);
appendStringInfoChar(&dq, '$');
appendStringInfoString(&dq, (isfunction ? "function" : "procedure"));
while (strstr(prosrc, dq.data) != NULL)
appendStringInfoChar(&dq, 'x');
appendStringInfoChar(&dq, '$');
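	/*
	 * For example, if the body happens to contain the text "$function", the
	 * loop above extends the delimiter to "$functionx$" (and keeps adding
	 * 'x' until no conflict remains).
	 */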
appendBinaryStringInfo(&buf, dq.data, dq.len);
appendStringInfoString(&buf, prosrc);
appendBinaryStringInfo(&buf, dq.data, dq.len);
appendStringInfoChar(&buf, '\n');
ReleaseSysCache(proctup);
PG_RETURN_TEXT_P(string_to_text(buf.data));
}
/*
* pg_get_function_arguments
* Get a nicely-formatted list of arguments for a function.
* This is everything that would go between the parentheses in
* CREATE FUNCTION.
*/
Datum
pg_get_function_arguments(PG_FUNCTION_ARGS)
{
Oid funcid = PG_GETARG_OID(0);
StringInfoData buf;
HeapTuple proctup;
proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
if (!HeapTupleIsValid(proctup))
PG_RETURN_NULL();
initStringInfo(&buf);
(void) print_function_arguments(&buf, proctup, false, true);
ReleaseSysCache(proctup);
PG_RETURN_TEXT_P(string_to_text(buf.data));
}
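/*
 * Illustrative usage (hypothetical; the exact output depends on the
 * function's declaration):
 *
 *		SELECT pg_get_function_arguments('lower(text)'::regprocedure);
 *
 * might return just "text", while a function declared with named and
 * defaulted parameters would show e.g. "a integer, b text DEFAULT 'x'".
 */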
/*
* pg_get_function_identity_arguments
* Get a formatted list of arguments for a function.
* This is everything that would go between the parentheses in
* ALTER FUNCTION, etc. In particular, don't print defaults.
*/
Datum
pg_get_function_identity_arguments(PG_FUNCTION_ARGS)
{
Oid funcid = PG_GETARG_OID(0);
StringInfoData buf;
HeapTuple proctup;
proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
if (!HeapTupleIsValid(proctup))
PG_RETURN_NULL();
initStringInfo(&buf);
(void) print_function_arguments(&buf, proctup, false, false);
ReleaseSysCache(proctup);
PG_RETURN_TEXT_P(string_to_text(buf.data));
}
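/*
 * The identity form differs from pg_get_function_arguments only in that
 * defaults are suppressed: for an argument declared as "a integer DEFAULT 0"
 * the full form shows "a integer DEFAULT 0" while the identity form shows
 * just "a integer" (illustrative example).
 */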
/*
* pg_get_function_result
* Get a nicely-formatted version of the result type of a function.
* This is what would appear after RETURNS in CREATE FUNCTION.
*/
Datum
pg_get_function_result(PG_FUNCTION_ARGS)
{
Oid funcid = PG_GETARG_OID(0);
StringInfoData buf;
HeapTuple proctup;
proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
if (!HeapTupleIsValid(proctup))
PG_RETURN_NULL();
if (((Form_pg_proc) GETSTRUCT(proctup))->prokind == PROKIND_PROCEDURE)
{
ReleaseSysCache(proctup);
PG_RETURN_NULL();
}
initStringInfo(&buf);
print_function_rettype(&buf, proctup);
ReleaseSysCache(proctup);
PG_RETURN_TEXT_P(string_to_text(buf.data));
}
/*
* Guts of pg_get_function_result: append the function's return type
* to the specified buffer.
*/
static void
print_function_rettype(StringInfo buf, HeapTuple proctup)
{
Form_pg_proc proc = (Form_pg_proc) GETSTRUCT(proctup);
int ntabargs = 0;
StringInfoData rbuf;
initStringInfo(&rbuf);
if (proc->proretset)
{
/* It might be a table function; try to print the arguments */
appendStringInfoString(&rbuf, "TABLE(");
ntabargs = print_function_arguments(&rbuf, proctup, true, false);
if (ntabargs > 0)
appendStringInfoChar(&rbuf, ')');
else
resetStringInfo(&rbuf);
}
if (ntabargs == 0)
{
/* Not a table function, so do the normal thing */
if (proc->proretset)
appendStringInfoString(&rbuf, "SETOF ");
appendStringInfoString(&rbuf, format_type_be(proc->prorettype));
}
appendBinaryStringInfo(buf, rbuf.data, rbuf.len);
}
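/*
 * Illustrative results (hypothetical declarations): a function declared
 * RETURNS TABLE(x integer, y text) prints as "TABLE(x integer, y text)",
 * RETURNS SETOF integer prints as "SETOF integer", and a plain
 * RETURNS integer prints as "integer".
 */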
/*
* Common code for pg_get_function_arguments and pg_get_function_result:
* append the desired subset of arguments to buf. We print only TABLE
* arguments when print_table_args is true, and all the others when it's false.
* We print argument defaults only if print_defaults is true.
* Function return value is the number of arguments printed.
*/
static int
print_function_arguments(StringInfo buf, HeapTuple proctup,
bool print_table_args, bool print_defaults)
{
Form_pg_proc proc = (Form_pg_proc) GETSTRUCT(proctup);
int numargs;
Oid *argtypes;
char **argnames;
char *argmodes;
int insertorderbyat = -1;
int argsprinted;
int inputargno;
int nlackdefaults;
List *argdefaults = NIL;
ListCell *nextargdefault = NULL;
int i;
numargs = get_func_arg_info(proctup,
&argtypes, &argnames, &argmodes);
nlackdefaults = numargs;
if (print_defaults && proc->pronargdefaults > 0)
{
Datum proargdefaults;
bool isnull;
proargdefaults = SysCacheGetAttr(PROCOID, proctup,
Anum_pg_proc_proargdefaults,
&isnull);
if (!isnull)
{
char *str;
str = TextDatumGetCString(proargdefaults);
argdefaults = castNode(List, stringToNode(str));
pfree(str);
nextargdefault = list_head(argdefaults);
/* nlackdefaults counts only *input* arguments lacking defaults */
nlackdefaults = proc->pronargs - list_length(argdefaults);
}
}
/* Check for special treatment of ordered-set aggregates */
if (proc->prokind == PROKIND_AGGREGATE)
{
HeapTuple aggtup;
Form_pg_aggregate agg;
		aggtup = SearchSysCache1(AGGFNOID, ObjectIdGetDatum(proc->oid));
if (!HeapTupleIsValid(aggtup))
elog(ERROR, "cache lookup failed for aggregate %u",
proc->oid);
agg = (Form_pg_aggregate) GETSTRUCT(aggtup);
if (AGGKIND_IS_ORDERED_SET(agg->aggkind))
insertorderbyat = agg->aggnumdirectargs;
ReleaseSysCache(aggtup);
}
argsprinted = 0;
inputargno = 0;
for (i = 0; i < numargs; i++)
{
Oid argtype = argtypes[i];
char *argname = argnames ? argnames[i] : NULL;
char argmode = argmodes ? argmodes[i] : PROARGMODE_IN;
const char *modename;
bool isinput;
switch (argmode)
{
case PROARGMODE_IN:
modename = "";
isinput = true;
break;
case PROARGMODE_INOUT:
modename = "INOUT ";
isinput = true;
break;
case PROARGMODE_OUT:
modename = "OUT ";
isinput = false;
break;
case PROARGMODE_VARIADIC:
modename = "VARIADIC ";
isinput = true;
break;
case PROARGMODE_TABLE:
modename = "";
isinput = false;
break;
default:
elog(ERROR, "invalid parameter mode '%c'", argmode);
modename = NULL; /* keep compiler quiet */
isinput = false;
break;
}
if (isinput)
inputargno++; /* this is a 1-based counter */
if (print_table_args != (argmode == PROARGMODE_TABLE))
continue;
if (argsprinted == insertorderbyat)
{
if (argsprinted)
appendStringInfoChar(buf, ' ');
appendStringInfoString(buf, "ORDER BY ");
}
else if (argsprinted)
appendStringInfoString(buf, ", ");
appendStringInfoString(buf, modename);
if (argname && argname[0])
appendStringInfo(buf, "%s ", quote_identifier(argname));
appendStringInfoString(buf, format_type_be(argtype));
if (print_defaults && isinput && inputargno > nlackdefaults)
{
Node *expr;
Assert(nextargdefault != NULL);
expr = (Node *) lfirst(nextargdefault);
nextargdefault = lnext(argdefaults, nextargdefault);
appendStringInfo(buf, " DEFAULT %s",
deparse_expression(expr, NIL, false, false));
}
argsprinted++;
/* nasty hack: print the last arg twice for variadic ordered-set agg */
if (argsprinted == insertorderbyat && i == numargs - 1)
{
i--;
/* aggs shouldn't have defaults anyway, but just to be sure ... */
print_defaults = false;
}
}
return argsprinted;
}
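/*
 * The output is a comma-separated list in declaration order, each argument
 * printed as "[mode ]name type[ DEFAULT expr]"; for an ordered-set or
 * hypothetical-set aggregate, "ORDER BY" is inserted before the aggregated
 * arguments, e.g. roughly "double precision ORDER BY double precision" for a
 * percentile-style aggregate (illustrative example), and for a variadic
 * "any" aggregate the single argument list is printed again after ORDER BY,
 * per the hack noted above.
 */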
static bool
is_input_argument(int nth, const char *argmodes)
{
return (!argmodes
|| argmodes[nth] == PROARGMODE_IN
|| argmodes[nth] == PROARGMODE_INOUT
|| argmodes[nth] == PROARGMODE_VARIADIC);
}
/*
* Append used transformed types to specified buffer
*/
static void
print_function_trftypes(StringInfo buf, HeapTuple proctup)
{
Oid *trftypes;
int ntypes;
ntypes = get_func_trftypes(proctup, &trftypes);
if (ntypes > 0)
{
int i;
appendStringInfoString(buf, "\n TRANSFORM ");
for (i = 0; i < ntypes; i++)
{
if (i != 0)
appendStringInfoString(buf, ", ");
appendStringInfo(buf, "FOR TYPE %s", format_type_be(trftypes[i]));
}
}
}
/*
* Get textual representation of a function argument's default value. The
* second argument of this function is the argument number among all arguments
* (i.e. proallargtypes, *not* proargtypes), starting with 1, because that's
* how information_schema.sql uses it.
*/
Datum
pg_get_function_arg_default(PG_FUNCTION_ARGS)
{
Oid funcid = PG_GETARG_OID(0);
int32 nth_arg = PG_GETARG_INT32(1);
HeapTuple proctup;
Form_pg_proc proc;
int numargs;
Oid *argtypes;
char **argnames;
char *argmodes;
int i;
List *argdefaults;
Node *node;
char *str;
int nth_inputarg;
Datum proargdefaults;
bool isnull;
int nth_default;
proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
if (!HeapTupleIsValid(proctup))
PG_RETURN_NULL();
numargs = get_func_arg_info(proctup, &argtypes, &argnames, &argmodes);
if (nth_arg < 1 || nth_arg > numargs || !is_input_argument(nth_arg - 1, argmodes))
{
ReleaseSysCache(proctup);
PG_RETURN_NULL();
}
nth_inputarg = 0;
for (i = 0; i < nth_arg; i++)
if (is_input_argument(i, argmodes))
nth_inputarg++;
proargdefaults = SysCacheGetAttr(PROCOID, proctup,
Anum_pg_proc_proargdefaults,
&isnull);
if (isnull)
{
ReleaseSysCache(proctup);
PG_RETURN_NULL();
}
str = TextDatumGetCString(proargdefaults);
argdefaults = castNode(List, stringToNode(str));
pfree(str);
proc = (Form_pg_proc) GETSTRUCT(proctup);
/*
* Calculate index into proargdefaults: proargdefaults corresponds to the
* last N input arguments, where N = pronargdefaults.
*/
nth_default = nth_inputarg - 1 - (proc->pronargs - proc->pronargdefaults);
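	/*
	 * Worked example: with pronargs = 3 and pronargdefaults = 2, asking
	 * about the third input argument gives nth_inputarg = 3, so
	 * nth_default = 3 - 1 - (3 - 2) = 1, i.e. the second entry of
	 * proargdefaults.
	 */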
if (nth_default < 0 || nth_default >= list_length(argdefaults))
{
ReleaseSysCache(proctup);
PG_RETURN_NULL();
}
node = list_nth(argdefaults, nth_default);
str = deparse_expression(node, NIL, false, false);
ReleaseSysCache(proctup);
PG_RETURN_TEXT_P(string_to_text(str));
}
/*
* deparse_expression - General utility for deparsing expressions
*
* calls deparse_expression_pretty with all prettyPrinting disabled
*/
char *
deparse_expression(Node *expr, List *dpcontext,
bool forceprefix, bool showimplicit)
{
return deparse_expression_pretty(expr, dpcontext, forceprefix,
showimplicit, 0, 0);
}
/* ----------
* deparse_expression_pretty - General utility for deparsing expressions
*
* expr is the node tree to be deparsed. It must be a transformed expression
* tree (ie, not the raw output of gram.y).
*
* dpcontext is a list of deparse_namespace nodes representing the context
* for interpreting Vars in the node tree. It can be NIL if no Vars are
* expected.
*
* forceprefix is true to force all Vars to be prefixed with their table names.
*
* showimplicit is true to force all implicit casts to be shown explicitly.
*
* Tries to pretty up the output according to prettyFlags and startIndent.
*
* The result is a palloc'd string.
* ----------
*/
static char *
deparse_expression_pretty(Node *expr, List *dpcontext,
bool forceprefix, bool showimplicit,
int prettyFlags, int startIndent)
{
StringInfoData buf;
deparse_context context;
initStringInfo(&buf);
context.buf = &buf;
context.namespaces = dpcontext;
context.windowClause = NIL;
context.windowTList = NIL;
context.varprefix = forceprefix;
context.prettyFlags = prettyFlags;
context.wrapColumn = WRAP_COLUMN_DEFAULT;
context.indentLevel = startIndent;
context.special_exprkind = EXPR_KIND_NONE;
context.appendparents = NULL;
get_rule_expr(expr, &context, showimplicit);
return buf.data;
}
/* ----------
* deparse_context_for - Build deparse context for a single relation
*
* Given the reference name (alias) and OID of a relation, build deparsing
* context for an expression referencing only that relation (as varno 1,
* varlevelsup 0). This is sufficient for many uses of deparse_expression.
* ----------
*/
List *
deparse_context_for(const char *aliasname, Oid relid)
{
deparse_namespace *dpns;
RangeTblEntry *rte;
dpns = (deparse_namespace *) palloc0(sizeof(deparse_namespace));
/* Build a minimal RTE for the rel */
rte = makeNode(RangeTblEntry);
rte->rtekind = RTE_RELATION;
rte->relid = relid;
rte->relkind = RELKIND_RELATION; /* no need for exactness here */
rte->rellockmode = AccessShareLock;
rte->alias = makeAlias(aliasname, NIL);
rte->eref = rte->alias;
rte->lateral = false;
rte->inh = false;
rte->inFromCl = true;
/* Build one-element rtable */
dpns->rtable = list_make1(rte);
dpns->subplans = NIL;
dpns->ctes = NIL;
dpns->appendrels = NULL;
set_rtable_names(dpns, NIL, NULL);
set_simple_column_names(dpns);
/* Return a one-deep namespace stack */
return list_make1(dpns);
}
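/*
 * A minimal sketch of how this is typically combined with
 * deparse_expression(); the helper name and the alias "t" are hypothetical,
 * not part of PostgreSQL.
 */
#ifdef NOT_USED
static char *
example_deparse_rel_expr(Oid relid, Node *expr)
{
	/* One-relation context: Vars with varno 1 print as columns of "t" */
	List	   *dpcontext = deparse_context_for("t", relid);

	/* no forced table-name prefixes, implicit casts not shown */
	return deparse_expression(expr, dpcontext, false, false);
}
#endif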
/*
* deparse_context_for_plan_tree - Build deparse context for a Plan tree
*
* When deparsing an expression in a Plan tree, we use the plan's rangetable
* to resolve names of simple Vars. The initialization of column names for
* this is rather expensive if the rangetable is large, and it'll be the same
* for every expression in the Plan tree; so we do it just once and re-use
* the result of this function for each expression. (Note that the result
* is not usable until set_deparse_context_plan() is applied to it.)
*
* In addition to the PlannedStmt, pass the per-RTE alias names
* assigned by a previous call to select_rtable_names_for_explain.
*/
List *
deparse_context_for_plan_tree(PlannedStmt *pstmt, List *rtable_names)
{
deparse_namespace *dpns;
dpns = (deparse_namespace *) palloc0(sizeof(deparse_namespace));
/* Initialize fields that stay the same across the whole plan tree */
dpns->rtable = pstmt->rtable;
dpns->rtable_names = rtable_names;
dpns->subplans = pstmt->subplans;
dpns->ctes = NIL;
if (pstmt->appendRelations)
{
/* Set up the array, indexed by child relid */
int ntables = list_length(dpns->rtable);
ListCell *lc;
dpns->appendrels = (AppendRelInfo **)
palloc0((ntables + 1) * sizeof(AppendRelInfo *));
foreach(lc, pstmt->appendRelations)
{
AppendRelInfo *appinfo = lfirst_node(AppendRelInfo, lc);
Index crelid = appinfo->child_relid;
Assert(crelid > 0 && crelid <= ntables);
Assert(dpns->appendrels[crelid] == NULL);
dpns->appendrels[crelid] = appinfo;
}
}
else
dpns->appendrels = NULL; /* don't need it */
/*
* Set up column name aliases. We will get rather bogus results for join
* RTEs, but that doesn't matter because plan trees don't contain any join
* alias Vars.
*/
set_simple_column_names(dpns);
/* Return a one-deep namespace stack */
return list_make1(dpns);
}
/*
* set_deparse_context_plan - Specify Plan node containing expression
*
* When deparsing an expression in a Plan tree, we might have to resolve
* OUTER_VAR, INNER_VAR, or INDEX_VAR references. To do this, the caller must
* provide the parent Plan node. Then OUTER_VAR and INNER_VAR references
* can be resolved by drilling down into the left and right child plans.
* Similarly, INDEX_VAR references can be resolved by reference to the
* indextlist given in a parent IndexOnlyScan node, or to the scan tlist in
* ForeignScan and CustomScan nodes. (Note that we don't currently support
* deparsing of indexquals in regular IndexScan or BitmapIndexScan nodes;
* for those, we can only deparse the indexqualorig fields, which won't
* contain INDEX_VAR Vars.)
*
* The ancestors list is a list of the Plan's parent Plan and SubPlan nodes,
* the most-closely-nested first. This is needed to resolve PARAM_EXEC
* Params. Note we assume that all the Plan nodes share the same rtable.
*
* Once this function has been called, deparse_expression() can be called on
* subsidiary expression(s) of the specified Plan node. To deparse
* expressions of a different Plan node in the same Plan tree, re-call this
* function to identify the new parent Plan node.
*
* The result is the same List passed in; this is a notational convenience.
*/
List *
set_deparse_context_plan(List *dpcontext, Plan *plan, List *ancestors)
{
deparse_namespace *dpns;
/* Should always have one-entry namespace list for Plan deparsing */
Assert(list_length(dpcontext) == 1);
dpns = (deparse_namespace *) linitial(dpcontext);
/* Set our attention on the specific plan node passed in */
set_deparse_plan(dpns, plan);
dpns->ancestors = ancestors;
return dpcontext;
}
/*
* select_rtable_names_for_explain - Select RTE aliases for EXPLAIN
*
* Determine the relation aliases we'll use during an EXPLAIN operation.
* This is just a frontend to set_rtable_names. We have to expose the aliases
* to EXPLAIN because EXPLAIN needs to know the right alias names to print.
*/
List *
select_rtable_names_for_explain(List *rtable, Bitmapset *rels_used)
{
deparse_namespace dpns;
memset(&dpns, 0, sizeof(dpns));
dpns.rtable = rtable;
dpns.subplans = NIL;
dpns.ctes = NIL;
dpns.appendrels = NULL;
set_rtable_names(&dpns, NIL, rels_used);
/* We needn't bother computing column aliases yet */
return dpns.rtable_names;
}
/*
* set_rtable_names: select RTE aliases to be used in printing a query
*
* We fill in dpns->rtable_names with a list of names that is one-for-one with
* the already-filled dpns->rtable list. Each RTE name is unique among those
* in the new namespace plus any ancestor namespaces listed in
* parent_namespaces.
*
* If rels_used isn't NULL, only RTE indexes listed in it are given aliases.
*
* Note that this function is only concerned with relation names, not column
* names.
*/
static void
set_rtable_names(deparse_namespace *dpns, List *parent_namespaces,
Bitmapset *rels_used)
{
HASHCTL hash_ctl;
HTAB *names_hash;
NameHashEntry *hentry;
bool found;
int rtindex;
ListCell *lc;
dpns->rtable_names = NIL;
/* nothing more to do if empty rtable */
if (dpns->rtable == NIL)
return;
/*
* We use a hash table to hold known names, so that this process is O(N)
* not O(N^2) for N names.
*/
MemSet(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = NAMEDATALEN;
hash_ctl.entrysize = sizeof(NameHashEntry);
hash_ctl.hcxt = CurrentMemoryContext;
names_hash = hash_create("set_rtable_names names",
list_length(dpns->rtable),
&hash_ctl,
HASH_ELEM | HASH_CONTEXT);
/* Preload the hash table with names appearing in parent_namespaces */
foreach(lc, parent_namespaces)
{
deparse_namespace *olddpns = (deparse_namespace *) lfirst(lc);
ListCell *lc2;
foreach(lc2, olddpns->rtable_names)
{
char *oldname = (char *) lfirst(lc2);
if (oldname == NULL)
continue;
hentry = (NameHashEntry *) hash_search(names_hash,
oldname,
HASH_ENTER,
&found);
/* we do not complain about duplicate names in parent namespaces */
hentry->counter = 0;
}
}
/* Now we can scan the rtable */
rtindex = 1;
foreach(lc, dpns->rtable)
{
RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
char *refname;
/* Just in case this takes an unreasonable amount of time ... */
CHECK_FOR_INTERRUPTS();
if (rels_used && !bms_is_member(rtindex, rels_used))
{
/* Ignore unreferenced RTE */
refname = NULL;
}
else if (rte->alias)
{
/* If RTE has a user-defined alias, prefer that */
refname = rte->alias->aliasname;
}
else if (rte->rtekind == RTE_RELATION)
{
/* Use the current actual name of the relation */
refname = get_rel_name(rte->relid);
}
else if (rte->rtekind == RTE_JOIN)
{
/* Unnamed join has no refname */
refname = NULL;
}
else
{
/* Otherwise use whatever the parser assigned */
refname = rte->eref->aliasname;
}
/*
* If the selected name isn't unique, append digits to make it so, and
* make a new hash entry for it once we've got a unique name. For a
* very long input name, we might have to truncate to stay within
* NAMEDATALEN.
*/
if (refname)
{
hentry = (NameHashEntry *) hash_search(names_hash,
refname,
HASH_ENTER,
&found);
if (found)
{
/* Name already in use, must choose a new one */
int refnamelen = strlen(refname);
char *modname = (char *) palloc(refnamelen + 16);
NameHashEntry *hentry2;
do
{
hentry->counter++;
for (;;)
{
memcpy(modname, refname, refnamelen);
sprintf(modname + refnamelen, "_%d", hentry->counter);
if (strlen(modname) < NAMEDATALEN)
break;
/* drop chars from refname to keep all the digits */
refnamelen = pg_mbcliplen(refname, refnamelen,
refnamelen - 1);
}
hentry2 = (NameHashEntry *) hash_search(names_hash,
modname,
HASH_ENTER,
&found);
} while (found);
hentry2->counter = 0; /* init new hash entry */
refname = modname;
}
else
{
/* Name not previously used, need only initialize hentry */
hentry->counter = 0;
}
}
dpns->rtable_names = lappend(dpns->rtable_names, refname);
rtindex++;
}
hash_destroy(names_hash);
}
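/*
 * For example (hypothetical rtable): if two relation RTEs would both be
 * named "t", the second becomes "t_1" and a third would become "t_2"; an
 * overlong name is clipped with pg_mbcliplen() so that the appended digits
 * still fit within NAMEDATALEN.
 */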
/*
* set_deparse_for_query: set up deparse_namespace for deparsing a Query tree
*
* For convenience, this is defined to initialize the deparse_namespace struct
* from scratch.
*/
static void
set_deparse_for_query(deparse_namespace *dpns, Query *query,
List *parent_namespaces)
{
ListCell *lc;
ListCell *lc2;
/* Initialize *dpns and fill rtable/ctes links */
memset(dpns, 0, sizeof(deparse_namespace));
dpns->rtable = query->rtable;
dpns->subplans = NIL;
dpns->ctes = query->cteList;
dpns->appendrels = NULL;
/* Assign a unique relation alias to each RTE */
set_rtable_names(dpns, parent_namespaces, NULL);
/* Initialize dpns->rtable_columns to contain zeroed structs */
dpns->rtable_columns = NIL;
while (list_length(dpns->rtable_columns) < list_length(dpns->rtable))
dpns->rtable_columns = lappend(dpns->rtable_columns,
palloc0(sizeof(deparse_columns)));
/* If it's a utility query, it won't have a jointree */
if (query->jointree)
{
/* Detect whether global uniqueness of USING names is needed */
dpns->unique_using =
has_dangerous_join_using(dpns, (Node *) query->jointree);
/*
* Select names for columns merged by USING, via a recursive pass over
* the query jointree.
*/
set_using_names(dpns, (Node *) query->jointree, NIL);
}
/*
* Now assign remaining column aliases for each RTE. We do this in a
* linear scan of the rtable, so as to process RTEs whether or not they
* are in the jointree (we mustn't miss NEW.*, INSERT target relations,
* etc). JOIN RTEs must be processed after their children, but this is
* okay because they appear later in the rtable list than their children
* (cf Asserts in identify_join_columns()).
*/
forboth(lc, dpns->rtable, lc2, dpns->rtable_columns)
{
RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
deparse_columns *colinfo = (deparse_columns *) lfirst(lc2);
if (rte->rtekind == RTE_JOIN)
set_join_column_names(dpns, rte, colinfo);
else
set_relation_column_names(dpns, rte, colinfo);
}
}
/*
* set_simple_column_names: fill in column aliases for non-query situations
*
* This handles EXPLAIN and cases where we only have relation RTEs. Without
* a join tree, we can't do anything smart about join RTEs, but we don't
* need to (note that EXPLAIN should never see join alias Vars anyway).
* If we do hit a join RTE we'll just process it like a non-table base RTE.
*/
static void
set_simple_column_names(deparse_namespace *dpns)
{
ListCell *lc;
ListCell *lc2;
/* Initialize dpns->rtable_columns to contain zeroed structs */
dpns->rtable_columns = NIL;
while (list_length(dpns->rtable_columns) < list_length(dpns->rtable))
dpns->rtable_columns = lappend(dpns->rtable_columns,
palloc0(sizeof(deparse_columns)));
/* Assign unique column aliases within each RTE */
forboth(lc, dpns->rtable, lc2, dpns->rtable_columns)
{
RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
deparse_columns *colinfo = (deparse_columns *) lfirst(lc2);
set_relation_column_names(dpns, rte, colinfo);
}
}
/*
* has_dangerous_join_using: search jointree for unnamed JOIN USING
*
* Merged columns of a JOIN USING may act differently from either of the input
* columns, either because they are merged with COALESCE (in a FULL JOIN) or
* because an implicit coercion of the underlying input column is required.
* In such a case the column must be referenced as a column of the JOIN not as
* a column of either input. And this is problematic if the join is unnamed
* (alias-less): we cannot qualify the column's name with an RTE name, since
* there is none. (Forcibly assigning an alias to the join is not a solution,
* since that will prevent legal references to tables below the join.)
* To ensure that every column in the query is unambiguously referenceable,
* we must assign such merged columns names that are globally unique across
* the whole query, aliasing other columns out of the way as necessary.
*
* Because the ensuing re-aliasing is fairly damaging to the readability of
* the query, we don't do this unless we have to. So, we must pre-scan
* the join tree to see if we have to, before starting set_using_names().
*/
static bool
has_dangerous_join_using(deparse_namespace *dpns, Node *jtnode)
{
if (IsA(jtnode, RangeTblRef))
{
/* nothing to do here */
}
else if (IsA(jtnode, FromExpr))
{
FromExpr *f = (FromExpr *) jtnode;
ListCell *lc;
foreach(lc, f->fromlist)
{
if (has_dangerous_join_using(dpns, (Node *) lfirst(lc)))
return true;
}
}
else if (IsA(jtnode, JoinExpr))
{
JoinExpr *j = (JoinExpr *) jtnode;
/* Is it an unnamed JOIN with USING? */
if (j->alias == NULL && j->usingClause)
{
/*
* Yes, so check each join alias var to see if any of them are not
* simple references to underlying columns. If so, we have a
* dangerous situation and must pick unique aliases.
*/
RangeTblEntry *jrte = rt_fetch(j->rtindex, dpns->rtable);
/* We need only examine the merged columns */
for (int i = 0; i < jrte->joinmergedcols; i++)
{
Node *aliasvar = list_nth(jrte->joinaliasvars, i);
if (!IsA(aliasvar, Var))
return true;
}
}
/* Nope, but inspect children */
if (has_dangerous_join_using(dpns, j->larg))
return true;
if (has_dangerous_join_using(dpns, j->rarg))
return true;
}
else
elog(ERROR, "unrecognized node type: %d",
(int) nodeTag(jtnode));
return false;
}
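/*
 * Example of the dangerous case (hypothetical query):
 *
 *		SELECT x FROM a FULL JOIN b USING (x)
 *
 * The merged column "x" is effectively COALESCE(a.x, b.x), so its join
 * alias var is not a plain Var, and the join has no alias to qualify it
 * with; hence globally unique USING names must be chosen.
 */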
/*
* set_using_names: select column aliases to be used for merged USING columns
*
* We do this during a recursive descent of the query jointree.
* dpns->unique_using must already be set to determine the global strategy.
*
* Column alias info is saved in the dpns->rtable_columns list, which is
* assumed to be filled with pre-zeroed deparse_columns structs.
*
* parentUsing is a list of all USING aliases assigned in parent joins of
* the current jointree node. (The passed-in list must not be modified.)
*/
static void
set_using_names(deparse_namespace *dpns, Node *jtnode, List *parentUsing)
{
if (IsA(jtnode, RangeTblRef))
{
/* nothing to do now */
}
else if (IsA(jtnode, FromExpr))
{
FromExpr *f = (FromExpr *) jtnode;
ListCell *lc;
foreach(lc, f->fromlist)
set_using_names(dpns, (Node *) lfirst(lc), parentUsing);
}
else if (IsA(jtnode, JoinExpr))
{
JoinExpr *j = (JoinExpr *) jtnode;
RangeTblEntry *rte = rt_fetch(j->rtindex, dpns->rtable);
deparse_columns *colinfo = deparse_columns_fetch(j->rtindex, dpns);
int *leftattnos;
int *rightattnos;
deparse_columns *leftcolinfo;
deparse_columns *rightcolinfo;
int i;
ListCell *lc;
/* Get info about the shape of the join */
identify_join_columns(j, rte, colinfo);
leftattnos = colinfo->leftattnos;
rightattnos = colinfo->rightattnos;
/* Look up the not-yet-filled-in child deparse_columns structs */
leftcolinfo = deparse_columns_fetch(colinfo->leftrti, dpns);
rightcolinfo = deparse_columns_fetch(colinfo->rightrti, dpns);
/*
* If this join is unnamed, then we cannot substitute new aliases at
* this level, so any name requirements pushed down to here must be
* pushed down again to the children.
*/
if (rte->alias == NULL)
{
for (i = 0; i < colinfo->num_cols; i++)
{
char *colname = colinfo->colnames[i];
if (colname == NULL)
continue;
/* Push down to left column, unless it's a system column */
if (leftattnos[i] > 0)
{
expand_colnames_array_to(leftcolinfo, leftattnos[i]);
leftcolinfo->colnames[leftattnos[i] - 1] = colname;
}
/* Same on the righthand side */
if (rightattnos[i] > 0)
{
expand_colnames_array_to(rightcolinfo, rightattnos[i]);
rightcolinfo->colnames[rightattnos[i] - 1] = colname;
}
}
}
/*
* If there's a USING clause, select the USING column names and push
* those names down to the children. We have two strategies:
*
* If dpns->unique_using is true, we force all USING names to be
* unique across the whole query level. In principle we'd only need
* the names of dangerous USING columns to be globally unique, but to
* safely assign all USING names in a single pass, we have to enforce
* the same uniqueness rule for all of them. However, if a USING
* column's name has been pushed down from the parent, we should use
* it as-is rather than making a uniqueness adjustment. This is
* necessary when we're at an unnamed join, and it creates no risk of
* ambiguity. Also, if there's a user-written output alias for a
* merged column, we prefer to use that rather than the input name;
* this simplifies the logic and seems likely to lead to less aliasing
* overall.
*
* If dpns->unique_using is false, we only need USING names to be
* unique within their own join RTE. We still need to honor
* pushed-down names, though.
*
* Though significantly different in results, these two strategies are
* implemented by the same code, with only the difference of whether
* to put assigned names into dpns->using_names.
*/
if (j->usingClause)
{
/* Copy the input parentUsing list so we don't modify it */
parentUsing = list_copy(parentUsing);
/* USING names must correspond to the first join output columns */
expand_colnames_array_to(colinfo, list_length(j->usingClause));
i = 0;
foreach(lc, j->usingClause)
{
char *colname = strVal(lfirst(lc));
/* Assert it's a merged column */
Assert(leftattnos[i] != 0 && rightattnos[i] != 0);
/* Adopt passed-down name if any, else select unique name */
if (colinfo->colnames[i] != NULL)
colname = colinfo->colnames[i];
else
{
/* Prefer user-written output alias if any */
if (rte->alias && i < list_length(rte->alias->colnames))
colname = strVal(list_nth(rte->alias->colnames, i));
/* Make it appropriately unique */
colname = make_colname_unique(colname, dpns, colinfo);
if (dpns->unique_using)
dpns->using_names = lappend(dpns->using_names,
colname);
/* Save it as output column name, too */
colinfo->colnames[i] = colname;
}
/* Remember selected names for use later */
colinfo->usingNames = lappend(colinfo->usingNames, colname);
parentUsing = lappend(parentUsing, colname);
/* Push down to left column, unless it's a system column */
if (leftattnos[i] > 0)
{
expand_colnames_array_to(leftcolinfo, leftattnos[i]);
leftcolinfo->colnames[leftattnos[i] - 1] = colname;
}
/* Same on the righthand side */
if (rightattnos[i] > 0)
{
expand_colnames_array_to(rightcolinfo, rightattnos[i]);
rightcolinfo->colnames[rightattnos[i] - 1] = colname;
}
i++;
}
}
/* Mark child deparse_columns structs with correct parentUsing info */
leftcolinfo->parentUsing = parentUsing;
rightcolinfo->parentUsing = parentUsing;
/* Now recursively assign USING column names in children */
set_using_names(dpns, j->larg, parentUsing);
set_using_names(dpns, j->rarg, parentUsing);
}
else
elog(ERROR, "unrecognized node type: %d",
(int) nodeTag(jtnode));
}
/*
* set_relation_column_names: select column aliases for a non-join RTE
*
* Column alias info is saved in *colinfo, which is assumed to be pre-zeroed.
* If any colnames entries are already filled in, those override local
* choices.
*/
static void
set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
deparse_columns *colinfo)
{
int ncolumns;
char **real_colnames;
bool changed_any;
int noldcolumns;
int i;
int j;
/*
* Extract the RTE's "real" column names. This is comparable to
* get_rte_attribute_name, except that it's important to disregard dropped
* columns. We put NULL into the array for a dropped column.
*/
if (rte->rtekind == RTE_RELATION)
{
/* Relation --- look to the system catalogs for up-to-date info */
Relation rel;
TupleDesc tupdesc;
rel = relation_open(rte->relid, AccessShareLock);
tupdesc = RelationGetDescr(rel);
ncolumns = tupdesc->natts;
real_colnames = (char **) palloc(ncolumns * sizeof(char *));
for (i = 0; i < ncolumns; i++)
{
Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
if (attr->attisdropped)
real_colnames[i] = NULL;
else
real_colnames[i] = pstrdup(NameStr(attr->attname));
}
relation_close(rel, AccessShareLock);
}
else
{
/* Otherwise use the column names from eref */
ListCell *lc;
ncolumns = list_length(rte->eref->colnames);
real_colnames = (char **) palloc(ncolumns * sizeof(char *));
i = 0;
foreach(lc, rte->eref->colnames)
{
/*
* If the column name shown in eref is an empty string, then it's
* a column that was dropped at the time of parsing the query, so
* treat it as dropped.
*/
char *cname = strVal(lfirst(lc));
if (cname[0] == '\0')
cname = NULL;
real_colnames[i] = cname;
i++;
}
}
/*
* Ensure colinfo->colnames has a slot for each column. (It could be long
* enough already, if we pushed down a name for the last column.) Note:
* it's possible that there are now more columns than there were when the
* query was parsed, ie colnames could be longer than rte->eref->colnames.
* We must assign unique aliases to the new columns too, else there could
* be unresolved conflicts when the view/rule is reloaded.
*/
expand_colnames_array_to(colinfo, ncolumns);
Assert(colinfo->num_cols == ncolumns);
/*
* Make sufficiently large new_colnames and is_new_col arrays, too.
*
* Note: because we leave colinfo->num_new_cols zero until after the loop,
* colname_is_unique will not consult that array, which is fine because it
* would only be duplicate effort.
*/
colinfo->new_colnames = (char **) palloc(ncolumns * sizeof(char *));
colinfo->is_new_col = (bool *) palloc(ncolumns * sizeof(bool));
/*
* Scan the columns, select a unique alias for each one, and store it in
* colinfo->colnames and colinfo->new_colnames. The former array has NULL
* entries for dropped columns, the latter omits them. Also mark
* new_colnames entries as to whether they are new since parse time; this
* is the case for entries beyond the length of rte->eref->colnames.
*/
noldcolumns = list_length(rte->eref->colnames);
changed_any = false;
j = 0;
for (i = 0; i < ncolumns; i++)
{
char *real_colname = real_colnames[i];
char *colname = colinfo->colnames[i];
/* Skip dropped columns */
if (real_colname == NULL)
{
Assert(colname == NULL); /* colnames[i] is already NULL */
continue;
}
/* If alias already assigned, that's what to use */
if (colname == NULL)
{
/* If user wrote an alias, prefer that over real column name */
if (rte->alias && i < list_length(rte->alias->colnames))
colname = strVal(list_nth(rte->alias->colnames, i));
else
colname = real_colname;
/* Unique-ify and insert into colinfo */
colname = make_colname_unique(colname, dpns, colinfo);
colinfo->colnames[i] = colname;
}
/* Put names of non-dropped columns in new_colnames[] too */
colinfo->new_colnames[j] = colname;
/* And mark them as new or not */
colinfo->is_new_col[j] = (i >= noldcolumns);
j++;
/* Remember if any assigned aliases differ from "real" name */
if (!changed_any && strcmp(colname, real_colname) != 0)
changed_any = true;
}
/*
* Set correct length for new_colnames[] array. (Note: if columns have
* been added, colinfo->num_cols includes them, which is not really quite
* right but is harmless, since any new columns must be at the end where
* they won't affect varattnos of pre-existing columns.)
*/
colinfo->num_new_cols = j;
/*
* For a relation RTE, we need only print the alias column names if any
* are different from the underlying "real" names. For a function RTE,
* always emit a complete column alias list; this is to protect against
* possible instability of the default column names (eg, from altering
* parameter names). For tablefunc RTEs, we never print aliases, because
* the column names are part of the clause itself. For other RTE types,
* print if we changed anything OR if there were user-written column
* aliases (since the latter would be part of the underlying "reality").
*/
if (rte->rtekind == RTE_RELATION)
colinfo->printaliases = changed_any;
else if (rte->rtekind == RTE_FUNCTION)
colinfo->printaliases = true;
else if (rte->rtekind == RTE_TABLEFUNC)
colinfo->printaliases = false;
else if (rte->alias && rte->alias->colnames != NIL)
colinfo->printaliases = true;
else
colinfo->printaliases = changed_any;
}
/*
* set_join_column_names: select column aliases for a join RTE
*
* Column alias info is saved in *colinfo, which is assumed to be pre-zeroed.
* If any colnames entries are already filled in, those override local
* choices. Also, names for USING columns were already chosen by
* set_using_names(). We further expect that column alias selection has been
* completed for both input RTEs.
*/
static void
set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
deparse_columns *colinfo)
{
deparse_columns *leftcolinfo;
deparse_columns *rightcolinfo;
bool changed_any;
int noldcolumns;
int nnewcolumns;
Bitmapset *leftmerged = NULL;
Bitmapset *rightmerged = NULL;
int i;
int j;
int ic;
int jc;
/* Look up the previously-filled-in child deparse_columns structs */
leftcolinfo = deparse_columns_fetch(colinfo->leftrti, dpns);
rightcolinfo = deparse_columns_fetch(colinfo->rightrti, dpns);
/*
* Ensure colinfo->colnames has a slot for each column. (It could be long
* enough already, if we pushed down a name for the last column.) Note:
* it's possible that one or both inputs now have more columns than there
* were when the query was parsed, but we'll deal with that below. We
* only need entries in colnames for pre-existing columns.
*/
noldcolumns = list_length(rte->eref->colnames);
expand_colnames_array_to(colinfo, noldcolumns);
Assert(colinfo->num_cols == noldcolumns);
/*
* Scan the join output columns, select an alias for each one, and store
* it in colinfo->colnames. If there are USING columns, set_using_names()
* already selected their names, so we can start the loop at the first
* non-merged column.
*/
changed_any = false;
for (i = list_length(colinfo->usingNames); i < noldcolumns; i++)
{
char *colname = colinfo->colnames[i];
char *real_colname;
/* Join column must refer to at least one input column */
Assert(colinfo->leftattnos[i] != 0 || colinfo->rightattnos[i] != 0);
/* Get the child column name */
if (colinfo->leftattnos[i] > 0)
real_colname = leftcolinfo->colnames[colinfo->leftattnos[i] - 1];
else if (colinfo->rightattnos[i] > 0)
real_colname = rightcolinfo->colnames[colinfo->rightattnos[i] - 1];
else
{
/* We're joining system columns --- use eref name */
real_colname = strVal(list_nth(rte->eref->colnames, i));
}
/* If child col has been dropped, no need to assign a join colname */
if (real_colname == NULL)
{
colinfo->colnames[i] = NULL;
continue;
}
/* In an unnamed join, just report child column names as-is */
if (rte->alias == NULL)
{
colinfo->colnames[i] = real_colname;
continue;
}
/* If alias already assigned, that's what to use */
if (colname == NULL)
{
/* If user wrote an alias, prefer that over real column name */
if (rte->alias && i < list_length(rte->alias->colnames))
colname = strVal(list_nth(rte->alias->colnames, i));
else
colname = real_colname;
/* Unique-ify and insert into colinfo */
colname = make_colname_unique(colname, dpns, colinfo);
colinfo->colnames[i] = colname;
}
/* Remember if any assigned aliases differ from "real" name */
if (!changed_any && strcmp(colname, real_colname) != 0)
changed_any = true;
}
/*
* Calculate number of columns the join would have if it were re-parsed
* now, and create storage for the new_colnames and is_new_col arrays.
*
* Note: colname_is_unique will be consulting new_colnames[] during the
* loops below, so its not-yet-filled entries must be zeroes.
*/
nnewcolumns = leftcolinfo->num_new_cols + rightcolinfo->num_new_cols -
list_length(colinfo->usingNames);
colinfo->num_new_cols = nnewcolumns;
colinfo->new_colnames = (char **) palloc0(nnewcolumns * sizeof(char *));
colinfo->is_new_col = (bool *) palloc0(nnewcolumns * sizeof(bool));
/*
* Generating the new_colnames array is a bit tricky since any new columns
* added since parse time must be inserted in the right places. This code
* must match the parser, which will order a join's columns as merged
* columns first (in USING-clause order), then non-merged columns from the
* left input (in attnum order), then non-merged columns from the right
* input (ditto). If one of the inputs is itself a join, its columns will
* be ordered according to the same rule, which means newly-added columns
* might not be at the end. We can figure out what's what by consulting
* the leftattnos and rightattnos arrays plus the input is_new_col arrays.
*
* In these loops, i indexes leftattnos/rightattnos (so it's join varattno
* less one), j indexes new_colnames/is_new_col, and ic/jc have similar
* meanings for the current child RTE.
*/
/* Handle merged columns; they are first and can't be new */
i = j = 0;
while (i < noldcolumns &&
colinfo->leftattnos[i] != 0 &&
colinfo->rightattnos[i] != 0)
{
/* column name is already determined and known unique */
colinfo->new_colnames[j] = colinfo->colnames[i];
colinfo->is_new_col[j] = false;
/* build bitmapsets of child attnums of merged columns */
if (colinfo->leftattnos[i] > 0)
leftmerged = bms_add_member(leftmerged, colinfo->leftattnos[i]);
if (colinfo->rightattnos[i] > 0)
rightmerged = bms_add_member(rightmerged, colinfo->rightattnos[i]);
i++, j++;
}
/* Handle non-merged left-child columns */
ic = 0;
for (jc = 0; jc < leftcolinfo->num_new_cols; jc++)
{
char *child_colname = leftcolinfo->new_colnames[jc];
if (!leftcolinfo->is_new_col[jc])
{
/* Advance ic to next non-dropped old column of left child */
while (ic < leftcolinfo->num_cols &&
leftcolinfo->colnames[ic] == NULL)
ic++;
Assert(ic < leftcolinfo->num_cols);
ic++;
/* If it is a merged column, we already processed it */
if (bms_is_member(ic, leftmerged))
continue;
/* Else, advance i to the corresponding existing join column */
while (i < colinfo->num_cols &&
colinfo->colnames[i] == NULL)
i++;
Assert(i < colinfo->num_cols);
Assert(ic == colinfo->leftattnos[i]);
/* Use the already-assigned name of this column */
colinfo->new_colnames[j] = colinfo->colnames[i];
i++;
}
else
{
/*
* Unique-ify the new child column name and assign, unless we're
* in an unnamed join, in which case just copy
*/
if (rte->alias != NULL)
{
colinfo->new_colnames[j] =
make_colname_unique(child_colname, dpns, colinfo);
if (!changed_any &&
strcmp(colinfo->new_colnames[j], child_colname) != 0)
changed_any = true;
}
else
colinfo->new_colnames[j] = child_colname;
}
colinfo->is_new_col[j] = leftcolinfo->is_new_col[jc];
j++;
}
/* Handle non-merged right-child columns in exactly the same way */
ic = 0;
for (jc = 0; jc < rightcolinfo->num_new_cols; jc++)
{
char *child_colname = rightcolinfo->new_colnames[jc];
if (!rightcolinfo->is_new_col[jc])
{
/* Advance ic to next non-dropped old column of right child */
while (ic < rightcolinfo->num_cols &&
rightcolinfo->colnames[ic] == NULL)
ic++;
Assert(ic < rightcolinfo->num_cols);
ic++;
/* If it is a merged column, we already processed it */
if (bms_is_member(ic, rightmerged))
continue;
/* Else, advance i to the corresponding existing join column */
while (i < colinfo->num_cols &&
colinfo->colnames[i] == NULL)
i++;
Assert(i < colinfo->num_cols);
Assert(ic == colinfo->rightattnos[i]);
/* Use the already-assigned name of this column */
colinfo->new_colnames[j] = colinfo->colnames[i];
i++;
}
else
{
/*
* Unique-ify the new child column name and assign, unless we're
* in an unnamed join, in which case just copy
*/
if (rte->alias != NULL)
{
colinfo->new_colnames[j] =
make_colname_unique(child_colname, dpns, colinfo);
if (!changed_any &&
strcmp(colinfo->new_colnames[j], child_colname) != 0)
changed_any = true;
}
else
colinfo->new_colnames[j] = child_colname;
}
colinfo->is_new_col[j] = rightcolinfo->is_new_col[jc];
j++;
}
/* Assert we processed the right number of columns */
#ifdef USE_ASSERT_CHECKING
while (i < colinfo->num_cols && colinfo->colnames[i] == NULL)
i++;
Assert(i == colinfo->num_cols);
Assert(j == nnewcolumns);
#endif
/*
* For a named join, print column aliases if we changed any from the child
* names. Unnamed joins cannot print aliases.
*/
if (rte->alias != NULL)
colinfo->printaliases = changed_any;
else
colinfo->printaliases = false;
}
/*
* colname_is_unique: is colname distinct from already-chosen column names?
*
* dpns is query-wide info, colinfo is for the column's RTE
*/
static bool
colname_is_unique(const char *colname, deparse_namespace *dpns,
deparse_columns *colinfo)
{
int i;
ListCell *lc;
/* Check against already-assigned column aliases within RTE */
for (i = 0; i < colinfo->num_cols; i++)
{
char *oldname = colinfo->colnames[i];
if (oldname && strcmp(oldname, colname) == 0)
return false;
}
/*
* If we're building a new_colnames array, check that too (this will be
* partially but not completely redundant with the previous checks)
*/
for (i = 0; i < colinfo->num_new_cols; i++)
{
char *oldname = colinfo->new_colnames[i];
if (oldname && strcmp(oldname, colname) == 0)
return false;
}
/* Also check against USING-column names that must be globally unique */
foreach(lc, dpns->using_names)
{
char *oldname = (char *) lfirst(lc);
if (strcmp(oldname, colname) == 0)
return false;
}
/* Also check against names already assigned for parent-join USING cols */
foreach(lc, colinfo->parentUsing)
{
char *oldname = (char *) lfirst(lc);
if (strcmp(oldname, colname) == 0)
return false;
}
return true;
}
/*
* make_colname_unique: modify colname if necessary to make it unique
*
* dpns is query-wide info, colinfo is for the column's RTE
*/
static char *
make_colname_unique(char *colname, deparse_namespace *dpns,
deparse_columns *colinfo)
{
/*
* If the selected name isn't unique, append digits to make it so. For a
* very long input name, we might have to truncate to stay within
* NAMEDATALEN.
*/
if (!colname_is_unique(colname, dpns, colinfo))
{
int colnamelen = strlen(colname);
char *modname = (char *) palloc(colnamelen + 16);
int i = 0;
do
{
i++;
for (;;)
{
memcpy(modname, colname, colnamelen);
sprintf(modname + colnamelen, "_%d", i);
if (strlen(modname) < NAMEDATALEN)
break;
/* drop chars from colname to keep all the digits */
colnamelen = pg_mbcliplen(colname, colnamelen,
colnamelen - 1);
}
} while (!colname_is_unique(modname, dpns, colinfo));
colname = modname;
}
return colname;
}
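/*
 * For example (illustrative): if "id" is already taken within the RTE, the
 * result is "id_1", then "id_2", and so on; overlong names are clipped so
 * the digits still fit within NAMEDATALEN.
 */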
/*
* expand_colnames_array_to: make colinfo->colnames at least n items long
*
* Any added array entries are initialized to zero.
*/
static void
expand_colnames_array_to(deparse_columns *colinfo, int n)
{
if (n > colinfo->num_cols)
{
if (colinfo->colnames == NULL)
colinfo->colnames = (char **) palloc0(n * sizeof(char *));
else
{
colinfo->colnames = (char **) repalloc(colinfo->colnames,
n * sizeof(char *));
memset(colinfo->colnames + colinfo->num_cols, 0,
(n - colinfo->num_cols) * sizeof(char *));
}
colinfo->num_cols = n;
}
}
/*
* identify_join_columns: figure out where columns of a join come from
*
* Fills the join-specific fields of the colinfo struct, except for
* usingNames which is filled later.
*/
static void
identify_join_columns(JoinExpr *j, RangeTblEntry *jrte,
deparse_columns *colinfo)
{
int numjoincols;
int jcolno;
int rcolno;
ListCell *lc;
/* Extract left/right child RT indexes */
if (IsA(j->larg, RangeTblRef))
colinfo->leftrti = ((RangeTblRef *) j->larg)->rtindex;
else if (IsA(j->larg, JoinExpr))
colinfo->leftrti = ((JoinExpr *) j->larg)->rtindex;
else
elog(ERROR, "unrecognized node type in jointree: %d",
(int) nodeTag(j->larg));
if (IsA(j->rarg, RangeTblRef))
colinfo->rightrti = ((RangeTblRef *) j->rarg)->rtindex;
else if (IsA(j->rarg, JoinExpr))
colinfo->rightrti = ((JoinExpr *) j->rarg)->rtindex;
else
elog(ERROR, "unrecognized node type in jointree: %d",
(int) nodeTag(j->rarg));
/* Assert children will be processed earlier than join in second pass */
Assert(colinfo->leftrti < j->rtindex);
Assert(colinfo->rightrti < j->rtindex);
/* Initialize result arrays with zeroes */
numjoincols = list_length(jrte->joinaliasvars);
Assert(numjoincols == list_length(jrte->eref->colnames));
colinfo->leftattnos = (int *) palloc0(numjoincols * sizeof(int));
colinfo->rightattnos = (int *) palloc0(numjoincols * sizeof(int));
/*
* Deconstruct RTE's joinleftcols/joinrightcols into desired format.
* Recall that the column(s) merged due to USING are the first column(s)
* of the join output. We need not do anything special while scanning
* joinleftcols, but while scanning joinrightcols we must distinguish
* merged from unmerged columns.
*/
jcolno = 0;
foreach(lc, jrte->joinleftcols)
{
int leftattno = lfirst_int(lc);
colinfo->leftattnos[jcolno++] = leftattno;
}
rcolno = 0;
foreach(lc, jrte->joinrightcols)
{
int rightattno = lfirst_int(lc);
if (rcolno < jrte->joinmergedcols) /* merged column? */
colinfo->rightattnos[rcolno] = rightattno;
else
colinfo->rightattnos[jcolno++] = rightattno;
rcolno++;
}
Assert(jcolno == numjoincols);
}
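/*
 * Worked example (hypothetical join): for "a JOIN b USING (id)" where a has
 * columns (id, x) and b has columns (id, y), the join output is (id, x, y)
 * and we end up with leftattnos = {1, 2, 0} and rightattnos = {1, 0, 2};
 * zeroes mark columns that do not come from that side.
 */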
/*
* get_rtable_name: convenience function to get a previously assigned RTE alias
*
* The RTE must belong to the topmost namespace level in "context".
*/
static char *
get_rtable_name(int rtindex, deparse_context *context)
{
deparse_namespace *dpns = (deparse_namespace *) linitial(context->namespaces);
Assert(rtindex > 0 && rtindex <= list_length(dpns->rtable_names));
return (char *) list_nth(dpns->rtable_names, rtindex - 1);
}
/*
* set_deparse_plan: set up deparse_namespace to parse subexpressions
* of a given Plan node
*
* This sets the plan, outer_plan, inner_plan, outer_tlist, inner_tlist,
* and index_tlist fields. Caller is responsible for adjusting the ancestors
* list if necessary. Note that the rtable, subplans, and ctes fields do
* not need to change when shifting attention to different plan nodes in a
* single plan tree.
*/
static void
set_deparse_plan(deparse_namespace *dpns, Plan *plan)
{
dpns->plan = plan;
/*
* We special-case Append and MergeAppend to pretend that the first child
* plan is the OUTER referent; we have to interpret OUTER Vars in their
* tlists according to one of the children, and the first one is the most
* natural choice. Likewise special-case ModifyTable to pretend that the
* first child plan is the OUTER referent; this is to support RETURNING
* lists containing references to non-target relations.
*/
if (IsA(plan, Append))
dpns->outer_plan = linitial(((Append *) plan)->appendplans);
else if (IsA(plan, MergeAppend))
dpns->outer_plan = linitial(((MergeAppend *) plan)->mergeplans);
else if (IsA(plan, ModifyTable))
dpns->outer_plan = linitial(((ModifyTable *) plan)->plans);
else
dpns->outer_plan = outerPlan(plan);
if (dpns->outer_plan)
dpns->outer_tlist = dpns->outer_plan->targetlist;
else
dpns->outer_tlist = NIL;
/*
* For a SubqueryScan, pretend the subplan is INNER referent. (We don't
* use OUTER because that could someday conflict with the normal meaning.)
* Likewise, for a CteScan, pretend the subquery's plan is INNER referent.
* For ON CONFLICT .. UPDATE we just need the inner tlist to point to the
* excluded expression's tlist. (Similar to the SubqueryScan we don't want
* to reuse OUTER, it's used for RETURNING in some modify table cases,
* although not INSERT .. CONFLICT).
*/
if (IsA(plan, SubqueryScan))
dpns->inner_plan = ((SubqueryScan *) plan)->subplan;
else if (IsA(plan, CteScan))
dpns->inner_plan = list_nth(dpns->subplans,
((CteScan *) plan)->ctePlanId - 1);
else if (IsA(plan, ModifyTable))
dpns->inner_plan = plan;
else
dpns->inner_plan = innerPlan(plan);
if (IsA(plan, ModifyTable))
dpns->inner_tlist = ((ModifyTable *) plan)->exclRelTlist;
else if (dpns->inner_plan)
dpns->inner_tlist = dpns->inner_plan->targetlist;
else
dpns->inner_tlist = NIL;
/* Set up referent for INDEX_VAR Vars, if needed */
if (IsA(plan, IndexOnlyScan))
dpns->index_tlist = ((IndexOnlyScan *) plan)->indextlist;
else if (IsA(plan, ForeignScan))
dpns->index_tlist = ((ForeignScan *) plan)->fdw_scan_tlist;
else if (IsA(plan, CustomScan))
dpns->index_tlist = ((CustomScan *) plan)->custom_scan_tlist;
else
dpns->index_tlist = NIL;
}
/*
* push_child_plan: temporarily transfer deparsing attention to a child plan
*
* When expanding an OUTER_VAR or INNER_VAR reference, we must adjust the
* deparse context in case the referenced expression itself uses
* OUTER_VAR/INNER_VAR. We modify the top stack entry in-place to avoid
* affecting levelsup issues (although in a Plan tree there really shouldn't
* be any).
*
* Caller must provide a local deparse_namespace variable to save the
* previous state for pop_child_plan.
*/
static void
push_child_plan(deparse_namespace *dpns, Plan *plan,
deparse_namespace *save_dpns)
{
/* Save state for restoration later */
*save_dpns = *dpns;
/* Link current plan node into ancestors list */
dpns->ancestors = lcons(dpns->plan, dpns->ancestors);
/* Set attention on selected child */
set_deparse_plan(dpns, plan);
}
/*
* pop_child_plan: undo the effects of push_child_plan
*/
static void
pop_child_plan(deparse_namespace *dpns, deparse_namespace *save_dpns)
{
List *ancestors;
/* Get rid of ancestors list cell added by push_child_plan */
ancestors = list_delete_first(dpns->ancestors);
/* Restore fields changed by push_child_plan */
*dpns = *save_dpns;
/* Make sure dpns->ancestors is right (may be unnecessary) */
dpns->ancestors = ancestors;
}
/*
* push_ancestor_plan: temporarily transfer deparsing attention to an
* ancestor plan
*
* When expanding a Param reference, we must adjust the deparse context
* to match the plan node that contains the expression being printed;
* otherwise we'd fail if that expression itself contains a Param or
* OUTER_VAR/INNER_VAR/INDEX_VAR variable.
*
* The target ancestor is conveniently identified by the ListCell holding it
* in dpns->ancestors.
*
* Caller must provide a local deparse_namespace variable to save the
* previous state for pop_ancestor_plan.
*/
static void
push_ancestor_plan(deparse_namespace *dpns, ListCell *ancestor_cell,
deparse_namespace *save_dpns)
{
Plan *plan = (Plan *) lfirst(ancestor_cell);
/* Save state for restoration later */
*save_dpns = *dpns;
/* Build a new ancestor list with just this node's ancestors */
dpns->ancestors =
list_copy_tail(dpns->ancestors,
list_cell_number(dpns->ancestors, ancestor_cell) + 1);
/* Set attention on selected ancestor */
set_deparse_plan(dpns, plan);
}
/*
* pop_ancestor_plan: undo the effects of push_ancestor_plan
*/
static void
pop_ancestor_plan(deparse_namespace *dpns, deparse_namespace *save_dpns)
{
/* Free the ancestor list made in push_ancestor_plan */
list_free(dpns->ancestors);
/* Restore fields changed by push_ancestor_plan */
*dpns = *save_dpns;
}
/* ----------
* make_ruledef - reconstruct the CREATE RULE command
* for a given pg_rewrite tuple
* ----------
*/
static void
make_ruledef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc,
int prettyFlags)
{
char *rulename;
char ev_type;
Oid ev_class;
bool is_instead;
char *ev_qual;
char *ev_action;
List *actions = NIL;
Relation ev_relation;
TupleDesc viewResultDesc = NULL;
int fno;
Datum dat;
bool isnull;
/*
* Get the attribute values from the rules tuple
*/
fno = SPI_fnumber(rulettc, "rulename");
dat = SPI_getbinval(ruletup, rulettc, fno, &isnull);
Assert(!isnull);
rulename = NameStr(*(DatumGetName(dat)));
fno = SPI_fnumber(rulettc, "ev_type");
dat = SPI_getbinval(ruletup, rulettc, fno, &isnull);
Assert(!isnull);
ev_type = DatumGetChar(dat);
fno = SPI_fnumber(rulettc, "ev_class");
dat = SPI_getbinval(ruletup, rulettc, fno, &isnull);
Assert(!isnull);
ev_class = DatumGetObjectId(dat);
fno = SPI_fnumber(rulettc, "is_instead");
dat = SPI_getbinval(ruletup, rulettc, fno, &isnull);
Assert(!isnull);
is_instead = DatumGetBool(dat);
/* these could be nulls */
fno = SPI_fnumber(rulettc, "ev_qual");
ev_qual = SPI_getvalue(ruletup, rulettc, fno);
fno = SPI_fnumber(rulettc, "ev_action");
ev_action = SPI_getvalue(ruletup, rulettc, fno);
if (ev_action != NULL)
actions = (List *) stringToNode(ev_action);
ev_relation = table_open(ev_class, AccessShareLock);
/*
* Build the rules definition text
*/
appendStringInfo(buf, "CREATE RULE %s AS",
quote_identifier(rulename));
if (prettyFlags & PRETTYFLAG_INDENT)
appendStringInfoString(buf, "\n ON ");
else
appendStringInfoString(buf, " ON ");
/* The event the rule is fired for */
switch (ev_type)
{
case '1':
appendStringInfoString(buf, "SELECT");
viewResultDesc = RelationGetDescr(ev_relation);
break;
case '2':
appendStringInfoString(buf, "UPDATE");
break;
case '3':
appendStringInfoString(buf, "INSERT");
break;
case '4':
appendStringInfoString(buf, "DELETE");
break;
default:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("rule \"%s\" has unsupported event type %d",
rulename, ev_type)));
break;
}
/* The relation the rule is fired on */
appendStringInfo(buf, " TO %s",
(prettyFlags & PRETTYFLAG_SCHEMA) ?
generate_relation_name(ev_class, NIL) :
generate_qualified_relation_name(ev_class));
/* If the rule has an event qualification, add it */
if (ev_qual == NULL)
ev_qual = "";
if (strlen(ev_qual) > 0 && strcmp(ev_qual, "<>") != 0)
{
Node *qual;
Query *query;
deparse_context context;
deparse_namespace dpns;
if (prettyFlags & PRETTYFLAG_INDENT)
appendStringInfoString(buf, "\n ");
appendStringInfoString(buf, " WHERE ");
qual = stringToNode(ev_qual);
/*
* We need to make a context for recognizing any Vars in the qual
* (which can only be references to OLD and NEW). Use the rtable of
* the first query in the action list for this purpose.
*/
query = (Query *) linitial(actions);
/*
* If the action is INSERT...SELECT, OLD/NEW have been pushed down
* into the SELECT, and that's what we need to look at. (Ugly kluge
* ... try to fix this when we redesign querytrees.)
*/
query = getInsertSelectQuery(query, NULL);
/* Must acquire locks right away; see notes in get_query_def() */
AcquireRewriteLocks(query, false, false);
context.buf = buf;
context.namespaces = list_make1(&dpns);
context.windowClause = NIL;
context.windowTList = NIL;
context.varprefix = (list_length(query->rtable) != 1);
context.prettyFlags = prettyFlags;
context.wrapColumn = WRAP_COLUMN_DEFAULT;
context.indentLevel = PRETTYINDENT_STD;
context.special_exprkind = EXPR_KIND_NONE;
context.appendparents = NULL;
set_deparse_for_query(&dpns, query, NIL);
get_rule_expr(qual, &context, false);
}
appendStringInfoString(buf, " DO ");
/* The INSTEAD keyword (if so) */
if (is_instead)
appendStringInfoString(buf, "INSTEAD ");
/* Finally, the rule's actions */
if (list_length(actions) > 1)
{
ListCell *action;
Query *query;
appendStringInfoChar(buf, '(');
foreach(action, actions)
{
query = (Query *) lfirst(action);
get_query_def(query, buf, NIL, viewResultDesc,
prettyFlags, WRAP_COLUMN_DEFAULT, 0);
if (prettyFlags)
appendStringInfoString(buf, ";\n");
else
appendStringInfoString(buf, "; ");
}
appendStringInfoString(buf, ");");
}
else if (list_length(actions) == 0)
{
appendStringInfoString(buf, "NOTHING;");
}
else
{
Query *query;
query = (Query *) linitial(actions);
get_query_def(query, buf, NIL, viewResultDesc,
prettyFlags, WRAP_COLUMN_DEFAULT, 0);
appendStringInfoChar(buf, ';');
}
table_close(ev_relation, AccessShareLock);
}
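/*
 * Illustrative sketch of make_ruledef() output (names are hypothetical;
 * exact spacing, schema qualification and column lists depend on
 * prettyFlags and catalog state).  A rule defined as
 *
 *     CREATE RULE log_upd AS ON UPDATE TO t
 *         DO ALSO INSERT INTO t_log VALUES (old.id);
 *
 * is reconstructed as text of roughly the shape
 *
 *     CREATE RULE log_upd AS ON UPDATE TO public.t
 *         DO INSERT INTO t_log (id) VALUES (old.id);
 */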
/* ----------
* make_viewdef - reconstruct the SELECT part of a
* view rewrite rule
* ----------
*/
static void
make_viewdef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc,
int prettyFlags, int wrapColumn)
{
Query *query;
char ev_type;
Oid ev_class;
bool is_instead;
char *ev_qual;
char *ev_action;
List *actions = NIL;
Relation ev_relation;
int fno;
Datum dat;
bool isnull;
/*
* Get the attribute values from the rules tuple
*/
fno = SPI_fnumber(rulettc, "ev_type");
dat = SPI_getbinval(ruletup, rulettc, fno, &isnull);
Assert(!isnull);
ev_type = DatumGetChar(dat);
fno = SPI_fnumber(rulettc, "ev_class");
dat = SPI_getbinval(ruletup, rulettc, fno, &isnull);
Assert(!isnull);
ev_class = DatumGetObjectId(dat);
fno = SPI_fnumber(rulettc, "is_instead");
dat = SPI_getbinval(ruletup, rulettc, fno, &isnull);
Assert(!isnull);
is_instead = DatumGetBool(dat);
/* these could be nulls */
fno = SPI_fnumber(rulettc, "ev_qual");
ev_qual = SPI_getvalue(ruletup, rulettc, fno);
fno = SPI_fnumber(rulettc, "ev_action");
ev_action = SPI_getvalue(ruletup, rulettc, fno);
if (ev_action != NULL)
actions = (List *) stringToNode(ev_action);
if (list_length(actions) != 1)
{
/* keep output buffer empty and leave */
return;
}
query = (Query *) linitial(actions);
if (ev_type != '1' || !is_instead ||
strcmp(ev_qual, "<>") != 0 || query->commandType != CMD_SELECT)
{
/* keep output buffer empty and leave */
return;
}
ev_relation = table_open(ev_class, AccessShareLock);
get_query_def(query, buf, NIL, RelationGetDescr(ev_relation),
prettyFlags, wrapColumn, 0);
appendStringInfoChar(buf, ';');
table_close(ev_relation, AccessShareLock);
}
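/*
 * Illustrative sketch: for a view's ON SELECT rule (the "_RETURN" rule),
 * make_viewdef() emits just the view's SELECT statement, e.g. roughly
 *
 *     SELECT base_table.id, base_table.name FROM base_table WHERE base_table.active;
 *
 * Anything that doesn't have the expected single INSTEAD SELECT shape
 * leaves the output buffer empty, per the early returns above.
 */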
/* ----------
* get_query_def - Parse back one query parsetree
*
* If resultDesc is not NULL, then it is the output tuple descriptor for
* the view represented by a SELECT query.
* ----------
*/
static void
get_query_def(Query *query, StringInfo buf, List *parentnamespace,
TupleDesc resultDesc,
int prettyFlags, int wrapColumn, int startIndent)
{
deparse_context context;
deparse_namespace dpns;
/* Guard against excessively long or deeply-nested queries */
CHECK_FOR_INTERRUPTS();
check_stack_depth();
/*
* Before we begin to examine the query, acquire locks on referenced
* relations, and fix up deleted columns in JOIN RTEs. This ensures
* consistent results. Note we assume it's OK to scribble on the passed
* querytree!
*
* We are only deparsing the query (we are not about to execute it), so we
* only need AccessShareLock on the relations it mentions.
*/
AcquireRewriteLocks(query, false, false);
context.buf = buf;
context.namespaces = lcons(&dpns, list_copy(parentnamespace));
context.windowClause = NIL;
context.windowTList = NIL;
context.varprefix = (parentnamespace != NIL ||
list_length(query->rtable) != 1);
context.prettyFlags = prettyFlags;
context.wrapColumn = wrapColumn;
context.indentLevel = startIndent;
context.special_exprkind = EXPR_KIND_NONE;
context.appendparents = NULL;
set_deparse_for_query(&dpns, query, parentnamespace);
switch (query->commandType)
{
case CMD_SELECT:
get_select_query_def(query, &context, resultDesc);
break;
case CMD_UPDATE:
get_update_query_def(query, &context);
break;
case CMD_INSERT:
get_insert_query_def(query, &context);
break;
case CMD_DELETE:
get_delete_query_def(query, &context);
break;
case CMD_NOTHING:
appendStringInfoString(buf, "NOTHING");
break;
case CMD_UTILITY:
get_utility_query_def(query, &context);
break;
default:
elog(ERROR, "unrecognized query command type: %d",
query->commandType);
break;
}
}
/* ----------
* get_values_def - Parse back a VALUES list
* ----------
*/
static void
get_values_def(List *values_lists, deparse_context *context)
{
StringInfo buf = context->buf;
bool first_list = true;
ListCell *vtl;
appendStringInfoString(buf, "VALUES ");
foreach(vtl, values_lists)
{
List *sublist = (List *) lfirst(vtl);
bool first_col = true;
ListCell *lc;
if (first_list)
first_list = false;
else
appendStringInfoString(buf, ", ");
appendStringInfoChar(buf, '(');
foreach(lc, sublist)
{
Node *col = (Node *) lfirst(lc);
if (first_col)
first_col = false;
else
appendStringInfoChar(buf, ',');
/*
* Print the value. Whole-row Vars need special treatment.
*/
get_rule_expr_toplevel(col, context, false);
}
appendStringInfoChar(buf, ')');
}
}
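/*
 * Illustrative sketch of get_values_def() output: a two-row, two-column
 * VALUES list comes back roughly as
 *
 *     VALUES (1,'a'), (2,'b')
 *
 * i.e. a bare ',' between columns and ", " between rows, matching the
 * appends above.
 */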
/* ----------
* get_with_clause - Parse back a WITH clause
* ----------
*/
static void
get_with_clause(Query *query, deparse_context *context)
{
StringInfo buf = context->buf;
const char *sep;
ListCell *l;
if (query->cteList == NIL)
return;
if (PRETTY_INDENT(context))
{
context->indentLevel += PRETTYINDENT_STD;
appendStringInfoChar(buf, ' ');
}
if (query->hasRecursive)
sep = "WITH RECURSIVE ";
else
sep = "WITH ";
foreach(l, query->cteList)
{
CommonTableExpr *cte = (CommonTableExpr *) lfirst(l);
appendStringInfoString(buf, sep);
appendStringInfoString(buf, quote_identifier(cte->ctename));
if (cte->aliascolnames)
{
bool first = true;
ListCell *col;
appendStringInfoChar(buf, '(');
foreach(col, cte->aliascolnames)
{
if (first)
first = false;
else
appendStringInfoString(buf, ", ");
appendStringInfoString(buf,
quote_identifier(strVal(lfirst(col))));
}
appendStringInfoChar(buf, ')');
}
appendStringInfoString(buf, " AS ");
switch (cte->ctematerialized)
{
case CTEMaterializeDefault:
break;
case CTEMaterializeAlways:
appendStringInfoString(buf, "MATERIALIZED ");
break;
case CTEMaterializeNever:
appendStringInfoString(buf, "NOT MATERIALIZED ");
break;
}
appendStringInfoChar(buf, '(');
if (PRETTY_INDENT(context))
appendContextKeyword(context, "", 0, 0, 0);
get_query_def((Query *) cte->ctequery, buf, context->namespaces, NULL,
context->prettyFlags, context->wrapColumn,
context->indentLevel);
if (PRETTY_INDENT(context))
appendContextKeyword(context, "", 0, 0, 0);
appendStringInfoChar(buf, ')');
sep = ", ";
}
if (PRETTY_INDENT(context))
{
context->indentLevel -= PRETTYINDENT_STD;
appendContextKeyword(context, "", 0, 0, 0);
}
else
appendStringInfoChar(buf, ' ');
}
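/*
 * Illustrative sketch of get_with_clause() output (hypothetical CTE,
 * pretty-printing off):
 *
 *     WITH RECURSIVE r(n) AS (SELECT 1 UNION ALL SELECT n + 1 FROM r) ...
 *
 * MATERIALIZED / NOT MATERIALIZED appears only when the CTE carries an
 * explicit materialization option.
 */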
/* ----------
* get_select_query_def - Parse back a SELECT parsetree
* ----------
*/
static void
get_select_query_def(Query *query, deparse_context *context,
TupleDesc resultDesc)
{
StringInfo buf = context->buf;
List *save_windowclause;
List *save_windowtlist;
bool force_colno;
ListCell *l;
/* Insert the WITH clause if given */
get_with_clause(query, context);
/* Set up context for possible window functions */
save_windowclause = context->windowClause;
context->windowClause = query->windowClause;
save_windowtlist = context->windowTList;
context->windowTList = query->targetList;
/*
* If the Query node has a setOperations tree, then it's the top level of
* a UNION/INTERSECT/EXCEPT query; only the WITH, ORDER BY and LIMIT
* fields are interesting in the top query itself.
*/
if (query->setOperations)
{
get_setop_query(query->setOperations, query, context, resultDesc);
/* ORDER BY clauses must be simple in this case */
force_colno = true;
}
else
{
get_basic_select_query(query, context, resultDesc);
force_colno = false;
}
/* Add the ORDER BY clause if given */
if (query->sortClause != NIL)
{
appendContextKeyword(context, " ORDER BY ",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
get_rule_orderby(query->sortClause, query->targetList,
force_colno, context);
}
/*
* Add the LIMIT/OFFSET clauses if given. If non-default options, use the
* standard spelling of LIMIT.
*/
if (query->limitOffset != NULL)
{
appendContextKeyword(context, " OFFSET ",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
get_rule_expr(query->limitOffset, context, false);
}
if (query->limitCount != NULL)
{
if (query->limitOption == LIMIT_OPTION_WITH_TIES)
{
appendContextKeyword(context, " FETCH FIRST ",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
get_rule_expr(query->limitCount, context, false);
appendStringInfoString(buf, " ROWS WITH TIES");
}
else
{
appendContextKeyword(context, " LIMIT ",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
if (IsA(query->limitCount, Const) &&
((Const *) query->limitCount)->constisnull)
appendStringInfoString(buf, "ALL");
else
get_rule_expr(query->limitCount, context, false);
}
}
/* Add FOR [KEY] UPDATE/SHARE clauses if present */
if (query->hasForUpdate)
{
foreach(l, query->rowMarks)
{
RowMarkClause *rc = (RowMarkClause *) lfirst(l);
/* don't print implicit clauses */
if (rc->pushedDown)
continue;
switch (rc->strength)
{
case LCS_NONE:
/* we intentionally throw an error for LCS_NONE */
elog(ERROR, "unrecognized LockClauseStrength %d",
(int) rc->strength);
break;
case LCS_FORKEYSHARE:
appendContextKeyword(context, " FOR KEY SHARE",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
break;
case LCS_FORSHARE:
appendContextKeyword(context, " FOR SHARE",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
break;
case LCS_FORNOKEYUPDATE:
appendContextKeyword(context, " FOR NO KEY UPDATE",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
break;
case LCS_FORUPDATE:
appendContextKeyword(context, " FOR UPDATE",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
break;
}
appendStringInfo(buf, " OF %s",
quote_identifier(get_rtable_name(rc->rti,
context)));
if (rc->waitPolicy == LockWaitError)
appendStringInfoString(buf, " NOWAIT");
else if (rc->waitPolicy == LockWaitSkip)
appendStringInfoString(buf, " SKIP LOCKED");
}
}
context->windowClause = save_windowclause;
context->windowTList = save_windowtlist;
}
/*
* Detect whether query looks like SELECT ... FROM VALUES(),
* with no need to rename the output columns of the VALUES RTE.
* If so, return the VALUES RTE. Otherwise return NULL.
*/
static RangeTblEntry *
get_simple_values_rte(Query *query, TupleDesc resultDesc)
{
RangeTblEntry *result = NULL;
ListCell *lc;
/*
* We want to detect a match even if the Query also contains OLD or NEW
* rule RTEs. So the idea is to scan the rtable and see if there is only
* one inFromCl RTE that is a VALUES RTE.
*/
foreach(lc, query->rtable)
{
RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
if (rte->rtekind == RTE_VALUES && rte->inFromCl)
{
if (result)
return NULL; /* multiple VALUES (probably not possible) */
result = rte;
}
else if (rte->rtekind == RTE_RELATION && !rte->inFromCl)
continue; /* ignore rule entries */
else
return NULL; /* something else -> not simple VALUES */
}
/*
* We don't need to check the targetlist in any great detail, because
* parser/analyze.c will never generate a "bare" VALUES RTE --- they only
* appear inside auto-generated sub-queries with very restricted
* structure. However, DefineView might have modified the tlist by
* injecting new column aliases, or we might have some other column
* aliases forced by a resultDesc. We can only simplify if the RTE's
* column names match the names that get_target_list() would select.
*/
if (result)
{
ListCell *lcn;
int colno;
if (list_length(query->targetList) != list_length(result->eref->colnames))
return NULL; /* this probably cannot happen */
colno = 0;
forboth(lc, query->targetList, lcn, result->eref->colnames)
{
TargetEntry *tle = (TargetEntry *) lfirst(lc);
char *cname = strVal(lfirst(lcn));
char *colname;
if (tle->resjunk)
return NULL; /* this probably cannot happen */
/* compute name that get_target_list would use for column */
colno++;
if (resultDesc && colno <= resultDesc->natts)
colname = NameStr(TupleDescAttr(resultDesc, colno - 1)->attname);
else
colname = tle->resname;
/* does it match the VALUES RTE? */
if (colname == NULL || strcmp(colname, cname) != 0)
return NULL; /* column name has been changed */
}
}
return result;
}
static void
get_basic_select_query(Query *query, deparse_context *context,
TupleDesc resultDesc)
{
StringInfo buf = context->buf;
RangeTblEntry *values_rte;
char *sep;
ListCell *l;
if (PRETTY_INDENT(context))
{
context->indentLevel += PRETTYINDENT_STD;
appendStringInfoChar(buf, ' ');
}
/*
* If the query looks like SELECT * FROM (VALUES ...), then print just the
* VALUES part. This reverses what transformValuesClause() did at parse
* time.
*/
values_rte = get_simple_values_rte(query, resultDesc);
if (values_rte)
{
get_values_def(values_rte->values_lists, context);
return;
}
/*
* Build up the query string - first we say SELECT
*/
appendStringInfoString(buf, "SELECT");
/* Add the DISTINCT clause if given */
if (query->distinctClause != NIL)
{
if (query->hasDistinctOn)
{
appendStringInfoString(buf, " DISTINCT ON (");
sep = "";
foreach(l, query->distinctClause)
{
SortGroupClause *srt = (SortGroupClause *) lfirst(l);
appendStringInfoString(buf, sep);
get_rule_sortgroupclause(srt->tleSortGroupRef, query->targetList,
false, context);
sep = ", ";
}
appendStringInfoChar(buf, ')');
}
else
appendStringInfoString(buf, " DISTINCT");
}
/* Then we tell what to select (the targetlist) */
get_target_list(query->targetList, context, resultDesc);
/* Add the FROM clause if needed */
get_from_clause(query, " FROM ", context);
/* Add the WHERE clause if given */
if (query->jointree->quals != NULL)
{
appendContextKeyword(context, " WHERE ",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
get_rule_expr(query->jointree->quals, context, false);
}
/* Add the GROUP BY clause if given */
if (query->groupClause != NULL || query->groupingSets != NULL)
{
ParseExprKind save_exprkind;
appendContextKeyword(context, " GROUP BY ",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
save_exprkind = context->special_exprkind;
context->special_exprkind = EXPR_KIND_GROUP_BY;
if (query->groupingSets == NIL)
{
sep = "";
foreach(l, query->groupClause)
{
SortGroupClause *grp = (SortGroupClause *) lfirst(l);
appendStringInfoString(buf, sep);
get_rule_sortgroupclause(grp->tleSortGroupRef, query->targetList,
false, context);
sep = ", ";
}
}
else
{
sep = "";
foreach(l, query->groupingSets)
{
GroupingSet *grp = lfirst(l);
appendStringInfoString(buf, sep);
get_rule_groupingset(grp, query->targetList, true, context);
sep = ", ";
}
}
context->special_exprkind = save_exprkind;
}
/* Add the HAVING clause if given */
if (query->havingQual != NULL)
{
appendContextKeyword(context, " HAVING ",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
get_rule_expr(query->havingQual, context, false);
}
/* Add the WINDOW clause if needed */
if (query->windowClause != NIL)
get_rule_windowclause(query, context);
}
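/*
 * Illustrative sketch: get_basic_select_query() emits clauses in the fixed
 * order used above, so a query exercising most of them comes back roughly
 * as
 *
 *     SELECT DISTINCT ON (a) a, sum(b) FROM t WHERE c
 *         GROUP BY a HAVING sum(b) > 0 WINDOW w AS (...)
 *
 * ORDER BY, LIMIT/OFFSET and locking clauses are appended afterwards by
 * get_select_query_def().
 */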
/* ----------
* get_target_list - Parse back a SELECT target list
*
* This is also used for RETURNING lists in INSERT/UPDATE/DELETE.
* ----------
*/
static void
get_target_list(List *targetList, deparse_context *context,
TupleDesc resultDesc)
{
StringInfo buf = context->buf;
StringInfoData targetbuf;
bool last_was_multiline = false;
char *sep;
int colno;
ListCell *l;
/* we use targetbuf to hold each TLE's text temporarily */
initStringInfo(&targetbuf);
sep = " ";
colno = 0;
foreach(l, targetList)
{
TargetEntry *tle = (TargetEntry *) lfirst(l);
char *colname;
char *attname;
if (tle->resjunk)
continue; /* ignore junk entries */
appendStringInfoString(buf, sep);
sep = ", ";
colno++;
/*
* Put the new field text into targetbuf so we can decide after we've
* got it whether or not it needs to go on a new line.
*/
resetStringInfo(&targetbuf);
context->buf = &targetbuf;
/*
* We special-case Var nodes rather than using get_rule_expr. This is
* needed because get_rule_expr will display a whole-row Var as
* "foo.*", which is the preferred notation in most contexts, but at
* the top level of a SELECT list it's not right (the parser will
* expand that notation into multiple columns, yielding behavior
* different from a whole-row Var). We need to call get_variable
* directly so that we can tell it to do the right thing, and so that
* we can get the attribute name which is the default AS label.
*/
if (tle->expr && (IsA(tle->expr, Var)))
{
attname = get_variable((Var *) tle->expr, 0, true, context);
}
else
{
get_rule_expr((Node *) tle->expr, context, true);
/* We'll show the AS name unless it's this: */
attname = "?column?";
}
/*
* Figure out what the result column should be called. In the context
* of a view, use the view's tuple descriptor (so as to pick up the
* effects of any column RENAME that's been done on the view).
* Otherwise, just use what we can find in the TLE.
*/
if (resultDesc && colno <= resultDesc->natts)
colname = NameStr(TupleDescAttr(resultDesc, colno - 1)->attname);
else
colname = tle->resname;
/* Show AS unless the column's name is correct as-is */
if (colname) /* resname could be NULL */
{
if (attname == NULL || strcmp(attname, colname) != 0)
appendStringInfo(&targetbuf, " AS %s", quote_identifier(colname));
}
/* Restore context's output buffer */
context->buf = buf;
/* Consider line-wrapping if enabled */
if (PRETTY_INDENT(context) && context->wrapColumn >= 0)
{
int leading_nl_pos;
/* Does the new field start with a new line? */
if (targetbuf.len > 0 && targetbuf.data[0] == '\n')
leading_nl_pos = 0;
else
leading_nl_pos = -1;
/* If so, we shouldn't add anything */
if (leading_nl_pos >= 0)
{
/* instead, remove any trailing spaces currently in buf */
removeStringInfoSpaces(buf);
}
else
{
char *trailing_nl;
/* Locate the start of the current line in the output buffer */
trailing_nl = strrchr(buf->data, '\n');
if (trailing_nl == NULL)
trailing_nl = buf->data;
else
trailing_nl++;
/*
* Add a newline, plus some indentation, if the new field is
* not the first and either the new field would cause an
* overflow or the last field used more than one line.
*/
if (colno > 1 &&
((strlen(trailing_nl) + targetbuf.len > context->wrapColumn) ||
last_was_multiline))
appendContextKeyword(context, "", -PRETTYINDENT_STD,
PRETTYINDENT_STD, PRETTYINDENT_VAR);
}
/* Remember this field's multiline status for next iteration */
last_was_multiline =
(strchr(targetbuf.data + leading_nl_pos + 1, '\n') != NULL);
}
/* Add the new field */
appendBinaryStringInfo(buf, targetbuf.data, targetbuf.len);
}
/* clean up */
pfree(targetbuf.data);
}
static void
get_setop_query(Node *setOp, Query *query, deparse_context *context,
TupleDesc resultDesc)
{
StringInfo buf = context->buf;
bool need_paren;
/* Guard against excessively long or deeply-nested queries */
CHECK_FOR_INTERRUPTS();
check_stack_depth();
if (IsA(setOp, RangeTblRef))
{
RangeTblRef *rtr = (RangeTblRef *) setOp;
RangeTblEntry *rte = rt_fetch(rtr->rtindex, query->rtable);
Query *subquery = rte->subquery;
Assert(subquery != NULL);
Assert(subquery->setOperations == NULL);
/* Need parens if WITH, ORDER BY, FOR UPDATE, or LIMIT; see gram.y */
need_paren = (subquery->cteList ||
subquery->sortClause ||
subquery->rowMarks ||
subquery->limitOffset ||
subquery->limitCount);
if (need_paren)
appendStringInfoChar(buf, '(');
get_query_def(subquery, buf, context->namespaces, resultDesc,
context->prettyFlags, context->wrapColumn,
context->indentLevel);
if (need_paren)
appendStringInfoChar(buf, ')');
}
else if (IsA(setOp, SetOperationStmt))
{
SetOperationStmt *op = (SetOperationStmt *) setOp;
int subindent;
/*
* We force parens when nesting two SetOperationStmts, except when the
* lefthand input is another setop of the same kind. Syntactically,
* we could omit parens in rather more cases, but it seems best to use
* parens to flag cases where the setop operator changes. If we use
* parens, we also increase the indentation level for the child query.
*
* There are some cases in which parens are needed around a leaf query
* too, but those are more easily handled at the next level down (see
* code above).
*/
if (IsA(op->larg, SetOperationStmt))
{
SetOperationStmt *lop = (SetOperationStmt *) op->larg;
if (op->op == lop->op && op->all == lop->all)
need_paren = false;
else
need_paren = true;
}
else
need_paren = false;
if (need_paren)
{
appendStringInfoChar(buf, '(');
subindent = PRETTYINDENT_STD;
appendContextKeyword(context, "", subindent, 0, 0);
}
else
subindent = 0;
get_setop_query(op->larg, query, context, resultDesc);
if (need_paren)
appendContextKeyword(context, ") ", -subindent, 0, 0);
else if (PRETTY_INDENT(context))
appendContextKeyword(context, "", -subindent, 0, 0);
else
appendStringInfoChar(buf, ' ');
switch (op->op)
{
case SETOP_UNION:
appendStringInfoString(buf, "UNION ");
break;
case SETOP_INTERSECT:
appendStringInfoString(buf, "INTERSECT ");
break;
case SETOP_EXCEPT:
appendStringInfoString(buf, "EXCEPT ");
break;
default:
elog(ERROR, "unrecognized set op: %d",
(int) op->op);
}
if (op->all)
appendStringInfoString(buf, "ALL ");
/* Always parenthesize if RHS is another setop */
need_paren = IsA(op->rarg, SetOperationStmt);
/*
* The indentation code here is deliberately a bit different from that
* for the lefthand input, because we want the line breaks in
* different places.
*/
if (need_paren)
{
appendStringInfoChar(buf, '(');
subindent = PRETTYINDENT_STD;
}
else
subindent = 0;
appendContextKeyword(context, "", subindent, 0, 0);
get_setop_query(op->rarg, query, context, resultDesc);
if (PRETTY_INDENT(context))
context->indentLevel -= subindent;
if (need_paren)
appendContextKeyword(context, ")", 0, 0, 0);
}
else
{
elog(ERROR, "unrecognized node type: %d",
(int) nodeTag(setOp));
}
}
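/*
 * Illustrative sketch of the parenthesization policy above: when the set
 * operator changes, the nested setop is parenthesized, e.g.
 *
 *     (SELECT a FROM t1 UNION SELECT a FROM t2) EXCEPT SELECT a FROM t3
 *
 * whereas a left-deep chain of the same operator (... UNION ... UNION ...)
 * is emitted without extra parentheses; a setop on the right-hand side is
 * always parenthesized.
 */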
/*
* Display a sort/group clause.
*
* Also returns the expression tree, so caller need not find it again.
*/
static Node *
get_rule_sortgroupclause(Index ref, List *tlist, bool force_colno,
deparse_context *context)
{
StringInfo buf = context->buf;
TargetEntry *tle;
Node *expr;
tle = get_sortgroupref_tle(ref, tlist);
expr = (Node *) tle->expr;
/*
* Use column-number form if requested by caller. Otherwise, if
* expression is a constant, force it to be dumped with an explicit cast
* as decoration --- this is because a simple integer constant is
* ambiguous (and will be misinterpreted by findTargetlistEntry()) if we
* dump it without any decoration. If it's anything more complex than a
* simple Var, then force extra parens around it, to ensure it can't be
* misinterpreted as a cube() or rollup() construct.
*/
if (force_colno)
{
Assert(!tle->resjunk);
appendStringInfo(buf, "%d", tle->resno);
}
else if (expr && IsA(expr, Const))
get_const_expr((Const *) expr, context, 1);
else if (!expr || IsA(expr, Var))
get_rule_expr(expr, context, true);
else
{
/*
* We must force parens for function-like expressions even if
* PRETTY_PAREN is off, since those are the ones in danger of
* misparsing. For other expressions we need to force them only if
* PRETTY_PAREN is on, since otherwise the expression will output them
* itself. (We can't skip the parens.)
*/
bool need_paren = (PRETTY_PAREN(context)
|| IsA(expr, FuncExpr)
|| IsA(expr, Aggref)
|| IsA(expr, WindowFunc));
if (need_paren)
appendStringInfoChar(context->buf, '(');
get_rule_expr(expr, context, true);
if (need_paren)
appendStringInfoChar(context->buf, ')');
}
return expr;
}
/*
* Display a GroupingSet
*/
static void
get_rule_groupingset(GroupingSet *gset, List *targetlist,
bool omit_parens, deparse_context *context)
{
ListCell *l;
StringInfo buf = context->buf;
bool omit_child_parens = true;
char *sep = "";
switch (gset->kind)
{
case GROUPING_SET_EMPTY:
appendStringInfoString(buf, "()");
return;
case GROUPING_SET_SIMPLE:
{
if (!omit_parens || list_length(gset->content) != 1)
appendStringInfoChar(buf, '(');
foreach(l, gset->content)
{
Index ref = lfirst_int(l);
appendStringInfoString(buf, sep);
get_rule_sortgroupclause(ref, targetlist,
false, context);
sep = ", ";
}
if (!omit_parens || list_length(gset->content) != 1)
appendStringInfoChar(buf, ')');
}
return;
case GROUPING_SET_ROLLUP:
appendStringInfoString(buf, "ROLLUP(");
break;
case GROUPING_SET_CUBE:
appendStringInfoString(buf, "CUBE(");
break;
case GROUPING_SET_SETS:
appendStringInfoString(buf, "GROUPING SETS (");
omit_child_parens = false;
break;
}
foreach(l, gset->content)
{
appendStringInfoString(buf, sep);
get_rule_groupingset(lfirst(l), targetlist, omit_child_parens, context);
sep = ", ";
}
appendStringInfoChar(buf, ')');
}
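/*
 * Illustrative sketch of get_rule_groupingset() output:
 *
 *     GROUP BY ROLLUP(a, b)
 *     GROUP BY GROUPING SETS ((a, b), (a), ())
 *
 * At the top level a simple one-element set is printed without parens
 * (omit_parens), so e.g. "GROUP BY CUBE(a), b" round-trips without stray
 * parentheses around b.
 */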
/*
* Display an ORDER BY list.
*/
static void
get_rule_orderby(List *orderList, List *targetList,
bool force_colno, deparse_context *context)
{
StringInfo buf = context->buf;
const char *sep;
ListCell *l;
sep = "";
foreach(l, orderList)
{
SortGroupClause *srt = (SortGroupClause *) lfirst(l);
Node *sortexpr;
Oid sortcoltype;
TypeCacheEntry *typentry;
appendStringInfoString(buf, sep);
sortexpr = get_rule_sortgroupclause(srt->tleSortGroupRef, targetList,
force_colno, context);
sortcoltype = exprType(sortexpr);
/* See whether operator is default < or > for datatype */
typentry = lookup_type_cache(sortcoltype,
TYPECACHE_LT_OPR | TYPECACHE_GT_OPR);
if (srt->sortop == typentry->lt_opr)
{
/* ASC is default, so emit nothing for it */
if (srt->nulls_first)
appendStringInfoString(buf, " NULLS FIRST");
}
else if (srt->sortop == typentry->gt_opr)
{
appendStringInfoString(buf, " DESC");
/* DESC defaults to NULLS FIRST */
if (!srt->nulls_first)
appendStringInfoString(buf, " NULLS LAST");
}
else
{
appendStringInfo(buf, " USING %s",
generate_operator_name(srt->sortop,
sortcoltype,
sortcoltype));
/* be specific to eliminate ambiguity */
if (srt->nulls_first)
appendStringInfoString(buf, " NULLS FIRST");
else
appendStringInfoString(buf, " NULLS LAST");
}
sep = ", ";
}
}
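/*
 * Illustrative sketch of get_rule_orderby() output: only deviations from
 * the default ordering are spelled out, e.g.
 *
 *     ORDER BY a, b DESC, c NULLS FIRST, d USING ~<~ NULLS LAST
 *
 * ASC and the default NULLS placement are omitted, while a non-default
 * sort operator always gets an explicit NULLS FIRST/LAST to avoid
 * ambiguity.
 */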
/*
* Display a WINDOW clause.
*
* Note that the windowClause list might contain only anonymous window
* specifications, in which case we should print nothing here.
*/
static void
get_rule_windowclause(Query *query, deparse_context *context)
{
StringInfo buf = context->buf;
const char *sep;
ListCell *l;
sep = NULL;
foreach(l, query->windowClause)
{
WindowClause *wc = (WindowClause *) lfirst(l);
if (wc->name == NULL)
continue; /* ignore anonymous windows */
if (sep == NULL)
appendContextKeyword(context, " WINDOW ",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
else
appendStringInfoString(buf, sep);
appendStringInfo(buf, "%s AS ", quote_identifier(wc->name));
get_rule_windowspec(wc, query->targetList, context);
sep = ", ";
}
}
/*
* Display a window definition
*/
static void
get_rule_windowspec(WindowClause *wc, List *targetList,
deparse_context *context)
{
StringInfo buf = context->buf;
bool needspace = false;
const char *sep;
ListCell *l;
appendStringInfoChar(buf, '(');
if (wc->refname)
{
appendStringInfoString(buf, quote_identifier(wc->refname));
needspace = true;
}
/* partition clauses are always inherited, so only print if no refname */
if (wc->partitionClause && !wc->refname)
{
if (needspace)
appendStringInfoChar(buf, ' ');
appendStringInfoString(buf, "PARTITION BY ");
sep = "";
foreach(l, wc->partitionClause)
{
SortGroupClause *grp = (SortGroupClause *) lfirst(l);
appendStringInfoString(buf, sep);
get_rule_sortgroupclause(grp->tleSortGroupRef, targetList,
false, context);
sep = ", ";
}
needspace = true;
}
/* print ordering clause only if not inherited */
if (wc->orderClause && !wc->copiedOrder)
{
if (needspace)
appendStringInfoChar(buf, ' ');
appendStringInfoString(buf, "ORDER BY ");
get_rule_orderby(wc->orderClause, targetList, false, context);
needspace = true;
}
/* framing clause is never inherited, so print unless it's default */
if (wc->frameOptions & FRAMEOPTION_NONDEFAULT)
{
if (needspace)
appendStringInfoChar(buf, ' ');
if (wc->frameOptions & FRAMEOPTION_RANGE)
appendStringInfoString(buf, "RANGE ");
else if (wc->frameOptions & FRAMEOPTION_ROWS)
appendStringInfoString(buf, "ROWS ");
else if (wc->frameOptions & FRAMEOPTION_GROUPS)
appendStringInfoString(buf, "GROUPS ");
else
Assert(false);
if (wc->frameOptions & FRAMEOPTION_BETWEEN)
appendStringInfoString(buf, "BETWEEN ");
if (wc->frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING)
appendStringInfoString(buf, "UNBOUNDED PRECEDING ");
else if (wc->frameOptions & FRAMEOPTION_START_CURRENT_ROW)
appendStringInfoString(buf, "CURRENT ROW ");
else if (wc->frameOptions & FRAMEOPTION_START_OFFSET)
{
get_rule_expr(wc->startOffset, context, false);
if (wc->frameOptions & FRAMEOPTION_START_OFFSET_PRECEDING)
appendStringInfoString(buf, " PRECEDING ");
else if (wc->frameOptions & FRAMEOPTION_START_OFFSET_FOLLOWING)
appendStringInfoString(buf, " FOLLOWING ");
else
Assert(false);
}
else
Assert(false);
if (wc->frameOptions & FRAMEOPTION_BETWEEN)
{
appendStringInfoString(buf, "AND ");
if (wc->frameOptions & FRAMEOPTION_END_UNBOUNDED_FOLLOWING)
appendStringInfoString(buf, "UNBOUNDED FOLLOWING ");
else if (wc->frameOptions & FRAMEOPTION_END_CURRENT_ROW)
appendStringInfoString(buf, "CURRENT ROW ");
else if (wc->frameOptions & FRAMEOPTION_END_OFFSET)
{
get_rule_expr(wc->endOffset, context, false);
if (wc->frameOptions & FRAMEOPTION_END_OFFSET_PRECEDING)
appendStringInfoString(buf, " PRECEDING ");
else if (wc->frameOptions & FRAMEOPTION_END_OFFSET_FOLLOWING)
appendStringInfoString(buf, " FOLLOWING ");
else
Assert(false);
}
else
Assert(false);
}
if (wc->frameOptions & FRAMEOPTION_EXCLUDE_CURRENT_ROW)
appendStringInfoString(buf, "EXCLUDE CURRENT ROW ");
else if (wc->frameOptions & FRAMEOPTION_EXCLUDE_GROUP)
appendStringInfoString(buf, "EXCLUDE GROUP ");
else if (wc->frameOptions & FRAMEOPTION_EXCLUDE_TIES)
appendStringInfoString(buf, "EXCLUDE TIES ");
/* we will now have a trailing space; remove it */
buf->len--;
}
appendStringInfoChar(buf, ')');
}
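/*
 * Illustrative sketch of get_rule_windowspec() output for a fully explicit
 * window definition:
 *
 *     (PARTITION BY dept ORDER BY salary DESC
 *      ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW EXCLUDE TIES)
 *
 * Inherited pieces (the refname's partitioning, a copied ORDER BY) and a
 * default frame are suppressed, per the checks above.
 */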
/* ----------
* get_insert_query_def - Parse back an INSERT parsetree
* ----------
*/
static void
get_insert_query_def(Query *query, deparse_context *context)
{
StringInfo buf = context->buf;
RangeTblEntry *select_rte = NULL;
RangeTblEntry *values_rte = NULL;
RangeTblEntry *rte;
char *sep;
ListCell *l;
List *strippedexprs;
/* Insert the WITH clause if given */
get_with_clause(query, context);
/*
* If it's an INSERT ... SELECT or multi-row VALUES, there will be a
* single RTE for the SELECT or VALUES. Plain VALUES has neither.
*/
foreach(l, query->rtable)
{
rte = (RangeTblEntry *) lfirst(l);
if (rte->rtekind == RTE_SUBQUERY)
{
if (select_rte)
elog(ERROR, "too many subquery RTEs in INSERT");
select_rte = rte;
}
if (rte->rtekind == RTE_VALUES)
{
if (values_rte)
elog(ERROR, "too many values RTEs in INSERT");
values_rte = rte;
}
}
if (select_rte && values_rte)
elog(ERROR, "both subquery and values RTEs in INSERT");
/*
* Start the query with INSERT INTO relname
*/
rte = rt_fetch(query->resultRelation, query->rtable);
Assert(rte->rtekind == RTE_RELATION);
if (PRETTY_INDENT(context))
{
context->indentLevel += PRETTYINDENT_STD;
appendStringInfoChar(buf, ' ');
}
appendStringInfo(buf, "INSERT INTO %s ",
generate_relation_name(rte->relid, NIL));
/* INSERT requires AS keyword for target alias */
if (rte->alias != NULL)
appendStringInfo(buf, "AS %s ",
quote_identifier(rte->alias->aliasname));
/*
* Add the insert-column-names list. Any indirection decoration needed on
* the column names can be inferred from the top targetlist.
*/
strippedexprs = NIL;
sep = "";
if (query->targetList)
appendStringInfoChar(buf, '(');
foreach(l, query->targetList)
{
TargetEntry *tle = (TargetEntry *) lfirst(l);
if (tle->resjunk)
continue; /* ignore junk entries */
appendStringInfoString(buf, sep);
sep = ", ";
/*
* Put out name of target column; look in the catalogs, not at
* tle->resname, since resname will fail to track RENAME.
*/
appendStringInfoString(buf,
quote_identifier(get_attname(rte->relid,
tle->resno,
false)));
/*
* Print any indirection needed (subfields or subscripts), and strip
* off the top-level nodes representing the indirection assignments.
* Add the stripped expressions to strippedexprs. (If it's a
* single-VALUES statement, the stripped expressions are the VALUES to
* print below. Otherwise they're just Vars and not really
* interesting.)
*/
strippedexprs = lappend(strippedexprs,
processIndirection((Node *) tle->expr,
context));
}
if (query->targetList)
appendStringInfoString(buf, ") ");
if (query->override)
{
if (query->override == OVERRIDING_SYSTEM_VALUE)
appendStringInfoString(buf, "OVERRIDING SYSTEM VALUE ");
else if (query->override == OVERRIDING_USER_VALUE)
appendStringInfoString(buf, "OVERRIDING USER VALUE ");
}
if (select_rte)
{
/* Add the SELECT */
get_query_def(select_rte->subquery, buf, NIL, NULL,
context->prettyFlags, context->wrapColumn,
context->indentLevel);
}
else if (values_rte)
{
/* Add the multi-VALUES expression lists */
get_values_def(values_rte->values_lists, context);
}
else if (strippedexprs)
{
/* Add the single-VALUES expression list */
appendContextKeyword(context, "VALUES (",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 2);
get_rule_expr((Node *) strippedexprs, context, false);
appendStringInfoChar(buf, ')');
}
else
{
/* No expressions, so it must be DEFAULT VALUES */
appendStringInfoString(buf, "DEFAULT VALUES");
}
/* Add ON CONFLICT if present */
if (query->onConflict)
{
OnConflictExpr *confl = query->onConflict;
appendStringInfoString(buf, " ON CONFLICT");
if (confl->arbiterElems)
{
/* Add the conflict target (arbiter index columns/expressions) */
appendStringInfoChar(buf, '(');
get_rule_expr((Node *) confl->arbiterElems, context, false);
appendStringInfoChar(buf, ')');
/* Add a WHERE clause (for partial indexes) if given */
if (confl->arbiterWhere != NULL)
{
bool save_varprefix;
/*
* Force non-prefixing of Vars, since parser assumes that they
* belong to target relation. WHERE clause does not use
* InferenceElem, so this is separately required.
*/
save_varprefix = context->varprefix;
context->varprefix = false;
appendContextKeyword(context, " WHERE ",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
get_rule_expr(confl->arbiterWhere, context, false);
context->varprefix = save_varprefix;
}
}
else if (OidIsValid(confl->constraint))
{
char *constraint = get_constraint_name(confl->constraint);
if (!constraint)
elog(ERROR, "cache lookup failed for constraint %u",
confl->constraint);
appendStringInfo(buf, " ON CONSTRAINT %s",
quote_identifier(constraint));
}
if (confl->action == ONCONFLICT_NOTHING)
{
appendStringInfoString(buf, " DO NOTHING");
}
else
{
appendStringInfoString(buf, " DO UPDATE SET ");
/* Deparse targetlist */
get_update_query_targetlist_def(query, confl->onConflictSet,
context, rte);
/* Add a WHERE clause if given */
if (confl->onConflictWhere != NULL)
{
appendContextKeyword(context, " WHERE ",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
get_rule_expr(confl->onConflictWhere, context, false);
}
}
}
/* Add RETURNING if present */
if (query->returningList)
{
appendContextKeyword(context, " RETURNING",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
get_target_list(query->returningList, context, NULL);
}
}
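/*
 * Illustrative sketch of get_insert_query_def() output shapes (hypothetical
 * table and column names):
 *
 *     INSERT INTO t (a, b) VALUES (1, 'x')
 *     INSERT INTO t (a, b) SELECT x, y FROM s
 *     INSERT INTO t (a) VALUES (1) ON CONFLICT (a) DO UPDATE SET a = excluded.a
 *     INSERT INTO t DEFAULT VALUES
 *
 * WITH, OVERRIDING ... VALUE and RETURNING are added when present in the
 * query tree.
 */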
/* ----------
* get_update_query_def - Parse back an UPDATE parsetree
* ----------
*/
static void
get_update_query_def(Query *query, deparse_context *context)
{
StringInfo buf = context->buf;
RangeTblEntry *rte;
/* Insert the WITH clause if given */
get_with_clause(query, context);
/*
* Start the query with UPDATE relname SET
*/
rte = rt_fetch(query->resultRelation, query->rtable);
Assert(rte->rtekind == RTE_RELATION);
if (PRETTY_INDENT(context))
{
appendStringInfoChar(buf, ' ');
context->indentLevel += PRETTYINDENT_STD;
}
appendStringInfo(buf, "UPDATE %s%s",
only_marker(rte),
generate_relation_name(rte->relid, NIL));
if (rte->alias != NULL)
appendStringInfo(buf, " %s",
quote_identifier(rte->alias->aliasname));
appendStringInfoString(buf, " SET ");
/* Deparse targetlist */
get_update_query_targetlist_def(query, query->targetList, context, rte);
/* Add the FROM clause if needed */
get_from_clause(query, " FROM ", context);
/* Add a WHERE clause if given */
if (query->jointree->quals != NULL)
{
appendContextKeyword(context, " WHERE ",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
get_rule_expr(query->jointree->quals, context, false);
}
/* Add RETURNING if present */
if (query->returningList)
{
appendContextKeyword(context, " RETURNING",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
get_target_list(query->returningList, context, NULL);
}
}
/* ----------
* get_update_query_targetlist_def - Parse back an UPDATE targetlist
* ----------
*/
static void
get_update_query_targetlist_def(Query *query, List *targetList,
deparse_context *context, RangeTblEntry *rte)
{
StringInfo buf = context->buf;
ListCell *l;
ListCell *next_ma_cell;
int remaining_ma_columns;
const char *sep;
SubLink *cur_ma_sublink;
List *ma_sublinks;
/*
* Prepare to deal with MULTIEXPR assignments: collect the source SubLinks
* into a list. We expect them to appear, in ID order, in resjunk tlist
* entries.
*/
ma_sublinks = NIL;
if (query->hasSubLinks) /* else there can't be any */
{
foreach(l, targetList)
{
TargetEntry *tle = (TargetEntry *) lfirst(l);
if (tle->resjunk && IsA(tle->expr, SubLink))
{
SubLink *sl = (SubLink *) tle->expr;
if (sl->subLinkType == MULTIEXPR_SUBLINK)
{
ma_sublinks = lappend(ma_sublinks, sl);
Assert(sl->subLinkId == list_length(ma_sublinks));
}
}
}
}
next_ma_cell = list_head(ma_sublinks);
cur_ma_sublink = NULL;
remaining_ma_columns = 0;
/* Add the comma separated list of 'attname = value' */
sep = "";
foreach(l, targetList)
{
TargetEntry *tle = (TargetEntry *) lfirst(l);
Node *expr;
if (tle->resjunk)
continue; /* ignore junk entries */
/* Emit separator (OK whether we're in multiassignment or not) */
appendStringInfoString(buf, sep);
sep = ", ";
/*
* Check to see if we're starting a multiassignment group: if so,
* output a left paren.
*/
if (next_ma_cell != NULL && cur_ma_sublink == NULL)
{
/*
* We must dig down into the expr to see if it's a PARAM_MULTIEXPR
* Param. That could be buried under FieldStores and
* SubscriptingRefs and CoerceToDomains (cf processIndirection()),
* and underneath those there could be an implicit type coercion.
* Because we would ignore implicit type coercions anyway, we
* don't need to be as careful as processIndirection() is about
* descending past implicit CoerceToDomains.
*/
expr = (Node *) tle->expr;
while (expr)
{
if (IsA(expr, FieldStore))
{
FieldStore *fstore = (FieldStore *) expr;
expr = (Node *) linitial(fstore->newvals);
}
else if (IsA(expr, SubscriptingRef))
{
SubscriptingRef *sbsref = (SubscriptingRef *) expr;
if (sbsref->refassgnexpr == NULL)
break;
expr = (Node *) sbsref->refassgnexpr;
}
else if (IsA(expr, CoerceToDomain))
{
CoerceToDomain *cdomain = (CoerceToDomain *) expr;
if (cdomain->coercionformat != COERCE_IMPLICIT_CAST)
break;
expr = (Node *) cdomain->arg;
}
else
break;
}
expr = strip_implicit_coercions(expr);
if (expr && IsA(expr, Param) &&
((Param *) expr)->paramkind == PARAM_MULTIEXPR)
{
cur_ma_sublink = (SubLink *) lfirst(next_ma_cell);
next_ma_cell = lnext(ma_sublinks, next_ma_cell);
remaining_ma_columns = count_nonjunk_tlist_entries(((Query *) cur_ma_sublink->subselect)->targetList);
Assert(((Param *) expr)->paramid ==
((cur_ma_sublink->subLinkId << 16) | 1));
appendStringInfoChar(buf, '(');
}
}
/*
* Put out name of target column; look in the catalogs, not at
* tle->resname, since resname will fail to track RENAME.
*/
appendStringInfoString(buf,
quote_identifier(get_attname(rte->relid,
tle->resno,
false)));
/*
* Print any indirection needed (subfields or subscripts), and strip
* off the top-level nodes representing the indirection assignments.
*/
expr = processIndirection((Node *) tle->expr, context);
/*
* If we're in a multiassignment, skip printing anything more, unless
* this is the last column; in which case, what we print should be the
* sublink, not the Param.
*/
if (cur_ma_sublink != NULL)
{
if (--remaining_ma_columns > 0)
continue; /* not the last column of multiassignment */
appendStringInfoChar(buf, ')');
expr = (Node *) cur_ma_sublink;
cur_ma_sublink = NULL;
}
appendStringInfoString(buf, " = ");
get_rule_expr(expr, context, false);
}
}
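/*
 * Illustrative sketch of the multiassignment handling above: an UPDATE
 * written as
 *
 *     UPDATE t SET (a, b) = (SELECT x, y FROM s WHERE s.id = t.id)
 *
 * comes back in the same shape: the '(' is emitted on reaching the first
 * column of the group, intermediate columns print only their names, and
 * the ')' plus the sub-SELECT are emitted at the group's last column.
 */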
/* ----------
* get_delete_query_def - Parse back a DELETE parsetree
* ----------
*/
static void
get_delete_query_def(Query *query, deparse_context *context)
{
StringInfo buf = context->buf;
RangeTblEntry *rte;
/* Insert the WITH clause if given */
get_with_clause(query, context);
/*
* Start the query with DELETE FROM relname
*/
rte = rt_fetch(query->resultRelation, query->rtable);
Assert(rte->rtekind == RTE_RELATION);
if (PRETTY_INDENT(context))
{
appendStringInfoChar(buf, ' ');
context->indentLevel += PRETTYINDENT_STD;
}
appendStringInfo(buf, "DELETE FROM %s%s",
only_marker(rte),
generate_relation_name(rte->relid, NIL));
if (rte->alias != NULL)
appendStringInfo(buf, " %s",
quote_identifier(rte->alias->aliasname));
/* Add the USING clause if given */
get_from_clause(query, " USING ", context);
/* Add a WHERE clause if given */
if (query->jointree->quals != NULL)
{
appendContextKeyword(context, " WHERE ",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
get_rule_expr(query->jointree->quals, context, false);
}
/* Add RETURNING if present */
if (query->returningList)
{
appendContextKeyword(context, " RETURNING",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
get_target_list(query->returningList, context, NULL);
}
}
/* ----------
* get_utility_query_def - Parse back a UTILITY parsetree
* ----------
*/
static void
get_utility_query_def(Query *query, deparse_context *context)
{
StringInfo buf = context->buf;
if (query->utilityStmt && IsA(query->utilityStmt, NotifyStmt))
{
NotifyStmt *stmt = (NotifyStmt *) query->utilityStmt;
appendContextKeyword(context, "",
0, PRETTYINDENT_STD, 1);
appendStringInfo(buf, "NOTIFY %s",
quote_identifier(stmt->conditionname));
if (stmt->payload)
{
appendStringInfoString(buf, ", ");
simple_quote_literal(buf, stmt->payload);
}
}
else
{
/* Currently only NOTIFY utility commands can appear in rules */
elog(ERROR, "unexpected utility statement type");
}
}
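/*
 * Illustrative sketch: the only utility statement reachable here is NOTIFY,
 * which deparses as, e.g.,
 *
 *     NOTIFY my_channel, 'some payload'
 *
 * with the payload omitted when none was supplied.
 */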
/*
* Display a Var appropriately.
*
* In some cases (currently only when recursing into an unnamed join)
* the Var's varlevelsup has to be interpreted with respect to a context
* above the current one; levelsup indicates the offset.
*
* If istoplevel is true, the Var is at the top level of a SELECT's
* targetlist, which means we need special treatment of whole-row Vars.
* Instead of the normal "tab.*", we'll print "tab.*::typename", which is a
* dirty hack to prevent "tab.*" from being expanded into multiple columns.
* (The parser will strip the useless coercion, so no inefficiency is added in
* dump and reload.) We used to print just "tab" in such cases, but that is
* ambiguous and will yield the wrong result if "tab" is also a plain column
* name in the query.
*
* Returns the attname of the Var, or NULL if the Var has no attname (because
* it is a whole-row Var or a subplan output reference).
*/
static char *
get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
{
StringInfo buf = context->buf;
RangeTblEntry *rte;
AttrNumber attnum;
int netlevelsup;
deparse_namespace *dpns;
Index varno;
AttrNumber varattno;
deparse_columns *colinfo;
char *refname;
char *attname;
/* Find appropriate nesting depth */
netlevelsup = var->varlevelsup + levelsup;
if (netlevelsup >= list_length(context->namespaces))
elog(ERROR, "bogus varlevelsup: %d offset %d",
var->varlevelsup, levelsup);
dpns = (deparse_namespace *) list_nth(context->namespaces,
netlevelsup);
/*
* If we have a syntactic referent for the Var, and we're working from a
* parse tree, prefer to use the syntactic referent. Otherwise, fall back
* on the semantic referent. (Forcing use of the semantic referent when
* printing plan trees is a design choice that's perhaps more motivated by
* backwards compatibility than anything else. But it does have the
* advantage of making plans more explicit.)
*/
if (var->varnosyn > 0 && dpns->plan == NULL)
{
varno = var->varnosyn;
varattno = var->varattnosyn;
}
else
{
varno = var->varno;
varattno = var->varattno;
}
/*
* Try to find the relevant RTE in this rtable. In a plan tree, it's
* likely that varno is OUTER_VAR or INNER_VAR, in which case we must dig
* down into the subplans, or INDEX_VAR, which is resolved similarly. Also
* find the aliases previously assigned for this RTE.
*/
if (varno >= 1 && varno <= list_length(dpns->rtable))
{
/*
* We might have been asked to map child Vars to some parent relation.
*/
if (context->appendparents && dpns->appendrels)
{
Index pvarno = varno;
AttrNumber pvarattno = varattno;
AppendRelInfo *appinfo = dpns->appendrels[pvarno];
bool found = false;
/* Only map up to inheritance parents, not UNION ALL appendrels */
while (appinfo &&
rt_fetch(appinfo->parent_relid,
dpns->rtable)->rtekind == RTE_RELATION)
{
found = false;
if (pvarattno > 0) /* system columns stay as-is */
{
if (pvarattno > appinfo->num_child_cols)
break; /* safety check */
pvarattno = appinfo->parent_colnos[pvarattno - 1];
if (pvarattno == 0)
break; /* Var is local to child */
}
pvarno = appinfo->parent_relid;
found = true;
/* If the parent is itself a child, continue up. */
Assert(pvarno > 0 && pvarno <= list_length(dpns->rtable));
appinfo = dpns->appendrels[pvarno];
}
/*
* If we found an ancestral rel, and that rel is included in
* appendparents, print that column not the original one.
*/
if (found && bms_is_member(pvarno, context->appendparents))
{
varno = pvarno;
varattno = pvarattno;
}
}
rte = rt_fetch(varno, dpns->rtable);
refname = (char *) list_nth(dpns->rtable_names, varno - 1);
colinfo = deparse_columns_fetch(varno, dpns);
attnum = varattno;
}
else
{
resolve_special_varno((Node *) var, context,
get_special_variable, NULL);
return NULL;
}
/*
* The planner will sometimes emit Vars referencing resjunk elements of a
* subquery's target list (this is currently only possible if it chooses
* to generate a "physical tlist" for a SubqueryScan or CteScan node).
* Although we prefer to print subquery-referencing Vars using the
* subquery's alias, that's not possible for resjunk items since they have
* no alias. So in that case, drill down to the subplan and print the
* contents of the referenced tlist item. This works because in a plan
* tree, such Vars can only occur in a SubqueryScan or CteScan node, and
* we'll have set dpns->inner_plan to reference the child plan node.
*/
if ((rte->rtekind == RTE_SUBQUERY || rte->rtekind == RTE_CTE) &&
attnum > list_length(rte->eref->colnames) &&
dpns->inner_plan)
{
TargetEntry *tle;
deparse_namespace save_dpns;
tle = get_tle_by_resno(dpns->inner_tlist, attnum);
if (!tle)
elog(ERROR, "invalid attnum %d for relation \"%s\"",
attnum, rte->eref->aliasname);
Assert(netlevelsup == 0);
push_child_plan(dpns, dpns->inner_plan, &save_dpns);
/*
* Force parentheses because our caller probably assumed a Var is a
* simple expression.
*/
if (!IsA(tle->expr, Var))
appendStringInfoChar(buf, '(');
get_rule_expr((Node *) tle->expr, context, true);
if (!IsA(tle->expr, Var))
appendStringInfoChar(buf, ')');
pop_child_plan(dpns, &save_dpns);
return NULL;
}
/*
* If it's an unnamed join, look at the expansion of the alias variable.
* If it's a simple reference to one of the input vars, then recursively
* print the name of that var instead. When it's not a simple reference,
* we have to just print the unqualified join column name. (This can only
* happen with "dangerous" merged columns in a JOIN USING; we took pains
* previously to make the unqualified column name unique in such cases.)
*
* This wouldn't work in decompiling plan trees, because we don't store
* joinaliasvars lists after planning; but a plan tree should never
* contain a join alias variable.
*/
if (rte->rtekind == RTE_JOIN && rte->alias == NULL)
{
if (rte->joinaliasvars == NIL)
elog(ERROR, "cannot decompile join alias var in plan tree");
if (attnum > 0)
{
Var *aliasvar;
aliasvar = (Var *) list_nth(rte->joinaliasvars, attnum - 1);
/* we intentionally don't strip implicit coercions here */
if (aliasvar && IsA(aliasvar, Var))
{
return get_variable(aliasvar, var->varlevelsup + levelsup,
istoplevel, context);
}
}
/*
* Unnamed join has no refname. (Note: since it's unnamed, there is
* no way the user could have referenced it to create a whole-row Var
* for it. So we don't have to cover that case below.)
*/
Assert(refname == NULL);
}
if (attnum == InvalidAttrNumber)
attname = NULL;
else if (attnum > 0)
{
/* Get column name to use from the colinfo struct */
if (attnum > colinfo->num_cols)
elog(ERROR, "invalid attnum %d for relation \"%s\"",
attnum, rte->eref->aliasname);
attname = colinfo->colnames[attnum - 1];
if (attname == NULL) /* dropped column? */
elog(ERROR, "invalid attnum %d for relation \"%s\"",
attnum, rte->eref->aliasname);
}
else
{
/* System column - name is fixed, get it from the catalog */
attname = get_rte_attribute_name(rte, attnum);
}
if (refname && (context->varprefix || attname == NULL))
{
appendStringInfoString(buf, quote_identifier(refname));
appendStringInfoChar(buf, '.');
}
if (attname)
appendStringInfoString(buf, quote_identifier(attname));
else
{
appendStringInfoChar(buf, '*');
if (istoplevel)
appendStringInfo(buf, "::%s",
format_type_with_typemod(var->vartype,
var->vartypmod));
}
return attname;
}
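/*
 * Illustrative sketch of the whole-row special case described above:
 * deparsing "SELECT t FROM t" at the top level of a target list yields
 * roughly
 *
 *     SELECT t.*::t AS t FROM t
 *
 * rather than bare "t.*", so that reparsing does not expand the reference
 * into individual columns.
 */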
/*
* Deparse a Var which references OUTER_VAR, INNER_VAR, or INDEX_VAR. This
* routine is actually a callback for resolve_special_varno, which handles
* finding the correct TargetEntry. We get the expression contained in that
* TargetEntry and just need to deparse it, a job we can throw back on
* get_rule_expr.
*/
static void
get_special_variable(Node *node, deparse_context *context, void *callback_arg)
{
StringInfo buf = context->buf;
/*
* For a non-Var referent, force parentheses because our caller probably
* assumed a Var is a simple expression.
*/
if (!IsA(node, Var))
appendStringInfoChar(buf, '(');
get_rule_expr(node, context, true);
if (!IsA(node, Var))
appendStringInfoChar(buf, ')');
}
/*
* Chase through plan references to special varnos (OUTER_VAR, INNER_VAR,
* INDEX_VAR) until we find a real Var or some kind of non-Var node; then,
* invoke the callback provided.
*/
static void
resolve_special_varno(Node *node, deparse_context *context,
rsv_callback callback, void *callback_arg)
{
Var *var;
deparse_namespace *dpns;
/* This function is recursive, so let's be paranoid. */
check_stack_depth();
/* If it's not a Var, invoke the callback. */
if (!IsA(node, Var))
{
(*callback) (node, context, callback_arg);
return;
}
/* Find appropriate nesting depth */
var = (Var *) node;
dpns = (deparse_namespace *) list_nth(context->namespaces,
var->varlevelsup);
/*
* If varno is special, recurse. (Don't worry about varnosyn; if we're
* here, we already decided not to use that.)
*/
if (var->varno == OUTER_VAR && dpns->outer_tlist)
{
TargetEntry *tle;
deparse_namespace save_dpns;
Bitmapset *save_appendparents;
tle = get_tle_by_resno(dpns->outer_tlist, var->varattno);
if (!tle)
elog(ERROR, "bogus varattno for OUTER_VAR var: %d", var->varattno);
/*
* If we're descending to the first child of an Append or MergeAppend,
* update appendparents. This will affect deparsing of all Vars
* appearing within the eventually-resolved subexpression.
*/
save_appendparents = context->appendparents;
if (IsA(dpns->plan, Append))
context->appendparents = bms_union(context->appendparents,
((Append *) dpns->plan)->apprelids);
else if (IsA(dpns->plan, MergeAppend))
context->appendparents = bms_union(context->appendparents,
((MergeAppend *) dpns->plan)->apprelids);
push_child_plan(dpns, dpns->outer_plan, &save_dpns);
resolve_special_varno((Node *) tle->expr, context,
callback, callback_arg);
pop_child_plan(dpns, &save_dpns);
context->appendparents = save_appendparents;
return;
}
else if (var->varno == INNER_VAR && dpns->inner_tlist)
{
TargetEntry *tle;
deparse_namespace save_dpns;
tle = get_tle_by_resno(dpns->inner_tlist, var->varattno);
if (!tle)
elog(ERROR, "bogus varattno for INNER_VAR var: %d", var->varattno);
push_child_plan(dpns, dpns->inner_plan, &save_dpns);
resolve_special_varno((Node *) tle->expr, context,
callback, callback_arg);
pop_child_plan(dpns, &save_dpns);
return;
}
else if (var->varno == INDEX_VAR && dpns->index_tlist)
{
TargetEntry *tle;
tle = get_tle_by_resno(dpns->index_tlist, var->varattno);
if (!tle)
elog(ERROR, "bogus varattno for INDEX_VAR var: %d", var->varattno);
resolve_special_varno((Node *) tle->expr, context,
callback, callback_arg);
return;
}
else if (var->varno < 1 || var->varno > list_length(dpns->rtable))
elog(ERROR, "bogus varno: %d", var->varno);
/* Not special. Just invoke the callback. */
(*callback) (node, context, callback_arg);
}
/*
* Get the name of a field of an expression of composite type. The
* expression is usually a Var, but we handle other cases too.
*
* levelsup is an extra offset to interpret the Var's varlevelsup correctly.
*
* This is fairly straightforward when the expression has a named composite
* type; we need only look up the type in the catalogs. However, the type
* could also be RECORD. Since no actual table or view column is allowed to
* have type RECORD, a Var of type RECORD must refer to a JOIN or FUNCTION RTE
* or to a subquery output. We drill down to find the ultimate defining
* expression and attempt to infer the field name from it. We ereport if we
* can't determine the name.
*
* Similarly, a PARAM of type RECORD has to refer to some expression of
* a determinable composite type.
*/
static const char *
get_name_for_var_field(Var *var, int fieldno,
int levelsup, deparse_context *context)
{
RangeTblEntry *rte;
AttrNumber attnum;
int netlevelsup;
deparse_namespace *dpns;
Index varno;
AttrNumber varattno;
TupleDesc tupleDesc;
Node *expr;
/*
* If it's a RowExpr that was expanded from a whole-row Var, use the
* column names attached to it.
*/
if (IsA(var, RowExpr))
{
RowExpr *r = (RowExpr *) var;
if (fieldno > 0 && fieldno <= list_length(r->colnames))
return strVal(list_nth(r->colnames, fieldno - 1));
}
/*
* If it's a Param of type RECORD, try to find what the Param refers to.
*/
if (IsA(var, Param))
{
Param *param = (Param *) var;
ListCell *ancestor_cell;
expr = find_param_referent(param, context, &dpns, &ancestor_cell);
if (expr)
{
/* Found a match, so recurse to decipher the field name */
deparse_namespace save_dpns;
const char *result;
push_ancestor_plan(dpns, ancestor_cell, &save_dpns);
result = get_name_for_var_field((Var *) expr, fieldno,
0, context);
pop_ancestor_plan(dpns, &save_dpns);
return result;
}
}
/*
* If it's a Var of type RECORD, we have to find what the Var refers to;
* if not, we can use get_expr_result_tupdesc().
*/
if (!IsA(var, Var) ||
var->vartype != RECORDOID)
{
tupleDesc = get_expr_result_tupdesc((Node *) var, false);
/* Got the tupdesc, so we can extract the field name */
Assert(fieldno >= 1 && fieldno <= tupleDesc->natts);
return NameStr(TupleDescAttr(tupleDesc, fieldno - 1)->attname);
}
/* Find appropriate nesting depth */
netlevelsup = var->varlevelsup + levelsup;
if (netlevelsup >= list_length(context->namespaces))
elog(ERROR, "bogus varlevelsup: %d offset %d",
var->varlevelsup, levelsup);
dpns = (deparse_namespace *) list_nth(context->namespaces,
netlevelsup);
/*
* If we have a syntactic referent for the Var, and we're working from a
* parse tree, prefer to use the syntactic referent. Otherwise, fall back
* on the semantic referent. (See comments in get_variable().)
*/
if (var->varnosyn > 0 && dpns->plan == NULL)
{
varno = var->varnosyn;
varattno = var->varattnosyn;
}
else
{
varno = var->varno;
varattno = var->varattno;
}
/*
* Try to find the relevant RTE in this rtable. In a plan tree, it's
* likely that varno is OUTER_VAR or INNER_VAR, in which case we must dig
* down into the subplans, or INDEX_VAR, which is resolved similarly.
*
* Note: unlike get_variable and resolve_special_varno, we need not worry
* about inheritance mapping: a child Var should have the same datatype as
* its parent, and here we're really only interested in the Var's type.
*/
if (varno >= 1 && varno <= list_length(dpns->rtable))
{
rte = rt_fetch(varno, dpns->rtable);
attnum = varattno;
}
else if (varno == OUTER_VAR && dpns->outer_tlist)
{
TargetEntry *tle;
deparse_namespace save_dpns;
const char *result;
tle = get_tle_by_resno(dpns->outer_tlist, varattno);
if (!tle)
elog(ERROR, "bogus varattno for OUTER_VAR var: %d", varattno);
Assert(netlevelsup == 0);
push_child_plan(dpns, dpns->outer_plan, &save_dpns);
result = get_name_for_var_field((Var *) tle->expr, fieldno,
levelsup, context);
pop_child_plan(dpns, &save_dpns);
return result;
}
else if (varno == INNER_VAR && dpns->inner_tlist)
{
TargetEntry *tle;
deparse_namespace save_dpns;
const char *result;
tle = get_tle_by_resno(dpns->inner_tlist, varattno);
if (!tle)
elog(ERROR, "bogus varattno for INNER_VAR var: %d", varattno);
Assert(netlevelsup == 0);
push_child_plan(dpns, dpns->inner_plan, &save_dpns);
result = get_name_for_var_field((Var *) tle->expr, fieldno,
levelsup, context);
pop_child_plan(dpns, &save_dpns);
return result;
}
else if (varno == INDEX_VAR && dpns->index_tlist)
{
TargetEntry *tle;
const char *result;
tle = get_tle_by_resno(dpns->index_tlist, varattno);
if (!tle)
elog(ERROR, "bogus varattno for INDEX_VAR var: %d", varattno);
Assert(netlevelsup == 0);
result = get_name_for_var_field((Var *) tle->expr, fieldno,
levelsup, context);
return result;
}
else
{
elog(ERROR, "bogus varno: %d", varno);
return NULL; /* keep compiler quiet */
}
if (attnum == InvalidAttrNumber)
{
/* Var is whole-row reference to RTE, so select the right field */
return get_rte_attribute_name(rte, fieldno);
}
/*
* This part has essentially the same logic as the parser's
* expandRecordVariable() function, but we are dealing with a different
* representation of the input context, and we only need one field name
* not a TupleDesc. Also, we need special cases for finding subquery and
* CTE subplans when deparsing Plan trees.
*/
expr = (Node *) var; /* default if we can't drill down */
switch (rte->rtekind)
{
case RTE_RELATION:
case RTE_VALUES:
case RTE_NAMEDTUPLESTORE:
case RTE_RESULT:
/*
* This case should not occur: a column of a table, values list,
* or ENR shouldn't have type RECORD. Fall through and fail (most
* likely) at the bottom.
*/
break;
case RTE_SUBQUERY:
/* Subselect-in-FROM: examine sub-select's output expr */
{
if (rte->subquery)
{
TargetEntry *ste = get_tle_by_resno(rte->subquery->targetList,
attnum);
if (ste == NULL || ste->resjunk)
elog(ERROR, "subquery %s does not have attribute %d",
rte->eref->aliasname, attnum);
expr = (Node *) ste->expr;
if (IsA(expr, Var))
{
/*
* Recurse into the sub-select to see what its Var
* refers to. We have to build an additional level of
* namespace to keep in step with varlevelsup in the
* subselect.
*/
deparse_namespace mydpns;
const char *result;
set_deparse_for_query(&mydpns, rte->subquery,
context->namespaces);
context->namespaces = lcons(&mydpns,
context->namespaces);
result = get_name_for_var_field((Var *) expr, fieldno,
0, context);
context->namespaces =
list_delete_first(context->namespaces);
return result;
}
/* else fall through to inspect the expression */
}
else
{
/*
* We're deparsing a Plan tree so we don't have complete
* RTE entries (in particular, rte->subquery is NULL). But
* the only place we'd see a Var directly referencing a
* SUBQUERY RTE is in a SubqueryScan plan node, and we can
* look into the child plan's tlist instead.
*/
TargetEntry *tle;
deparse_namespace save_dpns;
const char *result;
if (!dpns->inner_plan)
elog(ERROR, "failed to find plan for subquery %s",
rte->eref->aliasname);
tle = get_tle_by_resno(dpns->inner_tlist, attnum);
if (!tle)
elog(ERROR, "bogus varattno for subquery var: %d",
attnum);
Assert(netlevelsup == 0);
push_child_plan(dpns, dpns->inner_plan, &save_dpns);
result = get_name_for_var_field((Var *) tle->expr, fieldno,
levelsup, context);
pop_child_plan(dpns, &save_dpns);
return result;
}
}
break;
case RTE_JOIN:
/* Join RTE --- recursively inspect the alias variable */
if (rte->joinaliasvars == NIL)
elog(ERROR, "cannot decompile join alias var in plan tree");
Assert(attnum > 0 && attnum <= list_length(rte->joinaliasvars));
expr = (Node *) list_nth(rte->joinaliasvars, attnum - 1);
Assert(expr != NULL);
/* we intentionally don't strip implicit coercions here */
if (IsA(expr, Var))
return get_name_for_var_field((Var *) expr, fieldno,
var->varlevelsup + levelsup,
context);
/* else fall through to inspect the expression */
break;
case RTE_FUNCTION:
case RTE_TABLEFUNC:
/*
* We couldn't get here unless a function is declared with one of
* its result columns as RECORD, which is not allowed.
*/
break;
case RTE_CTE:
/* CTE reference: examine subquery's output expr */
{
CommonTableExpr *cte = NULL;
Index ctelevelsup;
ListCell *lc;
/*
* Try to find the referenced CTE using the namespace stack.
*/
ctelevelsup = rte->ctelevelsup + netlevelsup;
if (ctelevelsup >= list_length(context->namespaces))
lc = NULL;
else
{
deparse_namespace *ctedpns;
ctedpns = (deparse_namespace *)
list_nth(context->namespaces, ctelevelsup);
foreach(lc, ctedpns->ctes)
{
cte = (CommonTableExpr *) lfirst(lc);
if (strcmp(cte->ctename, rte->ctename) == 0)
break;
}
}
if (lc != NULL)
{
Query *ctequery = (Query *) cte->ctequery;
TargetEntry *ste = get_tle_by_resno(GetCTETargetList(cte),
attnum);
if (ste == NULL || ste->resjunk)
elog(ERROR, "subquery %s does not have attribute %d",
rte->eref->aliasname, attnum);
expr = (Node *) ste->expr;
if (IsA(expr, Var))
{
/*
* Recurse into the CTE to see what its Var refers to.
* We have to build an additional level of namespace
* to keep in step with varlevelsup in the CTE.
* Furthermore it could be an outer CTE, so we may
* have to delete some levels of namespace.
*/
List *save_nslist = context->namespaces;
List *new_nslist;
deparse_namespace mydpns;
const char *result;
set_deparse_for_query(&mydpns, ctequery,
context->namespaces);
new_nslist = list_copy_tail(context->namespaces,
ctelevelsup);
context->namespaces = lcons(&mydpns, new_nslist);
result = get_name_for_var_field((Var *) expr, fieldno,
0, context);
context->namespaces = save_nslist;
return result;
}
/* else fall through to inspect the expression */
}
else
{
/*
* We're deparsing a Plan tree so we don't have a CTE
* list. But the only place we'd see a Var directly
* referencing a CTE RTE is in a CteScan plan node, and we
* can look into the subplan's tlist instead.
*/
TargetEntry *tle;
deparse_namespace save_dpns;
const char *result;
if (!dpns->inner_plan)
elog(ERROR, "failed to find plan for CTE %s",
rte->eref->aliasname);
tle = get_tle_by_resno(dpns->inner_tlist, attnum);
if (!tle)
elog(ERROR, "bogus varattno for subquery var: %d",
attnum);
Assert(netlevelsup == 0);
push_child_plan(dpns, dpns->inner_plan, &save_dpns);
result = get_name_for_var_field((Var *) tle->expr, fieldno,
levelsup, context);
pop_child_plan(dpns, &save_dpns);
return result;
}
}
break;
}
/*
* We now have an expression we can't expand any more, so see if
* get_expr_result_tupdesc() can do anything with it.
*/
tupleDesc = get_expr_result_tupdesc(expr, false);
/* Got the tupdesc, so we can extract the field name */
Assert(fieldno >= 1 && fieldno <= tupleDesc->natts);
return NameStr(TupleDescAttr(tupleDesc, fieldno - 1)->attname);
}
/*
* Try to find the referenced expression for a PARAM_EXEC Param that might
* reference a parameter supplied by an upper NestLoop or SubPlan plan node.
*
* If successful, return the expression and set *dpns_p and *ancestor_cell_p
* appropriately for calling push_ancestor_plan(). If no referent can be
* found, return NULL.
*/
static Node *
find_param_referent(Param *param, deparse_context *context,
deparse_namespace **dpns_p, ListCell **ancestor_cell_p)
{
/* Initialize output parameters to prevent compiler warnings */
*dpns_p = NULL;
*ancestor_cell_p = NULL;
/*
* If it's a PARAM_EXEC parameter, look for a matching NestLoopParam or
* SubPlan argument. This will necessarily be in some ancestor of the
* current expression's Plan node.
*/
if (param->paramkind == PARAM_EXEC)
{
deparse_namespace *dpns;
Plan *child_plan;
bool in_same_plan_level;
ListCell *lc;
dpns = (deparse_namespace *) linitial(context->namespaces);
child_plan = dpns->plan;
in_same_plan_level = true;
foreach(lc, dpns->ancestors)
{
Node *ancestor = (Node *) lfirst(lc);
ListCell *lc2;
/*
* NestLoops transmit params to their inner child only; also, once
* we've crawled up out of a subplan, this couldn't possibly be
* the right match.
*/
if (IsA(ancestor, NestLoop) &&
child_plan == innerPlan(ancestor) &&
in_same_plan_level)
{
NestLoop *nl = (NestLoop *) ancestor;
foreach(lc2, nl->nestParams)
{
NestLoopParam *nlp = (NestLoopParam *) lfirst(lc2);
if (nlp->paramno == param->paramid)
{
/* Found a match, so return it */
*dpns_p = dpns;
*ancestor_cell_p = lc;
return (Node *) nlp->paramval;
}
}
}
/*
* If ancestor is a SubPlan, check the arguments it provides.
*/
if (IsA(ancestor, SubPlan))
{
SubPlan *subplan = (SubPlan *) ancestor;
ListCell *lc3;
ListCell *lc4;
forboth(lc3, subplan->parParam, lc4, subplan->args)
{
int paramid = lfirst_int(lc3);
Node *arg = (Node *) lfirst(lc4);
if (paramid == param->paramid)
{
/*
* Found a match, so return it. But, since Vars in
* the arg are to be evaluated in the surrounding
* context, we have to point to the next ancestor item
* that is *not* a SubPlan.
*/
ListCell *rest;
for_each_cell(rest, dpns->ancestors,
lnext(dpns->ancestors, lc))
{
Node *ancestor2 = (Node *) lfirst(rest);
if (!IsA(ancestor2, SubPlan))
{
*dpns_p = dpns;
*ancestor_cell_p = rest;
return arg;
}
}
elog(ERROR, "SubPlan cannot be outermost ancestor");
}
}
/* We have emerged from a subplan. */
in_same_plan_level = false;
/* SubPlan isn't a kind of Plan, so skip the rest */
continue;
}
/*
* Check to see if we're emerging from an initplan of the current
* ancestor plan. Initplans never have any parParams, so no need
* to search that list, but we need to know if we should reset
* in_same_plan_level.
*/
foreach(lc2, ((Plan *) ancestor)->initPlan)
{
SubPlan *subplan = castNode(SubPlan, lfirst(lc2));
if (child_plan != (Plan *) list_nth(dpns->subplans,
subplan->plan_id - 1))
continue;
/* No parameters to be had here. */
Assert(subplan->parParam == NIL);
/* We have emerged from an initplan. */
in_same_plan_level = false;
break;
}
/* No luck, crawl up to next ancestor */
child_plan = (Plan *) ancestor;
}
}
/* No referent found */
return NULL;
}
/*
* Display a Param appropriately.
*/
static void
get_parameter(Param *param, deparse_context *context)
{
Node *expr;
deparse_namespace *dpns;
ListCell *ancestor_cell;
/*
* If it's a PARAM_EXEC parameter, try to locate the expression from which
* the parameter was computed. Note that failing to find a referent isn't
* an error, since the Param might well be a subplan output rather than an
* input.
*/
expr = find_param_referent(param, context, &dpns, &ancestor_cell);
if (expr)
{
/* Found a match, so print it */
deparse_namespace save_dpns;
bool save_varprefix;
bool need_paren;
/* Switch attention to the ancestor plan node */
push_ancestor_plan(dpns, ancestor_cell, &save_dpns);
/*
* Force prefixing of Vars, since they won't belong to the relation
* being scanned in the original plan node.
*/
save_varprefix = context->varprefix;
context->varprefix = true;
/*
* A Param's expansion is typically a Var, Aggref, or upper-level
* Param, which wouldn't need extra parentheses. Otherwise, insert
* parens to ensure the expression looks atomic.
*/
need_paren = !(IsA(expr, Var) ||
IsA(expr, Aggref) ||
IsA(expr, Param));
if (need_paren)
appendStringInfoChar(context->buf, '(');
get_rule_expr(expr, context, false);
if (need_paren)
appendStringInfoChar(context->buf, ')');
context->varprefix = save_varprefix;
pop_ancestor_plan(dpns, &save_dpns);
return;
}
/*
* Not PARAM_EXEC, or couldn't find referent: just print $N.
*/
appendStringInfo(context->buf, "$%d", param->paramid);
}
/*
* get_simple_binary_op_name
*
* helper function for isSimpleNode
 * will return the single-character binary operator name, or NULL if it isn't one
*/
static const char *
get_simple_binary_op_name(OpExpr *expr)
{
List *args = expr->args;
if (list_length(args) == 2)
{
/* binary operator */
Node *arg1 = (Node *) linitial(args);
Node *arg2 = (Node *) lsecond(args);
const char *op;
op = generate_operator_name(expr->opno, exprType(arg1), exprType(arg2));
if (strlen(op) == 1)
return op;
}
return NULL;
}
/*
* isSimpleNode - check if given node is simple (doesn't need parenthesizing)
*
* true : simple in the context of parent node's type
* false : not simple
*/
static bool
isSimpleNode(Node *node, Node *parentNode, int prettyFlags)
{
if (!node)
return false;
switch (nodeTag(node))
{
case T_Var:
case T_Const:
case T_Param:
case T_CoerceToDomainValue:
case T_SetToDefault:
case T_CurrentOfExpr:
/* single words: always simple */
return true;
case T_SubscriptingRef:
case T_ArrayExpr:
case T_RowExpr:
case T_CoalesceExpr:
case T_MinMaxExpr:
case T_SQLValueFunction:
case T_XmlExpr:
case T_NextValueExpr:
case T_NullIfExpr:
case T_Aggref:
case T_WindowFunc:
case T_FuncExpr:
/* function-like: name(..) or name[..] */
return true;
/* CASE keywords act as parentheses */
case T_CaseExpr:
return true;
case T_FieldSelect:
/*
* appears simple since . has top precedence, unless parent is
* T_FieldSelect itself!
*/
return (IsA(parentNode, FieldSelect) ? false : true);
case T_FieldStore:
/*
* treat like FieldSelect (probably doesn't matter)
*/
return (IsA(parentNode, FieldStore) ? false : true);
case T_CoerceToDomain:
/* maybe simple, check args */
return isSimpleNode((Node *) ((CoerceToDomain *) node)->arg,
node, prettyFlags);
case T_RelabelType:
return isSimpleNode((Node *) ((RelabelType *) node)->arg,
node, prettyFlags);
case T_CoerceViaIO:
return isSimpleNode((Node *) ((CoerceViaIO *) node)->arg,
node, prettyFlags);
case T_ArrayCoerceExpr:
return isSimpleNode((Node *) ((ArrayCoerceExpr *) node)->arg,
node, prettyFlags);
case T_ConvertRowtypeExpr:
return isSimpleNode((Node *) ((ConvertRowtypeExpr *) node)->arg,
node, prettyFlags);
case T_OpExpr:
{
/* depends on parent node type; needs further checking */
if (prettyFlags & PRETTYFLAG_PAREN && IsA(parentNode, OpExpr))
{
const char *op;
const char *parentOp;
bool is_lopriop;
bool is_hipriop;
bool is_lopriparent;
bool is_hipriparent;
op = get_simple_binary_op_name((OpExpr *) node);
if (!op)
return false;
/* We know only the basic operators + - and * / % */
is_lopriop = (strchr("+-", *op) != NULL);
is_hipriop = (strchr("*/%", *op) != NULL);
if (!(is_lopriop || is_hipriop))
return false;
parentOp = get_simple_binary_op_name((OpExpr *) parentNode);
if (!parentOp)
return false;
is_lopriparent = (strchr("+-", *parentOp) != NULL);
is_hipriparent = (strchr("*/%", *parentOp) != NULL);
if (!(is_lopriparent || is_hipriparent))
return false;
if (is_hipriop && is_lopriparent)
return true; /* op binds tighter than parent */
if (is_lopriop && is_hipriparent)
return false;
/*
* Operators are same priority --- can skip parens only if
* we have (a - b) - c, not a - (b - c).
*/
if (node == (Node *) linitial(((OpExpr *) parentNode)->args))
return true;
return false;
}
/* else do the same stuff as for T_SubLink et al. */
}
/* FALLTHROUGH */
case T_SubLink:
case T_NullTest:
case T_BooleanTest:
case T_DistinctExpr:
switch (nodeTag(parentNode))
{
case T_FuncExpr:
{
/* special handling for casts */
CoercionForm type = ((FuncExpr *) parentNode)->funcformat;
if (type == COERCE_EXPLICIT_CAST ||
type == COERCE_IMPLICIT_CAST)
return false;
return true; /* own parentheses */
}
case T_BoolExpr: /* lower precedence */
case T_SubscriptingRef: /* other separators */
case T_ArrayExpr: /* other separators */
case T_RowExpr: /* other separators */
case T_CoalesceExpr: /* own parentheses */
case T_MinMaxExpr: /* own parentheses */
case T_XmlExpr: /* own parentheses */
case T_NullIfExpr: /* other separators */
case T_Aggref: /* own parentheses */
case T_WindowFunc: /* own parentheses */
case T_CaseExpr: /* other separators */
return true;
default:
return false;
}
case T_BoolExpr:
switch (nodeTag(parentNode))
{
case T_BoolExpr:
if (prettyFlags & PRETTYFLAG_PAREN)
{
BoolExprType type;
BoolExprType parentType;
type = ((BoolExpr *) node)->boolop;
parentType = ((BoolExpr *) parentNode)->boolop;
switch (type)
{
case NOT_EXPR:
case AND_EXPR:
if (parentType == AND_EXPR || parentType == OR_EXPR)
return true;
break;
case OR_EXPR:
if (parentType == OR_EXPR)
return true;
break;
}
}
return false;
case T_FuncExpr:
{
/* special handling for casts */
CoercionForm type = ((FuncExpr *) parentNode)->funcformat;
if (type == COERCE_EXPLICIT_CAST ||
type == COERCE_IMPLICIT_CAST)
return false;
return true; /* own parentheses */
}
case T_SubscriptingRef: /* other separators */
case T_ArrayExpr: /* other separators */
case T_RowExpr: /* other separators */
case T_CoalesceExpr: /* own parentheses */
case T_MinMaxExpr: /* own parentheses */
case T_XmlExpr: /* own parentheses */
case T_NullIfExpr: /* other separators */
case T_Aggref: /* own parentheses */
case T_WindowFunc: /* own parentheses */
case T_CaseExpr: /* other separators */
return true;
default:
return false;
}
default:
break;
}
/* those we don't know: in dubio complexo */
return false;
}
/*
* appendContextKeyword - append a keyword to buffer
*
* If prettyPrint is enabled, perform a line break, and adjust indentation.
* Otherwise, just append the keyword.
*/
static void
appendContextKeyword(deparse_context *context, const char *str,
int indentBefore, int indentAfter, int indentPlus)
{
StringInfo buf = context->buf;
if (PRETTY_INDENT(context))
{
int indentAmount;
context->indentLevel += indentBefore;
/* remove any trailing spaces currently in the buffer ... */
removeStringInfoSpaces(buf);
/* ... then add a newline and some spaces */
appendStringInfoChar(buf, '\n');
if (context->indentLevel < PRETTYINDENT_LIMIT)
indentAmount = Max(context->indentLevel, 0) + indentPlus;
else
{
/*
* If we're indented more than PRETTYINDENT_LIMIT characters, try
* to conserve horizontal space by reducing the per-level
* indentation. For best results the scale factor here should
* divide all the indent amounts that get added to indentLevel
* (PRETTYINDENT_STD, etc). It's important that the indentation
* not grow unboundedly, else deeply-nested trees use O(N^2)
* whitespace; so we also wrap modulo PRETTYINDENT_LIMIT.
*/
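			/*
			 * Worked example (constant values assumed for illustration, not
			 * quoted from the headers): with PRETTYINDENT_LIMIT = 40 and
			 * PRETTYINDENT_STD = 8, an indentLevel of 120 yields
			 * 40 + (120 - 40) / 4 = 60, which wraps to 60 % 40 = 20 before
			 * indentPlus is added back in.
			 */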
indentAmount = PRETTYINDENT_LIMIT +
(context->indentLevel - PRETTYINDENT_LIMIT) /
(PRETTYINDENT_STD / 2);
indentAmount %= PRETTYINDENT_LIMIT;
/* scale/wrap logic affects indentLevel, but not indentPlus */
indentAmount += indentPlus;
}
appendStringInfoSpaces(buf, indentAmount);
appendStringInfoString(buf, str);
context->indentLevel += indentAfter;
if (context->indentLevel < 0)
context->indentLevel = 0;
}
else
appendStringInfoString(buf, str);
}
/*
* removeStringInfoSpaces - delete trailing spaces from a buffer.
*
* Possibly this should move to stringinfo.c at some point.
*/
static void
removeStringInfoSpaces(StringInfo str)
{
while (str->len > 0 && str->data[str->len - 1] == ' ')
str->data[--(str->len)] = '\0';
}
/*
* get_rule_expr_paren - deparse expr using get_rule_expr,
* embracing the string with parentheses if necessary for prettyPrint.
*
* Never embrace if prettyFlags=0, because it's done in the calling node.
*
* Any node that does *not* embrace its argument node by sql syntax (with
* parentheses, non-operator keywords like CASE/WHEN/ON, or comma etc) should
* use get_rule_expr_paren instead of get_rule_expr so parentheses can be
* added.
*/
static void
get_rule_expr_paren(Node *node, deparse_context *context,
bool showimplicit, Node *parentNode)
{
bool need_paren;
need_paren = PRETTY_PAREN(context) &&
!isSimpleNode(node, parentNode, context->prettyFlags);
if (need_paren)
appendStringInfoChar(context->buf, '(');
get_rule_expr(node, context, showimplicit);
if (need_paren)
appendStringInfoChar(context->buf, ')');
}
/* ----------
* get_rule_expr - Parse back an expression
*
* Note: showimplicit determines whether we display any implicit cast that
* is present at the top of the expression tree. It is a passed argument,
* not a field of the context struct, because we change the value as we
* recurse down into the expression. In general we suppress implicit casts
* when the result type is known with certainty (eg, the arguments of an
* OR must be boolean). We display implicit casts for arguments of functions
* and operators, since this is needed to be certain that the same function
* or operator will be chosen when the expression is re-parsed.
* ----------
*/
static void
get_rule_expr(Node *node, deparse_context *context,
bool showimplicit)
{
StringInfo buf = context->buf;
if (node == NULL)
return;
/* Guard against excessively long or deeply-nested queries */
CHECK_FOR_INTERRUPTS();
check_stack_depth();
/*
* Each level of get_rule_expr must emit an indivisible term
* (parenthesized if necessary) to ensure result is reparsed into the same
* expression tree. The only exception is that when the input is a List,
* we emit the component items comma-separated with no surrounding
* decoration; this is convenient for most callers.
*/
switch (nodeTag(node))
{
case T_Var:
(void) get_variable((Var *) node, 0, false, context);
break;
case T_Const:
get_const_expr((Const *) node, context, 0);
break;
case T_Param:
get_parameter((Param *) node, context);
break;
case T_Aggref:
get_agg_expr((Aggref *) node, context, (Aggref *) node);
break;
case T_GroupingFunc:
{
GroupingFunc *gexpr = (GroupingFunc *) node;
appendStringInfoString(buf, "GROUPING(");
get_rule_expr((Node *) gexpr->args, context, true);
appendStringInfoChar(buf, ')');
}
break;
case T_WindowFunc:
get_windowfunc_expr((WindowFunc *) node, context);
break;
case T_SubscriptingRef:
{
SubscriptingRef *sbsref = (SubscriptingRef *) node;
bool need_parens;
/*
* If the argument is a CaseTestExpr, we must be inside a
* FieldStore, ie, we are assigning to an element of an array
* within a composite column. Since we already punted on
* displaying the FieldStore's target information, just punt
* here too, and display only the assignment source
* expression.
*/
if (IsA(sbsref->refexpr, CaseTestExpr))
{
Assert(sbsref->refassgnexpr);
get_rule_expr((Node *) sbsref->refassgnexpr,
context, showimplicit);
break;
}
/*
* Parenthesize the argument unless it's a simple Var or a
* FieldSelect. (In particular, if it's another
* SubscriptingRef, we *must* parenthesize to avoid
* confusion.)
*/
need_parens = !IsA(sbsref->refexpr, Var) &&
!IsA(sbsref->refexpr, FieldSelect);
if (need_parens)
appendStringInfoChar(buf, '(');
get_rule_expr((Node *) sbsref->refexpr, context, showimplicit);
if (need_parens)
appendStringInfoChar(buf, ')');
/*
* If there's a refassgnexpr, we want to print the node in the
* format "container[subscripts] := refassgnexpr". This is
* not legal SQL, so decompilation of INSERT or UPDATE
* statements should always use processIndirection as part of
* the statement-level syntax. We should only see this when
* EXPLAIN tries to print the targetlist of a plan resulting
* from such a statement.
*/
if (sbsref->refassgnexpr)
{
Node *refassgnexpr;
/*
* Use processIndirection to print this node's subscripts
* as well as any additional field selections or
* subscripting in immediate descendants. It returns the
* RHS expr that is actually being "assigned".
*/
refassgnexpr = processIndirection(node, context);
appendStringInfoString(buf, " := ");
get_rule_expr(refassgnexpr, context, showimplicit);
}
else
{
/* Just an ordinary container fetch, so print subscripts */
printSubscripts(sbsref, context);
}
}
break;
case T_FuncExpr:
get_func_expr((FuncExpr *) node, context, showimplicit);
break;
case T_NamedArgExpr:
{
NamedArgExpr *na = (NamedArgExpr *) node;
appendStringInfo(buf, "%s => ", quote_identifier(na->name));
get_rule_expr((Node *) na->arg, context, showimplicit);
}
break;
case T_OpExpr:
get_oper_expr((OpExpr *) node, context);
break;
case T_DistinctExpr:
{
DistinctExpr *expr = (DistinctExpr *) node;
List *args = expr->args;
Node *arg1 = (Node *) linitial(args);
Node *arg2 = (Node *) lsecond(args);
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, '(');
get_rule_expr_paren(arg1, context, true, node);
appendStringInfoString(buf, " IS DISTINCT FROM ");
get_rule_expr_paren(arg2, context, true, node);
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
}
break;
case T_NullIfExpr:
{
NullIfExpr *nullifexpr = (NullIfExpr *) node;
appendStringInfoString(buf, "NULLIF(");
get_rule_expr((Node *) nullifexpr->args, context, true);
appendStringInfoChar(buf, ')');
}
break;
case T_ScalarArrayOpExpr:
{
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
List *args = expr->args;
Node *arg1 = (Node *) linitial(args);
Node *arg2 = (Node *) lsecond(args);
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, '(');
get_rule_expr_paren(arg1, context, true, node);
appendStringInfo(buf, " %s %s (",
generate_operator_name(expr->opno,
exprType(arg1),
get_base_element_type(exprType(arg2))),
expr->useOr ? "ANY" : "ALL");
get_rule_expr_paren(arg2, context, true, node);
/*
* There's inherent ambiguity in "x op ANY/ALL (y)" when y is
* a bare sub-SELECT. Since we're here, the sub-SELECT must
* be meant as a scalar sub-SELECT yielding an array value to
* be used in ScalarArrayOpExpr; but the grammar will
* preferentially interpret such a construct as an ANY/ALL
* SubLink. To prevent misparsing the output that way, insert
* a dummy coercion (which will be stripped by parse analysis,
* so no inefficiency is added in dump and reload). This is
* indeed most likely what the user wrote to get the construct
* accepted in the first place.
*/
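				/*
				 * Illustrative example (names and types are hypothetical):
				 * "x = ANY ((SELECT array_agg(y) FROM t))" would be emitted
				 * as "x = ANY ((SELECT array_agg(y) FROM t)::integer[])",
				 * which re-parses as a scalar sub-SELECT yielding an array
				 * rather than as an ANY SubLink.
				 */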
if (IsA(arg2, SubLink) &&
((SubLink *) arg2)->subLinkType == EXPR_SUBLINK)
appendStringInfo(buf, "::%s",
format_type_with_typemod(exprType(arg2),
exprTypmod(arg2)));
appendStringInfoChar(buf, ')');
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
}
break;
case T_BoolExpr:
{
BoolExpr *expr = (BoolExpr *) node;
Node *first_arg = linitial(expr->args);
ListCell *arg = list_second_cell(expr->args);
switch (expr->boolop)
{
case AND_EXPR:
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, '(');
get_rule_expr_paren(first_arg, context,
false, node);
while (arg)
{
appendStringInfoString(buf, " AND ");
get_rule_expr_paren((Node *) lfirst(arg), context,
false, node);
arg = lnext(expr->args, arg);
}
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
break;
case OR_EXPR:
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, '(');
get_rule_expr_paren(first_arg, context,
false, node);
while (arg)
{
appendStringInfoString(buf, " OR ");
get_rule_expr_paren((Node *) lfirst(arg), context,
false, node);
arg = lnext(expr->args, arg);
}
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
break;
case NOT_EXPR:
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, '(');
appendStringInfoString(buf, "NOT ");
get_rule_expr_paren(first_arg, context,
false, node);
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
break;
default:
elog(ERROR, "unrecognized boolop: %d",
(int) expr->boolop);
}
}
break;
case T_SubLink:
get_sublink_expr((SubLink *) node, context);
break;
case T_SubPlan:
{
SubPlan *subplan = (SubPlan *) node;
/*
* We cannot see an already-planned subplan in rule deparsing,
* only while EXPLAINing a query plan. We don't try to
* reconstruct the original SQL, just reference the subplan
* that appears elsewhere in EXPLAIN's result.
*/
if (subplan->useHashTable)
appendStringInfo(buf, "(hashed %s)", subplan->plan_name);
else
appendStringInfo(buf, "(%s)", subplan->plan_name);
}
break;
case T_AlternativeSubPlan:
{
AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
ListCell *lc;
/* As above, this can only happen during EXPLAIN */
appendStringInfoString(buf, "(alternatives: ");
foreach(lc, asplan->subplans)
{
SubPlan *splan = lfirst_node(SubPlan, lc);
if (splan->useHashTable)
appendStringInfo(buf, "hashed %s", splan->plan_name);
else
appendStringInfoString(buf, splan->plan_name);
if (lnext(asplan->subplans, lc))
appendStringInfoString(buf, " or ");
}
appendStringInfoChar(buf, ')');
}
break;
case T_FieldSelect:
{
FieldSelect *fselect = (FieldSelect *) node;
Node *arg = (Node *) fselect->arg;
int fno = fselect->fieldnum;
const char *fieldname;
bool need_parens;
/*
				 * Parenthesize the argument unless it's a SubscriptingRef or
* another FieldSelect. Note in particular that it would be
* WRONG to not parenthesize a Var argument; simplicity is not
* the issue here, having the right number of names is.
*/
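				/*
				 * For instance (names hypothetical), a Var "c" referencing a
				 * composite column must be printed as "(c).f": a bare "c.f"
				 * would re-parse as column f of a table named c, not as
				 * field f of the composite value c.
				 */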
need_parens = !IsA(arg, SubscriptingRef) &&
!IsA(arg, FieldSelect);
if (need_parens)
appendStringInfoChar(buf, '(');
get_rule_expr(arg, context, true);
if (need_parens)
appendStringInfoChar(buf, ')');
/*
* Get and print the field name.
*/
fieldname = get_name_for_var_field((Var *) arg, fno,
0, context);
appendStringInfo(buf, ".%s", quote_identifier(fieldname));
}
break;
case T_FieldStore:
{
FieldStore *fstore = (FieldStore *) node;
bool need_parens;
/*
* There is no good way to represent a FieldStore as real SQL,
* so decompilation of INSERT or UPDATE statements should
* always use processIndirection as part of the
* statement-level syntax. We should only get here when
* EXPLAIN tries to print the targetlist of a plan resulting
* from such a statement. The plan case is even harder than
* ordinary rules would be, because the planner tries to
* collapse multiple assignments to the same field or subfield
* into one FieldStore; so we can see a list of target fields
* not just one, and the arguments could be FieldStores
* themselves. We don't bother to try to print the target
* field names; we just print the source arguments, with a
* ROW() around them if there's more than one. This isn't
* terribly complete, but it's probably good enough for
* EXPLAIN's purposes; especially since anything more would be
* either hopelessly confusing or an even poorer
* representation of what the plan is actually doing.
*/
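				/*
				 * For example (hypothetical statement), an UPDATE assigning
				 * to two subfields of one composite column may reach EXPLAIN
				 * as a single FieldStore and is shown here as
				 * "ROW(newval1, newval2)" rather than as real SQL.
				 */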
need_parens = (list_length(fstore->newvals) != 1);
if (need_parens)
appendStringInfoString(buf, "ROW(");
get_rule_expr((Node *) fstore->newvals, context, showimplicit);
if (need_parens)
appendStringInfoChar(buf, ')');
}
break;
case T_RelabelType:
{
RelabelType *relabel = (RelabelType *) node;
Node *arg = (Node *) relabel->arg;
if (relabel->relabelformat == COERCE_IMPLICIT_CAST &&
!showimplicit)
{
/* don't show the implicit cast */
get_rule_expr_paren(arg, context, false, node);
}
else
{
get_coercion_expr(arg, context,
relabel->resulttype,
relabel->resulttypmod,
node);
}
}
break;
case T_CoerceViaIO:
{
CoerceViaIO *iocoerce = (CoerceViaIO *) node;
Node *arg = (Node *) iocoerce->arg;
if (iocoerce->coerceformat == COERCE_IMPLICIT_CAST &&
!showimplicit)
{
/* don't show the implicit cast */
get_rule_expr_paren(arg, context, false, node);
}
else
{
get_coercion_expr(arg, context,
iocoerce->resulttype,
-1,
node);
}
}
break;
case T_ArrayCoerceExpr:
{
ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
Node *arg = (Node *) acoerce->arg;
if (acoerce->coerceformat == COERCE_IMPLICIT_CAST &&
!showimplicit)
{
/* don't show the implicit cast */
get_rule_expr_paren(arg, context, false, node);
}
else
{
get_coercion_expr(arg, context,
acoerce->resulttype,
acoerce->resulttypmod,
node);
}
}
break;
case T_ConvertRowtypeExpr:
{
ConvertRowtypeExpr *convert = (ConvertRowtypeExpr *) node;
Node *arg = (Node *) convert->arg;
if (convert->convertformat == COERCE_IMPLICIT_CAST &&
!showimplicit)
{
/* don't show the implicit cast */
get_rule_expr_paren(arg, context, false, node);
}
else
{
get_coercion_expr(arg, context,
convert->resulttype, -1,
node);
}
}
break;
case T_CollateExpr:
{
CollateExpr *collate = (CollateExpr *) node;
Node *arg = (Node *) collate->arg;
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, '(');
get_rule_expr_paren(arg, context, showimplicit, node);
appendStringInfo(buf, " COLLATE %s",
generate_collation_name(collate->collOid));
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
}
break;
case T_CaseExpr:
{
CaseExpr *caseexpr = (CaseExpr *) node;
ListCell *temp;
appendContextKeyword(context, "CASE",
0, PRETTYINDENT_VAR, 0);
if (caseexpr->arg)
{
appendStringInfoChar(buf, ' ');
get_rule_expr((Node *) caseexpr->arg, context, true);
}
foreach(temp, caseexpr->args)
{
CaseWhen *when = (CaseWhen *) lfirst(temp);
Node *w = (Node *) when->expr;
if (caseexpr->arg)
{
/*
* The parser should have produced WHEN clauses of the
* form "CaseTestExpr = RHS", possibly with an
* implicit coercion inserted above the CaseTestExpr.
* For accurate decompilation of rules it's essential
* that we show just the RHS. However in an
* expression that's been through the optimizer, the
* WHEN clause could be almost anything (since the
* equality operator could have been expanded into an
* inline function). If we don't recognize the form
* of the WHEN clause, just punt and display it as-is.
*/
if (IsA(w, OpExpr))
{
List *args = ((OpExpr *) w)->args;
if (list_length(args) == 2 &&
IsA(strip_implicit_coercions(linitial(args)),
CaseTestExpr))
w = (Node *) lsecond(args);
}
}
if (!PRETTY_INDENT(context))
appendStringInfoChar(buf, ' ');
appendContextKeyword(context, "WHEN ",
0, 0, 0);
get_rule_expr(w, context, false);
appendStringInfoString(buf, " THEN ");
get_rule_expr((Node *) when->result, context, true);
}
if (!PRETTY_INDENT(context))
appendStringInfoChar(buf, ' ');
appendContextKeyword(context, "ELSE ",
0, 0, 0);
get_rule_expr((Node *) caseexpr->defresult, context, true);
if (!PRETTY_INDENT(context))
appendStringInfoChar(buf, ' ');
appendContextKeyword(context, "END",
-PRETTYINDENT_VAR, 0, 0);
}
break;
case T_CaseTestExpr:
{
/*
* Normally we should never get here, since for expressions
* that can contain this node type we attempt to avoid
* recursing to it. But in an optimized expression we might
* be unable to avoid that (see comments for CaseExpr). If we
* do see one, print it as CASE_TEST_EXPR.
*/
appendStringInfoString(buf, "CASE_TEST_EXPR");
}
break;
case T_ArrayExpr:
{
ArrayExpr *arrayexpr = (ArrayExpr *) node;
appendStringInfoString(buf, "ARRAY[");
get_rule_expr((Node *) arrayexpr->elements, context, true);
appendStringInfoChar(buf, ']');
/*
* If the array isn't empty, we assume its elements are
* coerced to the desired type. If it's empty, though, we
* need an explicit coercion to the array type.
*/
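				/*
				 * For example, an empty array of integer elements (element
				 * type assumed for illustration) prints as
				 * "ARRAY[]::integer[]", while "ARRAY[1, 2, 3]" needs no cast.
				 */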
if (arrayexpr->elements == NIL)
appendStringInfo(buf, "::%s",
format_type_with_typemod(arrayexpr->array_typeid, -1));
}
break;
case T_RowExpr:
{
RowExpr *rowexpr = (RowExpr *) node;
TupleDesc tupdesc = NULL;
ListCell *arg;
int i;
char *sep;
/*
* If it's a named type and not RECORD, we may have to skip
* dropped columns and/or claim there are NULLs for added
* columns.
*/
if (rowexpr->row_typeid != RECORDOID)
{
tupdesc = lookup_rowtype_tupdesc(rowexpr->row_typeid, -1);
Assert(list_length(rowexpr->args) <= tupdesc->natts);
}
/*
* SQL99 allows "ROW" to be omitted when there is more than
* one column, but for simplicity we always print it.
*/
appendStringInfoString(buf, "ROW(");
sep = "";
i = 0;
foreach(arg, rowexpr->args)
{
Node *e = (Node *) lfirst(arg);
if (tupdesc == NULL ||
!TupleDescAttr(tupdesc, i)->attisdropped)
{
appendStringInfoString(buf, sep);
/* Whole-row Vars need special treatment here */
get_rule_expr_toplevel(e, context, true);
sep = ", ";
}
i++;
}
if (tupdesc != NULL)
{
while (i < tupdesc->natts)
{
if (!TupleDescAttr(tupdesc, i)->attisdropped)
{
appendStringInfoString(buf, sep);
appendStringInfoString(buf, "NULL");
sep = ", ";
}
i++;
}
ReleaseTupleDesc(tupdesc);
}
appendStringInfoChar(buf, ')');
if (rowexpr->row_format == COERCE_EXPLICIT_CAST)
appendStringInfo(buf, "::%s",
format_type_with_typemod(rowexpr->row_typeid, -1));
}
break;
case T_RowCompareExpr:
{
RowCompareExpr *rcexpr = (RowCompareExpr *) node;
ListCell *arg;
char *sep;
/*
* SQL99 allows "ROW" to be omitted when there is more than
* one column, but for simplicity we always print it.
*/
appendStringInfoString(buf, "(ROW(");
sep = "";
foreach(arg, rcexpr->largs)
{
Node *e = (Node *) lfirst(arg);
appendStringInfoString(buf, sep);
get_rule_expr(e, context, true);
sep = ", ";
}
/*
* We assume that the name of the first-column operator will
* do for all the rest too. This is definitely open to
* failure, eg if some but not all operators were renamed
* since the construct was parsed, but there seems no way to
* be perfect.
*/
appendStringInfo(buf, ") %s ROW(",
generate_operator_name(linitial_oid(rcexpr->opnos),
exprType(linitial(rcexpr->largs)),
exprType(linitial(rcexpr->rargs))));
sep = "";
foreach(arg, rcexpr->rargs)
{
Node *e = (Node *) lfirst(arg);
appendStringInfoString(buf, sep);
get_rule_expr(e, context, true);
sep = ", ";
}
appendStringInfoString(buf, "))");
}
break;
case T_CoalesceExpr:
{
CoalesceExpr *coalesceexpr = (CoalesceExpr *) node;
appendStringInfoString(buf, "COALESCE(");
get_rule_expr((Node *) coalesceexpr->args, context, true);
appendStringInfoChar(buf, ')');
}
break;
case T_MinMaxExpr:
{
MinMaxExpr *minmaxexpr = (MinMaxExpr *) node;
switch (minmaxexpr->op)
{
case IS_GREATEST:
appendStringInfoString(buf, "GREATEST(");
break;
case IS_LEAST:
appendStringInfoString(buf, "LEAST(");
break;
}
get_rule_expr((Node *) minmaxexpr->args, context, true);
appendStringInfoChar(buf, ')');
}
break;
case T_SQLValueFunction:
{
SQLValueFunction *svf = (SQLValueFunction *) node;
/*
* Note: this code knows that typmod for time, timestamp, and
* timestamptz just prints as integer.
*/
switch (svf->op)
{
case SVFOP_CURRENT_DATE:
appendStringInfoString(buf, "CURRENT_DATE");
break;
case SVFOP_CURRENT_TIME:
appendStringInfoString(buf, "CURRENT_TIME");
break;
case SVFOP_CURRENT_TIME_N:
appendStringInfo(buf, "CURRENT_TIME(%d)", svf->typmod);
break;
case SVFOP_CURRENT_TIMESTAMP:
appendStringInfoString(buf, "CURRENT_TIMESTAMP");
break;
case SVFOP_CURRENT_TIMESTAMP_N:
appendStringInfo(buf, "CURRENT_TIMESTAMP(%d)",
svf->typmod);
break;
case SVFOP_LOCALTIME:
appendStringInfoString(buf, "LOCALTIME");
break;
case SVFOP_LOCALTIME_N:
appendStringInfo(buf, "LOCALTIME(%d)", svf->typmod);
break;
case SVFOP_LOCALTIMESTAMP:
appendStringInfoString(buf, "LOCALTIMESTAMP");
break;
case SVFOP_LOCALTIMESTAMP_N:
appendStringInfo(buf, "LOCALTIMESTAMP(%d)",
svf->typmod);
break;
case SVFOP_CURRENT_ROLE:
appendStringInfoString(buf, "CURRENT_ROLE");
break;
case SVFOP_CURRENT_USER:
appendStringInfoString(buf, "CURRENT_USER");
break;
case SVFOP_USER:
appendStringInfoString(buf, "USER");
break;
case SVFOP_SESSION_USER:
appendStringInfoString(buf, "SESSION_USER");
break;
case SVFOP_CURRENT_CATALOG:
appendStringInfoString(buf, "CURRENT_CATALOG");
break;
case SVFOP_CURRENT_SCHEMA:
appendStringInfoString(buf, "CURRENT_SCHEMA");
break;
}
}
break;
case T_XmlExpr:
{
XmlExpr *xexpr = (XmlExpr *) node;
bool needcomma = false;
ListCell *arg;
ListCell *narg;
Const *con;
switch (xexpr->op)
{
case IS_XMLCONCAT:
appendStringInfoString(buf, "XMLCONCAT(");
break;
case IS_XMLELEMENT:
appendStringInfoString(buf, "XMLELEMENT(");
break;
case IS_XMLFOREST:
appendStringInfoString(buf, "XMLFOREST(");
break;
case IS_XMLPARSE:
appendStringInfoString(buf, "XMLPARSE(");
break;
case IS_XMLPI:
appendStringInfoString(buf, "XMLPI(");
break;
case IS_XMLROOT:
appendStringInfoString(buf, "XMLROOT(");
break;
case IS_XMLSERIALIZE:
appendStringInfoString(buf, "XMLSERIALIZE(");
break;
case IS_DOCUMENT:
break;
}
if (xexpr->op == IS_XMLPARSE || xexpr->op == IS_XMLSERIALIZE)
{
if (xexpr->xmloption == XMLOPTION_DOCUMENT)
appendStringInfoString(buf, "DOCUMENT ");
else
appendStringInfoString(buf, "CONTENT ");
}
if (xexpr->name)
{
appendStringInfo(buf, "NAME %s",
quote_identifier(map_xml_name_to_sql_identifier(xexpr->name)));
needcomma = true;
}
if (xexpr->named_args)
{
if (xexpr->op != IS_XMLFOREST)
{
if (needcomma)
appendStringInfoString(buf, ", ");
appendStringInfoString(buf, "XMLATTRIBUTES(");
needcomma = false;
}
forboth(arg, xexpr->named_args, narg, xexpr->arg_names)
{
Node *e = (Node *) lfirst(arg);
char *argname = strVal(lfirst(narg));
if (needcomma)
appendStringInfoString(buf, ", ");
get_rule_expr((Node *) e, context, true);
appendStringInfo(buf, " AS %s",
quote_identifier(map_xml_name_to_sql_identifier(argname)));
needcomma = true;
}
if (xexpr->op != IS_XMLFOREST)
appendStringInfoChar(buf, ')');
}
if (xexpr->args)
{
if (needcomma)
appendStringInfoString(buf, ", ");
switch (xexpr->op)
{
case IS_XMLCONCAT:
case IS_XMLELEMENT:
case IS_XMLFOREST:
case IS_XMLPI:
case IS_XMLSERIALIZE:
/* no extra decoration needed */
get_rule_expr((Node *) xexpr->args, context, true);
break;
case IS_XMLPARSE:
Assert(list_length(xexpr->args) == 2);
get_rule_expr((Node *) linitial(xexpr->args),
context, true);
con = lsecond_node(Const, xexpr->args);
Assert(!con->constisnull);
if (DatumGetBool(con->constvalue))
appendStringInfoString(buf,
" PRESERVE WHITESPACE");
else
appendStringInfoString(buf,
" STRIP WHITESPACE");
break;
case IS_XMLROOT:
Assert(list_length(xexpr->args) == 3);
get_rule_expr((Node *) linitial(xexpr->args),
context, true);
appendStringInfoString(buf, ", VERSION ");
con = (Const *) lsecond(xexpr->args);
if (IsA(con, Const) &&
con->constisnull)
appendStringInfoString(buf, "NO VALUE");
else
get_rule_expr((Node *) con, context, false);
con = lthird_node(Const, xexpr->args);
if (con->constisnull)
/* suppress STANDALONE NO VALUE */ ;
else
{
switch (DatumGetInt32(con->constvalue))
{
case XML_STANDALONE_YES:
appendStringInfoString(buf,
", STANDALONE YES");
break;
case XML_STANDALONE_NO:
appendStringInfoString(buf,
", STANDALONE NO");
break;
case XML_STANDALONE_NO_VALUE:
appendStringInfoString(buf,
", STANDALONE NO VALUE");
break;
default:
break;
}
}
break;
case IS_DOCUMENT:
get_rule_expr_paren((Node *) xexpr->args, context, false, node);
break;
}
}
if (xexpr->op == IS_XMLSERIALIZE)
appendStringInfo(buf, " AS %s",
format_type_with_typemod(xexpr->type,
xexpr->typmod));
if (xexpr->op == IS_DOCUMENT)
appendStringInfoString(buf, " IS DOCUMENT");
else
appendStringInfoChar(buf, ')');
}
break;
case T_NullTest:
{
NullTest *ntest = (NullTest *) node;
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, '(');
get_rule_expr_paren((Node *) ntest->arg, context, true, node);
/*
* For scalar inputs, we prefer to print as IS [NOT] NULL,
* which is shorter and traditional. If it's a rowtype input
* but we're applying a scalar test, must print IS [NOT]
* DISTINCT FROM NULL to be semantically correct.
*/
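				/*
				 * For example (column name hypothetical), a scalar null test
				 * on a composite-typed column c deparses as
				 * "c IS NOT DISTINCT FROM NULL"; plain "c IS NULL" is the
				 * row-wise form, which is true whenever every field of c is
				 * null.
				 */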
if (ntest->argisrow ||
!type_is_rowtype(exprType((Node *) ntest->arg)))
{
switch (ntest->nulltesttype)
{
case IS_NULL:
appendStringInfoString(buf, " IS NULL");
break;
case IS_NOT_NULL:
appendStringInfoString(buf, " IS NOT NULL");
break;
default:
elog(ERROR, "unrecognized nulltesttype: %d",
(int) ntest->nulltesttype);
}
}
else
{
switch (ntest->nulltesttype)
{
case IS_NULL:
appendStringInfoString(buf, " IS NOT DISTINCT FROM NULL");
break;
case IS_NOT_NULL:
appendStringInfoString(buf, " IS DISTINCT FROM NULL");
break;
default:
elog(ERROR, "unrecognized nulltesttype: %d",
(int) ntest->nulltesttype);
}
}
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
}
break;
case T_BooleanTest:
{
BooleanTest *btest = (BooleanTest *) node;
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, '(');
get_rule_expr_paren((Node *) btest->arg, context, false, node);
switch (btest->booltesttype)
{
case IS_TRUE:
appendStringInfoString(buf, " IS TRUE");
break;
case IS_NOT_TRUE:
appendStringInfoString(buf, " IS NOT TRUE");
break;
case IS_FALSE:
appendStringInfoString(buf, " IS FALSE");
break;
case IS_NOT_FALSE:
appendStringInfoString(buf, " IS NOT FALSE");
break;
case IS_UNKNOWN:
appendStringInfoString(buf, " IS UNKNOWN");
break;
case IS_NOT_UNKNOWN:
appendStringInfoString(buf, " IS NOT UNKNOWN");
break;
default:
elog(ERROR, "unrecognized booltesttype: %d",
(int) btest->booltesttype);
}
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
}
break;
case T_CoerceToDomain:
{
CoerceToDomain *ctest = (CoerceToDomain *) node;
Node *arg = (Node *) ctest->arg;
if (ctest->coercionformat == COERCE_IMPLICIT_CAST &&
!showimplicit)
{
/* don't show the implicit cast */
get_rule_expr(arg, context, false);
}
else
{
get_coercion_expr(arg, context,
ctest->resulttype,
ctest->resulttypmod,
node);
}
}
break;
case T_CoerceToDomainValue:
appendStringInfoString(buf, "VALUE");
break;
case T_SetToDefault:
appendStringInfoString(buf, "DEFAULT");
break;
case T_CurrentOfExpr:
{
CurrentOfExpr *cexpr = (CurrentOfExpr *) node;
if (cexpr->cursor_name)
appendStringInfo(buf, "CURRENT OF %s",
quote_identifier(cexpr->cursor_name));
else
appendStringInfo(buf, "CURRENT OF $%d",
cexpr->cursor_param);
}
break;
case T_NextValueExpr:
{
NextValueExpr *nvexpr = (NextValueExpr *) node;
/*
* This isn't exactly nextval(), but that seems close enough
* for EXPLAIN's purposes.
*/
appendStringInfoString(buf, "nextval(");
simple_quote_literal(buf,
generate_relation_name(nvexpr->seqid,
NIL));
appendStringInfoChar(buf, ')');
}
break;
case T_InferenceElem:
{
InferenceElem *iexpr = (InferenceElem *) node;
bool save_varprefix;
bool need_parens;
/*
				 * InferenceElem can only refer to the target relation, so a
* prefix is not useful, and indeed would cause parse errors.
*/
save_varprefix = context->varprefix;
context->varprefix = false;
/*
* Parenthesize the element unless it's a simple Var or a bare
* function call. Follows pg_get_indexdef_worker().
*/
need_parens = !IsA(iexpr->expr, Var);
if (IsA(iexpr->expr, FuncExpr) &&
((FuncExpr *) iexpr->expr)->funcformat ==
COERCE_EXPLICIT_CALL)
need_parens = false;
if (need_parens)
appendStringInfoChar(buf, '(');
get_rule_expr((Node *) iexpr->expr,
context, false);
if (need_parens)
appendStringInfoChar(buf, ')');
context->varprefix = save_varprefix;
if (iexpr->infercollid)
appendStringInfo(buf, " COLLATE %s",
generate_collation_name(iexpr->infercollid));
/* Add the operator class name, if not default */
if (iexpr->inferopclass)
{
Oid inferopclass = iexpr->inferopclass;
Oid inferopcinputtype = get_opclass_input_type(iexpr->inferopclass);
get_opclass_name(inferopclass, inferopcinputtype, buf);
}
}
break;
case T_PartitionBoundSpec:
{
PartitionBoundSpec *spec = (PartitionBoundSpec *) node;
ListCell *cell;
char *sep;
if (spec->is_default)
{
appendStringInfoString(buf, "DEFAULT");
break;
}
switch (spec->strategy)
{
case PARTITION_STRATEGY_HASH:
Assert(spec->modulus > 0 && spec->remainder >= 0);
Assert(spec->modulus > spec->remainder);
appendStringInfoString(buf, "FOR VALUES");
appendStringInfo(buf, " WITH (modulus %d, remainder %d)",
spec->modulus, spec->remainder);
break;
case PARTITION_STRATEGY_LIST:
Assert(spec->listdatums != NIL);
appendStringInfoString(buf, "FOR VALUES IN (");
sep = "";
foreach(cell, spec->listdatums)
{
Const *val = castNode(Const, lfirst(cell));
appendStringInfoString(buf, sep);
get_const_expr(val, context, -1);
sep = ", ";
}
appendStringInfoChar(buf, ')');
break;
case PARTITION_STRATEGY_RANGE:
Assert(spec->lowerdatums != NIL &&
spec->upperdatums != NIL &&
list_length(spec->lowerdatums) ==
list_length(spec->upperdatums));
appendStringInfo(buf, "FOR VALUES FROM %s TO %s",
get_range_partbound_string(spec->lowerdatums),
get_range_partbound_string(spec->upperdatums));
break;
default:
elog(ERROR, "unrecognized partition strategy: %d",
(int) spec->strategy);
break;
}
}
break;
case T_List:
{
char *sep;
ListCell *l;
sep = "";
foreach(l, (List *) node)
{
appendStringInfoString(buf, sep);
get_rule_expr((Node *) lfirst(l), context, showimplicit);
sep = ", ";
}
}
break;
case T_TableFunc:
get_tablefunc((TableFunc *) node, context, showimplicit);
break;
default:
elog(ERROR, "unrecognized node type: %d", (int) nodeTag(node));
break;
}
}
/*
* get_rule_expr_toplevel - Parse back a toplevel expression
*
* Same as get_rule_expr(), except that if the expr is just a Var, we pass
* istoplevel = true not false to get_variable(). This causes whole-row Vars
* to get printed with decoration that will prevent expansion of "*".
* We need to use this in contexts such as ROW() and VALUES(), where the
* parser would expand "foo.*" appearing at top level. (In principle we'd
* use this in get_target_list() too, but that has additional worries about
* whether to print AS, so it needs to invoke get_variable() directly anyway.)
*/
static void
get_rule_expr_toplevel(Node *node, deparse_context *context,
bool showimplicit)
{
if (node && IsA(node, Var))
(void) get_variable((Var *) node, 0, true, context);
else
get_rule_expr(node, context, showimplicit);
}
/*
* get_rule_expr_funccall - Parse back a function-call expression
*
* Same as get_rule_expr(), except that we guarantee that the output will
* look like a function call, or like one of the things the grammar treats as
* equivalent to a function call (see the func_expr_windowless production).
* This is needed in places where the grammar uses func_expr_windowless and
* you can't substitute a parenthesized a_expr. If what we have isn't going
* to look like a function call, wrap it in a dummy CAST() expression, which
* will satisfy the grammar --- and, indeed, is likely what the user wrote to
* produce such a thing.
*/
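/*
 * For example (expression and result type are illustrative only), a bare
 * OpExpr such as "x + 1" does not look like a function call, so it would be
 * emitted as "CAST(x + 1 AS integer)" to keep the output grammatical.
 */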
static void
get_rule_expr_funccall(Node *node, deparse_context *context,
bool showimplicit)
{
if (looks_like_function(node))
get_rule_expr(node, context, showimplicit);
else
{
StringInfo buf = context->buf;
appendStringInfoString(buf, "CAST(");
/* no point in showing any top-level implicit cast */
get_rule_expr(node, context, false);
appendStringInfo(buf, " AS %s)",
format_type_with_typemod(exprType(node),
exprTypmod(node)));
}
}
/*
* Helper function to identify node types that satisfy func_expr_windowless.
* If in doubt, "false" is always a safe answer.
*/
static bool
looks_like_function(Node *node)
{
if (node == NULL)
return false; /* probably shouldn't happen */
switch (nodeTag(node))
{
case T_FuncExpr:
/* OK, unless it's going to deparse as a cast */
return (((FuncExpr *) node)->funcformat == COERCE_EXPLICIT_CALL);
case T_NullIfExpr:
case T_CoalesceExpr:
case T_MinMaxExpr:
case T_SQLValueFunction:
case T_XmlExpr:
/* these are all accepted by func_expr_common_subexpr */
return true;
default:
break;
}
return false;
}
/*
* get_oper_expr - Parse back an OpExpr node
*/
static void
get_oper_expr(OpExpr *expr, deparse_context *context)
{
StringInfo buf = context->buf;
Oid opno = expr->opno;
List *args = expr->args;
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, '(');
if (list_length(args) == 2)
{
/* binary operator */
Node *arg1 = (Node *) linitial(args);
Node *arg2 = (Node *) lsecond(args);
get_rule_expr_paren(arg1, context, true, (Node *) expr);
appendStringInfo(buf, " %s ",
generate_operator_name(opno,
exprType(arg1),
exprType(arg2)));
get_rule_expr_paren(arg2, context, true, (Node *) expr);
}
else
{
/* unary operator --- but which side? */
Node *arg = (Node *) linitial(args);
HeapTuple tp;
Form_pg_operator optup;
tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno));
if (!HeapTupleIsValid(tp))
elog(ERROR, "cache lookup failed for operator %u", opno);
optup = (Form_pg_operator) GETSTRUCT(tp);
switch (optup->oprkind)
{
case 'l':
appendStringInfo(buf, "%s ",
generate_operator_name(opno,
InvalidOid,
exprType(arg)));
get_rule_expr_paren(arg, context, true, (Node *) expr);
break;
case 'r':
get_rule_expr_paren(arg, context, true, (Node *) expr);
appendStringInfo(buf, " %s",
generate_operator_name(opno,
exprType(arg),
InvalidOid));
break;
default:
elog(ERROR, "bogus oprkind: %d", optup->oprkind);
}
ReleaseSysCache(tp);
}
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
}
/*
* get_func_expr - Parse back a FuncExpr node
*/
static void
get_func_expr(FuncExpr *expr, deparse_context *context,
bool showimplicit)
{
StringInfo buf = context->buf;
Oid funcoid = expr->funcid;
Oid argtypes[FUNC_MAX_ARGS];
int nargs;
List *argnames;
bool use_variadic;
ListCell *l;
/*
* If the function call came from an implicit coercion, then just show the
* first argument --- unless caller wants to see implicit coercions.
*/
if (expr->funcformat == COERCE_IMPLICIT_CAST && !showimplicit)
{
get_rule_expr_paren((Node *) linitial(expr->args), context,
false, (Node *) expr);
return;
}
/*
* If the function call came from a cast, then show the first argument
* plus an explicit cast operation.
*/
if (expr->funcformat == COERCE_EXPLICIT_CAST ||
expr->funcformat == COERCE_IMPLICIT_CAST)
{
Node *arg = linitial(expr->args);
Oid rettype = expr->funcresulttype;
int32 coercedTypmod;
/* Get the typmod if this is a length-coercion function */
(void) exprIsLengthCoercion((Node *) expr, &coercedTypmod);
get_coercion_expr(arg, context,
rettype, coercedTypmod,
(Node *) expr);
return;
}
/*
* Normal function: display as proname(args). First we need to extract
* the argument datatypes.
*/
if (list_length(expr->args) > FUNC_MAX_ARGS)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_ARGUMENTS),
errmsg("too many arguments")));
nargs = 0;
argnames = NIL;
foreach(l, expr->args)
{
Node *arg = (Node *) lfirst(l);
if (IsA(arg, NamedArgExpr))
argnames = lappend(argnames, ((NamedArgExpr *) arg)->name);
argtypes[nargs] = exprType(arg);
nargs++;
}
appendStringInfo(buf, "%s(",
generate_function_name(funcoid, nargs,
argnames, argtypes,
expr->funcvariadic,
&use_variadic,
context->special_exprkind));
nargs = 0;
foreach(l, expr->args)
{
if (nargs++ > 0)
appendStringInfoString(buf, ", ");
if (use_variadic && lnext(expr->args, l) == NULL)
appendStringInfoString(buf, "VARIADIC ");
get_rule_expr((Node *) lfirst(l), context, true);
}
appendStringInfoChar(buf, ')');
}
/*
* get_agg_expr - Parse back an Aggref node
*/
static void
get_agg_expr(Aggref *aggref, deparse_context *context,
Aggref *original_aggref)
{
StringInfo buf = context->buf;
Oid argtypes[FUNC_MAX_ARGS];
int nargs;
bool use_variadic;
/*
* For a combining aggregate, we look up and deparse the corresponding
* partial aggregate instead. This is necessary because our input
* argument list has been replaced; the new argument list always has just
* one element, which will point to a partial Aggref that supplies us with
* transition states to combine.
*/
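	/*
	 * Illustrative note (aggregate and column names are hypothetical): this
	 * is why EXPLAIN VERBOSE of a parallel aggregate can show "sum(x)" at
	 * the Finalize step while the worker-level step shows "PARTIAL sum(x)".
	 */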
if (DO_AGGSPLIT_COMBINE(aggref->aggsplit))
{
TargetEntry *tle;
Assert(list_length(aggref->args) == 1);
tle = linitial_node(TargetEntry, aggref->args);
resolve_special_varno((Node *) tle->expr, context,
get_agg_combine_expr, original_aggref);
return;
}
/*
* Mark as PARTIAL, if appropriate. We look to the original aggref so as
* to avoid printing this when recursing from the code just above.
*/
if (DO_AGGSPLIT_SKIPFINAL(original_aggref->aggsplit))
appendStringInfoString(buf, "PARTIAL ");
/* Extract the argument types as seen by the parser */
nargs = get_aggregate_argtypes(aggref, argtypes);
/* Print the aggregate name, schema-qualified if needed */
appendStringInfo(buf, "%s(%s",
generate_function_name(aggref->aggfnoid, nargs,
NIL, argtypes,
aggref->aggvariadic,
&use_variadic,
context->special_exprkind),
(aggref->aggdistinct != NIL) ? "DISTINCT " : "");
if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
{
/*
* Ordered-set aggregates do not use "*" syntax. Also, we needn't
* worry about inserting VARIADIC. So we can just dump the direct
* args as-is.
*/
Assert(!aggref->aggvariadic);
get_rule_expr((Node *) aggref->aggdirectargs, context, true);
Assert(aggref->aggorder != NIL);
appendStringInfoString(buf, ") WITHIN GROUP (ORDER BY ");
get_rule_orderby(aggref->aggorder, aggref->args, false, context);
}
else
{
/* aggstar can be set only in zero-argument aggregates */
if (aggref->aggstar)
appendStringInfoChar(buf, '*');
else
{
ListCell *l;
int i;
i = 0;
foreach(l, aggref->args)
{
TargetEntry *tle = (TargetEntry *) lfirst(l);
Node *arg = (Node *) tle->expr;
Assert(!IsA(arg, NamedArgExpr));
if (tle->resjunk)
continue;
if (i++ > 0)
appendStringInfoString(buf, ", ");
if (use_variadic && i == nargs)
appendStringInfoString(buf, "VARIADIC ");
get_rule_expr(arg, context, true);
}
}
if (aggref->aggorder != NIL)
{
appendStringInfoString(buf, " ORDER BY ");
get_rule_orderby(aggref->aggorder, aggref->args, false, context);
}
}
if (aggref->aggfilter != NULL)
{
appendStringInfoString(buf, ") FILTER (WHERE ");
get_rule_expr((Node *) aggref->aggfilter, context, false);
}
appendStringInfoChar(buf, ')');
}
/*
* This is a helper function for get_agg_expr(). It's used when we deparse
* a combining Aggref; resolve_special_varno locates the corresponding partial
* Aggref and then calls this.
*/
static void
get_agg_combine_expr(Node *node, deparse_context *context, void *callback_arg)
{
Aggref *aggref;
Aggref *original_aggref = callback_arg;
if (!IsA(node, Aggref))
elog(ERROR, "combining Aggref does not point to an Aggref");
aggref = (Aggref *) node;
get_agg_expr(aggref, context, original_aggref);
}
/*
* get_windowfunc_expr - Parse back a WindowFunc node
*/
static void
get_windowfunc_expr(WindowFunc *wfunc, deparse_context *context)
{
StringInfo buf = context->buf;
Oid argtypes[FUNC_MAX_ARGS];
int nargs;
List *argnames;
ListCell *l;
if (list_length(wfunc->args) > FUNC_MAX_ARGS)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_ARGUMENTS),
errmsg("too many arguments")));
nargs = 0;
argnames = NIL;
foreach(l, wfunc->args)
{
Node *arg = (Node *) lfirst(l);
if (IsA(arg, NamedArgExpr))
argnames = lappend(argnames, ((NamedArgExpr *) arg)->name);
argtypes[nargs] = exprType(arg);
nargs++;
}
appendStringInfo(buf, "%s(",
generate_function_name(wfunc->winfnoid, nargs,
argnames, argtypes,
false, NULL,
context->special_exprkind));
/* winstar can be set only in zero-argument aggregates */
if (wfunc->winstar)
appendStringInfoChar(buf, '*');
else
get_rule_expr((Node *) wfunc->args, context, true);
if (wfunc->aggfilter != NULL)
{
appendStringInfoString(buf, ") FILTER (WHERE ");
get_rule_expr((Node *) wfunc->aggfilter, context, false);
}
appendStringInfoString(buf, ") OVER ");
foreach(l, context->windowClause)
{
WindowClause *wc = (WindowClause *) lfirst(l);
if (wc->winref == wfunc->winref)
{
if (wc->name)
appendStringInfoString(buf, quote_identifier(wc->name));
else
get_rule_windowspec(wc, context->windowTList, context);
break;
}
}
if (l == NULL)
{
if (context->windowClause)
elog(ERROR, "could not find window clause for winref %u",
wfunc->winref);
/*
* In EXPLAIN, we don't have window context information available, so
* we have to settle for this:
*/
appendStringInfoString(buf, "(?)");
}
}
/* ----------
* get_coercion_expr
*
* Make a string representation of a value coerced to a specific type
* ----------
*/
static void
get_coercion_expr(Node *arg, deparse_context *context,
Oid resulttype, int32 resulttypmod,
Node *parentNode)
{
StringInfo buf = context->buf;
/*
* Since parse_coerce.c doesn't immediately collapse application of
* length-coercion functions to constants, what we'll typically see in
* such cases is a Const with typmod -1 and a length-coercion function
* right above it. Avoid generating redundant output. However, beware of
* suppressing casts when the user actually wrote something like
* 'foo'::text::char(3).
*
* Note: it might seem that we are missing the possibility of needing to
* print a COLLATE clause for such a Const. However, a Const could only
* have nondefault collation in a post-constant-folding tree, in which the
* length coercion would have been folded too. See also the special
* handling of CollateExpr in coerce_to_target_type(): any collation
* marking will be above the coercion node, not below it.
*/
if (arg && IsA(arg, Const) &&
((Const *) arg)->consttype == resulttype &&
((Const *) arg)->consttypmod == -1)
{
/* Show the constant without normal ::typename decoration */
get_const_expr((Const *) arg, context, -1);
}
else
{
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, '(');
get_rule_expr_paren(arg, context, false, parentNode);
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
}
/*
* Never emit resulttype(arg) functional notation. A pg_proc entry could
* take precedence, and a resulttype in pg_temp would require schema
* qualification that format_type_with_typemod() would usually omit. We've
* standardized on arg::resulttype, but CAST(arg AS resulttype) notation
* would work fine.
*/
appendStringInfo(buf, "::%s",
format_type_with_typemod(resulttype, resulttypmod));
}
/* ----------
* get_const_expr
*
* Make a string representation of a Const
*
* showtype can be -1 to never show "::typename" decoration, or +1 to always
* show it, or 0 to show it only if the constant wouldn't be assumed to be
* the right type by default.
*
* If the Const's collation isn't default for its type, show that too.
* We mustn't do this when showtype is -1 (since that means the caller will
* print "::typename", and we can't put a COLLATE clause in between). It's
* caller's responsibility that collation isn't missed in such cases.
* ----------
*/
static void
get_const_expr(Const *constval, deparse_context *context, int showtype)
{
StringInfo buf = context->buf;
Oid typoutput;
bool typIsVarlena;
char *extval;
bool needlabel = false;
if (constval->constisnull)
{
/*
* Always label the type of a NULL constant to prevent misdecisions
* about type when reparsing.
*/
appendStringInfoString(buf, "NULL");
if (showtype >= 0)
{
appendStringInfo(buf, "::%s",
format_type_with_typemod(constval->consttype,
constval->consttypmod));
get_const_collation(constval, context);
}
return;
}
getTypeOutputInfo(constval->consttype,
&typoutput, &typIsVarlena);
extval = OidOutputFunctionCall(typoutput, constval->constvalue);
switch (constval->consttype)
{
case INT4OID:
/*
* INT4 can be printed without any decoration, unless it is
* negative; in that case print it as '-nnn'::integer to ensure
* that the output will re-parse as a constant, not as a constant
* plus operator. In most cases we could get away with printing
* (-nnn) instead, because of the way that gram.y handles negative
* literals; but that doesn't work for INT_MIN, and it doesn't
* seem that much prettier anyway.
*/
if (extval[0] != '-')
appendStringInfoString(buf, extval);
else
{
appendStringInfo(buf, "'%s'", extval);
needlabel = true; /* we must attach a cast */
}
break;
case NUMERICOID:
/*
* NUMERIC can be printed without quotes if it looks like a float
* constant (not an integer, and not Infinity or NaN) and doesn't
* have a leading sign (for the same reason as for INT4).
*/
if (isdigit((unsigned char) extval[0]) &&
strcspn(extval, "eE.") != strlen(extval))
{
appendStringInfoString(buf, extval);
}
else
{
appendStringInfo(buf, "'%s'", extval);
needlabel = true; /* we must attach a cast */
}
break;
case BOOLOID:
if (strcmp(extval, "t") == 0)
appendStringInfoString(buf, "true");
else
appendStringInfoString(buf, "false");
break;
default:
simple_quote_literal(buf, extval);
break;
}
pfree(extval);
if (showtype < 0)
return;
/*
* For showtype == 0, append ::typename unless the constant will be
* implicitly typed as the right type when it is read in.
*
* XXX this code has to be kept in sync with the behavior of the parser,
* especially make_const.
*/
switch (constval->consttype)
{
case BOOLOID:
case UNKNOWNOID:
/* These types can be left unlabeled */
needlabel = false;
break;
case INT4OID:
/* We determined above whether a label is needed */
break;
case NUMERICOID:
/*
* Float-looking constants will be typed as numeric, which we
* checked above; but if there's a nondefault typmod we need to
* show it.
*/
needlabel |= (constval->consttypmod >= 0);
break;
default:
needlabel = true;
break;
}
if (needlabel || showtype > 0)
appendStringInfo(buf, "::%s",
format_type_with_typemod(constval->consttype,
constval->consttypmod));
get_const_collation(constval, context);
}
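/*
 * Illustrative examples (not part of the original file) of get_const_expr()
 * output, assuming default typmod and collation and showtype == 0:
 *     integer 42    ->  42
 *     integer -42   ->  '-42'::integer
 *     numeric 1.5   ->  1.5
 *     boolean true  ->  true
 *     text 'abc'    ->  'abc'::text
 *     NULL::integer ->  NULL::integer
 */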
/*
* helper for get_const_expr: append COLLATE if needed
*/
static void
get_const_collation(Const *constval, deparse_context *context)
{
StringInfo buf = context->buf;
if (OidIsValid(constval->constcollid))
{
Oid typcollation = get_typcollation(constval->consttype);
if (constval->constcollid != typcollation)
{
appendStringInfo(buf, " COLLATE %s",
generate_collation_name(constval->constcollid));
}
}
}
/*
* simple_quote_literal - Format a string as a SQL literal, append to buf
*/
static void
simple_quote_literal(StringInfo buf, const char *val)
{
const char *valptr;
/*
* We form the string literal according to the prevailing setting of
* standard_conforming_strings; we never use E''. User is responsible for
* making sure result is used correctly.
*/
appendStringInfoChar(buf, '\'');
for (valptr = val; *valptr; valptr++)
{
char ch = *valptr;
if (SQL_STR_DOUBLE(ch, !standard_conforming_strings))
appendStringInfoChar(buf, ch);
appendStringInfoChar(buf, ch);
}
appendStringInfoChar(buf, '\'');
}
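/*
 * Minimal sketch (not part of the original file): with
 * standard_conforming_strings enabled, simple_quote_literal() doubles only
 * embedded single quotes, so the value  it's  is appended as  'it''s' ;
 * with the GUC disabled, backslashes are doubled as well. The helper below
 * is hypothetical and only demonstrates the call pattern.
 */
static void
example_quote_literal(StringInfo buf)
{
	simple_quote_literal(buf, "it's");	/* appends 'it''s' (or the backslash-doubled form) */
}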
/* ----------
* get_sublink_expr - Parse back a sublink
* ----------
*/
static void
get_sublink_expr(SubLink *sublink, deparse_context *context)
{
StringInfo buf = context->buf;
Query *query = (Query *) (sublink->subselect);
char *opname = NULL;
bool need_paren;
if (sublink->subLinkType == ARRAY_SUBLINK)
appendStringInfoString(buf, "ARRAY(");
else
appendStringInfoChar(buf, '(');
/*
* Note that we print the name of only the first operator, when there are
* multiple combining operators. This is an approximation that could go
* wrong in various scenarios (operators in different schemas, renamed
* operators, etc) but there is not a whole lot we can do about it, since
* the syntax allows only one operator to be shown.
*/
if (sublink->testexpr)
{
if (IsA(sublink->testexpr, OpExpr))
{
/* single combining operator */
OpExpr *opexpr = (OpExpr *) sublink->testexpr;
get_rule_expr(linitial(opexpr->args), context, true);
opname = generate_operator_name(opexpr->opno,
exprType(linitial(opexpr->args)),
exprType(lsecond(opexpr->args)));
}
else if (IsA(sublink->testexpr, BoolExpr))
{
/* multiple combining operators, = or <> cases */
char *sep;
ListCell *l;
appendStringInfoChar(buf, '(');
sep = "";
foreach(l, ((BoolExpr *) sublink->testexpr)->args)
{
OpExpr *opexpr = lfirst_node(OpExpr, l);
appendStringInfoString(buf, sep);
get_rule_expr(linitial(opexpr->args), context, true);
if (!opname)
opname = generate_operator_name(opexpr->opno,
exprType(linitial(opexpr->args)),
exprType(lsecond(opexpr->args)));
sep = ", ";
}
appendStringInfoChar(buf, ')');
}
else if (IsA(sublink->testexpr, RowCompareExpr))
{
/* multiple combining operators, < <= > >= cases */
RowCompareExpr *rcexpr = (RowCompareExpr *) sublink->testexpr;
appendStringInfoChar(buf, '(');
get_rule_expr((Node *) rcexpr->largs, context, true);
opname = generate_operator_name(linitial_oid(rcexpr->opnos),
exprType(linitial(rcexpr->largs)),
exprType(linitial(rcexpr->rargs)));
appendStringInfoChar(buf, ')');
}
else
elog(ERROR, "unrecognized testexpr type: %d",
(int) nodeTag(sublink->testexpr));
}
need_paren = true;
switch (sublink->subLinkType)
{
case EXISTS_SUBLINK:
appendStringInfoString(buf, "EXISTS ");
break;
case ANY_SUBLINK:
if (strcmp(opname, "=") == 0) /* Represent = ANY as IN */
appendStringInfoString(buf, " IN ");
else
appendStringInfo(buf, " %s ANY ", opname);
break;
case ALL_SUBLINK:
appendStringInfo(buf, " %s ALL ", opname);
break;
case ROWCOMPARE_SUBLINK:
appendStringInfo(buf, " %s ", opname);
break;
case EXPR_SUBLINK:
case MULTIEXPR_SUBLINK:
case ARRAY_SUBLINK:
need_paren = false;
break;
case CTE_SUBLINK: /* shouldn't occur in a SubLink */
default:
elog(ERROR, "unrecognized sublink type: %d",
(int) sublink->subLinkType);
break;
}
if (need_paren)
appendStringInfoChar(buf, '(');
get_query_def(query, buf, context->namespaces, NULL,
context->prettyFlags, context->wrapColumn,
context->indentLevel);
if (need_paren)
appendStringInfoString(buf, "))");
else
appendStringInfoChar(buf, ')');
}
/* ----------
* get_tablefunc - Parse back a table function
* ----------
*/
static void
get_tablefunc(TableFunc *tf, deparse_context *context, bool showimplicit)
{
StringInfo buf = context->buf;
/* XMLTABLE is the only existing implementation. */
appendStringInfoString(buf, "XMLTABLE(");
if (tf->ns_uris != NIL)
{
ListCell *lc1,
*lc2;
bool first = true;
appendStringInfoString(buf, "XMLNAMESPACES (");
forboth(lc1, tf->ns_uris, lc2, tf->ns_names)
{
Node *expr = (Node *) lfirst(lc1);
Value *ns_node = (Value *) lfirst(lc2);
if (!first)
appendStringInfoString(buf, ", ");
else
first = false;
if (ns_node != NULL)
{
get_rule_expr(expr, context, showimplicit);
appendStringInfo(buf, " AS %s", strVal(ns_node));
}
else
{
appendStringInfoString(buf, "DEFAULT ");
get_rule_expr(expr, context, showimplicit);
}
}
appendStringInfoString(buf, "), ");
}
appendStringInfoChar(buf, '(');
get_rule_expr((Node *) tf->rowexpr, context, showimplicit);
appendStringInfoString(buf, ") PASSING (");
get_rule_expr((Node *) tf->docexpr, context, showimplicit);
appendStringInfoChar(buf, ')');
if (tf->colexprs != NIL)
{
ListCell *l1;
ListCell *l2;
ListCell *l3;
ListCell *l4;
ListCell *l5;
int colnum = 0;
appendStringInfoString(buf, " COLUMNS ");
forfive(l1, tf->colnames, l2, tf->coltypes, l3, tf->coltypmods,
l4, tf->colexprs, l5, tf->coldefexprs)
{
char *colname = strVal(lfirst(l1));
Oid typid = lfirst_oid(l2);
int32 typmod = lfirst_int(l3);
Node *colexpr = (Node *) lfirst(l4);
Node *coldefexpr = (Node *) lfirst(l5);
bool ordinality = (tf->ordinalitycol == colnum);
bool notnull = bms_is_member(colnum, tf->notnulls);
if (colnum > 0)
appendStringInfoString(buf, ", ");
colnum++;
appendStringInfo(buf, "%s %s", quote_identifier(colname),
ordinality ? "FOR ORDINALITY" :
format_type_with_typemod(typid, typmod));
if (ordinality)
continue;
if (coldefexpr != NULL)
{
appendStringInfoString(buf, " DEFAULT (");
get_rule_expr((Node *) coldefexpr, context, showimplicit);
appendStringInfoChar(buf, ')');
}
if (colexpr != NULL)
{
appendStringInfoString(buf, " PATH (");
get_rule_expr((Node *) colexpr, context, showimplicit);
appendStringInfoChar(buf, ')');
}
if (notnull)
appendStringInfoString(buf, " NOT NULL");
}
}
appendStringInfoChar(buf, ')');
}
/* ----------
* get_from_clause - Parse back a FROM clause
*
* "prefix" is the keyword that denotes the start of the list of FROM
* elements. It is FROM when used to parse back SELECT and UPDATE, but
* is USING when parsing back DELETE.
* ----------
*/
static void
get_from_clause(Query *query, const char *prefix, deparse_context *context)
{
StringInfo buf = context->buf;
bool first = true;
ListCell *l;
/*
* We use the query's jointree as a guide to what to print. However, we
* must ignore auto-added RTEs that are marked not inFromCl. (These can
* only appear at the top level of the jointree, so it's sufficient to
* check here.) This check also ensures we ignore the rule pseudo-RTEs
* for NEW and OLD.
*/
foreach(l, query->jointree->fromlist)
{
Node *jtnode = (Node *) lfirst(l);
if (IsA(jtnode, RangeTblRef))
{
int varno = ((RangeTblRef *) jtnode)->rtindex;
RangeTblEntry *rte = rt_fetch(varno, query->rtable);
if (!rte->inFromCl)
continue;
}
if (first)
{
appendContextKeyword(context, prefix,
-PRETTYINDENT_STD, PRETTYINDENT_STD, 2);
first = false;
get_from_clause_item(jtnode, query, context);
}
else
{
StringInfoData itembuf;
appendStringInfoString(buf, ", ");
/*
* Put the new FROM item's text into itembuf so we can decide
* after we've got it whether or not it needs to go on a new line.
*/
initStringInfo(&itembuf);
context->buf = &itembuf;
get_from_clause_item(jtnode, query, context);
/* Restore context's output buffer */
context->buf = buf;
/* Consider line-wrapping if enabled */
if (PRETTY_INDENT(context) && context->wrapColumn >= 0)
{
/* Does the new item start with a new line? */
if (itembuf.len > 0 && itembuf.data[0] == '\n')
{
/* If so, we shouldn't add anything */
/* instead, remove any trailing spaces currently in buf */
removeStringInfoSpaces(buf);
}
else
{
char *trailing_nl;
/* Locate the start of the current line in the buffer */
trailing_nl = strrchr(buf->data, '\n');
if (trailing_nl == NULL)
trailing_nl = buf->data;
else
trailing_nl++;
/*
* Add a newline, plus some indentation, if the new item
* would cause an overflow.
*/
if (strlen(trailing_nl) + itembuf.len > context->wrapColumn)
appendContextKeyword(context, "", -PRETTYINDENT_STD,
PRETTYINDENT_STD,
PRETTYINDENT_VAR);
}
}
/* Add the new item */
appendBinaryStringInfo(buf, itembuf.data, itembuf.len);
/* clean up */
pfree(itembuf.data);
}
}
}
static void
get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
{
StringInfo buf = context->buf;
deparse_namespace *dpns = (deparse_namespace *) linitial(context->namespaces);
if (IsA(jtnode, RangeTblRef))
{
int varno = ((RangeTblRef *) jtnode)->rtindex;
RangeTblEntry *rte = rt_fetch(varno, query->rtable);
char *refname = get_rtable_name(varno, context);
deparse_columns *colinfo = deparse_columns_fetch(varno, dpns);
RangeTblFunction *rtfunc1 = NULL;
bool printalias;
if (rte->lateral)
appendStringInfoString(buf, "LATERAL ");
/* Print the FROM item proper */
switch (rte->rtekind)
{
case RTE_RELATION:
/* Normal relation RTE */
appendStringInfo(buf, "%s%s",
only_marker(rte),
generate_relation_name(rte->relid,
context->namespaces));
break;
case RTE_SUBQUERY:
/* Subquery RTE */
appendStringInfoChar(buf, '(');
get_query_def(rte->subquery, buf, context->namespaces, NULL,
context->prettyFlags, context->wrapColumn,
context->indentLevel);
appendStringInfoChar(buf, ')');
break;
case RTE_FUNCTION:
/* Function RTE */
rtfunc1 = (RangeTblFunction *) linitial(rte->functions);
/*
* Omit ROWS FROM() syntax for just one function, unless it
* has both a coldeflist and WITH ORDINALITY. If it has both,
* we must use ROWS FROM() syntax to avoid ambiguity about
* whether the coldeflist includes the ordinality column.
*/
if (list_length(rte->functions) == 1 &&
(rtfunc1->funccolnames == NIL || !rte->funcordinality))
{
get_rule_expr_funccall(rtfunc1->funcexpr, context, true);
/* we'll print the coldeflist below, if it has one */
}
else
{
bool all_unnest;
ListCell *lc;
/*
* If all the function calls in the list are to unnest,
* and none need a coldeflist, then collapse the list back
* down to UNNEST(args). (If we had more than one
* built-in unnest function, this would get more
* difficult.)
*
* XXX This is pretty ugly, since it makes not-terribly-
* future-proof assumptions about what the parser would do
* with the output; but the alternative is to emit our
* nonstandard ROWS FROM() notation for what might have
* been a perfectly spec-compliant multi-argument
* UNNEST().
*/
all_unnest = true;
foreach(lc, rte->functions)
{
RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
if (!IsA(rtfunc->funcexpr, FuncExpr) ||
((FuncExpr *) rtfunc->funcexpr)->funcid != F_ARRAY_UNNEST ||
rtfunc->funccolnames != NIL)
{
all_unnest = false;
break;
}
}
if (all_unnest)
{
List *allargs = NIL;
foreach(lc, rte->functions)
{
RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
List *args = ((FuncExpr *) rtfunc->funcexpr)->args;
allargs = list_concat(allargs, args);
}
appendStringInfoString(buf, "UNNEST(");
get_rule_expr((Node *) allargs, context, true);
appendStringInfoChar(buf, ')');
}
else
{
int funcno = 0;
appendStringInfoString(buf, "ROWS FROM(");
foreach(lc, rte->functions)
{
RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
if (funcno > 0)
appendStringInfoString(buf, ", ");
get_rule_expr_funccall(rtfunc->funcexpr, context, true);
if (rtfunc->funccolnames != NIL)
{
/* Reconstruct the column definition list */
appendStringInfoString(buf, " AS ");
get_from_clause_coldeflist(rtfunc,
NULL,
context);
}
funcno++;
}
appendStringInfoChar(buf, ')');
}
/* prevent printing duplicate coldeflist below */
rtfunc1 = NULL;
}
if (rte->funcordinality)
appendStringInfoString(buf, " WITH ORDINALITY");
break;
case RTE_TABLEFUNC:
get_tablefunc(rte->tablefunc, context, true);
break;
case RTE_VALUES:
/* Values list RTE */
appendStringInfoChar(buf, '(');
get_values_def(rte->values_lists, context);
appendStringInfoChar(buf, ')');
break;
case RTE_CTE:
appendStringInfoString(buf, quote_identifier(rte->ctename));
break;
default:
elog(ERROR, "unrecognized RTE kind: %d", (int) rte->rtekind);
break;
}
/* Print the relation alias, if needed */
printalias = false;
if (rte->alias != NULL)
{
/* Always print alias if user provided one */
printalias = true;
}
else if (colinfo->printaliases)
{
/* Always print alias if we need to print column aliases */
printalias = true;
}
else if (rte->rtekind == RTE_RELATION)
{
/*
* No need to print alias if it's same as relation name (this
* would normally be the case, but not if set_rtable_names had to
* resolve a conflict).
*/
if (strcmp(refname, get_relation_name(rte->relid)) != 0)
printalias = true;
}
else if (rte->rtekind == RTE_FUNCTION)
{
/*
* For a function RTE, always print alias. This covers possible
* renaming of the function and/or instability of the
* FigureColname rules for things that aren't simple functions.
* Note we'd need to force it anyway for the columndef list case.
*/
printalias = true;
}
else if (rte->rtekind == RTE_VALUES)
{
/* Alias is syntactically required for VALUES */
printalias = true;
}
else if (rte->rtekind == RTE_CTE)
{
/*
* No need to print alias if it's same as CTE name (this would
* normally be the case, but not if set_rtable_names had to
* resolve a conflict).
*/
if (strcmp(refname, rte->ctename) != 0)
printalias = true;
}
if (printalias)
appendStringInfo(buf, " %s", quote_identifier(refname));
/* Print the column definitions or aliases, if needed */
if (rtfunc1 && rtfunc1->funccolnames != NIL)
{
/* Reconstruct the columndef list, which is also the aliases */
get_from_clause_coldeflist(rtfunc1, colinfo, context);
}
else
{
/* Else print column aliases as needed */
get_column_alias_list(colinfo, context);
}
/* Tablesample clause must go after any alias */
if (rte->rtekind == RTE_RELATION && rte->tablesample)
get_tablesample_def(rte->tablesample, context);
}
else if (IsA(jtnode, JoinExpr))
{
JoinExpr *j = (JoinExpr *) jtnode;
deparse_columns *colinfo = deparse_columns_fetch(j->rtindex, dpns);
bool need_paren_on_right;
need_paren_on_right = PRETTY_PAREN(context) &&
!IsA(j->rarg, RangeTblRef) &&
!(IsA(j->rarg, JoinExpr) && ((JoinExpr *) j->rarg)->alias != NULL);
if (!PRETTY_PAREN(context) || j->alias != NULL)
appendStringInfoChar(buf, '(');
get_from_clause_item(j->larg, query, context);
switch (j->jointype)
{
case JOIN_INNER:
if (j->quals)
appendContextKeyword(context, " JOIN ",
-PRETTYINDENT_STD,
PRETTYINDENT_STD,
PRETTYINDENT_JOIN);
else
appendContextKeyword(context, " CROSS JOIN ",
-PRETTYINDENT_STD,
PRETTYINDENT_STD,
PRETTYINDENT_JOIN);
break;
case JOIN_LEFT:
appendContextKeyword(context, " LEFT JOIN ",
-PRETTYINDENT_STD,
PRETTYINDENT_STD,
PRETTYINDENT_JOIN);
break;
case JOIN_FULL:
appendContextKeyword(context, " FULL JOIN ",
-PRETTYINDENT_STD,
PRETTYINDENT_STD,
PRETTYINDENT_JOIN);
break;
case JOIN_RIGHT:
appendContextKeyword(context, " RIGHT JOIN ",
-PRETTYINDENT_STD,
PRETTYINDENT_STD,
PRETTYINDENT_JOIN);
break;
default:
elog(ERROR, "unrecognized join type: %d",
(int) j->jointype);
}
if (need_paren_on_right)
appendStringInfoChar(buf, '(');
get_from_clause_item(j->rarg, query, context);
if (need_paren_on_right)
appendStringInfoChar(buf, ')');
if (j->usingClause)
{
ListCell *lc;
bool first = true;
appendStringInfoString(buf, " USING (");
/* Use the assigned names, not what's in usingClause */
foreach(lc, colinfo->usingNames)
{
char *colname = (char *) lfirst(lc);
if (first)
first = false;
else
appendStringInfoString(buf, ", ");
appendStringInfoString(buf, quote_identifier(colname));
}
appendStringInfoChar(buf, ')');
}
else if (j->quals)
{
appendStringInfoString(buf, " ON ");
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, '(');
get_rule_expr(j->quals, context, false);
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
}
else if (j->jointype != JOIN_INNER)
{
/* If we didn't say CROSS JOIN above, we must provide an ON */
appendStringInfoString(buf, " ON TRUE");
}
if (!PRETTY_PAREN(context) || j->alias != NULL)
appendStringInfoChar(buf, ')');
/* Yes, it's correct to put alias after the right paren ... */
if (j->alias != NULL)
{
/*
* Note that it's correct to emit an alias clause if and only if
* there was one originally. Otherwise we'd be converting a named
* join to unnamed or vice versa, which creates semantic
* subtleties we don't want. However, we might print a different
* alias name than was there originally.
*/
appendStringInfo(buf, " %s",
quote_identifier(get_rtable_name(j->rtindex,
context)));
get_column_alias_list(colinfo, context);
}
}
else
elog(ERROR, "unrecognized node type: %d",
(int) nodeTag(jtnode));
}
/*
* get_column_alias_list - print column alias list for an RTE
*
* Caller must already have printed the relation's alias name.
*/
static void
get_column_alias_list(deparse_columns *colinfo, deparse_context *context)
{
StringInfo buf = context->buf;
int i;
bool first = true;
/* Don't print aliases if not needed */
if (!colinfo->printaliases)
return;
for (i = 0; i < colinfo->num_new_cols; i++)
{
char *colname = colinfo->new_colnames[i];
if (first)
{
appendStringInfoChar(buf, '(');
first = false;
}
else
appendStringInfoString(buf, ", ");
appendStringInfoString(buf, quote_identifier(colname));
}
if (!first)
appendStringInfoChar(buf, ')');
}
/*
* get_from_clause_coldeflist - reproduce FROM clause coldeflist
*
* When printing a top-level coldeflist (which is syntactically also the
* relation's column alias list), use column names from colinfo. But when
* printing a coldeflist embedded inside ROWS FROM(), we prefer to use the
* original coldeflist's names, which are available in rtfunc->funccolnames.
* Pass NULL for colinfo to select the latter behavior.
*
* The coldeflist is appended immediately (no space) to buf. Caller is
* responsible for ensuring that an alias or AS is present before it.
*/
static void
get_from_clause_coldeflist(RangeTblFunction *rtfunc,
deparse_columns *colinfo,
deparse_context *context)
{
StringInfo buf = context->buf;
ListCell *l1;
ListCell *l2;
ListCell *l3;
ListCell *l4;
int i;
appendStringInfoChar(buf, '(');
i = 0;
forfour(l1, rtfunc->funccoltypes,
l2, rtfunc->funccoltypmods,
l3, rtfunc->funccolcollations,
l4, rtfunc->funccolnames)
{
Oid atttypid = lfirst_oid(l1);
int32 atttypmod = lfirst_int(l2);
Oid attcollation = lfirst_oid(l3);
char *attname;
if (colinfo)
attname = colinfo->colnames[i];
else
attname = strVal(lfirst(l4));
Assert(attname); /* shouldn't be any dropped columns here */
if (i > 0)
appendStringInfoString(buf, ", ");
appendStringInfo(buf, "%s %s",
quote_identifier(attname),
format_type_with_typemod(atttypid, atttypmod));
if (OidIsValid(attcollation) &&
attcollation != get_typcollation(atttypid))
appendStringInfo(buf, " COLLATE %s",
generate_collation_name(attcollation));
i++;
}
appendStringInfoChar(buf, ')');
}
/*
* get_tablesample_def - print a TableSampleClause
*/
static void
get_tablesample_def(TableSampleClause *tablesample, deparse_context *context)
{
StringInfo buf = context->buf;
Oid argtypes[1];
int nargs;
ListCell *l;
/*
* We should qualify the handler's function name if it wouldn't be
* resolved by lookup in the current search path.
*/
argtypes[0] = INTERNALOID;
appendStringInfo(buf, " TABLESAMPLE %s (",
generate_function_name(tablesample->tsmhandler, 1,
NIL, argtypes,
false, NULL, EXPR_KIND_NONE));
nargs = 0;
foreach(l, tablesample->args)
{
if (nargs++ > 0)
appendStringInfoString(buf, ", ");
get_rule_expr((Node *) lfirst(l), context, false);
}
appendStringInfoChar(buf, ')');
if (tablesample->repeatable != NULL)
{
appendStringInfoString(buf, " REPEATABLE (");
get_rule_expr((Node *) tablesample->repeatable, context, false);
appendStringInfoChar(buf, ')');
}
}
/*
* get_opclass_name - fetch name of an index operator class
*
* The opclass name is appended (after a space) to buf.
*
* Output is suppressed if the opclass is the default for the given
* actual_datatype. (If you don't want this behavior, just pass
* InvalidOid for actual_datatype.)
*/
static void
get_opclass_name(Oid opclass, Oid actual_datatype,
StringInfo buf)
{
HeapTuple ht_opc;
Form_pg_opclass opcrec;
char *opcname;
char *nspname;
ht_opc = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclass));
if (!HeapTupleIsValid(ht_opc))
elog(ERROR, "cache lookup failed for opclass %u", opclass);
opcrec = (Form_pg_opclass) GETSTRUCT(ht_opc);
if (!OidIsValid(actual_datatype) ||
GetDefaultOpClass(actual_datatype, opcrec->opcmethod) != opclass)
{
/* Okay, we need the opclass name. Do we need to qualify it? */
opcname = NameStr(opcrec->opcname);
if (OpclassIsVisible(opclass))
appendStringInfo(buf, " %s", quote_identifier(opcname));
else
{
nspname = get_namespace_name(opcrec->opcnamespace);
appendStringInfo(buf, " %s.%s",
quote_identifier(nspname),
quote_identifier(opcname));
}
}
ReleaseSysCache(ht_opc);
}
/*
* generate_opclass_name
 * Compute the name to display for an opclass specified by OID
*
* The result includes all necessary quoting and schema-prefixing.
*/
char *
generate_opclass_name(Oid opclass)
{
StringInfoData buf;
initStringInfo(&buf);
get_opclass_name(opclass, InvalidOid, &buf);
return &buf.data[1]; /* get_opclass_name() prepends space */
}
/*
* processIndirection - take care of array and subfield assignment
*
* We strip any top-level FieldStore or assignment SubscriptingRef nodes that
* appear in the input, printing them as decoration for the base column
* name (which we assume the caller just printed). We might also need to
* strip CoerceToDomain nodes, but only ones that appear above assignment
* nodes.
*
* Returns the subexpression that's to be assigned.
*/
static Node *
processIndirection(Node *node, deparse_context *context)
{
StringInfo buf = context->buf;
CoerceToDomain *cdomain = NULL;
for (;;)
{
if (node == NULL)
break;
if (IsA(node, FieldStore))
{
FieldStore *fstore = (FieldStore *) node;
Oid typrelid;
char *fieldname;
/* lookup tuple type */
typrelid = get_typ_typrelid(fstore->resulttype);
if (!OidIsValid(typrelid))
elog(ERROR, "argument type %s of FieldStore is not a tuple type",
format_type_be(fstore->resulttype));
/*
* Print the field name. There should only be one target field in
* stored rules. There could be more than that in executable
* target lists, but this function cannot be used for that case.
*/
Assert(list_length(fstore->fieldnums) == 1);
fieldname = get_attname(typrelid,
linitial_int(fstore->fieldnums), false);
appendStringInfo(buf, ".%s", quote_identifier(fieldname));
/*
* We ignore arg since it should be an uninteresting reference to
* the target column or subcolumn.
*/
node = (Node *) linitial(fstore->newvals);
}
else if (IsA(node, SubscriptingRef))
{
SubscriptingRef *sbsref = (SubscriptingRef *) node;
if (sbsref->refassgnexpr == NULL)
break;
printSubscripts(sbsref, context);
/*
* We ignore refexpr since it should be an uninteresting reference
* to the target column or subcolumn.
*/
node = (Node *) sbsref->refassgnexpr;
}
else if (IsA(node, CoerceToDomain))
{
cdomain = (CoerceToDomain *) node;
/* If it's an explicit domain coercion, we're done */
if (cdomain->coercionformat != COERCE_IMPLICIT_CAST)
break;
/* Tentatively descend past the CoerceToDomain */
node = (Node *) cdomain->arg;
}
else
break;
}
/*
* If we descended past a CoerceToDomain whose argument turned out not to
* be a FieldStore or array assignment, back up to the CoerceToDomain.
* (This is not enough to be fully correct if there are nested implicit
* CoerceToDomains, but such cases shouldn't ever occur.)
*/
if (cdomain && node == (Node *) cdomain->arg)
node = (Node *) cdomain;
return node;
}
static void
printSubscripts(SubscriptingRef *sbsref, deparse_context *context)
{
StringInfo buf = context->buf;
ListCell *lowlist_item;
ListCell *uplist_item;
lowlist_item = list_head(sbsref->reflowerindexpr); /* could be NULL */
foreach(uplist_item, sbsref->refupperindexpr)
{
appendStringInfoChar(buf, '[');
if (lowlist_item)
{
/* If subexpression is NULL, get_rule_expr prints nothing */
get_rule_expr((Node *) lfirst(lowlist_item), context, false);
appendStringInfoChar(buf, ':');
lowlist_item = lnext(sbsref->reflowerindexpr, lowlist_item);
}
/* If subexpression is NULL, get_rule_expr prints nothing */
get_rule_expr((Node *) lfirst(uplist_item), context, false);
appendStringInfoChar(buf, ']');
}
}
/*
* quote_identifier - Quote an identifier only if needed
*
* When quotes are needed, we palloc the required space; slightly
* space-wasteful but well worth it for notational simplicity.
*/
const char *
quote_identifier(const char *ident)
{
/*
* Can avoid quoting if ident starts with a lowercase letter or underscore
* and contains only lowercase letters, digits, and underscores, *and* is
* not any SQL keyword. Otherwise, supply quotes.
*/
int nquotes = 0;
bool safe;
const char *ptr;
char *result;
char *optr;
/*
* would like to use <ctype.h> macros here, but they might yield unwanted
* locale-specific results...
*/
safe = ((ident[0] >= 'a' && ident[0] <= 'z') || ident[0] == '_');
for (ptr = ident; *ptr; ptr++)
{
char ch = *ptr;
if ((ch >= 'a' && ch <= 'z') ||
(ch >= '0' && ch <= '9') ||
(ch == '_'))
{
/* okay */
}
else
{
safe = false;
if (ch == '"')
nquotes++;
}
}
if (quote_all_identifiers)
safe = false;
if (safe)
{
/*
* Check for keyword. We quote keywords except for unreserved ones.
* (In some cases we could avoid quoting a col_name or type_func_name
* keyword, but it seems much harder than it's worth to tell that.)
*
* Note: ScanKeywordLookup() does case-insensitive comparison, but
* that's fine, since we already know we have all-lower-case.
*/
int kwnum = ScanKeywordLookup(ident, &ScanKeywords);
if (kwnum >= 0 && ScanKeywordCategories[kwnum] != UNRESERVED_KEYWORD)
safe = false;
}
if (safe)
return ident; /* no change needed */
result = (char *) palloc(strlen(ident) + nquotes + 2 + 1);
optr = result;
*optr++ = '"';
for (ptr = ident; *ptr; ptr++)
{
char ch = *ptr;
if (ch == '"')
*optr++ = '"';
*optr++ = ch;
}
*optr++ = '"';
*optr = '\0';
return result;
}
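/*
 * Minimal sketch (not part of the original file): quote_identifier()
 * returns its argument unchanged for names that are all lower case, start
 * with a letter or underscore, and are not reserved keywords; otherwise it
 * returns a palloc'd, double-quoted copy with embedded quotes doubled. The
 * helper below is hypothetical and only demonstrates the call pattern.
 */
static void
example_quote_identifiers(StringInfo buf)
{
	appendStringInfoString(buf, quote_identifier("foo_1"));			/* foo_1 */
	appendStringInfoChar(buf, ' ');
	appendStringInfoString(buf, quote_identifier("Mixed Case"));	/* "Mixed Case" */
	appendStringInfoChar(buf, ' ');
	appendStringInfoString(buf, quote_identifier("select"));		/* "select" */
}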
/*
* quote_qualified_identifier - Quote a possibly-qualified identifier
*
* Return a name of the form qualifier.ident, or just ident if qualifier
* is NULL, quoting each component if necessary. The result is palloc'd.
*/
char *
quote_qualified_identifier(const char *qualifier,
const char *ident)
{
StringInfoData buf;
initStringInfo(&buf);
if (qualifier)
appendStringInfo(&buf, "%s.", quote_identifier(qualifier));
appendStringInfoString(&buf, quote_identifier(ident));
return buf.data;
}
/*
* get_relation_name
* Get the unqualified name of a relation specified by OID
*
* This differs from the underlying get_rel_name() function in that it will
* throw error instead of silently returning NULL if the OID is bad.
*/
static char *
get_relation_name(Oid relid)
{
char *relname = get_rel_name(relid);
if (!relname)
elog(ERROR, "cache lookup failed for relation %u", relid);
return relname;
}
/*
* generate_relation_name
* Compute the name to display for a relation specified by OID
*
* The result includes all necessary quoting and schema-prefixing.
*
* If namespaces isn't NIL, it must be a list of deparse_namespace nodes.
* We will forcibly qualify the relation name if it equals any CTE name
* visible in the namespace list.
*/
static char *
generate_relation_name(Oid relid, List *namespaces)
{
HeapTuple tp;
Form_pg_class reltup;
bool need_qual;
ListCell *nslist;
char *relname;
char *nspname;
char *result;
tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
if (!HeapTupleIsValid(tp))
elog(ERROR, "cache lookup failed for relation %u", relid);
reltup = (Form_pg_class) GETSTRUCT(tp);
relname = NameStr(reltup->relname);
/* Check for conflicting CTE name */
need_qual = false;
foreach(nslist, namespaces)
{
deparse_namespace *dpns = (deparse_namespace *) lfirst(nslist);
ListCell *ctlist;
foreach(ctlist, dpns->ctes)
{
CommonTableExpr *cte = (CommonTableExpr *) lfirst(ctlist);
if (strcmp(cte->ctename, relname) == 0)
{
need_qual = true;
break;
}
}
if (need_qual)
break;
}
/* Otherwise, qualify the name if not visible in search path */
if (!need_qual)
need_qual = !RelationIsVisible(relid);
if (need_qual)
nspname = get_namespace_name(reltup->relnamespace);
else
nspname = NULL;
result = quote_qualified_identifier(nspname, relname);
ReleaseSysCache(tp);
return result;
}
/*
* generate_qualified_relation_name
* Compute the name to display for a relation specified by OID
*
* As above, but unconditionally schema-qualify the name.
*/
static char *
generate_qualified_relation_name(Oid relid)
{
HeapTuple tp;
Form_pg_class reltup;
char *relname;
char *nspname;
char *result;
tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
if (!HeapTupleIsValid(tp))
elog(ERROR, "cache lookup failed for relation %u", relid);
reltup = (Form_pg_class) GETSTRUCT(tp);
relname = NameStr(reltup->relname);
nspname = get_namespace_name(reltup->relnamespace);
if (!nspname)
elog(ERROR, "cache lookup failed for namespace %u",
reltup->relnamespace);
result = quote_qualified_identifier(nspname, relname);
ReleaseSysCache(tp);
return result;
}
/*
* generate_function_name
* Compute the name to display for a function specified by OID,
* given that it is being called with the specified actual arg names and
* types. (Those matter because of ambiguous-function resolution rules.)
*
* If we're dealing with a potentially variadic function (in practice, this
* means a FuncExpr or Aggref, not some other way of calling a function), then
* has_variadic must specify whether variadic arguments have been merged,
* and *use_variadic_p will be set to indicate whether to print VARIADIC in
* the output. For non-FuncExpr cases, has_variadic should be false and
* use_variadic_p can be NULL.
*
* The result includes all necessary quoting and schema-prefixing.
*/
static char *
generate_function_name(Oid funcid, int nargs, List *argnames, Oid *argtypes,
bool has_variadic, bool *use_variadic_p,
ParseExprKind special_exprkind)
{
char *result;
HeapTuple proctup;
Form_pg_proc procform;
char *proname;
bool use_variadic;
char *nspname;
FuncDetailCode p_result;
Oid p_funcid;
Oid p_rettype;
bool p_retset;
int p_nvargs;
Oid p_vatype;
Oid *p_true_typeids;
bool force_qualify = false;
proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
if (!HeapTupleIsValid(proctup))
elog(ERROR, "cache lookup failed for function %u", funcid);
procform = (Form_pg_proc) GETSTRUCT(proctup);
proname = NameStr(procform->proname);
/*
* Due to parser hacks to avoid needing to reserve CUBE, we need to force
* qualification in some special cases.
*/
if (special_exprkind == EXPR_KIND_GROUP_BY)
{
if (strcmp(proname, "cube") == 0 || strcmp(proname, "rollup") == 0)
force_qualify = true;
}
/*
* Determine whether VARIADIC should be printed. We must do this first
* since it affects the lookup rules in func_get_detail().
*
* We always print VARIADIC if the function has a merged variadic-array
* argument. Note that this is always the case for functions taking a
* VARIADIC argument type other than VARIADIC ANY. If we omitted VARIADIC
* and printed the array elements as separate arguments, the call could
* match a newer non-VARIADIC function.
*/
if (use_variadic_p)
{
/* Parser should not have set funcvariadic unless fn is variadic */
Assert(!has_variadic || OidIsValid(procform->provariadic));
use_variadic = has_variadic;
*use_variadic_p = use_variadic;
}
else
{
Assert(!has_variadic);
use_variadic = false;
}
/*
* The idea here is to schema-qualify only if the parser would fail to
* resolve the correct function given the unqualified func name with the
* specified argtypes and VARIADIC flag. But if we already decided to
* force qualification, then we can skip the lookup and pretend we didn't
* find it.
*/
if (!force_qualify)
p_result = func_get_detail(list_make1(makeString(proname)),
NIL, argnames, nargs, argtypes,
!use_variadic, true,
&p_funcid, &p_rettype,
&p_retset, &p_nvargs, &p_vatype,
&p_true_typeids, NULL);
else
{
p_result = FUNCDETAIL_NOTFOUND;
p_funcid = InvalidOid;
}
if ((p_result == FUNCDETAIL_NORMAL ||
p_result == FUNCDETAIL_AGGREGATE ||
p_result == FUNCDETAIL_WINDOWFUNC) &&
p_funcid == funcid)
nspname = NULL;
else
nspname = get_namespace_name(procform->pronamespace);
result = quote_qualified_identifier(nspname, proname);
ReleaseSysCache(proctup);
return result;
}
/*
* generate_operator_name
* Compute the name to display for an operator specified by OID,
* given that it is being called with the specified actual arg types.
* (Arg types matter because of ambiguous-operator resolution rules.
* Pass InvalidOid for unused arg of a unary operator.)
*
* The result includes all necessary quoting and schema-prefixing,
* plus the OPERATOR() decoration needed to use a qualified operator name
* in an expression.
*/
static char *
generate_operator_name(Oid operid, Oid arg1, Oid arg2)
{
StringInfoData buf;
HeapTuple opertup;
Form_pg_operator operform;
char *oprname;
char *nspname;
Operator p_result;
initStringInfo(&buf);
opertup = SearchSysCache1(OPEROID, ObjectIdGetDatum(operid));
if (!HeapTupleIsValid(opertup))
elog(ERROR, "cache lookup failed for operator %u", operid);
operform = (Form_pg_operator) GETSTRUCT(opertup);
oprname = NameStr(operform->oprname);
/*
* The idea here is to schema-qualify only if the parser would fail to
* resolve the correct operator given the unqualified op name with the
* specified argtypes.
*/
switch (operform->oprkind)
{
case 'b':
p_result = oper(NULL, list_make1(makeString(oprname)), arg1, arg2,
true, -1);
break;
case 'l':
p_result = left_oper(NULL, list_make1(makeString(oprname)), arg2,
true, -1);
break;
case 'r':
p_result = right_oper(NULL, list_make1(makeString(oprname)), arg1,
true, -1);
break;
default:
elog(ERROR, "unrecognized oprkind: %d", operform->oprkind);
p_result = NULL; /* keep compiler quiet */
break;
}
if (p_result != NULL && oprid(p_result) == operid)
nspname = NULL;
else
{
nspname = get_namespace_name(operform->oprnamespace);
appendStringInfo(&buf, "OPERATOR(%s.", quote_identifier(nspname));
}
appendStringInfoString(&buf, oprname);
if (nspname)
appendStringInfoChar(&buf, ')');
if (p_result != NULL)
ReleaseSysCache(p_result);
ReleaseSysCache(opertup);
return buf.data;
}
/*
* generate_operator_clause --- generate a binary-operator WHERE clause
*
* This is used for internally-generated-and-executed SQL queries, where
* precision is essential and readability is secondary. The basic
* requirement is to append "leftop op rightop" to buf, where leftop and
* rightop are given as strings and are assumed to yield types leftoptype
* and rightoptype; the operator is identified by OID. The complexity
* comes from needing to be sure that the parser will select the desired
* operator when the query is parsed. We always name the operator using
* OPERATOR(schema.op) syntax, so as to avoid search-path uncertainties.
* We have to emit casts too, if either input isn't already the input type
* of the operator; else we are at the mercy of the parser's heuristics for
* ambiguous-operator resolution. The caller must ensure that leftop and
* rightop are suitable arguments for a cast operation; it's best to insert
* parentheses if they aren't just variables or parameters.
*/
void
generate_operator_clause(StringInfo buf,
const char *leftop, Oid leftoptype,
Oid opoid,
const char *rightop, Oid rightoptype)
{
HeapTuple opertup;
Form_pg_operator operform;
char *oprname;
char *nspname;
opertup = SearchSysCache1(OPEROID, ObjectIdGetDatum(opoid));
if (!HeapTupleIsValid(opertup))
elog(ERROR, "cache lookup failed for operator %u", opoid);
operform = (Form_pg_operator) GETSTRUCT(opertup);
Assert(operform->oprkind == 'b');
oprname = NameStr(operform->oprname);
nspname = get_namespace_name(operform->oprnamespace);
appendStringInfoString(buf, leftop);
if (leftoptype != operform->oprleft)
add_cast_to(buf, operform->oprleft);
appendStringInfo(buf, " OPERATOR(%s.", quote_identifier(nspname));
appendStringInfoString(buf, oprname);
appendStringInfo(buf, ") %s", rightop);
if (rightoptype != operform->oprright)
add_cast_to(buf, operform->oprright);
ReleaseSysCache(opertup);
}
/*
* Add a cast specification to buf. We spell out the type name the hard way,
* intentionally not using format_type_be(). This is to avoid corner cases
* for CHARACTER, BIT, and perhaps other types, where specifying the type
* using SQL-standard syntax results in undesirable data truncation. By
* doing it this way we can be certain that the cast will have default (-1)
* target typmod.
*/
static void
add_cast_to(StringInfo buf, Oid typid)
{
HeapTuple typetup;
Form_pg_type typform;
char *typname;
char *nspname;
typetup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
if (!HeapTupleIsValid(typetup))
elog(ERROR, "cache lookup failed for type %u", typid);
typform = (Form_pg_type) GETSTRUCT(typetup);
typname = NameStr(typform->typname);
nspname = get_namespace_name(typform->typnamespace);
appendStringInfo(buf, "::%s.%s",
quote_identifier(nspname), quote_identifier(typname));
ReleaseSysCache(typetup);
}
/*
* generate_qualified_type_name
* Compute the name to display for a type specified by OID
*
* This is different from format_type_be() in that we unconditionally
* schema-qualify the name. That also means no special syntax for
* SQL-standard type names ... although in current usage, this should
* only get used for domains, so such cases wouldn't occur anyway.
*/
static char *
generate_qualified_type_name(Oid typid)
{
HeapTuple tp;
Form_pg_type typtup;
char *typname;
char *nspname;
char *result;
tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
if (!HeapTupleIsValid(tp))
elog(ERROR, "cache lookup failed for type %u", typid);
typtup = (Form_pg_type) GETSTRUCT(tp);
typname = NameStr(typtup->typname);
nspname = get_namespace_name(typtup->typnamespace);
if (!nspname)
elog(ERROR, "cache lookup failed for namespace %u",
typtup->typnamespace);
result = quote_qualified_identifier(nspname, typname);
ReleaseSysCache(tp);
return result;
}
/*
* generate_collation_name
* Compute the name to display for a collation specified by OID
*
* The result includes all necessary quoting and schema-prefixing.
*/
char *
generate_collation_name(Oid collid)
{
HeapTuple tp;
Form_pg_collation colltup;
char *collname;
char *nspname;
char *result;
tp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid));
if (!HeapTupleIsValid(tp))
elog(ERROR, "cache lookup failed for collation %u", collid);
colltup = (Form_pg_collation) GETSTRUCT(tp);
collname = NameStr(colltup->collname);
if (!CollationIsVisible(collid))
nspname = get_namespace_name(colltup->collnamespace);
else
nspname = NULL;
result = quote_qualified_identifier(nspname, collname);
ReleaseSysCache(tp);
return result;
}
/*
* Given a C string, produce a TEXT datum.
*
* We assume that the input was palloc'd and may be freed.
*/
static text *
string_to_text(char *str)
{
text *result;
result = cstring_to_text(str);
pfree(str);
return result;
}
/*
 * Generate a C string representing relation options from a text[] datum.
*/
static void
get_reloptions(StringInfo buf, Datum reloptions)
{
Datum *options;
int noptions;
int i;
deconstruct_array(DatumGetArrayTypeP(reloptions),
TEXTOID, -1, false, TYPALIGN_INT,
&options, NULL, &noptions);
for (i = 0; i < noptions; i++)
{
char *option = TextDatumGetCString(options[i]);
char *name;
char *separator;
char *value;
/*
* Each array element should have the form name=value. If the "=" is
* missing for some reason, treat it like an empty value.
*/
name = option;
separator = strchr(option, '=');
if (separator)
{
*separator = '\0';
value = separator + 1;
}
else
value = "";
if (i > 0)
appendStringInfoString(buf, ", ");
appendStringInfo(buf, "%s=", quote_identifier(name));
/*
* In general we need to quote the value; but to avoid unnecessary
* clutter, do not quote if it is an identifier that would not need
* quoting. (We could also allow numbers, but that is a bit trickier
* than it looks --- for example, are leading zeroes significant? We
* don't want to assume very much here about what custom reloptions
* might mean.)
*/
if (quote_identifier(value) == value)
appendStringInfoString(buf, value);
else
simple_quote_literal(buf, value);
pfree(option);
}
}
/*
* Generate a C string representing a relation's reloptions, or NULL if none.
*/
static char *
flatten_reloptions(Oid relid)
{
char *result = NULL;
HeapTuple tuple;
Datum reloptions;
bool isnull;
tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for relation %u", relid);
reloptions = SysCacheGetAttr(RELOID, tuple,
Anum_pg_class_reloptions, &isnull);
if (!isnull)
{
StringInfoData buf;
initStringInfo(&buf);
get_reloptions(&buf, reloptions);
result = buf.data;
}
ReleaseSysCache(tuple);
return result;
}
/*
* get_range_partbound_string
* A C string representation of one range partition bound
*/
char *
get_range_partbound_string(List *bound_datums)
{
deparse_context context;
StringInfo buf = makeStringInfo();
ListCell *cell;
char *sep;
memset(&context, 0, sizeof(deparse_context));
context.buf = buf;
appendStringInfoString(buf, "(");
sep = "";
foreach(cell, bound_datums)
{
PartitionRangeDatum *datum =
castNode(PartitionRangeDatum, lfirst(cell));
appendStringInfoString(buf, sep);
if (datum->kind == PARTITION_RANGE_DATUM_MINVALUE)
appendStringInfoString(buf, "MINVALUE");
else if (datum->kind == PARTITION_RANGE_DATUM_MAXVALUE)
appendStringInfoString(buf, "MAXVALUE");
else
{
Const *val = castNode(Const, datum->value);
get_const_expr(val, &context, -1);
}
sep = ", ";
}
appendStringInfoChar(buf, ')');
return buf->data;
}
|
368743.c | /*
******************************************************************************
* @file iis2mdc_reg.c
* @author Sensors Software Solution Team
* @brief IIS2MDC driver file
******************************************************************************
* @attention
*
* <h2><center>© Copyright (c) 2019 STMicroelectronics.
* All rights reserved.</center></h2>
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
******************************************************************************
*/
#include "iis2mdc_reg.h"
/**
* @defgroup IIS2MDC
* @brief This file provides a set of functions needed to drive the
  *          iis2mdc magnetometer sensor.
* @{
*
*/
/**
* @defgroup IIS2MDC_Interfaces_Functions
  * @brief   This section provides a set of functions used to read and
* write a generic register of the device.
* MANDATORY: return 0 -> no Error.
* @{
*
*/
/**
* @brief Read generic device register
*
* @param ctx read / write interface definitions(ptr)
* @param reg register to read
  * @param  data  pointer to buffer that stores the data read(ptr)
  * @param  len   number of consecutive registers to read
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_read_reg(stmdev_ctx_t *ctx, uint8_t reg,
uint8_t *data,
uint16_t len)
{
int32_t ret;
ret = ctx->read_reg(ctx->handle, reg, data, len);
return ret;
}
/**
* @brief Write generic device register
*
* @param ctx read / write interface definitions(ptr)
* @param reg register to write
* @param data pointer to data to write in register reg(ptr)
  * @param  len   number of consecutive registers to write
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_write_reg(stmdev_ctx_t *ctx, uint8_t reg,
uint8_t *data,
uint16_t len)
{
int32_t ret;
ret = ctx->write_reg(ctx->handle, reg, data, len);
return ret;
}
/**
* @}
*
*/
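/*
 * Minimal sketch (not part of the original driver): the stmdev_ctx_t object
 * only needs the two bus callbacks and an opaque handle that
 * iis2mdc_read_reg()/iis2mdc_write_reg() use above. The platform_* stubs
 * and the example context below are hypothetical; replace the stub bodies
 * with the real I2C/SPI transfers of the target platform.
 */
static int32_t platform_write(void *handle, uint8_t reg, uint8_t *bufp,
                              uint16_t len)
{
  /* e.g. an I2C memory-write of len bytes starting at register reg */
  (void)handle; (void)reg; (void)bufp; (void)len;
  return 0;
}
static int32_t platform_read(void *handle, uint8_t reg, uint8_t *bufp,
                             uint16_t len)
{
  /* e.g. an I2C memory-read of len bytes starting at register reg */
  (void)handle; (void)reg; (void)bufp; (void)len;
  return 0;
}
static stmdev_ctx_t example_dev_ctx = {
  .write_reg = platform_write,
  .read_reg  = platform_read,
  .handle    = NULL,  /* e.g. pointer to an I2C peripheral instance */
};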
/**
* @defgroup IIS2MDC_Sensitivity
* @brief These functions convert raw-data into engineering units.
* @{
*
*/
float_t iis2mdc_from_lsb_to_mgauss(int16_t lsb)
{
return ((float_t)lsb) * 1.5f;
}
float_t iis2mdc_from_lsb_to_celsius(int16_t lsb)
{
return (((float_t)lsb / 8.0f) + 25.0f);
}
/**
* @}
*
*/
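/*
 * Minimal sketch (not part of the original driver): converting one raw
 * magnetic sample and one raw temperature sample to engineering units with
 * the helpers above. The raw values are arbitrary example numbers.
 */
static void example_convert_sample(void)
{
  int16_t raw_mag  = 666;  /* LSB */
  int16_t raw_temp = 80;   /* LSB */
  float_t mgauss = iis2mdc_from_lsb_to_mgauss(raw_mag);   /* 999.0 mG  */
  float_t deg_c  = iis2mdc_from_lsb_to_celsius(raw_temp); /* 35.0 degC */
  (void)mgauss;
  (void)deg_c;
}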
/**
* @defgroup IIS2MDC_data_generation.
  * @brief   This section groups all the functions concerning
* data generation.
* @{
*
*/
/**
  * @brief  These registers comprise three groups of 16-bit numbers and
  *         represent the hard-iron offset used to compensate for
  *         environmental effects. The data format is the same as the raw
  *         output data: two's complement with 1 LSb = 1.5 mG.
  *         These values are applied to the magnetic output data in order
  *         to cancel the environmental offset.[set]
*
* @param ctx read / write interface definitions
  * @param  val   buffer that contains the three offset values to write
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_mag_user_offset_set(stmdev_ctx_t *ctx, int16_t *val)
{
uint8_t buff[6];
int32_t ret;
buff[1] = (uint8_t) ((uint16_t)val[0] / 256U);
buff[0] = (uint8_t) ((uint16_t)val[0] - (buff[1] * 256U));
buff[3] = (uint8_t) ((uint16_t)val[1] / 256U);
buff[2] = (uint8_t) ((uint16_t)val[1] - (buff[3] * 256U));
buff[5] = (uint8_t) ((uint16_t)val[2] / 256U);
buff[4] = (uint8_t) ((uint16_t)val[2] - (buff[5] * 256U));
ret = iis2mdc_write_reg(ctx, IIS2MDC_OFFSET_X_REG_L, buff, 6);
return ret;
}
/**
  * @brief  These registers comprise three groups of 16-bit numbers and
  *         represent the hard-iron offset used to compensate for
  *         environmental effects. The data format is the same as the raw
  *         output data: two's complement with 1 LSb = 1.5 mG.
  *         These values are applied to the magnetic output data in order
  *         to cancel the environmental offset.[get]
*
* @param ctx read / write interface definitions
  * @param  val   buffer that stores the three offset values read
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_mag_user_offset_get(stmdev_ctx_t *ctx, int16_t *val)
{
uint8_t buff[6];
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_OFFSET_X_REG_L, buff, 6);
val[0] = (int16_t)buff[1];
val[0] = (val[0] * 256) + (int16_t)buff[0];
val[1] = (int16_t)buff[3];
val[1] = (val[1] * 256) + (int16_t)buff[2];
val[2] = (int16_t)buff[5];
val[2] = (val[2] * 256) + (int16_t)buff[4];
return ret;
}
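/*
 * Minimal sketch (not part of the original driver): programming a hard-iron
 * offset expressed in milligauss. Since 1 LSb = 1.5 mG, the values are
 * converted to LSB before being written. The chosen offsets are arbitrary
 * example numbers.
 */
static int32_t example_set_hard_iron_offset(stmdev_ctx_t *ctx)
{
  /* desired compensation: +150 mG on X, -75 mG on Y, 0 mG on Z */
  int16_t offset_lsb[3] = {
    (int16_t)(150.0f / 1.5f),
    (int16_t)(-75.0f / 1.5f),
    0,
  };
  return iis2mdc_mag_user_offset_set(ctx, offset_lsb);
}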
/**
* @brief Operating mode selection.[set]
*
* @param ctx read / write interface definitions
* @param val change the values of md in reg CFG_REG_A
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_operating_mode_set(stmdev_ctx_t *ctx,
iis2mdc_md_t val)
{
iis2mdc_cfg_reg_a_t reg;
int32_t ret;
  ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_A, (uint8_t *) &reg, 1);
  if (ret == 0) {
    reg.md = (uint8_t)val;
    ret = iis2mdc_write_reg(ctx, IIS2MDC_CFG_REG_A, (uint8_t *) &reg, 1);
}
return ret;
}
/**
* @brief Operating mode selection.[get]
*
* @param ctx read / write interface definitions
* @param val Get the values of md in reg CFG_REG_A
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_operating_mode_get(stmdev_ctx_t *ctx,
iis2mdc_md_t *val)
{
iis2mdc_cfg_reg_a_t reg;
int32_t ret;
  ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_A, (uint8_t *) &reg, 1);
switch (reg.md) {
case IIS2MDC_CONTINUOUS_MODE:
*val = IIS2MDC_CONTINUOUS_MODE;
break;
case IIS2MDC_SINGLE_TRIGGER:
*val = IIS2MDC_SINGLE_TRIGGER;
break;
case IIS2MDC_POWER_DOWN:
*val = IIS2MDC_POWER_DOWN;
break;
default:
*val = IIS2MDC_CONTINUOUS_MODE;
break;
}
return ret;
}
/**
* @brief Output data rate selection.[set]
*
* @param ctx read / write interface definitions
* @param val change the values of odr in reg CFG_REG_A
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_data_rate_set(stmdev_ctx_t *ctx, iis2mdc_odr_t val)
{
iis2mdc_cfg_reg_a_t reg;
int32_t ret;
  ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_A, (uint8_t *) &reg, 1);
  if (ret == 0) {
    reg.odr = (uint8_t)val;
    ret = iis2mdc_write_reg(ctx, IIS2MDC_CFG_REG_A, (uint8_t *) &reg, 1);
}
return ret;
}
/**
* @brief Output data rate selection.[get]
*
* @param ctx read / write interface definitions
* @param val Get the values of odr in reg CFG_REG_A
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_data_rate_get(stmdev_ctx_t *ctx, iis2mdc_odr_t *val)
{
iis2mdc_cfg_reg_a_t reg;
int32_t ret;
  ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_A, (uint8_t *) &reg, 1);
switch (reg.odr) {
case IIS2MDC_ODR_10Hz:
*val = IIS2MDC_ODR_10Hz;
break;
case IIS2MDC_ODR_20Hz:
*val = IIS2MDC_ODR_20Hz;
break;
case IIS2MDC_ODR_50Hz:
*val = IIS2MDC_ODR_50Hz;
break;
case IIS2MDC_ODR_100Hz:
*val = IIS2MDC_ODR_100Hz;
break;
default:
*val = IIS2MDC_ODR_10Hz;
break;
}
return ret;
}
/**
* @brief Enables high-resolution/low-power mode.[set]
*
* @param ctx read / write interface definitions
* @param val change the values of lp in reg CFG_REG_A
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_power_mode_set(stmdev_ctx_t *ctx, iis2mdc_lp_t val)
{
iis2mdc_cfg_reg_a_t reg;
int32_t ret;
  ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_A, (uint8_t *) &reg, 1);
  if (ret == 0) {
    reg.lp = (uint8_t)val;
    ret = iis2mdc_write_reg(ctx, IIS2MDC_CFG_REG_A, (uint8_t *) &reg, 1);
}
return ret;
}
/**
* @brief Enables high-resolution/low-power mode.[get]
*
* @param ctx read / write interface definitions
* @param val Get the values of lp in reg CFG_REG_A
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_power_mode_get(stmdev_ctx_t *ctx, iis2mdc_lp_t *val)
{
iis2mdc_cfg_reg_a_t reg;
int32_t ret;
  ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_A, (uint8_t *) &reg, 1);
switch (reg.lp) {
case IIS2MDC_HIGH_RESOLUTION:
*val = IIS2MDC_HIGH_RESOLUTION;
break;
case IIS2MDC_LOW_POWER:
*val = IIS2MDC_LOW_POWER;
break;
default:
*val = IIS2MDC_HIGH_RESOLUTION;
break;
}
return ret;
}
/**
* @brief Enables the magnetometer temperature compensation.[set]
*
* @param ctx read / write interface definitions
* @param val change the values of comp_temp_en in reg CFG_REG_A
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_offset_temp_comp_set(stmdev_ctx_t *ctx, uint8_t val)
{
iis2mdc_cfg_reg_a_t reg;
int32_t ret;
  ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_A, (uint8_t *) &reg, 1);
  if (ret == 0) {
    reg.comp_temp_en = val;
    ret = iis2mdc_write_reg(ctx, IIS2MDC_CFG_REG_A, (uint8_t *) &reg, 1);
}
return ret;
}
/**
* @brief Enables the magnetometer temperature compensation.[get]
*
* @param ctx read / write interface definitions
* @param val change the values of comp_temp_en in reg CFG_REG_A
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_offset_temp_comp_get(stmdev_ctx_t *ctx, uint8_t *val)
{
iis2mdc_cfg_reg_a_t reg;
int32_t ret;
  ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_A, (uint8_t *) &reg, 1);
*val = reg.comp_temp_en;
return ret;
}
/**
* @brief Low-pass bandwidth selection.[set]
*
* @param ctx read / write interface definitions
* @param val change the values of lpf in reg CFG_REG_B
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_low_pass_bandwidth_set(stmdev_ctx_t *ctx,
iis2mdc_lpf_t val)
{
iis2mdc_cfg_reg_b_t reg;
int32_t ret;
  ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_B, (uint8_t *) &reg, 1);
  if (ret == 0) {
    reg.lpf = (uint8_t)val;
    ret = iis2mdc_write_reg(ctx, IIS2MDC_CFG_REG_B, (uint8_t *) &reg, 1);
}
return ret;
}
/**
* @brief Low-pass bandwidth selection.[get]
*
* @param ctx read / write interface definitions
* @param val Get the values of lpf in reg CFG_REG_B
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_low_pass_bandwidth_get(stmdev_ctx_t *ctx,
iis2mdc_lpf_t *val)
{
iis2mdc_cfg_reg_b_t reg;
int32_t ret;
  ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_B, (uint8_t *) &reg, 1);
switch (reg.lpf) {
case IIS2MDC_ODR_DIV_2:
*val = IIS2MDC_ODR_DIV_2;
break;
case IIS2MDC_ODR_DIV_4:
*val = IIS2MDC_ODR_DIV_4;
break;
default:
*val = IIS2MDC_ODR_DIV_2;
break;
}
return ret;
}
/**
  * @brief  Reset pulse mode.[set]
*
* @param ctx read / write interface definitions
* @param val change the values of set_rst in
* reg CFG_REG_B
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_set_rst_mode_set(stmdev_ctx_t *ctx,
iis2mdc_set_rst_t val)
{
iis2mdc_cfg_reg_b_t reg;
int32_t ret;
  ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_B, (uint8_t *) &reg, 1);
  if (ret == 0) {
    reg.set_rst = (uint8_t)val;
    ret = iis2mdc_write_reg(ctx, IIS2MDC_CFG_REG_B, (uint8_t *) &reg, 1);
}
return ret;
}
/**
* @brief Reset pulse mode.[get]
*
* @param ctx read / write interface definitions
* @param val Get the values of set_rst in reg CFG_REG_B
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_set_rst_mode_get(stmdev_ctx_t *ctx,
iis2mdc_set_rst_t *val)
{
iis2mdc_cfg_reg_b_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_B, (uint8_t *) ®, 1);
switch (reg.set_rst) {
case IIS2MDC_SET_SENS_ODR_DIV_63:
*val = IIS2MDC_SET_SENS_ODR_DIV_63;
break;
case IIS2MDC_SENS_OFF_CANC_EVERY_ODR:
*val = IIS2MDC_SENS_OFF_CANC_EVERY_ODR;
break;
case IIS2MDC_SET_SENS_ONLY_AT_POWER_ON:
*val = IIS2MDC_SET_SENS_ONLY_AT_POWER_ON;
break;
default:
*val = IIS2MDC_SET_SENS_ODR_DIV_63;
break;
}
return ret;
}
/**
* @brief Enables offset cancellation in single measurement mode.
* The OFF_CANC bit must be set to 1 when enabling offset
* cancellation in single measurement mode; this means that a
* call to "set_rst_mode(SENS_OFF_CANC_EVERY_ODR)"
* is needed.[set]
*
* @param ctx read / write interface definitions
* @param val change the values of off_canc_one_shot in
* reg CFG_REG_B
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_set_rst_sensor_single_set(stmdev_ctx_t *ctx,
uint8_t val)
{
iis2mdc_cfg_reg_b_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_B, (uint8_t *) ®, 1);
if (ret == 0) {
reg.off_canc_one_shot = val;
ret = iis2mdc_write_reg(ctx, IIS2MDC_CFG_REG_B, (uint8_t *) ®, 1);
}
return ret;
}
/**
* @brief Enables offset cancellation in single measurement mode.
* The OFF_CANC bit must be set to 1 when enabling offset
* cancellation in single measurement mode; this means that a
* call to "set_rst_mode(SENS_OFF_CANC_EVERY_ODR)"
* is needed.[get]
*
* @param ctx read / write interface definitions
* @param val Get the values of off_canc_one_shot in reg CFG_REG_B
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_set_rst_sensor_single_get(stmdev_ctx_t *ctx,
uint8_t *val)
{
iis2mdc_cfg_reg_b_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_B, (uint8_t *) ®, 1);
*val = reg.off_canc_one_shot;
return ret;
}
/**
* @brief Block data update.[set]
*
* @param ctx read / write interface definitions
* @param val change the values of bdu in reg CFG_REG_C
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_block_data_update_set(stmdev_ctx_t *ctx, uint8_t val)
{
iis2mdc_cfg_reg_c_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_C, (uint8_t *) ®, 1);
if (ret == 0) {
reg.bdu = val;
ret = iis2mdc_write_reg(ctx, IIS2MDC_CFG_REG_C, (uint8_t *) ®, 1);
}
return ret;
}
/**
* @brief Block data update.[get]
*
* @param ctx read / write interface definitions
* @param val Get the values of bdu in reg CFG_REG_C
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_block_data_update_get(stmdev_ctx_t *ctx, uint8_t *val)
{
iis2mdc_cfg_reg_c_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_C, (uint8_t *) ®, 1);
*val = reg.bdu;
return ret;
}
/**
* @brief Magnetic set of data available.[get]
*
* @param ctx read / write interface definitions
* @param val Get the values of zyxda in reg STATUS_REG
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_mag_data_ready_get(stmdev_ctx_t *ctx, uint8_t *val)
{
iis2mdc_status_reg_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_STATUS_REG, (uint8_t *) ®, 1);
*val = reg.zyxda;
return ret;
}
/**
* @brief Magnetic set of data overrun.[get]
*
* @param ctx read / write interface definitions
* @param val Get the values of zyxor in reg STATUS_REG
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_mag_data_ovr_get(stmdev_ctx_t *ctx, uint8_t *val)
{
iis2mdc_status_reg_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_STATUS_REG, (uint8_t *) ®, 1);
*val = reg.zyxor;
return ret;
}
/**
* @brief Magnetic output value.[get]
*
* @param ctx read / write interface definitions
* @param val buffer that stores the data read (X, Y, Z as three int16_t)
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_magnetic_raw_get(stmdev_ctx_t *ctx, int16_t *val)
{
uint8_t buff[6];
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_OUTX_L_REG, buff, 6);
val[0] = (int16_t)buff[1];
val[0] = (val[0] * 256) + (int16_t)buff[0];
val[1] = (int16_t)buff[3];
val[1] = (val[1] * 256) + (int16_t)buff[2];
val[2] = (int16_t)buff[5];
val[2] = (val[2] * 256) + (int16_t)buff[4];
return ret;
}
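/* Illustrative sketch (not part of the original driver): converting the raw
 * LSB values returned by iis2mdc_magnetic_raw_get() into milligauss. The
 * 1.5 mG/LSB sensitivity matches the scaling quoted in the threshold comment
 * below, but it is an assumption here; check the IIS2MDC datasheet before
 * relying on it.
 */
static inline float iis2mdc_example_lsb_to_mgauss(int16_t lsb)
{
  return ((float)lsb * 1.5f); /* assumed sensitivity: 1.5 mG per LSB */
}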
/**
* @brief Temperature output value.[get]
*
* @param ctx read / write interface definitions
* @param val buffer that stores the data read
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_temperature_raw_get(stmdev_ctx_t *ctx, int16_t *val)
{
uint8_t buff[2];
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_TEMP_OUT_L_REG, buff, 2);
*val = (int16_t)buff[1];
*val = (*val * 256) + (int16_t)buff[0];
return ret;
}
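/* Illustrative sketch (not part of the original driver): converting the raw
 * temperature LSB into degrees Celsius. The 8 LSB/degC slope and 25 degC
 * offset are assumptions taken from the usual ST magnetometer convention;
 * verify them against the IIS2MDC datasheet.
 */
static inline float iis2mdc_example_lsb_to_celsius(int16_t lsb)
{
  return (((float)lsb / 8.0f) + 25.0f); /* assumed: 8 LSB/degC, 25 degC offset */
}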
/**
* @}
*
*/
/**
* @defgroup IIS2MDC_common
* @brief This section group common useful functions
* @{
*
*/
/**
* @brief Device Who am I.[get]
*
* @param ctx read / write interface definitions
* @param buff buffer that stores data read
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_device_id_get(stmdev_ctx_t *ctx, uint8_t *buff)
{
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_WHO_AM_I, buff, 1);
return ret;
}
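/* Illustrative usage sketch: probing the device by checking WHO_AM_I.
 * IIS2MDC_ID is assumed to be defined in the driver header with the expected
 * WHO_AM_I value; any other value means the device did not answer or is not
 * an IIS2MDC.
 */
static int32_t iis2mdc_example_probe(stmdev_ctx_t *ctx)
{
  uint8_t id = 0;
  int32_t ret = iis2mdc_device_id_get(ctx, &id);
  if (ret == 0 && id != IIS2MDC_ID) {
    ret = -1; /* communication ok, but unexpected identifier */
  }
  return ret;
}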
/**
* @brief Software reset. Restore the default values in user
* registers.[set]
*
* @param ctx read / write interface definitions
* @param val change the values of soft_rst in reg CFG_REG_A
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_reset_set(stmdev_ctx_t *ctx, uint8_t val)
{
iis2mdc_cfg_reg_a_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_A, (uint8_t *) ®, 1);
if (ret == 0) {
reg.soft_rst = val;
ret = iis2mdc_write_reg(ctx, IIS2MDC_CFG_REG_A, (uint8_t *) ®, 1);
}
return ret;
}
/**
* @brief Software reset. Restore the default values in user registers.[get]
*
* @param ctx read / write interface definitions
* @param val Get the values of soft_rst in reg CFG_REG_A
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_reset_get(stmdev_ctx_t *ctx, uint8_t *val)
{
iis2mdc_cfg_reg_a_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_A, (uint8_t *) ®, 1);
*val = reg.soft_rst;
return ret;
}
/**
* @brief Reboot memory content. Reload the calibration parameters.[set]
*
* @param ctx read / write interface definitions
* @param val change the values of reboot in reg CFG_REG_A
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_boot_set(stmdev_ctx_t *ctx, uint8_t val)
{
iis2mdc_cfg_reg_a_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_A, (uint8_t *) ®, 1);
if (ret == 0) {
reg.reboot = val;
ret = iis2mdc_write_reg(ctx, IIS2MDC_CFG_REG_A, (uint8_t *) ®, 1);
}
return ret;
}
/**
* @brief Reboot memory content. Reload the calibration parameters.[get]
*
* @param ctx read / write interface definitions
* @param val Get the values of reboot in reg CFG_REG_A
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_boot_get(stmdev_ctx_t *ctx, uint8_t *val)
{
iis2mdc_cfg_reg_a_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_A, (uint8_t *) ®, 1);
*val = reg.reboot;
return ret;
}
/**
* @brief Selftest.[set]
*
* @param ctx read / write interface definitions
* @param val change the values of self_test in reg CFG_REG_C
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_self_test_set(stmdev_ctx_t *ctx, uint8_t val)
{
iis2mdc_cfg_reg_c_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_C, (uint8_t *) ®, 1);
if (ret == 0) {
reg.self_test = val;
ret = iis2mdc_write_reg(ctx, IIS2MDC_CFG_REG_C, (uint8_t *) ®, 1);
}
return ret;
}
/**
* @brief Selftest.[get]
*
* @param ctx read / write interface definitions
* @param val Get the values of self_test in reg CFG_REG_C
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_self_test_get(stmdev_ctx_t *ctx, uint8_t *val)
{
iis2mdc_cfg_reg_c_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_C, (uint8_t *) ®, 1);
*val = reg.self_test;
return ret;
}
/**
* @brief Big/Little Endian data selection.[set]
*
* @param ctx read / write interface definitions
* @param val change the values of ble in reg CFG_REG_C
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_data_format_set(stmdev_ctx_t *ctx, iis2mdc_ble_t val)
{
iis2mdc_cfg_reg_c_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_C, (uint8_t *) ®, 1);
if (ret == 0) {
reg.ble = (uint8_t)val;
ret = iis2mdc_write_reg(ctx, IIS2MDC_CFG_REG_C, (uint8_t *) ®, 1);
}
return ret;
}
/**
* @brief Big/Little Endian data selection.[get]
*
* @param ctx read / write interface definitions
* @param val Get the values of ble in reg CFG_REG_C
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_data_format_get(stmdev_ctx_t *ctx, iis2mdc_ble_t *val)
{
iis2mdc_cfg_reg_c_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_C, (uint8_t *) ®, 1);
switch (reg.ble) {
case IIS2MDC_LSB_AT_LOW_ADD:
*val = IIS2MDC_LSB_AT_LOW_ADD;
break;
case IIS2MDC_MSB_AT_LOW_ADD:
*val = IIS2MDC_MSB_AT_LOW_ADD;
break;
default:
*val = IIS2MDC_LSB_AT_LOW_ADD;
break;
}
return ret;
}
/**
* @brief Info about device status.[get]
*
* @param ctx read / write interface definitions
* @param val registers STATUS_REG
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_status_get(stmdev_ctx_t *ctx,
iis2mdc_status_reg_t *val)
{
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_STATUS_REG, (uint8_t *) val, 1);
return ret;
}
/**
* @}
*
*/
/**
* @defgroup IIS2MDC_interrupts
* @brief This section group all the functions that manage
* interrupts.
* @{
*
*/
/**
* @brief The interrupt block recognition checks data after/before the
* hard-iron correction to discover the interrupt.[set]
*
* @param ctx read / write interface definitions
* @param val change the values of int_on_dataoff in
* reg CFG_REG_B
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_offset_int_conf_set(stmdev_ctx_t *ctx,
iis2mdc_int_on_dataoff_t val)
{
iis2mdc_cfg_reg_b_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_B, (uint8_t *) ®, 1);
if (ret == 0) {
reg.int_on_dataoff = (uint8_t)val;
ret = iis2mdc_write_reg(ctx, IIS2MDC_CFG_REG_B, (uint8_t *) ®, 1);
}
return ret;
}
/**
* @brief The interrupt block recognition checks data after/before the
* hard-iron correction to discover the interrupt.[get]
*
* @param ctx read / write interface definitions
* @param val Get the values of int_on_dataoff in
* reg CFG_REG_B
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_offset_int_conf_get(stmdev_ctx_t *ctx,
iis2mdc_int_on_dataoff_t *val)
{
iis2mdc_cfg_reg_b_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_B, (uint8_t *) ®, 1);
switch (reg.int_on_dataoff) {
case IIS2MDC_CHECK_BEFORE:
*val = IIS2MDC_CHECK_BEFORE;
break;
case IIS2MDC_CHECK_AFTER:
*val = IIS2MDC_CHECK_AFTER;
break;
default:
*val = IIS2MDC_CHECK_BEFORE;
break;
}
return ret;
}
/**
* @brief Data-ready signal on INT_DRDY pin.[set]
*
* @param ctx read / write interface definitions
* @param val change the values of drdy_on_pin in reg CFG_REG_C
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_drdy_on_pin_set(stmdev_ctx_t *ctx, uint8_t val)
{
iis2mdc_cfg_reg_c_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_C, (uint8_t *) ®, 1);
if (ret == 0) {
reg.drdy_on_pin = val;
ret = iis2mdc_write_reg(ctx, IIS2MDC_CFG_REG_C, (uint8_t *) ®, 1);
}
return ret;
}
/**
* @brief Data-ready signal on INT_DRDY pin.[get]
*
* @param ctx read / write interface definitions
* @param val Get the values of drdy_on_pin in reg CFG_REG_C
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_drdy_on_pin_get(stmdev_ctx_t *ctx, uint8_t *val)
{
iis2mdc_cfg_reg_c_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_C, (uint8_t *) ®, 1);
*val = reg.drdy_on_pin;
return ret;
}
/**
* @brief Interrupt signal on INT_DRDY pin.[set]
*
* @param ctx read / write interface definitions
* @param val change the values of int_on_pin in reg CFG_REG_C
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_int_on_pin_set(stmdev_ctx_t *ctx, uint8_t val)
{
iis2mdc_cfg_reg_c_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_C, (uint8_t *) ®, 1);
if (ret == 0) {
reg.int_on_pin = val;
ret = iis2mdc_write_reg(ctx, IIS2MDC_CFG_REG_C, (uint8_t *) ®, 1);
}
return ret;
}
/**
* @brief Interrupt signal on INT_DRDY pin.[get]
*
* @param ctx read / write interface definitions
* @param val Get the values of int_on_pin in reg CFG_REG_C
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_int_on_pin_get(stmdev_ctx_t *ctx, uint8_t *val)
{
iis2mdc_cfg_reg_c_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_C, (uint8_t *) ®, 1);
*val = reg.int_on_pin;
return ret;
}
/**
* @brief Interrupt generator configuration register.[set]
*
* @param ctx read / write interface definitions
* @param val registers INT_CRTL_REG
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_int_gen_conf_set(stmdev_ctx_t *ctx,
iis2mdc_int_crtl_reg_t *val)
{
int32_t ret;
ret = iis2mdc_write_reg(ctx, IIS2MDC_INT_CRTL_REG, (uint8_t *) val,
1);
return ret;
}
/**
* @brief Interrupt generator configuration register.[get]
*
* @param ctx read / write interface definitions
* @param val registers INT_CRTL_REG
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_int_gen_conf_get(stmdev_ctx_t *ctx,
iis2mdc_int_crtl_reg_t *val)
{
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_INT_CRTL_REG, (uint8_t *) val,
1);
return ret;
}
/**
* @brief Interrupt generator source register.[get]
*
* @param ctx read / write interface definitions
* @param val registers INT_SOURCE_REG
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_int_gen_source_get(stmdev_ctx_t *ctx,
iis2mdc_int_source_reg_t *val)
{
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_INT_SOURCE_REG, (uint8_t *) val,
1);
return ret;
}
/**
* @brief User-defined threshold value for the interrupt event on the generator.
* Data format is the same as the output data raw:
* two’s complement with 1 LSb = 1.5 mG.[set]
*
* @param ctx read / write interface definitions
* @param val threshold value to write (raw LSb)
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_int_gen_treshold_set(stmdev_ctx_t *ctx, int16_t val)
{
uint8_t buff[2];
int32_t ret;
buff[1] = (uint8_t) ((uint16_t)val / 256U);
buff[0] = (uint8_t) ((uint16_t)val - (buff[1] * 256U));
ret = iis2mdc_write_reg(ctx, IIS2MDC_INT_THS_L_REG, buff, 2);
return ret;
}
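/* Illustrative sketch: programming the interrupt threshold starting from a
 * value expressed in milligauss, using the 1.5 mG/LSB scaling stated in the
 * comment above. The rounding and sign handling here are simplified.
 */
static int32_t iis2mdc_example_threshold_mg_set(stmdev_ctx_t *ctx, float mgauss)
{
  int16_t lsb = (int16_t)(mgauss / 1.5f); /* assumed 1.5 mG per LSB */
  return iis2mdc_int_gen_treshold_set(ctx, lsb);
}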
/**
* @brief User-defined threshold value for the interrupt event on the generator.
* Data format is the same as the output data raw:
* two’s complement with 1 LSb = 1.5 mG.[get]
*
* @param ctx read / write interface definitions
* @param val buffer that stores the threshold value read (raw LSb)
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_int_gen_treshold_get(stmdev_ctx_t *ctx, int16_t *val)
{
uint8_t buff[2];
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_INT_THS_L_REG, buff, 2);
*val = (int16_t)buff[1];
*val = (*val * 256) + (int16_t)buff[0];
return ret;
}
/**
* @}
*
*/
/**
* @defgroup IIS2MDC_serial_interface
* @brief This section group all the functions concerning serial
* interface management
* @{
*
*/
/**
* @brief Enable/Disable I2C interface.[set]
*
* @param ctx read / write interface definitions
* @param val change the values of i2c_dis in reg CFG_REG_C
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_i2c_interface_set(stmdev_ctx_t *ctx,
iis2mdc_i2c_dis_t val)
{
iis2mdc_cfg_reg_c_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_C, (uint8_t *) ®, 1);
if (ret == 0) {
reg.i2c_dis = (uint8_t)val;
ret = iis2mdc_write_reg(ctx, IIS2MDC_CFG_REG_C, (uint8_t *) ®, 1);
}
return ret;
}
/**
* @brief Enable/Disable I2C interface.[get]
*
* @param ctx read / write interface definitions
* @param val Get the values of i2c_dis in reg CFG_REG_C
* @retval interface status (MANDATORY: return 0 -> no Error)
*
*/
int32_t iis2mdc_i2c_interface_get(stmdev_ctx_t *ctx,
iis2mdc_i2c_dis_t *val)
{
iis2mdc_cfg_reg_c_t reg;
int32_t ret;
ret = iis2mdc_read_reg(ctx, IIS2MDC_CFG_REG_C, (uint8_t *) ®, 1);
switch (reg.i2c_dis) {
case IIS2MDC_I2C_ENABLE:
*val = IIS2MDC_I2C_ENABLE;
break;
case IIS2MDC_I2C_DISABLE:
*val = IIS2MDC_I2C_DISABLE;
break;
default:
*val = IIS2MDC_I2C_ENABLE;
break;
}
return ret;
}
/**
* @}
*
*/
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
467048.c | /* compress.c -- compress a memory buffer
* Copyright (C) 1995-2005 Jean-loup Gailly.
* For conditions of distribution and use, see copyright notice in zlib.h
*/
#ifdef BUILDSYSTEM_ENABLE_ZLIB_SUPPORT
/* @(#) $Id$ */
#define ZLIB_INTERNAL
#include "zlib.h"
/* ===========================================================================
Compresses the source buffer into the destination buffer. The level
parameter has the same meaning as in deflateInit. sourceLen is the byte
length of the source buffer. Upon entry, destLen is the total size of the
destination buffer, which must be at least 0.1% larger than sourceLen plus
12 bytes. Upon exit, destLen is the actual size of the compressed buffer.
compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
memory, Z_BUF_ERROR if there was not enough room in the output buffer,
Z_STREAM_ERROR if the level parameter is invalid.
*/
int ZEXPORT compress2 (dest, destLen, source, sourceLen, level)
Bytef *dest;
uLongf *destLen;
const Bytef *source;
uLong sourceLen;
int level;
{
z_stream stream;
int err;
stream.next_in = (z_const Bytef *)source;
stream.avail_in = (uInt)sourceLen;
#ifdef MAXSEG_64K
/* Check for source > 64K on 16-bit machine: */
if ((uLong)stream.avail_in != sourceLen) return Z_BUF_ERROR;
#endif
stream.next_out = dest;
stream.avail_out = (uInt)*destLen;
if ((uLong)stream.avail_out != *destLen) return Z_BUF_ERROR;
stream.zalloc = (alloc_func)0;
stream.zfree = (free_func)0;
stream.opaque = (voidpf)0;
err = deflateInit(&stream, level);
if (err != Z_OK) return err;
err = deflate(&stream, Z_FINISH);
if (err != Z_STREAM_END) {
deflateEnd(&stream);
return err == Z_OK ? Z_BUF_ERROR : err;
}
*destLen = stream.total_out;
err = deflateEnd(&stream);
return err;
}
/* ===========================================================================
*/
int ZEXPORT compress (dest, destLen, source, sourceLen)
Bytef *dest;
uLongf *destLen;
const Bytef *source;
uLong sourceLen;
{
return compress2(dest, destLen, source, sourceLen, Z_DEFAULT_COMPRESSION);
}
/* ===========================================================================
If the default memLevel or windowBits for deflateInit() is changed, then
this function needs to be updated.
*/
uLong ZEXPORT compressBound (sourceLen)
uLong sourceLen;
{
return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) +
(sourceLen >> 25) + 13;
}
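/* Illustrative usage sketch (not part of zlib): sizing the destination buffer
 * with compressBound() before calling compress(). The caller-supplied buffer
 * and the Z_BUF_ERROR shortcut are placeholders; only the
 * compressBound()/compress() calls themselves are real zlib API.
 */
static int example_compress_buffer(const Bytef *src, uLong srcLen,
                                   Bytef *dst, uLong dstCap, uLong *dstLen)
{
    if (compressBound(srcLen) > dstCap)
        return Z_BUF_ERROR;              /* caller's buffer is too small */
    *dstLen = dstCap;                    /* capacity in, actual size out */
    return compress(dst, dstLen, src, srcLen);
}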
#endif // BUILDSYSTEM_ENABLE_ZLIB_SUPPORT
|
261736.c | /*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
static void
gk104_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
{
u32 intr0 = nv_rd32(gpio, 0x00dc00);
u32 intr1 = nv_rd32(gpio, 0x00dc80);
u32 stat0 = nv_rd32(gpio, 0x00dc08) & intr0;
u32 stat1 = nv_rd32(gpio, 0x00dc88) & intr1;
*lo = (stat1 & 0xffff0000) | (stat0 >> 16);
*hi = (stat1 << 16) | (stat0 & 0x0000ffff);
nv_wr32(gpio, 0x00dc00, intr0);
nv_wr32(gpio, 0x00dc80, intr1);
}
void
gk104_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
{
u32 inte0 = nv_rd32(gpio, 0x00dc08);
u32 inte1 = nv_rd32(gpio, 0x00dc88);
if (type & NVKM_GPIO_LO)
inte0 = (inte0 & ~(mask << 16)) | (data << 16);
if (type & NVKM_GPIO_HI)
inte0 = (inte0 & ~(mask & 0xffff)) | (data & 0xffff);
mask >>= 16;
data >>= 16;
if (type & NVKM_GPIO_LO)
inte1 = (inte1 & ~(mask << 16)) | (data << 16);
if (type & NVKM_GPIO_HI)
inte1 = (inte1 & ~mask) | data;
nv_wr32(gpio, 0x00dc08, inte0);
nv_wr32(gpio, 0x00dc88, inte1);
}
struct nvkm_oclass *
gk104_gpio_oclass = &(struct nvkm_gpio_impl) {
.base.handle = NV_SUBDEV(GPIO, 0xe0),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = _nvkm_gpio_ctor,
.dtor = _nvkm_gpio_dtor,
.init = _nvkm_gpio_init,
.fini = _nvkm_gpio_fini,
},
.lines = 32,
.intr_stat = gk104_gpio_intr_stat,
.intr_mask = gk104_gpio_intr_mask,
.drive = gf110_gpio_drive,
.sense = gf110_gpio_sense,
.reset = gf110_gpio_reset,
}.base;
|
246070.c | /***************************************************************************
sn76496.c
Routines to emulate the Texas Instruments SN76489 / SN76496 programmable
tone /noise generator. Also known as (or at least compatible with) TMS9919.
Noise emulation is not accurate due to lack of documentation. The noise
generator uses a shift register with a XOR-feedback network, but the exact
layout is unknown. It can be set for either period or white noise; again,
the details are unknown.
***************************************************************************/
#include <stdint.h>
#include <string.h>
#include "system.h"
#include "sn76496.h"
#define MAX_OUTPUT 0x7fff
#define AUDIO_CONV(A) (A)
#define STEP 0x10000
/* Formulas for noise generator */
/* bit0 = output */
/* noise feedback for white noise mode */
#define FB_WNOISE 0x12000 /* bit15.d(16bits) = bit0(out) ^ bit2 */
//#define FB_WNOISE 0x14000 /* bit15.d(16bits) = bit0(out) ^ bit1 */
//#define FB_WNOISE 0x28000 /* bit16.d(17bits) = bit0(out) ^ bit2 (same to AY-3-8910) */
//#define FB_WNOISE 0x50000 /* bit17.d(18bits) = bit0(out) ^ bit2 */
/* noise feedback for periodic noise mode */
/* it is correct maybe (it was in the Megadrive sound manual) */
//#define FB_PNOISE 0x10000 /* 16bit rotate */
#define FB_PNOISE 0x08000 /* JH 981127 - fixes Do Run Run */
/* noise generator start preset (for periodic noise) */
#define NG_PRESET 0x0f35
struct SN76496
{
int Channel;
int SampleRate;
unsigned int UpdateStep;
int VolTable[16]; /* volume table */
int Register[8]; /* registers */
int LastRegister; /* last register written */
int Volume[4]; /* volume of voice 0-2 and noise */
unsigned int RNG; /* noise generator */
int NoiseFB; /* noise feedback mask */
unsigned int Period[4];
int Count[4];
int Output[4];
};
static struct SN76496 sn[MAX_76496];
void SN76496_dump(int chip, uint8_t buf[16])
{
struct SN76496 *R = &sn[chip];
uint16_t tmp;
unsigned int i;
for (i = 0; (i < 8); ++i) {
tmp = h2le16(R->Register[i]);
memcpy(&buf[(i * 2)], &tmp, 2);
}
}
void SN76496_restore(int chip, uint8_t buf[16])
{
struct SN76496 *R = &sn[chip];
uint16_t tmp;
unsigned int i;
for (i = 0; (i < 8); ++i) {
memcpy(&tmp, &buf[(i * 2)], 2);
R->Register[i] = le2h16(tmp);
}
}
void SN76496Write(int chip,int data)
{
struct SN76496 *R = &sn[chip];
/* update the output buffer before changing the registers */
///// commented out by starshine
//stream_update(R->Channel,0);
if (data & 0x80)
{
int r = (data & 0x70) >> 4;
int c = r/2;
R->LastRegister = r;
R->Register[r] = (R->Register[r] & 0x3f0) | (data & 0x0f);
switch (r)
{
case 0: /* tone 0 : frequency */
case 2: /* tone 1 : frequency */
case 4: /* tone 2 : frequency */
R->Period[c] = R->UpdateStep * R->Register[r];
if (R->Period[c] == 0) R->Period[c] = R->UpdateStep;
if (r == 4)
{
/* update noise shift frequency */
if ((R->Register[6] & 0x03) == 0x03)
R->Period[3] = 2 * R->Period[2];
}
break;
case 1: /* tone 0 : volume */
case 3: /* tone 1 : volume */
case 5: /* tone 2 : volume */
case 7: /* noise : volume */
R->Volume[c] = R->VolTable[data & 0x0f];
break;
case 6: /* noise : frequency, mode */
{
int n = R->Register[6];
R->NoiseFB = (n & 4) ? FB_WNOISE : FB_PNOISE;
n &= 3;
/* N/512,N/1024,N/2048,Tone #3 output */
R->Period[3] = (n == 3) ? 2 * R->Period[2] : (R->UpdateStep << (5+n));
/* reset noise shifter */
R->RNG = NG_PRESET;
R->Output[3] = R->RNG & 1;
}
break;
}
}
else
{
int r = R->LastRegister;
int c = r/2;
switch (r)
{
case 0: /* tone 0 : frequency */
case 2: /* tone 1 : frequency */
case 4: /* tone 2 : frequency */
R->Register[r] = (R->Register[r] & 0x0f) | ((data & 0x3f) << 4);
R->Period[c] = R->UpdateStep * R->Register[r];
if (R->Period[c] == 0) R->Period[c] = R->UpdateStep;
if (r == 4)
{
/* update noise shift frequency */
if ((R->Register[6] & 0x03) == 0x03)
R->Period[3] = 2 * R->Period[2];
}
break;
}
}
}
void SN76496_0_w(int offset, int data) { (void)offset; SN76496Write(0, data); }
void SN76496_1_w(int offset, int data) { (void)offset; SN76496Write(1, data); }
void SN76496_2_w(int offset, int data) { (void)offset; SN76496Write(2, data); }
void SN76496_3_w(int offset, int data) { (void)offset; SN76496Write(3, data); }
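/* Illustrative usage sketch: programming tone channel 0 through the
 * latch/data byte protocol implemented by SN76496Write() above. A latch byte
 * (bit 7 set) selects register r in bits 6-4 and carries the low nibble; a
 * following data byte (bit 7 clear) supplies the upper 6 bits of the 10-bit
 * tone period. The period and attenuation arguments below are arbitrary
 * example values, not taken from any real driver.
 */
static void sn76496_example_program_tone0(int chip, int period10, int atten4)
{
    SN76496Write(chip, 0x80 | (0 << 4) | (period10 & 0x0f)); /* reg 0: tone 0 period, low 4 bits */
    SN76496Write(chip, (period10 >> 4) & 0x3f);              /* data byte: upper 6 bits */
    SN76496Write(chip, 0x80 | (1 << 4) | (atten4 & 0x0f));   /* reg 1: tone 0 attenuation */
}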
void SN76496Update_8_2(int chip,void *buffer,int length)
{
#define DATATYPE unsigned char
#define DATACONV(A) AUDIO_CONV((A) / (STEP * 256))
#include "sn76496u.c"
#undef DATATYPE
#undef DATACONV
}
void SN76496Update_16_2(int chip,void *buffer,int length)
{
#define DATATYPE unsigned short
#define DATACONV(A) ((A) / STEP)
#include "sn76496u.c"
#undef DATATYPE
#undef DATACONV
}
void SN76496_set_clock(int chip,int clock)
{
struct SN76496 *R = &sn[chip];
/* the base clock for the tone generators is the chip clock divided by 16; */
/* for the noise generator, it is clock / 256. */
/* Here we calculate the number of steps which happen during one sample */
/* at the given sample rate. No. of events = sample rate / (clock/16). */
/* STEP is a multiplier used to turn the fraction into a fixed point */
/* number. */
R->UpdateStep = ((double)STEP * R->SampleRate * 16) / clock;
}
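/* Worked example (illustrative numbers only): with a 3.579545 MHz chip clock
 * and a 44100 Hz sample rate, UpdateStep = 0x10000 * 44100 * 16 / 3579545,
 * roughly 12919, i.e. about 0.197 output samples per tone-clock tick in
 * 16.16 fixed point, matching the "sample rate / (clock/16)" note above. */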
static void SN76496_set_volume(int chip,int volume,int gain)
{
struct SN76496 *R = &sn[chip];
int i;
double out;
(void)volume;
///// commented out by starshine
//stream_set_volume(R->Channel,volume);
gain &= 0xff;
/* increase max output basing on gain (0.2 dB per step) */
out = MAX_OUTPUT / 3;
while (gain-- > 0)
out *= 1.023292992; /* = (10 ^ (0.2/20)) */
/* build volume table (2dB per step) */
for (i = 0;i < 15;i++)
{
/* limit volume to avoid clipping */
if (out > MAX_OUTPUT / 3) R->VolTable[i] = MAX_OUTPUT / 3;
else R->VolTable[i] = out;
out /= 1.258925412; /* = 10 ^ (2/20) = 2dB */
}
R->VolTable[15] = 0;
}
int SN76496_init(int chip,int clock,int sample_rate,int sample_bits)
{
int i;
struct SN76496 *R = &sn[chip];
/* char name[40]; */
(void)sample_bits;
////// commented out by starshine
//sprintf(name,"SN76496 #%d",chip);
//R->Channel = stream_init(msound,
// name,sample_rate,sample_bits,
// chip,(sample_bits == 16) ? SN76496Update_16 : SN76496Update_8);
if (R->Channel == -1)
return 1;
R->SampleRate = sample_rate;
SN76496_set_clock(chip,clock);
SN76496_set_volume(chip,255,0);
for (i = 0;i < 4;i++) R->Volume[i] = 0;
R->LastRegister = 0;
for (i = 0;i < 8;i+=2)
{
R->Register[i] = 0;
R->Register[i + 1] = 0x0f; /* volume = 0 */
}
for (i = 0;i < 4;i++)
{
R->Output[i] = 0;
R->Period[i] = R->Count[i] = R->UpdateStep;
}
R->RNG = NG_PRESET;
R->Output[3] = R->RNG & 1;
return 0;
}
int SN76496_sh_start()
{
///// total commenting out by starshine
//int chip;
//const struct SN76496interface *intf = msound->sound_interface;
//for (chip = 0;chip < intf->num;chip++)
//{
// if (SN76496_init(msound,chip,intf->baseclock,Machine->sample_rate,Machine->sample_bits) != 0)
// return 1;
// SN76496_set_volume(chip,intf->volume[chip] & 0xff,(intf->volume[chip] >> 8) & 0xff);
//}
return 0;
}
|
70987.c | // SPDX-License-Identifier: GPL-2.0-only
/*
* CM3323 - Capella Color Light Sensor
*
* Copyright (c) 2015, Intel Corporation.
*
* IIO driver for CM3323 (7-bit I2C slave address 0x10)
*
* TODO: calibscale to correct the lens factor
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#define CM3323_DRV_NAME "cm3323"
#define CM3323_CMD_CONF 0x00
#define CM3323_CMD_RED_DATA 0x08
#define CM3323_CMD_GREEN_DATA 0x09
#define CM3323_CMD_BLUE_DATA 0x0A
#define CM3323_CMD_CLEAR_DATA 0x0B
#define CM3323_CONF_SD_BIT BIT(0) /* sensor disable */
#define CM3323_CONF_AF_BIT BIT(1) /* auto/manual force mode */
#define CM3323_CONF_IT_MASK GENMASK(6, 4)
#define CM3323_CONF_IT_SHIFT 4
#define CM3323_INT_TIME_AVAILABLE "0.04 0.08 0.16 0.32 0.64 1.28"
static const struct {
int val;
int val2;
} cm3323_int_time[] = {
{0, 40000}, /* 40 ms */
{0, 80000}, /* 80 ms */
{0, 160000}, /* 160 ms */
{0, 320000}, /* 320 ms */
{0, 640000}, /* 640 ms */
{1, 280000}, /* 1280 ms */
};
struct cm3323_data {
struct i2c_client *client;
u16 reg_conf;
struct mutex mutex;
};
#define CM3323_COLOR_CHANNEL(_color, _addr) { \
.type = IIO_INTENSITY, \
.modified = 1, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME), \
.channel2 = IIO_MOD_LIGHT_##_color, \
.address = _addr, \
}
static const struct iio_chan_spec cm3323_channels[] = {
CM3323_COLOR_CHANNEL(RED, CM3323_CMD_RED_DATA),
CM3323_COLOR_CHANNEL(GREEN, CM3323_CMD_GREEN_DATA),
CM3323_COLOR_CHANNEL(BLUE, CM3323_CMD_BLUE_DATA),
CM3323_COLOR_CHANNEL(CLEAR, CM3323_CMD_CLEAR_DATA),
};
static IIO_CONST_ATTR_INT_TIME_AVAIL(CM3323_INT_TIME_AVAILABLE);
static struct attribute *cm3323_attributes[] = {
&iio_const_attr_integration_time_available.dev_attr.attr,
NULL
};
static const struct attribute_group cm3323_attribute_group = {
.attrs = cm3323_attributes,
};
static int cm3323_init(struct iio_dev *indio_dev)
{
int ret;
struct cm3323_data *data = iio_priv(indio_dev);
ret = i2c_smbus_read_word_data(data->client, CM3323_CMD_CONF);
if (ret < 0) {
dev_err(&data->client->dev, "Error reading reg_conf\n");
return ret;
}
/* enable sensor and set auto force mode */
ret &= ~(CM3323_CONF_SD_BIT | CM3323_CONF_AF_BIT);
ret = i2c_smbus_write_word_data(data->client, CM3323_CMD_CONF, ret);
if (ret < 0) {
dev_err(&data->client->dev, "Error writing reg_conf\n");
return ret;
}
data->reg_conf = ret;
return 0;
}
static void cm3323_disable(void *data)
{
int ret;
struct iio_dev *indio_dev = data;
struct cm3323_data *cm_data = iio_priv(indio_dev);
ret = i2c_smbus_write_word_data(cm_data->client, CM3323_CMD_CONF,
CM3323_CONF_SD_BIT);
if (ret < 0)
dev_err(&cm_data->client->dev, "Error writing reg_conf\n");
}
static int cm3323_set_it_bits(struct cm3323_data *data, int val, int val2)
{
int i, ret;
u16 reg_conf;
for (i = 0; i < ARRAY_SIZE(cm3323_int_time); i++) {
if (val == cm3323_int_time[i].val &&
val2 == cm3323_int_time[i].val2) {
reg_conf = data->reg_conf & ~CM3323_CONF_IT_MASK;
reg_conf |= i << CM3323_CONF_IT_SHIFT;
ret = i2c_smbus_write_word_data(data->client,
CM3323_CMD_CONF,
reg_conf);
if (ret < 0)
return ret;
data->reg_conf = reg_conf;
return 0;
}
}
return -EINVAL;
}
static int cm3323_get_it_bits(struct cm3323_data *data)
{
int bits;
bits = (data->reg_conf & CM3323_CONF_IT_MASK) >>
CM3323_CONF_IT_SHIFT;
if (bits >= ARRAY_SIZE(cm3323_int_time))
return -EINVAL;
return bits;
}
static int cm3323_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val,
int *val2, long mask)
{
int ret;
struct cm3323_data *data = iio_priv(indio_dev);
switch (mask) {
case IIO_CHAN_INFO_RAW:
mutex_lock(&data->mutex);
ret = i2c_smbus_read_word_data(data->client, chan->address);
if (ret < 0) {
mutex_unlock(&data->mutex);
return ret;
}
*val = ret;
mutex_unlock(&data->mutex);
return IIO_VAL_INT;
case IIO_CHAN_INFO_INT_TIME:
mutex_lock(&data->mutex);
ret = cm3323_get_it_bits(data);
if (ret < 0) {
mutex_unlock(&data->mutex);
return ret;
}
*val = cm3323_int_time[ret].val;
*val2 = cm3323_int_time[ret].val2;
mutex_unlock(&data->mutex);
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
}
static int cm3323_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val,
int val2, long mask)
{
struct cm3323_data *data = iio_priv(indio_dev);
int ret;
switch (mask) {
case IIO_CHAN_INFO_INT_TIME:
mutex_lock(&data->mutex);
ret = cm3323_set_it_bits(data, val, val2);
mutex_unlock(&data->mutex);
return ret;
default:
return -EINVAL;
}
}
static const struct iio_info cm3323_info = {
.read_raw = cm3323_read_raw,
.write_raw = cm3323_write_raw,
.attrs = &cm3323_attribute_group,
};
static int cm3323_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct cm3323_data *data;
struct iio_dev *indio_dev;
int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
mutex_init(&data->mutex);
indio_dev->dev.parent = &client->dev;
indio_dev->info = &cm3323_info;
indio_dev->name = CM3323_DRV_NAME;
indio_dev->channels = cm3323_channels;
indio_dev->num_channels = ARRAY_SIZE(cm3323_channels);
indio_dev->modes = INDIO_DIRECT_MODE;
ret = cm3323_init(indio_dev);
if (ret < 0) {
dev_err(&client->dev, "cm3323 chip init failed\n");
return ret;
}
ret = devm_add_action_or_reset(&client->dev, cm3323_disable, indio_dev);
if (ret < 0)
return ret;
return devm_iio_device_register(&client->dev, indio_dev);
}
static const struct i2c_device_id cm3323_id[] = {
{"cm3323", 0},
{}
};
MODULE_DEVICE_TABLE(i2c, cm3323_id);
static struct i2c_driver cm3323_driver = {
.driver = {
.name = CM3323_DRV_NAME,
},
.probe = cm3323_probe,
.id_table = cm3323_id,
};
module_i2c_driver(cm3323_driver);
MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com>");
MODULE_DESCRIPTION("Capella CM3323 Color Light Sensor driver");
MODULE_LICENSE("GPL v2");
|
709529.c | /* Copyright (C) 1999-2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include <string.h>
#include <utmp.h>
#include <utmpx.h>
/* Copy the information in UTMPX to UTMP. */
void
getutmp (const struct utmpx *utmpx, struct utmp *utmp)
{
#if _HAVE_UT_TYPE - 0
utmp->ut_type = utmpx->ut_type;
#endif
#if _HAVE_UT_PID - 0
utmp->ut_pid = utmpx->ut_pid;
#endif
memcpy (utmp->ut_line, utmpx->ut_line, sizeof (utmp->ut_line));
memcpy (utmp->ut_user, utmpx->ut_user, sizeof (utmp->ut_user));
#if _HAVE_UT_ID - 0
memcpy (utmp->ut_id, utmpx->ut_id, sizeof (utmp->ut_id));
#endif
#if _HAVE_UT_HOST - 0
memcpy (utmp->ut_host, utmpx->ut_host, sizeof (utmp->ut_host));
#endif
#if _HAVE_UT_TV - 0
utmp->ut_tv = utmpx->ut_tv;
#else
utmp->ut_time = utmpx->ut_time;
#endif
}
|
92192.c | #include <lib9.h>
#include <image.h>
#include <memimage.h>
#include "xmem.h"
static int
_loadmemimage(Memimage *i, Rectangle r, uchar *data, int ndata)
{
int y, l, lpart, rpart, mx, m, mr;
uchar *q;
l = bytesperline(r, i->ldepth);
if(ndata < l*Dy(r))
return -1;
ndata = l*Dy(r);
q = byteaddr(i, r.min);
mx = 7>>i->ldepth;
lpart = (r.min.x & mx) << i->ldepth;
rpart = (r.max.x & mx) << i->ldepth;
m = 0xFF >> lpart;
/* may need to do bit insertion on edges */
if(l == 1){ /* all in one byte */
if(rpart)
m ^= 0xFF >> rpart;
for(y=r.min.y; y<r.max.y; y++){
*q ^= (*data^*q) & m;
q += i->width*sizeof(ulong);
data++;
}
return ndata;
}
if(lpart==0 && rpart==0){ /* easy case */
for(y=r.min.y; y<r.max.y; y++){
memmove(q, data, l);
q += i->width*sizeof(ulong);
data += l;
}
return ndata;
}
mr = 0xFF ^ (0xFF >> rpart);
if(lpart!=0 && rpart==0){
for(y=r.min.y; y<r.max.y; y++){
*q ^= (*data^*q) & m;
if(l > 1)
memmove(q+1, data+1, l-1);
q += i->width*sizeof(ulong);
data += l;
}
return ndata;
}
if(lpart==0 && rpart!=0){
for(y=r.min.y; y<r.max.y; y++){
if(l > 1)
memmove(q, data, l-1);
q[l-1] ^= (data[l-1]^q[l-1]) & mr;
q += i->width*sizeof(ulong);
data += l;
}
return ndata;
}
for(y=r.min.y; y<r.max.y; y++){
*q ^= (*data^*q) & m;
if(l > 2)
memmove(q+1, data+1, l-2);
q[l-1] ^= (data[l-1]^q[l-1]) & mr;
q += i->width*sizeof(ulong);
data += l;
}
return ndata;
}
int
loadmemimage(Memimage *i, Rectangle r, uchar *data, int ndata)
{
XImage *x;
Xmem *xm;
int n;
if(!rectinrect(r, i->r))
return -1;
xm = i->X;
x = getXdata(i, Rpt(r.min, r.min));
n = _loadmemimage(i, r, data, ndata);
if(x != nil){
putXdata(i, x, r);
XDestroyImage(x);
i->data->data = xm->wordp;
i->data->base = nil;
}
xdirtied(i);
return n;
}
|
763238.c | /**
******************************************************************************
* @file HAL/HAL_TimeBase/Src/stm32f4xx_it.c
* @author MCD Application Team
* @version V1.2.6
* @date 06-May-2016
* @brief Main Interrupt Service Routines.
* This file provides template for all exceptions handler and
* peripherals interrupt service routine.
******************************************************************************
* @attention
*
* <h2><center>© COPYRIGHT(c) 2016 STMicroelectronics</center></h2>
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Includes ------------------------------------------------------------------*/
#include "main.h"
#include "stm32f4xx_it.h"
/** @addtogroup STM32F4xx_HAL_Examples
* @{
*/
/** @addtogroup GPIO_EXTI
* @{
*/
/* Private typedef -----------------------------------------------------------*/
/* Private define ------------------------------------------------------------*/
/* Private macro -------------------------------------------------------------*/
/* Private variables ---------------------------------------------------------*/
/* Private function prototypes -----------------------------------------------*/
/* Private functions ---------------------------------------------------------*/
/******************************************************************************/
/* Cortex-M4 Processor Exceptions Handlers */
/******************************************************************************/
/**
* @brief This function handles NMI exception.
* @param None
* @retval None
*/
void NMI_Handler(void)
{
}
/**
* @brief This function handles Hard Fault exception.
* @param None
* @retval None
*/
void HardFault_Handler(void)
{
/* Go to infinite loop when Hard Fault exception occurs */
while (1)
{
}
}
/**
* @brief This function handles Memory Manage exception.
* @param None
* @retval None
*/
void MemManage_Handler(void)
{
/* Go to infinite loop when Memory Manage exception occurs */
while (1)
{
}
}
/**
* @brief This function handles Bus Fault exception.
* @param None
* @retval None
*/
void BusFault_Handler(void)
{
/* Go to infinite loop when Bus Fault exception occurs */
while (1)
{
}
}
/**
* @brief This function handles Usage Fault exception.
* @param None
* @retval None
*/
void UsageFault_Handler(void)
{
/* Go to infinite loop when Usage Fault exception occurs */
while (1)
{
}
}
/**
* @brief This function handles SVCall exception.
* @param None
* @retval None
*/
void SVC_Handler(void)
{
}
/**
* @brief This function handles Debug Monitor exception.
* @param None
* @retval None
*/
void DebugMon_Handler(void)
{
}
/**
* @brief This function handles PendSVC exception.
* @param None
* @retval None
*/
void PendSV_Handler(void)
{
}
/**
* @brief This function handles SysTick Handler.
* @param None
* @retval None
*/
void SysTick_Handler(void)
{
}
/******************************************************************************/
/* STM32F4xx Peripherals Interrupt Handlers */
/* Add here the Interrupt Handler for the used peripheral(s) (PPP), for the */
/* available peripheral interrupt handler's name please refer to the startup */
/* file (startup_stm32f4xx.s). */
/******************************************************************************/
/**
* @brief This function handles External lines 10 to 15 interrupt request.
* @param None
* @retval None
*/
void EXTI15_10_IRQHandler(void)
{
HAL_GPIO_EXTI_IRQHandler(KEY_BUTTON_PIN);
}
/**
* @brief This function handles PPP interrupt request.
* @param None
* @retval None
*/
/*void PPP_IRQHandler(void)
{
}*/
/**
* @}
*/
/**
* @}
*/
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
891354.c | /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.] */
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <openssl/base64.h>
#include <openssl/bio.h>
#include <openssl/buffer.h>
#include <openssl/evp.h>
#include <openssl/mem.h>
#define B64_BLOCK_SIZE 1024
#define B64_BLOCK_SIZE2 768
#define B64_NONE 0
#define B64_ENCODE 1
#define B64_DECODE 2
#define EVP_ENCODE_LENGTH(l) (((l+2)/3*4)+(l/48+1)*2+80)
typedef struct b64_struct {
int buf_len;
int buf_off;
int tmp_len; /* used to find the start when decoding */
int tmp_nl; /* If true, scan until '\n' */
int encode;
int start; /* have we started decoding yet? */
int cont; /* <= 0 when finished */
EVP_ENCODE_CTX base64;
char buf[EVP_ENCODE_LENGTH(B64_BLOCK_SIZE) + 10];
char tmp[B64_BLOCK_SIZE];
} BIO_B64_CTX;
static int b64_new(BIO *bio) {
BIO_B64_CTX *ctx;
ctx = OPENSSL_malloc(sizeof(*ctx));
if (ctx == NULL) {
return 0;
}
memset(ctx, 0, sizeof(*ctx));
ctx->cont = 1;
ctx->start = 1;
bio->init = 1;
bio->ptr = (char *)ctx;
return 1;
}
static int b64_free(BIO *bio) {
if (bio == NULL) {
return 0;
}
OPENSSL_free(bio->ptr);
bio->ptr = NULL;
bio->init = 0;
bio->flags = 0;
return 1;
}
static int b64_read(BIO *b, char *out, int outl) {
int ret = 0, i, ii, j, k, x, n, num, ret_code = 0;
BIO_B64_CTX *ctx;
uint8_t *p, *q;
if (out == NULL) {
return 0;
}
ctx = (BIO_B64_CTX *) b->ptr;
if (ctx == NULL || b->next_bio == NULL) {
return 0;
}
BIO_clear_retry_flags(b);
if (ctx->encode != B64_DECODE) {
ctx->encode = B64_DECODE;
ctx->buf_len = 0;
ctx->buf_off = 0;
ctx->tmp_len = 0;
EVP_DecodeInit(&ctx->base64);
}
/* First check if there are bytes decoded/encoded */
if (ctx->buf_len > 0) {
assert(ctx->buf_len >= ctx->buf_off);
i = ctx->buf_len - ctx->buf_off;
if (i > outl) {
i = outl;
}
assert(ctx->buf_off + i < (int)sizeof(ctx->buf));
memcpy(out, &ctx->buf[ctx->buf_off], i);
ret = i;
out += i;
outl -= i;
ctx->buf_off += i;
if (ctx->buf_len == ctx->buf_off) {
ctx->buf_len = 0;
ctx->buf_off = 0;
}
}
/* At this point, we have room for outl bytes and an empty buffer, so we
* should read in some more. */
ret_code = 0;
while (outl > 0) {
if (ctx->cont <= 0) {
break;
}
i = BIO_read(b->next_bio, &(ctx->tmp[ctx->tmp_len]),
B64_BLOCK_SIZE - ctx->tmp_len);
if (i <= 0) {
ret_code = i;
/* Should we continue next time we are called? */
if (!BIO_should_retry(b->next_bio)) {
ctx->cont = i;
/* If buffer empty break */
if (ctx->tmp_len == 0) {
break;
} else {
/* Fall through and process what we have */
i = 0;
}
} else {
/* else we retry and add more data to buffer */
break;
}
}
i += ctx->tmp_len;
ctx->tmp_len = i;
/* We need to scan, a line at a time until we have a valid line if we are
* starting. */
if (ctx->start && (BIO_test_flags(b, BIO_FLAGS_BASE64_NO_NL))) {
/* ctx->start = 1; */
ctx->tmp_len = 0;
} else if (ctx->start) {
q = p = (uint8_t *)ctx->tmp;
num = 0;
for (j = 0; j < i; j++) {
if (*(q++) != '\n') {
continue;
}
/* due to a previous very long line, we need to keep on scanning for a
* '\n' before we even start looking for base64 encoded stuff. */
if (ctx->tmp_nl) {
p = q;
ctx->tmp_nl = 0;
continue;
}
k = EVP_DecodeUpdate(&(ctx->base64), (uint8_t *)ctx->buf, &num, p,
q - p);
if (k <= 0 && num == 0 && ctx->start) {
EVP_DecodeInit(&ctx->base64);
} else {
if (p != (uint8_t *)&(ctx->tmp[0])) {
i -= (p - (uint8_t *)&(ctx->tmp[0]));
for (x = 0; x < i; x++) {
ctx->tmp[x] = p[x];
}
}
EVP_DecodeInit(&ctx->base64);
ctx->start = 0;
break;
}
p = q;
}
/* we fell off the end without starting */
if (j == i && num == 0) {
/* Is this one long chunk? If so, keep on reading until a new
* line. */
if (p == (uint8_t *)&(ctx->tmp[0])) {
/* Check buffer full */
if (i == B64_BLOCK_SIZE) {
ctx->tmp_nl = 1;
ctx->tmp_len = 0;
}
} else if (p != q) { /* finished on a '\n' */
n = q - p;
for (ii = 0; ii < n; ii++) {
ctx->tmp[ii] = p[ii];
}
ctx->tmp_len = n;
}
/* else finished on a '\n' */
continue;
} else {
ctx->tmp_len = 0;
}
} else if (i < B64_BLOCK_SIZE && ctx->cont > 0) {
/* If buffer isn't full and we can retry then restart to read in more
* data. */
continue;
}
if (BIO_test_flags(b, BIO_FLAGS_BASE64_NO_NL)) {
int z, jj;
jj = i & ~3; /* process per 4 */
z = EVP_DecodeBlock((uint8_t *)ctx->buf, (uint8_t *)ctx->tmp, jj);
if (jj > 2) {
if (ctx->tmp[jj - 1] == '=') {
z--;
if (ctx->tmp[jj - 2] == '=') {
z--;
}
}
}
/* z is now number of output bytes and jj is the number consumed. */
if (jj != i) {
memmove(ctx->tmp, &ctx->tmp[jj], i - jj);
ctx->tmp_len = i - jj;
}
ctx->buf_len = 0;
if (z > 0) {
ctx->buf_len = z;
}
i = z;
} else {
i = EVP_DecodeUpdate(&(ctx->base64), (uint8_t *)ctx->buf,
&ctx->buf_len, (uint8_t *)ctx->tmp, i);
ctx->tmp_len = 0;
}
ctx->buf_off = 0;
if (i < 0) {
ret_code = 0;
ctx->buf_len = 0;
break;
}
if (ctx->buf_len <= outl) {
i = ctx->buf_len;
} else {
i = outl;
}
memcpy(out, ctx->buf, i);
ret += i;
ctx->buf_off = i;
if (ctx->buf_off == ctx->buf_len) {
ctx->buf_len = 0;
ctx->buf_off = 0;
}
outl -= i;
out += i;
}
BIO_copy_next_retry(b);
return ret == 0 ? ret_code : ret;
}
static int b64_write(BIO *b, const char *in, int inl) {
int ret = 0, n, i;
BIO_B64_CTX *ctx;
ctx = (BIO_B64_CTX *)b->ptr;
BIO_clear_retry_flags(b);
if (ctx->encode != B64_ENCODE) {
ctx->encode = B64_ENCODE;
ctx->buf_len = 0;
ctx->buf_off = 0;
ctx->tmp_len = 0;
EVP_EncodeInit(&(ctx->base64));
}
assert(ctx->buf_off < (int)sizeof(ctx->buf));
assert(ctx->buf_len <= (int)sizeof(ctx->buf));
assert(ctx->buf_len >= ctx->buf_off);
n = ctx->buf_len - ctx->buf_off;
while (n > 0) {
i = BIO_write(b->next_bio, &(ctx->buf[ctx->buf_off]), n);
if (i <= 0) {
BIO_copy_next_retry(b);
return i;
}
assert(i <= n);
ctx->buf_off += i;
assert(ctx->buf_off <= (int)sizeof(ctx->buf));
assert(ctx->buf_len >= ctx->buf_off);
n -= i;
}
/* at this point all pending data has been written. */
ctx->buf_off = 0;
ctx->buf_len = 0;
if (in == NULL || inl <= 0) {
return 0;
}
while (inl > 0) {
n = (inl > B64_BLOCK_SIZE) ? B64_BLOCK_SIZE : inl;
if (BIO_test_flags(b, BIO_FLAGS_BASE64_NO_NL)) {
if (ctx->tmp_len > 0) {
assert(ctx->tmp_len <= 3);
n = 3 - ctx->tmp_len;
/* There's a theoretical possibility of this. */
if (n > inl) {
n = inl;
}
memcpy(&(ctx->tmp[ctx->tmp_len]), in, n);
ctx->tmp_len += n;
ret += n;
if (ctx->tmp_len < 3) {
break;
}
ctx->buf_len = EVP_EncodeBlock((uint8_t *)ctx->buf, (uint8_t *)ctx->tmp,
ctx->tmp_len);
assert(ctx->buf_len <= (int)sizeof(ctx->buf));
assert(ctx->buf_len >= ctx->buf_off);
/* Since we're now done using the temporary buffer, the length should
* be zeroed. */
ctx->tmp_len = 0;
} else {
if (n < 3) {
memcpy(ctx->tmp, in, n);
ctx->tmp_len = n;
ret += n;
break;
}
n -= n % 3;
ctx->buf_len =
EVP_EncodeBlock((uint8_t *)ctx->buf, (const uint8_t *)in, n);
assert(ctx->buf_len <= (int)sizeof(ctx->buf));
assert(ctx->buf_len >= ctx->buf_off);
ret += n;
}
} else {
EVP_EncodeUpdate(&(ctx->base64), (uint8_t *)ctx->buf, &ctx->buf_len,
(uint8_t *)in, n);
assert(ctx->buf_len <= (int)sizeof(ctx->buf));
assert(ctx->buf_len >= ctx->buf_off);
ret += n;
}
inl -= n;
in += n;
ctx->buf_off = 0;
n = ctx->buf_len;
while (n > 0) {
i = BIO_write(b->next_bio, &(ctx->buf[ctx->buf_off]), n);
if (i <= 0) {
BIO_copy_next_retry(b);
return ret == 0 ? i : ret;
}
assert(i <= n);
n -= i;
ctx->buf_off += i;
assert(ctx->buf_off <= (int)sizeof(ctx->buf));
assert(ctx->buf_len >= ctx->buf_off);
}
ctx->buf_len = 0;
ctx->buf_off = 0;
}
return ret;
}
static long b64_ctrl(BIO *b, int cmd, long num, void *ptr) {
BIO_B64_CTX *ctx;
long ret = 1;
int i;
ctx = (BIO_B64_CTX *)b->ptr;
switch (cmd) {
case BIO_CTRL_RESET:
ctx->cont = 1;
ctx->start = 1;
ctx->encode = B64_NONE;
ret = BIO_ctrl(b->next_bio, cmd, num, ptr);
break;
case BIO_CTRL_EOF: /* More to read */
if (ctx->cont <= 0) {
ret = 1;
} else {
ret = BIO_ctrl(b->next_bio, cmd, num, ptr);
}
break;
case BIO_CTRL_WPENDING: /* More to write in buffer */
assert(ctx->buf_len >= ctx->buf_off);
ret = ctx->buf_len - ctx->buf_off;
if ((ret == 0) && (ctx->encode != B64_NONE) && (ctx->base64.num != 0)) {
ret = 1;
} else if (ret <= 0) {
ret = BIO_ctrl(b->next_bio, cmd, num, ptr);
}
break;
case BIO_CTRL_PENDING: /* More to read in buffer */
assert(ctx->buf_len >= ctx->buf_off);
ret = ctx->buf_len - ctx->buf_off;
if (ret <= 0) {
ret = BIO_ctrl(b->next_bio, cmd, num, ptr);
}
break;
case BIO_CTRL_FLUSH:
/* do a final write */
again:
while (ctx->buf_len != ctx->buf_off) {
i = b64_write(b, NULL, 0);
if (i < 0) {
return i;
}
}
if (BIO_test_flags(b, BIO_FLAGS_BASE64_NO_NL)) {
if (ctx->tmp_len != 0) {
ctx->buf_len = EVP_EncodeBlock((uint8_t *)ctx->buf,
(uint8_t *)ctx->tmp, ctx->tmp_len);
ctx->buf_off = 0;
ctx->tmp_len = 0;
goto again;
}
} else if (ctx->encode != B64_NONE && ctx->base64.num != 0) {
ctx->buf_off = 0;
EVP_EncodeFinal(&(ctx->base64), (uint8_t *)ctx->buf, &(ctx->buf_len));
/* push out the bytes */
goto again;
}
/* Finally flush the underlying BIO */
ret = BIO_ctrl(b->next_bio, cmd, num, ptr);
break;
case BIO_C_DO_STATE_MACHINE:
BIO_clear_retry_flags(b);
ret = BIO_ctrl(b->next_bio, cmd, num, ptr);
BIO_copy_next_retry(b);
break;
case BIO_CTRL_INFO:
case BIO_CTRL_GET:
case BIO_CTRL_SET:
default:
ret = BIO_ctrl(b->next_bio, cmd, num, ptr);
break;
}
return ret;
}
static long b64_callback_ctrl(BIO *b, int cmd, bio_info_cb fp) {
long ret = 1;
if (b->next_bio == NULL) {
return 0;
}
switch (cmd) {
default:
ret = BIO_callback_ctrl(b->next_bio, cmd, fp);
break;
}
return ret;
}
static int b64_puts(BIO *b, const char *str) {
return b64_write(b, str, strlen(str));
}
static const BIO_METHOD b64_method = {
BIO_TYPE_BASE64, "base64 encoding", b64_write, b64_read, b64_puts,
NULL /* gets */, b64_ctrl, b64_new, b64_free, b64_callback_ctrl,
};
const BIO_METHOD *BIO_f_base64(void) { return &b64_method; }
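/* Illustrative usage sketch (not part of the original file): base64-encoding
 * a buffer by pushing the base64 filter on top of a memory BIO. The copy into
 * a caller-provided buffer and the single error path are simplifications; the
 * BIO calls themselves are standard API.
 */
static int example_base64_encode(const uint8_t *in, size_t in_len,
                                 char *out, size_t out_cap, size_t *out_len) {
  BIO *b64 = BIO_new(BIO_f_base64());
  BIO *mem = BIO_new(BIO_s_mem());
  char *encoded;
  long encoded_len;
  int ok = 0;
  if (b64 == NULL || mem == NULL) {
    BIO_free(b64);
    BIO_free(mem);
    return 0;
  }
  BIO_push(b64, mem); /* data written to b64 is encoded, then lands in mem */
  if (BIO_write(b64, in, (int)in_len) == (int)in_len && BIO_flush(b64) == 1) {
    encoded_len = BIO_get_mem_data(mem, &encoded);
    if (encoded_len >= 0 && (size_t)encoded_len < out_cap) {
      memcpy(out, encoded, (size_t)encoded_len); /* copy before freeing the BIOs */
      out[encoded_len] = '\0';
      *out_len = (size_t)encoded_len;
      ok = 1;
    }
  }
  BIO_free_all(b64); /* frees both BIOs in the chain */
  return ok;
}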
|
325714.c | /******************************************************************************
* @attention
*
* <h2><center>© COPYRIGHT 2016 STMicroelectronics</center></h2>
*
* Licensed under ST MYLIBERTY SOFTWARE LICENSE AGREEMENT (the "License");
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.st.com/myliberty
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
* AND SPECIFICALLY DISCLAIMING THE IMPLIED WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
/*
* PROJECT:
* $Revision: $
* LANGUAGE: ANSI C
*/
/*! \file
*
* \author
*
* \brief Debug log output utility implementation.
*
*/
/*
******************************************************************************
* INCLUDES
******************************************************************************
*/
#include "logger.h"
#include "st_errno.h"
#include <string.h>
#include <stdarg.h>
#include <stdio.h>
/*
******************************************************************************
* LOCAL DEFINES
******************************************************************************
*/
#if (USE_LOGGER == LOGGER_ON)
#define MAX_HEX_STR 4
#define MAX_HEX_STR_LENGTH 128
char hexStr[MAX_HEX_STR][MAX_HEX_STR_LENGTH];
uint8_t hexStrIdx = 0;
#endif /* #if USE_LOGGER == LOGGER_ON */
#if (USE_LOGGER == LOGGER_OFF && !defined(HAL_UART_MODULE_ENABLED))
#define UART_HandleTypeDef void
#endif
#define USART_TIMEOUT 1000
UART_HandleTypeDef *pLogUsart = 0;
uint8_t logUsartTx(uint8_t *data, uint16_t dataLen);
/**
  * @brief  This function initializes the UART handle.
  * @param  husart : already initialized handle to USART HW
* @retval none :
*/
void logUsartInit(UART_HandleTypeDef *husart)
{
pLogUsart = husart;
}
/**
  * @brief  This function transmits data via USART
* @param data : data to be transmitted
* @param dataLen : length of data to be transmitted
  * @retval ERR_INVALID_HANDLE : in case the UART HW is not initialized yet
* @retval others : HAL status
*/
uint8_t logUsartTx(uint8_t *data, uint16_t dataLen)
{
if(pLogUsart == 0)
return ERR_INVALID_HANDLE;
#if (USE_LOGGER == LOGGER_ON)
{
return HAL_UART_Transmit(pLogUsart, data, dataLen, USART_TIMEOUT);
}
#else
{
(void)data;
(void)dataLen;
return 0;
}
#endif /* #if USE_LOGGER == LOGGER_ON */
}
int logUsart(const char* format, ...)
{
#if (USE_LOGGER == LOGGER_ON)
{
#define LOG_BUFFER_SIZE 256
char buf[LOG_BUFFER_SIZE];
va_list argptr;
va_start(argptr, format);
int cnt = vsnprintf(buf, LOG_BUFFER_SIZE, format, argptr);
va_end(argptr);
/* */
logUsartTx((uint8_t*)buf, strlen(buf));
return cnt;
}
#else
{
(void)format;
return 0;
}
#endif /* #if USE_LOGGER == LOGGER_ON */
}
/* */
char* hex2Str(unsigned char * data, size_t dataLen)
{
#if (USE_LOGGER == LOGGER_ON)
{
unsigned char * pin = data;
const char * hex = "0123456789ABCDEF";
char * pout = hexStr[hexStrIdx];
uint8_t i = 0;
uint8_t idx = hexStrIdx;
if(dataLen == 0)
{
pout[0] = 0;
} else
{
for(; i < dataLen - 1; ++i) {
*pout++ = hex[(*pin>>4)&0xF];
*pout++ = hex[(*pin++)&0xF];
}
*pout++ = hex[(*pin>>4)&0xF];
*pout++ = hex[(*pin)&0xF];
*pout = 0;
}
hexStrIdx++;
hexStrIdx %= MAX_HEX_STR;
return hexStr[idx];
}
#else
{
(void)data;
(void)dataLen;
return NULL;
}
#endif /* #if USE_LOGGER == LOGGER_ON */
}
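/*
 * Example usage (illustrative sketch, not part of the original module):
 * "huart2" is a hypothetical UART handle assumed to be configured elsewhere
 * by the CubeMX/HAL startup code before any logging is attempted.
 */
#if (USE_LOGGER == LOGGER_ON)
extern UART_HandleTypeDef huart2;
void logExampleUsage(uint8_t *rxData, uint16_t rxLen)
{
  logUsartInit(&huart2); /* register the UART used for log output */
  logUsart("received %d bytes: %s\r\n", rxLen, hex2Str(rxData, rxLen));
}
#endif /* #if USE_LOGGER == LOGGER_ON */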
void logITMTx(uint8_t *data, uint16_t dataLen)
{
#if (USE_LOGGER == LOGGER_ON)
while (dataLen != 0) {
ITM_SendChar(*data);
data++;
dataLen--;
}
#else
{
(void)data;
(void)dataLen;
return;
}
#endif /* #if USE_LOGGER == LOGGER_ON */
}
int logITM(const char* format, ...)
{
#if (USE_LOGGER == LOGGER_ON)
{
#define LOG_BUFFER_SIZE 256
char buf[LOG_BUFFER_SIZE];
va_list argptr;
va_start(argptr, format);
int cnt = vsnprintf(buf, LOG_BUFFER_SIZE, format, argptr);
va_end(argptr);
/* */
logITMTx((uint8_t*)buf, strlen(buf));
HAL_Delay((cnt + 9)/10); /* WA to avoid ITM overflow */
return cnt;
}
#else
{
(void)format;
return 0;
}
#endif /* #if USE_LOGGER == LOGGER_ON */
}
|
184388.c | /*
* FreeRTOS Kernel V10.3.0
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
/******************************************************************************
* NOTE 1: This project provides two demo applications. A simple blinky
* style project, and a more comprehensive test and demo application. The
* mainSELECTED_APPLICATION setting in main.c is used to select between the
* two. See the notes on using mainSELECTED_APPLICATION in main.c. This file
 * implements the simple blinky style version.
*
* NOTE 2: This file only contains the source code that is specific to the
 * simple demo. Generic functions, such as FreeRTOS hook functions, and functions
* required to configure the hardware are defined in main.c.
******************************************************************************
*
* main_blinky() creates one queue, and two tasks. It then starts the
* scheduler.
*
* The Queue Send Task:
* The queue send task is implemented by the prvQueueSendTask() function in
* this file. prvQueueSendTask() sits in a loop that causes it to repeatedly
* block for 200 milliseconds, before sending the value 100 to the queue that
* was created within main_blinky(). Once the value is sent, the task loops
* back around to block for another 200 milliseconds...and so on.
*
* The Queue Receive Task:
* The queue receive task is implemented by the prvQueueReceiveTask() function
* in this file. prvQueueReceiveTask() sits in a loop where it repeatedly
* blocks on attempts to read data from the queue that was created within
* main_blinky(). When data is received, the task checks the value of the
* data, and if the value equals the expected 100, outputs a message to the
* UART. The 'block time' parameter passed to the queue receive function
* specifies that the task should be held in the Blocked state indefinitely to
* wait for data to be available on the queue. The queue receive task will only
* leave the Blocked state when the queue send task writes to the queue. As the
* queue send task writes to the queue every 200 milliseconds, the queue receive
* task leaves the Blocked state every 200 milliseconds, and therefore outputs
* a message every 200 milliseconds.
*/
/* Kernel includes. */
#include "FreeRTOS.h"
#include "task.h"
#include "semphr.h"
/* Xilinx includes. */
#include "xil_printf.h"
/* Priorities at which the tasks are created. */
#define mainQUEUE_RECEIVE_TASK_PRIORITY ( tskIDLE_PRIORITY + 2 )
#define mainQUEUE_SEND_TASK_PRIORITY ( tskIDLE_PRIORITY + 1 )
/* The rate at which data is sent to the queue. The 200ms value is converted
to ticks using the portTICK_PERIOD_MS constant. */
#define mainQUEUE_SEND_FREQUENCY_MS pdMS_TO_TICKS( 200 )
/* The number of items the queue can hold. This is 1 as the receive task
will remove items as they are added, meaning the send task should always find
the queue empty. */
#define mainQUEUE_LENGTH ( 1 )
/*-----------------------------------------------------------*/
/*
* The tasks as described in the comments at the top of this file.
*/
static void prvQueueReceiveTask( void *pvParameters );
static void prvQueueSendTask( void *pvParameters );
/*-----------------------------------------------------------*/
/* The queue used by both tasks. */
static QueueHandle_t xQueue = NULL;
/*-----------------------------------------------------------*/
void main_blinky( void )
{
/* Create the queue. */
xQueue = xQueueCreate( mainQUEUE_LENGTH, sizeof( uint32_t ) );
if( xQueue != NULL )
{
/* Start the two tasks as described in the comments at the top of this
file. */
xTaskCreate( prvQueueReceiveTask, /* The function that implements the task. */
"Rx", /* The text name assigned to the task - for debug only as it is not used by the kernel. */
configMINIMAL_STACK_SIZE, /* The size of the stack to allocate to the task. */
NULL, /* The parameter passed to the task - not used in this case. */
mainQUEUE_RECEIVE_TASK_PRIORITY, /* The priority assigned to the task. */
NULL ); /* The task handle is not required, so NULL is passed. */
xTaskCreate( prvQueueSendTask, "TX", configMINIMAL_STACK_SIZE, NULL, mainQUEUE_SEND_TASK_PRIORITY, NULL );
/* Start the tasks and timer running. */
vTaskStartScheduler();
}
/* If all is well, the scheduler will now be running, and the following
line will never be reached. If the following line does execute, then
there was either insufficient FreeRTOS heap memory available for the idle
and/or timer tasks to be created, or vTaskStartScheduler() was called from
User mode. See the memory management section on the FreeRTOS web site for
more details on the FreeRTOS heap http://www.freertos.org/a00111.html. The
mode from which main() is called is set in the C start up code and must be
a privileged mode (not user mode). */
for( ;; );
}
/*-----------------------------------------------------------*/
static void prvQueueSendTask( void *pvParameters )
{
TickType_t xNextWakeTime;
const uint32_t ulValueToSend = 100UL;
/* Remove compiler warning about unused parameter. */
( void ) pvParameters;
/* Initialise xNextWakeTime - this only needs to be done once. */
xNextWakeTime = xTaskGetTickCount();
for( ;; )
{
/* Place this task in the blocked state until it is time to run again. */
vTaskDelayUntil( &xNextWakeTime, mainQUEUE_SEND_FREQUENCY_MS );
/* Send to the queue - causing the queue receive task to unblock and
toggle the LED. 0 is used as the block time so the sending operation
will not block - it shouldn't need to block as the queue should always
be empty at this point in the code. */
xQueueSend( xQueue, &ulValueToSend, 0U );
}
}
/*-----------------------------------------------------------*/
static void prvQueueReceiveTask( void *pvParameters )
{
uint32_t ulReceivedValue;
const uint32_t ulExpectedValue = 100UL;
/* Remove compiler warning about unused parameter. */
( void ) pvParameters;
for( ;; )
{
/* Wait until something arrives in the queue - this task will block
indefinitely provided INCLUDE_vTaskSuspend is set to 1 in
FreeRTOSConfig.h. */
xQueueReceive( xQueue, &ulReceivedValue, portMAX_DELAY );
/* To get here something must have been received from the queue, but is
it the expected value? If it is, toggle the LED. */
if( ulReceivedValue == ulExpectedValue )
{
xil_printf( "100 received\r\n" );
ulReceivedValue = 0U;
}
}
}
/*-----------------------------------------------------------*/
|
998321.c | #include <arm_neon.h>
#include "params.h"
#include "ntt.h"
#include "reduce.h"
#include "NTT_params.h"
/*************************************************
* Name: ntt
*
* Description: Inplace number-theoretic transform (NTT) in Rq.
* input is in standard order, output is in bitreversed order
*
* Arguments: - int16_t r[256]: pointer to input/output vector of elements of Zq
**************************************************/
// Merged NTT layer
void ntt(int16_t r[256]){
NTT(r);
}
/*************************************************
* Name: invntt
*
* Description: Inplace inverse number-theoretic transform in Rq and
* multiplication by Montgomery factor 2^16.
* Input is in bitreversed order, output is in standard order
*
* Arguments: - int16_t r[256] in {-(q-1)/2,...,(q-1)/2}
* pointer to input/output vector of elements of Zq
**************************************************/
void invntt(int16_t r[256])
{
iNTT(r);
}
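/*
 * Example round trip (illustrative sketch, not part of the library): a forward
 * NTT followed by the inverse NTT returns the polynomial to standard order,
 * scaled by the Montgomery factor described in the comment above.
 */
void example_ntt_roundtrip(int16_t poly[256])
{
  ntt(poly);    /* standard order -> bitreversed order */
  invntt(poly); /* bitreversed order -> standard order, times 2^16 mod q */
}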
|
302052.c | #include "../../lv_examples.h"
/**
* Using the Shadow style properties
*/
void lv_ex_style_4(void)
{
static lv_style_t style;
lv_style_init(&style);
/*Set a background color and a radius*/
lv_style_set_radius(&style, LV_STATE_DEFAULT, 5);
lv_style_set_bg_opa(&style, LV_STATE_DEFAULT, LV_OPA_COVER);
lv_style_set_bg_color(&style, LV_STATE_DEFAULT, LV_COLOR_SILVER);
/*Add a shadow*/
lv_style_set_shadow_width(&style, LV_STATE_DEFAULT, 8);
lv_style_set_shadow_color(&style, LV_STATE_DEFAULT, LV_COLOR_BLUE);
lv_style_set_shadow_ofs_x(&style, LV_STATE_DEFAULT, 10);
lv_style_set_shadow_ofs_y(&style, LV_STATE_DEFAULT, 20);
/*Create an object with the new style*/
lv_obj_t *obj = lv_obj_create(lv_scr_act(), NULL);
lv_obj_add_style(obj, LV_OBJ_PART_MAIN, &style);
lv_obj_align(obj, NULL, LV_ALIGN_CENTER, 0, 0);
}
|
142200.c | /************************************************************************************************************
* Copyright (c) 2017, Dolby Laboratories Inc.
* All rights reserved.
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions
* and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
* and the following disclaimer in the documentation and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or
* promote products derived from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
************************************************************************************************************/
/***************************************************************************\
*
* Module: Frame Info (FMI) API Module
*
* File: fmi_api.c
*
\***************************************************************************/
/**** Module Dependencies ****/
#include "gbl.h"
#include "err.h"
#include "dbg.h"
#include "drcd.h"
#include "bsod.h"
#include "bsi_def.h"
#include "bsi.h"
#include "bsid.h"
#include "fmi_api.h"
#include "fmi_pvt.h"
/*****************************************************************
* ddpi_fmi_query:
*****************************************************************/
DSPerr ddpi_fmi_query(
DSPulong *p_fmi_size) /* output */
{
/* Check input arguments */
ERR_CHKARG(p_fmi_size);
/* Return size of frame info subroutine data */
*p_fmi_size = sizeof(FMI_MEM);
return (ERR_NO_ERROR);
}
/*****************************************************************
* ddpi_fmi_openframe:
*****************************************************************/
DSPerr ddpi_fmi_openframe(
void *p_fmihdl, /* modify */
const DDPI_FMI_BUFDESC_INFRM *p_frmbufdesc, /* input */
DSPshort *p_frm_nwords) /* output */
{
/* Declare local variables */
DSPerr err;
FMI_MEM *p_fmi;
/* Check input arguments */
ERR_CHKARG(p_fmihdl);
ERR_CHKARG(p_frmbufdesc);
ERR_CHKARG(p_frm_nwords);
/* Initialize pointers */
p_fmi = (FMI_MEM *)p_fmihdl;
/* Initialize output parameter */
*p_frm_nwords = 0;
/* Initialize frame info */
p_fmi->frm_status = DDPI_FMI_FRMSTATUS_UNKNOWN;
p_fmi->p_frmbuf = p_frmbufdesc->p_buf;
/* Set up bitstream data structure */
bsod_init(p_frmbufdesc->p_buf, 0, &p_fmi->bstrm);
/* Get frame size */
err = bsid_getfrmsize(
&p_fmi->bstrm, /* input */
&p_fmi->bsi, /* modify */
p_frm_nwords); /* output */
/* Check return error code */
if (err == BSI_ERR_UNSUPPORTED_BSID)
return (DDPI_FMI_ERR_UNSUPPORTED_FRAME);
else if (err)
return (DDPI_FMI_ERR_INVALID_BITSTREAM);
return (DDPI_FMI_ERR_NO_ERROR);
}
/*****************************************************************
* ddpi_fmi_getframeparam:
*****************************************************************/
DSPerr ddpi_fmi_getframeparam(
void *p_fmihdl, /* modify */
const DDPI_FMI_BUFDESC_INFRM *p_frmbufdesc, /* input */
const DDPI_FMI_FRM_ID paramid, /* input */
void *p_paramval, /* output */
DDPI_FMI_FRMSTATUS *p_frm_status) /* output */
{
/* Declare local variables */
DSPerr err;
FMI_MEM *p_fmi;
BSI_STRC *p_bsi;
/* Check input arguments */
ERR_CHKARG(p_fmihdl);
ERR_CHKARG(p_frmbufdesc);
ERR_CHKARG(p_paramval);
ERR_CHKARG(p_frm_status);
/* Initialize pointers */
p_fmi = (FMI_MEM *)p_fmihdl;
p_bsi = &p_fmi->bsi;
/* Perform CRC calculation for this frame */
err = ddpi_fmi_checkframe(p_fmihdl, p_frmbufdesc, p_frm_status);
ERR_CHKRTN(err);
/* Get the requested parameter value */
err = getparamval(
p_frmbufdesc, /* input */
p_bsi, /* input */
paramid, /* input */
p_paramval); /* output */
ERR_CHKRTN(err);
/* Determine return error code based on frame status */
if (*p_frm_status == DDPI_FMI_FRMSTATUS_FULL_ERR)
{
return (DDPI_FMI_ERR_CORRUPT_DATA);
}
else if ((*p_frm_status == DDPI_FMI_FRMSTATUS_PARTIAL_ERR) &&
((paramid == DDPI_FMI_FRM_AUXDATA_ID) ||
(BSI_ISDDP(p_bsi->bse_bsid) && (paramid == DDPI_FMI_FRM_ENCINFO_ID))))
{
return (DDPI_FMI_ERR_CORRUPT_DATA);
}
return (DDPI_FMI_ERR_NO_ERROR);
}
/*****************************************************************
* ddpi_fmi_checkframe:
*****************************************************************/
DSPerr ddpi_fmi_checkframe(
void *p_fmihdl, /* modify */
const DDPI_FMI_BUFDESC_INFRM *p_frmbufdesc, /* input */
DDPI_FMI_FRMSTATUS *p_frm_status) /* output */
{
/* Declare local variables */
DSPerr err;
DSPshort frm_nwords;
FMI_MEM *p_fmi;
const BSI_STRC *p_bsi;
/* Check input arguments */
ERR_CHKARG(p_fmihdl);
ERR_CHKARG(p_frmbufdesc);
/* Initialize pointers */
p_fmi = (FMI_MEM *)p_fmihdl;
p_bsi = &p_fmi->bsi;
/* If frame status is unknown for this frame */
if (p_fmi->frm_status == DDPI_FMI_FRMSTATUS_UNKNOWN)
{
/* Initialize output parameter */
*p_frm_status = p_fmi->frm_status;
if (BSI_ISDDP(p_bsi->bse_bsid))
{
/* Assert that frame buffer contains entire frame */
if (p_frmbufdesc->ndatawords < p_bsi->bse_frmsiz)
{
ERR_PRINTERRMSG("Insufficient data in input frame buffer");
return (DDPI_FMI_ERR_INSUFFICIENT_DATA);
}
}
else if (BSI_ISDD(p_bsi->bse_bsid))
{
frm_nwords = gbl_frmsizetab[p_bsi->samprate.bse_fscod][p_bsi->bse_frmsizecod];
/* Assert that frame buffer contains entire frame */
if (p_frmbufdesc->ndatawords < frm_nwords)
{
ERR_PRINTERRMSG("Insufficient data in input frame buffer");
return (DDPI_FMI_ERR_INSUFFICIENT_DATA);
}
}
else /* Not a DD or DD+ frame */
{
return (DDPI_FMI_ERR_UNSUPPORTED_FRAME);
}
/* Unpack the BSI since this is the first time through */
err = bsid_unp(
&p_fmi->bstrm,
&p_fmi->bsi);
/* Check return error code */
if (err == BSI_ERR_UNSUPPORTED_BSID)
return (DDPI_FMI_ERR_UNSUPPORTED_FRAME);
else if (err)
return (DDPI_FMI_ERR_INVALID_BITSTREAM);
}
/* Set frame status output parameter */
*p_frm_status = p_fmi->frm_status;
/* Return error code */
return (DDPI_FMI_ERR_NO_ERROR);
}
/*****************************************************************
* ddpi_fmi_closeframe:
*****************************************************************/
DSPerr ddpi_fmi_closeframe(
void *p_fmihdl) /* modify */
{
/* Declare local variables */
FMI_MEM *p_fmi;
/* Check input arguments */
ERR_CHKARG(p_fmihdl);
/* Cast void pointer to FMI_MEM */
p_fmi = (FMI_MEM*)p_fmihdl;
p_fmi->frm_status = DDPI_FMI_FRMSTATUS_UNKNOWN;
p_fmi->p_frmbuf = P_NULL;
/* Return error code */
return (DDPI_FMI_ERR_NO_ERROR);
}
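/*****************************************************************
* Example calling sequence (illustrative sketch only, not part of the API):
* query the handle size, open and check a frame, then close it again. The
* caller is assumed to have filled in the frame buffer descriptor; malloc()/
* free() from <stdlib.h> are used purely for illustration, and how the handle
* memory is really obtained is up to the integrating system.
*****************************************************************/
DSPerr example_fmi_sequence(
	const DDPI_FMI_BUFDESC_INFRM *p_frmbufdesc)	/* input */
{
	/* Declare local variables */
	DSPerr err;
	DSPulong fmi_size;
	DSPshort frm_nwords;
	DDPI_FMI_FRMSTATUS frm_status;
	void *p_fmihdl;
	/* Query the amount of memory needed for the frame info handle */
	err = ddpi_fmi_query(&fmi_size);
	ERR_CHKRTN(err);
	/* Allocate the handle (shown with malloc() for illustration only) */
	p_fmihdl = malloc((size_t)fmi_size);
	ERR_CHKARG(p_fmihdl);
	/* Parse the frame header and learn the frame size in words */
	err = ddpi_fmi_openframe(p_fmihdl, p_frmbufdesc, &frm_nwords);
	if (!err)
	{
		/* Validate the frame (CRC etc.) and retrieve its status */
		err = ddpi_fmi_checkframe(p_fmihdl, p_frmbufdesc, &frm_status);
	}
	/* Always close the frame and release the handle */
	ddpi_fmi_closeframe(p_fmihdl);
	free(p_fmihdl);
	return (err);
}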
|
165546.c | // SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* device driver for philips saa7134 based TV cards
* driver core
*
* (c) 2001-03 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]
*/
#include "saa7134.h"
#include "saa7134-reg.h"
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/sound.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
MODULE_DESCRIPTION("v4l2 driver module for saa7130/34 based TV cards");
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");
MODULE_VERSION(SAA7134_VERSION);
/* ------------------------------------------------------------------ */
static unsigned int irq_debug;
module_param(irq_debug, int, 0644);
MODULE_PARM_DESC(irq_debug,"enable debug messages [IRQ handler]");
static unsigned int core_debug;
module_param(core_debug, int, 0644);
MODULE_PARM_DESC(core_debug,"enable debug messages [core]");
static unsigned int gpio_tracking;
module_param(gpio_tracking, int, 0644);
MODULE_PARM_DESC(gpio_tracking,"enable debug messages [gpio]");
static unsigned int alsa = 1;
module_param(alsa, int, 0644);
MODULE_PARM_DESC(alsa,"enable/disable ALSA DMA sound [dmasound]");
static unsigned int latency = UNSET;
module_param(latency, int, 0444);
MODULE_PARM_DESC(latency,"pci latency timer");
int saa7134_no_overlay=-1;
module_param_named(no_overlay, saa7134_no_overlay, int, 0444);
MODULE_PARM_DESC(no_overlay, "allow override overlay default (0 disables, 1 enables) [some VIA/SIS chipsets are known to have problem with overlay]");
bool saa7134_userptr;
module_param(saa7134_userptr, bool, 0644);
MODULE_PARM_DESC(saa7134_userptr, "enable page-aligned userptr support");
static unsigned int video_nr[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET };
static unsigned int vbi_nr[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET };
static unsigned int radio_nr[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET };
static unsigned int tuner[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET };
static unsigned int card[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET };
module_param_array(video_nr, int, NULL, 0444);
module_param_array(vbi_nr, int, NULL, 0444);
module_param_array(radio_nr, int, NULL, 0444);
module_param_array(tuner, int, NULL, 0444);
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(video_nr, "video device number");
MODULE_PARM_DESC(vbi_nr, "vbi device number");
MODULE_PARM_DESC(radio_nr, "radio device number");
MODULE_PARM_DESC(tuner, "tuner type");
MODULE_PARM_DESC(card, "card type");
DEFINE_MUTEX(saa7134_devlist_lock);
EXPORT_SYMBOL(saa7134_devlist_lock);
LIST_HEAD(saa7134_devlist);
EXPORT_SYMBOL(saa7134_devlist);
static LIST_HEAD(mops_list);
static unsigned int saa7134_devcount;
int (*saa7134_dmasound_init)(struct saa7134_dev *dev);
int (*saa7134_dmasound_exit)(struct saa7134_dev *dev);
#define core_dbg(fmt, arg...) do { \
if (core_debug) \
printk(KERN_DEBUG pr_fmt("core: " fmt), ## arg); \
} while (0)
#define irq_dbg(level, fmt, arg...) do {\
if (irq_debug > level) \
printk(KERN_DEBUG pr_fmt("irq: " fmt), ## arg); \
} while (0)
void saa7134_track_gpio(struct saa7134_dev *dev, const char *msg)
{
unsigned long mode,status;
if (!gpio_tracking)
return;
/* rising SAA7134_GPIO_GPRESCAN reads the status */
saa_andorb(SAA7134_GPIO_GPMODE3,SAA7134_GPIO_GPRESCAN,0);
saa_andorb(SAA7134_GPIO_GPMODE3,SAA7134_GPIO_GPRESCAN,SAA7134_GPIO_GPRESCAN);
mode = saa_readl(SAA7134_GPIO_GPMODE0 >> 2) & 0xfffffff;
status = saa_readl(SAA7134_GPIO_GPSTATUS0 >> 2) & 0xfffffff;
core_dbg("%s: gpio: mode=0x%07lx in=0x%07lx out=0x%07lx [%s]\n",
dev->name, mode, (~mode) & status, mode & status, msg);
}
void saa7134_set_gpio(struct saa7134_dev *dev, int bit_no, int value)
{
u32 index, bitval;
index = 1 << bit_no;
switch (value) {
case 0: /* static value */
case 1:
core_dbg("setting GPIO%d to static %d\n", bit_no, value);
/* turn sync mode off if necessary */
if (index & 0x00c00000)
saa_andorb(SAA7134_VIDEO_PORT_CTRL6, 0x0f, 0x00);
if (value)
bitval = index;
else
bitval = 0;
saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, index, index);
saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, index, bitval);
break;
case 3: /* tristate */
core_dbg("setting GPIO%d to tristate\n", bit_no);
saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, index, 0);
break;
}
}
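/*
 * Example (sketch, not taken from the original driver): drive GPIO 21 high
 * and later put it back into tristate:
 *
 *	saa7134_set_gpio(dev, 21, 1);	// static high level
 *	saa7134_set_gpio(dev, 21, 3);	// release to tristate
 */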
/* ------------------------------------------------------------------ */
/* ----------------------------------------------------------- */
/* delayed request_module */
#if defined(CONFIG_MODULES) && defined(MODULE)
static void request_module_async(struct work_struct *work){
struct saa7134_dev* dev = container_of(work, struct saa7134_dev, request_module_wk);
if (card_is_empress(dev))
request_module("saa7134-empress");
if (card_is_dvb(dev))
request_module("saa7134-dvb");
if (card_is_go7007(dev))
request_module("saa7134-go7007");
if (alsa) {
if (dev->pci->device != PCI_DEVICE_ID_PHILIPS_SAA7130)
request_module("saa7134-alsa");
}
}
static void request_submodules(struct saa7134_dev *dev)
{
INIT_WORK(&dev->request_module_wk, request_module_async);
schedule_work(&dev->request_module_wk);
}
static void flush_request_submodules(struct saa7134_dev *dev)
{
flush_work(&dev->request_module_wk);
}
#else
#define request_submodules(dev)
#define flush_request_submodules(dev)
#endif /* CONFIG_MODULES */
/* ------------------------------------------------------------------ */
/* nr of (saa7134-)pages for the given buffer size */
static int saa7134_buffer_pages(int size)
{
size = PAGE_ALIGN(size);
size += PAGE_SIZE; /* for non-page-aligned buffers */
size /= 4096;
return size;
}
/* calc max # of buffers from size (must not exceed the 4MB virtual
* address space per DMA channel) */
int saa7134_buffer_count(unsigned int size, unsigned int count)
{
unsigned int maxcount;
maxcount = 1024 / saa7134_buffer_pages(size);
if (count > maxcount)
count = maxcount;
return count;
}
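/*
 * Worked example (sketch, assuming 4 KiB pages): a 720x576 packed YUYV frame
 * needs 720*576*2 = 829440 bytes. saa7134_buffer_pages() rounds this up to
 * 203 pages and adds one guard page, giving 204 page-table entries, so the
 * 4 MB address window of one DMA channel (1024 pages) holds at most
 * 1024/204 = 5 such buffers.
 */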
int saa7134_buffer_startpage(struct saa7134_buf *buf)
{
return saa7134_buffer_pages(vb2_plane_size(&buf->vb2.vb2_buf, 0))
* buf->vb2.vb2_buf.index;
}
unsigned long saa7134_buffer_base(struct saa7134_buf *buf)
{
unsigned long base;
struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2.vb2_buf, 0);
base = saa7134_buffer_startpage(buf) * 4096;
base += dma->sgl[0].offset;
return base;
}
/* ------------------------------------------------------------------ */
int saa7134_pgtable_alloc(struct pci_dev *pci, struct saa7134_pgtable *pt)
{
__le32 *cpu;
dma_addr_t dma_addr = 0;
cpu = pci_alloc_consistent(pci, SAA7134_PGTABLE_SIZE, &dma_addr);
if (NULL == cpu)
return -ENOMEM;
pt->size = SAA7134_PGTABLE_SIZE;
pt->cpu = cpu;
pt->dma = dma_addr;
return 0;
}
int saa7134_pgtable_build(struct pci_dev *pci, struct saa7134_pgtable *pt,
struct scatterlist *list, unsigned int length,
unsigned int startpage)
{
__le32 *ptr;
unsigned int i, p;
BUG_ON(NULL == pt || NULL == pt->cpu);
ptr = pt->cpu + startpage;
for (i = 0; i < length; i++, list = sg_next(list)) {
for (p = 0; p * 4096 < list->length; p++, ptr++)
*ptr = cpu_to_le32(sg_dma_address(list) +
list->offset + p * 4096);
}
return 0;
}
void saa7134_pgtable_free(struct pci_dev *pci, struct saa7134_pgtable *pt)
{
if (NULL == pt->cpu)
return;
pci_free_consistent(pci, pt->size, pt->cpu, pt->dma);
pt->cpu = NULL;
}
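/*
 * Example (sketch mirroring how the capture paths and the DMA sound module
 * use the helpers above; "pt", "dma" and "startpage" are assumed to come
 * from the caller's buffer setup):
 *
 *	struct saa7134_pgtable pt;
 *
 *	if (saa7134_pgtable_alloc(dev->pci, &pt))
 *		return -ENOMEM;
 *	saa7134_pgtable_build(dev->pci, &pt, dma->sgl, dma->nents, startpage);
 *	...
 *	saa7134_pgtable_free(dev->pci, &pt);
 */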
/* ------------------------------------------------------------------ */
int saa7134_buffer_queue(struct saa7134_dev *dev,
struct saa7134_dmaqueue *q,
struct saa7134_buf *buf)
{
struct saa7134_buf *next = NULL;
unsigned long flags;
spin_lock_irqsave(&dev->slock, flags);
core_dbg("buffer_queue %p\n", buf);
if (NULL == q->curr) {
if (!q->need_two) {
q->curr = buf;
buf->activate(dev, buf, NULL);
} else if (list_empty(&q->queue)) {
list_add_tail(&buf->entry, &q->queue);
} else {
next = list_entry(q->queue.next, struct saa7134_buf,
entry);
q->curr = buf;
buf->activate(dev, buf, next);
}
} else {
list_add_tail(&buf->entry, &q->queue);
}
spin_unlock_irqrestore(&dev->slock, flags);
return 0;
}
void saa7134_buffer_finish(struct saa7134_dev *dev,
struct saa7134_dmaqueue *q,
unsigned int state)
{
core_dbg("buffer_finish %p\n", q->curr);
/* finish current buffer */
q->curr->vb2.vb2_buf.timestamp = ktime_get_ns();
q->curr->vb2.sequence = q->seq_nr++;
vb2_buffer_done(&q->curr->vb2.vb2_buf, state);
q->curr = NULL;
}
void saa7134_buffer_next(struct saa7134_dev *dev,
struct saa7134_dmaqueue *q)
{
struct saa7134_buf *buf,*next = NULL;
assert_spin_locked(&dev->slock);
BUG_ON(NULL != q->curr);
if (!list_empty(&q->queue)) {
/* activate next one from queue */
buf = list_entry(q->queue.next, struct saa7134_buf, entry);
core_dbg("buffer_next %p [prev=%p/next=%p]\n",
buf, q->queue.prev, q->queue.next);
list_del(&buf->entry);
if (!list_empty(&q->queue))
next = list_entry(q->queue.next, struct saa7134_buf, entry);
q->curr = buf;
buf->activate(dev, buf, next);
core_dbg("buffer_next #2 prev=%p/next=%p\n",
q->queue.prev, q->queue.next);
} else {
/* nothing to do -- just stop DMA */
core_dbg("buffer_next %p\n", NULL);
saa7134_set_dmabits(dev);
del_timer(&q->timeout);
}
}
void saa7134_buffer_timeout(struct timer_list *t)
{
struct saa7134_dmaqueue *q = from_timer(q, t, timeout);
struct saa7134_dev *dev = q->dev;
unsigned long flags;
spin_lock_irqsave(&dev->slock, flags);
/* try to reset the hardware (SWRST) */
saa_writeb(SAA7134_REGION_ENABLE, 0x00);
saa_writeb(SAA7134_REGION_ENABLE, 0x80);
saa_writeb(SAA7134_REGION_ENABLE, 0x00);
/* flag current buffer as failed,
try to start over with the next one. */
if (q->curr) {
core_dbg("timeout on %p\n", q->curr);
saa7134_buffer_finish(dev, q, VB2_BUF_STATE_ERROR);
}
saa7134_buffer_next(dev, q);
spin_unlock_irqrestore(&dev->slock, flags);
}
void saa7134_stop_streaming(struct saa7134_dev *dev, struct saa7134_dmaqueue *q)
{
unsigned long flags;
struct list_head *pos, *n;
struct saa7134_buf *tmp;
spin_lock_irqsave(&dev->slock, flags);
if (!list_empty(&q->queue)) {
list_for_each_safe(pos, n, &q->queue) {
tmp = list_entry(pos, struct saa7134_buf, entry);
vb2_buffer_done(&tmp->vb2.vb2_buf,
VB2_BUF_STATE_ERROR);
list_del(pos);
tmp = NULL;
}
}
spin_unlock_irqrestore(&dev->slock, flags);
saa7134_buffer_timeout(&q->timeout); /* also calls del_timer(&q->timeout) */
}
EXPORT_SYMBOL_GPL(saa7134_stop_streaming);
/* ------------------------------------------------------------------ */
int saa7134_set_dmabits(struct saa7134_dev *dev)
{
u32 split, task=0, ctrl=0, irq=0;
enum v4l2_field cap = V4L2_FIELD_ANY;
enum v4l2_field ov = V4L2_FIELD_ANY;
assert_spin_locked(&dev->slock);
if (dev->insuspend)
return 0;
/* video capture -- dma 0 + video task A */
if (dev->video_q.curr) {
task |= 0x01;
ctrl |= SAA7134_MAIN_CTRL_TE0;
irq |= SAA7134_IRQ1_INTE_RA0_1 |
SAA7134_IRQ1_INTE_RA0_0;
cap = dev->field;
}
/* video capture -- dma 1+2 (planar modes) */
if (dev->video_q.curr && dev->fmt->planar) {
ctrl |= SAA7134_MAIN_CTRL_TE4 |
SAA7134_MAIN_CTRL_TE5;
}
/* screen overlay -- dma 0 + video task B */
if (dev->ovenable) {
task |= 0x10;
ctrl |= SAA7134_MAIN_CTRL_TE1;
ov = dev->ovfield;
}
/* vbi capture -- dma 0 + vbi task A+B */
if (dev->vbi_q.curr) {
task |= 0x22;
ctrl |= SAA7134_MAIN_CTRL_TE2 |
SAA7134_MAIN_CTRL_TE3;
irq |= SAA7134_IRQ1_INTE_RA0_7 |
SAA7134_IRQ1_INTE_RA0_6 |
SAA7134_IRQ1_INTE_RA0_5 |
SAA7134_IRQ1_INTE_RA0_4;
}
/* audio capture -- dma 3 */
if (dev->dmasound.dma_running) {
ctrl |= SAA7134_MAIN_CTRL_TE6;
irq |= SAA7134_IRQ1_INTE_RA3_1 |
SAA7134_IRQ1_INTE_RA3_0;
}
/* TS capture -- dma 5 */
if (dev->ts_q.curr) {
ctrl |= SAA7134_MAIN_CTRL_TE5;
irq |= SAA7134_IRQ1_INTE_RA2_1 |
SAA7134_IRQ1_INTE_RA2_0;
}
/* set task conditions + field handling */
if (V4L2_FIELD_HAS_BOTH(cap) || V4L2_FIELD_HAS_BOTH(ov) || cap == ov) {
/* default config -- use full frames */
saa_writeb(SAA7134_TASK_CONDITIONS(TASK_A), 0x0d);
saa_writeb(SAA7134_TASK_CONDITIONS(TASK_B), 0x0d);
saa_writeb(SAA7134_FIELD_HANDLING(TASK_A), 0x02);
saa_writeb(SAA7134_FIELD_HANDLING(TASK_B), 0x02);
split = 0;
} else {
/* split fields between tasks */
if (V4L2_FIELD_TOP == cap) {
/* odd A, even B, repeat */
saa_writeb(SAA7134_TASK_CONDITIONS(TASK_A), 0x0d);
saa_writeb(SAA7134_TASK_CONDITIONS(TASK_B), 0x0e);
} else {
/* odd B, even A, repeat */
saa_writeb(SAA7134_TASK_CONDITIONS(TASK_A), 0x0e);
saa_writeb(SAA7134_TASK_CONDITIONS(TASK_B), 0x0d);
}
saa_writeb(SAA7134_FIELD_HANDLING(TASK_A), 0x01);
saa_writeb(SAA7134_FIELD_HANDLING(TASK_B), 0x01);
split = 1;
}
/* irqs */
saa_writeb(SAA7134_REGION_ENABLE, task);
saa_writel(SAA7134_IRQ1, irq);
saa_andorl(SAA7134_MAIN_CTRL,
SAA7134_MAIN_CTRL_TE0 |
SAA7134_MAIN_CTRL_TE1 |
SAA7134_MAIN_CTRL_TE2 |
SAA7134_MAIN_CTRL_TE3 |
SAA7134_MAIN_CTRL_TE4 |
SAA7134_MAIN_CTRL_TE5 |
SAA7134_MAIN_CTRL_TE6,
ctrl);
core_dbg("dmabits: task=0x%02x ctrl=0x%02x irq=0x%x split=%s\n",
		task, ctrl, irq, split ? "yes" : "no");
return 0;
}
/* ------------------------------------------------------------------ */
/* IRQ handler + helpers */
static char *irqbits[] = {
"DONE_RA0", "DONE_RA1", "DONE_RA2", "DONE_RA3",
"AR", "PE", "PWR_ON", "RDCAP", "INTL", "FIDT", "MMC",
"TRIG_ERR", "CONF_ERR", "LOAD_ERR",
"GPIO16", "GPIO18", "GPIO22", "GPIO23"
};
#define IRQBITS ARRAY_SIZE(irqbits)
static void print_irqstatus(struct saa7134_dev *dev, int loop,
unsigned long report, unsigned long status)
{
unsigned int i;
irq_dbg(1, "[%d,%ld]: r=0x%lx s=0x%02lx",
loop, jiffies, report, status);
for (i = 0; i < IRQBITS; i++) {
if (!(report & (1 << i)))
continue;
pr_cont(" %s", irqbits[i]);
}
if (report & SAA7134_IRQ_REPORT_DONE_RA0) {
pr_cont(" | RA0=%s,%s,%s,%ld",
(status & 0x40) ? "vbi" : "video",
(status & 0x20) ? "b" : "a",
(status & 0x10) ? "odd" : "even",
(status & 0x0f));
}
pr_cont("\n");
}
static irqreturn_t saa7134_irq(int irq, void *dev_id)
{
struct saa7134_dev *dev = (struct saa7134_dev*) dev_id;
unsigned long report,status;
int loop, handled = 0;
if (dev->insuspend)
goto out;
for (loop = 0; loop < 10; loop++) {
report = saa_readl(SAA7134_IRQ_REPORT);
status = saa_readl(SAA7134_IRQ_STATUS);
/* If dmasound support is active and we get a sound report,
* mask out the report and let the saa7134-alsa module deal
* with it */
if ((report & SAA7134_IRQ_REPORT_DONE_RA3) &&
(dev->dmasound.priv_data != NULL) )
{
irq_dbg(2, "preserving DMA sound interrupt\n");
report &= ~SAA7134_IRQ_REPORT_DONE_RA3;
}
if (0 == report) {
irq_dbg(2, "no (more) work\n");
goto out;
}
handled = 1;
saa_writel(SAA7134_IRQ_REPORT,report);
if (irq_debug)
print_irqstatus(dev,loop,report,status);
if ((report & SAA7134_IRQ_REPORT_RDCAP) ||
(report & SAA7134_IRQ_REPORT_INTL))
saa7134_irq_video_signalchange(dev);
if ((report & SAA7134_IRQ_REPORT_DONE_RA0) &&
(status & 0x60) == 0)
saa7134_irq_video_done(dev,status);
if ((report & SAA7134_IRQ_REPORT_DONE_RA0) &&
(status & 0x40) == 0x40)
saa7134_irq_vbi_done(dev,status);
if ((report & SAA7134_IRQ_REPORT_DONE_RA2) &&
card_has_mpeg(dev)) {
if (dev->mops->irq_ts_done != NULL)
dev->mops->irq_ts_done(dev, status);
else
saa7134_irq_ts_done(dev, status);
}
if (report & SAA7134_IRQ_REPORT_GPIO16) {
switch (dev->has_remote) {
case SAA7134_REMOTE_GPIO:
if (!dev->remote)
break;
if (dev->remote->mask_keydown & 0x10000) {
saa7134_input_irq(dev);
}
break;
case SAA7134_REMOTE_I2C:
break; /* FIXME: invoke I2C get_key() */
default: /* GPIO16 not used by IR remote */
break;
}
}
if (report & SAA7134_IRQ_REPORT_GPIO18) {
switch (dev->has_remote) {
case SAA7134_REMOTE_GPIO:
if (!dev->remote)
break;
if ((dev->remote->mask_keydown & 0x40000) ||
(dev->remote->mask_keyup & 0x40000)) {
saa7134_input_irq(dev);
}
break;
case SAA7134_REMOTE_I2C:
break; /* FIXME: invoke I2C get_key() */
default: /* GPIO18 not used by IR remote */
break;
}
}
}
if (10 == loop) {
print_irqstatus(dev,loop,report,status);
if (report & SAA7134_IRQ_REPORT_PE) {
/* disable all parity error */
pr_warn("%s/irq: looping -- clearing PE (parity error!) enable bit\n",
dev->name);
saa_clearl(SAA7134_IRQ2,SAA7134_IRQ2_INTE_PE);
} else if (report & SAA7134_IRQ_REPORT_GPIO16) {
/* disable gpio16 IRQ */
pr_warn("%s/irq: looping -- clearing GPIO16 enable bit\n",
dev->name);
saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO16_P);
saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO16_N);
} else if (report & SAA7134_IRQ_REPORT_GPIO18) {
/* disable gpio18 IRQs */
pr_warn("%s/irq: looping -- clearing GPIO18 enable bit\n",
dev->name);
saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_P);
saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_N);
} else {
/* disable all irqs */
pr_warn("%s/irq: looping -- clearing all enable bits\n",
dev->name);
saa_writel(SAA7134_IRQ1,0);
saa_writel(SAA7134_IRQ2,0);
}
}
out:
return IRQ_RETVAL(handled);
}
/* ------------------------------------------------------------------ */
/* early init (no i2c, no irq) */
static int saa7134_hw_enable1(struct saa7134_dev *dev)
{
/* RAM FIFO config */
saa_writel(SAA7134_FIFO_SIZE, 0x08070503);
saa_writel(SAA7134_THRESHOULD, 0x02020202);
/* enable audio + video processing */
saa_writel(SAA7134_MAIN_CTRL,
SAA7134_MAIN_CTRL_VPLLE |
SAA7134_MAIN_CTRL_APLLE |
SAA7134_MAIN_CTRL_EXOSC |
SAA7134_MAIN_CTRL_EVFE1 |
SAA7134_MAIN_CTRL_EVFE2 |
SAA7134_MAIN_CTRL_ESFE |
SAA7134_MAIN_CTRL_EBDAC);
/*
* Initialize OSS _after_ enabling audio clock PLL and audio processing.
* OSS initialization writes to registers via the audio DSP; these
* writes will fail unless the audio clock has been started. At worst,
* audio will not work.
*/
/* enable peripheral devices */
saa_writeb(SAA7134_SPECIAL_MODE, 0x01);
/* set vertical line numbering start (vbi needs this) */
saa_writeb(SAA7134_SOURCE_TIMING2, 0x20);
return 0;
}
static int saa7134_hwinit1(struct saa7134_dev *dev)
{
core_dbg("hwinit1\n");
saa_writel(SAA7134_IRQ1, 0);
saa_writel(SAA7134_IRQ2, 0);
/* Clear any stale IRQ reports */
saa_writel(SAA7134_IRQ_REPORT, saa_readl(SAA7134_IRQ_REPORT));
mutex_init(&dev->lock);
spin_lock_init(&dev->slock);
saa7134_track_gpio(dev,"pre-init");
saa7134_video_init1(dev);
saa7134_vbi_init1(dev);
if (card_has_mpeg(dev))
saa7134_ts_init1(dev);
saa7134_input_init1(dev);
saa7134_hw_enable1(dev);
return 0;
}
/* late init (with i2c + irq) */
static int saa7134_hw_enable2(struct saa7134_dev *dev)
{
unsigned int irq2_mask;
/* enable IRQ's */
irq2_mask =
SAA7134_IRQ2_INTE_DEC3 |
SAA7134_IRQ2_INTE_DEC2 |
SAA7134_IRQ2_INTE_DEC1 |
SAA7134_IRQ2_INTE_DEC0 |
SAA7134_IRQ2_INTE_PE |
SAA7134_IRQ2_INTE_AR;
if (dev->has_remote == SAA7134_REMOTE_GPIO && dev->remote) {
if (dev->remote->mask_keydown & 0x10000)
irq2_mask |= SAA7134_IRQ2_INTE_GPIO16_N;
else { /* Allow enabling both IRQ edge triggers */
if (dev->remote->mask_keydown & 0x40000)
irq2_mask |= SAA7134_IRQ2_INTE_GPIO18_P;
if (dev->remote->mask_keyup & 0x40000)
irq2_mask |= SAA7134_IRQ2_INTE_GPIO18_N;
}
}
if (dev->has_remote == SAA7134_REMOTE_I2C) {
request_module("ir-kbd-i2c");
}
saa_writel(SAA7134_IRQ1, 0);
saa_writel(SAA7134_IRQ2, irq2_mask);
return 0;
}
static int saa7134_hwinit2(struct saa7134_dev *dev)
{
core_dbg("hwinit2\n");
saa7134_video_init2(dev);
saa7134_tvaudio_init2(dev);
saa7134_hw_enable2(dev);
return 0;
}
/* shutdown */
static int saa7134_hwfini(struct saa7134_dev *dev)
{
core_dbg("hwfini\n");
if (card_has_mpeg(dev))
saa7134_ts_fini(dev);
saa7134_input_fini(dev);
saa7134_vbi_fini(dev);
saa7134_tvaudio_fini(dev);
saa7134_video_fini(dev);
return 0;
}
static void must_configure_manually(int has_eeprom)
{
unsigned int i,p;
if (!has_eeprom)
pr_warn("saa7134: <rant>\n"
"saa7134: Congratulations! Your TV card vendor saved a few\n"
"saa7134: cents for a eeprom, thus your pci board has no\n"
"saa7134: subsystem ID and I can't identify it automatically\n"
"saa7134: </rant>\n"
"saa7134: I feel better now. Ok, here are the good news:\n"
"saa7134: You can use the card=<nr> insmod option to specify\n"
"saa7134: which board do you have. The list:\n");
else
pr_warn("saa7134: Board is currently unknown. You might try to use the card=<nr>\n"
"saa7134: insmod option to specify which board do you have, but this is\n"
"saa7134: somewhat risky, as might damage your card. It is better to ask\n"
"saa7134: for support at linux-media@vger.kernel.org.\n"
"saa7134: The supported cards are:\n");
for (i = 0; i < saa7134_bcount; i++) {
pr_warn("saa7134: card=%d -> %-40.40s",
i,saa7134_boards[i].name);
for (p = 0; saa7134_pci_tbl[p].driver_data; p++) {
if (saa7134_pci_tbl[p].driver_data != i)
continue;
pr_cont(" %04x:%04x",
saa7134_pci_tbl[p].subvendor,
saa7134_pci_tbl[p].subdevice);
}
pr_cont("\n");
}
}
static void saa7134_unregister_media_device(struct saa7134_dev *dev)
{
#ifdef CONFIG_MEDIA_CONTROLLER
if (!dev->media_dev)
return;
media_device_unregister(dev->media_dev);
media_device_cleanup(dev->media_dev);
kfree(dev->media_dev);
dev->media_dev = NULL;
#endif
}
static void saa7134_media_release(struct saa7134_dev *dev)
{
#ifdef CONFIG_MEDIA_CONTROLLER
int i;
for (i = 0; i < SAA7134_INPUT_MAX + 1; i++)
media_device_unregister_entity(&dev->input_ent[i]);
#endif
}
#if defined(CONFIG_MEDIA_CONTROLLER)
static void saa7134_create_entities(struct saa7134_dev *dev)
{
int ret, i;
struct media_entity *entity;
struct media_entity *decoder = NULL;
/* Check if it is using an external analog TV demod */
media_device_for_each_entity(entity, dev->media_dev) {
if (entity->function == MEDIA_ENT_F_ATV_DECODER) {
decoder = entity;
break;
}
}
/*
* saa713x is not using an external ATV demod.
* Register the internal one
*/
if (!decoder) {
dev->demod.name = "saa713x";
dev->demod_pad[SAA7134_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
dev->demod_pad[SAA7134_PAD_IF_INPUT].sig_type = PAD_SIGNAL_ANALOG;
dev->demod_pad[SAA7134_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE;
dev->demod_pad[SAA7134_PAD_VID_OUT].sig_type = PAD_SIGNAL_DV;
dev->demod.function = MEDIA_ENT_F_ATV_DECODER;
ret = media_entity_pads_init(&dev->demod, SAA7134_NUM_PADS,
dev->demod_pad);
if (ret < 0)
pr_err("failed to initialize demod pad!\n");
ret = media_device_register_entity(dev->media_dev, &dev->demod);
if (ret < 0)
pr_err("failed to register demod entity!\n");
dev->decoder = &dev->demod;
} else {
dev->decoder = decoder;
}
/* Initialize Video, VBI and Radio pads */
dev->video_pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&dev->video_dev->entity, 1,
&dev->video_pad);
if (ret < 0)
pr_err("failed to initialize video media entity!\n");
dev->vbi_pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&dev->vbi_dev->entity, 1,
&dev->vbi_pad);
if (ret < 0)
pr_err("failed to initialize vbi media entity!\n");
/* Create entities for each input connector */
for (i = 0; i < SAA7134_INPUT_MAX; i++) {
struct media_entity *ent = &dev->input_ent[i];
struct saa7134_input *in = &card_in(dev, i);
if (in->type == SAA7134_NO_INPUT)
break;
/* This input uses the S-Video connector */
if (in->type == SAA7134_INPUT_COMPOSITE_OVER_SVIDEO)
continue;
ent->name = saa7134_input_name[in->type];
ent->flags = MEDIA_ENT_FL_CONNECTOR;
dev->input_pad[i].flags = MEDIA_PAD_FL_SOURCE;
switch (in->type) {
case SAA7134_INPUT_COMPOSITE:
case SAA7134_INPUT_COMPOSITE0:
case SAA7134_INPUT_COMPOSITE1:
case SAA7134_INPUT_COMPOSITE2:
case SAA7134_INPUT_COMPOSITE3:
case SAA7134_INPUT_COMPOSITE4:
ent->function = MEDIA_ENT_F_CONN_COMPOSITE;
break;
case SAA7134_INPUT_SVIDEO:
case SAA7134_INPUT_SVIDEO0:
case SAA7134_INPUT_SVIDEO1:
ent->function = MEDIA_ENT_F_CONN_SVIDEO;
break;
default:
/*
* SAA7134_INPUT_TV and SAA7134_INPUT_TV_MONO.
*
* Please notice that neither SAA7134_INPUT_MUTE or
* SAA7134_INPUT_RADIO are defined at
* saa7134_board.input.
*/
ent->function = MEDIA_ENT_F_CONN_RF;
break;
}
ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]);
if (ret < 0)
pr_err("failed to initialize input pad[%d]!\n", i);
ret = media_device_register_entity(dev->media_dev, ent);
if (ret < 0)
pr_err("failed to register input entity %d!\n", i);
}
/* Create input for Radio RF connector */
if (card_has_radio(dev)) {
struct saa7134_input *in = &saa7134_boards[dev->board].radio;
struct media_entity *ent = &dev->input_ent[i];
ent->name = saa7134_input_name[in->type];
ent->flags = MEDIA_ENT_FL_CONNECTOR;
dev->input_pad[i].flags = MEDIA_PAD_FL_SOURCE;
ent->function = MEDIA_ENT_F_CONN_RF;
ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]);
if (ret < 0)
pr_err("failed to initialize input pad[%d]!\n", i);
ret = media_device_register_entity(dev->media_dev, ent);
if (ret < 0)
pr_err("failed to register input entity %d!\n", i);
}
}
#endif
static struct video_device *vdev_init(struct saa7134_dev *dev,
struct video_device *template,
char *type)
{
struct video_device *vfd;
vfd = video_device_alloc();
if (NULL == vfd)
return NULL;
*vfd = *template;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->release = video_device_release;
snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)",
dev->name, type, saa7134_boards[dev->board].name);
video_set_drvdata(vfd, dev);
return vfd;
}
static void saa7134_unregister_video(struct saa7134_dev *dev)
{
saa7134_media_release(dev);
if (dev->video_dev) {
if (video_is_registered(dev->video_dev))
video_unregister_device(dev->video_dev);
else
video_device_release(dev->video_dev);
dev->video_dev = NULL;
}
if (dev->vbi_dev) {
if (video_is_registered(dev->vbi_dev))
video_unregister_device(dev->vbi_dev);
else
video_device_release(dev->vbi_dev);
dev->vbi_dev = NULL;
}
if (dev->radio_dev) {
if (video_is_registered(dev->radio_dev))
video_unregister_device(dev->radio_dev);
else
video_device_release(dev->radio_dev);
dev->radio_dev = NULL;
}
}
static void mpeg_ops_attach(struct saa7134_mpeg_ops *ops,
struct saa7134_dev *dev)
{
int err;
if (NULL != dev->mops)
return;
if (saa7134_boards[dev->board].mpeg != ops->type)
return;
err = ops->init(dev);
if (0 != err)
return;
dev->mops = ops;
}
static void mpeg_ops_detach(struct saa7134_mpeg_ops *ops,
struct saa7134_dev *dev)
{
if (NULL == dev->mops)
return;
if (dev->mops != ops)
return;
dev->mops->fini(dev);
dev->mops = NULL;
}
static int saa7134_initdev(struct pci_dev *pci_dev,
const struct pci_device_id *pci_id)
{
struct saa7134_dev *dev;
struct saa7134_mpeg_ops *mops;
int err;
if (saa7134_devcount == SAA7134_MAXBOARDS)
return -ENOMEM;
dev = kzalloc(sizeof(*dev),GFP_KERNEL);
if (NULL == dev)
return -ENOMEM;
dev->nr = saa7134_devcount;
sprintf(dev->name, "saa%x[%d]", pci_dev->device, dev->nr);
#ifdef CONFIG_MEDIA_CONTROLLER
dev->media_dev = kzalloc(sizeof(*dev->media_dev), GFP_KERNEL);
if (!dev->media_dev) {
err = -ENOMEM;
goto fail0;
}
media_device_pci_init(dev->media_dev, pci_dev, dev->name);
dev->v4l2_dev.mdev = dev->media_dev;
#endif
err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
if (err)
goto fail0;
/* pci init */
dev->pci = pci_dev;
if (pci_enable_device(pci_dev)) {
err = -EIO;
goto fail1;
}
/* pci quirks */
if (pci_pci_problems) {
if (pci_pci_problems & PCIPCI_TRITON)
pr_info("%s: quirk: PCIPCI_TRITON\n", dev->name);
if (pci_pci_problems & PCIPCI_NATOMA)
pr_info("%s: quirk: PCIPCI_NATOMA\n", dev->name);
if (pci_pci_problems & PCIPCI_VIAETBF)
pr_info("%s: quirk: PCIPCI_VIAETBF\n", dev->name);
if (pci_pci_problems & PCIPCI_VSFX)
pr_info("%s: quirk: PCIPCI_VSFX\n", dev->name);
#ifdef PCIPCI_ALIMAGIK
if (pci_pci_problems & PCIPCI_ALIMAGIK) {
pr_info("%s: quirk: PCIPCI_ALIMAGIK -- latency fixup\n",
dev->name);
latency = 0x0A;
}
#endif
if (pci_pci_problems & (PCIPCI_FAIL|PCIAGP_FAIL)) {
pr_info("%s: quirk: this driver and your chipset may not work together in overlay mode.\n",
dev->name);
if (!saa7134_no_overlay) {
pr_info("%s: quirk: overlay mode will be disabled.\n",
dev->name);
saa7134_no_overlay = 1;
} else {
pr_info("%s: quirk: overlay mode will be forced. Use this option at your own risk.\n",
dev->name);
}
}
}
if (UNSET != latency) {
pr_info("%s: setting pci latency timer to %d\n",
dev->name,latency);
pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
}
/* print pci info */
dev->pci_rev = pci_dev->revision;
pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
pr_info("%s: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
dev->pci_lat,
(unsigned long long)pci_resource_start(pci_dev, 0));
pci_set_master(pci_dev);
err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
if (err) {
pr_warn("%s: Oops: no 32bit PCI DMA ???\n", dev->name);
goto fail1;
}
/* board config */
dev->board = pci_id->driver_data;
if ((unsigned)card[dev->nr] < saa7134_bcount)
dev->board = card[dev->nr];
if (SAA7134_BOARD_UNKNOWN == dev->board)
must_configure_manually(0);
else if (SAA7134_BOARD_NOAUTO == dev->board) {
must_configure_manually(1);
dev->board = SAA7134_BOARD_UNKNOWN;
}
dev->autodetected = card[dev->nr] != dev->board;
dev->tuner_type = saa7134_boards[dev->board].tuner_type;
dev->tuner_addr = saa7134_boards[dev->board].tuner_addr;
dev->radio_type = saa7134_boards[dev->board].radio_type;
dev->radio_addr = saa7134_boards[dev->board].radio_addr;
dev->tda9887_conf = saa7134_boards[dev->board].tda9887_conf;
if (UNSET != tuner[dev->nr])
dev->tuner_type = tuner[dev->nr];
pr_info("%s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
dev->name,pci_dev->subsystem_vendor,
pci_dev->subsystem_device,saa7134_boards[dev->board].name,
dev->board, dev->autodetected ?
"autodetected" : "insmod option");
/* get mmio */
if (!request_mem_region(pci_resource_start(pci_dev,0),
pci_resource_len(pci_dev,0),
dev->name)) {
err = -EBUSY;
pr_err("%s: can't get MMIO memory @ 0x%llx\n",
dev->name,(unsigned long long)pci_resource_start(pci_dev,0));
goto fail1;
}
dev->lmmio = ioremap(pci_resource_start(pci_dev, 0),
pci_resource_len(pci_dev, 0));
dev->bmmio = (__u8 __iomem *)dev->lmmio;
if (NULL == dev->lmmio) {
err = -EIO;
pr_err("%s: can't ioremap() MMIO memory\n",
dev->name);
goto fail2;
}
/* initialize hardware #1 */
saa7134_board_init1(dev);
saa7134_hwinit1(dev);
/* get irq */
err = request_irq(pci_dev->irq, saa7134_irq,
IRQF_SHARED, dev->name, dev);
if (err < 0) {
pr_err("%s: can't get IRQ %d\n",
dev->name,pci_dev->irq);
goto fail3;
}
/* wait a bit, register i2c bus */
msleep(100);
saa7134_i2c_register(dev);
saa7134_board_init2(dev);
saa7134_hwinit2(dev);
/* load i2c helpers */
if (card_is_empress(dev)) {
dev->empress_sd =
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
"saa6752hs",
saa7134_boards[dev->board].empress_addr, NULL);
if (dev->empress_sd)
dev->empress_sd->grp_id = GRP_EMPRESS;
}
if (saa7134_boards[dev->board].rds_addr) {
struct v4l2_subdev *sd;
sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_adap, "saa6588",
0, I2C_ADDRS(saa7134_boards[dev->board].rds_addr));
if (sd) {
pr_info("%s: found RDS decoder\n", dev->name);
dev->has_rds = 1;
}
}
mutex_lock(&saa7134_devlist_lock);
list_for_each_entry(mops, &mops_list, next)
mpeg_ops_attach(mops, dev);
list_add_tail(&dev->devlist, &saa7134_devlist);
mutex_unlock(&saa7134_devlist_lock);
/* check for signal */
saa7134_irq_video_signalchange(dev);
if (TUNER_ABSENT != dev->tuner_type)
saa_call_all(dev, core, s_power, 0);
/* register v4l devices */
if (saa7134_no_overlay > 0)
pr_info("%s: Overlay support disabled.\n", dev->name);
dev->video_dev = vdev_init(dev,&saa7134_video_template,"video");
dev->video_dev->ctrl_handler = &dev->ctrl_handler;
dev->video_dev->lock = &dev->lock;
dev->video_dev->queue = &dev->video_vbq;
err = video_register_device(dev->video_dev,VFL_TYPE_GRABBER,
video_nr[dev->nr]);
if (err < 0) {
pr_info("%s: can't register video device\n",
dev->name);
goto fail4;
}
pr_info("%s: registered device %s [v4l2]\n",
dev->name, video_device_node_name(dev->video_dev));
dev->vbi_dev = vdev_init(dev, &saa7134_video_template, "vbi");
dev->vbi_dev->ctrl_handler = &dev->ctrl_handler;
dev->vbi_dev->lock = &dev->lock;
dev->vbi_dev->queue = &dev->vbi_vbq;
err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI,
vbi_nr[dev->nr]);
if (err < 0)
goto fail4;
pr_info("%s: registered device %s\n",
dev->name, video_device_node_name(dev->vbi_dev));
if (card_has_radio(dev)) {
dev->radio_dev = vdev_init(dev,&saa7134_radio_template,"radio");
dev->radio_dev->ctrl_handler = &dev->radio_ctrl_handler;
dev->radio_dev->lock = &dev->lock;
err = video_register_device(dev->radio_dev,VFL_TYPE_RADIO,
radio_nr[dev->nr]);
if (err < 0)
goto fail4;
pr_info("%s: registered device %s\n",
dev->name, video_device_node_name(dev->radio_dev));
}
#ifdef CONFIG_MEDIA_CONTROLLER
saa7134_create_entities(dev);
err = v4l2_mc_create_media_graph(dev->media_dev);
if (err) {
pr_err("failed to create media graph\n");
goto fail4;
}
#endif
/* everything worked */
saa7134_devcount++;
if (saa7134_dmasound_init && !dev->dmasound.priv_data)
saa7134_dmasound_init(dev);
request_submodules(dev);
/*
* Do it at the end, to reduce dynamic configuration changes during
* the device init. Yet, as request_modules() can be async, the
	 * topology will likely change after the saa7134 subdrivers are loaded.
*/
#ifdef CONFIG_MEDIA_CONTROLLER
err = media_device_register(dev->media_dev);
if (err)
goto fail4;
#endif
return 0;
fail4:
saa7134_unregister_video(dev);
saa7134_i2c_unregister(dev);
free_irq(pci_dev->irq, dev);
fail3:
saa7134_hwfini(dev);
iounmap(dev->lmmio);
fail2:
release_mem_region(pci_resource_start(pci_dev,0),
pci_resource_len(pci_dev,0));
fail1:
v4l2_device_unregister(&dev->v4l2_dev);
fail0:
#ifdef CONFIG_MEDIA_CONTROLLER
kfree(dev->media_dev);
#endif
kfree(dev);
return err;
}
static void saa7134_finidev(struct pci_dev *pci_dev)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
struct saa7134_dev *dev = container_of(v4l2_dev, struct saa7134_dev, v4l2_dev);
struct saa7134_mpeg_ops *mops;
flush_request_submodules(dev);
/* Release DMA sound modules if present */
if (saa7134_dmasound_exit && dev->dmasound.priv_data) {
saa7134_dmasound_exit(dev);
}
/* debugging ... */
if (irq_debug) {
u32 report = saa_readl(SAA7134_IRQ_REPORT);
u32 status = saa_readl(SAA7134_IRQ_STATUS);
print_irqstatus(dev,42,report,status);
}
/* disable peripheral devices */
saa_writeb(SAA7134_SPECIAL_MODE,0);
/* shutdown hardware */
saa_writel(SAA7134_IRQ1,0);
saa_writel(SAA7134_IRQ2,0);
saa_writel(SAA7134_MAIN_CTRL,0);
/* shutdown subsystems */
saa7134_hwfini(dev);
/* unregister */
mutex_lock(&saa7134_devlist_lock);
list_del(&dev->devlist);
list_for_each_entry(mops, &mops_list, next)
mpeg_ops_detach(mops, dev);
mutex_unlock(&saa7134_devlist_lock);
saa7134_devcount--;
saa7134_i2c_unregister(dev);
saa7134_unregister_video(dev);
/* the DMA sound modules should be unloaded before reaching
this, but just in case they are still present... */
if (dev->dmasound.priv_data != NULL) {
free_irq(pci_dev->irq, &dev->dmasound);
dev->dmasound.priv_data = NULL;
}
/* release resources */
free_irq(pci_dev->irq, dev);
iounmap(dev->lmmio);
release_mem_region(pci_resource_start(pci_dev,0),
pci_resource_len(pci_dev,0));
v4l2_device_unregister(&dev->v4l2_dev);
saa7134_unregister_media_device(dev);
/* free memory */
kfree(dev);
}
#ifdef CONFIG_PM
/* resends a current buffer in queue after resume */
static int saa7134_buffer_requeue(struct saa7134_dev *dev,
struct saa7134_dmaqueue *q)
{
struct saa7134_buf *buf, *next;
assert_spin_locked(&dev->slock);
buf = q->curr;
next = buf;
core_dbg("buffer_requeue\n");
if (!buf)
return 0;
core_dbg("buffer_requeue : resending active buffer\n");
if (!list_empty(&q->queue))
next = list_entry(q->queue.next, struct saa7134_buf,
entry);
buf->activate(dev, buf, next);
return 0;
}
static int saa7134_suspend(struct pci_dev *pci_dev , pm_message_t state)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
struct saa7134_dev *dev = container_of(v4l2_dev, struct saa7134_dev, v4l2_dev);
/* disable overlay - apps should enable it explicitly on resume*/
dev->ovenable = 0;
/* Disable interrupts, DMA, and rest of the chip*/
saa_writel(SAA7134_IRQ1, 0);
saa_writel(SAA7134_IRQ2, 0);
saa_writel(SAA7134_MAIN_CTRL, 0);
dev->insuspend = 1;
synchronize_irq(pci_dev->irq);
/* ACK interrupts once more, just in case,
since the IRQ handler won't ack them anymore*/
saa_writel(SAA7134_IRQ_REPORT, saa_readl(SAA7134_IRQ_REPORT));
/* Disable timeout timers - if we have active buffers, we will
fill them on resume*/
del_timer(&dev->video_q.timeout);
del_timer(&dev->vbi_q.timeout);
del_timer(&dev->ts_q.timeout);
if (dev->remote && dev->remote->dev->users)
saa7134_ir_close(dev->remote->dev);
pci_save_state(pci_dev);
pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
return 0;
}
static int saa7134_resume(struct pci_dev *pci_dev)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
struct saa7134_dev *dev = container_of(v4l2_dev, struct saa7134_dev, v4l2_dev);
unsigned long flags;
pci_set_power_state(pci_dev, PCI_D0);
pci_restore_state(pci_dev);
	/* Do things that are done in saa7134_initdev,
	   except for initializing memory structures. */
saa7134_board_init1(dev);
/* saa7134_hwinit1 */
if (saa7134_boards[dev->board].video_out)
saa7134_videoport_init(dev);
if (card_has_mpeg(dev))
saa7134_ts_init_hw(dev);
if (dev->remote && dev->remote->dev->users)
saa7134_ir_open(dev->remote->dev);
saa7134_hw_enable1(dev);
msleep(100);
saa7134_board_init2(dev);
/*saa7134_hwinit2*/
saa7134_set_tvnorm_hw(dev);
saa7134_tvaudio_setmute(dev);
saa7134_tvaudio_setvolume(dev, dev->ctl_volume);
saa7134_tvaudio_init(dev);
saa7134_enable_i2s(dev);
saa7134_hw_enable2(dev);
saa7134_irq_video_signalchange(dev);
/*resume unfinished buffer(s)*/
spin_lock_irqsave(&dev->slock, flags);
saa7134_buffer_requeue(dev, &dev->video_q);
saa7134_buffer_requeue(dev, &dev->vbi_q);
saa7134_buffer_requeue(dev, &dev->ts_q);
/* FIXME: Disable DMA audio sound - temporary until proper support
is implemented */
dev->dmasound.dma_running = 0;
/* start DMA now */
dev->insuspend = 0;
smp_wmb();
saa7134_set_dmabits(dev);
spin_unlock_irqrestore(&dev->slock, flags);
return 0;
}
#endif
/* ----------------------------------------------------------- */
int saa7134_ts_register(struct saa7134_mpeg_ops *ops)
{
struct saa7134_dev *dev;
mutex_lock(&saa7134_devlist_lock);
list_for_each_entry(dev, &saa7134_devlist, devlist)
mpeg_ops_attach(ops, dev);
list_add_tail(&ops->next,&mops_list);
mutex_unlock(&saa7134_devlist_lock);
return 0;
}
void saa7134_ts_unregister(struct saa7134_mpeg_ops *ops)
{
struct saa7134_dev *dev;
mutex_lock(&saa7134_devlist_lock);
list_del(&ops->next);
list_for_each_entry(dev, &saa7134_devlist, devlist)
mpeg_ops_detach(ops, dev);
mutex_unlock(&saa7134_devlist_lock);
}
EXPORT_SYMBOL(saa7134_ts_register);
EXPORT_SYMBOL(saa7134_ts_unregister);
/* ----------------------------------------------------------- */
static struct pci_driver saa7134_pci_driver = {
.name = "saa7134",
.id_table = saa7134_pci_tbl,
.probe = saa7134_initdev,
.remove = saa7134_finidev,
#ifdef CONFIG_PM
.suspend = saa7134_suspend,
.resume = saa7134_resume
#endif
};
static int __init saa7134_init(void)
{
INIT_LIST_HEAD(&saa7134_devlist);
pr_info("saa7130/34: v4l2 driver version %s loaded\n",
SAA7134_VERSION);
return pci_register_driver(&saa7134_pci_driver);
}
static void __exit saa7134_fini(void)
{
pci_unregister_driver(&saa7134_pci_driver);
}
module_init(saa7134_init);
module_exit(saa7134_fini);
/* ----------------------------------------------------------- */
EXPORT_SYMBOL(saa7134_set_gpio);
EXPORT_SYMBOL(saa7134_boards);
/* ----------------- for the DMA sound modules --------------- */
EXPORT_SYMBOL(saa7134_dmasound_init);
EXPORT_SYMBOL(saa7134_dmasound_exit);
EXPORT_SYMBOL(saa7134_pgtable_free);
EXPORT_SYMBOL(saa7134_pgtable_build);
EXPORT_SYMBOL(saa7134_pgtable_alloc);
EXPORT_SYMBOL(saa7134_set_dmabits);
|
55392.c | // Copyright (c) 2004-2020 Microchip Technology Inc. and its subsidiaries.
// SPDX-License-Identifier: MIT
/*
*****************************************************************************************
* Revision 0.1 2018/10/04 sunilp
* added CHAR board support
* Revision 0.1 2018/08/02 sunilp
* 0.1 is the first version for the Venice family of products (VSC8489/90/91(-xx))
*****************************************************************************************
*/
//***************************************************************************
//* This file contains board specific functions needed for running the PHY *
//* API on a Venice Char board. The Characterization board is equipped with *
//* a Rabbit CPU board, which do the communication with the PHY using a *
//* socket connection. The actual API is running on the host computer. The *
//* API has been tested with both Linux (Red Hat) and Cygwin. *
//***************************************************************************
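// Note (inferred from the commands built further down in this file): the
// Rabbit CPU board is driven with a simple line-oriented ASCII protocol
// over the socket, e.g.
//   "mdio_rd <port> <mmd> <addr>"          - MMD/MDIO register read
//   "mdio_wr <port> <mmd> <addr> <data>"   - MMD/MDIO register write
//   "spird <port> <mmd> <addr>"            - 32-bit SPI register read
//   "spiwr <port> <mmd> <addr> <value>"    - 32-bit SPI register write
// All fields are sent as hex; replies are parsed as a hex value taken from
// the first characters of the returned line.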
#include <netdb.h> // For socket
#include <stdarg.h> // For va_list
#include "vtss_api.h" // For BOOL and friends
#include "vtss_appl.h" // For board types
#include <unistd.h> // For TCP read/write
#include <sys/stat.h>// For /sys/class/mdio file status
#include <sys/types.h>// For /sys/class/mdio file status
#include <errno.h>// For System errors
#include <unistd.h>// For /sys/class/mdio
#include <stdio.h> // for FILE types
#include <stdlib.h> // for EXIT_SUCCESS
#include "vtss/api/options.h"
// // Fixed socket port for the CPU board used
// #define CPU_BOARD_PORT "26"
// Define which trace group to use for VTSS printout in this file
#define VTSS_TRACE_GROUP VTSS_TRACE_GROUP_PHY
/* ================================================================= *
* Misc. functions
* ================================================================= */
// Function defining the port interface.
static vtss_port_interface_t port_interface(vtss_port_no_t port_no)
{
return VTSS_PORT_INTERFACE_RGMII;
}
/* ================================================================= *
* Board specific functions
* ================================================================= */
int sockfd;
FILE *miim_read_fp;
FILE *miim_write_fp;
char miim_read_file[64];
// Function for reading data from the Rabbit CPU via the socket
// In : Buffer - Pointer to the buffer that receives the data read from the socket.
static void socket_read (char *buffer) {
int n;
n = read(sockfd, buffer, 255);
if (n < 0)
T_E("ERROR reading from socket");
//printf("...\n%s...\n", buffer);
}
// Function for doing write access to the Rabbit CPU via socket
// In : Buffer - Pointer to the text to send over the socket
static void socket_write (char *buffer) {
int n;
n = write(sockfd, buffer, strlen(buffer));
if (n < 0) {
T_E("ERROR writing to socket");
} else {
T_N("TX: %s\n", buffer);
}
}
// Function for initializing the socket connection to the Rabbit CPU.
// In : server_addr - IP address for the CPU board
// port - Port used for the socket connection
static void socket_init (const char *server_addr, const char *port) {
struct hostent *server;
struct sockaddr_in serv_addr;
int portno = atoi(port);
server = gethostbyname(server_addr);
sockfd = socket(AF_INET, SOCK_STREAM, 0);
if (sockfd < 0)
T_E("ERROR opening socket");
if (server == NULL) {
fprintf(stderr,"ERROR, no such host\n");
exit(0);
}
bzero((char *) &serv_addr, sizeof(serv_addr));
serv_addr.sin_family = AF_INET;
bcopy((char *)server->h_addr,
(char *)&serv_addr.sin_addr.s_addr,
server->h_length);
serv_addr.sin_port = htons(portno);
if (connect(sockfd, (struct sockaddr *) &serv_addr,sizeof(serv_addr)) < 0)
T_E("ERROR connecting");
}
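// Illustrative use (the IP address is only an example, matching the usage
// text printed by venice_char_board_init() below):
//   socket_init("10.10.132.59", "2310");
//   socket_write("mdio_rd 0 1e 0");
//   socket_read(reply_buffer);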
#if defined(VTSS_CHIP_10G_PHY)
////////////////////////////////////////////////////////////////////////////////
// MMD_Read and MMD_Write Code for Rabbit
// ---Unfinished because the Rabbit code is optimized for mnemonic values, not straight port/mmd/addr hex values
////////////////////////////////////////////////////////////////////////////////
vtss_rc mmd_read_rbt(const vtss_inst_t inst,
const vtss_port_no_t port_no,
const u8 mmd,
u16 addr,
u16 *const value)
{
char buffer[255];
int v;
sprintf(buffer, "mdio_rd %x %x %x", port_no, mmd, addr);
printf ("%s\n", buffer);
T_N("mmd_read_rbt: %s", buffer);
socket_write(&buffer[0]);
memset(buffer, 0, sizeof(buffer));
socket_read(&buffer[0]);
printf ("%s\n", buffer);
buffer[8] = '\0'; // NUL-terminate to strip off the CR/LF and prompt
v = (u32)strtol(buffer,NULL,16); //TODO: put back the error checking that exists below.
*value = v;
// if (sscanf(buffer, "%x", &v) == 1) {
// // if (sprintf(num, "%x", &v) == 1) {
// *value = v;
// } else {
// T_E("missing value: buffer=%s, port 0x%X, mmd 0x%X, addr = 0x%X", buffer, port_no, mmd, addr);
// }
// T_N("mmd_read port_no = 0x%X, mmd = 0x%X, addr = 0x%X, value = 0x%X", port_no, mmd, addr, *value);
//printf("mmd_read port_no = 0x%X, mmd = 0x%X, addr = 0x%X, value = 0x%X\n", port_no, mmd, addr, *value);
return VTSS_RC_OK;
}
vtss_rc mmd_write_rbt(const vtss_inst_t inst,
const vtss_port_no_t port_no,
const u8 mmd,
u16 addr,
u16 data)
{
char buffer[255];
sprintf(buffer, "mdio_wr %x %x %x %x", port_no, mmd, addr, data);
// printf ("%s\n", buffer);
T_N("mmd_write_rbt: %s", buffer);
socket_write(&buffer[0]);
socket_read(&buffer[0]);
return VTSS_RC_OK;
}
vtss_rc mmd_read_rbt_test( u8 port_no,
u8 mmd,
u16 addr,
u16 *value)
{
char buffer[255];
int v;
//sprintf(buffer, "mdiord %x %x %x %x", port_no, mmd, addr);
sprintf(buffer, "spird %x %x %x\n", port_no, mmd, addr);
printf ("%s\n", buffer);
// T_N("mmd_read_rbt: %s", buffer);
socket_write(&buffer[0]);
memset(buffer, 0, sizeof(buffer));
socket_read(&buffer[0]);
buffer[8] = '\0'; // NUL-terminate to strip off the CR/LF and prompt
u16 num = (u16)strtol(buffer,NULL,16); // TODO: fix this function to work with mdio, has to do with Rabbit parameters in mdio mode
printf("%X\n", num);
*value = num;
printf("tmp=%X\n", *value);
// if (sscanf(buffer, "%x", &v) == 1) {
// // if (sprintf(num, "%x", &v) == 1) {
// *value = v;
// } else {
// T_E("missing value: buffer=%s, port 0x%X, mmd 0x%X, addr = 0x%X", buffer, port_no, mmd, addr);
// }
// value = num * 1;
// T_N("mmd_read port_no = 0x%X, mmd = 0x%X, addr = 0x%X, value = 0x%X", port_no, mmd, addr, *value);
printf("mmd_read port_no = 0x%X, mmd = 0x%X, addr = 0x%X, value = 0x%X\n", port_no, mmd, addr, *value);
return VTSS_RC_OK;
}
vtss_rc spi_read_rbt_test( u8 port_no,
u8 mmd,
u16 addr,
u16 *value)
{
char buffer[255];
u16 v;
sprintf(buffer, "spird %x %x %x\n", port_no, mmd, addr);
//printf ("%s\n", buffer);
T_N("spi_read_rbt_test: %s", buffer);
socket_write(&buffer[0]);
memset(buffer, 0, sizeof(buffer));
socket_read(&buffer[0]);
buffer[8] = '\0'; // NUL-terminate to strip off the CR/LF and prompt
v = (u32)strtol(buffer,NULL,16); //TODO: put back the error checking that exists below.
*value = v;
// if (sscanf(buffer, "%x", &v) == 1) {
// // if (sprintf(num, "%x", &v) == 1) {
// *value = v;
// } else {
// T_E("missing value: buffer=%s, port 0x%X, mmd 0x%X, addr = 0x%X", buffer, port_no, mmd, addr);
// }
// T_N("mmd_read port_no = 0x%X, mmd = 0x%X, addr = 0x%X, value = 0x%X", port_no, mmd, addr, *value);
printf("mmd_read port_no = 0x%X, mmd = 0x%X, addr = 0x%X, value = 0x%X\n", port_no, mmd, addr, *value);
return VTSS_RC_OK;
}
////////////////////////////////////////////////////////////////////////////////
// SPI_Read_Write Code for Rabbit
////////////////////////////////////////////////////////////////////////////////
vtss_rc spi_32bit_read_write_rbt(vtss_inst_t inst,
vtss_port_no_t port_no,
BOOL rd, // (1=rd, 0=wr)
u8 mmd,
u16 addr,
u32 *value)
{
char buffer[255];
u32 v;
int i;
errno = 0;
if(rd){
sprintf(buffer, "spird %x %x %x\n", port_no, mmd, addr);
//printf ("spi32_read_rbt: %s\n", buffer);
T_N("spi32_read_rbt: %s", buffer);
socket_write(&buffer[0]);
memset(buffer, 0, sizeof(buffer));
socket_read(&buffer[0]);
//printf ("buffer: %s\n", buffer);
// fprintf(stderr, "%s\n", buffer);
buffer[8] = '\0'; // NUL-terminate to strip off the CR/LF and prompt
v = strtoul(buffer,NULL,16);
if(buffer[0] == 'V' && buffer[1] == 'a' && buffer[2] == 'l'){ //TODO: put in better error checking to ensure valid response
// if(NULL == buffer){
// fprintf(stderr, "%s: not a decimal number\n", buffer);
// } else if ((ULONG_MIN == v || ULONG_MAX == v) && errno == ERANGE) {
// fprintf(stderr, "%s: out of range\n", buffer);
// } else if (v > INT_MAX) {
// fprintf(stderr, "%X: > INT_MAX\n", v);
// } else if (v < INT_MIN) {
// fprintf(stderr, "%X: < INT_MIN\n", v);
T_E("missing value: buffer=%s, port 0x%X, mmd 0x%X, addr = 0x%X", buffer, port_no, mmd, addr);
printf("ERROR: missing value: buffer=%s, port 0x%X, mmd 0x%X, addr = 0x%X\n", buffer, port_no, mmd, addr);
fflush(stdout);
return VTSS_RC_ERROR;
} else{
*value = (u32)v;
}
T_N("spi32_rd port_no = 0x%X, mmd = 0x%X, addr = 0x%X, value = 0x%X", port_no, mmd, addr, *value);
//printf("spi32_rd port_no = 0x%X, mmd = 0x%X, addr = 0x%X, value = 0x%X\n", port_no, mmd, addr, *value);
} else {
sprintf(buffer, "spiwr %x %x %x %x\n", port_no, mmd, addr, *value);
//printf("spi_write_rbt: %s", buffer);
T_N("spi_write_rbt: %s", buffer);
socket_write(&buffer[0]);
socket_read(&buffer[0]);
T_N("spi32_wr port_no = 0x%X, mmd = 0x%X, addr = 0x%X, value = 0x%X", port_no, mmd, addr, *value);
// printf("spi32_wr port_no = 0x%X, mmd = 0x%X, addr = 0x%X, value = 0x%X\n", port_no, mmd, addr, *value);
}
return VTSS_RC_OK;
}
#endif /* VTSS_CHIP_10G_PHY */
// Function for initializing the Rabbit Char board.
int venice_char_board_init(int argc, const char **argv, vtss_appl_board_t *board)
{
printf("In venice_char_board_init\n"); fflush(stdout);
board->descr = "Venice_Char";
board->target = VTSS_TARGET_10G_PHY; // 10G PHY
board->port_count = VTSS_PORTS; // Set up the number of ports used
board->port_interface = port_interface; // Define the port interface
// board->init.init_conf->miim_read = miim_read; // Set pointer to the MIIM read function for this board.
// board->init.init_conf->miim_write = miim_write; // Set pointer to the MIIM write function for this board.
//#if defined(VTSS_CHIP_10G_PHY)
// board->init.init_conf->mmd_read = *mmd_read_rbt; // Set pointer to the MMD read function for this board.
// board->init.init_conf->mmd_read_inc = *mmd_read_inc_rbt; // Set pointer to the MMD read function for this board.
// board->init.init_conf->mmd_write = mmd_write_rbt; // Set pointer to the MMD write function for this board.
board->init.init_conf->spi_32bit_read_write = *spi_32bit_read_write_rbt; // Set pointer to the SPI read function for this board.
//#endif /* VTSS_CHIP_10G_PHY */
//board->init.init_conf->warm_start_enable = FALSE;
//if (board->init.init_conf->warm_start_enable != TRUE) {
if (argc != 2) {
printf("Usage : %s <Rabbit IP Address> \n" , argv[0]);
printf("Example: %s 10.10.132.59 \n" , argv[0]);
exit(EXIT_SUCCESS);
}
socket_init(argv[1], "2310"); // Connect to the CPU board
//}
//TEST CODE
u16 *val = (u16 *)malloc(sizeof(u16));
//mmd_read_rbt_test(0x0, 0x1e, 0x0, val);
// spi_read_rbt_test(0x0, 0x1e, 0x0, val);
// printf("Dev ID = 0x%x\n",*val); fflush(stdout);
// u32 *val = (u32 *)malloc(sizeof(u32));
//
printf("Out venice_char_board_init\n"); fflush(stdout);
return 0;
}
// Function for initializing the hardware board.
int venice_board_init(int argc, const char **argv, vtss_appl_board_t *board)
{
#if defined(RPI_MIIM)
rpi_venice_board_init(argc, argv, board); // Use init function for Raspberry PI MIIM.
#else
venice_char_board_init(argc, argv, board); // Use init function for Rabbit MIIM.
#endif /* RPI_MIIM */
return 0;
}
|
856154.c | /****************************************************************************
* drivers/clk/clk_rpmsg.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <string.h>
#include <nuttx/clk/clk.h>
#include <nuttx/clk/clk_provider.h>
#include <nuttx/kmalloc.h>
#include <nuttx/mutex.h>
#include <nuttx/rptun/openamp.h>
#include <nuttx/semaphore.h>
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
#define CLK_RPMSG_EPT_NAME "rpmsg-clk"
#define CLK_RPMSG_ENABLE 0
#define CLK_RPMSG_DISABLE 1
#define CLK_RPMSG_SETRATE 2
#define CLK_RPMSG_SETPHASE 3
#define CLK_RPMSG_GETPHASE 4
#define CLK_RPMSG_GETRATE 5
#define CLK_RPMSG_ROUNDRATE 6
#define CLK_RPMSG_ISENABLED 7
#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
/****************************************************************************
* Private Types
****************************************************************************/
struct clk_rpmsg_priv_s
{
struct rpmsg_endpoint ept;
struct list_node clk_list;
struct list_node node;
FAR const char *cpuname;
};
struct clk_rpmsg_s
{
FAR struct clk_s *clk;
uint32_t count;
struct list_node node;
};
struct clk_rpmsg_cookie_s
{
sem_t sem;
int64_t result;
};
begin_packed_struct struct clk_rpmsg_header_s
{
uint32_t command;
uint32_t response;
int64_t result;
uint64_t cookie;
} end_packed_struct;
begin_packed_struct struct clk_rpmsg_enable_s
{
struct clk_rpmsg_header_s header;
char name[0];
} end_packed_struct;
#define clk_rpmsg_disable_s clk_rpmsg_enable_s
#define clk_rpmsg_isenabled_s clk_rpmsg_enable_s
begin_packed_struct struct clk_rpmsg_setrate_s
{
struct clk_rpmsg_header_s header;
uint64_t rate;
char name[0];
} end_packed_struct;
#define clk_rpmsg_getrate_s clk_rpmsg_enable_s
#define clk_rpmsg_roundrate_s clk_rpmsg_setrate_s
begin_packed_struct struct clk_rpmsg_setphase_s
{
struct clk_rpmsg_header_s header;
int32_t degrees;
char name[0];
} end_packed_struct;
#define clk_rpmsg_getphase_s clk_rpmsg_enable_s
/****************************************************************************
* Private Function Prototypes
****************************************************************************/
static FAR struct clk_rpmsg_priv_s *clk_rpmsg_get_priv(FAR const char *name);
static FAR struct rpmsg_endpoint *clk_rpmsg_get_ept(FAR const char **name);
static FAR struct clk_rpmsg_s *
clk_rpmsg_get_clk(FAR struct rpmsg_endpoint *ept,
FAR const char *name);
static int clk_rpmsg_enable_handler(FAR struct rpmsg_endpoint *ept,
FAR void *data, size_t len,
uint32_t src, FAR void *priv);
static int clk_rpmsg_disable_handler(FAR struct rpmsg_endpoint *ept,
FAR void *data, size_t len,
uint32_t src, FAR void *priv);
static int clk_rpmsg_getrate_handler(FAR struct rpmsg_endpoint *ept,
FAR void *data, size_t len,
uint32_t src, FAR void *priv);
static int clk_rpmsg_roundrate_handler(FAR struct rpmsg_endpoint *ept,
FAR void *data, size_t len,
uint32_t src, FAR void *priv);
static int clk_rpmsg_setrate_handler(FAR struct rpmsg_endpoint *ept,
FAR void *data, size_t len,
uint32_t src, FAR void *priv);
static int clk_rpmsg_setphase_handler(FAR struct rpmsg_endpoint *ept,
FAR void *data, size_t len,
uint32_t src, FAR void *priv);
static int clk_rpmsg_getphase_handler(FAR struct rpmsg_endpoint *ept,
FAR void *data, size_t len,
uint32_t src, FAR void *priv);
static int clk_rpmsg_isenabled_handler(FAR struct rpmsg_endpoint *ept,
FAR void *data, size_t len,
uint32_t src, FAR void *priv);
static void clk_rpmsg_device_created(FAR struct rpmsg_device *rdev,
FAR void *priv_);
static void clk_rpmsg_device_destroy(FAR struct rpmsg_device *rdev,
FAR void *priv_);
static int clk_rpmsg_ept_cb(FAR struct rpmsg_endpoint *ept,
FAR void *data, size_t len,
uint32_t src, FAR void *priv);
static int64_t clk_rpmsg_sendrecv(FAR struct rpmsg_endpoint *ept,
uint32_t command,
FAR struct clk_rpmsg_header_s *msg,
int32_t len);
static int clk_rpmsg_enable(FAR struct clk_s *clk);
static void clk_rpmsg_disable(FAR struct clk_s *clk);
static int clk_rpmsg_is_enabled(FAR struct clk_s *clk);
static uint32_t clk_rpmsg_round_rate(FAR struct clk_s *clk, uint32_t rate,
FAR uint32_t *parent_rate);
static int clk_rpmsg_set_rate(FAR struct clk_s *clk,
uint32_t rate, uint32_t parent_rate);
static uint32_t clk_rpmsg_recalc_rate(FAR struct clk_s *clk,
uint32_t parent_rate);
static int clk_rpmsg_get_phase(FAR struct clk_s *clk);
static int clk_rpmsg_set_phase(FAR struct clk_s *clk, int degrees);
/****************************************************************************
* Private Data
****************************************************************************/
static mutex_t g_clk_rpmsg_lock = MUTEX_INITIALIZER;
static struct list_node g_clk_rpmsg_priv =
LIST_INITIAL_VALUE(g_clk_rpmsg_priv);
static const rpmsg_ept_cb g_clk_rpmsg_handler[] =
{
[CLK_RPMSG_ENABLE] = clk_rpmsg_enable_handler,
[CLK_RPMSG_DISABLE] = clk_rpmsg_disable_handler,
[CLK_RPMSG_SETRATE] = clk_rpmsg_setrate_handler,
[CLK_RPMSG_SETPHASE] = clk_rpmsg_setphase_handler,
[CLK_RPMSG_GETPHASE] = clk_rpmsg_getphase_handler,
[CLK_RPMSG_GETRATE] = clk_rpmsg_getrate_handler,
[CLK_RPMSG_ROUNDRATE] = clk_rpmsg_roundrate_handler,
[CLK_RPMSG_ISENABLED] = clk_rpmsg_isenabled_handler,
};
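/* Note: the command field of struct clk_rpmsg_header_s is used directly as
 * an index into this table by clk_rpmsg_ept_cb() below, so the CLK_RPMSG_*
 * values above must stay in sync with the initializers here.
 */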
/****************************************************************************
* Private Functions
****************************************************************************/
static FAR struct clk_rpmsg_priv_s *clk_rpmsg_get_priv(FAR const char *name)
{
FAR struct clk_rpmsg_priv_s *priv;
nxmutex_lock(&g_clk_rpmsg_lock);
list_for_every_entry(&g_clk_rpmsg_priv, priv,
struct clk_rpmsg_priv_s, node)
{
size_t len = strlen(priv->cpuname);
if (!strncmp(priv->cpuname, name, len) &&
(name[len] == '/' || name[len] == 0))
{
goto out;
}
}
priv = NULL;
out:
nxmutex_unlock(&g_clk_rpmsg_lock);
return priv;
}
static FAR struct rpmsg_endpoint *clk_rpmsg_get_ept(FAR const char **name)
{
FAR struct clk_rpmsg_priv_s *priv;
priv = clk_rpmsg_get_priv(*name);
if (priv == NULL)
{
return NULL;
}
*name += strlen(priv->cpuname) + 1;
return &priv->ept;
}
static FAR struct clk_rpmsg_s *
clk_rpmsg_get_clk(FAR struct rpmsg_endpoint *ept, FAR const char *name)
{
FAR struct clk_rpmsg_priv_s *priv = ept->priv;
FAR struct list_node *clk_list = &priv->clk_list;
FAR struct clk_rpmsg_s *clkrp;
list_for_every_entry(clk_list, clkrp, struct clk_rpmsg_s, node)
{
if (!strcmp(clk_get_name(clkrp->clk), name))
{
return clkrp;
}
}
clkrp = kmm_zalloc(sizeof(*clkrp));
if (!clkrp)
{
return NULL;
}
clkrp->clk = clk_get(name);
if (!clkrp->clk)
{
kmm_free(clkrp);
return NULL;
}
list_add_head(clk_list, &clkrp->node);
return clkrp;
}
static int clk_rpmsg_enable_handler(FAR struct rpmsg_endpoint *ept,
FAR void *data, size_t len,
uint32_t src, FAR void *priv)
{
FAR struct clk_rpmsg_enable_s *msg = data;
FAR struct clk_rpmsg_s *clkrp = clk_rpmsg_get_clk(ept, msg->name);
if (clkrp)
{
msg->header.result = clk_enable(clkrp->clk);
if (!msg->header.result)
{
clkrp->count++;
}
}
else
{
msg->header.result = -ENOENT;
}
return rpmsg_send(ept, msg, sizeof(*msg));
}
static int clk_rpmsg_disable_handler(FAR struct rpmsg_endpoint *ept,
FAR void *data, size_t len,
uint32_t src, FAR void *priv)
{
FAR struct clk_rpmsg_disable_s *msg = data;
FAR struct clk_rpmsg_s *clkrp = clk_rpmsg_get_clk(ept, msg->name);
if (clkrp)
{
clk_disable(clkrp->clk);
clkrp->count--;
msg->header.result = 0;
}
else
{
msg->header.result = -ENOENT;
}
return rpmsg_send(ept, msg, sizeof(*msg));
}
static int clk_rpmsg_getrate_handler(FAR struct rpmsg_endpoint *ept,
FAR void *data, size_t len,
uint32_t src, FAR void *priv)
{
FAR struct clk_rpmsg_getrate_s *msg = data;
FAR struct clk_rpmsg_s *clkrp = clk_rpmsg_get_clk(ept, msg->name);
if (clkrp)
{
msg->header.result = clk_get_rate(clkrp->clk);
}
else
{
msg->header.result = -ENOENT;
}
return rpmsg_send(ept, msg, sizeof(*msg));
}
static int clk_rpmsg_roundrate_handler(FAR struct rpmsg_endpoint *ept,
FAR void *data, size_t len,
uint32_t src, FAR void *priv)
{
FAR struct clk_rpmsg_roundrate_s *msg = data;
FAR struct clk_rpmsg_s *clkrp = clk_rpmsg_get_clk(ept, msg->name);
if (clkrp)
{
msg->header.result = clk_round_rate(clkrp->clk, msg->rate);
}
else
{
msg->header.result = -ENOENT;
}
return rpmsg_send(ept, msg, sizeof(*msg));
}
static int clk_rpmsg_setrate_handler(FAR struct rpmsg_endpoint *ept,
FAR void *data, size_t len,
uint32_t src, FAR void *priv)
{
FAR struct clk_rpmsg_setrate_s *msg = data;
FAR struct clk_rpmsg_s *clkrp = clk_rpmsg_get_clk(ept, msg->name);
if (clkrp)
{
msg->header.result = clk_set_rate(clkrp->clk, msg->rate);
}
else
{
msg->header.result = -ENOENT;
}
return rpmsg_send(ept, msg, sizeof(*msg));
}
static int clk_rpmsg_setphase_handler(FAR struct rpmsg_endpoint *ept,
FAR void *data, size_t len,
uint32_t src, FAR void *priv)
{
FAR struct clk_rpmsg_setphase_s *msg = data;
FAR struct clk_rpmsg_s *clkrp = clk_rpmsg_get_clk(ept, msg->name);
if (clkrp)
{
msg->header.result = clk_set_phase(clkrp->clk, msg->degrees);
}
else
{
msg->header.result = -ENOENT;
}
return rpmsg_send(ept, msg, sizeof(*msg));
}
static int clk_rpmsg_getphase_handler(FAR struct rpmsg_endpoint *ept,
FAR void *data, size_t len,
uint32_t src, FAR void *priv)
{
FAR struct clk_rpmsg_getphase_s *msg = data;
FAR struct clk_rpmsg_s *clkrp = clk_rpmsg_get_clk(ept, msg->name);
if (clkrp)
{
msg->header.result = clk_get_phase(clkrp->clk);
}
else
{
msg->header.result = -ENOENT;
}
return rpmsg_send(ept, msg, sizeof(*msg));
}
static int clk_rpmsg_isenabled_handler(FAR struct rpmsg_endpoint *ept,
FAR void *data, size_t len,
uint32_t src, FAR void *priv)
{
FAR struct clk_rpmsg_isenabled_s *msg = data;
FAR struct clk_rpmsg_s *clkrp = clk_rpmsg_get_clk(ept, msg->name);
if (clkrp)
{
msg->header.result = clk_is_enabled(clkrp->clk);
}
else
{
msg->header.result = -ENOENT;
}
return rpmsg_send(ept, msg, sizeof(*msg));
}
static int64_t clk_rpmsg_sendrecv(FAR struct rpmsg_endpoint *ept,
uint32_t command,
FAR struct clk_rpmsg_header_s *msg,
int32_t len)
{
struct clk_rpmsg_cookie_s cookie;
int ret;
msg->command = command;
msg->response = 0;
msg->cookie = (uintptr_t)&cookie;
nxsem_init(&cookie.sem, 0, 0);
nxsem_set_protocol(&cookie.sem, SEM_PRIO_NONE);
cookie.result = -EIO;
ret = rpmsg_send_nocopy(ept, msg, len);
if (ret < 0)
{
return ret;
}
ret = nxsem_wait_uninterruptible(&cookie.sem);
if (ret < 0)
{
return ret;
}
return cookie.result;
}
static void clk_rpmsg_device_created(FAR struct rpmsg_device *rdev,
FAR void *priv_)
{
struct clk_rpmsg_priv_s *priv;
int ret;
priv = kmm_zalloc(sizeof(struct clk_rpmsg_priv_s));
if (!priv)
{
return;
}
priv->ept.priv = priv;
priv->cpuname = rpmsg_get_cpuname(rdev);
list_initialize(&priv->clk_list);
nxmutex_lock(&g_clk_rpmsg_lock);
list_add_head(&g_clk_rpmsg_priv, &priv->node);
nxmutex_unlock(&g_clk_rpmsg_lock);
ret = rpmsg_create_ept(&priv->ept, rdev, CLK_RPMSG_EPT_NAME,
RPMSG_ADDR_ANY, RPMSG_ADDR_ANY,
clk_rpmsg_ept_cb, NULL);
if (ret)
{
/* undo the list insertion before freeing on failure */
nxmutex_lock(&g_clk_rpmsg_lock);
list_delete(&priv->node);
nxmutex_unlock(&g_clk_rpmsg_lock);
kmm_free(priv);
}
}
static void clk_rpmsg_device_destroy(FAR struct rpmsg_device *rdev,
FAR void *priv_)
{
struct clk_rpmsg_s *clkrp;
struct clk_rpmsg_s *clkrp_tmp;
struct clk_rpmsg_priv_s *priv;
priv = clk_rpmsg_get_priv(rpmsg_get_cpuname(rdev));
if (!priv)
{
return;
}
list_for_every_entry_safe(&priv->clk_list, clkrp, clkrp_tmp,
struct clk_rpmsg_s, node)
{
while (clkrp->count--)
{
clk_disable(clkrp->clk);
}
list_delete(&clkrp->node);
kmm_free(clkrp);
}
nxmutex_lock(&g_clk_rpmsg_lock);
list_delete(&priv->node);
nxmutex_unlock(&g_clk_rpmsg_lock);
rpmsg_destroy_ept(&priv->ept);
kmm_free(priv);
}
static int clk_rpmsg_ept_cb(FAR struct rpmsg_endpoint *ept, FAR void *data,
size_t len, uint32_t src, FAR void *priv)
{
FAR struct clk_rpmsg_header_s *hdr = data;
uint32_t cmd = hdr->command;
int ret = -EINVAL;
if (hdr->response)
{
FAR struct clk_rpmsg_cookie_s *cookie =
(struct clk_rpmsg_cookie_s *)(uintptr_t)hdr->cookie;
if (cookie)
{
cookie->result = hdr->result;
nxsem_post(&cookie->sem);
ret = 0;
}
}
else if (cmd < ARRAY_SIZE(g_clk_rpmsg_handler)
&& g_clk_rpmsg_handler[cmd])
{
hdr->response = 1;
ret = g_clk_rpmsg_handler[cmd](ept, data, len, src, priv);
}
return ret;
}
static int clk_rpmsg_enable(FAR struct clk_s *clk)
{
FAR struct rpmsg_endpoint *ept;
FAR struct clk_rpmsg_enable_s *msg;
FAR const char *name = clk->name;
uint32_t size;
uint32_t len;
ept = clk_rpmsg_get_ept(&name);
if (!ept)
{
return -ENODEV;
}
len = sizeof(*msg) + B2C(strlen(name) + 1);
msg = rpmsg_get_tx_payload_buffer(ept, &size, true);
if (!msg)
{
return -ENOMEM;
}
DEBUGASSERT(len <= size);
cstr2bstr(msg->name, name);
return clk_rpmsg_sendrecv(ept, CLK_RPMSG_ENABLE,
(struct clk_rpmsg_header_s *)msg,
len);
}
static void clk_rpmsg_disable(FAR struct clk_s *clk)
{
FAR struct rpmsg_endpoint *ept;
FAR struct clk_rpmsg_disable_s *msg;
FAR const char *name = clk->name;
uint32_t size;
uint32_t len;
ept = clk_rpmsg_get_ept(&name);
if (!ept)
{
return;
}
len = sizeof(*msg) + B2C(strlen(name) + 1);
msg = rpmsg_get_tx_payload_buffer(ept, &size, true);
if (!msg)
{
return;
}
DEBUGASSERT(len <= size);
cstr2bstr(msg->name, name);
clk_rpmsg_sendrecv(ept, CLK_RPMSG_DISABLE,
(struct clk_rpmsg_header_s *)msg, len);
}
static int clk_rpmsg_is_enabled(FAR struct clk_s *clk)
{
FAR struct rpmsg_endpoint *ept;
FAR struct clk_rpmsg_enable_s *msg;
FAR const char *name = clk->name;
uint32_t size;
uint32_t len;
ept = clk_rpmsg_get_ept(&name);
if (!ept)
{
return -ENODEV;
}
len = sizeof(*msg) + B2C(strlen(name) + 1);
msg = rpmsg_get_tx_payload_buffer(ept, &size, true);
if (!msg)
{
return -ENOMEM;
}
DEBUGASSERT(len <= size);
cstr2bstr(msg->name, name);
return clk_rpmsg_sendrecv(ept, CLK_RPMSG_ISENABLED,
(struct clk_rpmsg_header_s *)msg, len);
}
static uint32_t clk_rpmsg_round_rate(FAR struct clk_s *clk, uint32_t rate,
FAR uint32_t *parent_rate)
{
FAR struct rpmsg_endpoint *ept;
FAR struct clk_rpmsg_roundrate_s *msg;
FAR const char *name = clk->name;
uint32_t size;
uint32_t len;
int64_t ret;
ept = clk_rpmsg_get_ept(&name);
if (!ept)
{
return 0;
}
len = sizeof(*msg) + B2C(strlen(name) + 1);
msg = rpmsg_get_tx_payload_buffer(ept, &size, true);
if (!msg)
{
return 0;
}
DEBUGASSERT(len <= size);
msg->rate = rate;
cstr2bstr(msg->name, name);
ret = clk_rpmsg_sendrecv(ept, CLK_RPMSG_ROUNDRATE,
(struct clk_rpmsg_header_s *)msg, len);
if (ret < 0)
{
return 0;
}
return ret;
}
static int clk_rpmsg_set_rate(FAR struct clk_s *clk, uint32_t rate,
uint32_t parent_rate)
{
FAR struct rpmsg_endpoint *ept;
FAR struct clk_rpmsg_setrate_s *msg;
FAR const char *name = clk->name;
uint32_t size;
uint32_t len;
ept = clk_rpmsg_get_ept(&name);
if (!ept)
{
return -ENODEV;
}
len = sizeof(*msg) + B2C(strlen(name) + 1);
msg = rpmsg_get_tx_payload_buffer(ept, &size, true);
if (!msg)
{
return -ENOMEM;
}
DEBUGASSERT(len <= size);
msg->rate = rate;
cstr2bstr(msg->name, name);
return clk_rpmsg_sendrecv(ept, CLK_RPMSG_SETRATE,
(struct clk_rpmsg_header_s *)msg, len);
}
static uint32_t clk_rpmsg_recalc_rate(FAR struct clk_s *clk,
uint32_t parent_rate)
{
FAR struct rpmsg_endpoint *ept;
FAR struct clk_rpmsg_getrate_s *msg;
FAR const char *name = clk->name;
uint32_t size;
uint32_t len;
int64_t ret;
ept = clk_rpmsg_get_ept(&name);
if (!ept)
{
return 0;
}
len = sizeof(*msg) + B2C(strlen(name) + 1);
msg = rpmsg_get_tx_payload_buffer(ept, &size, true);
if (!msg)
{
return 0;
}
DEBUGASSERT(len <= size);
cstr2bstr(msg->name, name);
ret = clk_rpmsg_sendrecv(ept, CLK_RPMSG_GETRATE,
(struct clk_rpmsg_header_s *)msg, len);
if (ret < 0)
{
return 0;
}
return ret;
}
static int clk_rpmsg_get_phase(FAR struct clk_s *clk)
{
FAR struct rpmsg_endpoint *ept;
FAR struct clk_rpmsg_getphase_s *msg;
FAR const char *name = clk->name;
uint32_t size;
uint32_t len;
ept = clk_rpmsg_get_ept(&name);
if (!ept)
{
return -ENODEV;
}
len = sizeof(*msg) + B2C(strlen(name) + 1);
msg = rpmsg_get_tx_payload_buffer(ept, &size, true);
if (!msg)
{
return -ENOMEM;
}
DEBUGASSERT(len <= size);
cstr2bstr(msg->name, name);
return clk_rpmsg_sendrecv(ept, CLK_RPMSG_GETPHASE,
(struct clk_rpmsg_header_s *)msg, len);
}
static int clk_rpmsg_set_phase(FAR struct clk_s *clk, int degrees)
{
FAR struct rpmsg_endpoint *ept;
FAR struct clk_rpmsg_setphase_s *msg;
FAR const char *name = clk->name;
uint32_t size;
uint32_t len;
ept = clk_rpmsg_get_ept(&name);
if (!ept)
{
return -ENODEV;
}
len = sizeof(*msg) + B2C(strlen(name) + 1);
msg = rpmsg_get_tx_payload_buffer(ept, &size, true);
if (!msg)
{
return -ENOMEM;
}
DEBUGASSERT(len <= size);
msg->degrees = degrees;
cstr2bstr(msg->name, name);
return clk_rpmsg_sendrecv(ept, CLK_RPMSG_SETPHASE,
(struct clk_rpmsg_header_s *)msg, len);
}
/****************************************************************************
* Public Data
****************************************************************************/
const struct clk_ops_s g_clk_rpmsg_ops =
{
.enable = clk_rpmsg_enable,
.disable = clk_rpmsg_disable,
.is_enabled = clk_rpmsg_is_enabled,
.recalc_rate = clk_rpmsg_recalc_rate,
.round_rate = clk_rpmsg_round_rate,
.set_rate = clk_rpmsg_set_rate,
.set_phase = clk_rpmsg_set_phase,
.get_phase = clk_rpmsg_get_phase,
};
/****************************************************************************
* Public Functions
****************************************************************************/
FAR struct clk_s *clk_register_rpmsg(FAR const char *name, uint8_t flags)
{
if (strchr(name, '/') == NULL)
{
return NULL;
}
return clk_register(name, NULL, 0, flags | CLK_IS_CRITICAL,
&g_clk_rpmsg_ops, NULL, 0);
}
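/* Usage sketch for clk_register_rpmsg() above (the cpu and clock names are
 * illustrative only): a clock registered as
 * clk_register_rpmsg("remote/pll0", 0) is resolved by clk_rpmsg_get_ept()
 * as clock "pll0" on the remote cpu "remote", and every clk_enable(),
 * clk_set_rate(), etc. call on it is tunnelled to that cpu as the
 * corresponding CLK_RPMSG_* message.
 */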
int clk_rpmsg_initialize(void)
{
return rpmsg_register_callback(NULL,
clk_rpmsg_device_created,
clk_rpmsg_device_destroy,
NULL);
}
|
871612.c |
#line 1 "ngx_http_zip_parsers.rl"
/* Ragel Parser definitions for mod_zip64 */
#include "ngx_http_zip_module.h"
#include "ngx_http_zip_parsers.h"
static void
ngx_http_zip_file_init(ngx_http_zip_file_t *parsing_file)
{
ngx_str_null(&parsing_file->uri);
ngx_str_null(&parsing_file->args);
ngx_str_null(&parsing_file->filename);
ngx_str_null(&parsing_file->filename_utf8);
parsing_file->header_sent = 0;
parsing_file->trailer_sent = 0;
parsing_file->crc32 = 0;
parsing_file->size = 0;
parsing_file->missing_crc32 = 0;
parsing_file->need_zip64 = 0;
parsing_file->need_zip64_offset = 0;
}
static size_t
destructive_url_decode_len(unsigned char* start, unsigned char* end)
{
unsigned char *read_pos = start, *write_pos = start;
for (; read_pos < end; read_pos++) {
unsigned char ch = *read_pos;
if (ch == '+') {
ch = ' ';
}
if (ch == '%' && (read_pos + 2 < end)) {
ch = ngx_hextoi(read_pos + 1, 2);
read_pos += 2;
}
*(write_pos++) = ch;
}
return write_pos - start;
}
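/* Example (illustrative): the buffer "a%20file+name" is rewritten in place
 * to "a file name" and the returned length is 11. */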
static ngx_int_t
ngx_http_zip_clean_range(ngx_http_zip_range_t *range,
int prefix, int suffix, ngx_http_zip_ctx_t *ctx)
{
if (suffix) {
range->end = ctx->archive_size;
range->start = ctx->archive_size - range->start;
} else if (prefix) {
range->end = ctx->archive_size;
} else {
range->end++;
/*
* Download Accelerator sends the last byte position
* that equals to the file length
*/
if (range->end >= ctx->archive_size) {
range->end = ctx->archive_size;
}
}
if (range->start < 0) {
return NGX_ERROR;
}
if (range->start >= ctx->archive_size) {
return NGX_ERROR;
}
return NGX_OK;
}
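/* Illustrative effect of the normalization above for a 1000-byte archive:
 * a suffix range "-200" becomes [800, 1000); a prefix range "500-" becomes
 * [500, 1000); an ordinary range "0-499" becomes [0, 500), with the end
 * clamped to the archive size when the client sends the file length as the
 * last byte position.
 */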
#line 78 "ngx_http_zip_parsers.c"
static const char _request_actions[] = {
0, 1, 1, 1, 2, 1, 3, 1,
4, 1, 5, 1, 6, 1, 7, 1,
8, 2, 0, 6
};
static const char _request_key_offsets[] = {
0, 0, 7, 8, 11, 14, 16, 18,
19, 26, 27, 28, 31
};
static const char _request_trans_keys[] = {
45, 48, 57, 65, 70, 97, 102, 32,
32, 48, 57, 32, 48, 57, 32, 63,
32, 63, 32, 32, 48, 57, 65, 70,
97, 102, 32, 32, 0, 10, 13, 10,
13, 45, 48, 57, 65, 70, 97, 102,
0
};
static const char _request_single_lengths[] = {
0, 1, 1, 1, 1, 2, 2, 1,
1, 1, 1, 3, 3
};
static const char _request_range_lengths[] = {
0, 3, 0, 1, 1, 0, 0, 0,
3, 0, 0, 0, 3
};
static const char _request_index_offsets[] = {
0, 0, 5, 7, 10, 13, 16, 19,
21, 26, 28, 30, 34
};
static const char _request_indicies[] = {
0, 2, 2, 2, 1, 3, 1, 3,
4, 1, 5, 4, 1, 5, 1, 6,
8, 9, 7, 11, 10, 3, 12, 12,
12, 1, 1, 13, 15, 14, 1, 17,
17, 16, 18, 18, 0, 2, 2, 2,
1, 0
};
static const char _request_trans_targs[] = {
2, 0, 8, 3, 4, 5, 6, 6,
7, 9, 11, 7, 8, 10, 10, 7,
11, 12, 12
};
static const char _request_trans_actions[] = {
17, 0, 17, 0, 9, 0, 1, 0,
3, 3, 13, 0, 11, 5, 0, 7,
0, 15, 0
};
static const char _request_eof_actions[] = {
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 15, 0
};
static const int request_start = 1;
static const int request_en_main = 1;
#line 77 "ngx_http_zip_parsers.rl"
ngx_int_t
ngx_http_zip_parse_request(ngx_http_zip_ctx_t *ctx)
{
int cs;
u_char *p = ctx->unparsed_request->data;
u_char *pe = ctx->unparsed_request->data + ctx->unparsed_request->len;
u_char *eof = ctx->unparsed_request->data + ctx->unparsed_request->len;
ngx_http_zip_file_t *parsing_file = NULL;
#line 158 "ngx_http_zip_parsers.c"
{
cs = request_start;
}
#line 163 "ngx_http_zip_parsers.c"
{
int _klen;
unsigned int _trans;
const char *_acts;
unsigned int _nacts;
const char *_keys;
if ( p == pe )
goto _test_eof;
if ( cs == 0 )
goto _out;
_resume:
_keys = _request_trans_keys + _request_key_offsets[cs];
_trans = _request_index_offsets[cs];
_klen = _request_single_lengths[cs];
if ( _klen > 0 ) {
const char *_lower = _keys;
const char *_mid;
const char *_upper = _keys + _klen - 1;
while (1) {
if ( _upper < _lower )
break;
_mid = _lower + ((_upper-_lower) >> 1);
if ( (*p) < *_mid )
_upper = _mid - 1;
else if ( (*p) > *_mid )
_lower = _mid + 1;
else {
_trans += (unsigned int)(_mid - _keys);
goto _match;
}
}
_keys += _klen;
_trans += _klen;
}
_klen = _request_range_lengths[cs];
if ( _klen > 0 ) {
const char *_lower = _keys;
const char *_mid;
const char *_upper = _keys + (_klen<<1) - 2;
while (1) {
if ( _upper < _lower )
break;
_mid = _lower + (((_upper-_lower) >> 1) & ~1);
if ( (*p) < _mid[0] )
_upper = _mid - 2;
else if ( (*p) > _mid[1] )
_lower = _mid + 2;
else {
_trans += (unsigned int)((_mid - _keys)>>1);
goto _match;
}
}
_trans += _klen;
}
_match:
_trans = _request_indicies[_trans];
cs = _request_trans_targs[_trans];
if ( _request_trans_actions[_trans] == 0 )
goto _again;
_acts = _request_actions + _request_trans_actions[_trans];
_nacts = (unsigned int) *_acts++;
while ( _nacts-- > 0 )
{
switch ( *_acts++ )
{
case 0:
#line 90 "ngx_http_zip_parsers.rl"
{
parsing_file = ngx_array_push(&ctx->files);
ngx_http_zip_file_init(parsing_file);
parsing_file->index = ctx->files.nelts - 1;
}
break;
case 1:
#line 97 "ngx_http_zip_parsers.rl"
{
parsing_file->uri.data = p;
parsing_file->uri.len = 1;
}
break;
case 2:
#line 102 "ngx_http_zip_parsers.rl"
{
parsing_file->uri.len = destructive_url_decode_len(parsing_file->uri.data, p);
}
break;
case 3:
#line 105 "ngx_http_zip_parsers.rl"
{
parsing_file->args.data = p;
}
break;
case 4:
#line 108 "ngx_http_zip_parsers.rl"
{
parsing_file->args.len = p - parsing_file->args.data;
}
break;
case 5:
#line 111 "ngx_http_zip_parsers.rl"
{
parsing_file->size = parsing_file->size * 10 + ((*p) - '0');
}
break;
case 6:
#line 114 "ngx_http_zip_parsers.rl"
{
if ((*p) == '-') {
ctx->missing_crc32 = 1;
parsing_file->missing_crc32 = 1;
ngx_crc32_init(parsing_file->crc32);
} else {
parsing_file->crc32 *= 16;
parsing_file->crc32 += ngx_hextoi(p, 1);
}
}
break;
case 7:
#line 124 "ngx_http_zip_parsers.rl"
{
parsing_file->filename.data = p;
}
break;
case 8:
#line 127 "ngx_http_zip_parsers.rl"
{
parsing_file->filename.len = p - parsing_file->filename.data;
}
break;
#line 302 "ngx_http_zip_parsers.c"
}
}
_again:
if ( cs == 0 )
goto _out;
if ( ++p != pe )
goto _resume;
_test_eof: {}
if ( p == eof )
{
const char *__acts = _request_actions + _request_eof_actions[cs];
unsigned int __nacts = (unsigned int) *__acts++;
while ( __nacts-- > 0 ) {
switch ( *__acts++ ) {
case 8:
#line 127 "ngx_http_zip_parsers.rl"
{
parsing_file->filename.len = p - parsing_file->filename.data;
}
break;
#line 324 "ngx_http_zip_parsers.c"
}
}
}
_out: {}
}
#line 145 "ngx_http_zip_parsers.rl"
/* suppress warning */
(void)request_en_main;
if (cs < 11) {
return NGX_ERROR;
}
ctx->parsed = 1;
return NGX_OK;
}
#line 348 "ngx_http_zip_parsers.c"
static const char _range_actions[] = {
0, 1, 0, 1, 1, 1, 2, 2,
0, 1, 2, 3, 1
};
static const char _range_key_offsets[] = {
0, 0, 1, 2, 3, 4, 5, 6,
9, 11, 14, 17
};
static const char _range_trans_keys[] = {
98, 121, 116, 101, 115, 61, 45, 48,
57, 48, 57, 45, 48, 57, 44, 48,
57, 44, 48, 57, 0
};
static const char _range_single_lengths[] = {
0, 1, 1, 1, 1, 1, 1, 1,
0, 1, 1, 1
};
static const char _range_range_lengths[] = {
0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1
};
static const char _range_index_offsets[] = {
0, 0, 2, 4, 6, 8, 10, 12,
15, 17, 20, 23
};
static const char _range_trans_targs[] = {
2, 0, 3, 0, 4, 0, 5, 0,
6, 0, 7, 0, 8, 9, 0, 10,
0, 11, 9, 0, 7, 10, 0, 7,
11, 0, 0
};
static const char _range_trans_actions[] = {
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 7, 0, 10,
0, 0, 3, 0, 0, 3, 0, 0,
5, 0, 0
};
static const int range_start = 1;
static const int range_en_main = 1;
#line 162 "ngx_http_zip_parsers.rl"
ngx_int_t
ngx_http_zip_parse_range(ngx_http_request_t *r, ngx_str_t *range_str, ngx_http_zip_ctx_t *ctx)
{
int cs, prefix = 0, suffix = 0;
ngx_http_zip_range_t *range = NULL;
u_char *p = range_str->data;
u_char *pe = range_str->data + range_str->len;
#line 412 "ngx_http_zip_parsers.c"
{
cs = range_start;
}
#line 417 "ngx_http_zip_parsers.c"
{
int _klen;
unsigned int _trans;
const char *_acts;
unsigned int _nacts;
const char *_keys;
if ( p == pe )
goto _test_eof;
if ( cs == 0 )
goto _out;
_resume:
_keys = _range_trans_keys + _range_key_offsets[cs];
_trans = _range_index_offsets[cs];
_klen = _range_single_lengths[cs];
if ( _klen > 0 ) {
const char *_lower = _keys;
const char *_mid;
const char *_upper = _keys + _klen - 1;
while (1) {
if ( _upper < _lower )
break;
_mid = _lower + ((_upper-_lower) >> 1);
if ( (*p) < *_mid )
_upper = _mid - 1;
else if ( (*p) > *_mid )
_lower = _mid + 1;
else {
_trans += (unsigned int)(_mid - _keys);
goto _match;
}
}
_keys += _klen;
_trans += _klen;
}
_klen = _range_range_lengths[cs];
if ( _klen > 0 ) {
const char *_lower = _keys;
const char *_mid;
const char *_upper = _keys + (_klen<<1) - 2;
while (1) {
if ( _upper < _lower )
break;
_mid = _lower + (((_upper-_lower) >> 1) & ~1);
if ( (*p) < _mid[0] )
_upper = _mid - 2;
else if ( (*p) > _mid[1] )
_lower = _mid + 2;
else {
_trans += (unsigned int)((_mid - _keys)>>1);
goto _match;
}
}
_trans += _klen;
}
_match:
cs = _range_trans_targs[_trans];
if ( _range_trans_actions[_trans] == 0 )
goto _again;
_acts = _range_actions + _range_trans_actions[_trans];
_nacts = (unsigned int) *_acts++;
while ( _nacts-- > 0 )
{
switch ( *_acts++ )
{
case 0:
#line 174 "ngx_http_zip_parsers.rl"
{
if (range) {
if (ngx_http_zip_clean_range(range, prefix, suffix, ctx) == NGX_ERROR) {
return NGX_ERROR;
}
}
if ((range = ngx_array_push(&ctx->ranges)) == NULL) {
return NGX_ERROR;
}
range->start = 0; range->end = 0; range->boundary_sent = 0;
suffix = 0;
prefix = 1;
}
break;
case 1:
#line 188 "ngx_http_zip_parsers.rl"
{ range->start = range->start * 10 + ((*p) - '0'); }
break;
case 2:
#line 190 "ngx_http_zip_parsers.rl"
{ range->end = range->end * 10 + ((*p) - '0'); prefix = 0; }
break;
case 3:
#line 192 "ngx_http_zip_parsers.rl"
{ suffix = 1; }
break;
#line 518 "ngx_http_zip_parsers.c"
}
}
_again:
if ( cs == 0 )
goto _out;
if ( ++p != pe )
goto _resume;
_test_eof: {}
_out: {}
}
#line 205 "ngx_http_zip_parsers.rl"
/* suppress warning */
(void)range_en_main;
if (cs < 10) {
return NGX_ERROR;
}
if (range) {
if (ngx_http_zip_clean_range(range, prefix, suffix, ctx) == NGX_ERROR) {
return NGX_ERROR;
}
}
return NGX_OK;
}
|
978667.c | /* packet-rsvd.c
* Routines for RSVD dissection
* Copyright 2015, Richard Sharpe <realrichardsharpe@gmail.com>
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <gerald@wireshark.org>
* Copyright 1998 Gerald Combs
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
/*
* RSVD, documented in [MS-RSVD].pdf, by Microsoft, the Remote Shared Virtual
* Disk protocol.
*/
#include "config.h"
#include <epan/conversation.h>
#include <epan/packet.h>
#include "packet-smb-common.h"
#include "packet-windows-common.h"
#include "packet-scsi.h"
void proto_register_rsvd(void);
static int proto_rsvd = -1;
static int hf_svhdx_protocol_id = -1;
static int hf_svhdx_protocol_version = -1;
static int hf_svhdx_operation_code = -1;
static int hf_svhdx_status = -1;
static int hf_svhdx_request_id = -1;
static int hf_svhdx_tunnel_scsi_length = -1;
static int hf_svhdx_tunnel_scsi_reserved1 = -1;
static int hf_svhdx_tunnel_scsi_cdb_length = -1;
static int hf_svhdx_tunnel_scsi_sense_info_ex_length = -1;
static int hf_svhdx_tunnel_scsi_data_in = -1;
static int hf_svhdx_tunnel_scsi_reserved2 = -1;
static int hf_svhdx_tunnel_scsi_srb_flags = -1;
static int hf_svhdx_tunnel_scsi_data_transfer_length = -1;
static int hf_svhdx_tunnel_scsi_reserved3 = -1;
static int hf_svhdx_tunnel_scsi_cdb = -1;
static int hf_svhdx_tunnel_scsi_cdb_padding = -1;
static int hf_svhdx_tunnel_scsi_data = -1;
static int hf_svhdx_tunnel_scsi_auto_generated_sense = -1;
static int hf_svhdx_tunnel_scsi_srb_status = -1;
static int hf_svhdx_tunnel_scsi_sense_data_ex = -1;
static int hf_svhdx_tunnel_scsi_status = -1;
static int hf_svhdx_tunnel_file_info_server_version = -1;
static int hf_svhdx_tunnel_file_info_sector_size = -1;
static int hf_svhdx_tunnel_file_info_physical_sector_size = -1;
static int hf_svhdx_tunnel_file_info_reserved = -1;
static int hf_svhdx_tunnel_file_info_virtual_size = -1;
static int hf_svhdx_tunnel_disk_info_reserved1 = -1;
static int hf_svhdx_tunnel_disk_info_blocksize = -1;
static int hf_svhdx_tunnel_disk_info_linkage_id = -1;
static int hf_svhdx_tunnel_disk_info_disk_type = -1;
static int hf_svhdx_tunnel_disk_info_disk_format = -1;
static int hf_svhdx_tunnel_disk_info_is_mounted = -1;
static int hf_svhdx_tunnel_disk_info_is_4k_aligned = -1;
static int hf_svhdx_tunnel_disk_info_reserved = -1;
static int hf_svhdx_tunnel_disk_info_file_size = -1;
static int hf_svhdx_tunnel_disk_info_virtual_disk_id = -1;
static int hf_svhdx_tunnel_validate_disk_reserved = -1;
static int hf_svhdx_tunnel_validate_disk_is_valid_disk = -1;
static int hf_svhdx_tunnel_srb_status_status_key = -1;
static int hf_svhdx_tunnel_srb_status_reserved = -1;
static int hf_svhdx_tunnel_srb_status_sense_info_auto_generated = -1;
static int hf_svhdx_tunnel_srb_status_srb_status = -1;
static int hf_svhdx_tunnel_srb_status_scsi_status = -1;
static int hf_svhdx_tunnel_srb_status_sense_info_ex_length = -1;
static int hf_svhdx_tunnel_srb_status_sense_data_ex = -1;
static int hf_svhdx_tunnel_safe_virtual_size = -1;
static int hf_svhdx_tunnel_transaction_id = -1;
static int hf_svhdx_tunnel_meta_operation_type = -1;
static int hf_svhdx_tunnel_padding = -1;
static int hf_svhdx_tunnel_resize_new_size = -1;
static int hf_svhdx_tunnel_resize_expand_only_flag = -1;
static int hf_svhdx_tunnel_resize_allow_unsafe_virt_size_flag = -1;
static int hf_svhdx_tunnel_resize_shrink_to_minimum_safe_size_flag = -1;
static int hf_svhdx_tunnel_meta_operation_start_reserved = -1;
static int hf_svhdx_tunnel_snapshot_type = -1;
static int hf_svhdx_tunnel_snapshot_id = -1;
static int hf_svhdx_tunnel_create_snapshot_flags = -1;
static int hf_svhdx_tunnel_create_snapshot_flag_enable_change_tracking = -1;
static int hf_svhdx_tunnel_create_snapshot_stage1 = -1;
static int hf_svhdx_tunnel_create_snapshot_stage2 = -1;
static int hf_svhdx_tunnel_create_snapshot_stage3 = -1;
static int hf_svhdx_tunnel_create_snapshot_stage4 = -1;
static int hf_svhdx_tunnel_create_snapshot_stage5 = -1;
static int hf_svhdx_tunnel_create_snapshot_stage6 = -1;
static int hf_svhdx_tunnel_create_snapshot_parameters_payload_size = -1;
static int hf_svhdx_tunnel_convert_dst_vhdset_name_len = -1;
static int hf_svhdx_tunnel_convert_dst_vhdset_name = -1;
static int hf_svhdx_tunnel_delete_snapshot_persist_reference = -1;
static int hf_svhdx_tunnel_meta_op_query_progress_current_progress = -1;
static int hf_svhdx_tunnel_meta_op_query_progress_complete_value = -1;
static int hf_svhdx_tunnel_vhdset_information_type = -1;
static int hf_svhdx_tunnel_vhdset_snapshot_creation_time = -1;
static int hf_svhdx_tunnel_vhdset_is_valid_snapshot = -1;
static int hf_svhdx_tunnel_vhdset_parent_snapshot_id = -1;
static int hf_svhdx_tunnel_vhdset_log_file_id = -1;
static gint ett_rsvd = -1;
static gint ett_svhdx_tunnel_op_header = -1;
static gint ett_svhdx_tunnel_scsi_request = -1;
static gint ett_rsvd_create_snapshot_flags = -1;
static const value_string rsvd_operation_code_vals[] = {
{ 0x02001001, "RSVD_TUNNEL_GET_INITIAL_INFO" },
{ 0x02001002, "RSVD_TUNNEL_SCSI" },
{ 0x02001003, "RSVD_TUNNEL_CHECK_CONNECTION_STATUS" },
{ 0x02001004, "RSVD_TUNNEL_SRB_STATUS" },
{ 0x02001005, "RSVD_TUNNEL_GET_DISK_INFO" },
{ 0x02001006, "RSVD_TUNNEL_VALIDATE_DISK" },
{ 0x02002101, "RSVD_TUNNEL_META_OPERATION_START" },
{ 0x02002002, "RSVD_TUNNEL_META_OPERATION_QUERY_PROGRESS" },
{ 0x02002005, "RSVD_TUNNEL_VHDSET_QUERY_INFORMATION" },
{ 0x02002006, "RSVD_TUNNEL_DELETE_SNAPSHOT" },
{ 0x02002008, "RSVD_TUNNEL_CHANGE_TRACKING_GET_PARAMETERS" },
{ 0x02002009, "RSVD_TUNNEL_CHANGE_TRACKING_START" },
{ 0x0200200A, "RSVD_TUNNEL_CHANGE_TRACKING_STOP" },
{ 0x0200200C, "RSVD_TUNNEL_QUERY_VIRTUAL_DISK_CHANGES" },
{ 0x0200200D, "RSVD_TUNNEL_QUERY_SAFE_SIZE" },
{ 0, NULL }
};
static const value_string rsvd_sense_info_vals[] = {
{ 0x0, "Sense Info Not Auto Generated" },
{ 0x1, "Sense Info Auto Generated" },
{ 0, NULL }
};
static const value_string rsvd_disk_type_vals[] = {
{ 0x02, "VHD_TYPE_FIXED" },
{ 0x03, "VHD_TYPE_DYNAMIC" },
{ 0, NULL }
};
static const value_string rsvd_disk_format_vals[] = {
{ 0x03, "VIRTUAL_STORAGE_TYPE_DEVICE_VHDX" },
{ 0x04, "VIRTUAL_STORAGE_TYPE_DEVICE_VHDSET" },
{ 0, NULL }
};
/*
* We need this data to handle SCSI requests and responses, I think
*/
typedef struct _rsvd_task_data_t {
guint32 request_frame;
guint32 response_frame;
itlq_nexus_t *itlq;
} rsvd_task_data_t;
typedef struct _rsvd_conv_data_t {
wmem_map_t *tasks;
wmem_tree_t *itl;
rsvd_task_data_t *task;
conversation_t *conversation;
} rsvd_conv_data_t;
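/*
 * Per-conversation state: "tasks" maps the 64-bit RequestId from the RSVD
 * tunnel header to the task data for that exchange so a tunnelled SCSI
 * response can be matched to its request; "itl" holds the ITL nexus used
 * by the SCSI dissector for this conversation.
 */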
static rsvd_conv_data_t *rsvd_conv_data = NULL;
static proto_tree *top_tree = NULL;
static itl_nexus_t *
get_itl_nexus(packet_info *pinfo)
{
itl_nexus_t *itl = NULL;
if (!(itl = (itl_nexus_t *)wmem_tree_lookup32_le(rsvd_conv_data->itl, pinfo->num))) {
itl = wmem_new(wmem_file_scope(), itl_nexus_t);
itl->cmdset = 0xff;
itl->conversation = rsvd_conv_data->conversation;
wmem_tree_insert32(rsvd_conv_data->itl, pinfo->num, itl);
}
return itl;
}
static int
dissect_RSVD_GET_INITIAL_INFO(tvbuff_t *tvb, proto_tree *parent_tree, int offset, gint16 len, gboolean request)
{
proto_tree *gfi_sub_tree;
proto_item *gfi_sub_item;
if (!request) {
gfi_sub_tree = proto_tree_add_subtree(parent_tree, tvb, offset, len, ett_svhdx_tunnel_op_header, &gfi_sub_item, "RSVD_TUNNEL_GET_INITIAL_INFO_RESPONSE");
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_file_info_server_version, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_file_info_sector_size, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_file_info_physical_sector_size, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_file_info_reserved, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_file_info_virtual_size, tvb, offset, 8, ENC_LITTLE_ENDIAN);
offset += 8;
}
return offset;
}
static const value_string rsvd_data_in_vals[] = {
{ 0x00, "Client is requesting data from the server" },
{ 0x01, "Client is sending data to the server" },
{ 0x02, "Client is neither sending nor requesting an additional data buffer" },
{ 0, NULL }
};
static void
dissect_scsi_payload_databuffer(tvbuff_t *tvb, packet_info *pinfo, int offset, guint32 data_transfer_length, gboolean request)
{
tvbuff_t *data_tvb = NULL;
int tvb_len, tvb_rlen;
tvb_len = tvb_captured_length_remaining(tvb, offset);
if (tvb_len > (int)data_transfer_length)
tvb_len = data_transfer_length;
tvb_rlen = tvb_reported_length_remaining(tvb, offset);
if (tvb_rlen > (int)data_transfer_length)
tvb_rlen = data_transfer_length;
data_tvb = tvb_new_subset_length_caplen(tvb, offset, tvb_len, tvb_rlen);
if (rsvd_conv_data->task && rsvd_conv_data->task->itlq) {
rsvd_conv_data->task->itlq->task_flags = SCSI_DATA_READ |
SCSI_DATA_WRITE;
rsvd_conv_data->task->itlq->data_length = data_transfer_length;
rsvd_conv_data->task->itlq->bidir_data_length = data_transfer_length;
dissect_scsi_payload(data_tvb, pinfo, top_tree, request,
rsvd_conv_data->task->itlq,
get_itl_nexus(pinfo), 0);
}
}
/*
* Dissect a tunnelled SCSI request and call the SCSI dissector where
* needed.
*/
static int
dissect_RSVD_TUNNEL_SCSI(tvbuff_t *tvb, packet_info *pinfo, proto_tree *parent_tree, int offset, gint16 len, gboolean request, guint64 request_id)
{
proto_tree *sub_tree;
proto_item *sub_item;
guint32 cdb_length;
guint8 data_in;
guint32 data_transfer_length;
guint32 sense_info_ex_length;
conversation_t *conversation;
conversation = find_or_create_conversation(pinfo);
rsvd_conv_data = (rsvd_conv_data_t *)conversation_get_proto_data(conversation, proto_rsvd);
if (!rsvd_conv_data) {
rsvd_conv_data = wmem_new(wmem_file_scope(), rsvd_conv_data_t);
rsvd_conv_data->tasks = wmem_map_new(wmem_file_scope(),
wmem_int64_hash,
g_int64_equal);
rsvd_conv_data->itl = wmem_tree_new(wmem_file_scope());
rsvd_conv_data->conversation = conversation;
conversation_add_proto_data(conversation, proto_rsvd, rsvd_conv_data);
}
rsvd_conv_data->task = NULL;
if (!pinfo->fd->visited) {
guint64 *key_copy = wmem_new(wmem_file_scope(), guint64);
*key_copy = request_id;
rsvd_conv_data->task = wmem_new(wmem_file_scope(), rsvd_task_data_t);
rsvd_conv_data->task->request_frame=pinfo->num;
rsvd_conv_data->task->response_frame=0;
rsvd_conv_data->task->itlq = NULL;
wmem_map_insert(rsvd_conv_data->tasks, (const void *)key_copy,
rsvd_conv_data->task);
} else {
rsvd_conv_data->task = (rsvd_task_data_t *)wmem_map_lookup(rsvd_conv_data->tasks, (const void *)&request_id);
}
sub_tree = proto_tree_add_subtree_format(parent_tree, tvb, offset, len, ett_svhdx_tunnel_scsi_request, &sub_item, "SVHDX_TUNNEL_SCSI_%s", (request ? "REQUEST" : "RESPONSE"));
if (request) {
tvbuff_t *scsi_cdb = NULL;
/* Length */
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_length, tvb, offset, 2, ENC_LITTLE_ENDIAN);
offset += 2;
/* Reserved1 */
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_reserved1, tvb, offset, 2, ENC_LITTLE_ENDIAN);
offset += 2;
/* CDBLength */
cdb_length = tvb_get_guint8(tvb, offset);
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_cdb_length, tvb, offset, 1, ENC_LITTLE_ENDIAN);
offset++;
/* SensInfoExLength */
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_sense_info_ex_length, tvb, offset, 1, ENC_LITTLE_ENDIAN);
offset++;
/* DataIn */
data_in = tvb_get_guint8(tvb, offset);
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_data_in, tvb, offset, 1, ENC_LITTLE_ENDIAN);
offset++;
/* Reserved2 */
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_reserved2, tvb, offset, 1, ENC_LITTLE_ENDIAN);
offset++;
/* SrbFlags */
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_srb_flags, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
/* DataTransferLength */
data_transfer_length = tvb_get_letohl(tvb, offset);
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_data_transfer_length, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
/* CDBBuffer */
scsi_cdb = tvb_new_subset_length_caplen(tvb,
offset,
cdb_length,
tvb_reported_length_remaining(tvb, offset));
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_cdb, tvb, offset, cdb_length, ENC_NA);
offset += cdb_length;
if (cdb_length < 16) {
/*
* CDBBuffer is always 16 bytes - see https://msdn.microsoft.com/en-us/library/dn393496.aspx
* If CDB is actually smaller, we need to define padding bytes
*/
guint32 cdb_padding_length = 16 - cdb_length;
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_cdb_padding, tvb, offset, cdb_padding_length, ENC_NA);
offset += cdb_padding_length;
}
/* Reserved3 */
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_reserved3, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
/* DataBuffer */
if (data_transfer_length) {
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_data, tvb, offset, data_transfer_length, ENC_NA);
}
/*
* Now the SCSI Request
*/
if (rsvd_conv_data->task && !rsvd_conv_data->task->itlq) {
rsvd_conv_data->task->itlq = wmem_new(wmem_file_scope(),
itlq_nexus_t);
rsvd_conv_data->task->itlq->first_exchange_frame = pinfo->num;
rsvd_conv_data->task->itlq->last_exchange_frame = 0;
rsvd_conv_data->task->itlq->lun = 0xffff;
rsvd_conv_data->task->itlq->scsi_opcode = 0xffff;
rsvd_conv_data->task->itlq->task_flags = 0;
rsvd_conv_data->task->itlq->data_length = 0;
rsvd_conv_data->task->itlq->bidir_data_length = 0;
rsvd_conv_data->task->itlq->flags = 0;
rsvd_conv_data->task->itlq->alloc_len = 0;
rsvd_conv_data->task->itlq->fc_time = pinfo->abs_ts;
rsvd_conv_data->task->itlq->extra_data = NULL;
}
if (rsvd_conv_data->task && rsvd_conv_data->task->itlq) {
dissect_scsi_cdb(scsi_cdb, pinfo, top_tree, SCSI_DEV_SMC, rsvd_conv_data->task->itlq, get_itl_nexus(pinfo));
if (data_in == 0) { /* Only OUT operations have meaningful SCSI payload in request packet */
dissect_scsi_payload_databuffer(tvb, pinfo, offset, data_transfer_length, request);
}
}
/* increment after DataBuffer */
offset += data_transfer_length;
} else {
guint8 scsi_status = 0;
/* Length */
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_length, tvb, offset, 2, ENC_LITTLE_ENDIAN);
offset += 2;
/* A */
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_auto_generated_sense, tvb, offset, 1, ENC_BIG_ENDIAN);
/* SrbStatus */
proto_tree_add_bits_item(sub_tree, hf_svhdx_tunnel_scsi_srb_status, tvb, offset * 8 + 1, 7, ENC_BIG_ENDIAN);
offset++;
/* ScsiStatus */
scsi_status = tvb_get_guint8(tvb, offset);
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_status, tvb, offset, 1, ENC_LITTLE_ENDIAN);
offset++;
/* CdbLength */
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_cdb_length, tvb, offset, 1, ENC_LITTLE_ENDIAN);
offset++;
/* SensInfoExLength */
sense_info_ex_length = tvb_get_guint8(tvb, offset);
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_sense_info_ex_length, tvb, offset, 1, ENC_LITTLE_ENDIAN);
offset++;
/* DataIn */
data_in = tvb_get_guint8(tvb, offset);
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_data_in, tvb, offset, 1, ENC_LITTLE_ENDIAN);
offset++;
/* Reserved */
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_reserved2, tvb, offset, 1, ENC_LITTLE_ENDIAN);
offset++;
/* SrbFlags */
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_srb_flags, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
/* DataTransferLength */
data_transfer_length = tvb_get_letohl(tvb, offset);
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_data_transfer_length, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
/* SenseDataEx */
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_sense_data_ex, tvb, offset, sense_info_ex_length, ENC_NA);
offset += sense_info_ex_length;
/* DataBuffer */
if (data_transfer_length) {
proto_tree_add_item(sub_tree, hf_svhdx_tunnel_scsi_data, tvb, offset, data_transfer_length, ENC_NA);
if (data_in == 1) { /* Only IN operations have meaningful SCSI payload in reply packet */
dissect_scsi_payload_databuffer(tvb, pinfo, offset, data_transfer_length, request);
}
offset += data_transfer_length;
}
/*
* Now, the SCSI response
*/
if (rsvd_conv_data->task && rsvd_conv_data->task->itlq) {
dissect_scsi_rsp(tvb, pinfo, top_tree, rsvd_conv_data->task->itlq, get_itl_nexus(pinfo), scsi_status);
}
}
return offset;
}
static int
dissect_RSVD_SRB_STATUS(tvbuff_t *tvb, proto_tree *parent_tree, int offset, gint16 len, gboolean request)
{
proto_tree *gfi_sub_tree;
proto_item *gfi_sub_item;
if (request) {
gfi_sub_tree = proto_tree_add_subtree(parent_tree, tvb, offset, len, ett_svhdx_tunnel_op_header, &gfi_sub_item, "RSVD_TUNNEL_SRB_STATUS_REQUEST");
/* StatusKey */
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_srb_status_status_key, tvb, offset, 1, ENC_LITTLE_ENDIAN);
offset += 1;
/* Reserved */
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_srb_status_reserved, tvb, offset, 1, ENC_NA);
offset += 27;
} else {
guint8 sense_info_length;
gfi_sub_tree = proto_tree_add_subtree(parent_tree, tvb, offset, len, ett_svhdx_tunnel_op_header, &gfi_sub_item, "RSVD_TUNNEL_SRB_STATUS_RESPONSE");
/* StatusKey */
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_srb_status_status_key, tvb, offset, 1, ENC_LITTLE_ENDIAN);
offset += 1;
/* SenseInfoAutoGenerated and SrbStatus */
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_srb_status_sense_info_auto_generated, tvb, offset, 1, ENC_BIG_ENDIAN);
proto_tree_add_bits_item(gfi_sub_tree, hf_svhdx_tunnel_srb_status_srb_status, tvb, offset * 8 + 1, 7, ENC_BIG_ENDIAN);
offset += 1;
/* ScsiStatus */
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_srb_status_scsi_status, tvb, offset, 1, ENC_LITTLE_ENDIAN);
offset += 1;
/* SenseInfoExLength */
sense_info_length = tvb_get_guint8(tvb, offset);
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_srb_status_sense_info_ex_length, tvb, offset, 1, ENC_NA);
offset += 1;
/* SenseDataEx */
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_srb_status_sense_data_ex, tvb, offset, sense_info_length, ENC_NA);
offset += sense_info_length;
}
return offset;
}
static int
dissect_RSVD_GET_DISK_INFO(tvbuff_t *tvb, proto_tree *parent_tree, int offset, gint16 len, gboolean request)
{
proto_tree *gfi_sub_tree;
proto_item *gfi_sub_item;
if (request) {
gfi_sub_tree = proto_tree_add_subtree(parent_tree, tvb, offset, len, ett_svhdx_tunnel_op_header, &gfi_sub_item, "RSVD_TUNNEL_GET_DISK_INFO_REQUEST");
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_disk_info_reserved1, tvb, offset, 8, ENC_LITTLE_ENDIAN);
offset += 8;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_disk_info_blocksize, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_disk_info_linkage_id, tvb, offset, 16, ENC_LITTLE_ENDIAN);
offset += 16;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_disk_info_is_mounted, tvb, offset, 1, ENC_LITTLE_ENDIAN);
offset += 1;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_disk_info_is_4k_aligned, tvb, offset, 1, ENC_LITTLE_ENDIAN);
offset += 1;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_disk_info_reserved, tvb, offset, 2, ENC_LITTLE_ENDIAN);
offset += 2;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_disk_info_file_size, tvb, offset, 8, ENC_LITTLE_ENDIAN);
offset += 8;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_disk_info_virtual_disk_id, tvb, offset, 16, ENC_LITTLE_ENDIAN);
offset += 16;
} else {
gfi_sub_tree = proto_tree_add_subtree(parent_tree, tvb, offset, len, ett_svhdx_tunnel_op_header, &gfi_sub_item, "RSVD_TUNNEL_GET_DISK_INFO_RESPONSE");
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_disk_info_disk_type, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_disk_info_disk_format, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_disk_info_blocksize, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_disk_info_linkage_id, tvb, offset, 16, ENC_LITTLE_ENDIAN);
offset += 16;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_disk_info_is_mounted, tvb, offset, 1, ENC_LITTLE_ENDIAN);
offset += 1;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_disk_info_is_4k_aligned, tvb, offset, 1, ENC_LITTLE_ENDIAN);
offset += 1;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_disk_info_reserved, tvb, offset, 2, ENC_LITTLE_ENDIAN);
offset += 2;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_disk_info_file_size, tvb, offset, 8, ENC_LITTLE_ENDIAN);
offset += 8;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_disk_info_virtual_disk_id, tvb, offset, 16, ENC_LITTLE_ENDIAN);
offset += 16;
}
return offset;
}
static int
dissect_RSVD_VALIDATE_DISK(tvbuff_t *tvb, proto_tree *parent_tree, int offset, gint16 len, gboolean request)
{
proto_tree *gfi_sub_tree;
proto_item *gfi_sub_item;
if (request) {
gfi_sub_tree = proto_tree_add_subtree(parent_tree, tvb, offset, len, ett_svhdx_tunnel_op_header, &gfi_sub_item, "RSVD_TUNNEL_VALIDATE_DISK_REQUEST");
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_validate_disk_reserved, tvb, offset, 56, ENC_NA);
offset += 56;
} else {
gfi_sub_tree = proto_tree_add_subtree(parent_tree, tvb, offset, len, ett_svhdx_tunnel_op_header, &gfi_sub_item, "RSVD_TUNNEL_VALIDATE_DISK_RESPONSE");
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_validate_disk_is_valid_disk, tvb, offset, 1, ENC_NA);
offset += 1;
}
return offset;
}
static const value_string rsvd_meta_operation_type_vals[] = {
{ 0x00, "SvhdxMetaOperationTypeResize" },
{ 0x01, "SvhdxMetaOperationTypeCreateSnapshot" },
{ 0x02, "SvhdxMetaOperationTypeOptimize" },
{ 0x03, "SvhdxMetaOperationTypeExtractVHD" },
{ 0x04, "SvhdxMetaOperationTypeConvertToVHDSet" },
{ 0x05, "SvhdxMetaOperationTypeApplySnapshot" },
{ 0, NULL }
};
static const value_string svhdx_snapshot_type_vals[] = {
{ 0x01, "SvhdxSnapshotTypeVM" },
{ 0x03, "SvhdxSnapshotTypeCDP" },
{ 0x04, "SvhdxSnapshotTypeWriteable" },
{ 0, NULL }
};
static const value_string svhdx_snapshot_stage_vals[] = {
{ 0x00, "SvhdxSnapshotStageInvalid" },
{ 0x01, "SvhdxSnapshotStageInitialize" },
{ 0x02, "SvhdxSnapshotStageBlockIO" },
{ 0x03, "SvhdxSnapshotStageSwitchObjectStore" },
{ 0x04, "SvhdxSnapshotStageUnblockIO" },
{ 0x05, "SvhdxSnapshotStageFinalize" },
{ 0, NULL }
};
#define SVHDX_SNAPSHOT_DISK_FLAG_ENABLE_CHANGE_TRACKING 0x00000001
static int
dissect_RSVD2_META_OPERATION_START(tvbuff_t *tvb, proto_tree *parent_tree, int offset, gint16 len, gboolean request)
{
static const int * meta_operation_create_snapshot_flags[] = {
&hf_svhdx_tunnel_create_snapshot_flag_enable_change_tracking,
NULL
};
guint32 operation_type = 0;
guint32 length = 0;
proto_tree *gfi_sub_tree;
proto_item *gfi_sub_item;
if (request) {
gfi_sub_tree = proto_tree_add_subtree(parent_tree, tvb, offset, len, ett_svhdx_tunnel_op_header, &gfi_sub_item, "RSVD_TUNNEL_META_OPERATION_START_REQUEST");
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_transaction_id, tvb, offset, 16, ENC_LITTLE_ENDIAN);
offset += 16;
operation_type = tvb_get_letohl(tvb, offset);
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_meta_operation_type, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_padding, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
switch (operation_type) {
case 0x00: /* SvhdxMetaOperationTypeResize */
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_resize_new_size, tvb, offset, 8, ENC_LITTLE_ENDIAN);
offset += 8;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_resize_expand_only_flag, tvb, offset, 1, ENC_LITTLE_ENDIAN);
offset += 1;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_resize_allow_unsafe_virt_size_flag, tvb, offset, 1, ENC_LITTLE_ENDIAN);
offset += 1;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_resize_shrink_to_minimum_safe_size_flag, tvb, offset, 1, ENC_LITTLE_ENDIAN);
offset += 1;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_meta_operation_start_reserved, tvb, offset, 1, ENC_LITTLE_ENDIAN);
offset += 1;
break;
case 0x01: /* SvhdxMetaOperationTypeCreateSnapshot */
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_snapshot_type, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_bitmask(gfi_sub_tree, tvb, offset, hf_svhdx_tunnel_create_snapshot_flags,
ett_rsvd_create_snapshot_flags, meta_operation_create_snapshot_flags, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_create_snapshot_stage1, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_create_snapshot_stage2, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_create_snapshot_stage3, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_create_snapshot_stage4, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_create_snapshot_stage5, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_create_snapshot_stage6, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_snapshot_id, tvb, offset, 16, ENC_LITTLE_ENDIAN);
offset += 16;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_create_snapshot_parameters_payload_size, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
break;
case 0x02: /* SvhdxMetaOperationTypeOptimize */
/* No Data, field MUST be empty */
break;
case 0x03: /* SvhdxMetaOperationTypeExtractVHD */
/* TODO */
break;
case 0x04: /* SvhdxMetaOperationTypeConvertToVHDSet */
length = tvb_get_letohl(tvb, offset);
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_convert_dst_vhdset_name_len, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
if (length) {
const char *name = "";
guint16 bc;
bc = tvb_captured_length_remaining(tvb, offset);
name = get_unicode_or_ascii_string(tvb, &offset,
TRUE, &length, TRUE, TRUE, &bc);
if (name) {
proto_tree_add_string(gfi_sub_tree, hf_svhdx_tunnel_convert_dst_vhdset_name, tvb,
offset, length, name);
}
}
break;
case 0x05: /* SvhdxMetaOperationTypeApplySnapshot */
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_snapshot_type, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_snapshot_id, tvb, offset, 16, ENC_LITTLE_ENDIAN);
offset += 16;
break;
}
}
return offset;
}
static int
dissect_RSVD2_META_OPERATION_QUERY_PROGRESS(tvbuff_t *tvb,
proto_tree *parent_tree, int offset, gint16 len, gboolean request, guint32 status)
{
proto_tree *gfi_sub_tree;
proto_item *gfi_sub_item;
if (request) {
gfi_sub_tree = proto_tree_add_subtree(parent_tree, tvb, offset, len, ett_svhdx_tunnel_op_header, &gfi_sub_item, "RSVD_TUNNEL_META_OPERATION_QUERY_PROGRESS_REQUEST");
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_transaction_id, tvb, offset, 16, ENC_LITTLE_ENDIAN);
offset += 16;
} else {
if (status == 0) { /* If status is not successful, the RSVD response buffer is filled with data from the request buffer, so we do not parse the output structure */
gfi_sub_tree = proto_tree_add_subtree(parent_tree, tvb, offset, len, ett_svhdx_tunnel_op_header, &gfi_sub_item, "RSVD_TUNNEL_META_OPERATION_QUERY_PROGRESS_RESPONSE");
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_meta_op_query_progress_current_progress, tvb, offset, 8, ENC_LITTLE_ENDIAN);
offset += 8;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_meta_op_query_progress_complete_value, tvb, offset, 8, ENC_LITTLE_ENDIAN);
offset += 8;
}
}
return offset;
}
static const value_string svhdx_vhdset_information_type_vals[] = {
{ 0x02, "SvhdxVHDSetInformationTypeSnapshotList" },
{ 0x05, "SvhdxVHDSetInformationTypeSnapshotEntry" },
{ 0x08, "SvhdxVHDSetInformationTypeOptimizeNeeded" },
{ 0x09, "SvhdxVHDSetInformationTypeCdpSnapshotRoot" },
{ 0x0A, "SvhdxVHDSetInformationTypeCdpSnapshotActiveList" },
{ 0x0C, "SvhdxVHDSetInformationTypeCdpSnapshotInactiveList" },
{ 0, NULL }
};
static int
dissect_RSVD2_VHDSET_QUERY_INFORMATION(tvbuff_t *tvb, proto_tree *parent_tree, int offset, gint16 len, gboolean request)
{
proto_tree *gfi_sub_tree;
proto_item *gfi_sub_item;
if (request) {
gfi_sub_tree = proto_tree_add_subtree(parent_tree, tvb, offset, len, ett_svhdx_tunnel_op_header, &gfi_sub_item, "RSVD_TUNNEL_VHDSET_QUERY_INFORMATION_REQUEST");
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_vhdset_information_type, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_snapshot_type, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_snapshot_id, tvb, offset, 16, ENC_LITTLE_ENDIAN);
offset += 16;
} else {
guint32 vhdset_info_type = tvb_get_letohl(tvb, offset);
switch (vhdset_info_type) {
case 0x02: /* SvhdxVHDSetInformationTypeSnapshotList */
gfi_sub_tree = proto_tree_add_subtree(parent_tree, tvb, offset, len, ett_svhdx_tunnel_op_header, &gfi_sub_item, "RSVD_TUNNEL_VHDSET_QUERY_INFORMATION_SNAPSHOT_LIST_RESPONSE");
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_vhdset_information_type, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
/* TODO: make full dissection */
break;
case 0x05: /* SvhdxVHDSetInformationTypeSnapshotEntry */
gfi_sub_tree = proto_tree_add_subtree(parent_tree, tvb, offset, len, ett_svhdx_tunnel_op_header, &gfi_sub_item, "RSVD_TUNNEL_VHDSET_QUERY_INFORMATION_SNAPSHOT_ENTRY_RESPONSE");
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_vhdset_information_type, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_padding, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
offset = dissect_nt_64bit_time(tvb, gfi_sub_tree, offset, hf_svhdx_tunnel_vhdset_snapshot_creation_time);
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_snapshot_type, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_vhdset_is_valid_snapshot, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_snapshot_id, tvb, offset, 16, ENC_LITTLE_ENDIAN);
offset += 16;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_vhdset_parent_snapshot_id, tvb, offset, 16, ENC_LITTLE_ENDIAN);
offset += 16;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_vhdset_log_file_id, tvb, offset, 16, ENC_LITTLE_ENDIAN);
offset += 16;
break;
}
}
return offset;
}
static int
dissect_RSVD2_DELETE_SNAPSHOT(tvbuff_t *tvb, proto_tree *parent_tree, int offset, gint16 len, gboolean request)
{
proto_tree *gfi_sub_tree;
proto_item *gfi_sub_item;
if (request) {
gfi_sub_tree = proto_tree_add_subtree(parent_tree, tvb, offset, len, ett_svhdx_tunnel_op_header, &gfi_sub_item, "RSVD_TUNNEL_DELETE_SNAPSHOT_REQUEST");
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_snapshot_id, tvb, offset, 16, ENC_LITTLE_ENDIAN);
offset += 16;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_delete_snapshot_persist_reference, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_snapshot_type, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
}
return offset;
}
static int
dissect_RSVD2_QUERY_SAFE_SIZE(tvbuff_t *tvb, proto_tree *parent_tree, int offset, gint16 len, gboolean request)
{
proto_tree *gfi_sub_tree;
proto_item *gfi_sub_item;
if (!request) {
gfi_sub_tree = proto_tree_add_subtree(parent_tree, tvb, offset, len, ett_svhdx_tunnel_op_header, &gfi_sub_item, "RSVD_TUNNEL_QUERY_SAFE_SIZE_RESPONSE");
proto_tree_add_item(gfi_sub_tree, hf_svhdx_tunnel_safe_virtual_size, tvb, offset, 8, ENC_LITTLE_ENDIAN);
offset += 8;
}
return offset;
}
static int
dissect_rsvd(tvbuff_t *tvb, packet_info *pinfo, proto_tree *parent_tree, void *data)
{
guint32 header_bytes = 0;
guint proto_id = 0;
guint proto_version = 0;
guint32 operation_code = 0;
guint32 status;
proto_item *ti;
proto_tree *rsvd_tree;
proto_item *sub_item;
proto_tree *sub_tree;
guint offset = 0;
guint16 len;
guint64 request_id = 0;
gboolean request = *(gboolean *)data;
top_tree = parent_tree;
len = tvb_reported_length(tvb);
col_set_str(pinfo->cinfo, COL_PROTOCOL, "RSVD");
col_clear(pinfo->cinfo, COL_INFO);
/*
* The header bytes need to be pulled in as a 32bit LE value. And the
* header is the same in a request or a response ...
*/
header_bytes = tvb_get_letohl(tvb, 0); /* Get the header bytes */
proto_id = header_bytes >> 24;
proto_version = (header_bytes >> 12) & 0x0FFF;
operation_code = header_bytes;
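/* Illustrative decomposition (comment added for clarity, not in the original
 * dissector): for a GET_INITIAL_INFO request the 32-bit LE header value
 * 0x02001001 splits into proto_id 0x02 (bits 31-24) and proto_version 0x001
 * (bits 23-12), while operation_code keeps the full 32-bit value that is
 * matched in the switch further below. */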
ti = proto_tree_add_item(parent_tree, proto_rsvd, tvb, offset, -1, ENC_NA);
rsvd_tree = proto_item_add_subtree(ti, ett_rsvd);
sub_tree = proto_tree_add_subtree(rsvd_tree, tvb, offset, (len>16) ? 16 : len, ett_svhdx_tunnel_op_header, &sub_item, "SVHDX_TUNNEL_OPERATION_HEADER");
/* ProtocolID */
proto_tree_add_uint(sub_tree, hf_svhdx_protocol_id, tvb, offset, 4, proto_id);
/* ProtocolVersion */
proto_tree_add_uint(sub_tree, hf_svhdx_protocol_version, tvb, offset, 4, proto_version);
/* Operation Code */
proto_tree_add_uint(sub_tree, hf_svhdx_operation_code, tvb, offset, 4, operation_code);
offset += 4;
/* Status */
status = tvb_get_letohl(tvb, offset);
proto_tree_add_item(sub_tree, hf_svhdx_status, tvb, offset, 4, ENC_LITTLE_ENDIAN);
offset += 4;
/* RequestId */
request_id = tvb_get_ntoh64(tvb, offset);
proto_tree_add_item(sub_tree, hf_svhdx_request_id, tvb, offset, 8, ENC_LITTLE_ENDIAN);
offset += 8;
col_append_fstr(pinfo->cinfo, COL_INFO, "%s %s",
val_to_str(operation_code,
rsvd_operation_code_vals,
"Unknown Operation Code (0x%08X)"),
request ? "Request" : "Response");
proto_item_append_text(ti, ", %s %s",
val_to_str(operation_code,
rsvd_operation_code_vals,
"Unknown Operation Code (0x%08X)"),
request ? "Request" : "Response");
/*
* Now process the individual requests ...
*/
switch (operation_code) {
case 0x02001001:
offset += dissect_RSVD_GET_INITIAL_INFO(tvb, rsvd_tree, offset, len - offset, request);
break;
case 0x02001002:
offset += dissect_RSVD_TUNNEL_SCSI(tvb, pinfo, rsvd_tree, offset, len - offset, request, request_id);
break;
case 0x02001003:
/*
* There is nothing more here.
*/
break;
case 0x02001004:
offset += dissect_RSVD_SRB_STATUS(tvb, rsvd_tree, offset, len - offset, request);
break;
case 0x02001005:
offset += dissect_RSVD_GET_DISK_INFO(tvb, rsvd_tree, offset, len - offset, request);
break;
case 0x02001006:
offset += dissect_RSVD_VALIDATE_DISK(tvb, rsvd_tree, offset, len - offset, request);
break;
/* RSVD v2 operations */
case 0x02002101:
offset += dissect_RSVD2_META_OPERATION_START(tvb, rsvd_tree, offset, len - offset, request);
break;
case 0x02002002:
offset += dissect_RSVD2_META_OPERATION_QUERY_PROGRESS(tvb, rsvd_tree, offset, len - offset, request, status);
break;
case 0x02002005:
offset += dissect_RSVD2_VHDSET_QUERY_INFORMATION(tvb, rsvd_tree, offset, len - offset, request);
break;
case 0x02002006:
offset += dissect_RSVD2_DELETE_SNAPSHOT(tvb, rsvd_tree, offset, len - offset, request);
break;
case 0x0200200D:
offset += dissect_RSVD2_QUERY_SAFE_SIZE(tvb, rsvd_tree, offset, len - offset, request);
break;
/* TODO: implement more dissectors for RSVD v2 */
default:
break;
}
return offset;
}
void
proto_register_rsvd(void)
{
static hf_register_info hf[] = {
{ &hf_svhdx_protocol_id,
{ "ProtocolId", "rsvd.svhdx_protocol_id", FT_UINT8, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_protocol_version,
{ "ProtocolVersion", "rsvd.svhdx_protocol_version", FT_UINT16, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_operation_code,
{ "OperationCode", "rsvd.svhdx_operation_code", FT_UINT32, BASE_HEX,
VALS(rsvd_operation_code_vals), 0, "Operation Code", HFILL }},
{ &hf_svhdx_status,
{ "Status", "rsvd.svhdx_status", FT_UINT32, BASE_HEX | BASE_EXT_STRING,
&NT_errors_ext, 0, NULL, HFILL }},
{ &hf_svhdx_request_id,
{ "RequestId", "rsvd.svhdx_request_id", FT_UINT64, BASE_HEX,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_scsi_length,
{ "Length", "rsvd.svhdx_length", FT_UINT16, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_scsi_reserved1,
{ "Reserved1", "rsvd.svhdx_scsi_reserved1", FT_UINT16, BASE_HEX,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_scsi_cdb_length,
{ "CDBLength", "rsvd.svhdx_scsi_cdb_length", FT_UINT8, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_scsi_sense_info_ex_length,
{ "SenseInfoExLength", "rsvd.svhdx_scsi_sense_info_ex_length", FT_UINT8, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_scsi_data_in,
{ "DataIn", "rsvd.svhdx_scsi_data_in", FT_UINT8, BASE_HEX,
VALS(rsvd_data_in_vals), 0, "SCSI CDB transfer type", HFILL }},
{ &hf_svhdx_tunnel_scsi_reserved2,
{ "Reserved2", "rsvd.svhdx_scsi_reserved2", FT_UINT8, BASE_HEX,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_scsi_srb_flags,
{ "SRBFlags", "rsvd.svhdx_scsi_srbflags", FT_UINT32, BASE_HEX,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_scsi_data_transfer_length,
{ "DataTransferLength", "rsvd.svhdx_scsi_data_transfer_length", FT_UINT32, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_scsi_reserved3,
{ "Reserved3", "rsvd.svhdx_scsi_reserved3", FT_UINT32, BASE_HEX,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_scsi_cdb,
{ "CDB", "rsvd.svhdx_scsi_cdb", FT_BYTES, BASE_NONE,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_scsi_cdb_padding,
{ "CDBPadding", "rsvd.svhdx_scsi_cdb_padding", FT_BYTES, BASE_NONE,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_scsi_data,
{"Data", "rsvd.svhdx_scsi_data", FT_BYTES, BASE_NONE,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_scsi_auto_generated_sense,
{"AutoGeneratedSenseInfo", "rsvd.svhdx_auto_generated_sense_info", FT_UINT8, BASE_HEX,
VALS(rsvd_sense_info_vals), 0x80, NULL, HFILL }},
{ &hf_svhdx_tunnel_scsi_srb_status,
{ "SrbStatus", "rsvd.svhdx_srb_status", FT_UINT8, BASE_HEX,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_scsi_status,
{ "ScsiStatus", "rsvd.svhdx_scsi_status", FT_UINT8, BASE_HEX,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_scsi_sense_data_ex,
{ "SenseDataEx", "rsvd.svhdx_scsi_sense_data_ex", FT_BYTES, BASE_NONE,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_file_info_server_version,
{ "ServerVersion", "rsvd.svhdx_file_info_server_version", FT_UINT32, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_file_info_sector_size,
{ "SectorSize", "rsvd.svhdx_file_info_sector_size", FT_UINT32, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_file_info_physical_sector_size,
{ "PhysicalSectorSize", "rsvd.svhdx_file_info_physical_sector_size", FT_UINT32, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_file_info_reserved,
{ "Reserved", "rsvd.svhdx_file_info_reserved", FT_UINT32, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_file_info_virtual_size,
{ "VirtualSize", "rsvd.svhdx_file_info_virtual_size", FT_UINT64, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_disk_info_reserved1,
{ "Reserved1", "rsvd.svhdx_disk_info_reserved1", FT_UINT64, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_disk_info_blocksize,
{ "BlockSize", "rsvd.svhdx_disk_info_blocksize", FT_UINT32, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_disk_info_linkage_id,
{ "LinkageID", "rsvd.svhdx_disk_info_linkage_id", FT_GUID, BASE_NONE,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_disk_info_disk_type,
{ "DiskType", "rsvd.svhdx_disk_info_disk_type", FT_UINT16, BASE_HEX,
VALS(rsvd_disk_type_vals), 0, "Disk Type", HFILL }},
{ &hf_svhdx_tunnel_disk_info_disk_format,
{ "DiskFormat", "rsvd.svhdx_disk_info_disk_format", FT_UINT16, BASE_HEX,
VALS(rsvd_disk_format_vals), 0, "Disk Format", HFILL }},
{ &hf_svhdx_tunnel_disk_info_is_mounted,
{ "IsMounted", "rsvd.svhdx_tunnel_disk_info_is_mounted", FT_UINT8, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_disk_info_is_4k_aligned,
{ "Is4KAligned", "rsvd.svhdx_tunnel_disk_info_is_4k_aligned", FT_UINT8, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_disk_info_reserved,
{ "Reserved", "rsvd.svhdx_disk_info_reserved", FT_UINT16, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_disk_info_file_size,
{ "FileSize", "rsvd.svhdx_disk_info_file_size", FT_UINT64, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_disk_info_virtual_disk_id,
{ "VirtualDiskId", "rsvd.svhdx_disk_info_virtual_disk_id", FT_GUID, BASE_NONE,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_validate_disk_reserved,
{ "Reserved", "rsvd.svhdx_tunnel_validate_disk_reserved", FT_BYTES, BASE_NONE,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_validate_disk_is_valid_disk,
{ "IsValidDisk", "rsvd.svhdx_validate_disk_is_valid_disk", FT_BYTES, BASE_NONE,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_srb_status_status_key,
{ "StatusKey", "rsvd.svhdx_srb_status_key", FT_UINT8, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_srb_status_reserved,
{ "Reserved", "rsvd.svhdx_srb_status_reserved", FT_BYTES, BASE_NONE,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_srb_status_sense_info_auto_generated,
{ "SenseInfoAutoGenerated", "rsvd.svhdx_sense_info_auto_generated", FT_UINT8, BASE_HEX,
VALS(rsvd_sense_info_vals), 0x80, NULL, HFILL }},
{ &hf_svhdx_tunnel_srb_status_srb_status,
{ "SrbStatus", "rsvd.svhdx_srb_status_srb_status", FT_UINT8, BASE_HEX,
NULL, 0x7f, NULL, HFILL }},
{ &hf_svhdx_tunnel_srb_status_scsi_status,
{ "SrbStatus", "rsvd.svhdx_srb_status_scsi_status", FT_UINT8, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_srb_status_sense_info_ex_length,
{ "SenseInfoExLength", "rsvd.svhdx_srb_status_sense_info_ex_length", FT_UINT8, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_srb_status_sense_data_ex,
{ "Reserved", "rsvd.svhdx_srb_status_sense_data_ex", FT_BYTES, BASE_NONE,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_safe_virtual_size,
{ "SafeVirtualSize", "rsvd.svhdx_safe_size", FT_UINT64, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_transaction_id,
{ "TransactionId", "rsvd.svhdx_meta_operation.transaction_id", FT_GUID, BASE_NONE,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_meta_operation_type,
{ "OperationType", "rsvd.svhdx_meta_operation.type", FT_UINT32, BASE_HEX,
VALS(rsvd_meta_operation_type_vals), 0, "Type of meta-operation", HFILL }},
{ &hf_svhdx_tunnel_padding,
{ "Padding", "rsvd.svhdx_padding", FT_UINT32, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_resize_new_size,
{ "NewSize", "rsvd.svhdx_meta_operation.new_size", FT_UINT64, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_resize_expand_only_flag,
{ "ExpandOnly", "rsvd.svhdx_meta_operation.expand_only", FT_BOOLEAN, 8,
NULL, 0, "Indicates that shared virtual disk size can only expand", HFILL }},
{ &hf_svhdx_tunnel_resize_allow_unsafe_virt_size_flag,
{ "AllowUnsafeVirtualSize", "rsvd.svhdx_meta_operation.allow_unsafe_virt_size", FT_BOOLEAN, 8,
NULL, 0, "Indicates that the shared virtual disk size can be less than the data it currently contains", HFILL }},
{ &hf_svhdx_tunnel_resize_shrink_to_minimum_safe_size_flag,
{ "ShrinkToMinimumSafeSize", "rsvd.svhdx_meta_operation.shrink_to_minimum_safe_size", FT_BOOLEAN, 8,
NULL, 0, "Indicates that the shared virtual disk size can be shrunk to the data it currently contains", HFILL }},
{ &hf_svhdx_tunnel_meta_operation_start_reserved,
{ "Reserved", "rsvd.svhdx_meta_operation.reserved", FT_UINT8, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_snapshot_type,
{ "SnapshotType", "rsvd.svhdx_snapshot_type", FT_UINT32, BASE_HEX,
VALS(svhdx_snapshot_type_vals), 0, "Type of snapshot", HFILL }},
{ &hf_svhdx_tunnel_snapshot_id,
{ "SnapshotId", "rsvd.svhdx_snapshot_id", FT_GUID, BASE_NONE,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_create_snapshot_flags,
{ "Flags", "rsvd.svhdx_meta_operation.create_snapshot_flags", FT_UINT32, BASE_HEX,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_create_snapshot_flag_enable_change_tracking,
{ "SVHDX_SNAPSHOT_DISK_FLAG_ENABLE_CHANGE_TRACKING", "rsvd.svhdx_meta_operation.create_snapshot_flag_enable_change_tracking", FT_BOOLEAN, 32,
NULL, SVHDX_SNAPSHOT_DISK_FLAG_ENABLE_CHANGE_TRACKING, "Change tracking to be enabled when snapshot is taken", HFILL }},
{ &hf_svhdx_tunnel_create_snapshot_stage1,
{ "Stage1", "rsvd.svhdx_meta_operation.create_snapshot_stage1", FT_UINT32, BASE_HEX,
VALS(svhdx_snapshot_stage_vals), 0, "The first stage", HFILL }},
{ &hf_svhdx_tunnel_create_snapshot_stage2,
{ "Stage2", "rsvd.svhdx_meta_operation.create_snapshot_stage2", FT_UINT32, BASE_HEX,
VALS(svhdx_snapshot_stage_vals), 0, "The second stage", HFILL }},
{ &hf_svhdx_tunnel_create_snapshot_stage3,
{ "Stage3", "rsvd.svhdx_meta_operation.create_snapshot_stage3", FT_UINT32, BASE_HEX,
VALS(svhdx_snapshot_stage_vals), 0, "The third stage", HFILL }},
{ &hf_svhdx_tunnel_create_snapshot_stage4,
{ "Stage4", "rsvd.svhdx_meta_operation.create_snapshot_stage4", FT_UINT32, BASE_HEX,
VALS(svhdx_snapshot_stage_vals), 0, "The fourth stage", HFILL }},
{ &hf_svhdx_tunnel_create_snapshot_stage5,
{ "Stage5", "rsvd.svhdx_meta_operation.create_snapshot_stage5", FT_UINT32, BASE_HEX,
VALS(svhdx_snapshot_stage_vals), 0, "The fifth stage", HFILL }},
{ &hf_svhdx_tunnel_create_snapshot_stage6,
{ "Stage6", "rsvd.svhdx_meta_operation.create_snapshot_stage6", FT_UINT32, BASE_HEX,
VALS(svhdx_snapshot_stage_vals), 0, "The sixth stage", HFILL }},
{ &hf_svhdx_tunnel_create_snapshot_parameters_payload_size,
{ "ParametersPayloadSize", "rsvd.svhdx_meta_operation.create_snapshot_params_payload_size", FT_UINT32, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_convert_dst_vhdset_name_len,
{ "DestinationVhdSetNameLength", "rsvd.svhdx_meta_operation.dst_vhdset_name_len", FT_UINT32, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_convert_dst_vhdset_name,
{ "DestinationVhdSetName", "rsvd.svhdx_meta_operation.dst_vhdset_name", FT_STRING, BASE_NONE,
NULL, 0, "Name for the new VHD set be created", HFILL }},
{ &hf_svhdx_tunnel_delete_snapshot_persist_reference,
{ "PersistReference", "rsvd.svhdx_delete_snapshot_persist_reference", FT_BOOLEAN, 4,
NULL, 0, "Indicate if the snapshot needs to be persisted", HFILL }},
{ &hf_svhdx_tunnel_meta_op_query_progress_current_progress,
{ "CurrentProgressValue", "rsvd.svhdx_query_progress.current_progress", FT_UINT64, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_meta_op_query_progress_complete_value,
{ "CompleteValue", "rsvd.svhdx_query_progress.complete_value", FT_UINT64, BASE_DEC,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_vhdset_information_type,
{ "VHDSetInformationType", "rsvd.svhdx_vhdset_information_type", FT_UINT32, BASE_HEX,
VALS(svhdx_vhdset_information_type_vals), 0, "The information type requested", HFILL }},
{ &hf_svhdx_tunnel_vhdset_snapshot_creation_time,
{ "SnapshotCreationTime", "rsvd.svhdx_vhdset_snapshot_creation_time", FT_ABSOLUTE_TIME, ABSOLUTE_TIME_LOCAL,
NULL, 0, "Time when this object was created", HFILL }},
{ &hf_svhdx_tunnel_vhdset_is_valid_snapshot,
{ "IsValidSnapshot", "rsvd.svhdx_vhdset_is_valid_snapshot", FT_BOOLEAN, 4,
NULL, 0, "Set to 1 when the snapshot is valid", HFILL }},
{ &hf_svhdx_tunnel_vhdset_parent_snapshot_id,
{ "ParentSnapshotId", "rsvd.svhdx_vhdxset_parent_snapshot_id", FT_GUID, BASE_NONE,
NULL, 0, NULL, HFILL }},
{ &hf_svhdx_tunnel_vhdset_log_file_id,
{ "LogFileId", "rsvd.svhdx_vhdxset_log_file_id", FT_GUID, BASE_NONE,
NULL, 0, NULL, HFILL }}
};
static gint *ett[] = {
&ett_rsvd,
&ett_svhdx_tunnel_op_header,
&ett_svhdx_tunnel_scsi_request,
&ett_rsvd_create_snapshot_flags
};
proto_rsvd = proto_register_protocol("Remote Shared Virtual Disk",
"RSVD", "rsvd");
register_dissector("rsvd", dissect_rsvd, proto_rsvd);
proto_register_field_array(proto_rsvd, hf, array_length(hf));
proto_register_subtree_array(ett, array_length(ett));
}
/*
* Editor modelines - https://www.wireshark.org/tools/modelines.html
*
* Local variables:
* c-basic-offset: 4
* tab-width: 8
* indent-tabs-mode: nil
* End:
*
* vi: set shiftwidth=4 tabstop=8 expandtab:
* :indentSize=4:tabSize=8:noTabs=true:
*/
|
143188.c | #include <iostream>
#include <algorithm>
#include <map>
#include <cstdio>
#include <cmath>
#include <cstring>
#include <vector>
using namespace std;
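/* Descriptive note (added): reads t test cases. Each case gives n people
 * numbered 1..n and m indices to delete; the indices are sorted and removed
 * with an offset p to compensate for earlier erasures. The survivors are then
 * printed tab-separated in two rows: elements at even positions of the
 * remaining vector, followed by elements at odd positions. */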
int main()
{
int t,n,m,i,p,a[1002];
vector<int>v;
scanf("%d",&t);
while(t--)
{
scanf("%d",&n);
for(i=1;i<=n;i++)
v.push_back(i);
scanf("%d",&m);
p=0;
for(i=0;i<m;i++)
{
scanf("%d",&a[i]);
}
sort(a,a+m);
for(i=0;i<m;i++)
{
v.erase(v.begin()+a[i]-1-p);
p++;
}
for(i=0;i<n-m;i=i+2)
{
printf("%d\t",v[i]);
}
printf("\n");
for(i=1;i<n-m;i=i+2)
{
printf("%d\t",v[i]);
}
printf("\n");
v.clear();
}
return 0;
}
|
185000.c | /* Copyright (C) 2013 The Regents of the University of California
* See README in this or parent directory for licensing information. */
/* metaRa - stuff to parse and interpret a genome-hub meta.txt file, which is in
* a hierarchical ra format. That is something like:
* meta topLevel
* cellLine HELA
*
* meta midLevel
* target H3K4Me3
* antibody abCamAntiH3k4me3
*
* meta lowLevel
* fileName hg19/chipSeq/helaH3k4me3.narrowPeak.bigBed
* The file is interpreted so that lower level stanzas inherit tags from higher level ones.
* NOTE: this file has largely been superseded by the tagStorm module, which does not
* require meta tags, but is otherwise similar.
*/
#include "common.h"
#include "linefile.h"
#include "hash.h"
#include "errAbort.h"
#include "meta.h"
#include "net.h"
#include "ra.h"
struct metaTagVal *metaTagValNew(char *tag, char *val)
/* Create new meta tag/val */
{
struct metaTagVal *mtv;
AllocVar(mtv);
mtv->tag = cloneString(tag);
mtv->val = cloneString(val);
return mtv;
}
void metaTagValFree(struct metaTagVal **pMtv)
/* Free up metaTagVal. */
{
struct metaTagVal *mtv = *pMtv;
if (mtv != NULL)
{
freeMem(mtv->tag);
freeMem(mtv->val);
freez(pMtv);
}
}
void metaTagValFreeList(struct metaTagVal **pList)
/* Free a list of dynamically allocated metaTagVal's */
{
struct metaTagVal *el, *next;
for (el = *pList; el != NULL; el = next)
{
next = el->next;
metaTagValFree(&el);
}
*pList = NULL;
}
int metaTagValCmp(const void *va, const void *vb)
/* Compare to sort based on tag name . */
{
const struct metaTagVal *a = *((struct metaTagVal **)va);
const struct metaTagVal *b = *((struct metaTagVal **)vb);
return strcmp(a->tag, b->tag);
}
void metaFree(struct meta **pMeta)
/* Free up memory associated with a meta. */
{
struct meta *meta = *pMeta;
if (meta != NULL)
{
metaTagValFreeList(&meta->tagList);
freez(pMeta);
}
}
void metaFreeList(struct meta **pList)
/* Free a list of dynamically allocated meta's. Use metaFreeForest to free children too. */
{
struct meta *el, *next;
for (el = *pList; el != NULL; el = next)
{
next = el->next;
metaFree(&el);
}
*pList = NULL;
}
void metaFreeForest(struct meta **pForest)
/* Free up all metas in forest and their children. */
{
struct meta *meta;
for (meta = *pForest; meta != NULL; meta = meta->next)
{
if (meta->children)
metaFreeForest(&meta->children);
}
metaFreeList(pForest);
}
void metaSortTags(struct meta *meta)
/* Do canonical sort so that the first tag stays first but the
* rest are alphabetical. */
{
slSort(&meta->tagList->next, metaTagValCmp);
}
int countLeadingSpacesDetabbing(char *s, int tabStop)
/* Count up leading chars including those implied by tab. Set tabStop to 8
* for usual UNIX results. */
{
int count = 0;
char c;
while ((c = *s++) != 0)
{
if (c == ' ')
++count;
else if (c == '\t')
{
int tabBefore = count / tabStop;
count = (tabBefore+1)*tabStop;
}
else
break;
}
return count;
}
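/* Worked example (illustrative): with tabStop 8, the prefix "\t  x" counts
 * 8 for the tab (count 0 is rounded up to the next tab stop) plus two spaces,
 * so the function returns 10. */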
struct meta *metaNextStanza(struct lineFile *lf)
/* Return next stanza in a meta file. Does not set parent/child/next pointers.
* Returns NULL at end of file. Does a little error checking, making sure
* that indentation level is consistent across all lines of stanza. Returns
* indentation level. */
{
/* See if anything left in file, and if not return. */
if (!raSkipLeadingEmptyLines(lf, NULL))
return NULL;
/* Allocate return structure and vars to help parse. */
struct meta *meta;
AllocVar(meta);
struct dyString *dy = dyStringNew(256);
char *tag,*val;
/* Loop to get all tags in stanza. */
boolean firstTime = TRUE;
int initialIndent = 0;
for (;;)
{
dyStringClear(dy);
if (!raNextTagVal(lf, &tag, &val, dy))
break;
/* Make tag/val and add it to list. */
struct metaTagVal *mtv;
AllocVar(mtv);
mtv->tag = cloneString(tag);
mtv->val = cloneString(val);
slAddHead(&meta->tagList, mtv);
/* Check indentation. */
int indent = countLeadingSpacesDetabbing(dy->string, 8);
if (firstTime)
{
initialIndent = indent;
firstTime = FALSE;
}
else
{
if (indent != initialIndent)
{
warn("Error line %d of %s\n", lf->lineIx, lf->fileName);
warn("Indentation level %d doesn't match level %d at start of stanza.",
indent, initialIndent);
if (strchr(dy->string, '\t'))
warn("There are tabs in the indentation, be sure tab stop is set to 8 spaces.");
noWarnAbort();
}
}
}
slReverse(&meta->tagList);
/* Set up remaining fields and return. */
assert(meta->tagList != NULL);
meta->name = meta->tagList->val;
meta->indent = initialIndent;
return meta;
}
static struct meta *rReverseMetaList(struct meta *list)
/* Return reverse list, and reverse all children lists too. Needed because
* we addHead instead of addTail while building tree because it's faster,
* especially as lists get long. */
{
slReverse(&list);
struct meta *meta;
for (meta = list; meta != NULL; meta = meta->next)
{
if (meta->children != NULL)
meta->children = rReverseMetaList(meta->children);
}
return list;
}
struct meta *metaLoadAll(char *fileName, char *keyTag, char *parentTag,
boolean ignoreOtherStanzas, boolean ignoreIndent)
/* Loads in all ra stanzas from file and turns them into a list of meta, some of which
* may have children. The keyTag parameter is optional. If non-null it should be set to
* the tag name that starts a stanza. If null, the first tag of the first stanza will be used.
* The parentTag if non-NULL will be a tag name used to define the parent of a stanza.
* The ignoreOtherStanzas flag if set will ignore stanzas that start with other tags.
* If not set the routine will abort on such stanzas. The ignoreIndent if set will
* use the parentTag (which must be set) to define the hierarchy. Otherwise the program
* will look at the indentation, and if there is a parentTag complain about any
* disagreements between indentation and parentTag. */
{
struct lineFile *lf = netLineFileOpen(fileName);
struct meta *meta, *forest = NULL, *lastMeta = NULL;
if (ignoreIndent)
{
errAbort("Currently metaLoadAll can't ignore indentation, sorry.");
}
while ((meta = metaNextStanza(lf)) != NULL)
{
struct meta **pList;
if (forest == NULL) /* First time. */
{
if (meta->indent != 0)
errAbort("Initial stanza of %s should not be indented", fileName);
if (keyTag == NULL)
keyTag = meta->tagList->tag;
pList = &forest;
}
else
{
if (!sameString(keyTag, meta->tagList->tag))
{
if (ignoreOtherStanzas)
{
metaFree(&meta);
continue;
}
else
errAbort("Stanza beginning with %s instead of %s line %d of %s",
meta->tagList->tag, keyTag, lf->lineIx, lf->fileName);
}
if (meta->indent > lastMeta->indent)
{
pList = &lastMeta->children;
meta->parent = lastMeta;
}
else if (meta->indent == lastMeta->indent)
{
if (meta->indent == 0)
pList = &forest;
else
{
pList = &lastMeta->parent->children;
meta->parent = lastMeta->parent;
}
}
else /* meta->indent < lastMeta->indent */
{
/* Find sibling at same level as us. */
struct meta *olderSibling;
for (olderSibling = lastMeta->parent;
olderSibling != NULL; olderSibling = olderSibling->parent)
{
if (meta->indent == olderSibling->indent)
break;
}
if (olderSibling == NULL)
{
warn("Indentation inconsistent in stanza ending line %d of %s.",
lf->lineIx, lf->fileName);
warn("If you are using tabs, check your tab stop is set to 8.");
warn("Otherwise check that when you are reducing indentation in a stanza");
warn("that it is the same as the previous stanza at the same level.");
noWarnAbort();
}
if (olderSibling->parent == NULL)
pList = &forest;
else
{
pList = &olderSibling->parent->children;
meta->parent = olderSibling->parent;
}
}
}
slAddHead(pList, meta);
lastMeta = meta;
}
lineFileClose(&lf);
forest = rReverseMetaList(forest);
return forest;
}
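/* Illustrative usage sketch (not part of the original library; compiled out
 * with #if 0). The file name "example.meta" and the tag "cellLine" are
 * hypothetical placeholders. */
#if 0
static void metaUsageExample(void)
{
struct meta *forest = metaLoadAll("example.meta", NULL, NULL, FALSE, FALSE);
struct meta *meta = forest;
/* metaTagVal() searches the stanza and then its parents, so a tag defined on
 * a top-level stanza is visible from its descendants. */
char *cellLine = metaTagVal(meta, "cellLine");
if (cellLine != NULL)
    printf("cellLine = %s\n", cellLine);
metaWriteAll(forest, "out.meta", 3, FALSE, 0);
metaFreeForest(&forest);
}
#endif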
static void rMetaListWrite(struct meta *metaList, struct meta *parent,
int level, int maxLevel, int indent, boolean withParent, FILE *f)
/* Write out list of stanzas at same level to file, their children too. */
{
int totalIndent = level * indent;
struct meta *meta;
int nextLevel = level+1;
for (meta = metaList; meta != NULL; meta = meta->next)
{
struct metaTagVal *mtv;
boolean gotParent = FALSE;
for (mtv = meta->tagList; mtv != NULL; mtv = mtv->next)
{
if (sameString(mtv->tag, "parent"))
{
if (withParent)
gotParent = TRUE;
else
continue;
}
spaceOut(f, totalIndent);
fprintf(f, "%s %s\n", mtv->tag, mtv->val);
}
if (withParent && !gotParent && parent != NULL)
{
spaceOut(f, totalIndent);
fprintf(f, "%s %s\n", "parent", parent->name);
}
fprintf(f, "\n");
if (meta->children && nextLevel < maxLevel)
rMetaListWrite(meta->children, meta, nextLevel, maxLevel, indent, withParent, f);
}
}
void metaWriteAll(struct meta *metaList, char *fileName, int indent, boolean withParent,
int maxDepth)
/* Write out metadata, optionally adding meta tag. If maxDepth is non-zero just write
* up to that many levels. Root level is 0. */
{
FILE *f = mustOpen(fileName, "w");
if (maxDepth == 0)
maxDepth = BIGNUM;
rMetaListWrite(metaList, NULL, 0, maxDepth, indent, withParent, f);
carefulClose(&f);
}
char *metaLocalTagVal(struct meta *meta, char *name)
/* Return value of tag found in this node, not going up to parents. */
{
struct metaTagVal *mtv;
for (mtv = meta->tagList; mtv != NULL; mtv = mtv->next)
if (sameString(mtv->tag, name))
return mtv->val;
return NULL;
}
char *metaTagVal(struct meta *meta, char *name)
/* Return value of tag found in this node or if its not there in parents.
* Returns NULL if tag not found. */
{
struct meta *m;
for (m = meta; m != NULL; m = m->parent)
{
char *val = metaLocalTagVal(m, name);
if (val != NULL)
return val;
}
return NULL;
}
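/* Illustrative lookup (comment added): with the hierarchy sketched in the
 * header comment (lowLevel nested under midLevel under topLevel),
 * metaTagVal(lowLevel, "cellLine") walks lowLevel -> midLevel -> topLevel and
 * returns "HELA", whereas metaLocalTagVal(lowLevel, "cellLine") returns NULL
 * because the tag only appears on the top-level stanza. */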
void metaAddTag(struct meta *meta, char *tag, char *val)
/* Add tag to meta, replacing existing tag if any */
{
/* First loop through to replace an existing tag. */
struct metaTagVal *mtv;
for (mtv = meta->tagList; mtv != NULL; mtv = mtv->next)
{
if (sameString(mtv->tag, tag))
{
freeMem(mtv->val);
mtv->val = cloneString(val);
return;
}
}
/* If didn't make it then add new tag (at end) */
mtv = metaTagValNew(tag, val);
slAddTail(&meta->tagList, mtv);
}
static void rHashMetaList(struct hash *hash, struct meta *list)
/* Add list, and any children of list to hash */
{
struct meta *meta;
for (meta = list; meta != NULL; meta = meta->next)
{
hashAddUnique(hash, meta->name, meta);
if (meta->children)
rHashMetaList(hash, meta->children);
}
}
struct hash *metaHash(struct meta *forest)
/* Return hash of meta at all levels of the hierarchy, keyed by meta name. */
{
struct hash *hash = hashNew(0);
rHashMetaList(hash, forest);
return hash;
}
|
848390.c | #include "asm/includes.h"
//#include "asm/ldo.h"
//#include "asm/cache.h"
#include "asm/wdt.h"
#include "asm/debug.h"
#include "asm/efuse.h"
#include "asm/power/p33.h"
#include "system/task.h"
#include "timer.h"
#include "system/init.h"
#include "system/includes.h"
#include "app_config.h"
#include "gpio.h"
//#include "power_manage.h"
//
#define LOG_TAG_CONST SETUP
#define LOG_TAG "[SETUP]"
#define LOG_ERROR_ENABLE
#define LOG_DEBUG_ENABLE
#define LOG_INFO_ENABLE
/* #define LOG_DUMP_ENABLE */
#define LOG_CLI_ENABLE
#include "debug.h"
//extern void dv15_dac_early_init(u8 ldo_sel, u8 pwr_sel, u32 dly_msecs);
//
extern void sys_timer_init(void);
extern void tick_timer_init(void);
extern void vPortSysSleepInit(void);
extern void reset_source_dump(void);
extern u8 power_reset_source_dump(void);
extern void exception_irq_handler(void);
extern int __crc16_mutex_init();
#define DEBUG_SINGAL_IDLE(x) //if (x) IO_DEBUG_1(A, 7) else IO_DEBUG_0(A, 7)
#define DEBUG_SINGAL_1S(x) //if (x) IO_DEBUG_1(A, 6) else IO_DEBUG_0(A, 6)
#if (defined CONFIG_DEBUG_ENABLE) || (defined CONFIG_DEBUG_LITE_ENABLE)
void debug_uart_init(const struct uart_platform_data *data);
#endif
#if 0
___interrupt
void exception_irq_handler(void)
{
___trig;
exception_analyze();
log_flush();
while (1);
}
#endif
#if 0
#define CACHE_LINE_COUNT (32)
#define ONE_TIME_CODE (1*1024)
extern u32 text_code_begin;
extern u32 text_code_end;
static u32 load_ptr = 0xffffffff;
void load_code2cache()
{
u32 i = 0;
volatile u8 tmp;
u32 ali_start = ((u32)&text_code_begin & ~(CACHE_LINE_COUNT - 1));
u32 ali_end = (u32)&text_code_end + (4 * CACHE_LINE_COUNT);
if (load_ptr == 0xffffffff) {
load_ptr = ali_start;
}
while (1) {
i += CACHE_LINE_COUNT;
load_ptr += CACHE_LINE_COUNT;
tmp = *(u8 *)load_ptr;
if (load_ptr >= ali_end) {
load_ptr = ali_start;
}
if (i >= ONE_TIME_CODE) {
/* y_printf("-- %x \n",load_ptr); */
return;
}
}
}
#endif
/*
* This function is called first after cpu0 powers up and is responsible for
* initializing the CPU's internal modules.
*
* The operating system only starts to initialize and run after this function returns.
*
*/
#if 0
static void early_putchar(char a)
{
if (a == '\n') {
UT2_BUF = '\r';
__asm_csync();
while ((UT2_CON & BIT(15)) == 0);
}
UT2_BUF = a;
__asm_csync();
while ((UT2_CON & BIT(15)) == 0);
}
void early_puts(char *s)
{
do {
early_putchar(*s);
} while (*(++s));
}
#endif
void cpu_assert_debug()
{
#ifdef CONFIG_DEBUG_ENABLE
log_flush();
local_irq_disable();
while (1);
#else
cpu_reset();
#endif
}
void timer(void *p)
{
/* DEBUG_SINGAL_1S(1); */
sys_timer_dump_time();
/* DEBUG_SINGAL_1S(0);*/
}
u8 power_reset_src = 0;
extern void sputchar(char c);
extern void sput_buf(const u8 *buf, int len);
void sput_u32hex(u32 dat);
void *vmem_get_phy_adr(void *vaddr);
void test_fun()
{
wdt_close();
while (1);
}
AT_VOLATILE_RAM_CODE
void __lvd_irq_handler(void)
{
VLVD_PND_CLR(1);
}
void load_common_code();
void app_load_common_code()
{
#ifdef CONFIG_CODE_BANK_ENABLE
/* load_common_code(); */
#endif
}
u32 stack_magic[4] sec(.stack_magic);
u32 stack_magic0[4] sec(.stack_magic0);
extern void lvd_enable(void);
void memory_init(void);
void setup_arch()
{
memory_init();
memset(stack_magic, 0x5a, sizeof(stack_magic));
memset(stack_magic0, 0x5a, sizeof(stack_magic0));
wdt_init(WDT_4S);
/* wdt_close(); */
#if (AUDIO_OUTPUT_WAY == AUDIO_OUTPUT_WAY_FM)
clk_init_osc_ldos(2);
#else
clk_init_osc_ldos(3);
#endif
clk_init_osc_cap(0x0a, 0x0a);
clk_voltage_init(TCFG_CLOCK_MODE, SYSVDD_VOL_SEL_126V, TCFG_LOWPOWER_POWER_SEL, VDC13_VOL_SEL_140V);
clk_early_init(TCFG_CLOCK_SYS_SRC, TCFG_CLOCK_OSC_HZ, TCFG_CLOCK_SYS_HZ);
tick_timer_init();
/* lvd_enable(); */
/*interrupt_init();*/
#if (defined CONFIG_DEBUG_ENABLE) || (defined CONFIG_DEBUG_LITE_ENABLE)
debug_uart_init(NULL);
#ifdef CONFIG_DEBUG_ENABLE
log_early_init(1024);
#endif
#endif
#ifdef CONFIG_CODE_BANK_ENABLE
extern void bank_syscall_entry();
request_irq(IRQ_SYSCALL_IDX, 0, bank_syscall_entry, 0);
#endif
printf("\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
printf(" setup_arch %s %s \n", __DATE__, __TIME__);
printf("\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
printf("\n~~~~~~~~~~~~~ chip id %x ~~~~~~~~~~~~~\n", JL_SYSTEM->CHIP_ID & 0x0f);
clock_dump();
/* log_info("resour est: %d", get_boot_flag()); */
//set_boot_flag(99);
/* log_info("resour est: %d", get_boot_flag()); */
reset_source_dump();
power_reset_src = power_reset_source_dump();
//Register debugger interrupt
request_irq(0, 2, exception_irq_handler, 0);
request_irq(1, 2, exception_irq_handler, 0);
debug_init();
/* CHGBG_EN(0); */
/* CHARGE_EN(0); */
sys_timer_init();
/* sys_timer_add(NULL, timer, 10 * 1000); */
__crc16_mutex_init();
}
/*-----------------------------------------------------------*/
|
593964.c | void test1(){
return;
}
int test2(){
return 2;
}
int test3(int a){
return 1;
}
int test5(int a, int b){
return 1;
}
int test6(){
int a = 5;
string b = 6;
return 1;
}
int test7(){
int b = test6(5);
return 2;
}
int test8(){
return test7();
}
int test9(){
int a = test7()[0];
return 1;
}
int test10(){
int a = test7(1, 2, 4)[0];
return 1;
}
int test11(){
int a = *b;
return *a;
}
int test12(){
int a[5] = funcall();
string a[] = funcall();
return 0;
}
void test13(){
if(a){
return;
}
}
void test14(){
while(b){
return;
}
while(z)
return;
}
int test15(int a){
return a + 5 - 4;
}
int fibo(int n){
if(n >= 0)
return 0;
if(n == 1)
return 1;
return fibo(n - 1) + fibo(n - 2);
}
int main(){
int b = fun("ddddd");
test(b);
return fun(5, 6, 7);
}
|
435153.c | /* $OpenBSD: proc.c,v 1.29 2015/12/07 16:05:56 reyk Exp $ */
/*
* Copyright (c) 2010 - 2014 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2008 Pierre-Yves Ritschard <pyr@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <pwd.h>
#include <imsg.h>
#include <event2/event.h>
#include <openssl/rand.h>
#include "iked.h"
enum privsep_procid privsep_process;
void proc_open(struct privsep *, struct privsep_proc *,
struct privsep_proc *, size_t);
void proc_close(struct privsep *);
int proc_ispeer(struct privsep_proc *, unsigned int, enum privsep_procid);
void proc_shutdown(struct privsep_proc *);
void proc_sig_handler(int, short, void *);
void proc_range(struct privsep *, enum privsep_procid, int *, int *);
int proc_dispatch_null(int, struct privsep_proc *, struct imsg *);
int
proc_ispeer(struct privsep_proc *procs, unsigned int nproc,
enum privsep_procid type)
{
unsigned int i;
for (i = 0; i < nproc; i++)
if (procs[i].p_id == type)
return (1);
return (0);
}
void
proc_init(struct privsep *ps, struct privsep_proc *procs, unsigned int nproc)
{
unsigned int i, j, src, dst;
struct privsep_pipes *pp;
/*
* Allocate pipes for all process instances (incl. parent)
*
* - ps->ps_pipes: N:M mapping
* N source processes connected to M destination processes:
* [src][instances][dst][instances], for example
* [PROC_RELAY][3][PROC_CA][3]
*
* - ps->ps_pp: per-process 1:M part of ps->ps_pipes
* Each process instance has a destination array of socketpair fds:
* [dst][instances], for example
* [PROC_PARENT][0]
*/
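/* Concrete example (comment added): with 3 relay and 3 ca instances,
 * ps->ps_pipes[PROC_RELAY][1].pp_pipes[PROC_CA][2] holds the fd connecting
 * relay instance 1 to ca instance 2, and the matching end of that socketpair
 * is stored in ps->ps_pipes[PROC_CA][2].pp_pipes[PROC_RELAY][1]. */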
for (src = 0; src < PROC_MAX; src++) {
/* Allocate destination array for each process */
if ((ps->ps_pipes[src] = calloc(ps->ps_ninstances,
sizeof(struct privsep_pipes))) == NULL)
fatal("proc_init: calloc");
for (i = 0; i < ps->ps_ninstances; i++) {
pp = &ps->ps_pipes[src][i];
for (dst = 0; dst < PROC_MAX; dst++) {
/* Allocate maximum fd integers */
if ((pp->pp_pipes[dst] =
calloc(ps->ps_ninstances,
sizeof(int))) == NULL)
fatal("proc_init: calloc");
/* Mark fd as unused */
for (j = 0; j < ps->ps_ninstances; j++)
pp->pp_pipes[dst][j] = -1;
}
}
}
/*
* Setup and run the parent and its children
*/
privsep_process = PROC_PARENT;
ps->ps_instances[PROC_PARENT] = 1;
ps->ps_title[PROC_PARENT] = "parent";
ps->ps_pid[PROC_PARENT] = getpid();
ps->ps_pp = &ps->ps_pipes[privsep_process][0];
for (i = 0; i < nproc; i++) {
/* Default to 1 process instance */
if (ps->ps_instances[procs[i].p_id] < 1)
ps->ps_instances[procs[i].p_id] = 1;
ps->ps_title[procs[i].p_id] = procs[i].p_title;
}
proc_open(ps, NULL, procs, nproc);
/* Engage! */
for (i = 0; i < nproc; i++)
ps->ps_pid[procs[i].p_id] = (*procs[i].p_init)(ps, &procs[i]);
}
int
proc_reap(struct privsep *ps, pid_t pid, int status)
{
int id;
if (pid <= 0)
return (0);
for (id = 0; id < PROC_MAX; id++) {
if (pid == ps->ps_pid[id])
break;
}
if (id == PROC_MAX)
return (0);
if (WIFSIGNALED(status)) {
if (WTERMSIG(status) != SIGKILL && WTERMSIG(status) != SIGABRT)
ps->ps_restart = 1;
log_warnx("%s[%d] terminated with signal %d",
ps->ps_title[id], pid, WTERMSIG(status));
} else if (WIFEXITED(status)) {
if (WEXITSTATUS(status) != 0) {
ps->ps_restart = 1;
log_warnx("%s[%d] terminated with exit code %d",
ps->ps_title[id], pid, WEXITSTATUS(status));
} else
log_info("%s[%d] terminated", ps->ps_title[id], pid);
} else
log_warnx("%s[%d] unexpected SIGCHLD", ps->ps_title[id], pid);
ps->ps_pid[id] = 0;
return (1);
}
void
proc_kill(struct privsep *ps)
{
pid_t pid;
int i, status;
if (privsep_process != PROC_PARENT)
return;
for (i = 0; i < PROC_MAX; i++) {
if (ps->ps_pid[i] == 0)
continue;
killpg(ps->ps_pid[i], SIGTERM);
}
do {
pid = waitpid(WAIT_ANY, &status, 0);
if (pid > 0)
proc_reap(ps, pid, status);
} while (pid != -1 || (pid == -1 && errno == EINTR));
}
void
proc_open(struct privsep *ps, struct privsep_proc *p,
struct privsep_proc *procs, size_t nproc)
{
struct privsep_pipes *pa, *pb;
int fds[2];
unsigned int i, j, src, proc;
if (p == NULL)
src = privsep_process; /* parent */
else
src = p->p_id;
/*
* Open socket pairs for our peers
*/
for (proc = 0; proc < nproc; proc++) {
procs[proc].p_ps = ps;
procs[proc].p_env = ps->ps_env;
if (procs[proc].p_cb == NULL)
procs[proc].p_cb = proc_dispatch_null;
for (i = 0; i < ps->ps_instances[src]; i++) {
for (j = 0; j < ps->ps_instances[procs[proc].p_id];
j++) {
pa = &ps->ps_pipes[src][i];
pb = &ps->ps_pipes[procs[proc].p_id][j];
/* Check if fds are already set by peer */
if (pa->pp_pipes[procs[proc].p_id][j] != -1)
continue;
if (bsd_socketpair(AF_UNIX,
SOCK_STREAM | SOCK_NONBLOCK,
PF_UNSPEC, fds) == -1)
fatal("socketpair");
pa->pp_pipes[procs[proc].p_id][j] = fds[0];
pb->pp_pipes[src][i] = fds[1];
}
}
}
}
void
proc_listen(struct privsep *ps, struct privsep_proc *procs, size_t nproc)
{
struct iked *env = ps->ps_env;
struct privsep_pipes *pp;
unsigned int i, dst, src, n, m;
/*
* Close unused pipes
*/
for (src = 0; src < PROC_MAX; src++) {
for (n = 0; n < ps->ps_instances[src]; n++) {
/* Ignore current process */
if (src == (unsigned int)privsep_process &&
n == ps->ps_instance)
continue;
pp = &ps->ps_pipes[src][n];
for (dst = 0; dst < PROC_MAX; dst++) {
if (src == dst)
continue;
for (m = 0; m < ps->ps_instances[dst]; m++) {
if (pp->pp_pipes[dst][m] == -1)
continue;
/* Close and invalidate fd */
close(pp->pp_pipes[dst][m]);
pp->pp_pipes[dst][m] = -1;
}
}
}
}
src = privsep_process;
ps->ps_pp = pp = &ps->ps_pipes[src][ps->ps_instance];
/*
* Listen on appropriate pipes
*/
for (i = 0; i < nproc; i++) {
dst = procs[i].p_id;
if (src == dst)
fatal("%s: cannot peer with oneself", __func__);
if ((ps->ps_ievs[dst] = calloc(ps->ps_instances[dst],
sizeof(struct imsgev))) == NULL)
fatal("%s: calloc", __func__);
for (n = 0; n < ps->ps_instances[dst]; n++) {
if (pp->pp_pipes[dst][n] == -1)
continue;
imsg_init(&(ps->ps_ievs[dst][n].ibuf),
pp->pp_pipes[dst][n]);
ps->ps_ievs[dst][n].handler = proc_dispatch;
ps->ps_ievs[dst][n].events = EV_READ;
ps->ps_ievs[dst][n].proc = &procs[i];
ps->ps_ievs[dst][n].data = &ps->ps_ievs[dst][n];
procs[i].p_instance = n;
ps->ps_ievs[dst][n].ev = event_new(env->sc_evbase,
ps->ps_ievs[dst][n].ibuf.fd,
ps->ps_ievs[dst][n].events,
ps->ps_ievs[dst][n].handler,
ps->ps_ievs[dst][n].data);
if (ps->ps_ievs[dst][n].ev == NULL)
fatal("%s: event_new", __func__);
event_add(ps->ps_ievs[dst][n].ev, NULL);
}
}
}
void
proc_close(struct privsep *ps)
{
unsigned int dst, n;
struct privsep_pipes *pp;
if (ps == NULL)
return;
pp = ps->ps_pp;
for (dst = 0; dst < PROC_MAX; dst++) {
if (ps->ps_ievs[dst] == NULL)
continue;
for (n = 0; n < ps->ps_instances[dst]; n++) {
if (pp->pp_pipes[dst][n] == -1)
continue;
/* Cancel the fd, close and invalidate the fd */
event_free(ps->ps_ievs[dst][n].ev);
imsg_clear(&(ps->ps_ievs[dst][n].ibuf));
close(pp->pp_pipes[dst][n]);
pp->pp_pipes[dst][n] = -1;
}
free(ps->ps_ievs[dst]);
}
}
void
proc_shutdown(struct privsep_proc *p)
{
struct privsep *ps = p->p_ps;
if (p->p_id == PROC_CONTROL && ps)
control_cleanup(&ps->ps_csock);
if (p->p_shutdown != NULL)
(*p->p_shutdown)();
proc_close(ps);
log_info("%s exiting, pid %d", p->p_title, getpid());
_exit(0);
}
void
proc_sig_handler(int sig, short event, void *arg)
{
struct privsep_proc *p = arg;
switch (sig) {
case SIGINT:
case SIGTERM:
proc_shutdown(p);
break;
case SIGCHLD:
case SIGHUP:
case SIGPIPE:
case SIGUSR1:
/* ignore */
break;
default:
fatalx("proc_sig_handler: unexpected signal");
/* NOTREACHED */
}
}
pid_t
proc_run(struct privsep *ps, struct privsep_proc *p,
struct privsep_proc *procs, unsigned int nproc,
void (*run)(struct privsep *, struct privsep_proc *, void *), void *arg)
{
struct iked *env;
pid_t pid;
struct passwd *pw;
const char *root;
struct control_sock *rcs;
unsigned int n;
#ifndef LIBRESSL_VERSION_NUMBER
uint32_t seed[256];
#endif
proc_open(ps, p, procs, nproc);
/* Fork child handlers */
switch (pid = fork()) {
case -1:
fatal("proc_run: cannot fork");
case 0:
log_procinit(p->p_title);
/* Set the process group of the current process */
setpgid(0, 0);
break;
default:
return (pid);
}
env = ps->ps_env;
pw = ps->ps_pw;
env->sc_evbase = event_base_new();
if (p->p_id == PROC_CONTROL && ps->ps_instance == 0) {
if (control_init(ps, &ps->ps_csock) == -1)
fatalx(__func__);
TAILQ_FOREACH(rcs, &ps->ps_rcsocks, cs_entry)
if (control_init(ps, rcs) == -1)
fatalx(__func__);
}
/* Change root directory */
if (p->p_chroot != NULL)
root = p->p_chroot;
else
root = pw->pw_dir;
if (chroot(root) == -1)
fatal("proc_run: chroot");
if (chdir("/") == -1)
fatal("proc_run: chdir(\"/\")");
privsep_process = p->p_id;
setproctitle("%s", p->p_title);
if (setgroups(1, &pw->pw_gid) ||
setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
fatal("proc_run: cannot drop privileges");
/* Fork child handlers */
for (n = 1; n < ps->ps_instances[p->p_id]; n++) {
if (fork() == 0) {
ps->ps_instance = p->p_instance = n;
break;
}
}
#ifdef DEBUG
log_debug("%s: %s %d/%d, pid %d", __func__, p->p_title,
ps->ps_instance + 1, ps->ps_instances[p->p_id], getpid());
#endif
ps->ps_evsigint = evsignal_new(env->sc_evbase, SIGINT,
proc_sig_handler, p);
evsignal_add(ps->ps_evsigint, NULL);
ps->ps_evsigterm = evsignal_new(env->sc_evbase, SIGTERM,
proc_sig_handler, p);
evsignal_add(ps->ps_evsigterm, NULL);
ps->ps_evsigchld = evsignal_new(env->sc_evbase, SIGCHLD,
proc_sig_handler, p);
evsignal_add(ps->ps_evsigchld, NULL);
ps->ps_evsighup = evsignal_new(env->sc_evbase, SIGHUP,
proc_sig_handler, p);
evsignal_add(ps->ps_evsighup, NULL);
ps->ps_evsigpipe = evsignal_new(env->sc_evbase, SIGPIPE,
proc_sig_handler, p);
evsignal_add(ps->ps_evsigpipe, NULL);
ps->ps_evsigusr1 = evsignal_new(env->sc_evbase, SIGUSR1,
proc_sig_handler, p);
evsignal_add(ps->ps_evsigusr1, NULL);
#ifndef LIBRESSL_VERSION_NUMBER
arc4random_buf(seed, sizeof(seed));
RAND_seed(seed, sizeof(seed));
#endif
proc_listen(ps, procs, nproc);
if (p->p_id == PROC_CONTROL && ps->ps_instance == 0) {
if (control_listen(ps, &ps->ps_csock) == -1)
fatalx(__func__);
TAILQ_FOREACH(rcs, &ps->ps_rcsocks, cs_entry)
if (control_listen(ps, rcs) == -1)
fatalx(__func__);
}
if (run != NULL)
run(ps, p, arg);
event_base_dispatch(env->sc_evbase);
proc_shutdown(p);
return (0);
}
void
proc_dispatch(int fd, short event, void *arg)
{
struct imsgev *iev = arg;
struct privsep_proc *p = iev->proc;
struct privsep *ps = p->p_ps;
struct imsgbuf *ibuf;
struct imsg imsg;
ssize_t n;
int verbose;
const char *title;
title = ps->ps_title[privsep_process];
ibuf = &iev->ibuf;
if (event & EV_READ) {
if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
fatal(__func__);
if (n == 0) {
/* this pipe is dead, so remove the event handler */
event_del(iev->ev);
event_base_loopexit(ps->ps_env->sc_evbase, NULL);
return;
}
}
if (event & EV_WRITE) {
if (msgbuf_write(&ibuf->w) <= 0 && errno != EAGAIN)
fatal(__func__);
}
for (;;) {
if ((n = imsg_get(ibuf, &imsg)) == -1)
fatal(__func__);
if (n == 0)
break;
#if DEBUG > 1
log_debug("%s: %s %d got imsg %d peerid %d from %s %d",
__func__, title, ps->ps_instance + 1,
imsg.hdr.type, imsg.hdr.peerid, p->p_title, p->p_instance);
#endif
/*
* Check the message with the program callback
*/
if ((p->p_cb)(fd, p, &imsg) == 0) {
/* Message was handled by the callback, continue */
imsg_free(&imsg);
continue;
}
/*
* Generic message handling
*/
switch (imsg.hdr.type) {
case IMSG_CTL_VERBOSE:
IMSG_SIZE_CHECK(&imsg, &verbose);
memcpy(&verbose, imsg.data, sizeof(verbose));
log_verbose(verbose);
break;
default:
log_warnx("%s: %s %d got invalid imsg %d peerid %d "
"from %s %d",
__func__, title, ps->ps_instance + 1,
imsg.hdr.type, imsg.hdr.peerid,
p->p_title, p->p_instance);
fatalx(__func__);
}
imsg_free(&imsg);
}
imsg_event_add(iev);
}
int
proc_dispatch_null(int fd, struct privsep_proc *p, struct imsg *imsg)
{
return (-1);
}
/*
* imsg helper functions
*/
void
imsg_event_add(struct imsgev *iev)
{
if (iev->handler == NULL) {
imsg_flush(&iev->ibuf);
return;
}
iev->events = EV_READ;
if (iev->ibuf.w.queued)
iev->events |= EV_WRITE;
if (iev->ev != NULL) {
event_del(iev->ev);
event_assign(iev->ev, iev->proc->p_env->sc_evbase,
iev->ibuf.fd, iev->events, iev->handler, iev->data);
} else {
iev->ev = event_new(iev->proc->p_env->sc_evbase, iev->ibuf.fd,
iev->events, iev->handler, iev->data);
assert(iev->ev != NULL);
}
event_add(iev->ev, NULL);
}
int
imsg_compose_event(struct imsgev *iev, uint16_t type, uint32_t peerid,
pid_t pid, int fd, void *data, uint16_t datalen)
{
int ret;
if ((ret = imsg_compose(&iev->ibuf, type, peerid,
pid, fd, data, datalen)) == -1)
return (ret);
imsg_event_add(iev);
return (ret);
}
int
imsg_composev_event(struct imsgev *iev, uint16_t type, uint32_t peerid,
pid_t pid, int fd, const struct iovec *iov, int iovcnt)
{
int ret;
if ((ret = imsg_composev(&iev->ibuf, type, peerid,
pid, fd, iov, iovcnt)) == -1)
return (ret);
imsg_event_add(iev);
return (ret);
}
void
proc_range(struct privsep *ps, enum privsep_procid id, int *n, int *m)
{
if (*n == -1) {
/* Use a range of all target instances */
*n = 0;
*m = ps->ps_instances[id];
} else {
/* Use only a single slot of the specified peer process */
*m = *n + 1;
}
}
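/*
 * Illustrative sketch (not part of the original source): how the n == -1
 * convention above is used by the compose helpers below.  proc_compose()
 * passes n = -1, so proc_range() widens the range to every instance of the
 * target process and the imsg is effectively broadcast.  PROC_IKEV2 and
 * IMSG_CTL_RESET are assumed example constants.
 *
 *	// broadcast to all instances of the target process
 *	proc_compose(ps, PROC_IKEV2, IMSG_CTL_RESET, NULL, 0);
 *
 *	// address only instance 2 of the target process
 *	proc_compose_imsg(ps, PROC_IKEV2, 2, IMSG_CTL_RESET, 0, -1, NULL, 0);
 */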
int
proc_compose_imsg(struct privsep *ps, enum privsep_procid id, int n,
uint16_t type, uint32_t peerid, int fd, void *data, uint16_t datalen)
{
int m;
proc_range(ps, id, &n, &m);
for (; n < m; n++) {
if (imsg_compose_event(&ps->ps_ievs[id][n],
type, peerid, 0, fd, data, datalen) == -1)
return (-1);
}
return (0);
}
int
proc_compose(struct privsep *ps, enum privsep_procid id,
uint16_t type, void *data, uint16_t datalen)
{
return (proc_compose_imsg(ps, id, -1, type, -1, -1, data, datalen));
}
int
proc_composev_imsg(struct privsep *ps, enum privsep_procid id, int n,
uint16_t type, uint32_t peerid, int fd, const struct iovec *iov, int iovcnt)
{
int m;
proc_range(ps, id, &n, &m);
for (; n < m; n++)
if (imsg_composev_event(&ps->ps_ievs[id][n],
type, peerid, 0, fd, iov, iovcnt) == -1)
return (-1);
return (0);
}
int
proc_composev(struct privsep *ps, enum privsep_procid id,
uint16_t type, const struct iovec *iov, int iovcnt)
{
return (proc_composev_imsg(ps, id, -1, type, -1, -1, iov, iovcnt));
}
int
proc_forward_imsg(struct privsep *ps, struct imsg *imsg,
enum privsep_procid id, int n)
{
return (proc_compose_imsg(ps, id, n, imsg->hdr.type,
imsg->hdr.peerid, imsg->fd, imsg->data, IMSG_DATA_SIZE(imsg)));
}
struct imsgbuf *
proc_ibuf(struct privsep *ps, enum privsep_procid id, int n)
{
int m;
proc_range(ps, id, &n, &m);
return (&ps->ps_ievs[id][n].ibuf);
}
struct imsgev *
proc_iev(struct privsep *ps, enum privsep_procid id, int n)
{
int m;
proc_range(ps, id, &n, &m);
return (&ps->ps_ievs[id][n]);
}
|
412615.c | /* $OpenBSD: log.c,v 1.23 2014/07/12 14:34:13 reyk Exp $ */
/*
* Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER
* IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/tree.h>
#include <net/if.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <arpa/inet.h>
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <event.h>
#include <netdb.h>
#include <ctype.h>
#include <openssl/ssl.h>
#include "relayd.h"
int debug;
int verbose;
void vlog(int, const char *, va_list)
__attribute__((__format__ (printf, 2, 0)));
void logit(int, const char *, ...)
__attribute__((__format__ (printf, 2, 3)));
void
log_init(int n_debug)
{
extern char *__progname;
debug = n_debug;
verbose = n_debug;
if (!debug)
openlog(__progname, LOG_PID | LOG_NDELAY, LOG_DAEMON);
tzset();
}
void
log_verbose(int v)
{
verbose = v;
}
void
logit(int pri, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
vlog(pri, fmt, ap);
va_end(ap);
}
void
vlog(int pri, const char *fmt, va_list ap)
{
char *nfmt;
if (debug) {
/* best effort in out of mem situations */
if (asprintf(&nfmt, "%s\n", fmt) == -1) {
vfprintf(stderr, fmt, ap);
fprintf(stderr, "\n");
} else {
vfprintf(stderr, nfmt, ap);
free(nfmt);
}
fflush(stderr);
} else
vsyslog(pri, fmt, ap);
}
void
log_warn(const char *emsg, ...)
{
char *nfmt;
va_list ap;
/* best effort to even work in out of memory situations */
if (emsg == NULL)
logit(LOG_CRIT, "%s", strerror(errno));
else {
va_start(ap, emsg);
if (asprintf(&nfmt, "%s: %s", emsg, strerror(errno)) == -1) {
/* we tried it... */
vlog(LOG_CRIT, emsg, ap);
logit(LOG_CRIT, "%s", strerror(errno));
} else {
vlog(LOG_CRIT, nfmt, ap);
free(nfmt);
}
va_end(ap);
}
}
void
log_warnx(const char *emsg, ...)
{
va_list ap;
va_start(ap, emsg);
vlog(LOG_CRIT, emsg, ap);
va_end(ap);
}
void
log_info(const char *emsg, ...)
{
va_list ap;
va_start(ap, emsg);
vlog(LOG_INFO, emsg, ap);
va_end(ap);
}
void
log_debug(const char *emsg, ...)
{
va_list ap;
if (verbose > 1) {
va_start(ap, emsg);
vlog(LOG_DEBUG, emsg, ap);
va_end(ap);
}
}
void
fatal(const char *emsg)
{
if (emsg == NULL)
logit(LOG_CRIT, "fatal: %s", strerror(errno));
else
if (errno)
logit(LOG_CRIT, "fatal: %s: %s",
emsg, strerror(errno));
else
logit(LOG_CRIT, "fatal: %s", emsg);
exit(1);
}
void
fatalx(const char *emsg)
{
errno = 0;
fatal(emsg);
}
const char *
host_error(enum host_error he)
{
switch (he) {
case HCE_NONE:
return ("none");
break;
case HCE_ABORT:
return ("aborted");
break;
case HCE_INTERVAL_TIMEOUT:
return ("interval timeout");
break;
case HCE_ICMP_OK:
return ("icmp ok");
break;
case HCE_ICMP_READ_TIMEOUT:
return ("icmp read timeout");
break;
case HCE_ICMP_WRITE_TIMEOUT:
return ("icmp write timeout");
break;
case HCE_TCP_SOCKET_ERROR:
return ("tcp socket error");
break;
case HCE_TCP_SOCKET_LIMIT:
return ("tcp socket limit");
break;
case HCE_TCP_SOCKET_OPTION:
return ("tcp socket option");
break;
case HCE_TCP_CONNECT_FAIL:
return ("tcp connect failed");
break;
case HCE_TCP_CONNECT_TIMEOUT:
return ("tcp connect timeout");
break;
case HCE_TCP_CONNECT_OK:
return ("tcp connect ok");
break;
case HCE_TCP_WRITE_TIMEOUT:
return ("tcp write timeout");
break;
case HCE_TCP_WRITE_FAIL:
return ("tcp write failed");
break;
case HCE_TCP_READ_TIMEOUT:
return ("tcp read timeout");
break;
case HCE_TCP_READ_FAIL:
return ("tcp read failed");
break;
case HCE_SCRIPT_OK:
return ("script ok");
break;
case HCE_SCRIPT_FAIL:
return ("script failed");
break;
case HCE_SSL_CONNECT_OK:
return ("ssl connect ok");
break;
case HCE_SSL_CONNECT_FAIL:
return ("ssl connect failed");
break;
case HCE_SSL_CONNECT_TIMEOUT:
return ("ssl connect timeout");
break;
case HCE_SSL_CONNECT_ERROR:
return ("ssl connect error");
break;
case HCE_SSL_READ_TIMEOUT:
return ("ssl read timeout");
break;
case HCE_SSL_WRITE_TIMEOUT:
return ("ssl write timeout");
break;
case HCE_SSL_READ_ERROR:
return ("ssl read error");
break;
case HCE_SSL_WRITE_ERROR:
return ("ssl write error");
break;
case HCE_SEND_EXPECT_FAIL:
return ("send/expect failed");
break;
case HCE_SEND_EXPECT_OK:
return ("send/expect ok");
break;
case HCE_HTTP_CODE_ERROR:
return ("http code malformed");
break;
case HCE_HTTP_CODE_FAIL:
return ("http code mismatch");
break;
case HCE_HTTP_CODE_OK:
return ("http code ok");
break;
case HCE_HTTP_DIGEST_ERROR:
return ("http digest malformed");
break;
case HCE_HTTP_DIGEST_FAIL:
return ("http digest mismatch");
break;
case HCE_HTTP_DIGEST_OK:
return ("http digest ok");
break;
}
/* NOTREACHED */
return ("invalid");
}
const char *
host_status(enum host_status status)
{
switch (status) {
case HOST_DOWN:
return ("down");
case HOST_UNKNOWN:
return ("unknown");
case HOST_UP:
return ("up");
};
/* NOTREACHED */
return ("invalid");
}
const char *
table_check(enum table_check check)
{
switch (check) {
case CHECK_NOCHECK:
return ("none");
case CHECK_ICMP:
return ("icmp");
case CHECK_TCP:
return ("tcp");
case CHECK_HTTP_CODE:
return ("http code");
case CHECK_HTTP_DIGEST:
return ("http digest");
case CHECK_SEND_EXPECT:
return ("send expect");
case CHECK_SCRIPT:
return ("script");
};
/* NOTREACHED */
return ("invalid");
}
const char *
print_availability(u_long cnt, u_long up)
{
static char buf[BUFSIZ];
if (cnt == 0)
return ("");
bzero(buf, sizeof(buf));
snprintf(buf, sizeof(buf), "%.2f%%", (double)up / cnt * 100);
return (buf);
}
const char *
print_host(struct sockaddr_storage *ss, char *buf, size_t len)
{
if (getnameinfo((struct sockaddr *)ss, ss->ss_len,
buf, len, NULL, 0, NI_NUMERICHOST) != 0) {
buf[0] = '\0';
return (NULL);
}
return (buf);
}
const char *
print_time(struct timeval *a, struct timeval *b, char *buf, size_t len)
{
struct timeval tv;
u_long h, sec, min;
timerclear(&tv);
timersub(a, b, &tv);
sec = tv.tv_sec % 60;
min = tv.tv_sec / 60 % 60;
h = tv.tv_sec / 60 / 60;
snprintf(buf, len, "%.2lu:%.2lu:%.2lu", h, min, sec);
return (buf);
}
const char *
printb_flags(const u_int32_t v, const char *bits)
{
static char buf[2][BUFSIZ];
static int idx = 0;
int i, any = 0;
char c, *p, *r;
p = r = buf[++idx % 2];
bzero(p, BUFSIZ);
if (bits) {
bits++;
while ((i = *bits++)) {
if (v & (1 << (i - 1))) {
if (any) {
*p++ = ',';
*p++ = ' ';
}
any = 1;
for (; (c = *bits) > 32; bits++) {
if (c == '_')
*p++ = ' ';
else
*p++ = tolower((u_char)c);
}
} else
for (; *bits > 32; bits++)
;
}
}
return (r);
}
void
getmonotime(struct timeval *tv)
{
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts))
fatal("clock_gettime");
TIMESPEC_TO_TIMEVAL(tv, &ts);
}
|
652447.c | /* ----------------------------------------------------------------------- *
*
* Copyright 2006-2008 H. Peter Anvin - All Rights Reserved
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall
* be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* ----------------------------------------------------------------------- */
#include <inttypes.h>
#include <colortbl.h>
#include <string.h>
#include "vesa.h"
#include "video.h"
#include "fill.h"
/*
* Visible cursor information
*/
static uint8_t cursor_pattern[FONT_MAX_HEIGHT];
static struct vesa_char *cursor_pointer = NULL;
static int cursor_x, cursor_y;
static inline void *copy_dword(void *dst, void *src, size_t dword_count)
{
asm volatile ("rep; movsl":"+D" (dst), "+S"(src), "+c"(dword_count));
return dst; /* Updated destination pointer */
}
static inline __attribute__ ((always_inline))
uint8_t alpha_val(uint8_t fg, uint8_t bg, uint8_t alpha)
{
unsigned int tmp;
tmp = __vesacon_srgb_to_linear[fg] * alpha;
tmp += __vesacon_srgb_to_linear[bg] * (255 - alpha);
return __vesacon_linear_to_srgb[tmp >> 12];
}
static uint32_t alpha_pixel(uint32_t fg, uint32_t bg)
{
uint8_t alpha = fg >> 24;
uint8_t fg_r = fg >> 16;
uint8_t fg_g = fg >> 8;
uint8_t fg_b = fg;
uint8_t bg_r = bg >> 16;
uint8_t bg_g = bg >> 8;
uint8_t bg_b = bg;
return
(alpha_val(fg_r, bg_r, alpha) << 16) |
(alpha_val(fg_g, bg_g, alpha) << 8) | (alpha_val(fg_b, bg_b, alpha));
}
static void vesacon_update_characters(int row, int col, int nrows, int ncols)
{
const int height = __vesacon_font_height;
const int width = FONT_WIDTH;
uint32_t *bgrowptr, *bgptr, bgval, fgval;
uint32_t fgcolor = 0, bgcolor = 0, color;
uint8_t chbits = 0, chxbits = 0, chsbits = 0;
int i, j, jx, pixrow, pixsrow;
struct vesa_char *rowptr, *rowsptr, *cptr, *csptr;
unsigned int bytes_per_pixel = __vesacon_bytes_per_pixel;
unsigned long pixel_offset;
uint32_t row_buffer[__vesa_info.mi.h_res], *rowbufptr;
size_t fbrowptr;
uint8_t sha;
pixel_offset = ((row * height + VIDEO_BORDER) * __vesa_info.mi.h_res) +
(col * width + VIDEO_BORDER);
bgrowptr = &__vesacon_background[pixel_offset];
fbrowptr = (row * height + VIDEO_BORDER) * __vesa_info.mi.logical_scan +
(col * width + VIDEO_BORDER) * bytes_per_pixel;
/* Note that we keep a 1-character guard area around the real text area... */
rowptr = &__vesacon_text_display[(row+1)*(__vesacon_text_cols+2)+(col+1)];
rowsptr = rowptr - ((__vesacon_text_cols+2)+1);
pixrow = 0;
pixsrow = height - 1;
for (i = height * nrows; i >= 0; i--) {
bgptr = bgrowptr;
rowbufptr = row_buffer;
cptr = rowptr;
csptr = rowsptr;
chsbits = __vesacon_graphics_font[csptr->ch][pixsrow];
if (__unlikely(csptr == cursor_pointer))
chsbits |= cursor_pattern[pixsrow];
sha = console_color_table[csptr->attr].shadow;
chsbits &= (sha & 0x02) ? 0xff : 0x00;
chsbits ^= (sha & 0x01) ? 0xff : 0x00;
chsbits <<= (width - 2);
csptr++;
/* Draw two pixels beyond the end of the line. One for the shadow,
and one to make sure we have a whole dword of data for the copy
operation at the end. Note that this code depends on the fact that
all characters begin on dword boundaries in the frame buffer. */
for (jx = 1, j = width * ncols + 1; j >= 0; j--) {
chbits <<= 1;
chsbits <<= 1;
chxbits <<= 1;
switch (jx) {
case 1:
chbits = __vesacon_graphics_font[cptr->ch][pixrow];
if (__unlikely(cptr == cursor_pointer))
chbits |= cursor_pattern[pixrow];
sha = console_color_table[cptr->attr].shadow;
chxbits = chbits;
chxbits &= (sha & 0x02) ? 0xff : 0x00;
chxbits ^= (sha & 0x01) ? 0xff : 0x00;
fgcolor = console_color_table[cptr->attr].argb_fg;
bgcolor = console_color_table[cptr->attr].argb_bg;
cptr++;
jx--;
break;
case 0:
chsbits = __vesacon_graphics_font[csptr->ch][pixsrow];
if (__unlikely(csptr == cursor_pointer))
chsbits |= cursor_pattern[pixsrow];
sha = console_color_table[csptr->attr].shadow;
chsbits &= (sha & 0x02) ? 0xff : 0x00;
chsbits ^= (sha & 0x01) ? 0xff : 0x00;
csptr++;
jx = width - 1;
break;
default:
jx--;
break;
}
/* If this pixel is raised, use the offsetted value */
bgval = (chxbits & 0x80)
? bgptr[__vesa_info.mi.h_res + 1] : *bgptr;
bgptr++;
/* If this pixel is set, use the fg color, else the bg color */
fgval = (chbits & 0x80) ? fgcolor : bgcolor;
/* Produce the combined color pixel value */
color = alpha_pixel(fgval, bgval);
/* Apply the shadow (75% shadow) */
if ((chsbits & ~chxbits) & 0x80) {
color >>= 2;
color &= 0x3f3f3f;
}
*rowbufptr++ = color;
}
/* Copy to frame buffer */
__vesacon_copy_to_screen(fbrowptr, row_buffer, rowbufptr - row_buffer);
bgrowptr += __vesa_info.mi.h_res;
fbrowptr += __vesa_info.mi.logical_scan;
if (++pixrow == height) {
rowptr += __vesacon_text_cols + 2;
pixrow = 0;
}
if (++pixsrow == height) {
rowsptr += __vesacon_text_cols + 2;
pixsrow = 0;
}
}
}
/* Bounding box for changed text. The (x1, y1) coordinates are +1! */
static unsigned int upd_x0 = -1U, upd_x1, upd_y0 = -1U, upd_y1;
/* Update the range already touched by various variables */
void __vesacon_doit(void)
{
if (upd_x1 > upd_x0 && upd_y1 > upd_y0) {
vesacon_update_characters(upd_y0, upd_x0, upd_y1 - upd_y0,
upd_x1 - upd_x0);
upd_x0 = upd_y0 = -1U;
upd_x1 = upd_y1 = 0;
}
}
/* Mark a range for update; note argument sequence is the same as
vesacon_update_characters() */
static inline void vesacon_touch(int row, int col, int rows, int cols)
{
unsigned int y0 = row;
unsigned int x0 = col;
unsigned int y1 = y0 + rows;
unsigned int x1 = x0 + cols;
if (y0 < upd_y0)
upd_y0 = y0;
if (y1 > upd_y1)
upd_y1 = y1;
if (x0 < upd_x0)
upd_x0 = x0;
if (x1 > upd_x1)
upd_x1 = x1;
}
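/*
 * Illustrative sketch (not part of the original source): the deferred-update
 * pattern implemented by vesacon_touch() and __vesacon_doit() above.  The
 * drawing helpers below only mark a bounding box as dirty; one later call to
 * __vesacon_doit() repaints the union of everything that was touched.
 * "attr" stands for any attr_t value.
 *
 *	__vesacon_write_char(0, 0, 'A', attr);	// marks cell (0,0) dirty
 *	__vesacon_write_char(1, 0, 'B', attr);	// grows the bounding box
 *	__vesacon_doit();			// single redraw of the region
 */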
/* Erase a region of the screen */
void __vesacon_erase(int x0, int y0, int x1, int y1, attr_t attr)
{
int y;
struct vesa_char *ptr = &__vesacon_text_display
[(y0 + 1) * (__vesacon_text_cols + 2) + (x0 + 1)];
struct vesa_char fill = {
.ch = ' ',
.attr = attr,
};
int ncols = x1 - x0 + 1;
for (y = y0; y <= y1; y++) {
vesacon_fill(ptr, fill, ncols);
ptr += __vesacon_text_cols + 2;
}
vesacon_touch(y0, x0, y1 - y0 + 1, ncols);
}
/* Scroll the screen up */
void __vesacon_scroll_up(int nrows, attr_t attr)
{
struct vesa_char *fromptr = &__vesacon_text_display
[(nrows + 1) * (__vesacon_text_cols + 2)];
struct vesa_char *toptr = &__vesacon_text_display
[(__vesacon_text_cols + 2)];
int dword_count =
(__vesacon_text_rows - nrows) * (__vesacon_text_cols + 2);
struct vesa_char fill = {
.ch = ' ',
.attr = attr,
};
toptr = copy_dword(toptr, fromptr, dword_count);
dword_count = nrows * (__vesacon_text_cols + 2);
vesacon_fill(toptr, fill, dword_count);
vesacon_touch(0, 0, __vesacon_text_rows, __vesacon_text_cols);
}
/* Draw one character text at a specific area of the screen */
void __vesacon_write_char(int x, int y, uint8_t ch, attr_t attr)
{
struct vesa_char *ptr = &__vesacon_text_display
[(y + 1) * (__vesacon_text_cols + 2) + (x + 1)];
ptr->ch = ch;
ptr->attr = attr;
vesacon_touch(y, x, 1, 1);
}
void __vesacon_set_cursor(int x, int y, bool visible)
{
struct vesa_char *ptr = &__vesacon_text_display
[(y + 1) * (__vesacon_text_cols + 2) + (x + 1)];
if (cursor_pointer)
vesacon_touch(cursor_y, cursor_x, 1, 1);
if (!visible) {
/* Invisible cursor */
cursor_pointer = NULL;
} else {
cursor_pointer = ptr;
vesacon_touch(y, x, 1, 1);
}
cursor_x = x;
cursor_y = y;
}
void __vesacon_init_cursor(int font_height)
{
int r0 = font_height - (font_height < 10 ? 2 : 3);
if (r0 < 0)
r0 = 0;
memset(cursor_pattern, 0, font_height);
cursor_pattern[r0] = 0xff;
cursor_pattern[r0 + 1] = 0xff;
}
void __vesacon_redraw_text(void)
{
vesacon_update_characters(0, 0, __vesacon_text_rows, __vesacon_text_cols);
}
|
675301.c | /*
*------------------------------------------------------------------
* Copyright (c) 2019 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*------------------------------------------------------------------
*/
#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <crypto_native/crypto_native.h>
crypto_native_main_t crypto_native_main;
static void
crypto_native_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
vnet_crypto_key_index_t idx)
{
vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
crypto_native_main_t *cm = &crypto_native_main;
if (cm->key_fn[key->alg] == 0)
return;
if (kop == VNET_CRYPTO_KEY_OP_DEL)
{
if (idx >= vec_len (cm->key_data))
return;
if (cm->key_data[idx] == 0)
return;
clib_mem_free_s (cm->key_data[idx]);
cm->key_data[idx] = 0;
return;
}
vec_validate_aligned (cm->key_data, idx, CLIB_CACHE_LINE_BYTES);
if (kop == VNET_CRYPTO_KEY_OP_MODIFY && cm->key_data[idx])
{
clib_mem_free_s (cm->key_data[idx]);
}
cm->key_data[idx] = cm->key_fn[key->alg] (key);
}
clib_error_t *
crypto_native_init (vlib_main_t * vm)
{
crypto_native_main_t *cm = &crypto_native_main;
vlib_thread_main_t *tm = vlib_get_thread_main ();
clib_error_t *error = 0;
if (clib_cpu_supports_x86_aes () == 0 &&
clib_cpu_supports_aarch64_aes () == 0)
return 0;
vec_validate_aligned (cm->per_thread_data, tm->n_vlib_mains - 1,
CLIB_CACHE_LINE_BYTES);
cm->crypto_engine_index =
vnet_crypto_register_engine (vm, "native", 100,
"Native ISA Optimized Crypto");
#if __x86_64__
if (clib_cpu_supports_vaes ())
error = crypto_native_aes_cbc_init_vaes (vm);
else if (clib_cpu_supports_avx512f ())
error = crypto_native_aes_cbc_init_avx512 (vm);
else if (clib_cpu_supports_avx2 ())
error = crypto_native_aes_cbc_init_avx2 (vm);
else
error = crypto_native_aes_cbc_init_sse42 (vm);
if (error)
goto error;
if (clib_cpu_supports_pclmulqdq ())
{
if (clib_cpu_supports_vaes ())
error = crypto_native_aes_gcm_init_vaes (vm);
else if (clib_cpu_supports_avx512f ())
error = crypto_native_aes_gcm_init_avx512 (vm);
else if (clib_cpu_supports_avx2 ())
error = crypto_native_aes_gcm_init_avx2 (vm);
else
error = crypto_native_aes_gcm_init_sse42 (vm);
if (error)
goto error;
}
#endif
#if __aarch64__
if ((error = crypto_native_aes_cbc_init_neon (vm)))
goto error;
if ((error = crypto_native_aes_gcm_init_neon (vm)))
goto error;
#endif
vnet_crypto_register_key_handler (vm, cm->crypto_engine_index,
crypto_native_key_handler);
error:
if (error)
vec_free (cm->per_thread_data);
return error;
}
/* *INDENT-OFF* */
VLIB_INIT_FUNCTION (crypto_native_init) =
{
.runs_after = VLIB_INITS ("vnet_crypto_init"),
};
/* *INDENT-ON* */
#include <vpp/app/version.h>
/* *INDENT-OFF* */
VLIB_PLUGIN_REGISTER () = {
.version = VPP_BUILD_VER,
.description = "Intel IA32 Software Crypto Engine",
};
/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
*
* Local Variables:
* eval: (c-set-style "gnu")
* End:
*/
|
714936.c | #include "emu.h"
#include "includes/wc90b.h"
/***************************************************************************
Callbacks for the TileMap code
***************************************************************************/
static TILE_GET_INFO( get_bg_tile_info )
{
wc90b_state *state = machine.driver_data<wc90b_state>();
int attr = state->m_bgvideoram[tile_index];
int tile = state->m_bgvideoram[tile_index + 0x800];
SET_TILE_INFO(
9 + ((attr & 3) + ((attr >> 1) & 4)),
tile,
attr >> 4,
0);
}
static TILE_GET_INFO( get_fg_tile_info )
{
wc90b_state *state = machine.driver_data<wc90b_state>();
int attr = state->m_fgvideoram[tile_index];
int tile = state->m_fgvideoram[tile_index + 0x800];
SET_TILE_INFO(
1 + ((attr & 3) + ((attr >> 1) & 4)),
tile,
attr >> 4,
0);
}
static TILE_GET_INFO( get_tx_tile_info )
{
wc90b_state *state = machine.driver_data<wc90b_state>();
SET_TILE_INFO(
0,
state->m_txvideoram[tile_index + 0x800] + ((state->m_txvideoram[tile_index] & 0x07) << 8),
state->m_txvideoram[tile_index] >> 4,
0);
}
/***************************************************************************
Start the video hardware emulation.
***************************************************************************/
VIDEO_START( wc90b )
{
wc90b_state *state = machine.driver_data<wc90b_state>();
state->m_bg_tilemap = tilemap_create(machine, get_bg_tile_info,tilemap_scan_rows, 16,16,64,32);
state->m_fg_tilemap = tilemap_create(machine, get_fg_tile_info,tilemap_scan_rows,16,16,64,32);
state->m_tx_tilemap = tilemap_create(machine, get_tx_tile_info,tilemap_scan_rows, 8, 8,64,32);
tilemap_set_transparent_pen(state->m_fg_tilemap,15);
tilemap_set_transparent_pen(state->m_tx_tilemap,15);
}
/***************************************************************************
Memory handlers
***************************************************************************/
WRITE8_HANDLER( wc90b_bgvideoram_w )
{
wc90b_state *state = space->machine().driver_data<wc90b_state>();
state->m_bgvideoram[offset] = data;
tilemap_mark_tile_dirty(state->m_bg_tilemap,offset & 0x7ff);
}
WRITE8_HANDLER( wc90b_fgvideoram_w )
{
wc90b_state *state = space->machine().driver_data<wc90b_state>();
state->m_fgvideoram[offset] = data;
tilemap_mark_tile_dirty(state->m_fg_tilemap,offset & 0x7ff);
}
WRITE8_HANDLER( wc90b_txvideoram_w )
{
wc90b_state *state = space->machine().driver_data<wc90b_state>();
state->m_txvideoram[offset] = data;
tilemap_mark_tile_dirty(state->m_tx_tilemap,offset & 0x7ff);
}
/***************************************************************************
Display refresh
***************************************************************************/
static void draw_sprites(running_machine &machine, bitmap_t *bitmap, const rectangle *cliprect, int priority )
{
wc90b_state *state = machine.driver_data<wc90b_state>();
UINT8 *spriteram = state->m_spriteram;
int offs, sx, sy;
/* draw all visible sprites of specified priority */
for ( offs = state->m_spriteram_size - 8 ; offs >= 0 ; offs -= 8 )
{
if ( ( ~( spriteram[offs+3] >> 7 ) & 1 ) == priority )
{
int code = ( spriteram[offs + 3] & 0x3f ) << 4;
int bank = spriteram[offs + 0];
int flags = spriteram[offs + 4];
code += ( bank & 0xf0 ) >> 4;
code <<= 2;
code += ( bank & 0x0f ) >> 2;
sx = spriteram[offs + 2];
if (!(spriteram[offs + 3] & 0x40)) sx -= 0x0100;
sy = 240 - spriteram[offs + 1];
drawgfx_transpen( bitmap, cliprect,machine.gfx[17], code,
flags >> 4, /* color */
bank & 1, /* flipx */
bank & 2, /* flipy */
sx,
sy,15 );
}
}
}
SCREEN_UPDATE( wc90b )
{
wc90b_state *state = screen->machine().driver_data<wc90b_state>();
tilemap_set_scrollx(state->m_bg_tilemap,0,8 * (state->m_scroll2x[0] & 0x7f) + 256 - 4 + (state->m_scroll_x_lo[0] & 0x07));
tilemap_set_scrolly(state->m_bg_tilemap,0,state->m_scroll2y[0] + 1 + ((state->m_scroll2x[0] & 0x80) ? 256 : 0));
tilemap_set_scrollx(state->m_fg_tilemap,0,8 * (state->m_scroll1x[0] & 0x7f) + 256 - 6 + ((state->m_scroll_x_lo[0] & 0x38) >> 3));
tilemap_set_scrolly(state->m_fg_tilemap,0,state->m_scroll1y[0] + 1 + ((state->m_scroll1x[0] & 0x80) ? 256 : 0));
tilemap_draw(bitmap,cliprect,state->m_bg_tilemap,0,0);
tilemap_draw(bitmap,cliprect,state->m_fg_tilemap,0,0);
draw_sprites(screen->machine(), bitmap,cliprect, 1 );
tilemap_draw(bitmap,cliprect,state->m_tx_tilemap,0,0);
draw_sprites(screen->machine(), bitmap,cliprect, 0 );
return 0;
}
|
849018.c | /* -----------------------------------------------------------------
* Programmer(s): Jean M. Sexton @ SMU
* Slaven Peles @ LLNL
* -----------------------------------------------------------------
* Based on work by Scott D. Cohen, Alan C. Hindmarsh, Radu Serban,
* and Aaron Collier @ LLNL
* -----------------------------------------------------------------
* LLNS Copyright Start
* Copyright (c) 2014, Lawrence Livermore National Security
* This work was performed under the auspices of the U.S. Department
* of Energy by Lawrence Livermore National Laboratory in part under
* Contract W-7405-Eng-48 and in part under Contract DE-AC52-07NA27344.
* Produced at the Lawrence Livermore National Laboratory.
* All rights reserved.
* For details, see the LICENSE file.
* LLNS Copyright End
* -----------------------------------------------------------------
* This is the implementation file for a parhyp MPI implementation
* of the NVECTOR package.
* -----------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <nvector/nvector_parhyp.h>
#include <sundials/sundials_math.h>
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE RCONST(1.0)
#define ONEPT5 RCONST(1.5)
/* Error Message */
#define BAD_N1 "N_VNew_ParHyp -- Sum of local vector lengths differs from "
#define BAD_N2 "input global length. \n\n"
#define BAD_N BAD_N1 BAD_N2
/*
* -----------------------------------------------------------------
* Simplifying macros NV_CONTENT_PH, NV_DATA_PH, NV_LOCLENGTH_PH,
* NV_GLOBLENGTH_PH, and NV_COMM_PH
* -----------------------------------------------------------------
* In the descriptions below, the following user declarations
* are assumed:
*
* N_Vector v;
* sunindextype v_len, s_len, i;
*
* (1) NV_CONTENT_PH
*
* This routines gives access to the contents of the HYPRE
* vector wrapper (the N_Vector).
*
* The assignment v_cont = NV_CONTENT_PH(v) sets v_cont to be
* a pointer to the N_Vector content structure.
*
* (2) NV_DATA_PH, NV_LOCLENGTH_PH, NV_GLOBLENGTH_PH, and NV_COMM_PH
*
* These routines give access to the individual parts of
* the content structure of a parhyp N_Vector.
*
* The assignment v_llen = NV_LOCLENGTH_PH(v) sets v_llen to
* be the length of the local part of the vector v. The call
* NV_LOCLENGTH_PH(v) = llen_v generally should NOT be used! It
* will change locally stored value with the HYPRE local vector
* length, but it will NOT change the length of the actual HYPRE
* local vector.
*
* The assignment v_glen = NV_GLOBLENGTH_PH(v) sets v_glen to
* be the global length of the vector v. The call
* NV_GLOBLENGTH_PH(v) = glen_v generally should NOT be used! It
* only changes the locally stored copy of the HYPRE parallel vector
* length, but it will NOT change the length of the actual HYPRE
* parallel vector.
*
* The assignment v_comm = NV_COMM_PH(v) sets v_comm to be the
* MPI communicator of the vector v. The assignment
* NV_COMM_C(v) = comm_v sets the MPI communicator of v to be
* NV_COMM_PH(v) = comm_v generally should NOT be used! It
* will change locally stored value with the HYPRE parallel vector
* communicator, but it will NOT change the communicator of the
* actual HYPRE parallel vector.
*
* (3) NV_DATA_PH, NV_HYPRE_PARVEC_PH
*
* The assignment v_data = NV_DATA_PH(v) sets v_data to be
* a pointer to the first component of the data inside the
* local vector of the HYPRE_parhyp vector for the vector v.
* The assignment NV_DATA_PH(v) = data_v should NOT be used.
* Instead, use NV_HYPRE_PARVEC_PH to obtain pointer to HYPRE
* vector and then use HYPRE functions to manipulate vector data.
*
* The assignment v_parhyp = NV_HYPRE_PARVEC_PH(v) sets v_parhyp
* to be a pointer to hypre_ParVector of vector v. The assignment
* NV_HYPRE_PARVEC_PH(v) = parhyp_v sets pointer to
* hypre_ParVector of vector v to be parhyp_v.
*
* -----------------------------------------------------------------
*/
#define NV_CONTENT_PH(v) ( (N_VectorContent_ParHyp)(v->content) )
#define NV_LOCLENGTH_PH(v) ( NV_CONTENT_PH(v)->local_length )
#define NV_GLOBLENGTH_PH(v) ( NV_CONTENT_PH(v)->global_length )
#define NV_OWN_PARVEC_PH(v) ( NV_CONTENT_PH(v)->own_parvector )
#define NV_HYPRE_PARVEC_PH(v) ( NV_CONTENT_PH(v)->x )
#define NV_DATA_PH(v) ( NV_HYPRE_PARVEC_PH(v) == NULL ? NULL : hypre_VectorData(hypre_ParVectorLocalVector(NV_HYPRE_PARVEC_PH(v))) )
#define NV_COMM_PH(v) ( NV_CONTENT_PH(v)->comm )
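/*
 * Illustrative sketch (not part of the original source): a typical local
 * kernel written with the accessor macros above, assuming "v" is a valid
 * ParHyp N_Vector.  The same pattern appears in the exported operations
 * implemented later in this file.
 *
 *	sunindextype i, N = NV_LOCLENGTH_PH(v);
 *	realtype *vd = NV_DATA_PH(v);
 *
 *	for (i = 0; i < N; i++)
 *		vd[i] = ZERO;
 */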
/* Private function prototypes */
/* Reduction operations add/max/min over the processor group */
static realtype VAllReduce_ParHyp(realtype d, int op, MPI_Comm comm);
/* z=x */
/* static void VCopy_ParHyp(N_Vector x, N_Vector z); */
/* z=x+y */
static void VSum_ParHyp(N_Vector x, N_Vector y, N_Vector z);
/* z=x-y */
static void VDiff_ParHyp(N_Vector x, N_Vector y, N_Vector z);
/* z=-x */
/* static void VNeg_ParHyp(N_Vector x, N_Vector z); */
/* z=c(x+y) */
static void VScaleSum_ParHyp(realtype c, N_Vector x, N_Vector y, N_Vector z);
/* z=c(x-y) */
static void VScaleDiff_ParHyp(realtype c, N_Vector x, N_Vector y, N_Vector z);
/* z=ax+y */
static void VLin1_ParHyp(realtype a, N_Vector x, N_Vector y, N_Vector z);
/* z=ax-y */
static void VLin2_ParHyp(realtype a, N_Vector x, N_Vector y, N_Vector z);
/* y <- ax+y */
/* static void Vaxpy_ParHyp(realtype a, N_Vector x, N_Vector y); */
/* x <- ax */
/* static void VScaleBy_ParHyp(realtype a, N_Vector x); */
/*
* -----------------------------------------------------------------
* exported functions
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------
* Returns vector type ID. Used to identify vector implementation
* from abstract N_Vector interface.
*/
N_Vector_ID N_VGetVectorID_ParHyp(N_Vector v)
{
return SUNDIALS_NVEC_PARHYP;
}
/* ----------------------------------------------------------------
* Function to create a new parhyp vector without underlying
* HYPRE vector.
*/
N_Vector N_VNewEmpty_ParHyp(MPI_Comm comm,
sunindextype local_length,
sunindextype global_length)
{
N_Vector v;
N_Vector_Ops ops;
N_VectorContent_ParHyp content;
/* Create vector */
v = NULL;
v = (N_Vector) malloc(sizeof *v);
if (v == NULL) return(NULL);
/* Create vector operation structure */
ops = NULL;
ops = (N_Vector_Ops) malloc(sizeof(struct _generic_N_Vector_Ops));
if (ops == NULL) { free(v); return(NULL); }
ops->nvgetvectorid = N_VGetVectorID_ParHyp;
ops->nvclone = N_VClone_ParHyp;
ops->nvcloneempty = N_VCloneEmpty_ParHyp;
ops->nvdestroy = N_VDestroy_ParHyp;
ops->nvspace = N_VSpace_ParHyp;
ops->nvgetarraypointer = N_VGetArrayPointer_ParHyp;
ops->nvsetarraypointer = N_VSetArrayPointer_ParHyp;
ops->nvlinearsum = N_VLinearSum_ParHyp;
ops->nvconst = N_VConst_ParHyp;
ops->nvprod = N_VProd_ParHyp;
ops->nvdiv = N_VDiv_ParHyp;
ops->nvscale = N_VScale_ParHyp;
ops->nvabs = N_VAbs_ParHyp;
ops->nvinv = N_VInv_ParHyp;
ops->nvaddconst = N_VAddConst_ParHyp;
ops->nvdotprod = N_VDotProd_ParHyp;
ops->nvmaxnorm = N_VMaxNorm_ParHyp;
ops->nvwrmsnormmask = N_VWrmsNormMask_ParHyp;
ops->nvwrmsnorm = N_VWrmsNorm_ParHyp;
ops->nvmin = N_VMin_ParHyp;
ops->nvwl2norm = N_VWL2Norm_ParHyp;
ops->nvl1norm = N_VL1Norm_ParHyp;
ops->nvcompare = N_VCompare_ParHyp;
ops->nvinvtest = N_VInvTest_ParHyp;
ops->nvconstrmask = N_VConstrMask_ParHyp;
ops->nvminquotient = N_VMinQuotient_ParHyp;
/* Create content */
content = NULL;
content = (N_VectorContent_ParHyp) malloc(sizeof(struct _N_VectorContent_ParHyp));
if (content == NULL) { free(ops); free(v); return(NULL); }
/* Attach lengths and communicator */
content->local_length = local_length;
content->global_length = global_length;
content->comm = comm;
content->own_parvector = SUNFALSE;
content->x = NULL;
/* Attach content and ops */
v->content = content;
v->ops = ops;
return(v);
}
/* ----------------------------------------------------------------
* Function to create a parhyp N_Vector wrapper around user
* supplied HYPRE vector.
*/
N_Vector N_VMake_ParHyp(hypre_ParVector *x)
{
N_Vector v;
MPI_Comm comm = hypre_ParVectorComm(x);
HYPRE_Int global_length = hypre_ParVectorGlobalSize(x);
HYPRE_Int local_begin = hypre_ParVectorFirstIndex(x);
HYPRE_Int local_end = hypre_ParVectorLastIndex(x);
HYPRE_Int local_length = local_end - local_begin + 1;
v = NULL;
v = N_VNewEmpty_ParHyp(comm, local_length, global_length);
if (v == NULL)
return(NULL);
NV_OWN_PARVEC_PH(v) = SUNFALSE;
NV_HYPRE_PARVEC_PH(v) = x;
return(v);
}
/* ----------------------------------------------------------------
* Function to create an array of new parhyp vectors.
*/
N_Vector *N_VCloneVectorArray_ParHyp(int count, N_Vector w)
{
N_Vector *vs;
int j;
if (count <= 0) return(NULL);
vs = NULL;
vs = (N_Vector *) malloc(count * sizeof(N_Vector));
if(vs == NULL) return(NULL);
for (j = 0; j < count; j++) {
vs[j] = NULL;
vs[j] = N_VClone_ParHyp(w);
if (vs[j] == NULL) {
N_VDestroyVectorArray_ParHyp(vs, j-1);
return(NULL);
}
}
return(vs);
}
/* ----------------------------------------------------------------
* Function to create an array of new parhyp vector wrappers
* without underlying HYPRE vectors.
*/
N_Vector *N_VCloneVectorArrayEmpty_ParHyp(int count, N_Vector w)
{
N_Vector *vs;
int j;
if (count <= 0) return(NULL);
vs = NULL;
vs = (N_Vector *) malloc(count * sizeof(N_Vector));
if(vs == NULL) return(NULL);
for (j = 0; j < count; j++) {
vs[j] = NULL;
vs[j] = N_VCloneEmpty_ParHyp(w);
if (vs[j] == NULL) {
N_VDestroyVectorArray_ParHyp(vs, j-1);
return(NULL);
}
}
return(vs);
}
/* ----------------------------------------------------------------
* Function to free an array created with N_VCloneVectorArray_ParHyp
*/
void N_VDestroyVectorArray_ParHyp(N_Vector *vs, int count)
{
int j;
for (j = 0; j < count; j++)
N_VDestroy_ParHyp(vs[j]);
free(vs);
vs = NULL;
return;
}
/* ----------------------------------------------------------------
* Extract HYPRE vector
*/
hypre_ParVector* N_VGetVector_ParHyp(N_Vector v)
{
return NV_HYPRE_PARVEC_PH(v);
}
/* ----------------------------------------------------------------
* Function to print a parhyp vector.
* TODO: Consider using a HYPRE function for this.
*/
void N_VPrint_ParHyp(N_Vector x)
{
N_VPrintFile_ParHyp(x, stdout);
}
/* ----------------------------------------------------------------
* Function to print a parhyp vector.
* TODO: Consider using a HYPRE function for this.
*/
void N_VPrintFile_ParHyp(N_Vector x, FILE *outfile)
{
sunindextype i, N;
realtype *xd;
xd = NULL;
N = NV_LOCLENGTH_PH(x);
xd = NV_DATA_PH(x);
for (i = 0; i < N; i++) {
#if defined(SUNDIALS_EXTENDED_PRECISION)
fprintf(outfile, "%Lg\n", xd[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
fprintf(outfile, "%g\n", xd[i]);
#else
fprintf(outfile, "%g\n", xd[i]);
#endif
}
fprintf(outfile, "\n");
return;
}
/*
* -----------------------------------------------------------------
* implementation of vector operations
* -----------------------------------------------------------------
*/
N_Vector N_VCloneEmpty_ParHyp(N_Vector w)
{
N_Vector v;
N_Vector_Ops ops;
N_VectorContent_ParHyp content;
if (w == NULL) return(NULL);
/* Create vector */
v = NULL;
v = (N_Vector) malloc(sizeof *v);
if (v == NULL) return(NULL);
/* Added variables for hypre_parhyp initialization */
int nprocs, myid;
MPI_Comm_size(NV_COMM_PH(w), &nprocs);
MPI_Comm_rank(NV_COMM_PH(w), &myid);
/* Create vector operation structure */
ops = NULL;
ops = (N_Vector_Ops) malloc(sizeof(struct _generic_N_Vector_Ops));
if (ops == NULL) { free(v); return(NULL); }
ops->nvgetvectorid = w->ops->nvgetvectorid;
ops->nvclone = w->ops->nvclone;
ops->nvcloneempty = w->ops->nvcloneempty;
ops->nvdestroy = w->ops->nvdestroy;
ops->nvspace = w->ops->nvspace;
ops->nvgetarraypointer = w->ops->nvgetarraypointer;
ops->nvsetarraypointer = w->ops->nvsetarraypointer;
ops->nvlinearsum = w->ops->nvlinearsum;
ops->nvconst = w->ops->nvconst;
ops->nvprod = w->ops->nvprod;
ops->nvdiv = w->ops->nvdiv;
ops->nvscale = w->ops->nvscale;
ops->nvabs = w->ops->nvabs;
ops->nvinv = w->ops->nvinv;
ops->nvaddconst = w->ops->nvaddconst;
ops->nvdotprod = w->ops->nvdotprod;
ops->nvmaxnorm = w->ops->nvmaxnorm;
ops->nvwrmsnormmask = w->ops->nvwrmsnormmask;
ops->nvwrmsnorm = w->ops->nvwrmsnorm;
ops->nvmin = w->ops->nvmin;
ops->nvwl2norm = w->ops->nvwl2norm;
ops->nvl1norm = w->ops->nvl1norm;
ops->nvcompare = w->ops->nvcompare;
ops->nvinvtest = w->ops->nvinvtest;
ops->nvconstrmask = w->ops->nvconstrmask;
ops->nvminquotient = w->ops->nvminquotient;
/* Create content */
content = NULL;
content = (N_VectorContent_ParHyp) malloc(sizeof(struct _N_VectorContent_ParHyp));
if (content == NULL) { free(ops); free(v); return(NULL); }
/* Attach lengths and communicator */
content->local_length = NV_LOCLENGTH_PH(w);
content->global_length = NV_GLOBLENGTH_PH(w);
content->comm = NV_COMM_PH(w);
content->own_parvector = SUNFALSE;
content->x = NULL;
/* Attach content and ops */
v->content = content;
v->ops = ops;
return(v);
}
/*
* Clone HYPRE vector wrapper.
*
*/
N_Vector N_VClone_ParHyp(N_Vector w)
{
N_Vector v;
hypre_ParVector *vx;
const hypre_ParVector *wx = NV_HYPRE_PARVEC_PH(w);
v = NULL;
v = N_VCloneEmpty_ParHyp(w);
if (v==NULL)
return(NULL);
vx = hypre_ParVectorCreate(wx->comm, wx->global_size, wx->partitioning);
hypre_ParVectorInitialize(vx);
hypre_ParVectorSetPartitioningOwner(vx, 0);
hypre_ParVectorSetDataOwner(vx, 1);
hypre_SeqVectorSetDataOwner(hypre_ParVectorLocalVector(vx), 1);
NV_HYPRE_PARVEC_PH(v) = vx;
NV_OWN_PARVEC_PH(v) = SUNTRUE;
return(v);
}
void N_VDestroy_ParHyp(N_Vector v)
{
if ((NV_OWN_PARVEC_PH(v) == SUNTRUE)) {
hypre_ParVectorDestroy(NV_HYPRE_PARVEC_PH(v));
}
free(v->content); v->content = NULL;
free(v->ops); v->ops = NULL;
free(v); v = NULL;
return;
}
void N_VSpace_ParHyp(N_Vector v, sunindextype *lrw, sunindextype *liw)
{
MPI_Comm comm;
int npes;
comm = NV_COMM_PH(v);
MPI_Comm_size(comm, &npes);
*lrw = NV_GLOBLENGTH_PH(v);
*liw = 2*npes;
return;
}
/*
* This function is disabled in the ParHyp implementation and returns NULL.
* The user should extract the HYPRE vector using N_VGetVector_ParHyp and
* then use HYPRE functions to get a pointer to the raw data of the local
* HYPRE vector.
*/
realtype *N_VGetArrayPointer_ParHyp(N_Vector v)
{
return NULL; /* ((realtype *) NV_DATA_PH(v)); */
}
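/*
 * Illustrative sketch (not part of the original source): the access pattern
 * suggested by the comment above.  Instead of calling N_VGetArrayPointer,
 * extract the underlying HYPRE vector and query it through the HYPRE API.
 *
 *	hypre_ParVector *xpar = N_VGetVector_ParHyp(v);
 *	realtype *xd = hypre_VectorData(hypre_ParVectorLocalVector(xpar));
 */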
/*
* This method is not implemented for HYPRE vector wrapper.
* TODO: Put error handler in the function body.
*/
void N_VSetArrayPointer_ParHyp(realtype *v_data, N_Vector v)
{
/* Not implemented for Hypre vector */
}
/*
* Computes z[i] = a*x[i] + b*y[i]
*
*/
void N_VLinearSum_ParHyp(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype c, *xd, *yd, *zd;
N_Vector v1, v2;
booleantype test;
xd = yd = zd = NULL;
if ((b == ONE) && (z == y)) { /* BLAS usage: axpy y <- ax+y */
HYPRE_Complex alpha=a;
HYPRE_ParVectorAxpy( alpha, (HYPRE_ParVector) NV_HYPRE_PARVEC_PH(x),
(HYPRE_ParVector) NV_HYPRE_PARVEC_PH(y));
return;
}
if ((a == ONE) && (z == x)) { /* BLAS usage: axpy x <- by+x */
HYPRE_Complex beta=b;
HYPRE_ParVectorAxpy( beta, (HYPRE_ParVector) NV_HYPRE_PARVEC_PH(y),
(HYPRE_ParVector) NV_HYPRE_PARVEC_PH(x));
return;
}
/* Case: a == b == 1.0 */
if ((a == ONE) && (b == ONE)) {
VSum_ParHyp(x, y, z);
return;
}
/* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */
if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
v1 = test ? y : x;
v2 = test ? x : y;
VDiff_ParHyp(v2, v1, z);
return;
}
/* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */
/* if a or b is 0.0, then user should have called N_VScale */
if ((test = (a == ONE)) || (b == ONE)) {
c = test ? b : a;
v1 = test ? y : x;
v2 = test ? x : y;
VLin1_ParHyp(c, v1, v2, z);
return;
}
/* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */
if ((test = (a == -ONE)) || (b == -ONE)) {
c = test ? b : a;
v1 = test ? y : x;
v2 = test ? x : y;
VLin2_ParHyp(c, v1, v2, z);
return;
}
/* Case: a == b */
/* catches case both a and b are 0.0 - user should have called N_VConst */
if (a == b) {
VScaleSum_ParHyp(a, x, y, z);
return;
}
/* Case: a == -b */
if (a == -b) {
VScaleDiff_ParHyp(a, x, y, z);
return;
}
/* Do all cases not handled above:
(1) a == other, b == 0.0 - user should have called N_VScale
(2) a == 0.0, b == other - user should have called N_VScale
(3) a,b == other, a !=b, a != -b */
N = NV_LOCLENGTH_PH(x);
xd = NV_DATA_PH(x);
yd = NV_DATA_PH(y);
zd = NV_DATA_PH(z);
for (i = 0; i < N; i++)
zd[i] = (a*xd[i])+(b*yd[i]);
return;
}
void N_VConst_ParHyp(realtype c, N_Vector z)
{
HYPRE_Complex value = c;
HYPRE_ParVectorSetConstantValues( (HYPRE_ParVector) NV_HYPRE_PARVEC_PH(z), value);
return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise product z[i] = x[i]*y[i]
*/
void N_VProd_ParHyp(N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd, *yd, *zd;
xd = yd = zd = NULL;
N = NV_LOCLENGTH_PH(x);
xd = NV_DATA_PH(x);
yd = NV_DATA_PH(y);
zd = NV_DATA_PH(z);
for (i = 0; i < N; i++)
zd[i] = xd[i]*yd[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise division z[i] = x[i]/y[i]
*/
void N_VDiv_ParHyp(N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd, *yd, *zd;
xd = yd = zd = NULL;
N = NV_LOCLENGTH_PH(x);
xd = NV_DATA_PH(x);
yd = NV_DATA_PH(y);
zd = NV_DATA_PH(z);
for (i = 0; i < N; i++)
zd[i] = xd[i]/yd[i];
return;
}
void N_VScale_ParHyp(realtype c, N_Vector x, N_Vector z)
{
HYPRE_Complex value = c;
if (x != z) {
HYPRE_ParVectorCopy((HYPRE_ParVector) NV_HYPRE_PARVEC_PH(x), (HYPRE_ParVector) NV_HYPRE_PARVEC_PH(z));
}
HYPRE_ParVectorScale(value, (HYPRE_ParVector) NV_HYPRE_PARVEC_PH(z));
return;
}
void N_VAbs_ParHyp(N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd, *zd;
xd = zd = NULL;
N = NV_LOCLENGTH_PH(x);
xd = NV_DATA_PH(x);
zd = NV_DATA_PH(z);
for (i = 0; i < N; i++)
zd[i] = SUNRabs(xd[i]);
return;
}
void N_VInv_ParHyp(N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd, *zd;
xd = zd = NULL;
N = NV_LOCLENGTH_PH(x);
xd = NV_DATA_PH(x);
zd = NV_DATA_PH(z);
for (i = 0; i < N; i++)
zd[i] = ONE/xd[i];
return;
}
void N_VAddConst_ParHyp(N_Vector x, realtype b, N_Vector z)
{
sunindextype i, N;
realtype *xd, *zd;
xd = zd = NULL;
N = NV_LOCLENGTH_PH(x);
xd = NV_DATA_PH(x);
zd = NV_DATA_PH(z);
for (i = 0; i < N; i++)
zd[i] = xd[i] + b;
return;
}
realtype N_VDotProd_ParHyp(N_Vector x, N_Vector y)
{
HYPRE_Real gsum;
HYPRE_ParVectorInnerProd( (HYPRE_ParVector) NV_HYPRE_PARVEC_PH(x),
(HYPRE_ParVector) NV_HYPRE_PARVEC_PH(y), &gsum);
return(gsum);
}
realtype N_VMaxNorm_ParHyp(N_Vector x)
{
sunindextype i, N;
realtype max, *xd, gmax;
MPI_Comm comm;
xd = NULL;
N = NV_LOCLENGTH_PH(x);
xd = NV_DATA_PH(x);
comm = NV_COMM_PH(x);
max = ZERO;
for (i = 0; i < N; i++) {
if (SUNRabs(xd[i]) > max) max = SUNRabs(xd[i]);
}
gmax = VAllReduce_ParHyp(max, 2, comm);
return(gmax);
}
realtype N_VWrmsNorm_ParHyp(N_Vector x, N_Vector w)
{
sunindextype i, N, N_global;
realtype sum, prodi, *xd, *wd, gsum;
MPI_Comm comm;
sum = ZERO;
xd = wd = NULL;
N = NV_LOCLENGTH_PH(x);
N_global = NV_GLOBLENGTH_PH(x);
xd = NV_DATA_PH(x);
wd = NV_DATA_PH(w);
comm = NV_COMM_PH(x);
for (i = 0; i < N; i++) {
prodi = xd[i]*wd[i];
sum += SUNSQR(prodi);
}
gsum = VAllReduce_ParHyp(sum, 1, comm);
return(SUNRsqrt(gsum/N_global));
}
realtype N_VWrmsNormMask_ParHyp(N_Vector x, N_Vector w, N_Vector id)
{
sunindextype i, N, N_global;
realtype sum, prodi, *xd, *wd, *idd, gsum;
MPI_Comm comm;
sum = ZERO;
xd = wd = idd = NULL;
N = NV_LOCLENGTH_PH(x);
N_global = NV_GLOBLENGTH_PH(x);
xd = NV_DATA_PH(x);
wd = NV_DATA_PH(w);
idd = NV_DATA_PH(id);
comm = NV_COMM_PH(x);
for (i = 0; i < N; i++) {
if (idd[i] > ZERO) {
prodi = xd[i]*wd[i];
sum += SUNSQR(prodi);
}
}
gsum = VAllReduce_ParHyp(sum, 1, comm);
return(SUNRsqrt(gsum/N_global));
}
realtype N_VMin_ParHyp(N_Vector x)
{
sunindextype i, N;
realtype min, *xd, gmin;
MPI_Comm comm;
xd = NULL;
N = NV_LOCLENGTH_PH(x);
comm = NV_COMM_PH(x);
min = BIG_REAL;
if (N > 0) {
xd = NV_DATA_PH(x);
min = xd[0];
for (i = 1; i < N; i++) {
if (xd[i] < min)
min = xd[i];
}
}
gmin = VAllReduce_ParHyp(min, 3, comm);
return(gmin);
}
realtype N_VWL2Norm_ParHyp(N_Vector x, N_Vector w)
{
sunindextype i, N;
realtype sum, prodi, *xd, *wd, gsum;
MPI_Comm comm;
sum = ZERO;
xd = wd = NULL;
N = NV_LOCLENGTH_PH(x);
xd = NV_DATA_PH(x);
wd = NV_DATA_PH(w);
comm = NV_COMM_PH(x);
for (i = 0; i < N; i++) {
prodi = xd[i]*wd[i];
sum += SUNSQR(prodi);
}
gsum = VAllReduce_ParHyp(sum, 1, comm);
return(SUNRsqrt(gsum));
}
realtype N_VL1Norm_ParHyp(N_Vector x)
{
sunindextype i, N;
realtype sum, gsum, *xd;
MPI_Comm comm;
sum = ZERO;
xd = NULL;
N = NV_LOCLENGTH_PH(x);
xd = NV_DATA_PH(x);
comm = NV_COMM_PH(x);
for (i = 0; i<N; i++)
sum += SUNRabs(xd[i]);
gsum = VAllReduce_ParHyp(sum, 1, comm);
return(gsum);
}
void N_VCompare_ParHyp(realtype c, N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd, *zd;
xd = zd = NULL;
N = NV_LOCLENGTH_PH(x);
xd = NV_DATA_PH(x);
zd = NV_DATA_PH(z);
for (i = 0; i < N; i++) {
zd[i] = (SUNRabs(xd[i]) >= c) ? ONE : ZERO;
}
return;
}
booleantype N_VInvTest_ParHyp(N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd, *zd, val, gval;
MPI_Comm comm;
xd = zd = NULL;
N = NV_LOCLENGTH_PH(x);
xd = NV_DATA_PH(x);
zd = NV_DATA_PH(z);
comm = NV_COMM_PH(x);
val = ONE;
for (i = 0; i < N; i++) {
if (xd[i] == ZERO)
val = ZERO;
else
zd[i] = ONE/xd[i];
}
gval = VAllReduce_ParHyp(val, 3, comm);
if (gval == ZERO)
return(SUNFALSE);
else
return(SUNTRUE);
}
booleantype N_VConstrMask_ParHyp(N_Vector c, N_Vector x, N_Vector m)
{
sunindextype i, N;
realtype temp;
realtype *cd, *xd, *md;
MPI_Comm comm;
cd = xd = md = NULL;
N = NV_LOCLENGTH_PH(x);
xd = NV_DATA_PH(x);
cd = NV_DATA_PH(c);
md = NV_DATA_PH(m);
comm = NV_COMM_PH(x);
temp = ONE;
for (i = 0; i < N; i++) {
md[i] = ZERO;
if (cd[i] == ZERO) continue;
if (cd[i] > ONEPT5 || cd[i] < -ONEPT5) {
if (xd[i]*cd[i] <= ZERO) {
temp = ZERO;
md[i] = ONE;
}
continue;
}
if (cd[i] > HALF || cd[i] < -HALF) {
if (xd[i]*cd[i] < ZERO ) {
temp = ZERO;
md[i] = ONE;
}
}
}
temp = VAllReduce_ParHyp(temp, 3, comm);
if (temp == ONE)
return(SUNTRUE);
else
return(SUNFALSE);
}
realtype N_VMinQuotient_ParHyp(N_Vector num, N_Vector denom)
{
booleantype notEvenOnce;
sunindextype i, N;
realtype *nd, *dd, min;
MPI_Comm comm;
nd = dd = NULL;
N = NV_LOCLENGTH_PH(num);
nd = NV_DATA_PH(num);
dd = NV_DATA_PH(denom);
comm = NV_COMM_PH(num);
notEvenOnce = SUNTRUE;
min = BIG_REAL;
for (i = 0; i < N; i++) {
if (dd[i] == ZERO) continue;
else {
if (!notEvenOnce) min = SUNMIN(min, nd[i]/dd[i]);
else {
min = nd[i]/dd[i];
notEvenOnce = SUNFALSE;
}
}
}
return(VAllReduce_ParHyp(min, 3, comm));
}
/*
* -----------------------------------------------------------------
* private functions
* -----------------------------------------------------------------
*/
static realtype VAllReduce_ParHyp(realtype d, int op, MPI_Comm comm)
{
/*
* This function does a global reduction. The operation is
* sum if op = 1,
* max if op = 2,
* min if op = 3.
* The operation is over all processors in the communicator
*/
realtype out;
switch (op) {
case 1: MPI_Allreduce(&d, &out, 1, PVEC_REAL_MPI_TYPE, MPI_SUM, comm);
break;
case 2: MPI_Allreduce(&d, &out, 1, PVEC_REAL_MPI_TYPE, MPI_MAX, comm);
break;
case 3: MPI_Allreduce(&d, &out, 1, PVEC_REAL_MPI_TYPE, MPI_MIN, comm);
break;
default: break;
}
return(out);
}
static void VSum_ParHyp(N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd, *yd, *zd;
xd = yd = zd = NULL;
N = NV_LOCLENGTH_PH(x);
xd = NV_DATA_PH(x);
yd = NV_DATA_PH(y);
zd = NV_DATA_PH(z);
for (i = 0; i < N; i++)
zd[i] = xd[i]+yd[i];
return;
}
static void VDiff_ParHyp(N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd, *yd, *zd;
xd = yd = zd = NULL;
N = NV_LOCLENGTH_PH(x);
xd = NV_DATA_PH(x);
yd = NV_DATA_PH(y);
zd = NV_DATA_PH(z);
for (i = 0; i < N; i++)
zd[i] = xd[i]-yd[i];
return;
}
static void VScaleSum_ParHyp(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd, *yd, *zd;
xd = yd = zd = NULL;
N = NV_LOCLENGTH_PH(x);
xd = NV_DATA_PH(x);
yd = NV_DATA_PH(y);
zd = NV_DATA_PH(z);
for (i = 0; i < N; i++)
zd[i] = c*(xd[i]+yd[i]);
return;
}
static void VScaleDiff_ParHyp(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd, *yd, *zd;
xd = yd = zd = NULL;
N = NV_LOCLENGTH_PH(x);
xd = NV_DATA_PH(x);
yd = NV_DATA_PH(y);
zd = NV_DATA_PH(z);
for (i = 0; i < N; i++)
zd[i] = c*(xd[i]-yd[i]);
return;
}
static void VLin1_ParHyp(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd, *yd, *zd;
xd = yd = zd = NULL;
N = NV_LOCLENGTH_PH(x);
xd = NV_DATA_PH(x);
yd = NV_DATA_PH(y);
zd = NV_DATA_PH(z);
for (i = 0; i < N; i++)
zd[i] = (a*xd[i])+yd[i];
return;
}
static void VLin2_ParHyp(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd, *yd, *zd;
xd = yd = zd = NULL;
N = NV_LOCLENGTH_PH(x);
xd = NV_DATA_PH(x);
yd = NV_DATA_PH(y);
zd = NV_DATA_PH(z);
for (i = 0; i < N; i++)
zd[i] = (a*xd[i])-yd[i];
return;
}
|
674089.c | // This is an open source non-commercial project. Dear PVS-Studio, please check
// it. PVS-Studio Static Code Analyzer for C, C++ and C#: http://www.viva64.com
// screen.c: code for displaying on the screen
//
// Output to the screen (console, terminal emulator or GUI window) is minimized
// by remembering what is already on the screen, and only updating the parts
// that changed.
//
// The grid_*() functions write to the screen and handle updating grid->lines[].
//
// update_screen() is the function that updates all windows and status lines.
// It is called from the main loop when must_redraw is non-zero. It may be
// called from other places when an immediate screen update is needed.
//
// The part of the buffer that is displayed in a window is set with:
// - w_topline (first buffer line in window)
// - w_topfill (filler lines above the first line)
// - w_leftcol (leftmost window cell in window),
// - w_skipcol (skipped window cells of first line)
//
// Commands that only move the cursor around in a window, do not need to take
// action to update the display. The main loop will check if w_topline is
// valid and update it (scroll the window) when needed.
//
// Commands that scroll a window change w_topline and must call
// check_cursor() to move the cursor into the visible part of the window, and
// call redraw_later(VALID) to have the window displayed by update_screen()
// later.
//
// Commands that change text in the buffer must call changed_bytes() or
// changed_lines() to mark the area that changed and will require updating
// later. The main loop will call update_screen(), which will update each
// window that shows the changed buffer. This assumes text above the change
// can remain displayed as it is. Text after the change may need updating for
// scrolling, folding and syntax highlighting.
//
// Commands that change how a window is displayed (e.g., setting 'list') or
// invalidate the contents of a window in another way (e.g., change fold
// settings), must call redraw_later(NOT_VALID) to have the whole window
// redisplayed by update_screen() later.
//
// Commands that change how a buffer is displayed (e.g., setting 'tabstop')
// must call redraw_curbuf_later(NOT_VALID) to have all the windows for the
// buffer redisplayed by update_screen() later.
//
// Commands that change highlighting and possibly cause a scroll too must call
// redraw_later(SOME_VALID) to update the whole window but still use scrolling
// to avoid redrawing everything. But the length of displayed lines must not
// change, use NOT_VALID then.
//
// Commands that move the window position must call redraw_later(NOT_VALID).
// TODO(neovim): should minimize redrawing by scrolling when possible.
//
// Commands that change everything (e.g., resizing the screen) must call
// redraw_all_later(NOT_VALID) or redraw_all_later(CLEAR).
//
// Things that are handled indirectly:
// - When messages scroll the screen up, msg_scrolled will be set and
// update_screen() called to redraw.
//
#include <assert.h>
#include <inttypes.h>
#include <stdbool.h>
#include <string.h>
#include "nvim/log.h"
#include "nvim/vim.h"
#include "nvim/ascii.h"
#include "nvim/arabic.h"
#include "nvim/screen.h"
#include "nvim/buffer.h"
#include "nvim/charset.h"
#include "nvim/cursor.h"
#include "nvim/cursor_shape.h"
#include "nvim/diff.h"
#include "nvim/eval.h"
#include "nvim/ex_cmds.h"
#include "nvim/ex_cmds2.h"
#include "nvim/ex_getln.h"
#include "nvim/edit.h"
#include "nvim/fileio.h"
#include "nvim/fold.h"
#include "nvim/indent.h"
#include "nvim/getchar.h"
#include "nvim/highlight.h"
#include "nvim/main.h"
#include "nvim/mark.h"
#include "nvim/mbyte.h"
#include "nvim/memline.h"
#include "nvim/memory.h"
#include "nvim/menu.h"
#include "nvim/message.h"
#include "nvim/misc1.h"
#include "nvim/garray.h"
#include "nvim/move.h"
#include "nvim/normal.h"
#include "nvim/option.h"
#include "nvim/os_unix.h"
#include "nvim/path.h"
#include "nvim/popupmnu.h"
#include "nvim/quickfix.h"
#include "nvim/regexp.h"
#include "nvim/search.h"
#include "nvim/sign.h"
#include "nvim/spell.h"
#include "nvim/state.h"
#include "nvim/strings.h"
#include "nvim/syntax.h"
#include "nvim/terminal.h"
#include "nvim/ui.h"
#include "nvim/ui_compositor.h"
#include "nvim/undo.h"
#include "nvim/version.h"
#include "nvim/window.h"
#include "nvim/os/time.h"
#include "nvim/api/private/helpers.h"
#define MB_FILLER_CHAR '<' /* character used when a double-width character
* doesn't fit. */
#define W_ENDCOL(wp) (wp->w_wincol + wp->w_width)
#define W_ENDROW(wp) (wp->w_winrow + wp->w_height)
// temporary buffer for rendering a single screenline, so it can be
// compared with previous contents to calculate the smallest delta.
static size_t linebuf_size = 0;
static schar_T *linebuf_char = NULL;
static sattr_T *linebuf_attr = NULL;
static match_T search_hl; /* used for 'hlsearch' highlight matching */
static foldinfo_T win_foldinfo; /* info for 'foldcolumn' */
StlClickDefinition *tab_page_click_defs = NULL;
long tab_page_click_defs_size = 0;
// for line_putchar. Contains the state that needs to be remembered from
// putting one character to the next.
typedef struct {
const char_u *p;
int prev_c; // previous Arabic character
int prev_c1; // first composing char for prev_c
} LineState;
#define LINE_STATE(p) { p, 0, 0 }
/// Whether to call "ui_call_grid_resize" in win_grid_alloc
static bool send_grid_resize = false;
static bool conceal_cursor_used = false;
static bool redraw_popupmenu = false;
#ifdef INCLUDE_GENERATED_DECLARATIONS
# include "screen.c.generated.h"
#endif
#define SEARCH_HL_PRIORITY 0
/*
* Redraw the current window later, with update_screen(type).
* Set must_redraw only if not already set to a higher value.
* e.g. if must_redraw is CLEAR, type NOT_VALID will do nothing.
*/
void redraw_later(int type)
{
redraw_win_later(curwin, type);
}
void redraw_win_later(win_T *wp, int type)
{
if (!exiting && wp->w_redr_type < type) {
wp->w_redr_type = type;
if (type >= NOT_VALID)
wp->w_lines_valid = 0;
if (must_redraw < type) /* must_redraw is the maximum of all windows */
must_redraw = type;
}
}
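/* Usage sketch (hypothetical): redraw requests only ever raise the pending
 * redraw type, so a weaker request after a stronger one is a no-op. */
static void example_request_full_redraw(void)
{
  redraw_later(NOT_VALID);  // schedule a full redraw of the current window
  redraw_later(VALID);      // ignored: w_redr_type is already NOT_VALID
}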
/*
* Mark all windows to be redrawn later.
*/
void redraw_all_later(int type)
{
FOR_ALL_WINDOWS_IN_TAB(wp, curtab) {
redraw_win_later(wp, type);
}
// This may be needed when switching tabs.
if (must_redraw < type) {
must_redraw = type;
}
}
void screen_invalidate_highlights(void)
{
FOR_ALL_WINDOWS_IN_TAB(wp, curtab) {
redraw_win_later(wp, NOT_VALID);
wp->w_grid.valid = false;
}
}
/*
* Mark all windows that are editing the current buffer to be updated later.
*/
void redraw_curbuf_later(int type)
{
redraw_buf_later(curbuf, type);
}
void redraw_buf_later(buf_T *buf, int type)
{
FOR_ALL_WINDOWS_IN_TAB(wp, curtab) {
if (wp->w_buffer == buf) {
redraw_win_later(wp, type);
}
}
}
void redraw_buf_line_later(buf_T *buf, linenr_T line)
{
FOR_ALL_WINDOWS_IN_TAB(wp, curtab) {
if (wp->w_buffer == buf
&& line >= wp->w_topline && line < wp->w_botline) {
redrawWinline(wp, line);
}
}
}
/*
* Changed something in the current window, at buffer line "lnum", that
* requires that line and possibly other lines to be redrawn.
* Used when entering/leaving Insert mode with the cursor on a folded line.
* Used to remove the "$" from a change command.
* Note that when also inserting/deleting lines w_redraw_top and w_redraw_bot
* may become invalid and the whole window will have to be redrawn.
*/
void
redrawWinline(
win_T *wp,
linenr_T lnum
)
{
if (lnum >= wp->w_topline
&& lnum < wp->w_botline) {
if (wp->w_redraw_top == 0 || wp->w_redraw_top > lnum) {
wp->w_redraw_top = lnum;
}
if (wp->w_redraw_bot == 0 || wp->w_redraw_bot < lnum) {
wp->w_redraw_bot = lnum;
}
redraw_win_later(wp, VALID);
}
}
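/* Usage sketch (hypothetical helper): marking several changed lines one by
 * one simply widens the [w_redraw_top, w_redraw_bot] range that win_update()
 * repaints later; lines outside the displayed range are filtered out above. */
static void example_mark_lines_dirty(win_T *wp, linenr_T first, linenr_T last)
{
  for (linenr_T lnum = first; lnum <= last; lnum++) {
    redrawWinline(wp, lnum);
  }
}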
/*
* update all windows that are editing the current buffer
*/
void update_curbuf(int type)
{
redraw_curbuf_later(type);
update_screen(type);
}
/// Redraw the parts of the screen that is marked for redraw.
///
/// Most code shouldn't call this directly, rather use redraw_later() and
/// redraw_all_later() to mark parts of the screen as needing a redraw.
///
/// @param type set to NOT_VALID to force a redraw of the entire screen
void update_screen(int type)
{
static int did_intro = FALSE;
int did_one;
// Don't do anything if the screen structures are (not yet) valid.
if (!default_grid.chars) {
return;
}
if (must_redraw) {
if (type < must_redraw) /* use maximal type */
type = must_redraw;
/* must_redraw is reset here, so that when we run into some weird
* reason to redraw while busy redrawing (e.g., asynchronous
* scrolling), or when update_topline() in win_update() causes a
* scroll, the screen will be redrawn later or in win_update(). */
must_redraw = 0;
}
/* Need to update w_lines[]. */
if (curwin->w_lines_valid == 0 && type < NOT_VALID)
type = NOT_VALID;
/* Postpone the redrawing when it's not needed and when being called
* recursively. */
if (!redrawing() || updating_screen) {
redraw_later(type); /* remember type for next time */
must_redraw = type;
if (type > INVERTED_ALL)
curwin->w_lines_valid = 0; /* don't use w_lines[].wl_size now */
return;
}
updating_screen = TRUE;
++display_tick; /* let syntax code know we're in a next round of
* display updating */
// Tricky: vim code can reset msg_scrolled behind our back, so need
// separate bookkeeping for now.
if (msg_did_scroll) {
ui_call_win_scroll_over_reset();
msg_did_scroll = false;
}
// if the screen was scrolled up when displaying a message, scroll it down
if (msg_scrolled) {
clear_cmdline = true;
if (dy_flags & DY_MSGSEP) {
int valid = MAX(Rows - msg_scrollsize(), 0);
if (valid == 0) {
redraw_tabline = true;
}
FOR_ALL_WINDOWS_IN_TAB(wp, curtab) {
if (W_ENDROW(wp) > valid) {
wp->w_redr_type = NOT_VALID;
wp->w_lines_valid = 0;
}
if (W_ENDROW(wp) + wp->w_status_height > valid) {
wp->w_redr_status = true;
}
}
} else if (msg_scrolled > Rows - 5) { // clearing is faster
type = CLEAR;
} else if (type != CLEAR) {
check_for_delay(false);
grid_ins_lines(&default_grid, 0, msg_scrolled, (int)Rows,
0, (int)Columns);
FOR_ALL_WINDOWS_IN_TAB(wp, curtab) {
if (wp->w_floating) {
continue;
}
if (wp->w_winrow < msg_scrolled) {
if (W_ENDROW(wp) > msg_scrolled
&& wp->w_redr_type < REDRAW_TOP
&& wp->w_lines_valid > 0
&& wp->w_topline == wp->w_lines[0].wl_lnum) {
wp->w_upd_rows = msg_scrolled - wp->w_winrow;
wp->w_redr_type = REDRAW_TOP;
} else {
wp->w_redr_type = NOT_VALID;
if (W_ENDROW(wp) + wp->w_status_height
<= msg_scrolled) {
wp->w_redr_status = TRUE;
}
}
}
}
redraw_cmdline = TRUE;
redraw_tabline = TRUE;
}
msg_scrolled = 0;
need_wait_return = FALSE;
}
if (type >= CLEAR || !default_grid.valid) {
ui_comp_set_screen_valid(false);
}
win_ui_flush_positions();
msg_ext_check_clear();
/* reset cmdline_row now (may have been changed temporarily) */
compute_cmdrow();
/* Check for changed highlighting */
if (need_highlight_changed)
highlight_changed();
if (type == CLEAR) { // first clear screen
screenclear(); // will reset clear_cmdline
cmdline_screen_cleared(); // clear external cmdline state
type = NOT_VALID;
// must_redraw may be set indirectly, avoid another redraw later
must_redraw = 0;
} else if (!default_grid.valid) {
grid_invalidate(&default_grid);
default_grid.valid = true;
}
ui_comp_set_screen_valid(true);
if (clear_cmdline) /* going to clear cmdline (done below) */
check_for_delay(FALSE);
/* Force redraw when width of 'number' or 'relativenumber' column
* changes. */
if (curwin->w_redr_type < NOT_VALID
&& curwin->w_nrwidth != ((curwin->w_p_nu || curwin->w_p_rnu)
? number_width(curwin) : 0))
curwin->w_redr_type = NOT_VALID;
/*
* Only start redrawing if there is really something to do.
*/
if (type == INVERTED)
update_curswant();
if (curwin->w_redr_type < type
&& !((type == VALID
&& curwin->w_lines[0].wl_valid
&& curwin->w_topfill == curwin->w_old_topfill
&& curwin->w_botfill == curwin->w_old_botfill
&& curwin->w_topline == curwin->w_lines[0].wl_lnum)
|| (type == INVERTED
&& VIsual_active
&& curwin->w_old_cursor_lnum == curwin->w_cursor.lnum
&& curwin->w_old_visual_mode == VIsual_mode
&& (curwin->w_valid & VALID_VIRTCOL)
&& curwin->w_old_curswant == curwin->w_curswant)
))
curwin->w_redr_type = type;
// Redraw the tab pages line if needed.
if (redraw_tabline || type >= NOT_VALID) {
update_window_hl(curwin, type >= NOT_VALID);
FOR_ALL_TABS(tp) {
if (tp != curtab) {
update_window_hl(tp->tp_curwin, type >= NOT_VALID);
}
}
draw_tabline();
}
/*
* Correct stored syntax highlighting info for changes in each displayed
* buffer. Each buffer must only be done once.
*/
FOR_ALL_WINDOWS_IN_TAB(wp, curtab) {
update_window_hl(wp, type >= NOT_VALID);
if (wp->w_buffer->b_mod_set) {
win_T *wwp;
// Check if we already did this buffer.
for (wwp = firstwin; wwp != wp; wwp = wwp->w_next) {
if (wwp->w_buffer == wp->w_buffer) {
break;
}
}
if (wwp == wp && syntax_present(wp)) {
syn_stack_apply_changes(wp->w_buffer);
}
}
}
/*
* Go from top to bottom through the windows, redrawing the ones that need
* it.
*/
did_one = FALSE;
search_hl.rm.regprog = NULL;
FOR_ALL_WINDOWS_IN_TAB(wp, curtab) {
if (wp->w_redr_type == CLEAR && wp->w_floating && wp->w_grid.chars) {
grid_invalidate(&wp->w_grid);
wp->w_redr_type = NOT_VALID;
}
if (wp->w_redr_type != 0) {
if (!did_one) {
did_one = TRUE;
start_search_hl();
}
win_update(wp);
}
/* redraw status line after the window to minimize cursor movement */
if (wp->w_redr_status) {
win_redr_status(wp);
}
}
end_search_hl();
// May need to redraw the popup menu.
if (pum_drawn() && redraw_popupmenu) {
pum_redraw();
}
send_grid_resize = false;
redraw_popupmenu = false;
/* Reset b_mod_set flags. Going through all windows is probably faster
* than going through all buffers (there could be many buffers). */
FOR_ALL_WINDOWS_IN_TAB(wp, curtab) {
wp->w_buffer->b_mod_set = false;
}
updating_screen = FALSE;
/* Clear or redraw the command line. Done last, because scrolling may
* mess up the command line. */
if (clear_cmdline || redraw_cmdline) {
showmode();
}
/* May put up an introductory message when not editing a file */
if (!did_intro)
maybe_intro_message();
did_intro = TRUE;
// either cmdline is cleared, not drawn or mode is last drawn
cmdline_was_last_drawn = false;
}
/*
* Return TRUE if the cursor line in window "wp" may be concealed, according
* to the 'concealcursor' option.
*/
int conceal_cursor_line(win_T *wp)
{
int c;
if (*wp->w_p_cocu == NUL)
return FALSE;
if (get_real_state() & VISUAL)
c = 'v';
else if (State & INSERT)
c = 'i';
else if (State & NORMAL)
c = 'n';
else if (State & CMDLINE)
c = 'c';
else
return FALSE;
return vim_strchr(wp->w_p_cocu, c) != NULL;
}
// Check if the cursor line needs to be redrawn because of 'concealcursor'.
//
// When cursor is moved at the same time, both lines will be redrawn regardless.
void conceal_check_cursor_line(void)
{
bool should_conceal = conceal_cursor_line(curwin);
if (curwin->w_p_cole > 0 && (conceal_cursor_used != should_conceal)) {
redrawWinline(curwin, curwin->w_cursor.lnum);
// Need to recompute cursor column, e.g., when starting Visual mode
// without concealing.
curs_columns(true);
}
}
/// Whether cursorline is drawn in a special way
///
/// If true, both the old and the new cursorline will need
/// to be redrawn when moving the cursor within windows.
bool win_cursorline_standout(win_T *wp)
{
return wp->w_p_cul || (wp->w_p_cole > 0 && !conceal_cursor_line(wp));
}
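/* Usage sketch (hypothetical): when the cursor moves and the cursor line is
 * drawn specially, both the old and the new cursor line need repainting. */
static void example_cursor_moved(win_T *wp, linenr_T old_cursor_lnum)
{
  if (win_cursorline_standout(wp)) {
    redrawWinline(wp, old_cursor_lnum);
    redrawWinline(wp, wp->w_cursor.lnum);
  }
}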
/*
* Update a single window.
*
* This may cause the windows below it also to be redrawn (when clearing the
* screen or scrolling lines).
*
* How the window is redrawn depends on wp->w_redr_type. Each type also
* implies the one below it.
* NOT_VALID redraw the whole window
* SOME_VALID redraw the whole window but do scroll when possible
* REDRAW_TOP redraw the top w_upd_rows window lines, otherwise like VALID
* INVERTED redraw the changed part of the Visual area
* INVERTED_ALL redraw the whole Visual area
* VALID 1. scroll up/down to adjust for a changed w_topline
* 2. update lines at the top when scrolled down
* 3. redraw changed text:
* - if wp->w_buffer->b_mod_set set, update lines between
* b_mod_top and b_mod_bot.
* - if wp->w_redraw_top non-zero, redraw lines between
* wp->w_redraw_top and wp->w_redraw_bot.
* - continue redrawing when syntax status is invalid.
* 4. if scrolled up, update lines at the bottom.
* This results in three areas that may need updating:
* top: from first row to top_end (when scrolled down)
* mid: from mid_start to mid_end (update inversion or changed text)
* bot: from bot_start to last row (when scrolled up)
*/
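/*
 * Rough example of the common VALID case: the user edits a single line, so
 * changed_bytes() records it in b_mod_top/b_mod_bot; on the next
 * update_screen() pass this function derives mod_top/mod_bot from that,
 * leaves the top and bot areas alone (unless scrolling is needed) and only
 * redraws the mid rows that cover the changed line.
 */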
static void win_update(win_T *wp)
{
buf_T *buf = wp->w_buffer;
int type;
int top_end = 0; /* Below last row of the top area that needs
updating. 0 when no top area updating. */
int mid_start = 999; /* first row of the mid area that needs
updating. 999 when no mid area updating. */
int mid_end = 0; /* Below last row of the mid area that needs
updating. 0 when no mid area updating. */
int bot_start = 999; /* first row of the bot area that needs
updating. 999 when no bot area updating */
int scrolled_down = FALSE; /* TRUE when scrolled down when
w_topline got smaller a bit */
matchitem_T *cur; /* points to the match list */
int top_to_mod = FALSE; /* redraw above mod_top */
int row; /* current window row to display */
linenr_T lnum; /* current buffer lnum to display */
int idx; /* current index in w_lines[] */
int srow; /* starting row of the current line */
int eof = FALSE; /* if TRUE, we hit the end of the file */
int didline = FALSE; /* if TRUE, we finished the last line */
int i;
long j;
static int recursive = FALSE; /* being called recursively */
int old_botline = wp->w_botline;
long fold_count;
// Remember what happened to the previous line.
#define DID_NONE 1 // didn't update a line
#define DID_LINE 2 // updated a normal line
#define DID_FOLD 3 // updated a folded line
int did_update = DID_NONE;
linenr_T syntax_last_parsed = 0; /* last parsed text line */
linenr_T mod_top = 0;
linenr_T mod_bot = 0;
int save_got_int;
// If we can compute a change in the automatic sizing of the sign column
// under 'signcolumn=auto:X' and the signs currently placed in the buffer, it is
// better to figure it out here so we can redraw the entire screen for it.
buf_signcols(buf);
type = wp->w_redr_type;
win_grid_alloc(wp);
if (type >= NOT_VALID) {
wp->w_redr_status = true;
wp->w_lines_valid = 0;
}
// Window is zero-height: nothing to draw.
if (wp->w_grid.Rows == 0) {
wp->w_redr_type = 0;
return;
}
// Window is zero-width: Only need to draw the separator.
if (wp->w_grid.Columns == 0) {
// draw the vertical separator right of this window
draw_vsep_win(wp, 0);
wp->w_redr_type = 0;
return;
}
init_search_hl(wp);
/* Force redraw when width of 'number' or 'relativenumber' column
* changes. */
i = (wp->w_p_nu || wp->w_p_rnu) ? number_width(wp) : 0;
if (wp->w_nrwidth != i) {
type = NOT_VALID;
wp->w_nrwidth = i;
if (buf->terminal) {
terminal_check_size(buf->terminal);
}
} else if (buf->b_mod_set
&& buf->b_mod_xlines != 0
&& wp->w_redraw_top != 0) {
// When there are both inserted/deleted lines and specific lines to be
// redrawn, w_redraw_top and w_redraw_bot may be invalid, just redraw
// everything (only happens when redrawing is off for a while).
type = NOT_VALID;
} else {
/*
* Set mod_top to the first line that needs displaying because of
* changes. Set mod_bot to the first line after the changes.
*/
mod_top = wp->w_redraw_top;
if (wp->w_redraw_bot != 0)
mod_bot = wp->w_redraw_bot + 1;
else
mod_bot = 0;
if (buf->b_mod_set) {
if (mod_top == 0 || mod_top > buf->b_mod_top) {
mod_top = buf->b_mod_top;
/* Need to redraw lines above the change that may be included
* in a pattern match. */
if (syntax_present(wp)) {
mod_top -= buf->b_s.b_syn_sync_linebreaks;
if (mod_top < 1)
mod_top = 1;
}
}
if (mod_bot == 0 || mod_bot < buf->b_mod_bot)
mod_bot = buf->b_mod_bot;
/* When 'hlsearch' is on and using a multi-line search pattern, a
* change in one line may make the Search highlighting in a
* previous line invalid. Simple solution: redraw all visible
* lines above the change.
* Same for a match pattern.
*/
if (search_hl.rm.regprog != NULL
&& re_multiline(search_hl.rm.regprog))
top_to_mod = TRUE;
else {
cur = wp->w_match_head;
while (cur != NULL) {
if (cur->match.regprog != NULL
&& re_multiline(cur->match.regprog)) {
top_to_mod = TRUE;
break;
}
cur = cur->next;
}
}
}
if (mod_top != 0 && hasAnyFolding(wp)) {
linenr_T lnumt, lnumb;
/*
* A change in a line can cause lines above it to become folded or
* unfolded. Find the top most buffer line that may be affected.
* If the line was previously folded and displayed, get the first
* line of that fold. If the line is folded now, get the first
* folded line. Use the minimum of these two.
*/
/* Find last valid w_lines[] entry above mod_top. Set lnumt to
* the line below it. If there is no valid entry, use w_topline.
* Find the first valid w_lines[] entry below mod_bot. Set lnumb
* to this line. If there is no valid entry, use MAXLNUM. */
lnumt = wp->w_topline;
lnumb = MAXLNUM;
for (i = 0; i < wp->w_lines_valid; ++i)
if (wp->w_lines[i].wl_valid) {
if (wp->w_lines[i].wl_lastlnum < mod_top)
lnumt = wp->w_lines[i].wl_lastlnum + 1;
if (lnumb == MAXLNUM && wp->w_lines[i].wl_lnum >= mod_bot) {
lnumb = wp->w_lines[i].wl_lnum;
// When there is a fold column it might need updating
// in the next line ("J" just above an open fold).
if (compute_foldcolumn(wp, 0) > 0) {
lnumb++;
}
}
}
(void)hasFoldingWin(wp, mod_top, &mod_top, NULL, true, NULL);
if (mod_top > lnumt) {
mod_top = lnumt;
}
// Now do the same for the bottom line (one above mod_bot).
mod_bot--;
(void)hasFoldingWin(wp, mod_bot, NULL, &mod_bot, true, NULL);
mod_bot++;
if (mod_bot < lnumb) {
mod_bot = lnumb;
}
}
/* When a change starts above w_topline and the end is below
* w_topline, start redrawing at w_topline.
* If the end of the change is above w_topline: do like no change was
* made, but redraw the first line to find changes in syntax. */
if (mod_top != 0 && mod_top < wp->w_topline) {
if (mod_bot > wp->w_topline)
mod_top = wp->w_topline;
else if (syntax_present(wp))
top_end = 1;
}
/* When line numbers are displayed need to redraw all lines below
* inserted/deleted lines. */
if (mod_top != 0 && buf->b_mod_xlines != 0 && wp->w_p_nu)
mod_bot = MAXLNUM;
}
wp->w_redraw_top = 0; // reset for next time
wp->w_redraw_bot = 0;
/*
* When only displaying the lines at the top, set top_end. Used when
* window has scrolled down for msg_scrolled.
*/
if (type == REDRAW_TOP) {
j = 0;
for (i = 0; i < wp->w_lines_valid; ++i) {
j += wp->w_lines[i].wl_size;
if (j >= wp->w_upd_rows) {
top_end = j;
break;
}
}
if (top_end == 0)
/* not found (cannot happen?): redraw everything */
type = NOT_VALID;
else
/* top area defined, the rest is VALID */
type = VALID;
}
/*
* If there are no changes on the screen that require a complete redraw,
* handle three cases:
* 1: we are off the top of the screen by a few lines: scroll down
* 2: wp->w_topline is below wp->w_lines[0].wl_lnum: may scroll up
* 3: wp->w_topline is wp->w_lines[0].wl_lnum: find first entry in
* w_lines[] that needs updating.
*/
if ((type == VALID || type == SOME_VALID
|| type == INVERTED || type == INVERTED_ALL)
&& !wp->w_botfill && !wp->w_old_botfill
) {
if (mod_top != 0 && wp->w_topline == mod_top) {
/*
* w_topline is the first changed line, the scrolling will be done
* further down.
*/
} else if (wp->w_lines[0].wl_valid
&& (wp->w_topline < wp->w_lines[0].wl_lnum
|| (wp->w_topline == wp->w_lines[0].wl_lnum
&& wp->w_topfill > wp->w_old_topfill)
)) {
/*
* New topline is above old topline: May scroll down.
*/
if (hasAnyFolding(wp)) {
linenr_T ln;
/* count the number of lines we are off, counting a sequence
* of folded lines as one */
j = 0;
for (ln = wp->w_topline; ln < wp->w_lines[0].wl_lnum; ln++) {
j++;
if (j >= wp->w_grid.Rows - 2) {
break;
}
(void)hasFoldingWin(wp, ln, NULL, &ln, true, NULL);
}
} else
j = wp->w_lines[0].wl_lnum - wp->w_topline;
if (j < wp->w_grid.Rows - 2) { // not too far off
i = plines_m_win(wp, wp->w_topline, wp->w_lines[0].wl_lnum - 1);
/* insert extra lines for previously invisible filler lines */
if (wp->w_lines[0].wl_lnum != wp->w_topline)
i += diff_check_fill(wp, wp->w_lines[0].wl_lnum)
- wp->w_old_topfill;
if (i < wp->w_grid.Rows - 2) { // less than a screen off
// Try to insert the correct number of lines.
// If not the last window, delete the lines at the bottom.
// win_ins_lines may fail when the terminal can't do it.
win_scroll_lines(wp, 0, i);
if (wp->w_lines_valid != 0) {
// Need to update rows that are new, stop at the
// first one that scrolled down.
top_end = i;
scrolled_down = true;
// Move the entries that were scrolled, disable
// the entries for the lines to be redrawn.
if ((wp->w_lines_valid += j) > wp->w_grid.Rows) {
wp->w_lines_valid = wp->w_grid.Rows;
}
for (idx = wp->w_lines_valid; idx - j >= 0; idx--) {
wp->w_lines[idx] = wp->w_lines[idx - j];
}
while (idx >= 0) {
wp->w_lines[idx--].wl_valid = false;
}
}
} else {
mid_start = 0; // redraw all lines
}
} else {
mid_start = 0; // redraw all lines
}
} else {
/*
* New topline is at or below old topline: May scroll up.
* When topline didn't change, find first entry in w_lines[] that
* needs updating.
*/
/* try to find wp->w_topline in wp->w_lines[].wl_lnum */
j = -1;
row = 0;
for (i = 0; i < wp->w_lines_valid; i++) {
if (wp->w_lines[i].wl_valid
&& wp->w_lines[i].wl_lnum == wp->w_topline) {
j = i;
break;
}
row += wp->w_lines[i].wl_size;
}
if (j == -1) {
/* if wp->w_topline is not in wp->w_lines[].wl_lnum redraw all
* lines */
mid_start = 0;
} else {
/*
* Try to delete the correct number of lines.
* wp->w_topline is at wp->w_lines[i].wl_lnum.
*/
/* If the topline didn't change, delete old filler lines,
* otherwise delete filler lines of the new topline... */
if (wp->w_lines[0].wl_lnum == wp->w_topline)
row += wp->w_old_topfill;
else
row += diff_check_fill(wp, wp->w_topline);
/* ... but don't delete new filler lines. */
row -= wp->w_topfill;
if (row > 0) {
win_scroll_lines(wp, 0, -row);
bot_start = wp->w_grid.Rows - row;
}
if ((row == 0 || bot_start < 999) && wp->w_lines_valid != 0) {
/*
* Skip the lines (below the deleted lines) that are still
* valid and don't need redrawing. Copy their info
* upwards, to compensate for the deleted lines. Set
* bot_start to the first row that needs redrawing.
*/
bot_start = 0;
idx = 0;
for (;; ) {
wp->w_lines[idx] = wp->w_lines[j];
/* stop at line that didn't fit, unless it is still
* valid (no lines deleted) */
if (row > 0 && bot_start + row
+ (int)wp->w_lines[j].wl_size > wp->w_grid.Rows) {
wp->w_lines_valid = idx + 1;
break;
}
bot_start += wp->w_lines[idx++].wl_size;
/* stop at the last valid entry in w_lines[].wl_size */
if (++j >= wp->w_lines_valid) {
wp->w_lines_valid = idx;
break;
}
}
/* Correct the first entry for filler lines at the top
* when it won't get updated below. */
if (wp->w_p_diff && bot_start > 0)
wp->w_lines[0].wl_size =
plines_win_nofill(wp, wp->w_topline, true)
+ wp->w_topfill;
}
}
}
// When starting redraw in the first line, redraw all lines.
if (mid_start == 0) {
mid_end = wp->w_grid.Rows;
}
} else {
/* Not VALID or INVERTED: redraw all lines. */
mid_start = 0;
mid_end = wp->w_grid.Rows;
}
if (type == SOME_VALID) {
/* SOME_VALID: redraw all lines. */
mid_start = 0;
mid_end = wp->w_grid.Rows;
type = NOT_VALID;
}
/* check if we are updating or removing the inverted part */
if ((VIsual_active && buf == curwin->w_buffer)
|| (wp->w_old_cursor_lnum != 0 && type != NOT_VALID)) {
linenr_T from, to;
if (VIsual_active) {
if (VIsual_mode != wp->w_old_visual_mode || type == INVERTED_ALL) {
// If the type of Visual selection changed, redraw the whole
// selection. Also when the ownership of the X selection is
// gained or lost.
if (curwin->w_cursor.lnum < VIsual.lnum) {
from = curwin->w_cursor.lnum;
to = VIsual.lnum;
} else {
from = VIsual.lnum;
to = curwin->w_cursor.lnum;
}
/* redraw more when the cursor moved as well */
if (wp->w_old_cursor_lnum < from)
from = wp->w_old_cursor_lnum;
if (wp->w_old_cursor_lnum > to)
to = wp->w_old_cursor_lnum;
if (wp->w_old_visual_lnum < from)
from = wp->w_old_visual_lnum;
if (wp->w_old_visual_lnum > to)
to = wp->w_old_visual_lnum;
} else {
/*
* Find the line numbers that need to be updated: The lines
* between the old cursor position and the current cursor
* position. Also check if the Visual position changed.
*/
if (curwin->w_cursor.lnum < wp->w_old_cursor_lnum) {
from = curwin->w_cursor.lnum;
to = wp->w_old_cursor_lnum;
} else {
from = wp->w_old_cursor_lnum;
to = curwin->w_cursor.lnum;
if (from == 0) /* Visual mode just started */
from = to;
}
if (VIsual.lnum != wp->w_old_visual_lnum
|| VIsual.col != wp->w_old_visual_col) {
if (wp->w_old_visual_lnum < from
&& wp->w_old_visual_lnum != 0)
from = wp->w_old_visual_lnum;
if (wp->w_old_visual_lnum > to)
to = wp->w_old_visual_lnum;
if (VIsual.lnum < from)
from = VIsual.lnum;
if (VIsual.lnum > to)
to = VIsual.lnum;
}
}
/*
* If in block mode and changed column or curwin->w_curswant:
* update all lines.
* First compute the actual start and end column.
*/
if (VIsual_mode == Ctrl_V) {
colnr_T fromc, toc;
int save_ve_flags = ve_flags;
if (curwin->w_p_lbr)
ve_flags = VE_ALL;
getvcols(wp, &VIsual, &curwin->w_cursor, &fromc, &toc);
ve_flags = save_ve_flags;
++toc;
if (curwin->w_curswant == MAXCOL)
toc = MAXCOL;
if (fromc != wp->w_old_cursor_fcol
|| toc != wp->w_old_cursor_lcol) {
if (from > VIsual.lnum)
from = VIsual.lnum;
if (to < VIsual.lnum)
to = VIsual.lnum;
}
wp->w_old_cursor_fcol = fromc;
wp->w_old_cursor_lcol = toc;
}
} else {
/* Use the line numbers of the old Visual area. */
if (wp->w_old_cursor_lnum < wp->w_old_visual_lnum) {
from = wp->w_old_cursor_lnum;
to = wp->w_old_visual_lnum;
} else {
from = wp->w_old_visual_lnum;
to = wp->w_old_cursor_lnum;
}
}
/*
* There is no need to update lines above the top of the window.
*/
if (from < wp->w_topline)
from = wp->w_topline;
/*
* If we know the value of w_botline, use it to restrict the update to
* the lines that are visible in the window.
*/
if (wp->w_valid & VALID_BOTLINE) {
if (from >= wp->w_botline)
from = wp->w_botline - 1;
if (to >= wp->w_botline)
to = wp->w_botline - 1;
}
/*
* Find the minimal part to be updated.
* Watch out for scrolling that made entries in w_lines[] invalid.
* E.g., CTRL-U makes the first half of w_lines[] invalid and sets
* top_end; need to redraw from top_end to the "to" line.
* A middle mouse click with a Visual selection may change the text
* above the Visual area and reset wl_valid, do count these for
* mid_end (in srow).
*/
if (mid_start > 0) {
lnum = wp->w_topline;
idx = 0;
srow = 0;
if (scrolled_down)
mid_start = top_end;
else
mid_start = 0;
while (lnum < from && idx < wp->w_lines_valid) { /* find start */
if (wp->w_lines[idx].wl_valid)
mid_start += wp->w_lines[idx].wl_size;
else if (!scrolled_down)
srow += wp->w_lines[idx].wl_size;
++idx;
if (idx < wp->w_lines_valid && wp->w_lines[idx].wl_valid)
lnum = wp->w_lines[idx].wl_lnum;
else
++lnum;
}
srow += mid_start;
mid_end = wp->w_grid.Rows;
for (; idx < wp->w_lines_valid; idx++) { // find end
if (wp->w_lines[idx].wl_valid
&& wp->w_lines[idx].wl_lnum >= to + 1) {
/* Only update until first row of this line */
mid_end = srow;
break;
}
srow += wp->w_lines[idx].wl_size;
}
}
}
if (VIsual_active && buf == curwin->w_buffer) {
wp->w_old_visual_mode = VIsual_mode;
wp->w_old_cursor_lnum = curwin->w_cursor.lnum;
wp->w_old_visual_lnum = VIsual.lnum;
wp->w_old_visual_col = VIsual.col;
wp->w_old_curswant = curwin->w_curswant;
} else {
wp->w_old_visual_mode = 0;
wp->w_old_cursor_lnum = 0;
wp->w_old_visual_lnum = 0;
wp->w_old_visual_col = 0;
}
/* reset got_int, otherwise regexp won't work */
save_got_int = got_int;
got_int = 0;
// Set the time limit to 'redrawtime'.
proftime_T syntax_tm = profile_setlimit(p_rdt);
syn_set_timeout(&syntax_tm);
win_foldinfo.fi_level = 0;
/*
* Update all the window rows.
*/
idx = 0; /* first entry in w_lines[].wl_size */
row = 0;
srow = 0;
lnum = wp->w_topline; /* first line shown in window */
for (;; ) {
/* stop updating when reached the end of the window (check for _past_
* the end of the window is at the end of the loop) */
if (row == wp->w_grid.Rows) {
didline = true;
break;
}
/* stop updating when hit the end of the file */
if (lnum > buf->b_ml.ml_line_count) {
eof = TRUE;
break;
}
/* Remember the starting row of the line that is going to be dealt
* with. It is used further down when the line doesn't fit. */
srow = row;
// Update a line when it is in an area that needs updating, when it
// has changes or w_lines[idx] is invalid.
// "bot_start" may be halfway a wrapped line after using
// win_scroll_lines(), check if the current line includes it.
// When syntax folding is being used, the saved syntax states will
// already have been updated; we can't see where the syntax state is
// the same again, so just update until the end of the window.
if (row < top_end
|| (row >= mid_start && row < mid_end)
|| top_to_mod
|| idx >= wp->w_lines_valid
|| (row + wp->w_lines[idx].wl_size > bot_start)
|| (mod_top != 0
&& (lnum == mod_top
|| (lnum >= mod_top
&& (lnum < mod_bot
|| did_update == DID_FOLD
|| (did_update == DID_LINE
&& syntax_present(wp)
&& ((foldmethodIsSyntax(wp)
&& hasAnyFolding(wp))
|| syntax_check_changed(lnum)))
// match in fixed position might need redraw
// if lines were inserted or deleted
|| (wp->w_match_head != NULL
&& buf->b_mod_xlines != 0)))))) {
if (lnum == mod_top) {
top_to_mod = false;
}
/*
* When at start of changed lines: May scroll following lines
* up or down to minimize redrawing.
* Don't do this when the change continues until the end.
* Don't scroll when dollar_vcol >= 0, keep the "$".
*/
if (lnum == mod_top
&& mod_bot != MAXLNUM
&& !(dollar_vcol >= 0 && mod_bot == mod_top + 1)) {
int old_rows = 0;
int new_rows = 0;
int xtra_rows;
linenr_T l;
/* Count the old number of window rows, using w_lines[], which
* should still contain the sizes for the lines as they are
* currently displayed. */
for (i = idx; i < wp->w_lines_valid; ++i) {
/* Only valid lines have a meaningful wl_lnum. Invalid
* lines are part of the changed area. */
if (wp->w_lines[i].wl_valid
&& wp->w_lines[i].wl_lnum == mod_bot)
break;
old_rows += wp->w_lines[i].wl_size;
if (wp->w_lines[i].wl_valid
&& wp->w_lines[i].wl_lastlnum + 1 == mod_bot) {
/* Must have found the last valid entry above mod_bot.
* Add following invalid entries. */
++i;
while (i < wp->w_lines_valid
&& !wp->w_lines[i].wl_valid)
old_rows += wp->w_lines[i++].wl_size;
break;
}
}
if (i >= wp->w_lines_valid) {
/* We can't find a valid line below the changed lines,
* need to redraw until the end of the window.
* Inserting/deleting lines has no use. */
bot_start = 0;
} else {
/* Able to count old number of rows: Count new window
* rows, and may insert/delete lines */
j = idx;
for (l = lnum; l < mod_bot; l++) {
if (hasFoldingWin(wp, l, NULL, &l, true, NULL)) {
new_rows++;
} else if (l == wp->w_topline) {
new_rows += plines_win_nofill(wp, l, true) + wp->w_topfill;
} else {
new_rows += plines_win(wp, l, true);
}
j++;
if (new_rows > wp->w_grid.Rows - row - 2) {
// it's getting too much, must redraw the rest
new_rows = 9999;
break;
}
}
xtra_rows = new_rows - old_rows;
if (xtra_rows < 0) {
/* May scroll text up. If there is not enough
* remaining text or scrolling fails, must redraw the
* rest. If scrolling works, must redraw the text
* below the scrolled text. */
if (row - xtra_rows >= wp->w_grid.Rows - 2) {
mod_bot = MAXLNUM;
} else {
win_scroll_lines(wp, row, xtra_rows);
bot_start = wp->w_grid.Rows + xtra_rows;
}
} else if (xtra_rows > 0) {
/* May scroll text down. If there is not enough
* remaining text or scrolling fails, must redraw the
* rest. */
if (row + xtra_rows >= wp->w_grid.Rows - 2) {
mod_bot = MAXLNUM;
} else {
win_scroll_lines(wp, row + old_rows, xtra_rows);
if (top_end > row + old_rows) {
// Scrolled the part at the top that requires
// updating down.
top_end += xtra_rows;
}
}
}
/* When not updating the rest, may need to move w_lines[]
* entries. */
if (mod_bot != MAXLNUM && i != j) {
if (j < i) {
int x = row + new_rows;
/* move entries in w_lines[] upwards */
for (;; ) {
/* stop at last valid entry in w_lines[] */
if (i >= wp->w_lines_valid) {
wp->w_lines_valid = j;
break;
}
wp->w_lines[j] = wp->w_lines[i];
/* stop at a line that won't fit */
if (x + (int)wp->w_lines[j].wl_size
> wp->w_grid.Rows) {
wp->w_lines_valid = j + 1;
break;
}
x += wp->w_lines[j++].wl_size;
++i;
}
if (bot_start > x)
bot_start = x;
} else { /* j > i */
/* move entries in w_lines[] downwards */
j -= i;
wp->w_lines_valid += j;
if (wp->w_lines_valid > wp->w_grid.Rows) {
wp->w_lines_valid = wp->w_grid.Rows;
}
for (i = wp->w_lines_valid; i - j >= idx; i--) {
wp->w_lines[i] = wp->w_lines[i - j];
}
/* The w_lines[] entries for inserted lines are
* now invalid, but wl_size may be used above.
* Reset to zero. */
while (i >= idx) {
wp->w_lines[i].wl_size = 0;
wp->w_lines[i--].wl_valid = FALSE;
}
}
}
}
}
/*
* When lines are folded, display one line for all of them.
* Otherwise, display normally (can be several display lines when
* 'wrap' is on).
*/
fold_count = foldedCount(wp, lnum, &win_foldinfo);
if (fold_count != 0) {
fold_line(wp, fold_count, &win_foldinfo, lnum, row);
++row;
--fold_count;
wp->w_lines[idx].wl_folded = TRUE;
wp->w_lines[idx].wl_lastlnum = lnum + fold_count;
did_update = DID_FOLD;
} else if (idx < wp->w_lines_valid
&& wp->w_lines[idx].wl_valid
&& wp->w_lines[idx].wl_lnum == lnum
&& lnum > wp->w_topline
&& !(dy_flags & (DY_LASTLINE | DY_TRUNCATE))
&& srow + wp->w_lines[idx].wl_size > wp->w_grid.Rows
&& diff_check_fill(wp, lnum) == 0
) {
/* This line is not going to fit. Don't draw anything here,
* will draw "@ " lines below. */
row = wp->w_grid.Rows + 1;
} else {
prepare_search_hl(wp, lnum);
/* Let the syntax stuff know we skipped a few lines. */
if (syntax_last_parsed != 0 && syntax_last_parsed + 1 < lnum
&& syntax_present(wp))
syntax_end_parsing(syntax_last_parsed + 1);
/*
* Display one line.
*/
row = win_line(wp, lnum, srow, wp->w_grid.Rows, mod_top == 0, false);
wp->w_lines[idx].wl_folded = FALSE;
wp->w_lines[idx].wl_lastlnum = lnum;
did_update = DID_LINE;
syntax_last_parsed = lnum;
}
wp->w_lines[idx].wl_lnum = lnum;
wp->w_lines[idx].wl_valid = true;
if (row > wp->w_grid.Rows) { // past end of grid
// we may need the size of that too long line later on
if (dollar_vcol == -1) {
wp->w_lines[idx].wl_size = plines_win(wp, lnum, true);
}
idx++;
break;
}
if (dollar_vcol == -1)
wp->w_lines[idx].wl_size = row - srow;
++idx;
lnum += fold_count + 1;
} else {
if (wp->w_p_rnu) {
// 'relativenumber' set: The text doesn't need to be drawn, but
// the number column nearly always does.
fold_count = foldedCount(wp, lnum, &win_foldinfo);
if (fold_count != 0) {
fold_line(wp, fold_count, &win_foldinfo, lnum, row);
} else {
(void)win_line(wp, lnum, srow, wp->w_grid.Rows, true, true);
}
}
// This line does not need to be drawn, advance to the next one.
row += wp->w_lines[idx++].wl_size;
if (row > wp->w_grid.Rows) { // past end of screen
break;
}
lnum = wp->w_lines[idx - 1].wl_lastlnum + 1;
did_update = DID_NONE;
}
if (lnum > buf->b_ml.ml_line_count) {
eof = TRUE;
break;
}
}
/*
* End of loop over all window lines.
*/
if (idx > wp->w_lines_valid)
wp->w_lines_valid = idx;
/*
* Let the syntax stuff know we stop parsing here.
*/
if (syntax_last_parsed != 0 && syntax_present(wp))
syntax_end_parsing(syntax_last_parsed + 1);
/*
* If we didn't hit the end of the file, and we didn't finish the last
* line we were working on, then the line didn't fit.
*/
wp->w_empty_rows = 0;
wp->w_filler_rows = 0;
if (!eof && !didline) {
int at_attr = hl_combine_attr(wp->w_hl_attr_normal,
win_hl_attr(wp, HLF_AT));
if (lnum == wp->w_topline) {
/*
* Single line that does not fit!
* Don't overwrite it, it can be edited.
*/
wp->w_botline = lnum + 1;
} else if (diff_check_fill(wp, lnum) >= wp->w_grid.Rows - srow) {
// Window ends in filler lines.
wp->w_botline = lnum;
wp->w_filler_rows = wp->w_grid.Rows - srow;
} else if (dy_flags & DY_TRUNCATE) { // 'display' has "truncate"
int scr_row = wp->w_grid.Rows - 1;
// Last line isn't finished: Display "@@@" in the last screen line.
grid_puts_len(&wp->w_grid, (char_u *)"@@", 2, scr_row, 0, at_attr);
grid_fill(&wp->w_grid, scr_row, scr_row + 1, 2, (int)wp->w_grid.Columns,
'@', ' ', at_attr);
set_empty_rows(wp, srow);
wp->w_botline = lnum;
} else if (dy_flags & DY_LASTLINE) { // 'display' has "lastline"
// Last line isn't finished: Display "@@@" at the end.
grid_fill(&wp->w_grid, wp->w_grid.Rows - 1, wp->w_grid.Rows,
wp->w_grid.Columns - 3, wp->w_grid.Columns, '@', '@', at_attr);
set_empty_rows(wp, srow);
wp->w_botline = lnum;
} else {
win_draw_end(wp, '@', ' ', true, srow, wp->w_grid.Rows, at_attr);
wp->w_botline = lnum;
}
} else {
if (eof) { // we hit the end of the file
wp->w_botline = buf->b_ml.ml_line_count + 1;
j = diff_check_fill(wp, wp->w_botline);
if (j > 0 && !wp->w_botfill) {
// display filler lines at the end of the file
if (char2cells(wp->w_p_fcs_chars.diff) > 1) {
i = '-';
} else {
i = wp->w_p_fcs_chars.diff;
}
if (row + j > wp->w_grid.Rows) {
j = wp->w_grid.Rows - row;
}
win_draw_end(wp, i, i, true, row, row + (int)j, HLF_DED);
row += j;
}
} else if (dollar_vcol == -1)
wp->w_botline = lnum;
// make sure the rest of the screen is blank
// write the 'eob' character to rows that aren't part of the file.
win_draw_end(wp, wp->w_p_fcs_chars.eob, ' ', false, row, wp->w_grid.Rows,
HLF_EOB);
}
if (wp->w_redr_type >= REDRAW_TOP) {
draw_vsep_win(wp, 0);
}
syn_set_timeout(NULL);
/* Reset the type of redrawing required, the window has been updated. */
wp->w_redr_type = 0;
wp->w_old_topfill = wp->w_topfill;
wp->w_old_botfill = wp->w_botfill;
if (dollar_vcol == -1) {
/*
* There is a trick with w_botline. If we invalidate it on each
* change that might modify it, this will cause a lot of expensive
* calls to plines() in update_topline() each time. Therefore the
* value of w_botline is often approximated, and this value is used to
* compute the value of w_topline. If the value of w_botline was
* wrong, check that the value of w_topline is correct (cursor is on
* the visible part of the text). If it's not, we need to redraw
* again. Mostly this just means scrolling up a few lines, so it
* doesn't look too bad. Only do this for the current window (where
* changes are relevant).
*/
wp->w_valid |= VALID_BOTLINE;
if (wp == curwin && wp->w_botline != old_botline && !recursive) {
recursive = TRUE;
curwin->w_valid &= ~VALID_TOPLINE;
update_topline(); /* may invalidate w_botline again */
if (must_redraw != 0) {
/* Don't update for changes in buffer again. */
i = curbuf->b_mod_set;
curbuf->b_mod_set = false;
win_update(curwin);
must_redraw = 0;
curbuf->b_mod_set = i;
}
recursive = FALSE;
}
}
/* restore got_int, unless CTRL-C was hit while redrawing */
if (!got_int)
got_int = save_got_int;
}
/// Returns width of the signcolumn that should be used for the whole window
///
/// @param wp window we want signcolumn width from
/// @return max width of signcolumn (cell unit)
///
/// @note Returns a constant for now but hopefully we can improve neovim so that
/// the returned width adapts to the maximum number of marks to draw
/// for the window
/// TODO(teto)
int win_signcol_width(win_T *wp)
{
// 2 is vim default value
return 2;
}
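/* Usage sketch (hypothetical helper): the total number of cells reserved for
 * signs is the per-column width above times the number of sign columns, as
 * used by win_draw_end() and fold_line() below. */
static int example_total_signcol_cells(win_T *wp)
{
  return win_signcol_width(wp) * win_signcol_count(wp);
}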
/// Call grid_fill() with columns adjusted for 'rightleft' if needed.
/// Return the new offset.
static int win_fill_end(win_T *wp, int c1, int c2, int off, int width, int row,
int endrow, int attr)
{
int nn = off + width;
if (nn > wp->w_grid.Columns) {
nn = wp->w_grid.Columns;
}
if (wp->w_p_rl) {
grid_fill(&wp->w_grid, row, endrow, W_ENDCOL(wp) - nn, W_ENDCOL(wp) - off,
c1, c2, attr);
} else {
grid_fill(&wp->w_grid, row, endrow, off, nn, c1, c2, attr);
}
return nn;
}
/// Clear lines near the end of the window and mark the unused lines with "c1".
/// Use "c2" as filler character.
/// When "draw_margin" is true, then draw the sign/fold/number columns.
static void win_draw_end(win_T *wp, int c1, int c2, bool draw_margin, int row,
int endrow, hlf_T hl)
{
int n = 0;
if (draw_margin) {
// draw the fold column
int fdc = compute_foldcolumn(wp, 0);
if (fdc > 0) {
n = win_fill_end(wp, ' ', ' ', n, fdc, row, endrow,
win_hl_attr(wp, HLF_FC));
}
// draw the sign column
int count = win_signcol_count(wp);
if (count > 0) {
n = win_fill_end(wp, ' ', ' ', n, win_signcol_width(wp) * count, row,
endrow, win_hl_attr(wp, HLF_SC));
}
// draw the number column
if ((wp->w_p_nu || wp->w_p_rnu) && vim_strchr(p_cpo, CPO_NUMCOL) == NULL) {
n = win_fill_end(wp, ' ', ' ', n, number_width(wp) + 1, row, endrow,
win_hl_attr(wp, HLF_N));
}
}
int attr = hl_combine_attr(wp->w_hl_attr_normal, win_hl_attr(wp, hl));
if (wp->w_p_rl) {
grid_fill(&wp->w_grid, row, endrow, wp->w_wincol, W_ENDCOL(wp) - 1 - n,
c2, c2, attr);
grid_fill(&wp->w_grid, row, endrow, W_ENDCOL(wp) - 1 - n, W_ENDCOL(wp) - n,
c1, c2, attr);
} else {
grid_fill(&wp->w_grid, row, endrow, n, wp->w_grid.Columns, c1, c2, attr);
}
set_empty_rows(wp, row);
}
/*
* Advance **color_cols and return TRUE when there are columns to draw.
*/
static int advance_color_col(int vcol, int **color_cols)
{
while (**color_cols >= 0 && vcol > **color_cols)
++*color_cols;
return **color_cols >= 0;
}
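/* Usage sketch (hypothetical helper): wp->w_p_cc_cols is a list of
 * 'colorcolumn' values terminated by a negative entry; a caller advances past
 * the columns left of "vcol" and checks whether one still needs drawing. */
static int example_colorcolumn_pending(int vcol, win_T *wp)
{
  int *cols = wp->w_p_cc_cols;
  return cols != NULL && advance_color_col(vcol, &cols);
}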
// Compute the width of the foldcolumn. Based on 'foldcolumn' and how much
// space is available for window "wp", minus "col".
static int compute_foldcolumn(win_T *wp, int col)
{
int fdc = wp->w_p_fdc;
int wmw = wp == curwin && p_wmw == 0 ? 1 : p_wmw;
int wwidth = wp->w_grid.Columns;
if (fdc > wwidth - (col + wmw)) {
fdc = wwidth - (col + wmw);
}
return fdc;
}
/// Put a single char from an UTF-8 buffer into a line buffer.
///
/// Handles composing chars and arabic shaping state.
static int line_putchar(LineState *s, schar_T *dest, int maxcells, bool rl)
{
const char_u *p = s->p;
int cells = utf_ptr2cells(p);
int c_len = utfc_ptr2len(p);
int u8c, u8cc[MAX_MCO];
if (cells > maxcells) {
return -1;
}
u8c = utfc_ptr2char(p, u8cc);
if (*p < 0x80 && u8cc[0] == 0) {
schar_from_ascii(dest[0], *p);
s->prev_c = u8c;
} else {
if (p_arshape && !p_tbidi && arabic_char(u8c)) {
// Do Arabic shaping.
int pc, pc1, nc;
int pcc[MAX_MCO];
int firstbyte = *p;
// The idea of what is the previous and next
// character depends on 'rightleft'.
if (rl) {
pc = s->prev_c;
pc1 = s->prev_c1;
nc = utf_ptr2char(p + c_len);
s->prev_c1 = u8cc[0];
} else {
pc = utfc_ptr2char(p + c_len, pcc);
nc = s->prev_c;
pc1 = pcc[0];
}
s->prev_c = u8c;
u8c = arabic_shape(u8c, &firstbyte, &u8cc[0], pc, pc1, nc);
} else {
s->prev_c = u8c;
}
schar_from_cc(dest[0], u8c, u8cc);
}
if (cells > 1) {
dest[1][0] = 0;
}
s->p += c_len;
return cells;
}
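/* Usage sketch (hypothetical helper, simplified from the loop in fold_line()
 * below): render a NUL terminated UTF-8 string into linebuf_char starting at
 * "off", stopping when the next character no longer fits in "maxcells". */
static int example_render_string(const char_u *text, int off, int maxcells,
                                 bool rl)
{
  LineState s = LINE_STATE(text);
  int col = 0;
  while (*s.p != NUL) {
    int cells = line_putchar(&s, &linebuf_char[off + col], maxcells - col, rl);
    if (cells == -1) {
      break;  // next character does not fit
    }
    col += cells;
  }
  return col;  // number of screen cells written
}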
/*
* Display one folded line.
*/
static void fold_line(win_T *wp, long fold_count, foldinfo_T *foldinfo, linenr_T lnum, int row)
{
char_u buf[FOLD_TEXT_LEN];
pos_T *top, *bot;
linenr_T lnume = lnum + fold_count - 1;
int len;
char_u *text;
int fdc;
int col;
int txtcol;
int off;
int ri;
/* Build the fold line:
* 1. Add the cmdwin_type for the command-line window
* 2. Add the 'foldcolumn'
* 3. Add the 'number' or 'relativenumber' column
* 4. Compose the text
* 5. Add the text
* 6. set highlighting for the Visual area and other text
*/
col = 0;
off = 0;
/*
* 1. Add the cmdwin_type for the command-line window
* Ignores 'rightleft', this window is never right-left.
*/
if (cmdwin_type != 0 && wp == curwin) {
schar_from_ascii(linebuf_char[off], cmdwin_type);
linebuf_attr[off] = win_hl_attr(wp, HLF_AT);
col++;
}
// 2. Add the 'foldcolumn'
// Reduce the width when there is not enough space.
fdc = compute_foldcolumn(wp, col);
if (fdc > 0) {
fill_foldcolumn(buf, wp, TRUE, lnum);
if (wp->w_p_rl) {
int i;
copy_text_attr(off + wp->w_grid.Columns - fdc - col, buf, fdc,
win_hl_attr(wp, HLF_FC));
// reverse the fold column
for (i = 0; i < fdc; i++) {
schar_from_ascii(linebuf_char[off + wp->w_grid.Columns - i - 1 - col],
buf[i]);
}
} else {
copy_text_attr(off + col, buf, fdc, win_hl_attr(wp, HLF_FC));
}
col += fdc;
}
# define RL_MEMSET(p, v, l) if (wp->w_p_rl) { \
for (ri = 0; ri < l; ri++) { \
linebuf_attr[off + (wp->w_grid.Columns - (p) - (l)) + ri] = v; \
} \
} else { \
for (ri = 0; ri < l; ri++) { \
linebuf_attr[off + (p) + ri] = v; \
} \
}
/* Set all attributes of the 'number' or 'relativenumber' column and the
* text */
RL_MEMSET(col, win_hl_attr(wp, HLF_FL), wp->w_grid.Columns - col);
// If signs are being displayed, add spaces.
if (win_signcol_count(wp) > 0) {
len = wp->w_grid.Columns - col;
if (len > 0) {
int len_max = win_signcol_width(wp) * win_signcol_count(wp);
if (len > len_max) {
len = len_max;
}
copy_text_attr(off + col, (char_u *)" ", len,
win_hl_attr(wp, HLF_FL));
col += len;
}
}
/*
* 3. Add the 'number' or 'relativenumber' column
*/
if (wp->w_p_nu || wp->w_p_rnu) {
len = wp->w_grid.Columns - col;
if (len > 0) {
int w = number_width(wp);
long num;
char *fmt = "%*ld ";
if (len > w + 1)
len = w + 1;
if (wp->w_p_nu && !wp->w_p_rnu)
/* 'number' + 'norelativenumber' */
num = (long)lnum;
else {
/* 'relativenumber', don't use negative numbers */
num = labs((long)get_cursor_rel_lnum(wp, lnum));
if (num == 0 && wp->w_p_nu && wp->w_p_rnu) {
/* 'number' + 'relativenumber': cursor line shows absolute
* line number */
num = lnum;
fmt = "%-*ld ";
}
}
snprintf((char *)buf, FOLD_TEXT_LEN, fmt, w, num);
if (wp->w_p_rl) {
// the line number isn't reversed
copy_text_attr(off + wp->w_grid.Columns - len - col, buf, len,
win_hl_attr(wp, HLF_FL));
} else {
copy_text_attr(off + col, buf, len, win_hl_attr(wp, HLF_FL));
}
col += len;
}
}
/*
* 4. Compose the folded-line string with 'foldtext', if set.
*/
text = get_foldtext(wp, lnum, lnume, foldinfo, buf);
txtcol = col; /* remember where text starts */
// 5. move the text to linebuf_char[off]. Fill up with "fold".
// Right-left text is put in columns 0 - number-col, normal text is put
// in columns number-col - window-width.
int idx;
if (wp->w_p_rl) {
idx = off;
} else {
idx = off + col;
}
LineState s = LINE_STATE(text);
while (*s.p != NUL) {
// TODO(bfredl): cargo-culted from the old Vim code:
// if(col + cells > wp->w_width - (wp->w_p_rl ? col : 0)) { break; }
// This is obviously wrong. If Vim ever fixes this, solve for "cells" again
// in the correct condition.
int maxcells = wp->w_grid.Columns - col - (wp->w_p_rl ? col : 0);
int cells = line_putchar(&s, &linebuf_char[idx], maxcells, wp->w_p_rl);
if (cells == -1) {
break;
}
col += cells;
idx += cells;
}
/* Fill the rest of the line with the fold filler */
if (wp->w_p_rl)
col -= txtcol;
schar_T sc;
schar_from_char(sc, wp->w_p_fcs_chars.fold);
while (col < wp->w_grid.Columns
- (wp->w_p_rl ? txtcol : 0)
) {
schar_copy(linebuf_char[off+col++], sc);
}
if (text != buf)
xfree(text);
/*
* 6. set highlighting for the Visual area and other text.
* If all folded lines are in the Visual area, highlight the line.
*/
if (VIsual_active && wp->w_buffer == curwin->w_buffer) {
if (ltoreq(curwin->w_cursor, VIsual)) {
/* Visual is after curwin->w_cursor */
top = &curwin->w_cursor;
bot = &VIsual;
} else {
/* Visual is before curwin->w_cursor */
top = &VIsual;
bot = &curwin->w_cursor;
}
if (lnum >= top->lnum
&& lnume <= bot->lnum
&& (VIsual_mode != 'v'
|| ((lnum > top->lnum
|| (lnum == top->lnum
&& top->col == 0))
&& (lnume < bot->lnum
|| (lnume == bot->lnum
&& (bot->col - (*p_sel == 'e'))
>= (colnr_T)STRLEN(ml_get_buf(wp->w_buffer, lnume,
FALSE))))))) {
if (VIsual_mode == Ctrl_V) {
// Visual block mode: highlight the chars part of the block
if (wp->w_old_cursor_fcol + txtcol < (colnr_T)wp->w_grid.Columns) {
if (wp->w_old_cursor_lcol != MAXCOL
&& wp->w_old_cursor_lcol + txtcol
< (colnr_T)wp->w_grid.Columns) {
len = wp->w_old_cursor_lcol;
} else {
len = wp->w_grid.Columns - txtcol;
}
RL_MEMSET(wp->w_old_cursor_fcol + txtcol, win_hl_attr(wp, HLF_V),
len - (int)wp->w_old_cursor_fcol);
}
} else {
// Set all attributes of the text
RL_MEMSET(txtcol, win_hl_attr(wp, HLF_V), wp->w_grid.Columns - txtcol);
}
}
}
// Show colorcolumn in the fold line, but let cursorcolumn override it.
if (wp->w_p_cc_cols) {
int i = 0;
int j = wp->w_p_cc_cols[i];
int old_txtcol = txtcol;
while (j > -1) {
txtcol += j;
if (wp->w_p_wrap) {
txtcol -= wp->w_skipcol;
} else {
txtcol -= wp->w_leftcol;
}
if (txtcol >= 0 && txtcol < wp->w_grid.Columns) {
linebuf_attr[off + txtcol] =
hl_combine_attr(linebuf_attr[off + txtcol], win_hl_attr(wp, HLF_MC));
}
txtcol = old_txtcol;
j = wp->w_p_cc_cols[++i];
}
}
/* Show 'cursorcolumn' in the fold line. */
if (wp->w_p_cuc) {
txtcol += wp->w_virtcol;
if (wp->w_p_wrap)
txtcol -= wp->w_skipcol;
else
txtcol -= wp->w_leftcol;
if (txtcol >= 0 && txtcol < wp->w_grid.Columns) {
linebuf_attr[off + txtcol] = hl_combine_attr(
linebuf_attr[off + txtcol], win_hl_attr(wp, HLF_CUC));
}
}
grid_put_linebuf(&wp->w_grid, row, 0, wp->w_grid.Columns, wp->w_grid.Columns,
false, wp, wp->w_hl_attr_normal, false);
/*
* Update w_cline_height and w_cline_folded if the cursor line was
* updated (saves a call to plines() later).
*/
if (wp == curwin
&& lnum <= curwin->w_cursor.lnum
&& lnume >= curwin->w_cursor.lnum) {
curwin->w_cline_row = row;
curwin->w_cline_height = 1;
curwin->w_cline_folded = true;
curwin->w_valid |= (VALID_CHEIGHT|VALID_CROW);
conceal_cursor_used = conceal_cursor_line(curwin);
}
}
/// Copy "buf[len]" to linebuf_char["off"] and set attributes to "attr".
///
/// Only works for ASCII text!
static void copy_text_attr(int off, char_u *buf, int len, int attr)
{
int i;
for (i = 0; i < len; i++) {
schar_from_ascii(linebuf_char[off + i], buf[i]);
linebuf_attr[off + i] = attr;
}
}
/*
* Fill the foldcolumn at "p" for window "wp".
* Only to be called when 'foldcolumn' > 0.
*/
static void
fill_foldcolumn (
char_u *p,
win_T *wp,
int closed, /* TRUE or FALSE */
linenr_T lnum /* current line number */
)
{
int i = 0;
int level;
int first_level;
int empty;
int fdc = compute_foldcolumn(wp, 0);
// Init to all spaces.
memset(p, ' ', (size_t)fdc);
level = win_foldinfo.fi_level;
if (level > 0) {
// If there is only one column put more info in it.
empty = (fdc == 1) ? 0 : 1;
// If the column is too narrow, we start at the lowest level that
// fits and use numbers to indicate the depth.
first_level = level - fdc - closed + 1 + empty;
if (first_level < 1) {
first_level = 1;
}
for (i = 0; i + empty < fdc; i++) {
if (win_foldinfo.fi_lnum == lnum
&& first_level + i >= win_foldinfo.fi_low_level) {
p[i] = '-';
} else if (first_level == 1) {
p[i] = '|';
} else if (first_level + i <= 9) {
p[i] = '0' + first_level + i;
} else {
p[i] = '>';
}
if (first_level + i == level) {
break;
}
}
}
if (closed) {
p[i >= fdc ? i - 1 : i] = '+';
}
}
/*
* Display line "lnum" of window 'wp' on the screen.
* Start at row "startrow", stop when "endrow" is reached.
* wp->w_virtcol needs to be valid.
*
* Return the number of last row the line occupies.
*/
static int
win_line (
win_T *wp,
linenr_T lnum,
int startrow,
int endrow,
bool nochange, // not updating for changed text
bool number_only // only update the number column
)
{
int c = 0; // init for GCC
long vcol = 0; // virtual column (for tabs)
long vcol_sbr = -1; // virtual column after showbreak
long vcol_prev = -1; // "vcol" of previous character
char_u *line; // current line
char_u *ptr; // current position in "line"
int row; // row in the window, excl w_winrow
ScreenGrid *grid = &wp->w_grid; // grid specific to the window
char_u extra[18]; // line number and 'fdc' must fit in here
int n_extra = 0; // number of extra chars
char_u *p_extra = NULL; // string of extra chars, plus NUL
char_u *p_extra_free = NULL; // p_extra needs to be freed
int c_extra = NUL; // extra chars, all the same
int c_final = NUL; // final char, mandatory if set
int extra_attr = 0; // attributes when n_extra != 0
static char_u *at_end_str = (char_u *)""; // used for p_extra when displaying
// curwin->w_p_lcs_chars.eol at
// end-of-line
int lcs_eol_one = wp->w_p_lcs_chars.eol; // 'eol' until it's been used
int lcs_prec_todo = wp->w_p_lcs_chars.prec; // 'prec' until it's been used
/* saved "extra" items for when draw_state becomes WL_LINE (again) */
int saved_n_extra = 0;
char_u *saved_p_extra = NULL;
int saved_c_extra = 0;
int saved_c_final = 0;
int saved_char_attr = 0;
int n_attr = 0; /* chars with special attr */
int saved_attr2 = 0; /* char_attr saved for n_attr */
int n_attr3 = 0; /* chars with overruling special attr */
int saved_attr3 = 0; /* char_attr saved for n_attr3 */
int n_skip = 0; /* nr of chars to skip for 'nowrap' */
int fromcol = 0, tocol = 0; // start/end of inverting
int fromcol_prev = -2; // start of inverting after cursor
int noinvcur = false; // don't invert the cursor
pos_T *top, *bot;
int lnum_in_visual_area = false;
pos_T pos;
long v;
int char_attr = 0; /* attributes for next character */
int attr_pri = FALSE; /* char_attr has priority */
int area_highlighting = FALSE; /* Visual or incsearch highlighting
in this line */
int attr = 0; /* attributes for area highlighting */
int area_attr = 0; /* attributes desired by highlighting */
int search_attr = 0; /* attributes desired by 'hlsearch' */
int vcol_save_attr = 0; /* saved attr for 'cursorcolumn' */
int syntax_attr = 0; /* attributes desired by syntax */
int has_syntax = FALSE; /* this buffer has syntax highl. */
int save_did_emsg;
int eol_hl_off = 0; // 1 if highlighted char after EOL
int draw_color_col = false; // highlight colorcolumn
int *color_cols = NULL; // pointer to according columns array
bool has_spell = false; // this buffer has spell checking
# define SPWORDLEN 150
char_u nextline[SPWORDLEN * 2]; /* text with start of the next line */
int nextlinecol = 0; /* column where nextline[] starts */
int nextline_idx = 0; /* index in nextline[] where next line
starts */
int spell_attr = 0; /* attributes desired by spelling */
int word_end = 0; /* last byte with same spell_attr */
static linenr_T checked_lnum = 0; /* line number for "checked_col" */
static int checked_col = 0; /* column in "checked_lnum" up to which
* there are no spell errors */
static int cap_col = -1; // column to check for Cap word
static linenr_T capcol_lnum = 0; // line number where "cap_col"
int cur_checked_col = 0; // checked column for current line
int extra_check = 0; // has syntax or linebreak
int multi_attr = 0; // attributes desired by multibyte
int mb_l = 1; // multi-byte byte length
int mb_c = 0; // decoded multi-byte character
bool mb_utf8 = false; // screen char is UTF-8 char
int u8cc[MAX_MCO]; // composing UTF-8 chars
int filler_lines; // nr of filler lines to be drawn
int filler_todo; // nr of filler lines still to do + 1
hlf_T diff_hlf = (hlf_T)0; // type of diff highlighting
int change_start = MAXCOL; // first col of changed area
int change_end = -1; // last col of changed area
colnr_T trailcol = MAXCOL; // start of trailing spaces
int need_showbreak = false; // overlong line, skip first x chars
int line_attr = 0; // attribute for the whole line
int line_attr_lowprio = 0; // low-priority attribute for the line
matchitem_T *cur; // points to the match list
match_T *shl; // points to search_hl or a match
int shl_flag; // flag to indicate whether search_hl
// has been processed or not
bool prevcol_hl_flag; // flag to indicate whether prevcol
// equals startcol of search_hl or one
// of the matches
int prev_c = 0; // previous Arabic character
int prev_c1 = 0; // first composing char for prev_c
bool search_attr_from_match = false; // if search_attr is from :match
BufhlLineInfo bufhl_info; // bufhl data for this line
bool has_bufhl = false; // this buffer has highlight matches
bool do_virttext = false; // draw virtual text for this line
/* draw_state: items that are drawn in sequence: */
#define WL_START 0 /* nothing done yet */
# define WL_CMDLINE WL_START + 1 /* cmdline window column */
# define WL_FOLD WL_CMDLINE + 1 /* 'foldcolumn' */
# define WL_SIGN WL_FOLD + 1 /* column for signs */
#define WL_NR WL_SIGN + 1 /* line number */
# define WL_BRI WL_NR + 1 /* 'breakindent' */
# define WL_SBR WL_BRI + 1 /* 'showbreak' or 'diff' */
#define WL_LINE WL_SBR + 1 /* text in the line */
int draw_state = WL_START; /* what to draw next */
int syntax_flags = 0;
int syntax_seqnr = 0;
int prev_syntax_id = 0;
int conceal_attr = win_hl_attr(wp, HLF_CONCEAL);
int is_concealing = false;
int boguscols = 0; ///< nonexistent columns added to
///< force wrapping
int vcol_off = 0; ///< offset for concealed characters
int did_wcol = false;
int match_conc = 0; ///< cchar for match functions
int old_boguscols = 0;
# define VCOL_HLC (vcol - vcol_off)
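// FIX_FOR_BOGUSCOLS compensates for concealed characters: it moves "vcol_off"
// into "n_extra", removes it from "vcol", drops the "boguscols" added to
// "col", and resets both counters, so that a following tab is aligned as if
// 'conceallevel' were 0 (see the tab handling further down).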
# define FIX_FOR_BOGUSCOLS \
{ \
n_extra += vcol_off; \
vcol -= vcol_off; \
vcol_off = 0; \
col -= boguscols; \
old_boguscols = boguscols; \
boguscols = 0; \
}
if (startrow > endrow) /* past the end already! */
return startrow;
row = startrow;
if (!number_only) {
// To speed up the loop below, set extra_check when there is linebreak,
// trailing white space and/or syntax processing to be done.
extra_check = wp->w_p_lbr;
if (syntax_present(wp) && !wp->w_s->b_syn_error && !wp->w_s->b_syn_slow) {
// Prepare for syntax highlighting in this line. When there is an
// error, stop syntax highlighting.
save_did_emsg = did_emsg;
did_emsg = false;
syntax_start(wp, lnum);
if (did_emsg) {
wp->w_s->b_syn_error = true;
} else {
did_emsg = save_did_emsg;
if (!wp->w_s->b_syn_slow) {
has_syntax = true;
extra_check = true;
}
}
}
if (bufhl_start_line(wp->w_buffer, lnum, &bufhl_info)) {
if (kv_size(bufhl_info.line->items)) {
has_bufhl = true;
extra_check = true;
}
if (kv_size(bufhl_info.line->virt_text)) {
do_virttext = true;
}
}
// Check for columns to display for 'colorcolumn'.
color_cols = wp->w_buffer->terminal ? NULL : wp->w_p_cc_cols;
if (color_cols != NULL) {
draw_color_col = advance_color_col(VCOL_HLC, &color_cols);
}
if (wp->w_p_spell
&& *wp->w_s->b_p_spl != NUL
&& !GA_EMPTY(&wp->w_s->b_langp)
&& *(char **)(wp->w_s->b_langp.ga_data) != NULL) {
// Prepare for spell checking.
has_spell = true;
extra_check = true;
// Get the start of the next line, so that words that wrap to the next
// line are found too: "et<line-break>al.".
// Trick: skip a few chars for C/shell/Vim comments
nextline[SPWORDLEN] = NUL;
if (lnum < wp->w_buffer->b_ml.ml_line_count) {
line = ml_get_buf(wp->w_buffer, lnum + 1, false);
spell_cat_line(nextline + SPWORDLEN, line, SPWORDLEN);
}
// When a word wrapped from the previous line the start of the current
// line is valid.
if (lnum == checked_lnum) {
cur_checked_col = checked_col;
}
checked_lnum = 0;
// A sentence end in the previous line may require a word starting with
// a capital in this line. In line 1 always check the first word.
if (lnum != capcol_lnum) {
cap_col = -1;
}
if (lnum == 1) {
cap_col = 0;
}
capcol_lnum = 0;
}
//
// handle visual active in this window
//
fromcol = -10;
tocol = MAXCOL;
if (VIsual_active && wp->w_buffer == curwin->w_buffer) {
// Visual is after curwin->w_cursor
if (ltoreq(curwin->w_cursor, VIsual)) {
top = &curwin->w_cursor;
bot = &VIsual;
} else { // Visual is before curwin->w_cursor
top = &VIsual;
bot = &curwin->w_cursor;
}
lnum_in_visual_area = (lnum >= top->lnum && lnum <= bot->lnum);
if (VIsual_mode == Ctrl_V) { // block mode
if (lnum_in_visual_area) {
fromcol = wp->w_old_cursor_fcol;
tocol = wp->w_old_cursor_lcol;
}
} else { // non-block mode
if (lnum > top->lnum && lnum <= bot->lnum) {
fromcol = 0;
} else if (lnum == top->lnum) {
if (VIsual_mode == 'V') { // linewise
fromcol = 0;
} else {
getvvcol(wp, top, (colnr_T *)&fromcol, NULL, NULL);
if (gchar_pos(top) == NUL) {
tocol = fromcol + 1;
}
}
}
if (VIsual_mode != 'V' && lnum == bot->lnum) {
if (*p_sel == 'e' && bot->col == 0
&& bot->coladd == 0) {
fromcol = -10;
tocol = MAXCOL;
} else if (bot->col == MAXCOL) {
tocol = MAXCOL;
} else {
pos = *bot;
if (*p_sel == 'e') {
getvvcol(wp, &pos, (colnr_T *)&tocol, NULL, NULL);
} else {
getvvcol(wp, &pos, NULL, NULL, (colnr_T *)&tocol);
tocol++;
}
}
}
}
// Check if the char under the cursor should be inverted (highlighted).
if (!highlight_match && lnum == curwin->w_cursor.lnum && wp == curwin
&& cursor_is_block_during_visual(*p_sel == 'e')) {
noinvcur = true;
}
// if inverting in this line set area_highlighting
if (fromcol >= 0) {
area_highlighting = true;
attr = win_hl_attr(wp, HLF_V);
}
// handle 'incsearch' and ":s///c" highlighting
} else if (highlight_match
&& wp == curwin
&& lnum >= curwin->w_cursor.lnum
&& lnum <= curwin->w_cursor.lnum + search_match_lines) {
if (lnum == curwin->w_cursor.lnum) {
getvcol(curwin, &(curwin->w_cursor),
(colnr_T *)&fromcol, NULL, NULL);
} else {
fromcol = 0;
}
if (lnum == curwin->w_cursor.lnum + search_match_lines) {
pos.lnum = lnum;
pos.col = search_match_endcol;
getvcol(curwin, &pos, (colnr_T *)&tocol, NULL, NULL);
} else {
tocol = MAXCOL;
}
// do at least one character; happens when past end of line
if (fromcol == tocol) {
tocol = fromcol + 1;
}
area_highlighting = true;
attr = win_hl_attr(wp, HLF_I);
}
}
filler_lines = diff_check(wp, lnum);
if (filler_lines < 0) {
if (filler_lines == -1) {
if (diff_find_change(wp, lnum, &change_start, &change_end))
diff_hlf = HLF_ADD; /* added line */
else if (change_start == 0)
diff_hlf = HLF_TXD; /* changed text */
else
diff_hlf = HLF_CHD; /* changed line */
} else
diff_hlf = HLF_ADD; /* added line */
filler_lines = 0;
area_highlighting = TRUE;
}
if (lnum == wp->w_topline)
filler_lines = wp->w_topfill;
filler_todo = filler_lines;
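// "filler_lines" counts diff filler lines (or 'topfill' lines for the first
// window line); they are drawn before the text, one per screen row, while
// "filler_todo" counts down.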
// Cursor line highlighting for 'cursorline' in the current window.
if (wp->w_p_cul && lnum == wp->w_cursor.lnum) {
// Do not show the cursor line when Visual mode is active, because it's
// not clear what is selected then.
if (!(wp == curwin && VIsual_active)) {
int cul_attr = win_hl_attr(wp, HLF_CUL);
HlAttrs ae = syn_attr2entry(cul_attr);
// We make a compromise here (#7383):
// * low-priority CursorLine if fg is not set
// * high-priority ("same as Vim" priority) CursorLine if fg is set
if (ae.rgb_fg_color == -1 && ae.cterm_fg_color == 0) {
line_attr_lowprio = cul_attr;
} else {
if (!(State & INSERT) && bt_quickfix(wp->w_buffer)
&& qf_current_entry(wp) == lnum) {
line_attr = hl_combine_attr(cul_attr, line_attr);
} else {
line_attr = cul_attr;
}
}
}
// Update w_last_cursorline even if Visual mode is active.
wp->w_last_cursorline = wp->w_cursor.lnum;
}
// If this line has a sign with line highlighting set line_attr.
v = buf_getsigntype(wp->w_buffer, lnum, SIGN_LINEHL, 0, 1);
if (v != 0) {
line_attr = sign_get_attr((int)v, SIGN_LINEHL);
}
// Highlight the current line in the quickfix window.
if (bt_quickfix(wp->w_buffer) && qf_current_entry(wp) == lnum) {
line_attr = win_hl_attr(wp, HLF_QFL);
}
if (line_attr_lowprio || line_attr) {
area_highlighting = true;
}
line = ml_get_buf(wp->w_buffer, lnum, FALSE);
ptr = line;
if (has_spell && !number_only) {
// For checking first word with a capital skip white space.
if (cap_col == 0) {
cap_col = (int)getwhitecols(line);
}
/* To be able to spell-check over line boundaries copy the end of the
* current line into nextline[]. Above the start of the next line was
* copied to nextline[SPWORDLEN]. */
if (nextline[SPWORDLEN] == NUL) {
/* No next line or it is empty. */
nextlinecol = MAXCOL;
nextline_idx = 0;
} else {
v = (long)STRLEN(line);
if (v < SPWORDLEN) {
/* Short line, use it completely and append the start of the
* next line. */
nextlinecol = 0;
memmove(nextline, line, (size_t)v);
STRMOVE(nextline + v, nextline + SPWORDLEN);
nextline_idx = v + 1;
} else {
/* Long line, use only the last SPWORDLEN bytes. */
nextlinecol = v - SPWORDLEN;
memmove(nextline, line + nextlinecol, SPWORDLEN); // -V512
nextline_idx = SPWORDLEN + 1;
}
}
}
if (wp->w_p_list) {
if (curwin->w_p_lcs_chars.space
|| wp->w_p_lcs_chars.trail
|| wp->w_p_lcs_chars.nbsp) {
extra_check = true;
}
// find start of trailing whitespace
if (wp->w_p_lcs_chars.trail) {
trailcol = (colnr_T)STRLEN(ptr);
while (trailcol > (colnr_T)0 && ascii_iswhite(ptr[trailcol - 1])) {
trailcol--;
}
trailcol += (colnr_T) (ptr - line);
}
}
/*
* 'nowrap' or 'wrap' and a single line that doesn't fit: Advance to the
* first character to be displayed.
*/
if (wp->w_p_wrap)
v = wp->w_skipcol;
else
v = wp->w_leftcol;
if (v > 0 && !number_only) {
char_u *prev_ptr = ptr;
while (vcol < v && *ptr != NUL) {
c = win_lbr_chartabsize(wp, line, ptr, (colnr_T)vcol, NULL);
vcol += c;
prev_ptr = ptr;
MB_PTR_ADV(ptr);
}
// When:
// - 'cuc' is set, or
// - 'colorcolumn' is set, or
// - 'virtualedit' is set, or
// - the visual mode is active,
// the end of the line may be before the start of the displayed part.
if (vcol < v && (wp->w_p_cuc
|| draw_color_col
|| virtual_active()
|| (VIsual_active && wp->w_buffer == curwin->w_buffer))) {
vcol = v;
}
/* Handle a character that's not completely on the screen: Put ptr at
* that character but skip the first few screen characters. */
if (vcol > v) {
vcol -= c;
ptr = prev_ptr;
// If the character fits on the screen, don't need to skip it.
// Except for a TAB.
if (utf_ptr2cells(ptr) >= c || *ptr == TAB) {
n_skip = v - vcol;
}
}
/*
* Adjust for when the inverted text is before the screen,
* and when the start of the inverted text is before the screen.
*/
if (tocol <= vcol)
fromcol = 0;
else if (fromcol >= 0 && fromcol < vcol)
fromcol = vcol;
/* When w_skipcol is non-zero, first line needs 'showbreak' */
if (wp->w_p_wrap)
need_showbreak = TRUE;
/* When spell checking a word we need to figure out the start of the
* word and if it's badly spelled or not. */
if (has_spell) {
size_t len;
colnr_T linecol = (colnr_T)(ptr - line);
hlf_T spell_hlf = HLF_COUNT;
pos = wp->w_cursor;
wp->w_cursor.lnum = lnum;
wp->w_cursor.col = linecol;
len = spell_move_to(wp, FORWARD, TRUE, TRUE, &spell_hlf);
/* spell_move_to() may call ml_get() and make "line" invalid */
line = ml_get_buf(wp->w_buffer, lnum, FALSE);
ptr = line + linecol;
if (len == 0 || (int)wp->w_cursor.col > ptr - line) {
/* no bad word found at line start, don't check until end of a
* word */
spell_hlf = HLF_COUNT;
word_end = (int)(spell_to_word_end(ptr, wp) - line + 1);
} else {
/* bad word found, use attributes until end of word */
assert(len <= INT_MAX);
word_end = wp->w_cursor.col + (int)len + 1;
/* Turn index into actual attributes. */
if (spell_hlf != HLF_COUNT)
spell_attr = highlight_attr[spell_hlf];
}
wp->w_cursor = pos;
// Need to restart syntax highlighting for this line.
if (has_syntax) {
syntax_start(wp, lnum);
}
}
}
/*
* Correct highlighting for cursor that can't be disabled.
* Avoids having to check this for each character.
*/
if (fromcol >= 0) {
if (noinvcur) {
if ((colnr_T)fromcol == wp->w_virtcol) {
/* highlighting starts at cursor, let it start just after the
* cursor */
fromcol_prev = fromcol;
fromcol = -1;
} else if ((colnr_T)fromcol < wp->w_virtcol)
/* restart highlighting after the cursor */
fromcol_prev = wp->w_virtcol;
}
if (fromcol >= tocol)
fromcol = -1;
}
/*
* Handle highlighting the last used search pattern and matches.
* Do this for both search_hl and the match list.
*/
cur = wp->w_match_head;
shl_flag = false;
while ((cur != NULL || !shl_flag) && !number_only) {
if (!shl_flag) {
shl = &search_hl;
shl_flag = true;
} else {
shl = &cur->hl; // -V595
}
shl->startcol = MAXCOL;
shl->endcol = MAXCOL;
shl->attr_cur = 0;
shl->is_addpos = false;
v = (long)(ptr - line);
if (cur != NULL) {
cur->pos.cur = 0;
}
next_search_hl(wp, shl, lnum, (colnr_T)v,
shl == &search_hl ? NULL : cur);
if (wp->w_s->b_syn_slow) {
has_syntax = false;
}
// Need to get the line again, a multi-line regexp may have made it
// invalid.
line = ml_get_buf(wp->w_buffer, lnum, false);
ptr = line + v;
if (shl->lnum != 0 && shl->lnum <= lnum) {
if (shl->lnum == lnum) {
shl->startcol = shl->rm.startpos[0].col;
} else {
shl->startcol = 0;
}
if (lnum == shl->lnum + shl->rm.endpos[0].lnum
- shl->rm.startpos[0].lnum) {
shl->endcol = shl->rm.endpos[0].col;
} else {
shl->endcol = MAXCOL;
}
// Highlight one character for an empty match.
if (shl->startcol == shl->endcol) {
if (line[shl->endcol] != NUL) {
shl->endcol += (*mb_ptr2len)(line + shl->endcol);
} else {
++shl->endcol;
}
}
if ((long)shl->startcol < v) { // match at leftcol
shl->attr_cur = shl->attr;
search_attr = shl->attr;
search_attr_from_match = shl != &search_hl;
}
area_highlighting = true;
}
if (shl != &search_hl && cur != NULL)
cur = cur->next;
}
unsigned off = 0; // Offset relative start of line
int col = 0; // Visual column on screen.
if (wp->w_p_rl) {
// Rightleft window: process the text in the normal direction, but put
// it in linebuf_char[off] from right to left. Start at the
// rightmost column of the window.
col = grid->Columns - 1;
off += col;
}
// won't highlight after 1024 columns
int term_attrs[1024] = {0};
if (wp->w_buffer->terminal) {
terminal_get_line_attributes(wp->w_buffer->terminal, wp, lnum, term_attrs);
extra_check = true;
}
int sign_idx = 0;
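// "draw_state" walks through the WL_ items once per screen row: first the
// cmdline-window prefix, fold column, signs, line number, 'breakindent' and
// 'showbreak', and finally the text itself (WL_LINE). It is reset to
// WL_START again when the line wraps to the next screen row.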
// Repeat for the whole displayed line.
for (;; ) {
int has_match_conc = 0; ///< match wants to conceal
bool did_decrement_ptr = false;
// Skip this quickly when working on the text.
if (draw_state != WL_LINE) {
if (draw_state == WL_CMDLINE - 1 && n_extra == 0) {
draw_state = WL_CMDLINE;
if (cmdwin_type != 0 && wp == curwin) {
/* Draw the cmdline character. */
n_extra = 1;
c_extra = cmdwin_type;
c_final = NUL;
char_attr = win_hl_attr(wp, HLF_AT);
}
}
if (draw_state == WL_FOLD - 1 && n_extra == 0) {
int fdc = compute_foldcolumn(wp, 0);
draw_state = WL_FOLD;
if (fdc > 0) {
// Draw the 'foldcolumn'. Allocate a buffer, "extra" may
// already be in use.
xfree(p_extra_free);
p_extra_free = xmalloc(12 + 1);
fill_foldcolumn(p_extra_free, wp, false, lnum);
n_extra = fdc;
p_extra_free[n_extra] = NUL;
p_extra = p_extra_free;
c_extra = NUL;
c_final = NUL;
char_attr = win_hl_attr(wp, HLF_FC);
}
}
//sign column
if (draw_state == WL_SIGN - 1 && n_extra == 0) {
draw_state = WL_SIGN;
/* Show the sign column when there are any signs in this buffer. */
int count = win_signcol_count(wp);
if (count > 0) {
int text_sign;
// Draw cells with the sign value or blank.
c_extra = ' ';
c_final = NUL;
char_attr = win_hl_attr(wp, HLF_SC);
n_extra = win_signcol_width(wp);
if (row == startrow + filler_lines && filler_todo <= 0) {
text_sign = buf_getsigntype(wp->w_buffer, lnum, SIGN_TEXT,
sign_idx, count);
if (text_sign != 0) {
p_extra = sign_get_text(text_sign);
int symbol_blen = (int)STRLEN(p_extra);
if (p_extra != NULL) {
c_extra = NUL;
c_final = NUL;
// symbol(s) bytes + (filling spaces) (one byte each)
n_extra = symbol_blen +
(win_signcol_width(wp) - mb_string2cells(p_extra));
memset(extra, ' ', sizeof(extra));
STRNCPY(extra, p_extra, STRLEN(p_extra));
p_extra = extra;
p_extra[n_extra] = NUL;
}
char_attr = sign_get_attr(text_sign, SIGN_TEXT);
}
}
sign_idx++;
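// The sign column can be more than one sign wide: re-enter the
// WL_SIGN state to draw the next sign cell for this line.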
if (sign_idx < count) {
draw_state = WL_SIGN - 1;
}
}
}
if (draw_state == WL_NR - 1 && n_extra == 0) {
draw_state = WL_NR;
/* Display the absolute or relative line number. After the
* first row, fill with blanks when the 'n' flag isn't in 'cpo'. */
if ((wp->w_p_nu || wp->w_p_rnu)
&& (row == startrow
+ filler_lines
|| vim_strchr(p_cpo, CPO_NUMCOL) == NULL)) {
/* Draw the line number (empty space after wrapping). */
if (row == startrow
+ filler_lines
) {
long num;
char *fmt = "%*ld ";
if (wp->w_p_nu && !wp->w_p_rnu)
/* 'number' + 'norelativenumber' */
num = (long)lnum;
else {
/* 'relativenumber', don't use negative numbers */
num = labs((long)get_cursor_rel_lnum(wp, lnum));
if (num == 0 && wp->w_p_nu && wp->w_p_rnu) {
/* 'number' + 'relativenumber' */
num = lnum;
fmt = "%-*ld ";
}
}
sprintf((char *)extra, fmt,
number_width(wp), num);
if (wp->w_skipcol > 0)
for (p_extra = extra; *p_extra == ' '; ++p_extra)
*p_extra = '-';
if (wp->w_p_rl) { // reverse line numbers
// like rl_mirror(), but keep the space at the end
char_u *p2 = skiptowhite(extra) - 1;
for (char_u *p1 = extra; p1 < p2; p1++, p2--) {
const int t = *p1;
*p1 = *p2;
*p2 = t;
}
}
p_extra = extra;
c_extra = NUL;
c_final = NUL;
} else {
c_extra = ' ';
c_final = NUL;
}
n_extra = number_width(wp) + 1;
char_attr = win_hl_attr(wp, HLF_N);
int num_sign = buf_getsigntype(wp->w_buffer, lnum, SIGN_NUMHL,
0, 1);
if (num_sign != 0) {
// :sign defined with "numhl" highlight.
char_attr = sign_get_attr(num_sign, SIGN_NUMHL);
} else if ((wp->w_p_cul || wp->w_p_rnu)
&& lnum == wp->w_cursor.lnum) {
// When 'cursorline' is set highlight the line number of
// the current line differently.
// TODO(vim): Can we use CursorLine instead of CursorLineNr
// when CursorLineNr isn't set?
char_attr = win_hl_attr(wp, HLF_CLN);
}
}
}
if (wp->w_p_brisbr && draw_state == WL_BRI - 1
&& n_extra == 0 && *p_sbr != NUL) {
// draw indent after showbreak value
draw_state = WL_BRI;
} else if (wp->w_p_brisbr && draw_state == WL_SBR && n_extra == 0) {
// after the showbreak, draw the breakindent
draw_state = WL_BRI - 1;
}
// draw 'breakindent': indent wrapped text accordingly
if (draw_state == WL_BRI - 1 && n_extra == 0) {
draw_state = WL_BRI;
// if need_showbreak is set, breakindent also applies
if (wp->w_p_bri && (row != startrow || need_showbreak)
&& filler_lines == 0) {
char_attr = 0;
if (diff_hlf != (hlf_T)0) {
char_attr = win_hl_attr(wp, diff_hlf);
if (wp->w_p_cul && lnum == wp->w_cursor.lnum) {
char_attr = hl_combine_attr(char_attr, win_hl_attr(wp, HLF_CUL));
}
}
p_extra = NULL;
c_extra = ' ';
n_extra = get_breakindent_win(wp, ml_get_buf(wp->w_buffer, lnum, FALSE));
/* Correct end of highlighted area for 'breakindent',
required when 'linebreak' is also set. */
if (tocol == vcol)
tocol += n_extra;
}
}
if (draw_state == WL_SBR - 1 && n_extra == 0) {
draw_state = WL_SBR;
if (filler_todo > 0) {
// draw "deleted" diff line(s)
if (char2cells(wp->w_p_fcs_chars.diff) > 1) {
c_extra = '-';
c_final = NUL;
} else {
c_extra = wp->w_p_fcs_chars.diff;
c_final = NUL;
}
if (wp->w_p_rl) {
n_extra = col + 1;
} else {
n_extra = grid->Columns - col;
}
char_attr = win_hl_attr(wp, HLF_DED);
}
if (*p_sbr != NUL && need_showbreak) {
/* Draw 'showbreak' at the start of each broken line. */
p_extra = p_sbr;
c_extra = NUL;
c_final = NUL;
n_extra = (int)STRLEN(p_sbr);
char_attr = win_hl_attr(wp, HLF_AT);
need_showbreak = false;
vcol_sbr = vcol + MB_CHARLEN(p_sbr);
/* Correct end of highlighted area for 'showbreak',
* required when 'linebreak' is also set. */
if (tocol == vcol)
tocol += n_extra;
/* combine 'showbreak' with 'cursorline' */
if (wp->w_p_cul && lnum == wp->w_cursor.lnum) {
char_attr = hl_combine_attr(char_attr, win_hl_attr(wp, HLF_CUL));
}
}
}
if (draw_state == WL_LINE - 1 && n_extra == 0) {
sign_idx = 0;
draw_state = WL_LINE;
if (saved_n_extra) {
/* Continue item from end of wrapped line. */
n_extra = saved_n_extra;
c_extra = saved_c_extra;
c_final = saved_c_final;
p_extra = saved_p_extra;
char_attr = saved_char_attr;
} else {
char_attr = 0;
}
}
}
// When still displaying '$' of change command, stop at cursor
if ((dollar_vcol >= 0 && wp == curwin
&& lnum == wp->w_cursor.lnum && vcol >= (long)wp->w_virtcol
&& filler_todo <= 0)
|| (number_only && draw_state > WL_NR)) {
grid_put_linebuf(grid, row, 0, col, -grid->Columns, wp->w_p_rl, wp,
wp->w_hl_attr_normal, false);
// Pretend we have finished updating the window. Except when
// 'cursorcolumn' is set.
if (wp->w_p_cuc) {
row = wp->w_cline_row + wp->w_cline_height;
} else {
row = grid->Rows;
}
break;
}
if (draw_state == WL_LINE && (area_highlighting || has_spell)) {
// handle Visual or match highlighting in this line
if (vcol == fromcol
|| (vcol + 1 == fromcol && n_extra == 0
&& utf_ptr2cells(ptr) > 1)
|| ((int)vcol_prev == fromcol_prev
&& vcol_prev < vcol // not at margin
&& vcol < tocol)) {
area_attr = attr; // start highlighting
} else if (area_attr != 0 && (vcol == tocol
|| (noinvcur
&& (colnr_T)vcol == wp->w_virtcol))) {
area_attr = 0; // stop highlighting
}
if (!n_extra) {
/*
* Check for start/end of search pattern match.
* After end, check for start/end of next match.
* When another match, have to check for start again.
* Watch out for matching an empty string!
* Do this for 'search_hl' and the match list (ordered by
* priority).
*/
v = (long)(ptr - line);
cur = wp->w_match_head;
shl_flag = FALSE;
while (cur != NULL || shl_flag == FALSE) {
if (shl_flag == FALSE
&& ((cur != NULL
&& cur->priority > SEARCH_HL_PRIORITY)
|| cur == NULL)) {
shl = &search_hl;
shl_flag = TRUE;
} else
shl = &cur->hl;
if (cur != NULL) {
cur->pos.cur = 0;
}
bool pos_inprogress = true; // mark that a position match search is
// in progress
while (shl->rm.regprog != NULL
|| (cur != NULL && pos_inprogress)) {
if (shl->startcol != MAXCOL
&& v >= (long)shl->startcol
&& v < (long)shl->endcol) {
int tmp_col = v + MB_PTR2LEN(ptr);
if (shl->endcol < tmp_col) {
shl->endcol = tmp_col;
}
shl->attr_cur = shl->attr;
// Match with the "Conceal" group results in hiding
// the match.
if (cur != NULL
&& shl != &search_hl
&& syn_name2id((char_u *)"Conceal") == cur->hlg_id) {
has_match_conc = v == (long)shl->startcol ? 2 : 1;
match_conc = cur->conceal_char;
} else {
has_match_conc = match_conc = 0;
}
} else if (v == (long)shl->endcol) {
shl->attr_cur = 0;
next_search_hl(wp, shl, lnum, (colnr_T)v,
shl == &search_hl ? NULL : cur);
pos_inprogress = !(cur == NULL || cur->pos.cur == 0);
/* Need to get the line again, a multi-line regexp
* may have made it invalid. */
line = ml_get_buf(wp->w_buffer, lnum, FALSE);
ptr = line + v;
if (shl->lnum == lnum) {
shl->startcol = shl->rm.startpos[0].col;
if (shl->rm.endpos[0].lnum == 0)
shl->endcol = shl->rm.endpos[0].col;
else
shl->endcol = MAXCOL;
if (shl->startcol == shl->endcol) {
// highlight empty match, try again after it
shl->endcol += (*mb_ptr2len)(line + shl->endcol);
}
/* Loop to check if the match starts at the
* current position */
continue;
}
}
break;
}
if (shl != &search_hl && cur != NULL)
cur = cur->next;
}
/* Use attributes from match with highest priority among
* 'search_hl' and the match list. */
search_attr_from_match = false;
search_attr = search_hl.attr_cur;
cur = wp->w_match_head;
shl_flag = FALSE;
while (cur != NULL || shl_flag == FALSE) {
if (shl_flag == FALSE
&& ((cur != NULL
&& cur->priority > SEARCH_HL_PRIORITY)
|| cur == NULL)) {
shl = &search_hl;
shl_flag = TRUE;
} else
shl = &cur->hl;
if (shl->attr_cur != 0) {
search_attr = shl->attr_cur;
search_attr_from_match = shl != &search_hl;
}
if (shl != &search_hl && cur != NULL)
cur = cur->next;
}
// Only highlight one character after the last column.
if (*ptr == NUL
&& (wp->w_p_list && lcs_eol_one == -1)) {
search_attr = 0;
}
}
if (diff_hlf != (hlf_T)0) {
if (diff_hlf == HLF_CHD && ptr - line >= change_start
&& n_extra == 0) {
diff_hlf = HLF_TXD; // changed text
}
if (diff_hlf == HLF_TXD && ptr - line > change_end
&& n_extra == 0) {
diff_hlf = HLF_CHD; // changed line
}
line_attr = win_hl_attr(wp, diff_hlf);
// Overlay CursorLine onto diff-mode highlight.
if (wp->w_p_cul && lnum == wp->w_cursor.lnum) {
line_attr = 0 != line_attr_lowprio // Low-priority CursorLine
? hl_combine_attr(hl_combine_attr(win_hl_attr(wp, HLF_CUL),
line_attr),
hl_get_underline())
: hl_combine_attr(line_attr, win_hl_attr(wp, HLF_CUL));
}
}
// Decide which of the highlight attributes to use.
attr_pri = true;
if (area_attr != 0) {
char_attr = hl_combine_attr(line_attr, area_attr);
} else if (search_attr != 0) {
char_attr = hl_combine_attr(line_attr, search_attr);
}
// Use line_attr when not in the Visual or 'incsearch' area
// (area_attr may be 0 when "noinvcur" is set).
else if (line_attr != 0 && ((fromcol == -10 && tocol == MAXCOL)
|| vcol < fromcol || vcol_prev < fromcol_prev
|| vcol >= tocol)) {
char_attr = line_attr;
} else {
attr_pri = false;
if (has_syntax) {
char_attr = syntax_attr;
} else {
char_attr = 0;
}
}
}
// Get the next character to put on the screen.
//
// The "p_extra" points to the extra stuff that is inserted to
// represent special characters (non-printable stuff) and other
// things. When all characters are the same, c_extra is used.
// If c_final is set, it will compulsorily be used at the end.
// "p_extra" must end in a NUL to avoid mb_ptr2len() reads past
// "p_extra[n_extra]".
// For the '$' of the 'list' option, n_extra == 1, p_extra == "".
if (n_extra > 0) {
if (c_extra != NUL || (n_extra == 1 && c_final != NUL)) {
c = (n_extra == 1 && c_final != NUL) ? c_final : c_extra;
mb_c = c; // doesn't handle non-utf-8 multi-byte!
if (utf_char2len(c) > 1) {
mb_utf8 = true;
u8cc[0] = 0;
c = 0xc0;
} else {
mb_utf8 = false;
}
} else {
c = *p_extra;
mb_c = c;
// If the UTF-8 character is more than one byte:
// Decode it into "mb_c".
mb_l = utfc_ptr2len(p_extra);
mb_utf8 = false;
if (mb_l > n_extra) {
mb_l = 1;
} else if (mb_l > 1) {
mb_c = utfc_ptr2char(p_extra, u8cc);
mb_utf8 = true;
c = 0xc0;
}
if (mb_l == 0) { // at the NUL at end-of-line
mb_l = 1;
}
// If a double-width char doesn't fit, display a '>' in the last column.
if ((wp->w_p_rl ? (col <= 0) : (col >= grid->Columns - 1))
&& (*mb_char2cells)(mb_c) == 2) {
c = '>';
mb_c = c;
mb_l = 1;
mb_utf8 = false;
multi_attr = win_hl_attr(wp, HLF_AT);
// put the pointer back to output the double-width
// character at the start of the next line.
n_extra++;
p_extra--;
} else {
n_extra -= mb_l - 1;
p_extra += mb_l - 1;
}
++p_extra;
}
--n_extra;
} else {
int c0;
if (p_extra_free != NULL) {
XFREE_CLEAR(p_extra_free);
}
// Get a character from the line itself.
c0 = c = *ptr;
mb_c = c;
// If the UTF-8 character is more than one byte: Decode it
// into "mb_c".
mb_l = utfc_ptr2len(ptr);
mb_utf8 = false;
if (mb_l > 1) {
mb_c = utfc_ptr2char(ptr, u8cc);
// Overlong encoded ASCII or ASCII with composing char
// is displayed normally, except a NUL.
if (mb_c < 0x80) {
c0 = c = mb_c;
}
mb_utf8 = true;
// At start of the line we can have a composing char.
// Draw it as a space with a composing char.
if (utf_iscomposing(mb_c)) {
int i;
for (i = MAX_MCO - 1; i > 0; i--) {
u8cc[i] = u8cc[i - 1];
}
u8cc[0] = mb_c;
mb_c = ' ';
}
}
if ((mb_l == 1 && c >= 0x80)
|| (mb_l >= 1 && mb_c == 0)
|| (mb_l > 1 && (!vim_isprintc(mb_c)))) {
// Illegal UTF-8 byte: display as <xx>.
// Non-BMP character : display as ? or fullwidth ?.
transchar_hex((char *)extra, mb_c);
if (wp->w_p_rl) { // reverse
rl_mirror(extra);
}
p_extra = extra;
c = *p_extra;
mb_c = mb_ptr2char_adv((const char_u **)&p_extra);
mb_utf8 = (c >= 0x80);
n_extra = (int)STRLEN(p_extra);
c_extra = NUL;
c_final = NUL;
if (area_attr == 0 && search_attr == 0) {
n_attr = n_extra + 1;
extra_attr = win_hl_attr(wp, HLF_8);
saved_attr2 = char_attr; // save current attr
}
} else if (mb_l == 0) { // at the NUL at end-of-line
mb_l = 1;
} else if (p_arshape && !p_tbidi && arabic_char(mb_c)) {
// Do Arabic shaping.
int pc, pc1, nc;
int pcc[MAX_MCO];
// The idea of what is the previous and next
// character depends on 'rightleft'.
if (wp->w_p_rl) {
pc = prev_c;
pc1 = prev_c1;
nc = utf_ptr2char(ptr + mb_l);
prev_c1 = u8cc[0];
} else {
pc = utfc_ptr2char(ptr + mb_l, pcc);
nc = prev_c;
pc1 = pcc[0];
}
prev_c = mb_c;
mb_c = arabic_shape(mb_c, &c, &u8cc[0], pc, pc1, nc);
} else {
prev_c = mb_c;
}
// If a double-width char doesn't fit, display a '>' in the
// last column; the character is displayed at the start of the
// next line.
if ((wp->w_p_rl ? (col <= 0) :
(col >= grid->Columns - 1))
&& (*mb_char2cells)(mb_c) == 2) {
c = '>';
mb_c = c;
mb_utf8 = false;
mb_l = 1;
multi_attr = win_hl_attr(wp, HLF_AT);
// Put pointer back so that the character will be
// displayed at the start of the next line.
ptr--;
did_decrement_ptr = true;
} else if (*ptr != NUL) {
ptr += mb_l - 1;
}
// If a double-width char doesn't fit at the left side display a '<' in
// the first column. Don't do this for unprintable characters.
if (n_skip > 0 && mb_l > 1 && n_extra == 0) {
n_extra = 1;
c_extra = MB_FILLER_CHAR;
c_final = NUL;
c = ' ';
if (area_attr == 0 && search_attr == 0) {
n_attr = n_extra + 1;
extra_attr = win_hl_attr(wp, HLF_AT);
saved_attr2 = char_attr; // save current attr
}
mb_c = c;
mb_utf8 = false;
mb_l = 1;
}
ptr++;
if (extra_check) {
bool can_spell = true;
/* Get syntax attribute, unless still at the start of the line
* (double-wide char that doesn't fit). */
v = (long)(ptr - line);
if (has_syntax && v > 0) {
/* Get the syntax attribute for the character. If there
* is an error, disable syntax highlighting. */
save_did_emsg = did_emsg;
did_emsg = FALSE;
syntax_attr = get_syntax_attr((colnr_T)v - 1,
has_spell ? &can_spell : NULL, false);
if (did_emsg) {
wp->w_s->b_syn_error = TRUE;
has_syntax = FALSE;
} else
did_emsg = save_did_emsg;
/* Need to get the line again, a multi-line regexp may
* have made it invalid. */
line = ml_get_buf(wp->w_buffer, lnum, FALSE);
ptr = line + v;
if (!attr_pri) {
char_attr = syntax_attr;
} else {
char_attr = hl_combine_attr(syntax_attr, char_attr);
}
// no concealing past the end of the line, it interferes
// with line highlighting.
if (c == NUL) {
syntax_flags = 0;
} else {
syntax_flags = get_syntax_info(&syntax_seqnr);
}
} else if (!attr_pri) {
char_attr = 0;
}
/* Check spelling (unless at the end of the line).
* Only do this when there is no syntax highlighting, the
* @Spell cluster is not used or the current syntax item
* contains the @Spell cluster. */
if (has_spell && v >= word_end && v > cur_checked_col) {
spell_attr = 0;
if (!attr_pri) {
char_attr = syntax_attr;
}
if (c != 0 && (!has_syntax || can_spell)) {
char_u *prev_ptr;
char_u *p;
int len;
hlf_T spell_hlf = HLF_COUNT;
prev_ptr = ptr - mb_l;
v -= mb_l - 1;
/* Use nextline[] if possible, it has the start of the
* next line concatenated. */
if ((prev_ptr - line) - nextlinecol >= 0) {
p = nextline + ((prev_ptr - line) - nextlinecol);
} else {
p = prev_ptr;
}
cap_col -= (int)(prev_ptr - line);
size_t tmplen = spell_check(wp, p, &spell_hlf, &cap_col, nochange);
assert(tmplen <= INT_MAX);
len = (int)tmplen;
word_end = v + len;
/* In Insert mode only highlight a word that
* doesn't touch the cursor. */
if (spell_hlf != HLF_COUNT
&& (State & INSERT) != 0
&& wp->w_cursor.lnum == lnum
&& wp->w_cursor.col >=
(colnr_T)(prev_ptr - line)
&& wp->w_cursor.col < (colnr_T)word_end) {
spell_hlf = HLF_COUNT;
spell_redraw_lnum = lnum;
}
if (spell_hlf == HLF_COUNT && p != prev_ptr
&& (p - nextline) + len > nextline_idx) {
/* Remember that the good word continues at the
* start of the next line. */
checked_lnum = lnum + 1;
checked_col = (int)((p - nextline) + len - nextline_idx);
}
/* Turn index into actual attributes. */
if (spell_hlf != HLF_COUNT)
spell_attr = highlight_attr[spell_hlf];
if (cap_col > 0) {
if (p != prev_ptr
&& (p - nextline) + cap_col >= nextline_idx) {
/* Remember that the word in the next line
* must start with a capital. */
capcol_lnum = lnum + 1;
cap_col = (int)((p - nextline) + cap_col
- nextline_idx);
} else
/* Compute the actual column. */
cap_col += (int)(prev_ptr - line);
}
}
}
if (spell_attr != 0) {
if (!attr_pri)
char_attr = hl_combine_attr(char_attr, spell_attr);
else
char_attr = hl_combine_attr(spell_attr, char_attr);
}
if (has_bufhl && v > 0) {
int bufhl_attr = bufhl_get_attr(&bufhl_info, (colnr_T)v);
if (bufhl_attr != 0) {
if (!attr_pri) {
char_attr = hl_combine_attr(char_attr, bufhl_attr);
} else {
char_attr = hl_combine_attr(bufhl_attr, char_attr);
}
}
}
if (wp->w_buffer->terminal) {
char_attr = hl_combine_attr(term_attrs[vcol], char_attr);
}
// Found last space before word: check for line break.
if (wp->w_p_lbr && c0 == c && vim_isbreak(c)
&& !vim_isbreak((int)(*ptr))) {
int mb_off = utf_head_off(line, ptr - 1);
char_u *p = ptr - (mb_off + 1);
// TODO: is passing p for start of the line OK?
n_extra = win_lbr_chartabsize(wp, line, p, (colnr_T)vcol, NULL) - 1;
if (c == TAB && n_extra + col > grid->Columns) {
n_extra = (int)wp->w_buffer->b_p_ts
- vcol % (int)wp->w_buffer->b_p_ts - 1;
}
c_extra = mb_off > 0 ? MB_FILLER_CHAR : ' ';
c_final = NUL;
if (ascii_iswhite(c)) {
if (c == TAB)
/* See "Tab alignment" below. */
FIX_FOR_BOGUSCOLS;
if (!wp->w_p_list) {
c = ' ';
}
}
}
// 'list': change char 160 to 'nbsp' and space to 'space'.
if (wp->w_p_list
&& (((c == 160
|| (mb_utf8 && (mb_c == 160 || mb_c == 0x202f)))
&& curwin->w_p_lcs_chars.nbsp)
|| (c == ' ' && curwin->w_p_lcs_chars.space
&& ptr - line <= trailcol))) {
c = (c == ' ') ? wp->w_p_lcs_chars.space : wp->w_p_lcs_chars.nbsp;
n_attr = 1;
extra_attr = win_hl_attr(wp, HLF_0);
saved_attr2 = char_attr; // save current attr
mb_c = c;
if (utf_char2len(c) > 1) {
mb_utf8 = true;
u8cc[0] = 0;
c = 0xc0;
} else {
mb_utf8 = false;
}
}
if (trailcol != MAXCOL && ptr > line + trailcol && c == ' ') {
c = wp->w_p_lcs_chars.trail;
n_attr = 1;
extra_attr = win_hl_attr(wp, HLF_0);
saved_attr2 = char_attr; // save current attr
mb_c = c;
if (utf_char2len(c) > 1) {
mb_utf8 = true;
u8cc[0] = 0;
c = 0xc0;
} else {
mb_utf8 = false;
}
}
}
/*
* Handling of non-printable characters.
*/
if (!vim_isprintc(c)) {
// when getting a character from the file, we may have to
// turn it into something else on the way to putting it on the screen.
if (c == TAB && (!wp->w_p_list || wp->w_p_lcs_chars.tab1)) {
int tab_len = 0;
long vcol_adjusted = vcol; // removed showbreak length
// Only adjust the tab_len, when at the first column after the
// showbreak value was drawn.
if (*p_sbr != NUL && vcol == vcol_sbr && wp->w_p_wrap) {
vcol_adjusted = vcol - MB_CHARLEN(p_sbr);
}
// tab amount depends on current column
tab_len = (int)wp->w_buffer->b_p_ts
- vcol_adjusted % (int)wp->w_buffer->b_p_ts - 1;
if (!wp->w_p_lbr || !wp->w_p_list) {
n_extra = tab_len;
} else {
char_u *p;
int i;
int saved_nextra = n_extra;
if (vcol_off > 0) {
// there are characters to conceal
tab_len += vcol_off;
}
// boguscols before FIX_FOR_BOGUSCOLS macro from above.
if (wp->w_p_lcs_chars.tab1 && old_boguscols > 0
&& n_extra > tab_len) {
tab_len += n_extra - tab_len;
}
/* if n_extra > 0, it gives the number of chars to use for
* a tab, else we need to calculate the width for a tab */
int len = (tab_len * mb_char2len(wp->w_p_lcs_chars.tab2));
if (n_extra > 0) {
len += n_extra - tab_len;
}
c = wp->w_p_lcs_chars.tab1;
p = xmalloc(len + 1);
memset(p, ' ', len);
p[len] = NUL;
xfree(p_extra_free);
p_extra_free = p;
for (i = 0; i < tab_len; i++) {
utf_char2bytes(wp->w_p_lcs_chars.tab2, p);
p += mb_char2len(wp->w_p_lcs_chars.tab2);
n_extra += mb_char2len(wp->w_p_lcs_chars.tab2)
- (saved_nextra > 0 ? 1: 0);
}
p_extra = p_extra_free;
// n_extra will be increased by the FIX_FOR_BOGUSCOLS
// macro below, so need to adjust for that here
if (vcol_off > 0) {
n_extra -= vcol_off;
}
}
{
int vc_saved = vcol_off;
// Tab alignment should be identical regardless of
// 'conceallevel' value. So the tab compensates for all
// previous concealed characters, and thus resets
// vcol_off and boguscols accumulated so far in the
// line. Note that the tab can be longer than
// 'tabstop' when there are concealed characters.
FIX_FOR_BOGUSCOLS;
// Make sure the highlighting for the tab char will be
// correctly set further below (effectively reverts the
// FIX_FOR_BOGUSCOLS macro).
if (n_extra == tab_len + vc_saved && wp->w_p_list
&& wp->w_p_lcs_chars.tab1) {
tab_len += vc_saved;
}
}
mb_utf8 = false; // don't draw as UTF-8
if (wp->w_p_list) {
c = (n_extra == 0 && wp->w_p_lcs_chars.tab3)
? wp->w_p_lcs_chars.tab3
: wp->w_p_lcs_chars.tab1;
if (wp->w_p_lbr) {
c_extra = NUL; /* using p_extra from above */
} else {
c_extra = wp->w_p_lcs_chars.tab2;
}
c_final = wp->w_p_lcs_chars.tab3;
n_attr = tab_len + 1;
extra_attr = win_hl_attr(wp, HLF_0);
saved_attr2 = char_attr; // save current attr
mb_c = c;
if (utf_char2len(c) > 1) {
mb_utf8 = true;
u8cc[0] = 0;
c = 0xc0;
}
} else {
c_final = NUL;
c_extra = ' ';
c = ' ';
}
} else if (c == NUL
&& (wp->w_p_list
|| ((fromcol >= 0 || fromcol_prev >= 0)
&& tocol > vcol
&& VIsual_mode != Ctrl_V
&& (wp->w_p_rl ? (col >= 0) : (col < grid->Columns))
&& !(noinvcur
&& lnum == wp->w_cursor.lnum
&& (colnr_T)vcol == wp->w_virtcol)))
&& lcs_eol_one > 0) {
// Display a '$' after the line or highlight an extra
// character if the line break is included.
// For a diff line the highlighting continues after the "$".
if (diff_hlf == (hlf_T)0
&& line_attr == 0
&& line_attr_lowprio == 0) {
// In virtualedit, visual selections may extend beyond end of line
if (area_highlighting && virtual_active()
&& tocol != MAXCOL && vcol < tocol) {
n_extra = 0;
} else {
p_extra = at_end_str;
n_extra = 1;
c_extra = NUL;
c_final = NUL;
}
}
if (wp->w_p_list && wp->w_p_lcs_chars.eol > 0) {
c = wp->w_p_lcs_chars.eol;
} else {
c = ' ';
}
lcs_eol_one = -1;
ptr--; // put it back at the NUL
extra_attr = win_hl_attr(wp, HLF_AT);
n_attr = 1;
mb_c = c;
if (utf_char2len(c) > 1) {
mb_utf8 = true;
u8cc[0] = 0;
c = 0xc0;
} else {
mb_utf8 = false; // don't draw as UTF-8
}
} else if (c != NUL) {
p_extra = transchar(c);
if (n_extra == 0) {
n_extra = byte2cells(c) - 1;
}
if ((dy_flags & DY_UHEX) && wp->w_p_rl)
rl_mirror(p_extra); /* reverse "<12>" */
c_extra = NUL;
c_final = NUL;
if (wp->w_p_lbr) {
char_u *p;
c = *p_extra;
p = xmalloc(n_extra + 1);
memset(p, ' ', n_extra);
STRNCPY(p, p_extra + 1, STRLEN(p_extra) - 1);
p[n_extra] = NUL;
xfree(p_extra_free);
p_extra_free = p_extra = p;
} else {
n_extra = byte2cells(c) - 1;
c = *p_extra++;
}
n_attr = n_extra + 1;
extra_attr = win_hl_attr(wp, HLF_8);
saved_attr2 = char_attr; // save current attr
mb_utf8 = false; // don't draw as UTF-8
} else if (VIsual_active
&& (VIsual_mode == Ctrl_V || VIsual_mode == 'v')
&& virtual_active()
&& tocol != MAXCOL
&& vcol < tocol
&& (wp->w_p_rl ? (col >= 0) : (col < grid->Columns))) {
c = ' ';
ptr--; // put it back at the NUL
}
}
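// Conceal check: 'conceallevel' is active, we are not on the cursor line
// (unless 'concealcursor' covers the current mode), and either the syntax
// item or a ":match" requested concealing; for lines in the Visual area
// concealing is only done when 'concealcursor' contains 'v'.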
if (wp->w_p_cole > 0
&& (wp != curwin || lnum != wp->w_cursor.lnum
|| conceal_cursor_line(wp))
&& ((syntax_flags & HL_CONCEAL) != 0 || has_match_conc > 0)
&& !(lnum_in_visual_area
&& vim_strchr(wp->w_p_cocu, 'v') == NULL)) {
char_attr = conceal_attr;
if ((prev_syntax_id != syntax_seqnr || has_match_conc > 1)
&& (syn_get_sub_char() != NUL || match_conc
|| wp->w_p_cole == 1)
&& wp->w_p_cole != 3) {
// First time at this concealed item: display one
// character.
if (match_conc) {
c = match_conc;
} else if (syn_get_sub_char() != NUL) {
c = syn_get_sub_char();
} else if (wp->w_p_lcs_chars.conceal != NUL) {
c = wp->w_p_lcs_chars.conceal;
} else {
c = ' ';
}
prev_syntax_id = syntax_seqnr;
if (n_extra > 0)
vcol_off += n_extra;
vcol += n_extra;
if (wp->w_p_wrap && n_extra > 0) {
if (wp->w_p_rl) {
col -= n_extra;
boguscols -= n_extra;
} else {
boguscols += n_extra;
col += n_extra;
}
}
n_extra = 0;
n_attr = 0;
} else if (n_skip == 0) {
is_concealing = TRUE;
n_skip = 1;
}
mb_c = c;
if (utf_char2len(c) > 1) {
mb_utf8 = true;
u8cc[0] = 0;
c = 0xc0;
} else {
mb_utf8 = false; // don't draw as UTF-8
}
} else {
prev_syntax_id = 0;
is_concealing = FALSE;
}
if (n_skip > 0 && did_decrement_ptr) {
// not showing the '>', put pointer back to avoid getting stuck
ptr++;
}
}
/* In the cursor line and we may be concealing characters: correct
* the cursor column when we reach its position. */
if (!did_wcol && draw_state == WL_LINE
&& wp == curwin && lnum == wp->w_cursor.lnum
&& conceal_cursor_line(wp)
&& (int)wp->w_virtcol <= vcol + n_skip) {
if (wp->w_p_rl) {
wp->w_wcol = grid->Columns - col + boguscols - 1;
} else {
wp->w_wcol = col - boguscols;
}
wp->w_wrow = row;
did_wcol = true;
}
// Don't override visual selection highlighting.
if (n_attr > 0 && draw_state == WL_LINE && !search_attr_from_match) {
char_attr = hl_combine_attr(char_attr, extra_attr);
}
/*
* Handle the case where we are in column 0 but not on the first
* character of the line and the user wants us to show a
* special character (via the 'listchars' option "precedes:<char>").
*/
if (lcs_prec_todo != NUL
&& wp->w_p_list
&& (wp->w_p_wrap ? wp->w_skipcol > 0 : wp->w_leftcol > 0)
&& filler_todo <= 0
&& draw_state > WL_NR
&& c != NUL) {
c = wp->w_p_lcs_chars.prec;
lcs_prec_todo = NUL;
if ((*mb_char2cells)(mb_c) > 1) {
// Double-width character being overwritten by the "precedes"
// character, need to fill up half the character.
c_extra = MB_FILLER_CHAR;
c_final = NUL;
n_extra = 1;
n_attr = 2;
extra_attr = win_hl_attr(wp, HLF_AT);
}
mb_c = c;
if (utf_char2len(c) > 1) {
mb_utf8 = true;
u8cc[0] = 0;
c = 0xc0;
} else {
mb_utf8 = false; // don't draw as UTF-8
}
saved_attr3 = char_attr; // save current attr
char_attr = win_hl_attr(wp, HLF_AT); // overwriting char_attr
n_attr3 = 1;
}
/*
* At end of the text line or just after the last character.
*/
if (c == NUL) {
long prevcol = (long)(ptr - line) - (c == NUL);
/* we're not really at that column when skipping some text */
if ((long)(wp->w_p_wrap ? wp->w_skipcol : wp->w_leftcol) > prevcol)
++prevcol;
// Invert at least one char, used for Visual and empty line or
// highlight match at end of line. If it's beyond the last
// char on the screen, just overwrite that one (tricky!) Not
// needed when a '$' was displayed for 'list'.
prevcol_hl_flag = false;
if (!search_hl.is_addpos && prevcol == (long)search_hl.startcol) {
prevcol_hl_flag = true;
} else {
cur = wp->w_match_head;
while (cur != NULL) {
if (!cur->hl.is_addpos && prevcol == (long)cur->hl.startcol) {
prevcol_hl_flag = true;
break;
}
cur = cur->next;
}
}
if (wp->w_p_lcs_chars.eol == lcs_eol_one
&& ((area_attr != 0 && vcol == fromcol
&& (VIsual_mode != Ctrl_V
|| lnum == VIsual.lnum
|| lnum == curwin->w_cursor.lnum)
&& c == NUL)
// highlight 'hlsearch' match at end of line
|| prevcol_hl_flag)) {
int n = 0;
if (wp->w_p_rl) {
if (col < 0)
n = 1;
} else {
if (col >= grid->Columns) {
n = -1;
}
}
if (n != 0) {
/* At the window boundary, highlight the last character
* instead (better than nothing). */
off += n;
col += n;
} else {
// Add a blank character to highlight.
schar_from_ascii(linebuf_char[off], ' ');
}
if (area_attr == 0) {
/* Use attributes from match with highest priority among
* 'search_hl' and the match list. */
char_attr = search_hl.attr;
cur = wp->w_match_head;
shl_flag = FALSE;
while (cur != NULL || shl_flag == FALSE) {
if (shl_flag == FALSE
&& ((cur != NULL
&& cur->priority > SEARCH_HL_PRIORITY)
|| cur == NULL)) {
shl = &search_hl;
shl_flag = TRUE;
} else
shl = &cur->hl;
if ((ptr - line) - 1 == (long)shl->startcol
&& (shl == &search_hl || !shl->is_addpos)) {
char_attr = shl->attr;
}
if (shl != &search_hl && cur != NULL) {
cur = cur->next;
}
}
}
int eol_attr = char_attr;
if (wp->w_p_cul && lnum == wp->w_cursor.lnum) {
eol_attr = hl_combine_attr(win_hl_attr(wp, HLF_CUL), eol_attr);
}
linebuf_attr[off] = eol_attr;
if (wp->w_p_rl) {
--col;
--off;
} else {
++col;
++off;
}
++vcol;
eol_hl_off = 1;
}
// Highlight 'cursorcolumn' & 'colorcolumn' past end of the line.
if (wp->w_p_wrap) {
v = wp->w_skipcol;
} else {
v = wp->w_leftcol;
}
/* check if line ends before left margin */
if (vcol < v + col - win_col_off(wp))
vcol = v + col - win_col_off(wp);
/* Get rid of the boguscols now, we want to draw until the right
* edge for 'cursorcolumn'. */
col -= boguscols;
// boguscols = 0; // Disabled because value never read after this
if (draw_color_col)
draw_color_col = advance_color_col(VCOL_HLC, &color_cols);
if (((wp->w_p_cuc
&& (int)wp->w_virtcol >= VCOL_HLC - eol_hl_off
&& (int)wp->w_virtcol <
grid->Columns * (row - startrow + 1) + v
&& lnum != wp->w_cursor.lnum)
|| draw_color_col || line_attr_lowprio || line_attr
|| diff_hlf != (hlf_T)0 || do_virttext)) {
int rightmost_vcol = 0;
int i;
VirtText virt_text = do_virttext ? bufhl_info.line->virt_text
: (VirtText)KV_INITIAL_VALUE;
size_t virt_pos = 0;
LineState s = LINE_STATE((char_u *)"");
int virt_attr = 0;
// Make sure alignment is the same regardless
// if listchars=eol:X is used or not.
bool delay_virttext = wp->w_p_lcs_chars.eol == lcs_eol_one
&& eol_hl_off == 0;
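// The virtual text is drawn cell by cell below: each chunk in "virt_text"
// is consumed through "s"/line_putchar(), and its highlight id is turned
// into "virt_attr" and combined with the column attribute.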
if (wp->w_p_cuc) {
rightmost_vcol = wp->w_virtcol;
}
if (draw_color_col) {
// determine rightmost colorcolumn to possibly draw
for (i = 0; color_cols[i] >= 0; i++) {
if (rightmost_vcol < color_cols[i]) {
rightmost_vcol = color_cols[i];
}
}
}
int cuc_attr = win_hl_attr(wp, HLF_CUC);
int mc_attr = win_hl_attr(wp, HLF_MC);
int diff_attr = 0;
if (diff_hlf == HLF_TXD) {
diff_hlf = HLF_CHD;
}
if (diff_hlf != 0) {
diff_attr = win_hl_attr(wp, diff_hlf);
}
int base_attr = hl_combine_attr(line_attr_lowprio, diff_attr);
if (base_attr || line_attr) {
rightmost_vcol = INT_MAX;
}
int col_stride = wp->w_p_rl ? -1 : 1;
while (wp->w_p_rl ? col >= 0 : col < grid->Columns) {
int cells = -1;
if (do_virttext && !delay_virttext) {
if (*s.p == NUL) {
if (virt_pos < virt_text.size) {
s.p = (char_u *)kv_A(virt_text, virt_pos).text;
int hl_id = kv_A(virt_text, virt_pos).hl_id;
virt_attr = hl_id > 0 ? syn_id2attr(hl_id) : 0;
virt_pos++;
} else {
do_virttext = false;
}
}
if (*s.p != NUL) {
cells = line_putchar(&s, &linebuf_char[off], grid->Columns - col,
false);
}
}
delay_virttext = false;
if (cells == -1) {
schar_from_ascii(linebuf_char[off], ' ');
cells = 1;
}
col += cells * col_stride;
if (draw_color_col) {
draw_color_col = advance_color_col(VCOL_HLC, &color_cols);
}
int col_attr = base_attr;
if (wp->w_p_cuc && VCOL_HLC == (long)wp->w_virtcol) {
col_attr = cuc_attr;
} else if (draw_color_col && VCOL_HLC == *color_cols) {
col_attr = mc_attr;
}
if (do_virttext) {
col_attr = hl_combine_attr(col_attr, virt_attr);
}
col_attr = hl_combine_attr(col_attr, line_attr);
linebuf_attr[off] = col_attr;
if (cells == 2) {
linebuf_attr[off+1] = col_attr;
}
off += cells * col_stride;
if (VCOL_HLC >= rightmost_vcol && *s.p == NUL
&& virt_pos >= virt_text.size) {
break;
}
++vcol;
}
}
// TODO(bfredl): integrate with the common beyond-the-end-loop
if (wp->w_buffer->terminal) {
// terminal buffers may need to highlight beyond the end of the
// logical line
while (col < grid->Columns) {
schar_from_ascii(linebuf_char[off], ' ');
linebuf_attr[off++] = term_attrs[vcol++];
col++;
}
}
grid_put_linebuf(grid, row, 0, col, grid->Columns, wp->w_p_rl, wp,
wp->w_hl_attr_normal, false);
row++;
/*
* Update w_cline_height and w_cline_folded if the cursor line was
* updated (saves a call to plines() later).
*/
if (wp == curwin && lnum == curwin->w_cursor.lnum) {
curwin->w_cline_row = startrow;
curwin->w_cline_height = row - startrow;
curwin->w_cline_folded = false;
curwin->w_valid |= (VALID_CHEIGHT|VALID_CROW);
conceal_cursor_used = conceal_cursor_line(curwin);
}
break;
}
// Show "extends" character from 'listchars' if beyond the line end and
// 'list' is set.
if (wp->w_p_lcs_chars.ext != NUL
&& wp->w_p_list
&& !wp->w_p_wrap
&& filler_todo <= 0
&& (wp->w_p_rl ? col == 0 : col == grid->Columns - 1)
&& (*ptr != NUL
|| (wp->w_p_list && lcs_eol_one > 0)
|| (n_extra && (c_extra != NUL || *p_extra != NUL)))) {
c = wp->w_p_lcs_chars.ext;
char_attr = win_hl_attr(wp, HLF_AT);
mb_c = c;
if (utf_char2len(c) > 1) {
mb_utf8 = true;
u8cc[0] = 0;
c = 0xc0;
} else {
mb_utf8 = false;
}
}
/* advance to the next 'colorcolumn' */
if (draw_color_col)
draw_color_col = advance_color_col(VCOL_HLC, &color_cols);
/* Highlight the cursor column if 'cursorcolumn' is set. But don't
* highlight the cursor position itself.
* Also highlight the 'colorcolumn' if it is different than
* 'cursorcolumn' */
vcol_save_attr = -1;
if (draw_state == WL_LINE && !lnum_in_visual_area
&& search_attr == 0 && area_attr == 0) {
if (wp->w_p_cuc && VCOL_HLC == (long)wp->w_virtcol
&& lnum != wp->w_cursor.lnum) {
vcol_save_attr = char_attr;
char_attr = hl_combine_attr(win_hl_attr(wp, HLF_CUC), char_attr);
} else if (draw_color_col && VCOL_HLC == *color_cols) {
vcol_save_attr = char_attr;
char_attr = hl_combine_attr(win_hl_attr(wp, HLF_MC), char_attr);
}
}
// Apply lowest-priority line attr now, so everything can override it.
if (draw_state == WL_LINE) {
char_attr = hl_combine_attr(line_attr_lowprio, char_attr);
}
/*
* Store character to be displayed.
* Skip characters that are left of the screen for 'nowrap'.
*/
vcol_prev = vcol;
if (draw_state < WL_LINE || n_skip <= 0) {
//
// Store the character.
//
if (wp->w_p_rl && (*mb_char2cells)(mb_c) > 1) {
// A double-wide character: put the first half in the left cell.
off--;
col--;
}
if (mb_utf8) {
schar_from_cc(linebuf_char[off], mb_c, u8cc);
} else {
schar_from_ascii(linebuf_char[off], c);
}
if (multi_attr) {
linebuf_attr[off] = multi_attr;
multi_attr = 0;
} else {
linebuf_attr[off] = char_attr;
}
if ((*mb_char2cells)(mb_c) > 1) {
// Need to fill two screen columns.
off++;
col++;
// UTF-8: Put a 0 in the second screen char.
linebuf_char[off][0] = 0;
if (draw_state > WL_NR && filler_todo <= 0) {
vcol++;
}
// When "tocol" is halfway through a character, set it to the end of
// the character, otherwise highlighting won't stop.
if (tocol == vcol) {
tocol++;
}
if (wp->w_p_rl) {
/* now it's time to backup one cell */
--off;
--col;
}
}
if (wp->w_p_rl) {
--off;
--col;
} else {
++off;
++col;
}
} else if (wp->w_p_cole > 0 && is_concealing) {
--n_skip;
++vcol_off;
if (n_extra > 0)
vcol_off += n_extra;
if (wp->w_p_wrap) {
/*
* Special voodoo required if 'wrap' is on.
*
* Advance the column indicator to force the line
* drawing to wrap early. This will make the line
* take up the same screen space when parts are concealed,
* so that cursor line computations aren't messed up.
*
* To avoid the fictitious advance of 'col' causing
* trailing junk to be written out of the screen line
* we are building, 'boguscols' keeps track of the number
* of bad columns we have advanced.
*/
if (n_extra > 0) {
vcol += n_extra;
if (wp->w_p_rl) {
col -= n_extra;
boguscols -= n_extra;
} else {
col += n_extra;
boguscols += n_extra;
}
n_extra = 0;
n_attr = 0;
}
if ((*mb_char2cells)(mb_c) > 1) {
// Need to fill two screen columns.
if (wp->w_p_rl) {
--boguscols;
--col;
} else {
++boguscols;
++col;
}
}
if (wp->w_p_rl) {
--boguscols;
--col;
} else {
++boguscols;
++col;
}
} else {
if (n_extra > 0) {
vcol += n_extra;
n_extra = 0;
n_attr = 0;
}
}
} else
--n_skip;
/* Only advance the "vcol" when after the 'number' or 'relativenumber'
* column. */
if (draw_state > WL_NR
&& filler_todo <= 0
)
++vcol;
if (vcol_save_attr >= 0)
char_attr = vcol_save_attr;
/* restore attributes after "precedes" in 'listchars' */
if (draw_state > WL_NR && n_attr3 > 0 && --n_attr3 == 0)
char_attr = saved_attr3;
/* restore attributes after last 'listchars' or 'number' char */
if (n_attr > 0 && draw_state == WL_LINE && --n_attr == 0)
char_attr = saved_attr2;
/*
* At end of screen line and there is more to come: Display the line
* so far. If there is no more to display it is caught above.
*/
if ((wp->w_p_rl ? (col < 0) : (col >= grid->Columns))
&& (*ptr != NUL
|| filler_todo > 0
|| (wp->w_p_list && wp->w_p_lcs_chars.eol != NUL
&& p_extra != at_end_str)
|| (n_extra != 0 && (c_extra != NUL || *p_extra != NUL)))
) {
bool wrap = wp->w_p_wrap // Wrapping enabled.
&& filler_todo <= 0 // Not drawing diff filler lines.
&& lcs_eol_one != -1 // Haven't printed the lcs_eol character.
&& row != endrow - 1 // Not the last line being displayed.
&& (grid->Columns == Columns // Window spans the width of the screen,
|| ui_has(kUIMultigrid)) // or has dedicated grid.
&& !wp->w_p_rl; // Not right-to-left.
grid_put_linebuf(grid, row, 0, col - boguscols, grid->Columns, wp->w_p_rl,
wp, wp->w_hl_attr_normal, wrap);
if (wrap) {
ScreenGrid *current_grid = grid;
int current_row = row, dummy_col = 0; // dummy_col unused
screen_adjust_grid(¤t_grid, ¤t_row, &dummy_col);
// Force a redraw of the first column of the next line.
current_grid->attrs[current_grid->line_offset[current_row+1]] = -1;
// Remember that the line wraps, used for modeless copy.
current_grid->line_wraps[current_row] = true;
}
boguscols = 0;
row++;
/* When not wrapping and finished diff lines, or when displayed
* '$' and highlighting until last column, break here. */
if ((!wp->w_p_wrap
&& filler_todo <= 0
) || lcs_eol_one == -1)
break;
// When the window is too narrow draw all "@" lines.
if (draw_state != WL_LINE && filler_todo <= 0) {
win_draw_end(wp, '@', ' ', true, row, wp->w_grid.Rows, HLF_AT);
row = endrow;
}
/* When line got too long for screen break here. */
if (row == endrow) {
++row;
break;
}
col = 0;
off = 0;
if (wp->w_p_rl) {
col = grid->Columns - 1; // col is not used if breaking!
off += col;
}
/* reset the drawing state for the start of a wrapped line */
draw_state = WL_START;
saved_n_extra = n_extra;
saved_p_extra = p_extra;
saved_c_extra = c_extra;
saved_c_final = c_final;
saved_char_attr = char_attr;
n_extra = 0;
lcs_prec_todo = wp->w_p_lcs_chars.prec;
if (filler_todo <= 0) {
need_showbreak = true;
}
filler_todo--;
// When the filler lines are actually below the last line of the
// file, don't draw the line itself, break here.
if (filler_todo == 0 && wp->w_botfill) {
break;
}
}
} /* for every character in the line */
/* After an empty line check first word for capital. */
if (*skipwhite(line) == NUL) {
capcol_lnum = lnum + 1;
cap_col = 0;
}
xfree(p_extra_free);
return row;
}
/// Determine if dedicated window grid should be used or the default_grid
///
/// If UI did not request multigrid support, draw all windows on the
/// default_grid.
///
/// NB: this function can only be used with window grids in a context where
/// win_grid_alloc already has been called!
///
/// If the default_grid is used, adjust window relative positions to global
/// screen positions.
void screen_adjust_grid(ScreenGrid **grid, int *row_off, int *col_off)
{
if (!(*grid)->chars && *grid != &default_grid) {
*row_off += (*grid)->row_offset;
*col_off += (*grid)->col_offset;
*grid = &default_grid;
}
}
/*
* Check whether the given character needs redrawing:
* - the (first byte of the) character is different
* - the attributes are different
* - the character is multi-byte and the next byte is different
* - the character is two cells wide and the second cell differs.
*/
static int grid_char_needs_redraw(ScreenGrid *grid, int off_from, int off_to,
int cols)
{
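// "p_wd < 0" ('writedelay' set to a negative value) makes every character
// count as changed, forcing a full redraw of the line.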
return (cols > 0
&& ((schar_cmp(linebuf_char[off_from], grid->chars[off_to])
|| linebuf_attr[off_from] != grid->attrs[off_to]
|| (line_off2cells(linebuf_char, off_from, off_from + cols) > 1
&& schar_cmp(linebuf_char[off_from + 1],
grid->chars[off_to + 1])))
|| p_wd < 0));
}
/// Move one buffered line to the window grid, but only the characters that
/// have actually changed. Handle insert/delete character.
/// "coloff" gives the first column on the grid for this line.
/// "endcol" gives the columns where valid characters are.
/// "clear_width" is the width of the window. It's > 0 if the rest of the line
/// needs to be cleared, negative otherwise.
/// "rlflag" is TRUE in a rightleft window:
/// When TRUE and "clear_width" > 0, clear columns 0 to "endcol"
/// When FALSE and "clear_width" > 0, clear columns "endcol" to "clear_width"
/// If "wrap" is true, then hint to the UI that "row" contains a line
/// which has wrapped into the next row.
static void grid_put_linebuf(ScreenGrid *grid, int row, int coloff, int endcol,
int clear_width, int rlflag, win_T *wp,
int bg_attr, bool wrap)
{
unsigned off_from;
unsigned off_to;
unsigned max_off_from;
unsigned max_off_to;
int col = 0;
bool redraw_this; // Does character need redraw?
bool redraw_next; // redraw_this for next character
bool clear_next = false;
int char_cells; // 1: normal char
// 2: occupies two display cells
int start_dirty = -1, end_dirty = 0;
// TODO(bfredl): check all callsites and eliminate
// Check for illegal row and col, just in case
if (row >= grid->Rows) {
row = grid->Rows - 1;
}
if (endcol > grid->Columns) {
endcol = grid->Columns;
}
screen_adjust_grid(&grid, &row, &coloff);
off_from = 0;
off_to = grid->line_offset[row] + coloff;
max_off_from = linebuf_size;
max_off_to = grid->line_offset[row] + grid->Columns;
if (rlflag) {
/* Clear rest first, because it's left of the text. */
if (clear_width > 0) {
while (col <= endcol && grid->chars[off_to][0] == ' '
&& grid->chars[off_to][1] == NUL
&& grid->attrs[off_to] == bg_attr
) {
++off_to;
++col;
}
if (col <= endcol) {
grid_fill(grid, row, row + 1, col + coloff, endcol + coloff + 1,
' ', ' ', bg_attr);
}
}
col = endcol + 1;
off_to = grid->line_offset[row] + col + coloff;
off_from += col;
endcol = (clear_width > 0 ? clear_width : -clear_width);
}
if (bg_attr) {
for (int c = col; c < endcol; c++) {
linebuf_attr[off_from+c] =
hl_combine_attr(bg_attr, linebuf_attr[off_from+c]);
}
}
redraw_next = grid_char_needs_redraw(grid, off_from, off_to, endcol - col);
while (col < endcol) {
char_cells = 1;
if (col + 1 < endcol) {
char_cells = line_off2cells(linebuf_char, off_from, max_off_from);
}
redraw_this = redraw_next;
redraw_next = grid_char_needs_redraw(grid, off_from + char_cells,
off_to + char_cells,
endcol - col - char_cells);
if (redraw_this) {
if (start_dirty == -1) {
start_dirty = col;
}
end_dirty = col + char_cells;
// When writing a single-width character over a double-width
// character and at the end of the redrawn text, need to clear out
// the right half of the old character.
// Also required when writing the right half of a double-width
// char over the left half of an existing one.
if (col + char_cells == endcol
&& ((char_cells == 1
&& grid_off2cells(grid, off_to, max_off_to) > 1)
|| (char_cells == 2
&& grid_off2cells(grid, off_to, max_off_to) == 1
&& grid_off2cells(grid, off_to + 1, max_off_to) > 1))) {
clear_next = true;
}
schar_copy(grid->chars[off_to], linebuf_char[off_from]);
if (char_cells == 2) {
schar_copy(grid->chars[off_to+1], linebuf_char[off_from+1]);
}
grid->attrs[off_to] = linebuf_attr[off_from];
// For simplicity set the attributes of second half of a
// double-wide character equal to the first half.
if (char_cells == 2) {
grid->attrs[off_to + 1] = linebuf_attr[off_from];
}
}
off_to += char_cells;
off_from += char_cells;
col += char_cells;
}
if (clear_next) {
/* Clear the second half of a double-wide character of which the left
* half was overwritten with a single-wide character. */
schar_from_ascii(grid->chars[off_to], ' ');
end_dirty++;
}
int clear_end = -1;
if (clear_width > 0 && !rlflag) {
// blank out the rest of the line
// TODO(bfredl): we could cache winline widths
while (col < clear_width) {
if (grid->chars[off_to][0] != ' '
|| grid->chars[off_to][1] != NUL
|| grid->attrs[off_to] != bg_attr) {
grid->chars[off_to][0] = ' ';
grid->chars[off_to][1] = NUL;
grid->attrs[off_to] = bg_attr;
if (start_dirty == -1) {
start_dirty = col;
end_dirty = col;
} else if (clear_end == -1) {
end_dirty = endcol;
}
clear_end = col+1;
}
col++;
off_to++;
}
}
if (clear_width > 0 || wp->w_width != grid->Columns) {
// If we cleared after the end of the line, it did not wrap.
// For vsplit, line wrapping is not possible.
grid->line_wraps[row] = false;
}
if (clear_end < end_dirty) {
clear_end = end_dirty;
}
if (start_dirty == -1) {
start_dirty = end_dirty;
}
if (clear_end > start_dirty) {
ui_line(grid, row, coloff+start_dirty, coloff+end_dirty, coloff+clear_end,
bg_attr, wrap);
}
}
/*
* Mirror text "str" for right-left displaying.
* Only works for single-byte characters (e.g., numbers).
*/
void rl_mirror(char_u *str)
{
char_u *p1, *p2;
int t;
for (p1 = str, p2 = str + STRLEN(str) - 1; p1 < p2; ++p1, --p2) {
t = *p1;
*p1 = *p2;
*p2 = t;
}
}
/*
* mark all status lines for redraw; used after first :cd
*/
void status_redraw_all(void)
{
FOR_ALL_WINDOWS_IN_TAB(wp, curtab) {
if (wp->w_status_height) {
wp->w_redr_status = TRUE;
redraw_later(VALID);
}
}
}
/// Marks all status lines of the current buffer for redraw.
void status_redraw_curbuf(void)
{
status_redraw_buf(curbuf);
}
/// Marks all status lines of the specified buffer for redraw.
void status_redraw_buf(buf_T *buf)
{
FOR_ALL_WINDOWS_IN_TAB(wp, curtab) {
if (wp->w_status_height != 0 && wp->w_buffer == buf) {
wp->w_redr_status = true;
redraw_later(VALID);
}
}
}
/*
* Redraw all status lines that need to be redrawn.
*/
void redraw_statuslines(void)
{
FOR_ALL_WINDOWS_IN_TAB(wp, curtab) {
if (wp->w_redr_status) {
win_redr_status(wp);
}
}
if (redraw_tabline)
draw_tabline();
}
/*
* Redraw all status lines at the bottom of frame "frp".
*/
void win_redraw_last_status(frame_T *frp)
{
if (frp->fr_layout == FR_LEAF)
frp->fr_win->w_redr_status = TRUE;
else if (frp->fr_layout == FR_ROW) {
for (frp = frp->fr_child; frp != NULL; frp = frp->fr_next)
win_redraw_last_status(frp);
} else { /* frp->fr_layout == FR_COL */
frp = frp->fr_child;
while (frp->fr_next != NULL)
frp = frp->fr_next;
win_redraw_last_status(frp);
}
}
/*
* Draw the vertical separator right of window "wp" starting with line "row".
*/
static void draw_vsep_win(win_T *wp, int row)
{
int hl;
int c;
if (wp->w_vsep_width) {
// draw the vertical separator right of this window
c = fillchar_vsep(wp, &hl);
grid_fill(&default_grid, wp->w_winrow + row, W_ENDROW(wp),
W_ENDCOL(wp), W_ENDCOL(wp) + 1, c, ' ', hl);
}
}
/*
* Get the length of an item as it will be shown in the status line.
*/
static int status_match_len(expand_T *xp, char_u *s)
{
int len = 0;
int emenu = (xp->xp_context == EXPAND_MENUS
|| xp->xp_context == EXPAND_MENUNAMES);
/* Check for menu separators - replace with '|'. */
if (emenu && menu_is_separator(s))
return 1;
while (*s != NUL) {
s += skip_status_match_char(xp, s);
len += ptr2cells(s);
MB_PTR_ADV(s);
}
return len;
}
/*
* Return the number of characters that should be skipped in a status match.
* These are backslashes used for escaping. Do show backslashes in help tags.
*/
static int skip_status_match_char(expand_T *xp, char_u *s)
{
if ((rem_backslash(s) && xp->xp_context != EXPAND_HELP)
|| ((xp->xp_context == EXPAND_MENUS
|| xp->xp_context == EXPAND_MENUNAMES)
&& (s[0] == '\t' || (s[0] == '\\' && s[1] != NUL)))
) {
#ifndef BACKSLASH_IN_FILENAME
if (xp->xp_shell && csh_like_shell() && s[1] == '\\' && s[2] == '!')
return 2;
#endif
return 1;
}
return 0;
}
/*
* Show wildchar matches in the status line.
* Show at least the "match" item.
* We start at item 'first_match' in the list and show all matches that fit.
*
* If inversion is possible we use it. Else '=' characters are used.
*/
void
win_redr_status_matches (
expand_T *xp,
int num_matches,
char_u **matches, /* list of matches */
int match,
int showtail
)
{
#define L_MATCH(m) (showtail ? sm_gettail(matches[m]) : matches[m])
int row;
char_u *buf;
int len;
int clen; /* length in screen cells */
int fillchar;
int attr;
int i;
int highlight = TRUE;
char_u *selstart = NULL;
int selstart_col = 0;
char_u *selend = NULL;
static int first_match = 0;
int add_left = FALSE;
char_u *s;
int emenu;
int l;
if (matches == NULL) /* interrupted completion? */
return;
buf = xmalloc(Columns * MB_MAXBYTES + 1);
if (match == -1) { /* don't show match but original text */
match = 0;
highlight = FALSE;
}
/* count 1 for the ending ">" */
clen = status_match_len(xp, L_MATCH(match)) + 3;
if (match == 0)
first_match = 0;
else if (match < first_match) {
/* jumping left, as far as we can go */
first_match = match;
add_left = TRUE;
} else {
/* check if match fits on the screen */
for (i = first_match; i < match; ++i)
clen += status_match_len(xp, L_MATCH(i)) + 2;
if (first_match > 0)
clen += 2;
// jumping right, put match at the left
if ((long)clen > Columns) {
first_match = match;
/* if showing the last match, we can add some on the left */
clen = 2;
for (i = match; i < num_matches; ++i) {
clen += status_match_len(xp, L_MATCH(i)) + 2;
if ((long)clen >= Columns) {
break;
}
}
if (i == num_matches)
add_left = TRUE;
}
}
if (add_left)
while (first_match > 0) {
clen += status_match_len(xp, L_MATCH(first_match - 1)) + 2;
if ((long)clen >= Columns) {
break;
}
first_match--;
}
fillchar = fillchar_status(&attr, curwin);
if (first_match == 0) {
*buf = NUL;
len = 0;
} else {
STRCPY(buf, "< ");
len = 2;
}
clen = len;
i = first_match;
while ((long)(clen + status_match_len(xp, L_MATCH(i)) + 2) < Columns) {
if (i == match) {
selstart = buf + len;
selstart_col = clen;
}
s = L_MATCH(i);
/* Check for menu separators - replace with '|' */
emenu = (xp->xp_context == EXPAND_MENUS
|| xp->xp_context == EXPAND_MENUNAMES);
if (emenu && menu_is_separator(s)) {
STRCPY(buf + len, transchar('|'));
l = (int)STRLEN(buf + len);
len += l;
clen += l;
} else
for (; *s != NUL; ++s) {
s += skip_status_match_char(xp, s);
clen += ptr2cells(s);
if ((l = (*mb_ptr2len)(s)) > 1) {
STRNCPY(buf + len, s, l); // NOLINT(runtime/printf)
s += l - 1;
len += l;
} else {
STRCPY(buf + len, transchar_byte(*s));
len += (int)STRLEN(buf + len);
}
}
if (i == match)
selend = buf + len;
*(buf + len++) = ' ';
*(buf + len++) = ' ';
clen += 2;
if (++i == num_matches)
break;
}
if (i != num_matches) {
*(buf + len++) = '>';
++clen;
}
buf[len] = NUL;
row = cmdline_row - 1;
if (row >= 0) {
if (wild_menu_showing == 0 || wild_menu_showing == WM_LIST) {
if (msg_scrolled > 0) {
/* Put the wildmenu just above the command line. If there is
* no room, scroll the screen one line up. */
if (cmdline_row == Rows - 1) {
msg_scroll_up();
msg_scrolled++;
} else {
cmdline_row++;
row++;
}
wild_menu_showing = WM_SCROLLED;
} else {
/* Create status line if needed by setting 'laststatus' to 2.
* Set 'winminheight' to zero to avoid that the window is
* resized. */
if (lastwin->w_status_height == 0) {
save_p_ls = p_ls;
save_p_wmh = p_wmh;
p_ls = 2;
p_wmh = 0;
last_status(FALSE);
}
wild_menu_showing = WM_SHOWN;
}
}
grid_puts(&default_grid, buf, row, 0, attr);
if (selstart != NULL && highlight) {
*selend = NUL;
grid_puts(&default_grid, selstart, row, selstart_col, HL_ATTR(HLF_WM));
}
grid_fill(&default_grid, row, row + 1, clen, (int)Columns,
fillchar, fillchar, attr);
}
win_redraw_last_status(topframe);
xfree(buf);
}
/// Redraw the status line of window `wp`.
///
/// If inversion is possible we use it. Else '=' characters are used.
static void win_redr_status(win_T *wp)
{
int row;
char_u *p;
int len;
int fillchar;
int attr;
int this_ru_col;
static int busy = FALSE;
// May get here recursively when 'statusline' (indirectly)
// invokes ":redrawstatus". Simply ignore the call then.
if (busy
// Also ignore if wildmenu is showing.
|| (wild_menu_showing != 0 && !ui_has(kUIWildmenu))) {
return;
}
busy = true;
wp->w_redr_status = FALSE;
if (wp->w_status_height == 0) {
// no status line, can only be last window
redraw_cmdline = true;
} else if (!redrawing()) {
// Don't redraw right now, do it later. Don't update status line when
// popup menu is visible and may be drawn over it
wp->w_redr_status = true;
} else if (*p_stl != NUL || *wp->w_p_stl != NUL) {
/* redraw custom status line */
redraw_custom_statusline(wp);
} else {
fillchar = fillchar_status(&attr, wp);
get_trans_bufname(wp->w_buffer);
p = NameBuff;
len = (int)STRLEN(p);
if (bt_help(wp->w_buffer)
|| wp->w_p_pvw
|| bufIsChanged(wp->w_buffer)
|| wp->w_buffer->b_p_ro) {
*(p + len++) = ' ';
}
if (bt_help(wp->w_buffer)) {
STRCPY(p + len, _("[Help]"));
len += (int)STRLEN(p + len);
}
if (wp->w_p_pvw) {
STRCPY(p + len, _("[Preview]"));
len += (int)STRLEN(p + len);
}
if (bufIsChanged(wp->w_buffer)) {
STRCPY(p + len, "[+]");
len += 3;
}
if (wp->w_buffer->b_p_ro) {
STRCPY(p + len, _("[RO]"));
// len += (int)STRLEN(p + len); // dead assignment
}
this_ru_col = ru_col - (Columns - wp->w_width);
if (this_ru_col < (wp->w_width + 1) / 2) {
this_ru_col = (wp->w_width + 1) / 2;
}
if (this_ru_col <= 1) {
p = (char_u *)"<"; // No room for file name!
len = 1;
} else {
int clen = 0, i;
// Count total number of display cells.
clen = (int)mb_string2cells(p);
// Find first character that will fit.
// Going from start to end is much faster for DBCS.
for (i = 0; p[i] != NUL && clen >= this_ru_col - 1;
i += utfc_ptr2len(p + i)) {
clen -= utf_ptr2cells(p + i);
}
len = clen;
if (i > 0) {
p = p + i - 1;
*p = '<';
++len;
}
}
row = W_ENDROW(wp);
grid_puts(&default_grid, p, row, wp->w_wincol, attr);
grid_fill(&default_grid, row, row + 1, len + wp->w_wincol,
this_ru_col + wp->w_wincol, fillchar, fillchar, attr);
if (get_keymap_str(wp, (char_u *)"<%s>", NameBuff, MAXPATHL)
&& this_ru_col - len > (int)(STRLEN(NameBuff) + 1))
grid_puts(&default_grid, NameBuff, row,
(int)(this_ru_col - STRLEN(NameBuff) - 1), attr);
win_redr_ruler(wp, TRUE);
}
/*
* May need to draw the character below the vertical separator.
*/
if (wp->w_vsep_width != 0 && wp->w_status_height != 0 && redrawing()) {
if (stl_connected(wp)) {
fillchar = fillchar_status(&attr, wp);
} else {
fillchar = fillchar_vsep(wp, &attr);
}
grid_putchar(&default_grid, fillchar, W_ENDROW(wp), W_ENDCOL(wp), attr);
}
busy = FALSE;
}
/*
* Redraw the status line according to 'statusline' and take care of any
* errors encountered.
*/
static void redraw_custom_statusline(win_T *wp)
{
static int entered = false;
int saved_did_emsg = did_emsg;
/* When called recursively return. This can happen when the statusline
* contains an expression that triggers a redraw. */
if (entered)
return;
entered = TRUE;
did_emsg = false;
win_redr_custom(wp, false);
if (did_emsg) {
// When there is an error disable the statusline, otherwise the
// display is messed up with errors and a redraw triggers the problem
// again and again.
set_string_option_direct((char_u *)"statusline", -1,
(char_u *)"", OPT_FREE | (*wp->w_p_stl != NUL
? OPT_LOCAL : OPT_GLOBAL), SID_ERROR);
}
did_emsg |= saved_did_emsg;
entered = false;
}
/*
* Return TRUE if the status line of window "wp" is connected to the status
* line of the window right of it. If not, then it's a vertical separator.
* Only call if (wp->w_vsep_width != 0).
*/
int stl_connected(win_T *wp)
{
frame_T *fr;
fr = wp->w_frame;
while (fr->fr_parent != NULL) {
if (fr->fr_parent->fr_layout == FR_COL) {
if (fr->fr_next != NULL)
break;
} else {
if (fr->fr_next != NULL)
return TRUE;
}
fr = fr->fr_parent;
}
return FALSE;
}
/*
* Get the value to show for the language mappings, active 'keymap'.
*/
int
get_keymap_str (
win_T *wp,
char_u *fmt, // format string containing one %s item
char_u *buf, // buffer for the result
int len // length of buffer
)
{
char_u *p;
if (wp->w_buffer->b_p_iminsert != B_IMODE_LMAP)
return FALSE;
{
buf_T *old_curbuf = curbuf;
win_T *old_curwin = curwin;
char_u *s;
curbuf = wp->w_buffer;
curwin = wp;
STRCPY(buf, "b:keymap_name"); /* must be writable */
++emsg_skip;
s = p = eval_to_string(buf, NULL, FALSE);
--emsg_skip;
curbuf = old_curbuf;
curwin = old_curwin;
if (p == NULL || *p == NUL) {
if (wp->w_buffer->b_kmap_state & KEYMAP_LOADED) {
p = wp->w_buffer->b_p_keymap;
} else {
p = (char_u *)"lang";
}
}
if (vim_snprintf((char *)buf, len, (char *)fmt, p) > len - 1) {
buf[0] = NUL;
}
xfree(s);
}
return buf[0] != NUL;
}
/*
* Redraw the status line or ruler of window "wp".
* When "wp" is NULL redraw the tab pages line from 'tabline'.
*/
static void
win_redr_custom (
win_T *wp,
int draw_ruler /* TRUE or FALSE */
)
{
static int entered = FALSE;
int attr;
int curattr;
int row;
int col = 0;
int maxwidth;
int width;
int n;
int len;
int fillchar;
char_u buf[MAXPATHL];
char_u *stl;
char_u *p;
struct stl_hlrec hltab[STL_MAX_ITEM];
StlClickRecord tabtab[STL_MAX_ITEM];
int use_sandbox = false;
win_T *ewp;
int p_crb_save;
/* There is a tiny chance that this gets called recursively: When
* redrawing a status line triggers redrawing the ruler or tabline.
* Avoid trouble by not allowing recursion. */
if (entered)
return;
entered = TRUE;
/* setup environment for the task at hand */
if (wp == NULL) {
/* Use 'tabline'. Always at the first line of the screen. */
stl = p_tal;
row = 0;
fillchar = ' ';
attr = HL_ATTR(HLF_TPF);
maxwidth = Columns;
use_sandbox = was_set_insecurely((char_u *)"tabline", 0);
} else {
row = W_ENDROW(wp);
fillchar = fillchar_status(&attr, wp);
maxwidth = wp->w_width;
if (draw_ruler) {
stl = p_ruf;
/* advance past any leading group spec - implicit in ru_col */
if (*stl == '%') {
if (*++stl == '-')
stl++;
if (atoi((char *)stl))
while (ascii_isdigit(*stl))
stl++;
if (*stl++ != '(')
stl = p_ruf;
}
col = ru_col - (Columns - wp->w_width);
if (col < (wp->w_width + 1) / 2) {
col = (wp->w_width + 1) / 2;
}
maxwidth = wp->w_width - col;
if (!wp->w_status_height) {
row = Rows - 1;
maxwidth--; // writing in last column may cause scrolling
fillchar = ' ';
attr = 0;
}
use_sandbox = was_set_insecurely((char_u *)"rulerformat", 0);
} else {
if (*wp->w_p_stl != NUL)
stl = wp->w_p_stl;
else
stl = p_stl;
use_sandbox = was_set_insecurely((char_u *)"statusline",
*wp->w_p_stl == NUL ? 0 : OPT_LOCAL);
}
col += wp->w_wincol;
}
if (maxwidth <= 0)
goto theend;
/* Temporarily reset 'cursorbind', we don't want a side effect from moving
* the cursor away and back. */
ewp = wp == NULL ? curwin : wp;
p_crb_save = ewp->w_p_crb;
ewp->w_p_crb = FALSE;
/* Make a copy, because the statusline may include a function call that
* might change the option value and free the memory. */
stl = vim_strsave(stl);
width = build_stl_str_hl(ewp, buf, sizeof(buf),
stl, use_sandbox,
fillchar, maxwidth, hltab, tabtab);
xfree(stl);
ewp->w_p_crb = p_crb_save;
// Make all characters printable.
p = (char_u *)transstr((const char *)buf);
len = STRLCPY(buf, p, sizeof(buf));
len = (size_t)len < sizeof(buf) ? len : (int)sizeof(buf) - 1;
xfree(p);
/* fill up with "fillchar" */
while (width < maxwidth && len < (int)sizeof(buf) - 1) {
len += utf_char2bytes(fillchar, buf + len);
width++;
}
buf[len] = NUL;
/*
* Draw each snippet with the specified highlighting.
*/
grid_puts_line_start(&default_grid, row);
curattr = attr;
p = buf;
for (n = 0; hltab[n].start != NULL; n++) {
int textlen = (int)(hltab[n].start - p);
grid_puts_len(&default_grid, p, textlen, row, col, curattr);
col += vim_strnsize(p, textlen);
p = hltab[n].start;
if (hltab[n].userhl == 0)
curattr = attr;
else if (hltab[n].userhl < 0)
curattr = syn_id2attr(-hltab[n].userhl);
else if (wp != NULL && wp != curwin && wp->w_status_height != 0)
curattr = highlight_stlnc[hltab[n].userhl - 1];
else
curattr = highlight_user[hltab[n].userhl - 1];
}
// Make sure to use an empty string instead of p, if p is beyond buf + len.
grid_puts(&default_grid, p >= buf + len ? (char_u *)"" : p, row, col,
curattr);
grid_puts_line_flush(false);
if (wp == NULL) {
// Fill the tab_page_click_defs array for clicking in the tab pages line.
col = 0;
len = 0;
p = buf;
StlClickDefinition cur_click_def = {
.type = kStlClickDisabled,
};
for (n = 0; tabtab[n].start != NULL; n++) {
len += vim_strnsize(p, (int)(tabtab[n].start - (char *) p));
while (col < len) {
tab_page_click_defs[col++] = cur_click_def;
}
p = (char_u *) tabtab[n].start;
cur_click_def = tabtab[n].def;
}
while (col < Columns) {
tab_page_click_defs[col++] = cur_click_def;
}
}
theend:
entered = FALSE;
}
// Low-level functions to manipulate individual character cells on the
// screen grid.
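// A cell (schar_T) stores the UTF-8 bytes of the base character, optionally
// followed by up to MAX_MCO composing characters, terminated by a NUL byte.
// Minimal sketch (illustrative, using the helpers defined below):
//
//   schar_T cell;
//   schar_from_ascii(cell, 'x');    // cell = { 'x', NUL }
//   schar_from_char(cell, 0x00e9);  // "é" encoded as UTF-8, then NUL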
/// Put an ASCII character in a screen cell.
static void schar_from_ascii(char_u *p, const char c)
{
p[0] = c;
p[1] = 0;
}
/// Put a unicode character in a screen cell.
static int schar_from_char(char_u *p, int c)
{
int len = utf_char2bytes(c, p);
p[len] = NUL;
return len;
}
/// Put a unicode char, and up to MAX_MCO composing chars, in a screen cell.
static int schar_from_cc(char_u *p, int c, int u8cc[MAX_MCO])
{
int len = utf_char2bytes(c, p);
for (int i = 0; i < MAX_MCO; i++) {
if (u8cc[i] == 0) {
break;
}
len += utf_char2bytes(u8cc[i], p + len);
}
p[len] = 0;
return len;
}
/// compare the contents of two screen cells.
static int schar_cmp(char_u *sc1, char_u *sc2)
{
return STRNCMP(sc1, sc2, sizeof(schar_T));
}
/// copy the contents of screen cell `sc2` into cell `sc1`
static void schar_copy(char_u *sc1, char_u *sc2)
{
STRLCPY(sc1, sc2, sizeof(schar_T));
}
static int line_off2cells(schar_T *line, size_t off, size_t max_off)
{
return (off + 1 < max_off && line[off + 1][0] == 0) ? 2 : 1;
}
/// Return number of display cells for char at grid->chars[off].
/// We make sure that the offset used is less than "max_off".
static int grid_off2cells(ScreenGrid *grid, size_t off, size_t max_off)
{
return line_off2cells(grid->chars, off, max_off);
}
/// Return true if the character at "row"/"col" on the screen is the left side
/// of a double-width character.
///
/// Caller must make sure "row" and "col" are not invalid!
bool grid_lefthalve(ScreenGrid *grid, int row, int col)
{
screen_adjust_grid(&grid, &row, &col);
return grid_off2cells(grid, grid->line_offset[row] + col,
grid->line_offset[row] + grid->Columns) > 1;
}
/// Correct a position on the screen, if it's the right half of a double-wide
/// char move it to the left half. Returns the corrected column.
int grid_fix_col(ScreenGrid *grid, int col, int row)
{
int coloff = 0;
screen_adjust_grid(&grid, &row, &coloff);
col += coloff;
if (grid->chars != NULL && col > 0
&& grid->chars[grid->line_offset[row] + col][0] == 0) {
return col - 1 - coloff;
}
return col - coloff;
}
/// output a single character directly to the grid
void grid_putchar(ScreenGrid *grid, int c, int row, int col, int attr)
{
char_u buf[MB_MAXBYTES + 1];
buf[utf_char2bytes(c, buf)] = NUL;
grid_puts(grid, buf, row, col, attr);
}
/// get a single character directly from grid.chars into "bytes[]".
/// Also return its attribute in *attrp.
void grid_getbytes(ScreenGrid *grid, int row, int col, char_u *bytes,
int *attrp)
{
unsigned off;
screen_adjust_grid(&grid, &row, &col);
// safety check
if (grid->chars != NULL && row < grid->Rows && col < grid->Columns) {
off = grid->line_offset[row] + col;
*attrp = grid->attrs[off];
schar_copy(bytes, grid->chars[off]);
}
}
/// put string '*text' on the window grid at position 'row' and 'col', with
/// attributes 'attr', and update chars[] and attrs[].
/// Note: only outputs within one row, message is truncated at grid boundary!
/// Note: if grid, row and/or col is invalid, nothing is done.
void grid_puts(ScreenGrid *grid, char_u *text, int row, int col, int attr)
{
grid_puts_len(grid, text, -1, row, col, attr);
}
static ScreenGrid *put_dirty_grid = NULL;
static int put_dirty_row = -1;
static int put_dirty_first = INT_MAX;
static int put_dirty_last = 0;
/// Start a group of grid_puts_len calls that builds a single grid line.
///
/// Must be matched with a grid_puts_line_flush call before moving to
/// another line.
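///
/// Typical usage (sketch, mirroring win_redr_custom()):
///
///   grid_puts_line_start(&default_grid, row);
///   grid_puts_len(&default_grid, text, textlen, row, col, attr);
///   // ... more grid_puts_len() calls for the same row ...
///   grid_puts_line_flush(false);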
void grid_puts_line_start(ScreenGrid *grid, int row)
{
assert(put_dirty_row == -1);
put_dirty_row = row;
put_dirty_grid = grid;
}
/// like grid_puts(), but output "text[len]". When "len" is -1 output up to
/// a NUL.
void grid_puts_len(ScreenGrid *grid, char_u *text, int textlen, int row,
int col, int attr)
{
unsigned off;
char_u *ptr = text;
int len = textlen;
int c;
unsigned max_off;
int mbyte_blen = 1;
int mbyte_cells = 1;
int u8c = 0;
int u8cc[MAX_MCO];
int clear_next_cell = FALSE;
int prev_c = 0; /* previous Arabic character */
int pc, nc, nc1;
int pcc[MAX_MCO];
int need_redraw;
bool do_flush = false;
screen_adjust_grid(&grid, &row, &col);
// safety check
if (grid->chars == NULL || row >= grid->Rows || col >= grid->Columns) {
return;
}
if (put_dirty_row == -1) {
grid_puts_line_start(grid, row);
do_flush = true;
} else {
if (grid != put_dirty_grid || row != put_dirty_row) {
abort();
}
}
off = grid->line_offset[row] + col;
/* When drawing over the right half of a double-wide char clear out the
* left half. Only needed in a terminal. */
if (grid != &default_grid && col == 0 && grid_invalid_row(grid, row)) {
// redraw the previous cell, make it empty
put_dirty_first = -1;
put_dirty_last = MAX(put_dirty_last, 1);
}
max_off = grid->line_offset[row] + grid->Columns;
while (col < grid->Columns
&& (len < 0 || (int)(ptr - text) < len)
&& *ptr != NUL) {
c = *ptr;
// check if this is the first byte of a multibyte
if (len > 0) {
mbyte_blen = utfc_ptr2len_len(ptr, (int)((text + len) - ptr));
} else {
mbyte_blen = utfc_ptr2len(ptr);
}
if (len >= 0) {
u8c = utfc_ptr2char_len(ptr, u8cc, (int)((text + len) - ptr));
} else {
u8c = utfc_ptr2char(ptr, u8cc);
}
mbyte_cells = utf_char2cells(u8c);
if (p_arshape && !p_tbidi && arabic_char(u8c)) {
// Do Arabic shaping.
if (len >= 0 && (int)(ptr - text) + mbyte_blen >= len) {
// Past end of string to be displayed.
nc = NUL;
nc1 = NUL;
} else {
nc = utfc_ptr2char_len(ptr + mbyte_blen, pcc,
(int)((text + len) - ptr - mbyte_blen));
nc1 = pcc[0];
}
pc = prev_c;
prev_c = u8c;
u8c = arabic_shape(u8c, &c, &u8cc[0], nc, nc1, pc);
} else {
prev_c = u8c;
}
if (col + mbyte_cells > grid->Columns) {
// Only 1 cell left, but character requires 2 cells:
// display a '>' in the last column to avoid wrapping.
c = '>';
mbyte_cells = 1;
}
schar_T buf;
schar_from_cc(buf, u8c, u8cc);
need_redraw = schar_cmp(grid->chars[off], buf)
|| (mbyte_cells == 2 && grid->chars[off + 1][0] != 0)
|| grid->attrs[off] != attr
|| exmode_active;
if (need_redraw) {
// When at the end of the text and overwriting a two-cell
// character with a one-cell character, need to clear the next
// cell. Also when overwriting the left half of a two-cell char
// with the right half of a two-cell char. Do this only once
// (grid_off2cells() may return 2 on the right half).
if (clear_next_cell) {
clear_next_cell = false;
} else if ((len < 0 ? ptr[mbyte_blen] == NUL
: ptr + mbyte_blen >= text + len)
&& ((mbyte_cells == 1
&& grid_off2cells(grid, off, max_off) > 1)
|| (mbyte_cells == 2
&& grid_off2cells(grid, off, max_off) == 1
&& grid_off2cells(grid, off + 1, max_off) > 1))) {
clear_next_cell = true;
}
schar_copy(grid->chars[off], buf);
grid->attrs[off] = attr;
if (mbyte_cells == 2) {
grid->chars[off + 1][0] = 0;
grid->attrs[off + 1] = attr;
}
put_dirty_first = MIN(put_dirty_first, col);
put_dirty_last = MAX(put_dirty_last, col+mbyte_cells);
}
off += mbyte_cells;
col += mbyte_cells;
ptr += mbyte_blen;
if (clear_next_cell) {
// This only happens at the end, display one space next.
ptr = (char_u *)" ";
len = -1;
}
}
if (do_flush) {
grid_puts_line_flush(true);
}
}
/// End a group of grid_puts_len calls and send the screen buffer to the UI
/// layer.
///
/// @param set_cursor Move the visible cursor to the end of the changed region.
/// This is a workaround for not yet refactored code paths
/// and shouldn't be used in new code.
void grid_puts_line_flush(bool set_cursor)
{
assert(put_dirty_row != -1);
if (put_dirty_first < put_dirty_last) {
if (set_cursor) {
ui_grid_cursor_goto(put_dirty_grid->handle, put_dirty_row,
MIN(put_dirty_last, put_dirty_grid->Columns-1));
}
ui_line(put_dirty_grid, put_dirty_row, put_dirty_first, put_dirty_last,
put_dirty_last, 0, false);
put_dirty_first = INT_MAX;
put_dirty_last = 0;
}
put_dirty_row = -1;
put_dirty_grid = NULL;
}
/*
* Prepare for 'hlsearch' highlighting.
*/
static void start_search_hl(void)
{
if (p_hls && !no_hlsearch) {
last_pat_prog(&search_hl.rm);
// Set the time limit to 'redrawtime'.
search_hl.tm = profile_setlimit(p_rdt);
}
}
/*
* Clean up for 'hlsearch' highlighting.
*/
static void end_search_hl(void)
{
if (search_hl.rm.regprog != NULL) {
vim_regfree(search_hl.rm.regprog);
search_hl.rm.regprog = NULL;
}
}
/*
* Init for calling prepare_search_hl().
*/
static void init_search_hl(win_T *wp)
{
matchitem_T *cur;
/* Setup for match and 'hlsearch' highlighting. Disable any previous
* match */
cur = wp->w_match_head;
while (cur != NULL) {
cur->hl.rm = cur->match;
if (cur->hlg_id == 0)
cur->hl.attr = 0;
else
cur->hl.attr = syn_id2attr(cur->hlg_id);
cur->hl.buf = wp->w_buffer;
cur->hl.lnum = 0;
cur->hl.first_lnum = 0;
/* Set the time limit to 'redrawtime'. */
cur->hl.tm = profile_setlimit(p_rdt);
cur = cur->next;
}
search_hl.buf = wp->w_buffer;
search_hl.lnum = 0;
search_hl.first_lnum = 0;
search_hl.attr = win_hl_attr(wp, HLF_L);
// time limit is set at the toplevel, for all windows
}
/*
* Advance to the match in window "wp" line "lnum" or past it.
*/
static void prepare_search_hl(win_T *wp, linenr_T lnum)
{
matchitem_T *cur; /* points to the match list */
match_T *shl; /* points to search_hl or a match */
int shl_flag; /* flag to indicate whether search_hl
has been processed or not */
int n;
/*
* When using a multi-line pattern, start searching at the top
* of the window or just after a closed fold.
* Do this both for search_hl and the match list.
*/
cur = wp->w_match_head;
shl_flag = false;
while (cur != NULL || shl_flag == false) {
if (shl_flag == false) {
shl = &search_hl;
shl_flag = true;
} else {
shl = &cur->hl; // -V595
}
if (shl->rm.regprog != NULL
&& shl->lnum == 0
&& re_multiline(shl->rm.regprog)) {
if (shl->first_lnum == 0) {
for (shl->first_lnum = lnum;
shl->first_lnum > wp->w_topline;
shl->first_lnum--) {
if (hasFoldingWin(wp, shl->first_lnum - 1, NULL, NULL, true, NULL)) {
break;
}
}
}
if (cur != NULL) {
cur->pos.cur = 0;
}
bool pos_inprogress = true; // mark that a position match search is
// in progress
n = 0;
while (shl->first_lnum < lnum && (shl->rm.regprog != NULL
|| (cur != NULL && pos_inprogress))) {
next_search_hl(wp, shl, shl->first_lnum, (colnr_T)n,
shl == &search_hl ? NULL : cur);
pos_inprogress = !(cur == NULL || cur->pos.cur == 0);
if (shl->lnum != 0) {
shl->first_lnum = shl->lnum
+ shl->rm.endpos[0].lnum
- shl->rm.startpos[0].lnum;
n = shl->rm.endpos[0].col;
} else {
++shl->first_lnum;
n = 0;
}
}
}
if (shl != &search_hl && cur != NULL)
cur = cur->next;
}
}
/*
* Search for a next 'hlsearch' or match.
* Uses shl->buf.
* Sets shl->lnum and shl->rm contents.
* Note: Assumes a previous match is always before "lnum", unless
* shl->lnum is zero.
* Careful: Any pointers for buffer lines will become invalid.
*/
static void
next_search_hl (
win_T *win,
match_T *shl, /* points to search_hl or a match */
linenr_T lnum,
colnr_T mincol, /* minimal column for a match */
matchitem_T *cur /* to retrieve match positions if any */
)
{
linenr_T l;
colnr_T matchcol;
long nmatched = 0;
int save_called_emsg = called_emsg;
if (shl->lnum != 0) {
/* Check for three situations:
* 1. If the "lnum" is below a previous match, start a new search.
* 2. If the previous match includes "mincol", use it.
* 3. Continue after the previous match.
*/
l = shl->lnum + shl->rm.endpos[0].lnum - shl->rm.startpos[0].lnum;
if (lnum > l)
shl->lnum = 0;
else if (lnum < l || shl->rm.endpos[0].col > mincol)
return;
}
/*
* Repeat searching for a match until one is found that includes "mincol"
* or none is found in this line.
*/
called_emsg = FALSE;
for (;; ) {
/* Stop searching after passing the time limit. */
if (profile_passed_limit(shl->tm)) {
shl->lnum = 0; /* no match found in time */
break;
}
/* Three situations:
* 1. No useful previous match: search from start of line.
* 2. Not Vi compatible or empty match: continue at next character.
* Break the loop if this is beyond the end of the line.
* 3. Vi compatible searching: continue at end of previous match.
*/
if (shl->lnum == 0)
matchcol = 0;
else if (vim_strchr(p_cpo, CPO_SEARCH) == NULL
|| (shl->rm.endpos[0].lnum == 0
&& shl->rm.endpos[0].col <= shl->rm.startpos[0].col)) {
char_u *ml;
matchcol = shl->rm.startpos[0].col;
ml = ml_get_buf(shl->buf, lnum, FALSE) + matchcol;
if (*ml == NUL) {
++matchcol;
shl->lnum = 0;
break;
}
matchcol += mb_ptr2len(ml);
} else {
matchcol = shl->rm.endpos[0].col;
}
shl->lnum = lnum;
if (shl->rm.regprog != NULL) {
/* Remember whether shl->rm is using a copy of the regprog in
* cur->match. */
bool regprog_is_copy = (shl != &search_hl
&& cur != NULL
&& shl == &cur->hl
&& cur->match.regprog == cur->hl.rm.regprog);
int timed_out = false;
nmatched = vim_regexec_multi(&shl->rm, win, shl->buf, lnum, matchcol,
&(shl->tm), &timed_out);
// Copy the regprog, in case it got freed and recompiled.
if (regprog_is_copy) {
cur->match.regprog = cur->hl.rm.regprog;
}
if (called_emsg || got_int || timed_out) {
// Error while handling regexp: stop using this regexp.
if (shl == &search_hl) {
// don't free regprog in the match list, it's a copy
vim_regfree(shl->rm.regprog);
SET_NO_HLSEARCH(TRUE);
}
shl->rm.regprog = NULL;
shl->lnum = 0;
got_int = FALSE; // avoid the "Type :quit to exit Vim" message
break;
}
} else if (cur != NULL) {
nmatched = next_search_hl_pos(shl, lnum, &(cur->pos), matchcol);
}
if (nmatched == 0) {
shl->lnum = 0; /* no match found */
break;
}
if (shl->rm.startpos[0].lnum > 0
|| shl->rm.startpos[0].col >= mincol
|| nmatched > 1
|| shl->rm.endpos[0].col > mincol) {
shl->lnum += shl->rm.startpos[0].lnum;
break; /* useful match found */
}
// Restore called_emsg for assert_fails().
called_emsg = save_called_emsg;
}
}
/// If there is a match fill "shl" and return one.
/// Return zero otherwise.
static int
next_search_hl_pos(
match_T *shl, // points to a match
linenr_T lnum,
posmatch_T *posmatch, // match positions
colnr_T mincol // minimal column for a match
)
{
int i;
int found = -1;
shl->lnum = 0;
for (i = posmatch->cur; i < MAXPOSMATCH; i++) {
llpos_T *pos = &posmatch->pos[i];
if (pos->lnum == 0) {
break;
}
if (pos->len == 0 && pos->col < mincol) {
continue;
}
if (pos->lnum == lnum) {
if (found >= 0) {
// if this match comes before the one at "found" then swap
// them
if (pos->col < posmatch->pos[found].col) {
llpos_T tmp = *pos;
*pos = posmatch->pos[found];
posmatch->pos[found] = tmp;
}
} else {
found = i;
}
}
}
posmatch->cur = 0;
if (found >= 0) {
colnr_T start = posmatch->pos[found].col == 0
? 0: posmatch->pos[found].col - 1;
colnr_T end = posmatch->pos[found].col == 0
? MAXCOL : start + posmatch->pos[found].len;
shl->lnum = lnum;
shl->rm.startpos[0].lnum = 0;
shl->rm.startpos[0].col = start;
shl->rm.endpos[0].lnum = 0;
shl->rm.endpos[0].col = end;
shl->is_addpos = true;
posmatch->cur = found + 1;
return 1;
}
return 0;
}
/// Fill the grid from 'start_row' to 'end_row', from 'start_col' to 'end_col'
/// with character 'c1' in first column followed by 'c2' in the other columns.
/// Use attributes 'attr'.
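///
/// Example (illustrative): blank out one row of a window grid with spaces and
/// no highlighting:
///
///   grid_fill(&wp->w_grid, row, row + 1, 0, wp->w_grid.Columns, ' ', ' ', 0);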
void grid_fill(ScreenGrid *grid, int start_row, int end_row, int start_col,
int end_col, int c1, int c2, int attr)
{
schar_T sc;
int row_off = 0, col_off = 0;
screen_adjust_grid(&grid, &row_off, &col_off);
start_row += row_off;
end_row += row_off;
start_col += col_off;
end_col += col_off;
// safety check
if (end_row > grid->Rows) {
end_row = grid->Rows;
}
if (end_col > grid->Columns) {
end_col = grid->Columns;
}
// nothing to do
if (start_row >= end_row || start_col >= end_col) {
return;
}
for (int row = start_row; row < end_row; row++) {
// When drawing over the right half of a double-wide char clear
// out the left half. When drawing over the left half of a
// double-wide char clear out the right half. Only needed in a
// terminal.
if (start_col > 0 && grid_fix_col(grid, start_col, row) != start_col) {
grid_puts_len(grid, (char_u *)" ", 1, row, start_col - 1, 0);
}
if (end_col < grid->Columns
&& grid_fix_col(grid, end_col, row) != end_col) {
grid_puts_len(grid, (char_u *)" ", 1, row, end_col, 0);
}
// if grid was resized (in ext_multigrid mode), the UI has no redraw updates
// for the newly resized grid. It is better to mark everything as dirty and
// send all the updates.
int dirty_first = INT_MAX;
int dirty_last = 0;
int col = start_col;
schar_from_char(sc, c1);
int lineoff = grid->line_offset[row];
for (col = start_col; col < end_col; col++) {
int off = lineoff + col;
if (schar_cmp(grid->chars[off], sc)
|| grid->attrs[off] != attr) {
schar_copy(grid->chars[off], sc);
grid->attrs[off] = attr;
if (dirty_first == INT_MAX) {
dirty_first = col;
}
dirty_last = col+1;
}
if (col == start_col) {
schar_from_char(sc, c2);
}
}
if (dirty_last > dirty_first) {
// TODO(bfredl): support a cleared suffix even with a batched line?
if (put_dirty_row == row) {
put_dirty_first = MIN(put_dirty_first, dirty_first);
put_dirty_last = MAX(put_dirty_last, dirty_last);
} else {
int last = c2 != ' ' ? dirty_last : dirty_first + (c1 != ' ');
ui_line(grid, row, dirty_first, last, dirty_last, attr, false);
}
}
if (end_col == grid->Columns) {
grid->line_wraps[row] = false;
}
// TODO(bfredl): The relevant caller should do this
if (row == Rows - 1 && !ui_has(kUIMessages)) {
// overwritten the command line
redraw_cmdline = true;
if (start_col == 0 && end_col == Columns
&& c1 == ' ' && c2 == ' ' && attr == 0) {
clear_cmdline = false; // command line has been cleared
}
if (start_col == 0) {
mode_displayed = false; // mode cleared or overwritten
}
}
}
}
/*
* Check if there should be a delay. Used before clearing or redrawing the
* screen or the command line.
*/
void check_for_delay(int check_msg_scroll)
{
if ((emsg_on_display || (check_msg_scroll && msg_scroll))
&& !did_wait_return
&& emsg_silent == 0) {
ui_flush();
os_delay(1000L, true);
emsg_on_display = FALSE;
if (check_msg_scroll)
msg_scroll = FALSE;
}
}
/// (Re)allocates a window grid if size changed while in ext_multigrid mode.
/// Updates size, offsets and handle for the grid regardless.
///
/// If "doclear" is true, don't try to copy from the old grid rather clear the
/// resized grid.
void win_grid_alloc(win_T *wp)
{
ScreenGrid *grid = &wp->w_grid;
int rows = wp->w_height_inner;
int cols = wp->w_width_inner;
bool want_allocation = ui_has(kUIMultigrid) || wp->w_floating;
bool has_allocation = (grid->chars != NULL);
if (grid->Rows != rows) {
wp->w_lines_valid = 0;
xfree(wp->w_lines);
wp->w_lines = xcalloc(rows+1, sizeof(wline_T));
}
int was_resized = false;
if ((has_allocation != want_allocation)
|| grid->Rows != rows
|| grid->Columns != cols) {
if (want_allocation) {
grid_alloc(grid, rows, cols, wp->w_grid.valid, wp->w_grid.valid);
grid->valid = true;
} else {
// Single grid mode, all rendering will be redirected to default_grid.
// Only keep track of the size and offset of the window.
grid_free(grid);
grid->Rows = rows;
grid->Columns = cols;
grid->valid = false;
}
was_resized = true;
} else if (want_allocation && has_allocation && !wp->w_grid.valid) {
grid_invalidate(grid);
grid->valid = true;
}
grid->row_offset = wp->w_winrow;
grid->col_offset = wp->w_wincol;
// send grid resize event if:
// - a grid was just resized
// - screen_resize was called and all grid sizes must be sent
// - the UI wants multigrid event (necessary)
if ((send_grid_resize || was_resized) && want_allocation) {
ui_call_grid_resize(grid->handle, grid->Columns, grid->Rows);
}
}
/// assign a handle to the grid. The grid need not be allocated.
void grid_assign_handle(ScreenGrid *grid)
{
static int last_grid_handle = DEFAULT_GRID_HANDLE;
// only assign a grid handle if not already
if (grid->handle == 0) {
grid->handle = ++last_grid_handle;
}
}
/// Resize the screen to Rows and Columns.
///
/// Allocate default_grid.chars[] and other grid arrays.
///
/// There may be some time between setting Rows and Columns and (re)allocating
/// default_grid arrays. This happens when starting up and when
/// (manually) changing the shell size. Always use default_grid.Rows and
/// default_grid.Columns to access items in default_grid.chars[]. Use Rows
/// and Columns for positioning text etc. where the final size of the shell is
/// needed.
void screenalloc(void)
{
static bool entered = false; // avoid recursiveness
int retry_count = 0;
retry:
// Allocation of the screen buffers is done only when the size changes and
// when Rows and Columns have been set and we have started doing full
// screen stuff.
if ((default_grid.chars != NULL
&& Rows == default_grid.Rows
&& Columns == default_grid.Columns
)
|| Rows == 0
|| Columns == 0
|| (!full_screen && default_grid.chars == NULL)) {
return;
}
/*
* It's possible that we produce an out-of-memory message below, which
* will cause this function to be called again. To break the loop, just
* return here.
*/
if (entered)
return;
entered = TRUE;
/*
* Note that the window sizes are updated before reallocating the arrays,
* thus we must not redraw here!
*/
++RedrawingDisabled;
// win_new_shellsize will recompute floats position, but tell the
// compositor to not redraw them yet
ui_comp_set_screen_valid(false);
win_new_shellsize(); /* fit the windows in the new sized shell */
comp_col(); /* recompute columns for shown command and ruler */
// We're changing the size of the screen.
// - Allocate new arrays for default_grid
// - Move lines from the old arrays into the new arrays, clear extra
// lines (unless the screen is going to be cleared).
// - Free the old arrays.
//
// If anything fails, make grid arrays NULL, so we don't do anything!
// Continuing with the old arrays may result in a crash, because the
// size is wrong.
grid_alloc(&default_grid, Rows, Columns, true, true);
StlClickDefinition *new_tab_page_click_defs = xcalloc(
(size_t)Columns, sizeof(*new_tab_page_click_defs));
clear_tab_page_click_defs(tab_page_click_defs, tab_page_click_defs_size);
xfree(tab_page_click_defs);
tab_page_click_defs = new_tab_page_click_defs;
tab_page_click_defs_size = Columns;
default_grid.row_offset = 0;
default_grid.col_offset = 0;
default_grid.handle = DEFAULT_GRID_HANDLE;
must_redraw = CLEAR; // need to clear the screen later
entered = FALSE;
--RedrawingDisabled;
/*
* Do not apply autocommands more than 3 times to avoid an endless loop
* in case applying autocommands always changes Rows or Columns.
*/
if (starting == 0 && ++retry_count <= 3) {
apply_autocmds(EVENT_VIMRESIZED, NULL, NULL, FALSE, curbuf);
/* In rare cases, autocommands may have altered Rows or Columns,
* jump back to check if we need to allocate the screen again. */
goto retry;
}
}
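/// Allocate or reallocate the cell arrays of "grid" to "rows" x "columns".
/// When "copy" is true, copy as much as possible from the old arrays; when
/// "valid" is false, cleared cells get attribute -1 (see grid_invalid_row()).
/// Also grows the shared line buffers (linebuf_char, linebuf_attr) to at
/// least "columns" cells.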
void grid_alloc(ScreenGrid *grid, int rows, int columns, bool copy, bool valid)
{
int new_row;
ScreenGrid new = *grid;
size_t ncells = (size_t)((rows+1) * columns);
new.chars = xmalloc(ncells * sizeof(schar_T));
new.attrs = xmalloc(ncells * sizeof(sattr_T));
new.line_offset = xmalloc((size_t)(rows * sizeof(unsigned)));
new.line_wraps = xmalloc((size_t)(rows * sizeof(char_u)));
new.Rows = rows;
new.Columns = columns;
for (new_row = 0; new_row < new.Rows; new_row++) {
new.line_offset[new_row] = new_row * new.Columns;
new.line_wraps[new_row] = false;
grid_clear_line(&new, new.line_offset[new_row], columns, valid);
if (copy) {
// If the screen is not going to be cleared, copy as much as
// possible from the old screen to the new one and clear the rest
// (used when resizing the window at the "--more--" prompt or when
// executing an external command, for the GUI).
if (new_row < grid->Rows && grid->chars != NULL) {
int len = MIN(grid->Columns, new.Columns);
memmove(new.chars + new.line_offset[new_row],
grid->chars + grid->line_offset[new_row],
(size_t)len * sizeof(schar_T));
memmove(new.attrs + new.line_offset[new_row],
grid->attrs + grid->line_offset[new_row],
(size_t)len * sizeof(sattr_T));
}
}
}
grid_free(grid);
*grid = new;
// Share a single scratch buffer for all grids, by
// ensuring it is as wide as the widest grid.
if (linebuf_size < (size_t)columns) {
xfree(linebuf_char);
xfree(linebuf_attr);
linebuf_char = xmalloc(columns * sizeof(schar_T));
linebuf_attr = xmalloc(columns * sizeof(sattr_T));
linebuf_size = columns;
}
}
void grid_free(ScreenGrid *grid)
{
xfree(grid->chars);
xfree(grid->attrs);
xfree(grid->line_offset);
xfree(grid->line_wraps);
grid->chars = NULL;
grid->attrs = NULL;
grid->line_offset = NULL;
grid->line_wraps = NULL;
}
/// Doesn't allow reinit, so must only be called by free_all_mem!
void screen_free_all_mem(void)
{
grid_free(&default_grid);
xfree(linebuf_char);
xfree(linebuf_attr);
}
/// Clear tab_page_click_defs table
///
/// @param[out] tpcd Table to clear.
/// @param[in] tpcd_size Size of the table.
void clear_tab_page_click_defs(StlClickDefinition *const tpcd,
const long tpcd_size)
{
if (tpcd != NULL) {
for (long i = 0; i < tpcd_size; i++) {
if (i == 0 || tpcd[i].func != tpcd[i - 1].func) {
xfree(tpcd[i].func);
}
}
memset(tpcd, 0, (size_t) tpcd_size * sizeof(tpcd[0]));
}
}
void screenclear(void)
{
check_for_delay(false);
screenalloc(); // allocate screen buffers if size changed
int i;
if (starting == NO_SCREEN || default_grid.chars == NULL) {
return;
}
// blank out the default grid
for (i = 0; i < default_grid.Rows; i++) {
grid_clear_line(&default_grid, default_grid.line_offset[i],
(int)default_grid.Columns, true);
default_grid.line_wraps[i] = false;
}
ui_call_grid_clear(1); // clear the display
ui_comp_set_screen_valid(true);
clear_cmdline = false;
mode_displayed = false;
redraw_all_later(NOT_VALID);
redraw_cmdline = true;
redraw_tabline = true;
redraw_popupmenu = true;
pum_invalidate();
FOR_ALL_WINDOWS_IN_TAB(wp, curtab) {
if (wp->w_floating) {
wp->w_redr_type = CLEAR;
}
}
if (must_redraw == CLEAR) {
must_redraw = NOT_VALID; // no need to clear again
}
compute_cmdrow();
msg_row = cmdline_row; // put cursor on last line for messages
msg_col = 0;
msg_scrolled = 0; // can't scroll back
msg_didany = false;
msg_didout = false;
}
/// clear a line in the grid starting at "off" until "width" characters
/// are cleared.
static void grid_clear_line(ScreenGrid *grid, unsigned off, int width,
bool valid)
{
for (int col = 0; col < width; col++) {
schar_from_ascii(grid->chars[off + col], ' ');
}
int fill = valid ? 0 : -1;
(void)memset(grid->attrs + off, fill, (size_t)width * sizeof(sattr_T));
}
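/// Mark every cell of "grid" as invalid by setting its attribute to -1
/// (see grid_invalid_row()).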
void grid_invalidate(ScreenGrid *grid)
{
(void)memset(grid->attrs, -1, grid->Rows * grid->Columns * sizeof(sattr_T));
}
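/// Return true if the first cell of "row" in "grid" is still marked invalid
/// (attribute < 0), i.e. nothing has been drawn there since invalidation.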
bool grid_invalid_row(ScreenGrid *grid, int row)
{
return grid->attrs[grid->line_offset[row]] < 0;
}
/// Copy part of a grid line for vertically split window.
static void linecopy(ScreenGrid *grid, int to, int from, int col, int width)
{
unsigned off_to = grid->line_offset[to] + col;
unsigned off_from = grid->line_offset[from] + col;
memmove(grid->chars + off_to, grid->chars + off_from,
width * sizeof(schar_T));
memmove(grid->attrs + off_to, grid->attrs + off_from,
width * sizeof(sattr_T));
}
/*
* Set cursor to its position in the current window.
*/
void setcursor(void)
{
if (redrawing()) {
validate_cursor();
ScreenGrid *grid = &curwin->w_grid;
int row = curwin->w_wrow;
int col = curwin->w_wcol;
if (curwin->w_p_rl) {
// With 'rightleft' set and the cursor on a double-wide character,
// position it on the leftmost column.
col = curwin->w_width_inner - curwin->w_wcol
- ((utf_ptr2cells(get_cursor_pos_ptr()) == 2
&& vim_isprintc(gchar_cursor())) ? 2 : 1);
}
screen_adjust_grid(&grid, &row, &col);
ui_grid_cursor_goto(grid->handle, row, col);
}
}
/// Scroll 'line_count' lines at 'row' in window 'wp'.
///
/// Positive `line_count` means scrolling down, so that more space is available
/// at `row`. Negative `line_count` means deleting lines at `row`.
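///
/// Example (illustrative): win_scroll_lines(wp, 0, -1) removes the top grid
/// line of the window and shifts the remaining lines up by one.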
void win_scroll_lines(win_T *wp, int row, int line_count)
{
if (!redrawing() || line_count == 0) {
return;
}
// No lines are being moved, just draw over the entire area
if (row + abs(line_count) >= wp->w_grid.Rows) {
return;
}
if (line_count < 0) {
grid_del_lines(&wp->w_grid, row, -line_count,
wp->w_grid.Rows, 0, wp->w_grid.Columns);
} else {
grid_ins_lines(&wp->w_grid, row, line_count,
wp->w_grid.Rows, 0, wp->w_grid.Columns);
}
}
/*
* The rest of the routines in this file perform screen manipulations. The
* given operation is performed physically on the screen. The corresponding
* change is also made to the internal screen image. In this way, the editor
* anticipates the effect of editing changes on the appearance of the screen.
* That way, when we call update_screen() a complete redraw isn't usually
* necessary. Another advantage is that we can keep adding code to anticipate
* screen changes, and in the meantime, everything still works.
*/
/// insert lines on the screen and move the existing lines down
/// 'line_count' is the number of lines to be inserted.
/// 'end' is the line after the scrolled part. Normally it is Rows.
/// 'col' is the column from which we start inserting.
///
/// 'row', 'col' and 'end' are relative to the start of the region.
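///
/// For a full-width scroll the grid lines are moved by rotating the
/// line_offset[] pointers; a partial-width scroll (e.g. in a vertical split)
/// copies the affected cells with linecopy() instead.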
void grid_ins_lines(ScreenGrid *grid, int row, int line_count, int end, int col,
int width)
{
int i;
int j;
unsigned temp;
int row_off = 0;
screen_adjust_grid(&grid, &row_off, &col);
row += row_off;
end += row_off;
if (line_count <= 0) {
return;
}
// Shift line_offset[] line_count down to reflect the inserted lines.
// Clear the inserted lines.
for (i = 0; i < line_count; i++) {
if (width != grid->Columns) {
// need to copy part of a line
j = end - 1 - i;
while ((j -= line_count) >= row) {
linecopy(grid, j + line_count, j, col, width);
}
j += line_count;
grid_clear_line(grid, grid->line_offset[j] + col, width, false);
grid->line_wraps[j] = false;
} else {
j = end - 1 - i;
temp = grid->line_offset[j];
while ((j -= line_count) >= row) {
grid->line_offset[j + line_count] = grid->line_offset[j];
grid->line_wraps[j + line_count] = grid->line_wraps[j];
}
grid->line_offset[j + line_count] = temp;
grid->line_wraps[j + line_count] = false;
grid_clear_line(grid, temp, (int)grid->Columns, false);
}
}
ui_call_grid_scroll(grid->handle, row, end, col, col+width, -line_count, 0);
return;
}
/// delete lines on the screen and move lines up.
/// 'end' is the line after the scrolled part. Normally it is Rows.
/// When scrolling region used 'off' is the offset from the top for the region.
/// 'row' and 'end' are relative to the start of the region.
void grid_del_lines(ScreenGrid *grid, int row, int line_count, int end, int col,
int width)
{
int j;
int i;
unsigned temp;
int row_off = 0;
screen_adjust_grid(&grid, &row_off, &col);
row += row_off;
end += row_off;
if (line_count <= 0) {
return;
}
// Now shift line_offset[] line_count up to reflect the deleted lines.
// Clear the inserted lines.
for (i = 0; i < line_count; i++) {
if (width != grid->Columns) {
// need to copy part of a line
j = row + i;
while ((j += line_count) <= end - 1) {
linecopy(grid, j - line_count, j, col, width);
}
j -= line_count;
grid_clear_line(grid, grid->line_offset[j] + col, width, false);
grid->line_wraps[j] = false;
} else {
// whole width, moving the line pointers is faster
j = row + i;
temp = grid->line_offset[j];
while ((j += line_count) <= end - 1) {
grid->line_offset[j - line_count] = grid->line_offset[j];
grid->line_wraps[j - line_count] = grid->line_wraps[j];
}
grid->line_offset[j - line_count] = temp;
grid->line_wraps[j - line_count] = false;
grid_clear_line(grid, temp, (int)grid->Columns, false);
}
}
ui_call_grid_scroll(grid->handle, row, end, col, col+width, line_count, 0);
return;
}
// Show the current mode and ruler.
//
// If clear_cmdline is TRUE, clear the rest of the cmdline.
// If clear_cmdline is FALSE there may be a message there that needs to be
// cleared only if a mode is shown.
// Return the length of the message (0 if no message).
int showmode(void)
{
int need_clear;
int length = 0;
int do_mode;
int attr;
int nwr_save;
int sub_attr;
if (ui_has(kUIMessages) && clear_cmdline) {
msg_ext_clear(true);
}
// don't make non-flushed message part of the showmode
msg_ext_ui_flush();
do_mode = ((p_smd && msg_silent == 0)
&& ((State & TERM_FOCUS)
|| (State & INSERT)
|| restart_edit
|| VIsual_active));
if (do_mode || reg_recording != 0) {
// Don't show mode right now, when not redrawing or inside a mapping.
// Call char_avail() only when we are going to show something, because
// it takes a bit of time.
if (!redrawing() || (char_avail() && !KeyTyped) || msg_silent != 0) {
redraw_cmdline = TRUE; /* show mode later */
return 0;
}
nwr_save = need_wait_return;
/* wait a bit before overwriting an important message */
check_for_delay(FALSE);
/* if the cmdline is more than one line high, erase top lines */
need_clear = clear_cmdline;
if (clear_cmdline && cmdline_row < Rows - 1) {
msg_clr_cmdline(); // will reset clear_cmdline
}
/* Position on the last line in the window, column 0 */
msg_pos_mode();
attr = HL_ATTR(HLF_CM); // Highlight mode
// When the screen is too narrow to show the entire mode message,
// avoid scrolling and truncate instead.
msg_no_more = true;
int save_lines_left = lines_left;
lines_left = 0;
if (do_mode) {
MSG_PUTS_ATTR("--", attr);
// CTRL-X in Insert mode
if (edit_submode != NULL && !shortmess(SHM_COMPLETIONMENU)) {
// These messages can get long, avoid a wrap in a narrow window.
// Prefer showing edit_submode_extra. With external messages there
// is no imposed limit.
if (ui_has(kUIMessages)) {
length = INT_MAX;
} else {
length = (Rows - msg_row) * Columns - 3;
}
if (edit_submode_extra != NULL) {
length -= vim_strsize(edit_submode_extra);
}
if (length > 0) {
if (edit_submode_pre != NULL)
length -= vim_strsize(edit_submode_pre);
if (length - vim_strsize(edit_submode) > 0) {
if (edit_submode_pre != NULL) {
msg_puts_attr((const char *)edit_submode_pre, attr);
}
msg_puts_attr((const char *)edit_submode, attr);
}
if (edit_submode_extra != NULL) {
MSG_PUTS_ATTR(" ", attr); // Add a space in between.
if ((int)edit_submode_highl < (int)HLF_COUNT) {
sub_attr = win_hl_attr(curwin, edit_submode_highl);
} else {
sub_attr = attr;
}
msg_puts_attr((const char *)edit_submode_extra, sub_attr);
}
}
} else {
if (State & TERM_FOCUS) {
MSG_PUTS_ATTR(_(" TERMINAL"), attr);
} else if (State & VREPLACE_FLAG)
MSG_PUTS_ATTR(_(" VREPLACE"), attr);
else if (State & REPLACE_FLAG)
MSG_PUTS_ATTR(_(" REPLACE"), attr);
else if (State & INSERT) {
if (p_ri)
MSG_PUTS_ATTR(_(" REVERSE"), attr);
MSG_PUTS_ATTR(_(" INSERT"), attr);
} else if (restart_edit == 'I' || restart_edit == 'i'
|| restart_edit == 'a') {
MSG_PUTS_ATTR(_(" (insert)"), attr);
} else if (restart_edit == 'R') {
MSG_PUTS_ATTR(_(" (replace)"), attr);
} else if (restart_edit == 'V') {
MSG_PUTS_ATTR(_(" (vreplace)"), attr);
}
if (p_hkmap) {
MSG_PUTS_ATTR(_(" Hebrew"), attr);
}
if (State & LANGMAP) {
if (curwin->w_p_arab) {
MSG_PUTS_ATTR(_(" Arabic"), attr);
} else if (get_keymap_str(curwin, (char_u *)" (%s)",
NameBuff, MAXPATHL)) {
MSG_PUTS_ATTR(NameBuff, attr);
}
}
if ((State & INSERT) && p_paste)
MSG_PUTS_ATTR(_(" (paste)"), attr);
if (VIsual_active) {
char *p;
/* Don't concatenate separate words to avoid translation
* problems. */
switch ((VIsual_select ? 4 : 0)
+ (VIsual_mode == Ctrl_V) * 2
+ (VIsual_mode == 'V')) {
case 0: p = N_(" VISUAL"); break;
case 1: p = N_(" VISUAL LINE"); break;
case 2: p = N_(" VISUAL BLOCK"); break;
case 4: p = N_(" SELECT"); break;
case 5: p = N_(" SELECT LINE"); break;
default: p = N_(" SELECT BLOCK"); break;
}
MSG_PUTS_ATTR(_(p), attr);
}
MSG_PUTS_ATTR(" --", attr);
}
need_clear = TRUE;
}
if (reg_recording != 0
&& edit_submode == NULL // otherwise it gets too long
) {
recording_mode(attr);
need_clear = true;
}
mode_displayed = TRUE;
if (need_clear || clear_cmdline)
msg_clr_eos();
msg_didout = FALSE; /* overwrite this message */
length = msg_col;
msg_col = 0;
msg_no_more = false;
lines_left = save_lines_left;
need_wait_return = nwr_save; // never ask for hit-return for this
} else if (clear_cmdline && msg_silent == 0) {
// Clear the whole command line. Will reset "clear_cmdline".
msg_clr_cmdline();
}
// NB: also handles clearing the showmode if it was empty or disabled
msg_ext_flush_showmode();
/* In Visual mode the size of the selected area must be redrawn. */
if (VIsual_active)
clear_showcmd();
// If the last window has no status line, the ruler is after the mode
// message and must be redrawn
win_T *last = lastwin_nofloating();
if (redrawing() && last->w_status_height == 0) {
win_redr_ruler(last, true);
}
redraw_cmdline = false;
clear_cmdline = false;
return length;
}
/*
* Position for a mode message.
*/
static void msg_pos_mode(void)
{
msg_col = 0;
msg_row = Rows - 1;
}
/// Delete mode message. Used when ESC is typed which is expected to end
/// Insert mode (but Insert mode didn't end yet!).
/// Caller should check "mode_displayed".
void unshowmode(bool force)
{
// Don't delete it right now, when not redrawing or inside a mapping.
if (!redrawing() || (!force && char_avail() && !KeyTyped)) {
redraw_cmdline = true; // delete mode later
} else {
clearmode();
}
}
// Clear the mode message.
void clearmode(void)
{
const int save_msg_row = msg_row;
const int save_msg_col = msg_col;
msg_ext_ui_flush();
msg_pos_mode();
if (reg_recording != 0) {
recording_mode(HL_ATTR(HLF_CM));
}
msg_clr_eos();
msg_ext_flush_showmode();
msg_col = save_msg_col;
msg_row = save_msg_row;
}
static void recording_mode(int attr)
{
MSG_PUTS_ATTR(_("recording"), attr);
if (!shortmess(SHM_RECORDING)) {
char_u s[4];
snprintf((char *)s, ARRAY_SIZE(s), " @%c", reg_recording);
MSG_PUTS_ATTR(s, attr);
}
}
/*
* Draw the tab pages line at the top of the Vim window.
*/
static void draw_tabline(void)
{
int tabcount = 0;
int tabwidth = 0;
int col = 0;
int scol = 0;
int attr;
win_T *wp;
win_T *cwp;
int wincount;
int modified;
int c;
int len;
int attr_nosel = HL_ATTR(HLF_TP);
int attr_fill = HL_ATTR(HLF_TPF);
char_u *p;
int room;
int use_sep_chars = (t_colors < 8
);
if (default_grid.chars == NULL) {
return;
}
redraw_tabline = false;
if (ui_has(kUITabline)) {
ui_ext_tabline_update();
return;
}
if (tabline_height() < 1)
return;
// Init TabPageIdxs[] to zero: Clicking outside of tabs has no effect.
assert(Columns == tab_page_click_defs_size);
clear_tab_page_click_defs(tab_page_click_defs, tab_page_click_defs_size);
/* Use the 'tabline' option if it's set. */
if (*p_tal != NUL) {
int saved_did_emsg = did_emsg;
// Check for an error. If there is one we would loop in redrawing the
// screen. Avoid that by making 'tabline' empty.
did_emsg = false;
win_redr_custom(NULL, false);
if (did_emsg) {
set_string_option_direct((char_u *)"tabline", -1,
(char_u *)"", OPT_FREE, SID_ERROR);
}
did_emsg |= saved_did_emsg;
} else {
FOR_ALL_TABS(tp) {
++tabcount;
}
if (tabcount > 0) {
tabwidth = (Columns - 1 + tabcount / 2) / tabcount;
}
if (tabwidth < 6) {
tabwidth = 6;
}
attr = attr_nosel;
tabcount = 0;
FOR_ALL_TABS(tp) {
if (col >= Columns - 4) {
break;
}
scol = col;
if (tp == curtab) {
cwp = curwin;
wp = firstwin;
} else {
cwp = tp->tp_curwin;
wp = tp->tp_firstwin;
}
if (tp->tp_topframe == topframe) {
attr = win_hl_attr(cwp, HLF_TPS);
}
if (use_sep_chars && col > 0) {
grid_putchar(&default_grid, '|', 0, col++, attr);
}
if (tp->tp_topframe != topframe) {
attr = win_hl_attr(cwp, HLF_TP);
}
grid_putchar(&default_grid, ' ', 0, col++, attr);
modified = false;
for (wincount = 0; wp != NULL; wp = wp->w_next, ++wincount) {
if (bufIsChanged(wp->w_buffer)) {
modified = true;
}
}
if (modified || wincount > 1) {
if (wincount > 1) {
vim_snprintf((char *)NameBuff, MAXPATHL, "%d", wincount);
len = (int)STRLEN(NameBuff);
if (col + len >= Columns - 3) {
break;
}
grid_puts_len(&default_grid, NameBuff, len, 0, col,
hl_combine_attr(attr, win_hl_attr(cwp, HLF_T)));
col += len;
}
if (modified) {
grid_puts_len(&default_grid, (char_u *)"+", 1, 0, col++, attr);
}
grid_putchar(&default_grid, ' ', 0, col++, attr);
}
room = scol - col + tabwidth - 1;
if (room > 0) {
/* Get buffer name in NameBuff[] */
get_trans_bufname(cwp->w_buffer);
(void)shorten_dir(NameBuff);
len = vim_strsize(NameBuff);
p = NameBuff;
while (len > room) {
len -= ptr2cells(p);
MB_PTR_ADV(p);
}
if (len > Columns - col - 1) {
len = Columns - col - 1;
}
grid_puts_len(&default_grid, p, (int)STRLEN(p), 0, col, attr);
col += len;
}
grid_putchar(&default_grid, ' ', 0, col++, attr);
// Store the tab page number in tab_page_click_defs[], so that
// jump_to_mouse() knows where each one is.
tabcount++;
while (scol < col) {
tab_page_click_defs[scol++] = (StlClickDefinition) {
.type = kStlClickTabSwitch,
.tabnr = tabcount,
.func = NULL,
};
}
}
if (use_sep_chars)
c = '_';
else
c = ' ';
grid_fill(&default_grid, 0, 1, col, (int)Columns, c, c,
attr_fill);
/* Put an "X" for closing the current tab if there are several. */
if (first_tabpage->tp_next != NULL) {
grid_putchar(&default_grid, 'X', 0, (int)Columns - 1,
attr_nosel);
tab_page_click_defs[Columns - 1] = (StlClickDefinition) {
.type = kStlClickTabClose,
.tabnr = 999,
.func = NULL,
};
}
}
/* Reset the flag here again, in case evaluating 'tabline' causes it to be
* set. */
redraw_tabline = FALSE;
}
void ui_ext_tabline_update(void)
{
Array tabs = ARRAY_DICT_INIT;
FOR_ALL_TABS(tp) {
Dictionary tab_info = ARRAY_DICT_INIT;
PUT(tab_info, "tab", TABPAGE_OBJ(tp->handle));
win_T *cwp = (tp == curtab) ? curwin : tp->tp_curwin;
get_trans_bufname(cwp->w_buffer);
PUT(tab_info, "name", STRING_OBJ(cstr_to_string((char *)NameBuff)));
ADD(tabs, DICTIONARY_OBJ(tab_info));
}
ui_call_tabline_update(curtab->handle, tabs);
}
/*
* Get buffer name for "buf" into NameBuff[].
* Takes care of special buffer names and translates special characters.
*/
void get_trans_bufname(buf_T *buf)
{
if (buf_spname(buf) != NULL)
STRLCPY(NameBuff, buf_spname(buf), MAXPATHL);
else
home_replace(buf, buf->b_fname, NameBuff, MAXPATHL, TRUE);
trans_characters(NameBuff, MAXPATHL);
}
/*
* Get the character to use in a status line. Get its attributes in "*attr".
*/
static int fillchar_status(int *attr, win_T *wp)
{
int fill;
bool is_curwin = (wp == curwin);
if (is_curwin) {
*attr = win_hl_attr(wp, HLF_S);
fill = wp->w_p_fcs_chars.stl;
} else {
*attr = win_hl_attr(wp, HLF_SNC);
fill = wp->w_p_fcs_chars.stlnc;
}
/* Use fill when there is highlighting, and highlighting of current
* window differs, or the fillchars differ, or this is not the
* current window */
if (*attr != 0 && ((win_hl_attr(wp, HLF_S) != win_hl_attr(wp, HLF_SNC)
|| !is_curwin || ONE_WINDOW)
|| (wp->w_p_fcs_chars.stl != wp->w_p_fcs_chars.stlnc))) {
return fill;
}
if (is_curwin) {
return '^';
}
return '=';
}
/*
* Get the character to use in a separator between vertically split windows.
* Get its attributes in "*attr".
*/
static int fillchar_vsep(win_T *wp, int *attr)
{
*attr = win_hl_attr(wp, HLF_C);
return wp->w_p_fcs_chars.vert;
}
/*
* Return TRUE if redrawing should currently be done.
*/
int redrawing(void)
{
return !RedrawingDisabled
&& !(p_lz && char_avail() && !KeyTyped && !do_redraw);
}
/*
* Return TRUE if printing messages should currently be done.
*/
int messaging(void)
{
return !(p_lz && char_avail() && !KeyTyped);
}
/*
* Show current status info in ruler and various other places
* If always is FALSE, only show ruler if position has changed.
*/
void showruler(int always)
{
if (!always && !redrawing())
return;
if ((*p_stl != NUL || *curwin->w_p_stl != NUL) && curwin->w_status_height) {
redraw_custom_statusline(curwin);
} else {
win_redr_ruler(curwin, always);
}
if (need_maketitle
|| (p_icon && (stl_syntax & STL_IN_ICON))
|| (p_title && (stl_syntax & STL_IN_TITLE))
)
maketitle();
/* Redraw the tab pages line if needed. */
if (redraw_tabline)
draw_tabline();
}
static void win_redr_ruler(win_T *wp, int always)
{
static bool did_show_ext_ruler = false;
// If 'ruler' off or redrawing disabled, don't do anything
if (!p_ru) {
return;
}
/*
* Check if cursor.lnum is valid, since win_redr_ruler() may be called
* after deleting lines, before cursor.lnum is corrected.
*/
if (wp->w_cursor.lnum > wp->w_buffer->b_ml.ml_line_count)
return;
/* Don't draw the ruler while doing insert-completion, it might overwrite
* the (long) mode message. */
if (wp == lastwin && lastwin->w_status_height == 0)
if (edit_submode != NULL)
return;
if (*p_ruf) {
int save_called_emsg = called_emsg;
called_emsg = FALSE;
win_redr_custom(wp, TRUE);
if (called_emsg)
set_string_option_direct((char_u *)"rulerformat", -1,
(char_u *)"", OPT_FREE, SID_ERROR);
called_emsg |= save_called_emsg;
return;
}
/*
* Check if not in Insert mode and the line is empty (will show "0-1").
*/
int empty_line = FALSE;
if (!(State & INSERT)
&& *ml_get_buf(wp->w_buffer, wp->w_cursor.lnum, FALSE) == NUL)
empty_line = TRUE;
/*
* Only draw the ruler when something changed.
*/
validate_virtcol_win(wp);
if ( redraw_cmdline
|| always
|| wp->w_cursor.lnum != wp->w_ru_cursor.lnum
|| wp->w_cursor.col != wp->w_ru_cursor.col
|| wp->w_virtcol != wp->w_ru_virtcol
|| wp->w_cursor.coladd != wp->w_ru_cursor.coladd
|| wp->w_topline != wp->w_ru_topline
|| wp->w_buffer->b_ml.ml_line_count != wp->w_ru_line_count
|| wp->w_topfill != wp->w_ru_topfill
|| empty_line != wp->w_ru_empty) {
int width;
int row;
int fillchar;
int attr;
int off;
bool part_of_status = false;
if (wp->w_status_height) {
row = W_ENDROW(wp);
fillchar = fillchar_status(&attr, wp);
off = wp->w_wincol;
width = wp->w_width;
part_of_status = true;
} else {
row = Rows - 1;
fillchar = ' ';
attr = 0;
width = Columns;
off = 0;
}
// In list mode virtcol needs to be recomputed
colnr_T virtcol = wp->w_virtcol;
if (wp->w_p_list && wp->w_p_lcs_chars.tab1 == NUL) {
wp->w_p_list = false;
getvvcol(wp, &wp->w_cursor, NULL, &virtcol, NULL);
wp->w_p_list = true;
}
#define RULER_BUF_LEN 70
char_u buffer[RULER_BUF_LEN];
/*
* Some sprintfs return the length, some return a pointer.
* To avoid portability problems we use strlen() here.
*/
vim_snprintf((char *)buffer, RULER_BUF_LEN, "%" PRId64 ",",
(wp->w_buffer->b_ml.ml_flags & ML_EMPTY) ? (int64_t)0L
: (int64_t)wp->w_cursor.lnum);
size_t len = STRLEN(buffer);
col_print(buffer + len, RULER_BUF_LEN - len,
empty_line ? 0 : (int)wp->w_cursor.col + 1,
(int)virtcol + 1);
/*
* Add a "50%" if there is room for it.
* On the last line, don't print in the last column (scrolls the
* screen up on some terminals).
*/
int i = (int)STRLEN(buffer);
get_rel_pos(wp, buffer + i + 1, RULER_BUF_LEN - i - 1);
int o = i + vim_strsize(buffer + i + 1);
if (wp->w_status_height == 0) { // can't use last char of screen
o++;
}
int this_ru_col = ru_col - (Columns - width);
if (this_ru_col < 0) {
this_ru_col = 0;
}
// Never use more than half the window/screen width, leave the other half
// for the filename.
if (this_ru_col < (width + 1) / 2) {
this_ru_col = (width + 1) / 2;
}
if (this_ru_col + o < width) {
// Need at least 3 chars left for get_rel_pos() + NUL.
while (this_ru_col + o < width && RULER_BUF_LEN > i + 4) {
i += utf_char2bytes(fillchar, buffer + i);
o++;
}
get_rel_pos(wp, buffer + i, RULER_BUF_LEN - i);
}
if (ui_has(kUIMessages) && !part_of_status) {
Array content = ARRAY_DICT_INIT;
Array chunk = ARRAY_DICT_INIT;
ADD(chunk, INTEGER_OBJ(attr));
ADD(chunk, STRING_OBJ(cstr_to_string((char *)buffer)));
ADD(content, ARRAY_OBJ(chunk));
ui_call_msg_ruler(content);
did_show_ext_ruler = true;
} else {
if (did_show_ext_ruler) {
ui_call_msg_ruler((Array)ARRAY_DICT_INIT);
did_show_ext_ruler = false;
}
// Truncate at window boundary.
o = 0;
for (i = 0; buffer[i] != NUL; i += utfc_ptr2len(buffer + i)) {
o += utf_ptr2cells(buffer + i);
if (this_ru_col + o > width) {
buffer[i] = NUL;
break;
}
}
grid_puts(&default_grid, buffer, row, this_ru_col + off, attr);
i = redraw_cmdline;
grid_fill(&default_grid, row, row + 1,
this_ru_col + off + (int)STRLEN(buffer), off + width, fillchar,
fillchar, attr);
// don't redraw the cmdline because of showing the ruler
redraw_cmdline = i;
}
wp->w_ru_cursor = wp->w_cursor;
wp->w_ru_virtcol = wp->w_virtcol;
wp->w_ru_empty = empty_line;
wp->w_ru_topline = wp->w_topline;
wp->w_ru_line_count = wp->w_buffer->b_ml.ml_line_count;
wp->w_ru_topfill = wp->w_topfill;
}
}
/*
* Return the width of the 'number' and 'relativenumber' column.
* Caller may need to check if 'number' or 'relativenumber' is set.
* Otherwise it depends on 'numberwidth' and the line count.
*/
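/*
 * Worked example (illustrative numbers, not from the original source):
 * with 'number' set, a 12345-line buffer and 'numberwidth'=4, the digit
 * count is 5, which exceeds the minimum of 'numberwidth' - 1 = 3, so
 * this returns 5 (the column is 5 cells plus the separating space).
 */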
int number_width(win_T *wp)
{
int n;
linenr_T lnum;
if (wp->w_p_rnu && !wp->w_p_nu) {
// cursor line shows "0"
lnum = wp->w_height_inner;
} else {
// cursor line shows absolute line number
lnum = wp->w_buffer->b_ml.ml_line_count;
}
if (lnum == wp->w_nrwidth_line_count)
return wp->w_nrwidth_width;
wp->w_nrwidth_line_count = lnum;
n = 0;
do {
lnum /= 10;
++n;
} while (lnum > 0);
/* 'numberwidth' gives the minimal width plus one */
if (n < wp->w_p_nuw - 1)
n = wp->w_p_nuw - 1;
wp->w_nrwidth_width = n;
return n;
}
/// Set dimensions of the Nvim application "shell".
void screen_resize(int width, int height)
{
static int busy = FALSE;
// Avoid recursiveness, can happen when setting the window size causes
// another window-changed signal.
if (updating_screen || busy) {
return;
}
if (width < 0 || height < 0) /* just checking... */
return;
if (State == HITRETURN || State == SETWSIZE) {
/* postpone the resizing */
State = SETWSIZE;
return;
}
/* curwin->w_buffer can be NULL when we are closing a window and the
* buffer has already been closed and removing a scrollbar causes a resize
* event. Don't resize then, it will happen after entering another buffer.
*/
if (curwin->w_buffer == NULL)
return;
++busy;
Rows = height;
Columns = width;
check_shellsize();
height = Rows;
width = Columns;
ui_call_grid_resize(1, width, height);
send_grid_resize = true;
/* The window layout used to be adjusted here, but it now happens in
* screenalloc() (also invoked from screenclear()). That is because the
* "busy" check above may skip this, but not screenalloc(). */
if (State != ASKMORE && State != EXTERNCMD && State != CONFIRM) {
screenclear();
}
if (starting != NO_SCREEN) {
maketitle();
changed_line_abv_curs();
invalidate_botline();
/*
* We only redraw when it's needed:
* - While at the more prompt or executing an external command, don't
* redraw, but position the cursor.
* - While editing the command line, only redraw that.
* - in Ex mode, don't redraw anything.
* - Otherwise, redraw right now, and position the cursor.
* Always need to call update_screen() or screenalloc(), to make
* sure Rows/Columns and the size of the screen is correct!
*/
if (State == ASKMORE || State == EXTERNCMD || State == CONFIRM
|| exmode_active) {
screenalloc();
repeat_message();
} else {
if (curwin->w_p_scb)
do_check_scrollbind(TRUE);
if (State & CMDLINE) {
redraw_popupmenu = false;
update_screen(NOT_VALID);
redrawcmdline();
if (pum_drawn()) {
cmdline_pum_display(false);
}
} else {
update_topline();
if (pum_drawn()) {
// TODO(bfredl): ins_compl_show_pum wants to redraw the screen first.
// For now make sure the nested update_screen(0) won't redraw the
// pum at the old position. Try to untangle this later.
redraw_popupmenu = false;
ins_compl_show_pum();
}
update_screen(NOT_VALID);
if (redrawing()) {
setcursor();
}
}
}
}
ui_flush();
--busy;
}
/// Check if the new Nvim application "shell" dimensions are valid.
/// Correct it if it's too small or way too big.
void check_shellsize(void)
{
if (Rows < min_rows()) {
// need room for one window and command line
Rows = min_rows();
}
limit_screen_size();
}
// Limit Rows and Columns to avoid an overflow in Rows * Columns.
void limit_screen_size(void)
{
if (Columns < MIN_COLUMNS) {
Columns = MIN_COLUMNS;
} else if (Columns > 10000) {
Columns = 10000;
}
if (Rows > 1000) {
Rows = 1000;
}
}
void win_new_shellsize(void)
{
static long old_Rows = 0;
static long old_Columns = 0;
if (old_Rows != Rows) {
// if 'window' uses the whole screen, keep it using that
if (p_window == old_Rows - 1 || old_Rows == 0) {
p_window = Rows - 1;
}
old_Rows = Rows;
shell_new_rows(); // update window sizes
}
if (old_Columns != Columns) {
old_Columns = Columns;
shell_new_columns(); // update window sizes
}
}
win_T *get_win_by_grid_handle(handle_T handle)
{
FOR_ALL_WINDOWS_IN_TAB(wp, curtab) {
if (wp->w_grid.handle == handle) {
return wp;
}
}
return NULL;
}
|
695647.c | /*
SDL - Simple DirectMedia Layer
Copyright (C) 1997-2012 Sam Lantinga
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Sam Lantinga
slouken@libsdl.org
*/
#include "SDL_config.h"
#ifdef SDL_CDROM_MACOSX
#include "SDL_syscdrom_c.h"
#pragma mark -- Globals --
static FSRef** tracks;
static FSVolumeRefNum* volumes;
static CDstatus status;
static int nextTrackFrame;
static int nextTrackFramesRemaining;
static int fakeCD;
static int currentTrack;
static int didReadTOC;
static int cacheTOCNumTracks;
static int currentDrive; /* Only allow 1 drive in use at a time */
#pragma mark -- Prototypes --
static const char *SDL_SYS_CDName (int drive);
static int SDL_SYS_CDOpen (int drive);
static int SDL_SYS_CDGetTOC (SDL_CD *cdrom);
static CDstatus SDL_SYS_CDStatus (SDL_CD *cdrom, int *position);
static int SDL_SYS_CDPlay (SDL_CD *cdrom, int start, int length);
static int SDL_SYS_CDPause (SDL_CD *cdrom);
static int SDL_SYS_CDResume (SDL_CD *cdrom);
static int SDL_SYS_CDStop (SDL_CD *cdrom);
static int SDL_SYS_CDEject (SDL_CD *cdrom);
static void SDL_SYS_CDClose (SDL_CD *cdrom);
#pragma mark -- Helper Functions --
/* Read a list of tracks from the volume */
static int LoadTracks (SDL_CD *cdrom)
{
/* Check if tracks are already loaded */
if ( tracks[cdrom->id] != NULL )
return 0;
/* Allocate memory for tracks */
tracks[cdrom->id] = (FSRef*) SDL_calloc (1, sizeof(**tracks) * cdrom->numtracks);
if (tracks[cdrom->id] == NULL) {
SDL_OutOfMemory ();
return -1;
}
/* Load tracks */
if (ListTrackFiles (volumes[cdrom->id], tracks[cdrom->id], cdrom->numtracks) < 0)
return -1;
return 0;
}
/* Find a file for a given start frame and length */
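/*
 * Worked example (illustrative numbers, not from the original source):
 * with track 0 at offset 0, length 1000 and track 1 at offset 1000, a
 * request for start=900, length=300 selects track 0 with
 * *outStartFrame=900 and *outStopFrame=-1, and schedules the remaining
 * 200 frames via nextTrackFrame=1000, nextTrackFramesRemaining=200.
 */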
static FSRef* GetFileForOffset (SDL_CD *cdrom, int start, int length, int *outStartFrame, int *outStopFrame)
{
int i;
for (i = 0; i < cdrom->numtracks; i++) {
if (cdrom->track[i].offset <= start &&
start < (cdrom->track[i].offset + cdrom->track[i].length))
break;
}
if (i == cdrom->numtracks)
return NULL;
currentTrack = i;
*outStartFrame = start - cdrom->track[i].offset;
if ((*outStartFrame + length) < cdrom->track[i].length) {
*outStopFrame = *outStartFrame + length;
length = 0;
nextTrackFrame = -1;
nextTrackFramesRemaining = -1;
}
else {
*outStopFrame = -1;
length -= cdrom->track[i].length - *outStartFrame;
nextTrackFrame = cdrom->track[i+1].offset;
nextTrackFramesRemaining = length;
}
return &tracks[cdrom->id][i];
}
/* Setup another file for playback, or stop playback (called from another thread) */
static void CompletionProc (SDL_CD *cdrom)
{
Lock ();
if (nextTrackFrame > 0 && nextTrackFramesRemaining > 0) {
/* Load the next file to play */
int startFrame, stopFrame;
FSRef *file;
PauseFile ();
ReleaseFile ();
file = GetFileForOffset (cdrom, nextTrackFrame,
nextTrackFramesRemaining, &startFrame, &stopFrame);
if (file == NULL) {
status = CD_STOPPED;
Unlock ();
return;
}
LoadFile (file, startFrame, stopFrame);
SetCompletionProc (CompletionProc, cdrom);
PlayFile ();
}
else {
/* Release the current file */
PauseFile ();
ReleaseFile ();
status = CD_STOPPED;
}
Unlock ();
}
#pragma mark -- Driver Functions --
/* Initialize */
int SDL_SYS_CDInit (void)
{
/* Initialize globals */
volumes = NULL;
tracks = NULL;
status = CD_STOPPED;
nextTrackFrame = -1;
nextTrackFramesRemaining = -1;
fakeCD = SDL_FALSE;
currentTrack = -1;
didReadTOC = SDL_FALSE;
cacheTOCNumTracks = -1;
currentDrive = -1;
/* Fill in function pointers */
SDL_CDcaps.Name = SDL_SYS_CDName;
SDL_CDcaps.Open = SDL_SYS_CDOpen;
SDL_CDcaps.GetTOC = SDL_SYS_CDGetTOC;
SDL_CDcaps.Status = SDL_SYS_CDStatus;
SDL_CDcaps.Play = SDL_SYS_CDPlay;
SDL_CDcaps.Pause = SDL_SYS_CDPause;
SDL_CDcaps.Resume = SDL_SYS_CDResume;
SDL_CDcaps.Stop = SDL_SYS_CDStop;
SDL_CDcaps.Eject = SDL_SYS_CDEject;
SDL_CDcaps.Close = SDL_SYS_CDClose;
/*
Read the list of "drives"
This is currently a hack that infers drives from
mounted audio CD volumes, rather than
actual CD-ROM devices - which means it may not
act as expected sometimes.
*/
/* Find out how many cd volumes are mounted */
SDL_numcds = DetectAudioCDVolumes (NULL, 0);
/*
If there are no volumes, fake a cd device
so tray empty can be reported.
*/
if (SDL_numcds == 0) {
fakeCD = SDL_TRUE;
SDL_numcds = 1;
status = CD_TRAYEMPTY;
return 0;
}
/* Allocate space for volumes */
volumes = (FSVolumeRefNum*) SDL_calloc (1, sizeof(*volumes) * SDL_numcds);
if (volumes == NULL) {
SDL_OutOfMemory ();
return -1;
}
/* Allocate space for tracks */
tracks = (FSRef**) SDL_calloc (1, sizeof(*tracks) * (SDL_numcds + 1));
if (tracks == NULL) {
SDL_OutOfMemory ();
return -1;
}
/* Mark the end of the tracks array */
tracks[ SDL_numcds ] = (FSRef*)-1;
/*
Redetect, now save all volumes for later
Update SDL_numcds just in case it changed
*/
{
int numVolumes = SDL_numcds;
SDL_numcds = DetectAudioCDVolumes (volumes, numVolumes);
/* If more cds suddenly show up, ignore them */
if (SDL_numcds > numVolumes) {
SDL_SetError ("Some CD's were added but they will be ignored");
SDL_numcds = numVolumes;
}
}
return 0;
}
/* Shutdown and cleanup */
void SDL_SYS_CDQuit(void)
{
ReleaseFile();
if (volumes != NULL)
free (volumes);
if (tracks != NULL) {
FSRef **ptr;
for (ptr = tracks; *ptr != (FSRef*)-1; ptr++)
if (*ptr != NULL)
free (*ptr);
free (tracks);
}
}
/* Get the Unix disk name of the volume */
static const char *SDL_SYS_CDName (int drive)
{
/*
* !!! FIXME: PBHGetVolParmsSync() is gone in 10.6,
* !!! FIXME: replaced with FSGetVolumeParms(), which
* !!! FIXME: isn't available before 10.5. :/
*/
return "Mac OS X CD-ROM Device";
#if 0
OSStatus err = noErr;
HParamBlockRec pb;
GetVolParmsInfoBuffer volParmsInfo;
if (fakeCD)
return "Fake CD-ROM Device";
pb.ioParam.ioNamePtr = NULL;
pb.ioParam.ioVRefNum = volumes[drive];
pb.ioParam.ioBuffer = (Ptr)&volParmsInfo;
pb.ioParam.ioReqCount = (SInt32)sizeof(volParmsInfo);
err = PBHGetVolParmsSync(&pb);
if (err != noErr) {
SDL_SetError ("PBHGetVolParmsSync returned %d", err);
return NULL;
}
return volParmsInfo.vMDeviceID;
#endif
}
/* Open the "device" */
static int SDL_SYS_CDOpen (int drive)
{
/* Only allow 1 device to be open */
if (currentDrive >= 0) {
SDL_SetError ("Only one cdrom is supported");
return -1;
}
else
currentDrive = drive;
return drive;
}
/* Get the table of contents */
static int SDL_SYS_CDGetTOC (SDL_CD *cdrom)
{
if (fakeCD) {
SDL_SetError (kErrorFakeDevice);
return -1;
}
if (didReadTOC) {
cdrom->numtracks = cacheTOCNumTracks;
return 0;
}
ReadTOCData (volumes[cdrom->id], cdrom);
didReadTOC = SDL_TRUE;
cacheTOCNumTracks = cdrom->numtracks;
return 0;
}
/* Get CD-ROM status */
static CDstatus SDL_SYS_CDStatus (SDL_CD *cdrom, int *position)
{
if (position) {
int trackFrame;
Lock ();
trackFrame = GetCurrentFrame ();
Unlock ();
*position = cdrom->track[currentTrack].offset + trackFrame;
}
return status;
}
/* Start playback */
static int SDL_SYS_CDPlay(SDL_CD *cdrom, int start, int length)
{
int startFrame, stopFrame;
FSRef *ref;
if (fakeCD) {
SDL_SetError (kErrorFakeDevice);
return -1;
}
Lock();
/* Unlock on every early-exit path so the lock is not leaked */
if (LoadTracks (cdrom) < 0) {
Unlock ();
return -2;
}
if (PauseFile () < 0) {
Unlock ();
return -3;
}
if (ReleaseFile () < 0) {
Unlock ();
return -4;
}
ref = GetFileForOffset (cdrom, start, length, &startFrame, &stopFrame);
if (ref == NULL) {
SDL_SetError ("SDL_SYS_CDPlay: No file for start=%d, length=%d", start, length);
Unlock ();
return -5;
}
if (LoadFile (ref, startFrame, stopFrame) < 0) {
Unlock ();
return -6;
}
SetCompletionProc (CompletionProc, cdrom);
if (PlayFile () < 0) {
Unlock ();
return -7;
}
status = CD_PLAYING;
Unlock();
return 0;
}
/* Pause playback */
static int SDL_SYS_CDPause(SDL_CD *cdrom)
{
if (fakeCD) {
SDL_SetError (kErrorFakeDevice);
return -1;
}
Lock ();
if (PauseFile () < 0) {
Unlock ();
return -2;
}
status = CD_PAUSED;
Unlock ();
return 0;
}
/* Resume playback */
static int SDL_SYS_CDResume(SDL_CD *cdrom)
{
if (fakeCD) {
SDL_SetError (kErrorFakeDevice);
return -1;
}
Lock ();
if (PlayFile () < 0) {
Unlock ();
return -2;
}
status = CD_PLAYING;
Unlock ();
return 0;
}
/* Stop playback */
static int SDL_SYS_CDStop(SDL_CD *cdrom)
{
if (fakeCD) {
SDL_SetError (kErrorFakeDevice);
return -1;
}
Lock ();
if (PauseFile () < 0) {
Unlock ();
return -2;
}
if (ReleaseFile () < 0) {
Unlock ();
return -3;
}
status = CD_STOPPED;
Unlock ();
return 0;
}
/* Eject the CD-ROM (Unmount the volume) */
static int SDL_SYS_CDEject(SDL_CD *cdrom)
{
OSStatus err;
pid_t dissenter;
if (fakeCD) {
SDL_SetError (kErrorFakeDevice);
return -1;
}
Lock ();
if (PauseFile () < 0) {
Unlock ();
return -2;
}
if (ReleaseFile () < 0) {
Unlock ();
return -3;
}
status = CD_STOPPED;
/* Eject the volume */
err = FSEjectVolumeSync(volumes[cdrom->id], kNilOptions, &dissenter);
if (err != noErr) {
Unlock ();
SDL_SetError ("PBUnmountVol returned %d", err);
return -4;
}
status = CD_TRAYEMPTY;
/* Invalidate volume and track info */
volumes[cdrom->id] = 0;
free (tracks[cdrom->id]);
tracks[cdrom->id] = NULL;
Unlock ();
return 0;
}
/* Close the CD-ROM */
static void SDL_SYS_CDClose(SDL_CD *cdrom)
{
currentDrive = -1;
return;
}
#endif /* SDL_CDROM_MACOSX */
|
83340.c |
/*
* refclock_local - local pseudo-clock driver
*
* wjm 17-aug-1995: add a hook for special treatment of VMS_LOCALUNIT
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#ifdef REFCLOCK
#include "ntpd.h"
#include "ntp_refclock.h"
#include "ntp_stdlib.h"
#include <stdio.h>
#include <ctype.h>
#ifdef KERNEL_PLL
#include "ntp_syscall.h"
#endif
/*
* This is a hack to allow a machine to use its own system clock as a
* reference clock, i.e., to free-run using no outside clock discipline
* source. This is useful if you want to use NTP in an isolated
* environment with no radio clock or NIST modem available. Pick a
* machine that you figure has a good clock oscillator and configure it
* with this driver. Set the clock using the best means available, like
* eyeball-and-wristwatch. Then, point all the other machines at this
* one or use broadcast (not multicast) mode to distribute time.
*
* Another application for this driver is if you want to use a
* particular server's clock as the clock of last resort when all other
* normal synchronization sources have gone away. This is especially
* useful if that server has an ovenized oscillator. For this you would
* configure this driver at a higher stratum (say 5) to prevent the
* server's stratum from falling below that.
*
* A third application for this driver is when an external discipline
* source is available, such as the NIST "lockclock" program, which
* synchronizes the local clock via a telephone modem and the NIST
* Automated Computer Time Service (ACTS), or the Digital Time
* Synchronization Service (DTSS), which runs on DCE machines. In this
* case the stratum should be set at zero, indicating a bona fide
* stratum-1 source. Exercise some caution with this, since there is no
* easy way to telegraph via NTP that something might be wrong in the
* discipline source itself. In the case of DTSS, the local clock can
* have a rather large jitter, depending on the interval between
* corrections and the intrinsic frequency error of the clock
* oscillator. In extreme cases, this can cause clients to exceed the
* 128-ms slew window and drop off the NTP subnet.
*
 * This driver includes provisions to telegraph synchronization state
* and related variables by means of kernel variables with specially
* modified kernels. This is done using the ntp_adjtime() syscall.
* In the cases where another protocol or device synchronizes the local
* host, the data given to the kernel can be slurped up by this driver
* and distributed to clients by ordinary NTP messaging.
*
* In the default mode the behavior of the clock selection algorithm is
* modified when this driver is in use. The algorithm is designed so
* that this driver will never be selected unless no other discipline
 * source is available. This can be overridden with the prefer keyword of
* the server configuration command, in which case only this driver will
* be selected for synchronization and all other discipline sources will
* be ignored. This behavior is intended for use when an external
* discipline source controls the system clock.
*
* Fudge Factors
*
 * The stratum for this driver is set at 5 by default, but it can be
* changed by the fudge command and/or the ntpdc utility. The reference
* ID is "LCL" by default, but can be changed using the same mechanism.
* *NEVER* configure this driver to operate at a stratum which might
* possibly disrupt a client with access to a bona fide primary server,
* unless the local clock oscillator is reliably disciplined by another
* source. *NEVER NEVER* configure a server which might devolve to an
* undisciplined local clock to use multicast mode. Always remember that
* an improperly configured local clock driver let loose in the Internet
* can cause very serious disruption. This is why most of us who care
* about good time use cryptographic authentication.
*
* This driver provides a mechanism to trim the local clock in both time
* and frequency, as well as a way to manipulate the leap bits. The
* fudge time1 parameter adjusts the time, in seconds, and the fudge
* time2 parameter adjusts the frequency, in ppm. The fudge time1
* parameter is additive; that is, it adds an increment to the current
* time. The fudge time2 parameter directly sets the frequency.
*/
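/*
 * Illustrative configuration sketch (not part of this driver's source).
 * It assumes the conventional 127.127.1.u pseudo-address used to select
 * this driver from ntp.conf, and only restates the fudge semantics
 * described above:
 *
 *   server 127.127.1.0                          # undisciplined local clock
 *   fudge  127.127.1.0 stratum 10 time1 0.050 time2 2.5
 *
 * Here stratum 10 keeps this a last-resort source, time1 adds 50 ms to
 * every offset sample and time2 applies a continuous +2.5 ppm frequency
 * trim.
 */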
/*
* Local interface definitions
*/
#define PRECISION (-7) /* about 10 ms precision */
#if defined(VMS) && defined(VMS_LOCALUNIT)
#define REFID "LCLv" /* reference ID */
#else /* VMS VMS_LOCALUNIT */
#define REFID "LCL\0" /* reference ID */
#endif /* VMS VMS_LOCALUNIT */
#define DESCRIPTION "Undisciplined local clock" /* WRU */
#define STRATUM 5 /* default stratum */
#define DISPERSION .01 /* default dispersion (10 ms) */
/*
* Imported from the timer module
*/
extern u_long current_time;
/*
* Imported from ntp_proto
*/
extern s_char sys_precision;
#ifdef KERNEL_PLL
/*
* Imported from ntp_loopfilter
*/
extern int pll_control; /* kernel pll control */
extern int kern_enable; /* kernel pll enabled */
extern int ext_enable; /* external clock enable */
#endif /* KERNEL_PLL */
/*
* Function prototypes
*/
static int local_start P((int, struct peer *));
static void local_poll P((int, struct peer *));
/*
* Local variables
*/
static u_long poll_time; /* last time polled */
/*
* Transfer vector
*/
struct refclock refclock_local = {
local_start, /* start up driver */
noentry, /* shut down driver (not used) */
local_poll, /* transmit poll message */
noentry, /* not used (old lcl_control) */
noentry, /* initialize driver (not used) */
noentry, /* not used (old lcl_buginfo) */
NOFLAGS /* not used */
};
/*
* local_start - start up the clock
*/
static int
local_start(
int unit,
struct peer *peer
)
{
struct refclockproc *pp;
pp = peer->procptr;
/*
* Initialize miscellaneous variables
*/
peer->precision = sys_precision;
pp->leap = LEAP_NOTINSYNC;
peer->stratum = STRATUM;
pp->stratum = STRATUM;
pp->clockdesc = DESCRIPTION;
memcpy(&pp->refid, "INIT", 4);
poll_time = current_time;
return (1);
}
/*
* local_poll - called by the transmit procedure
*
* LOCKCLOCK: If the kernel supports the nanokernel or microkernel
* system calls, the leap bits are extracted from the kernel. If there
* is a kernel error or the kernel leap bits are set to 11, the NTP leap
* bits are set to 11 and the stratum is set to infinity. Otherwise, the
* NTP leap bits are set to the kernel leap bits and the stratum is set
* as fudged. This behavior does not faithfully follow the
* specification, but is probably more appropriate in a multiple-server
* national laboratory network.
*/
static void
local_poll(
int unit,
struct peer *peer
)
{
#if defined(KERNEL_PLL) && defined(LOCKCLOCK)
struct timex ntv;
#endif /* KERNEL_PLL LOCKCLOCK */
struct refclockproc *pp;
#if defined(VMS) && defined(VMS_LOCALUNIT)
if (unit == VMS_LOCALUNIT) {
extern void vms_local_poll(struct peer *);
vms_local_poll(peer);
return;
}
#endif /* VMS && VMS_LOCALUNIT */
pp = peer->procptr;
pp->polls++;
/*
* Ramble through the usual filtering and grooming code, which
* is essentially a no-op and included mostly for pretty
* billboards. We allow a one-time time adjustment using fudge
* time1 (s) and a continuous frequency adjustment using fudge
 * time2 (ppm).
*/
get_systime(&pp->lastrec);
pp->fudgetime1 += pp->fudgetime2 * 1e-6 * (current_time -
poll_time);
poll_time = current_time;
refclock_process_offset(pp, pp->lastrec, pp->lastrec,
pp->fudgetime1);
/*
* If another process is disciplining the system clock, we set
* the leap bits and quality indicators from the kernel.
*/
#if defined(KERNEL_PLL) && defined(LOCKCLOCK)
memset(&ntv, 0, sizeof ntv);
switch (ntp_adjtime(&ntv)) {
case TIME_OK:
pp->leap = LEAP_NOWARNING;
peer->stratum = pp->stratum;
break;
case TIME_INS:
pp->leap = LEAP_ADDSECOND;
peer->stratum = pp->stratum;
break;
case TIME_DEL:
pp->leap = LEAP_DELSECOND;
peer->stratum = pp->stratum;
break;
default:
pp->leap = LEAP_NOTINSYNC;
peer->stratum = STRATUM_UNSPEC;
}
pp->disp = 0;
pp->jitter = 0;
#else /* KERNEL_PLL LOCKCLOCK */
pp->leap = LEAP_NOWARNING;
pp->disp = DISPERSION;
pp->jitter = 0;
#endif /* KERNEL_PLL LOCKCLOCK */
pp->lastref = pp->lastrec;
refclock_receive(peer);
pp->fudgetime1 = 0;
}
#else
int refclock_local_bs;
#endif /* REFCLOCK */
|
861748.c | --- Firmware/src/drivers/stm32/drv_hrt.c 2015-04-07 12:50:25.798575940 +0530
+++ pandapilot_v4/src/drivers/stm32/drv_hrt.c 2015-04-07 14:59:15.114231334 +0530
@@ -1,5 +1,5 @@
/****************************************************************************
- *
+ * Copyright (c) 2014 NavStik Development Team. All rights reserved.
* Copyright (c) 2012, 2013 PX4 Development Team. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -59,6 +59,7 @@
#include <errno.h>
#include <string.h>
+#include <arch/board/board.h>
#include <board_config.h>
#include <drivers/drv_hrt.h>
@@ -85,7 +86,7 @@
#elif HRT_TIMER == 2
# define HRT_TIMER_BASE STM32_TIM2_BASE
# define HRT_TIMER_POWER_REG STM32_RCC_APB1ENR
-# define HRT_TIMER_POWER_BIT RCC_APB2ENR_TIM2EN
+# define HRT_TIMER_POWER_BIT RCC_APB1ENR_TIM2EN
# define HRT_TIMER_VECTOR STM32_IRQ_TIM2
# define HRT_TIMER_CLOCK STM32_APB1_TIM2_CLKIN
# if CONFIG_STM32_TIM2
@@ -103,7 +104,7 @@
#elif HRT_TIMER == 4
# define HRT_TIMER_BASE STM32_TIM4_BASE
# define HRT_TIMER_POWER_REG STM32_RCC_APB1ENR
-# define HRT_TIMER_POWER_BIT RCC_APB2ENR_TIM4EN
+# define HRT_TIMER_POWER_BIT RCC_APB1ENR_TIM4EN
# define HRT_TIMER_VECTOR STM32_IRQ_TIM4
# define HRT_TIMER_CLOCK STM32_APB1_TIM4_CLKIN
# if CONFIG_STM32_TIM4
@@ -112,7 +113,7 @@
#elif HRT_TIMER == 5
# define HRT_TIMER_BASE STM32_TIM5_BASE
# define HRT_TIMER_POWER_REG STM32_RCC_APB1ENR
-# define HRT_TIMER_POWER_BIT RCC_APB2ENR_TIM5EN
+# define HRT_TIMER_POWER_BIT RCC_APB1ENR_TIM5EN
# define HRT_TIMER_VECTOR STM32_IRQ_TIM5
# define HRT_TIMER_CLOCK STM32_APB1_TIM5_CLKIN
# if CONFIG_STM32_TIM5
|
793624.c | /**
******************************************************************************
* @file stm32l4xx_hal_rcc.c
* @author MCD Application Team
* @brief RCC HAL module driver.
* This file provides firmware functions to manage the following
* functionalities of the Reset and Clock Control (RCC) peripheral:
* + Initialization and de-initialization functions
* + Peripheral Control functions
*
@verbatim
==============================================================================
##### RCC specific features #####
==============================================================================
[..]
After reset the device is running from Multiple Speed Internal oscillator
(4 MHz) with Flash 0 wait state. Flash prefetch buffer, D-Cache
and I-Cache are disabled, and all peripherals are off except internal
SRAM, Flash and JTAG.
(+) There is no prescaler on High speed (AHBs) and Low speed (APBs) busses:
all peripherals mapped on these busses are running at MSI speed.
(+) The clock for all peripherals is switched off, except the SRAM and FLASH.
(+) All GPIOs are in analog mode, except the JTAG pins which
are assigned to be used for debug purpose.
[..]
Once the device started from reset, the user application has to:
(+) Configure the clock source to be used to drive the System clock
(if the application needs higher frequency/performance)
(+) Configure the System clock frequency and Flash settings
(+) Configure the AHB and APB busses prescalers
(+) Enable the clock for the peripheral(s) to be used
(+) Configure the clock source(s) for peripherals which clocks are not
derived from the System clock (SAIx, RTC, ADC, USB OTG FS/SDMMC1/RNG)
@endverbatim
******************************************************************************
* @attention
*
* <h2><center>© COPYRIGHT(c) 2017 STMicroelectronics</center></h2>
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Includes ------------------------------------------------------------------*/
#include "stm32l4xx_hal.h"
/** @addtogroup STM32L4xx_HAL_Driver
* @{
*/
/** @defgroup RCC RCC
* @brief RCC HAL module driver
* @{
*/
#ifdef HAL_RCC_MODULE_ENABLED
/* Private typedef -----------------------------------------------------------*/
/* Private define ------------------------------------------------------------*/
/** @defgroup RCC_Private_Constants RCC Private Constants
* @{
*/
#define HSE_TIMEOUT_VALUE HSE_STARTUP_TIMEOUT
#define HSI_TIMEOUT_VALUE 2U /* 2 ms (minimum Tick + 1) */
#define MSI_TIMEOUT_VALUE 2U /* 2 ms (minimum Tick + 1) */
#define LSI_TIMEOUT_VALUE 2U /* 2 ms (minimum Tick + 1) */
#define HSI48_TIMEOUT_VALUE 2U /* 2 ms (minimum Tick + 1) */
#define PLL_TIMEOUT_VALUE 2U /* 2 ms (minimum Tick + 1) */
#define CLOCKSWITCH_TIMEOUT_VALUE 5000U /* 5 s */
/**
* @}
*/
/* Private macro -------------------------------------------------------------*/
/** @defgroup RCC_Private_Macros RCC Private Macros
* @{
*/
#define __MCO1_CLK_ENABLE() __HAL_RCC_GPIOA_CLK_ENABLE()
#define MCO1_GPIO_PORT GPIOA
#define MCO1_PIN GPIO_PIN_8
#define RCC_PLL_OSCSOURCE_CONFIG(__HAL_RCC_PLLSOURCE__) \
(MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC, (__HAL_RCC_PLLSOURCE__)))
/**
* @}
*/
/* Private variables ---------------------------------------------------------*/
/* Private function prototypes -----------------------------------------------*/
/** @defgroup RCC_Private_Functions RCC Private Functions
* @{
*/
static HAL_StatusTypeDef RCC_SetFlashLatencyFromMSIRange(uint32_t msirange);
#if defined(STM32L4R5xx) || defined(STM32L4R7xx) || defined(STM32L4R9xx) || defined(STM32L4S5xx) || defined(STM32L4S7xx) || defined(STM32L4S9xx)
static uint32_t RCC_GetSysClockFreqFromPLLSource(void);
#endif
/**
* @}
*/
/* Exported functions --------------------------------------------------------*/
/** @defgroup RCC_Exported_Functions RCC Exported Functions
* @{
*/
/** @defgroup RCC_Exported_Functions_Group1 Initialization and de-initialization functions
* @brief Initialization and Configuration functions
*
@verbatim
===============================================================================
##### Initialization and de-initialization functions #####
===============================================================================
[..]
This section provides functions allowing to configure the internal and external oscillators
(HSE, HSI, LSE, MSI, LSI, PLL, CSS and MCO) and the System busses clocks (SYSCLK, AHB, APB1
and APB2).
[..] Internal/external clock and PLL configuration
(+) HSI (high-speed internal): 16 MHz factory-trimmed RC used directly or through
the PLL as System clock source.
(+) MSI (Multiple Speed Internal): Its frequency is software trimmable from 100 kHz to 48 MHz.
It can be used to generate the clock for the USB OTG FS (48 MHz).
The number of flash wait states is automatically adjusted when MSI range is updated with
HAL_RCC_OscConfig() and the MSI is used as System clock source.
(+) LSI (low-speed internal): 32 KHz low consumption RC used as IWDG and/or RTC
clock source.
(+) HSE (high-speed external): 4 to 48 MHz crystal oscillator used directly or
through the PLL as System clock source. Can be used also optionally as RTC clock source.
(+) LSE (low-speed external): 32.768 KHz oscillator used optionally as RTC clock source.
(+) PLL (clocked by HSI, HSE or MSI) providing up to three independent output clocks:
(++) The first output is used to generate the high speed system clock (up to 80MHz).
(++) The second output is used to generate the clock for the USB OTG FS (48 MHz),
the random analog generator (<=48 MHz) and the SDMMC1 (<= 48 MHz).
(++) The third output is used to generate an accurate clock to achieve
high-quality audio performance on SAI interface.
(+) PLLSAI1 (clocked by HSI, HSE or MSI) providing up to three independent output clocks:
(++) The first output is used to generate SAR ADC1 clock.
(++) The second output is used to generate the clock for the USB OTG FS (48 MHz),
the random analog generator (<=48 MHz) and the SDMMC1 (<= 48 MHz).
(++) The Third output is used to generate an accurate clock to achieve
high-quality audio performance on SAI interface.
(+) PLLSAI2 (clocked by HSI , HSE or MSI) providing up to two independent output clocks:
(++) The first output is used to generate SAR ADC2 clock.
(++) The second output is used to generate an accurate clock to achieve
high-quality audio performance on SAI interface.
(+) CSS (Clock security system): once enabled, if a HSE clock failure occurs
(HSE used directly or through PLL as System clock source), the System clock
is automatically switched to HSI and an interrupt is generated if enabled.
The interrupt is linked to the Cortex-M4 NMI (Non-Maskable Interrupt)
exception vector.
(+) MCO (microcontroller clock output): used to output MSI, LSI, HSI, LSE, HSE or
main PLL clock (through a configurable prescaler) on PA8 pin.
[..] System, AHB and APB busses clocks configuration
(+) Several clock sources can be used to drive the System clock (SYSCLK): MSI, HSI,
HSE and main PLL.
The AHB clock (HCLK) is derived from System clock through configurable
prescaler and used to clock the CPU, memory and peripherals mapped
on AHB bus (DMA, GPIO...). APB1 (PCLK1) and APB2 (PCLK2) clocks are derived
from AHB clock through configurable prescalers and used to clock
the peripherals mapped on these busses. You can use
"HAL_RCC_GetSysClockFreq()" function to retrieve the frequencies of these clocks.
-@- All the peripheral clocks are derived from the System clock (SYSCLK) except:
(+@) SAI: the SAI clock can be derived either from a specific PLL (PLLSAI1) or (PLLSAI2) or
from an external clock mapped on the SAI_CKIN pin.
You have to use HAL_RCCEx_PeriphCLKConfig() function to configure this clock.
(+@) RTC: the RTC clock can be derived either from the LSI, LSE or HSE clock
divided by 2 to 31.
You have to use __HAL_RCC_RTC_ENABLE() and HAL_RCCEx_PeriphCLKConfig() function
to configure this clock.
(+@) USB OTG FS, SDMMC1 and RNG: USB OTG FS requires a frequency equal to 48 MHz
to work correctly, while the SDMMC1 and RNG peripherals require a frequency
equal to or lower than 48 MHz. This clock is derived from the main PLL or PLLSAI1
through PLLQ divider. You have to enable the peripheral clock and use
HAL_RCCEx_PeriphCLKConfig() function to configure this clock.
(+@) IWDG clock which is always the LSI clock.
(+) The maximum frequency of the SYSCLK, HCLK, PCLK1 and PCLK2 is 80 MHz.
The clock source frequency should be adapted depending on the device voltage range
as listed in the Reference Manual "Clock source frequency versus voltage scaling" chapter.
@endverbatim
Table 1. HCLK clock frequency for STM32L4Rx/STM32L4Sx devices
+--------------------------------------------------------+
| Latency | HCLK clock frequency (MHz) |
| |--------------------------------------|
| | voltage range 1 | voltage range 2 |
| | 1.2 V | 1.0 V |
|-----------------|-------------------|------------------|
|0WS(1 CPU cycles)| 0 < HCLK <= 20 | 0 < HCLK <= 8 |
|-----------------|-------------------|------------------|
|1WS(2 CPU cycles)| 20 < HCLK <= 40 | 8 < HCLK <= 16 |
|-----------------|-------------------|------------------|
|2WS(3 CPU cycles)| 40 < HCLK <= 60 | 16 < HCLK <= 26 |
|-----------------|-------------------|------------------|
|3WS(4 CPU cycles)| 60 < HCLK <= 80 | 16 < HCLK <= 26 |
|-----------------|-------------------|------------------|
|4WS(5 CPU cycles)| 80 < HCLK <= 100 | 16 < HCLK <= 26 |
|-----------------|-------------------|------------------|
|5WS(6 CPU cycles)| 100 < HCLK <= 120 | 16 < HCLK <= 26 |
+--------------------------------------------------------+
Table 2. HCLK clock frequency for other STM32L4 devices
+-------------------------------------------------------+
| Latency | HCLK clock frequency (MHz) |
| |-------------------------------------|
| | voltage range 1 | voltage range 2 |
| | 1.2 V | 1.0 V |
|-----------------|------------------|------------------|
|0WS(1 CPU cycles)| 0 < HCLK <= 16 | 0 < HCLK <= 6 |
|-----------------|------------------|------------------|
|1WS(2 CPU cycles)| 16 < HCLK <= 32 | 6 < HCLK <= 12 |
|-----------------|------------------|------------------|
|2WS(3 CPU cycles)| 32 < HCLK <= 48 | 12 < HCLK <= 18 |
|-----------------|------------------|------------------|
|3WS(4 CPU cycles)| 48 < HCLK <= 64 | 18 < HCLK <= 26 |
|-----------------|------------------|------------------|
|4WS(5 CPU cycles)| 64 < HCLK <= 80 | 18 < HCLK <= 26 |
+-------------------------------------------------------+
* @{
*/
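/*
 * Illustrative usage sketch (application-side code, not part of this HAL
 * file). It assumes an 80 MHz SYSCLK generated from the 4 MHz MSI through
 * the main PLL in voltage range 1; field and macro names follow the
 * stm32l4xx HAL naming used in this driver, while HAL_RCC_ClockConfig()
 * and Error_Handler() are assumed to be provided elsewhere:
 *
 *   RCC_OscInitTypeDef osc = {0};
 *   RCC_ClkInitTypeDef clk = {0};
 *
 *   osc.OscillatorType      = RCC_OSCILLATORTYPE_MSI;
 *   osc.MSIState            = RCC_MSI_ON;
 *   osc.MSIClockRange       = RCC_MSIRANGE_6;            // 4 MHz
 *   osc.MSICalibrationValue = RCC_MSICALIBRATION_DEFAULT;
 *   osc.PLL.PLLState        = RCC_PLL_ON;
 *   osc.PLL.PLLSource       = RCC_PLLSOURCE_MSI;
 *   osc.PLL.PLLM            = 1;
 *   osc.PLL.PLLN            = 40;                        // VCO = 160 MHz
 *   osc.PLL.PLLR            = RCC_PLLR_DIV2;             // SYSCLK = 80 MHz
 *   osc.PLL.PLLP            = RCC_PLLP_DIV7;
 *   osc.PLL.PLLQ            = RCC_PLLQ_DIV2;
 *   if (HAL_RCC_OscConfig(&osc) != HAL_OK) {
 *     Error_Handler();
 *   }
 *
 *   clk.ClockType      = RCC_CLOCKTYPE_SYSCLK | RCC_CLOCKTYPE_HCLK
 *                      | RCC_CLOCKTYPE_PCLK1  | RCC_CLOCKTYPE_PCLK2;
 *   clk.SYSCLKSource   = RCC_SYSCLKSOURCE_PLLCLK;
 *   clk.AHBCLKDivider  = RCC_SYSCLK_DIV1;
 *   clk.APB1CLKDivider = RCC_HCLK_DIV1;
 *   clk.APB2CLKDivider = RCC_HCLK_DIV1;
 *   if (HAL_RCC_ClockConfig(&clk, FLASH_LATENCY_4) != HAL_OK) {
 *     Error_Handler();
 *   }
 *
 * FLASH_LATENCY_4 matches the "64 < HCLK <= 80" row of Table 2 above for
 * voltage range 1.
 */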
/**
* @brief Reset the RCC clock configuration to the default reset state.
* @note The default reset state of the clock configuration is given below:
* - MSI ON and used as system clock source
* - HSE, HSI, PLL, PLLSAI1 and PLLISAI2 OFF
* - AHB, APB1 and APB2 prescaler set to 1.
* - CSS, MCO1 OFF
* - All interrupts disabled
* - All interrupt and reset flags cleared
* @note This function doesn't modify the configuration of the
* - Peripheral clocks
* - LSI, LSE and RTC clocks
* @retval HAL status
*/
HAL_StatusTypeDef HAL_RCC_DeInit(void)
{
uint32_t tickstart = 0;
/* Set MSION bit */
SET_BIT(RCC->CR, RCC_CR_MSION);
/* Ensure MSIRDY bit is set before writing default MSIRANGE value */
/* Get start tick */
tickstart = HAL_GetTick();
/* Wait till MSI is ready */
while(READ_BIT(RCC->CR, RCC_CR_MSIRDY) == RESET)
{
if((HAL_GetTick() - tickstart) > MSI_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
/* Set MSIRANGE default value */
MODIFY_REG(RCC->CR, RCC_CR_MSIRANGE, RCC_MSIRANGE_6);
/* Reset CFGR register (MSI is selected as system clock source) */
CLEAR_REG(RCC->CFGR);
/* Update the SystemCoreClock global variable for MSI as system clock source */
SystemCoreClock = MSI_VALUE;
/* Configure the source of time base considering new system clock settings */
if(HAL_InitTick(TICK_INT_PRIORITY) != HAL_OK)
{
return HAL_ERROR;
}
/* Ensure MSI is selected as system clock source */
/* Get start tick */
tickstart = HAL_GetTick();
/* Wait till system clock source is ready */
while(READ_BIT(RCC->CFGR, RCC_CFGR_SWS) != RCC_CFGR_SWS_MSI)
{
if((HAL_GetTick() - tickstart) > CLOCKSWITCH_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
/* Reset HSION, HSIKERON, HSIASFS, HSEON, HSECSSON, PLLON, PLLSAIxON bits */
#if defined(RCC_PLLSAI2_SUPPORT)
CLEAR_BIT(RCC->CR, RCC_CR_HSEON | RCC_CR_HSION | RCC_CR_HSIKERON| RCC_CR_HSIASFS | RCC_CR_PLLON | RCC_CR_PLLSAI1ON | RCC_CR_PLLSAI2ON);
#else
CLEAR_BIT(RCC->CR, RCC_CR_HSEON | RCC_CR_HSION | RCC_CR_HSIKERON| RCC_CR_HSIASFS | RCC_CR_PLLON | RCC_CR_PLLSAI1ON);
#endif /* RCC_PLLSAI2_SUPPORT */
/* Ensure PLLRDY, PLLSAI1RDY and PLLSAI2RDY (if present) are reset */
/* Get start tick */
tickstart = HAL_GetTick();
#if defined(RCC_PLLSAI2_SUPPORT)
while(READ_BIT(RCC->CR, RCC_CR_PLLRDY | RCC_CR_PLLSAI1RDY | RCC_CR_PLLSAI2RDY) != 0U)
#else
while(READ_BIT(RCC->CR, RCC_CR_PLLRDY | RCC_CR_PLLSAI1RDY) != 0U)
#endif
{
if((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
/* Reset PLLCFGR register */
CLEAR_REG(RCC->PLLCFGR);
SET_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLN_4 );
/* Reset PLLSAI1CFGR register */
CLEAR_REG(RCC->PLLSAI1CFGR);
SET_BIT(RCC->PLLSAI1CFGR, RCC_PLLSAI1CFGR_PLLSAI1N_4 );
#if defined(RCC_PLLSAI2_SUPPORT)
/* Reset PLLSAI2CFGR register */
CLEAR_REG(RCC->PLLSAI2CFGR);
SET_BIT(RCC->PLLSAI2CFGR, RCC_PLLSAI2CFGR_PLLSAI2N_4 );
#endif /* RCC_PLLSAI2_SUPPORT */
/* Reset HSEBYP bit */
CLEAR_BIT(RCC->CR, RCC_CR_HSEBYP);
/* Disable all interrupts */
CLEAR_REG(RCC->CIER);
/* Clear all interrupt flags */
WRITE_REG(RCC->CICR, 0xFFFFFFFFU);
/* Clear all reset flags */
SET_BIT(RCC->CSR, RCC_CSR_RMVF);
return HAL_OK;
}
/**
* @brief Initialize the RCC Oscillators according to the specified parameters in the
* RCC_OscInitTypeDef.
* @param RCC_OscInitStruct pointer to an RCC_OscInitTypeDef structure that
* contains the configuration information for the RCC Oscillators.
* @note The PLL is not disabled when used as system clock.
 * @note Transitions LSE Bypass to LSE On and LSE On to LSE Bypass are not
 * supported by this function. User should request a transition to LSE Off
 * first and then LSE On or LSE Bypass.
 * @note Transitions HSE Bypass to HSE On and HSE On to HSE Bypass are not
 * supported by this function. User should request a transition to HSE Off
 * first and then HSE On or HSE Bypass.
* @retval HAL status
*/
HAL_StatusTypeDef HAL_RCC_OscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct)
{
uint32_t tickstart = 0;
/* Check the parameters */
assert_param(RCC_OscInitStruct != NULL);
assert_param(IS_RCC_OSCILLATORTYPE(RCC_OscInitStruct->OscillatorType));
/*----------------------------- MSI Configuration --------------------------*/
if(((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_MSI) == RCC_OSCILLATORTYPE_MSI)
{
/* Check the parameters */
assert_param(IS_RCC_MSI(RCC_OscInitStruct->MSIState));
assert_param(IS_RCC_MSICALIBRATION_VALUE(RCC_OscInitStruct->MSICalibrationValue));
assert_param(IS_RCC_MSI_CLOCK_RANGE(RCC_OscInitStruct->MSIClockRange));
/* When the MSI is used as system clock it will not be disabled */
if((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_MSI) )
{
if((READ_BIT(RCC->CR, RCC_CR_MSIRDY) != RESET) && (RCC_OscInitStruct->MSIState == RCC_MSI_OFF))
{
return HAL_ERROR;
}
/* Otherwise, just the calibration and MSI range change are allowed */
else
{
/* To correctly read data from FLASH memory, the number of wait states (LATENCY)
must be correctly programmed according to the frequency of the CPU clock
(HCLK) and the supply voltage of the device. */
if(RCC_OscInitStruct->MSIClockRange > __HAL_RCC_GET_MSI_RANGE())
{
/* First increase number of wait states update if necessary */
if(RCC_SetFlashLatencyFromMSIRange(RCC_OscInitStruct->MSIClockRange) != HAL_OK)
{
return HAL_ERROR;
}
/* Selects the Multiple Speed oscillator (MSI) clock range .*/
__HAL_RCC_MSI_RANGE_CONFIG(RCC_OscInitStruct->MSIClockRange);
/* Adjusts the Multiple Speed oscillator (MSI) calibration value.*/
__HAL_RCC_MSI_CALIBRATIONVALUE_ADJUST(RCC_OscInitStruct->MSICalibrationValue);
}
else
{
/* Else, keep current flash latency while decreasing applies */
/* Selects the Multiple Speed oscillator (MSI) clock range .*/
__HAL_RCC_MSI_RANGE_CONFIG(RCC_OscInitStruct->MSIClockRange);
/* Adjusts the Multiple Speed oscillator (MSI) calibration value.*/
__HAL_RCC_MSI_CALIBRATIONVALUE_ADJUST(RCC_OscInitStruct->MSICalibrationValue);
/* Decrease number of wait states update if necessary */
if(RCC_SetFlashLatencyFromMSIRange(RCC_OscInitStruct->MSIClockRange) != HAL_OK)
{
return HAL_ERROR;
}
}
/* Update the SystemCoreClock global variable */
SystemCoreClock = HAL_RCC_GetSysClockFreq() >> AHBPrescTable[READ_BIT(RCC->CFGR, RCC_CFGR_HPRE) >> RCC_CFGR_HPRE_Pos];
/* Configure the source of time base considering new system clocks settings*/
HAL_InitTick (TICK_INT_PRIORITY);
}
}
else
{
/* Check the MSI State */
if(RCC_OscInitStruct->MSIState != RCC_MSI_OFF)
{
/* Enable the Multiple Speed Internal oscillator (MSI). */
__HAL_RCC_MSI_ENABLE();
/* Get timeout */
tickstart = HAL_GetTick();
/* Wait till MSI is ready */
while(READ_BIT(RCC->CR, RCC_CR_MSIRDY) == RESET)
{
if((HAL_GetTick() - tickstart) > MSI_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
/* Selects the Multiple Speed oscillator (MSI) clock range .*/
__HAL_RCC_MSI_RANGE_CONFIG(RCC_OscInitStruct->MSIClockRange);
/* Adjusts the Multiple Speed oscillator (MSI) calibration value.*/
__HAL_RCC_MSI_CALIBRATIONVALUE_ADJUST(RCC_OscInitStruct->MSICalibrationValue);
}
else
{
/* Disable the Multiple Speed Internal oscillator (MSI). */
__HAL_RCC_MSI_DISABLE();
/* Get timeout */
tickstart = HAL_GetTick();
/* Wait till MSI is ready */
while(READ_BIT(RCC->CR, RCC_CR_MSIRDY) != RESET)
{
if((HAL_GetTick() - tickstart) > MSI_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
}
}
}
/*------------------------------- HSE Configuration ------------------------*/
if(((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_HSE) == RCC_OSCILLATORTYPE_HSE)
{
/* Check the parameters */
assert_param(IS_RCC_HSE(RCC_OscInitStruct->HSEState));
/* When the HSE is used as system clock or clock source for PLL in these cases it is not allowed to be disabled */
if((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_HSE) ||
((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLL) && (__HAL_RCC_GET_PLL_OSCSOURCE() == RCC_PLLSOURCE_HSE)))
{
if((READ_BIT(RCC->CR, RCC_CR_HSERDY) != RESET) && (RCC_OscInitStruct->HSEState == RCC_HSE_OFF))
{
return HAL_ERROR;
}
}
else
{
/* Set the new HSE configuration ---------------------------------------*/
__HAL_RCC_HSE_CONFIG(RCC_OscInitStruct->HSEState);
/* Check the HSE State */
if(RCC_OscInitStruct->HSEState != RCC_HSE_OFF)
{
/* Get Start Tick*/
tickstart = HAL_GetTick();
/* Wait till HSE is ready */
while(READ_BIT(RCC->CR, RCC_CR_HSERDY) == RESET)
{
if((HAL_GetTick() - tickstart) > HSE_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
}
else
{
/* Get Start Tick*/
tickstart = HAL_GetTick();
/* Wait till HSE is disabled */
while(READ_BIT(RCC->CR, RCC_CR_HSERDY) != RESET)
{
if((HAL_GetTick() - tickstart) > HSE_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
}
}
}
/*----------------------------- HSI Configuration --------------------------*/
if(((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_HSI) == RCC_OSCILLATORTYPE_HSI)
{
/* Check the parameters */
assert_param(IS_RCC_HSI(RCC_OscInitStruct->HSIState));
assert_param(IS_RCC_HSI_CALIBRATION_VALUE(RCC_OscInitStruct->HSICalibrationValue));
/* Check if HSI is used as system clock or as PLL source when PLL is selected as system clock */
if((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_HSI) ||
((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLL) && (__HAL_RCC_GET_PLL_OSCSOURCE() == RCC_PLLSOURCE_HSI)))
{
/* When HSI is used as system clock it will not be disabled */
if((READ_BIT(RCC->CR, RCC_CR_HSIRDY) != RESET) && (RCC_OscInitStruct->HSIState == RCC_HSI_OFF))
{
return HAL_ERROR;
}
/* Otherwise, just the calibration is allowed */
else
{
/* Adjusts the Internal High Speed oscillator (HSI) calibration value.*/
__HAL_RCC_HSI_CALIBRATIONVALUE_ADJUST(RCC_OscInitStruct->HSICalibrationValue);
}
}
else
{
/* Check the HSI State */
if(RCC_OscInitStruct->HSIState != RCC_HSI_OFF)
{
/* Enable the Internal High Speed oscillator (HSI). */
__HAL_RCC_HSI_ENABLE();
/* Get Start Tick*/
tickstart = HAL_GetTick();
/* Wait till HSI is ready */
while(READ_BIT(RCC->CR, RCC_CR_HSIRDY) == RESET)
{
if((HAL_GetTick() - tickstart) > HSI_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
/* Adjusts the Internal High Speed oscillator (HSI) calibration value.*/
__HAL_RCC_HSI_CALIBRATIONVALUE_ADJUST(RCC_OscInitStruct->HSICalibrationValue);
}
else
{
/* Disable the Internal High Speed oscillator (HSI). */
__HAL_RCC_HSI_DISABLE();
/* Get Start Tick*/
tickstart = HAL_GetTick();
/* Wait till HSI is disabled */
while(READ_BIT(RCC->CR, RCC_CR_HSIRDY) != RESET)
{
if((HAL_GetTick() - tickstart) > HSI_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
}
}
}
/*------------------------------ LSI Configuration -------------------------*/
if(((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_LSI) == RCC_OSCILLATORTYPE_LSI)
{
/* Check the parameters */
assert_param(IS_RCC_LSI(RCC_OscInitStruct->LSIState));
/* Check the LSI State */
if(RCC_OscInitStruct->LSIState != RCC_LSI_OFF)
{
/* Enable the Internal Low Speed oscillator (LSI). */
__HAL_RCC_LSI_ENABLE();
/* Get Start Tick*/
tickstart = HAL_GetTick();
/* Wait till LSI is ready */
while(READ_BIT(RCC->CSR, RCC_CSR_LSIRDY) == RESET)
{
if((HAL_GetTick() - tickstart) > LSI_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
}
else
{
/* Disable the Internal Low Speed oscillator (LSI). */
__HAL_RCC_LSI_DISABLE();
/* Get Start Tick*/
tickstart = HAL_GetTick();
/* Wait till LSI is disabled */
while(READ_BIT(RCC->CSR, RCC_CSR_LSIRDY) != RESET)
{
if((HAL_GetTick() - tickstart) > LSI_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
}
}
/*------------------------------ LSE Configuration -------------------------*/
if(((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_LSE) == RCC_OSCILLATORTYPE_LSE)
{
FlagStatus pwrclkchanged = RESET;
/* Check the parameters */
assert_param(IS_RCC_LSE(RCC_OscInitStruct->LSEState));
/* Update LSE configuration in Backup Domain control register */
/* Write access to the Backup Domain must be enabled if necessary */
if(HAL_IS_BIT_CLR(RCC->APB1ENR1, RCC_APB1ENR1_PWREN))
{
__HAL_RCC_PWR_CLK_ENABLE();
pwrclkchanged = SET;
}
if(HAL_IS_BIT_CLR(PWR->CR1, PWR_CR1_DBP))
{
/* Enable write access to Backup domain */
SET_BIT(PWR->CR1, PWR_CR1_DBP);
/* Wait for Backup domain Write protection disable */
tickstart = HAL_GetTick();
while(HAL_IS_BIT_CLR(PWR->CR1, PWR_CR1_DBP))
{
if((HAL_GetTick() - tickstart) > RCC_DBP_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
}
/* Set the new LSE configuration -----------------------------------------*/
__HAL_RCC_LSE_CONFIG(RCC_OscInitStruct->LSEState);
/* Check the LSE State */
if(RCC_OscInitStruct->LSEState != RCC_LSE_OFF)
{
/* Get Start Tick*/
tickstart = HAL_GetTick();
/* Wait till LSE is ready */
while(READ_BIT(RCC->BDCR, RCC_BDCR_LSERDY) == RESET)
{
if((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
}
else
{
/* Get Start Tick*/
tickstart = HAL_GetTick();
/* Wait till LSE is disabled */
while(READ_BIT(RCC->BDCR, RCC_BDCR_LSERDY) != RESET)
{
if((HAL_GetTick() - tickstart) > RCC_LSE_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
}
/* Restore clock configuration if changed */
if(pwrclkchanged == SET)
{
__HAL_RCC_PWR_CLK_DISABLE();
}
}
#if defined(RCC_HSI48_SUPPORT)
/*------------------------------ HSI48 Configuration -----------------------*/
if(((RCC_OscInitStruct->OscillatorType) & RCC_OSCILLATORTYPE_HSI48) == RCC_OSCILLATORTYPE_HSI48)
{
/* Check the parameters */
assert_param(IS_RCC_HSI48(RCC_OscInitStruct->HSI48State));
/* Check the HSI48 State */
if(RCC_OscInitStruct->HSI48State != RCC_HSI48_OFF)
{
/* Enable the Internal High Speed oscillator (HSI48). */
__HAL_RCC_HSI48_ENABLE();
/* Get Start Tick*/
tickstart = HAL_GetTick();
/* Wait till HSI48 is ready */
while(READ_BIT(RCC->CRRCR, RCC_CRRCR_HSI48RDY) == RESET)
{
if((HAL_GetTick() - tickstart) > HSI48_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
}
else
{
/* Disable the Internal High Speed oscillator (HSI48). */
__HAL_RCC_HSI48_DISABLE();
/* Get Start Tick*/
tickstart = HAL_GetTick();
/* Wait till HSI48 is disabled */
while(READ_BIT(RCC->CRRCR, RCC_CRRCR_HSI48RDY) != RESET)
{
if((HAL_GetTick() - tickstart) > HSI48_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
}
}
#endif /* RCC_HSI48_SUPPORT */
/*-------------------------------- PLL Configuration -----------------------*/
/* Check the parameters */
assert_param(IS_RCC_PLL(RCC_OscInitStruct->PLL.PLLState));
if(RCC_OscInitStruct->PLL.PLLState != RCC_PLL_NONE)
{
/* Check if the PLL is used as system clock or not */
if(__HAL_RCC_GET_SYSCLK_SOURCE() != RCC_CFGR_SWS_PLL)
{
if(RCC_OscInitStruct->PLL.PLLState == RCC_PLL_ON)
{
/* Check the parameters */
assert_param(IS_RCC_PLLSOURCE(RCC_OscInitStruct->PLL.PLLSource));
assert_param(IS_RCC_PLLM_VALUE(RCC_OscInitStruct->PLL.PLLM));
assert_param(IS_RCC_PLLN_VALUE(RCC_OscInitStruct->PLL.PLLN));
assert_param(IS_RCC_PLLP_VALUE(RCC_OscInitStruct->PLL.PLLP));
assert_param(IS_RCC_PLLQ_VALUE(RCC_OscInitStruct->PLL.PLLQ));
assert_param(IS_RCC_PLLR_VALUE(RCC_OscInitStruct->PLL.PLLR));
/* Disable the main PLL. */
__HAL_RCC_PLL_DISABLE();
/* Get Start Tick*/
tickstart = HAL_GetTick();
/* Wait till PLL is ready */
while(READ_BIT(RCC->CR, RCC_CR_PLLRDY) != RESET)
{
if((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
/* Configure the main PLL clock source, multiplication and division factors. */
__HAL_RCC_PLL_CONFIG(RCC_OscInitStruct->PLL.PLLSource,
RCC_OscInitStruct->PLL.PLLM,
RCC_OscInitStruct->PLL.PLLN,
RCC_OscInitStruct->PLL.PLLP,
RCC_OscInitStruct->PLL.PLLQ,
RCC_OscInitStruct->PLL.PLLR);
/* Enable the main PLL. */
__HAL_RCC_PLL_ENABLE();
/* Enable PLL System Clock output. */
__HAL_RCC_PLLCLKOUT_ENABLE(RCC_PLL_SYSCLK);
/* Get Start Tick*/
tickstart = HAL_GetTick();
/* Wait till PLL is ready */
while(READ_BIT(RCC->CR, RCC_CR_PLLRDY) == RESET)
{
if((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
}
else
{
/* Disable the main PLL. */
__HAL_RCC_PLL_DISABLE();
/* Disable all PLL outputs to save power when no other PLL is running */
if((READ_BIT(RCC->CR, RCC_CR_PLLSAI1RDY) == RESET)
#if defined(RCC_PLLSAI2_SUPPORT)
&&
(READ_BIT(RCC->CR, RCC_CR_PLLSAI2RDY) == RESET)
#endif /* RCC_PLLSAI2_SUPPORT */
)
{
MODIFY_REG(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC, RCC_PLLSOURCE_NONE);
}
#if defined(RCC_PLLSAI2_SUPPORT)
__HAL_RCC_PLLCLKOUT_DISABLE(RCC_PLL_SYSCLK | RCC_PLL_48M1CLK | RCC_PLL_SAI3CLK);
#else
__HAL_RCC_PLLCLKOUT_DISABLE(RCC_PLL_SYSCLK | RCC_PLL_48M1CLK | RCC_PLL_SAI2CLK);
#endif /* RCC_PLLSAI2_SUPPORT */
/* Get Start Tick*/
tickstart = HAL_GetTick();
/* Wait till PLL is disabled */
while(READ_BIT(RCC->CR, RCC_CR_PLLRDY) != RESET)
{
if((HAL_GetTick() - tickstart) > PLL_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
}
}
else
{
return HAL_ERROR;
}
}
return HAL_OK;
}
/**
* @brief  Initialize the CPU, AHB and APB buses clocks according to the specified
* parameters in the RCC_ClkInitStruct.
* @param  RCC_ClkInitStruct  pointer to an RCC_ClkInitTypeDef structure that
* contains the configuration information for the RCC peripheral.
* @param FLatency FLASH Latency
* This parameter can be one of the following values:
* @arg FLASH_LATENCY_0 FLASH 0 Latency cycle
* @arg FLASH_LATENCY_1 FLASH 1 Latency cycle
* @arg FLASH_LATENCY_2 FLASH 2 Latency cycles
* @arg FLASH_LATENCY_3 FLASH 3 Latency cycles
* @arg FLASH_LATENCY_4 FLASH 4 Latency cycles
@if STM32L4S9xx
* @arg FLASH_LATENCY_5 FLASH 5 Latency cycles
* @arg FLASH_LATENCY_6 FLASH 6 Latency cycles
* @arg FLASH_LATENCY_7 FLASH 7 Latency cycles
* @arg FLASH_LATENCY_8 FLASH 8 Latency cycles
* @arg FLASH_LATENCY_9 FLASH 9 Latency cycles
* @arg FLASH_LATENCY_10 FLASH 10 Latency cycles
* @arg FLASH_LATENCY_11 FLASH 11 Latency cycles
* @arg FLASH_LATENCY_12 FLASH 12 Latency cycles
* @arg FLASH_LATENCY_13 FLASH 13 Latency cycles
* @arg FLASH_LATENCY_14 FLASH 14 Latency cycles
* @arg FLASH_LATENCY_15 FLASH 15 Latency cycles
@endif
*
* @note The SystemCoreClock CMSIS variable is used to store the System Clock Frequency
* and is updated by the HAL_RCC_GetHCLKFreq() function called within this function
*
* @note The MSI is used by default as system clock source after
* startup from Reset, wake-up from STANDBY mode. After restart from Reset,
* the MSI frequency is set to its default value 4 MHz.
*
* @note The HSI can be selected as system clock source after wake-up
* from STOP modes or in case of failure of the HSE used directly or indirectly
* as system clock (if the Clock Security System CSS is enabled).
*
* @note A switch from one clock source to another occurs only if the target
* clock source is ready (clock stable after startup delay or PLL locked).
* If a clock source which is not yet ready is selected, the switch will
* occur when the clock source is ready.
*
* @note You can use HAL_RCC_GetClockConfig() function to know which clock is
* currently used as system clock source.
*
* @note Depending on the device voltage range, the software has to correctly set the
* HPRE[3:0] bits to ensure that HCLK does not exceed the maximum allowed frequency
* (for more details refer to section above "Initialization/de-initialization functions")
* @retval None
*/
HAL_StatusTypeDef HAL_RCC_ClockConfig(RCC_ClkInitTypeDef *RCC_ClkInitStruct, uint32_t FLatency)
{
uint32_t tickstart = 0;
#if defined(STM32L4R5xx) || defined(STM32L4R7xx) || defined(STM32L4R9xx) || defined(STM32L4S5xx) || defined(STM32L4S7xx) || defined(STM32L4S9xx)
uint32_t pllfreq = 0;
uint32_t hpre = RCC_SYSCLK_DIV1;
#endif
/* Check the parameters */
assert_param(RCC_ClkInitStruct != NULL);
assert_param(IS_RCC_CLOCKTYPE(RCC_ClkInitStruct->ClockType));
assert_param(IS_FLASH_LATENCY(FLatency));
/* To correctly read data from FLASH memory, the number of wait states (LATENCY)
must be correctly programmed according to the frequency of the CPU clock
(HCLK) and the supply voltage of the device. */
/* Increasing the number of wait states because of higher CPU frequency */
if(FLatency > READ_BIT(FLASH->ACR, FLASH_ACR_LATENCY))
{
/* Program the new number of wait states to the LATENCY bits in the FLASH_ACR register */
__HAL_FLASH_SET_LATENCY(FLatency);
/* Check that the new number of wait states is taken into account to access the Flash
memory by reading the FLASH_ACR register */
if(READ_BIT(FLASH->ACR, FLASH_ACR_LATENCY) != FLatency)
{
return HAL_ERROR;
}
}
/*------------------------- SYSCLK Configuration ---------------------------*/
if(((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_SYSCLK) == RCC_CLOCKTYPE_SYSCLK)
{
assert_param(IS_RCC_SYSCLKSOURCE(RCC_ClkInitStruct->SYSCLKSource));
/* PLL is selected as System Clock Source */
if(RCC_ClkInitStruct->SYSCLKSource == RCC_SYSCLKSOURCE_PLLCLK)
{
/* Check the PLL ready flag */
if(READ_BIT(RCC->CR, RCC_CR_PLLRDY) == RESET)
{
return HAL_ERROR;
}
#if defined(STM32L4R5xx) || defined(STM32L4R7xx) || defined(STM32L4R9xx) || defined(STM32L4S5xx) || defined(STM32L4S7xx) || defined(STM32L4S9xx)
/* Undershoot management when selecting PLL as SYSCLK source and frequency above 80 MHz */
/* Compute target PLL output frequency */
pllfreq = RCC_GetSysClockFreqFromPLLSource();
/* Intermediate step with HCLK prescaler 2 necessary before going over 80 MHz */
if((pllfreq > 80000000U) &&
(((((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_HCLK) == RCC_CLOCKTYPE_HCLK) && (RCC_ClkInitStruct->AHBCLKDivider == RCC_SYSCLK_DIV1))
||
((READ_BIT(RCC->CFGR, RCC_CFGR_HPRE) == RCC_SYSCLK_DIV1))))
{
MODIFY_REG(RCC->CFGR, RCC_CFGR_HPRE, RCC_SYSCLK_DIV2);
hpre = RCC_SYSCLK_DIV2;
}
#endif
}
else
{
/* HSE is selected as System Clock Source */
if(RCC_ClkInitStruct->SYSCLKSource == RCC_SYSCLKSOURCE_HSE)
{
/* Check the HSE ready flag */
if(READ_BIT(RCC->CR, RCC_CR_HSERDY) == RESET)
{
return HAL_ERROR;
}
}
/* MSI is selected as System Clock Source */
else if(RCC_ClkInitStruct->SYSCLKSource == RCC_SYSCLKSOURCE_MSI)
{
/* Check the MSI ready flag */
if(READ_BIT(RCC->CR, RCC_CR_MSIRDY) == RESET)
{
return HAL_ERROR;
}
}
/* HSI is selected as System Clock Source */
else
{
/* Check the HSI ready flag */
if(READ_BIT(RCC->CR, RCC_CR_HSIRDY) == RESET)
{
return HAL_ERROR;
}
}
#if defined(STM32L4R5xx) || defined(STM32L4R7xx) || defined(STM32L4R9xx) || defined(STM32L4S5xx) || defined(STM32L4S7xx) || defined(STM32L4S9xx)
/* Overshoot management when going down from PLL as SYSCLK source and frequency above 80 MHz */
pllfreq = HAL_RCC_GetSysClockFreq();
/* Intermediate step with HCLK prescaler 2 necessary before going under 80 MHz */
if(pllfreq > 80000000U)
{
MODIFY_REG(RCC->CFGR, RCC_CFGR_HPRE, RCC_SYSCLK_DIV2);
hpre = RCC_SYSCLK_DIV2;
}
#endif
}
MODIFY_REG(RCC->CFGR, RCC_CFGR_SW, RCC_ClkInitStruct->SYSCLKSource);
/* Get Start Tick*/
tickstart = HAL_GetTick();
if(RCC_ClkInitStruct->SYSCLKSource == RCC_SYSCLKSOURCE_PLLCLK)
{
while (__HAL_RCC_GET_SYSCLK_SOURCE() != RCC_CFGR_SWS_PLL)
{
if((HAL_GetTick() - tickstart) > CLOCKSWITCH_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
}
else
{
if(RCC_ClkInitStruct->SYSCLKSource == RCC_SYSCLKSOURCE_HSE)
{
while (__HAL_RCC_GET_SYSCLK_SOURCE() != RCC_CFGR_SWS_HSE)
{
if((HAL_GetTick() - tickstart) > CLOCKSWITCH_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
}
else if(RCC_ClkInitStruct->SYSCLKSource == RCC_SYSCLKSOURCE_MSI)
{
while (__HAL_RCC_GET_SYSCLK_SOURCE() != RCC_CFGR_SWS_MSI)
{
if((HAL_GetTick() - tickstart) > CLOCKSWITCH_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
}
else
{
while(__HAL_RCC_GET_SYSCLK_SOURCE() != RCC_CFGR_SWS_HSI)
{
if((HAL_GetTick() - tickstart) > CLOCKSWITCH_TIMEOUT_VALUE)
{
return HAL_TIMEOUT;
}
}
}
}
}
/*-------------------------- HCLK Configuration --------------------------*/
if(((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_HCLK) == RCC_CLOCKTYPE_HCLK)
{
assert_param(IS_RCC_HCLK(RCC_ClkInitStruct->AHBCLKDivider));
MODIFY_REG(RCC->CFGR, RCC_CFGR_HPRE, RCC_ClkInitStruct->AHBCLKDivider);
}
#if defined(STM32L4R5xx) || defined(STM32L4R7xx) || defined(STM32L4R9xx) || defined(STM32L4S5xx) || defined(STM32L4S7xx) || defined(STM32L4S9xx)
else
{
/* If intermediate HCLK prescaler 2 was applied internally, complete the configuration with HCLK prescaler 1 */
if(hpre == RCC_SYSCLK_DIV2)
{
MODIFY_REG(RCC->CFGR, RCC_CFGR_HPRE, RCC_SYSCLK_DIV1);
}
}
#endif
/* Decreasing the number of wait states because of lower CPU frequency */
if(FLatency < READ_BIT(FLASH->ACR, FLASH_ACR_LATENCY))
{
/* Program the new number of wait states to the LATENCY bits in the FLASH_ACR register */
__HAL_FLASH_SET_LATENCY(FLatency);
/* Check that the new number of wait states is taken into account to access the Flash
memory by reading the FLASH_ACR register */
if(READ_BIT(FLASH->ACR, FLASH_ACR_LATENCY) != FLatency)
{
return HAL_ERROR;
}
}
/*-------------------------- PCLK1 Configuration ---------------------------*/
if(((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_PCLK1) == RCC_CLOCKTYPE_PCLK1)
{
assert_param(IS_RCC_PCLK(RCC_ClkInitStruct->APB1CLKDivider));
MODIFY_REG(RCC->CFGR, RCC_CFGR_PPRE1, RCC_ClkInitStruct->APB1CLKDivider);
}
/*-------------------------- PCLK2 Configuration ---------------------------*/
if(((RCC_ClkInitStruct->ClockType) & RCC_CLOCKTYPE_PCLK2) == RCC_CLOCKTYPE_PCLK2)
{
assert_param(IS_RCC_PCLK(RCC_ClkInitStruct->APB2CLKDivider));
MODIFY_REG(RCC->CFGR, RCC_CFGR_PPRE2, ((RCC_ClkInitStruct->APB2CLKDivider) << 3U));
}
/* Update the SystemCoreClock global variable */
SystemCoreClock = HAL_RCC_GetSysClockFreq() >> AHBPrescTable[READ_BIT(RCC->CFGR, RCC_CFGR_HPRE) >> RCC_CFGR_HPRE_Pos];
/* Configure the source of time base considering new system clocks settings*/
//HAL_InitTick (TICK_INT_PRIORITY);
return HAL_OK;
}
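/* Usage sketch (illustration only, not part of the driver): a minimal example of
   feeding a 4 MHz MSI into the main PLL to reach an 80 MHz SYSCLK. It assumes a
   typical STM32L4 part at voltage scale 1 (hence FLASH_LATENCY_4); the PLL factors,
   latency and MSI range come from the standard stm32l4xx HAL headers and must be
   adapted to the actual device and supply. Kept under #if 0 so the driver builds
   unchanged. */
#if 0
static HAL_StatusTypeDef Example_SystemClock_80MHz(void)
{
  RCC_OscInitTypeDef osc = {0};
  RCC_ClkInitTypeDef clk = {0};

  /* MSI 4 MHz -> PLL: VCO = (4 MHz / 1) * 40 = 160 MHz, SYSCLK = 160 MHz / 2 = 80 MHz */
  osc.OscillatorType      = RCC_OSCILLATORTYPE_MSI;
  osc.MSIState            = RCC_MSI_ON;
  osc.MSIClockRange       = RCC_MSIRANGE_6;
  osc.MSICalibrationValue = RCC_MSICALIBRATION_DEFAULT;
  osc.PLL.PLLState        = RCC_PLL_ON;
  osc.PLL.PLLSource       = RCC_PLLSOURCE_MSI;
  osc.PLL.PLLM            = 1;
  osc.PLL.PLLN            = 40;
  osc.PLL.PLLP            = RCC_PLLP_DIV7;
  osc.PLL.PLLQ            = RCC_PLLQ_DIV2;
  osc.PLL.PLLR            = RCC_PLLR_DIV2;
  if (HAL_RCC_OscConfig(&osc) != HAL_OK)
  {
    return HAL_ERROR;
  }

  /* Switch SYSCLK to the PLL and set the bus prescalers */
  clk.ClockType      = RCC_CLOCKTYPE_SYSCLK | RCC_CLOCKTYPE_HCLK |
                       RCC_CLOCKTYPE_PCLK1  | RCC_CLOCKTYPE_PCLK2;
  clk.SYSCLKSource   = RCC_SYSCLKSOURCE_PLLCLK;
  clk.AHBCLKDivider  = RCC_SYSCLK_DIV1;
  clk.APB1CLKDivider = RCC_HCLK_DIV1;
  clk.APB2CLKDivider = RCC_HCLK_DIV1;
  return HAL_RCC_ClockConfig(&clk, FLASH_LATENCY_4);
}
#endif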
/**
* @}
*/
/** @defgroup RCC_Exported_Functions_Group2 Peripheral Control functions
* @brief RCC clocks control functions
*
@verbatim
===============================================================================
##### Peripheral Control functions #####
===============================================================================
[..]
This subsection provides a set of functions allowing to:
(+) Output clock to MCO pin.
(+) Retrieve current clock frequencies.
(+) Enable the Clock Security System.
@endverbatim
* @{
*/
/**
* @brief Select the clock source to output on MCO pin(PA8).
* @note PA8 should be configured in alternate function mode.
* @param RCC_MCOx specifies the output direction for the clock source.
* For STM32L4xx family this parameter can have only one value:
* @arg @ref RCC_MCO1 Clock source to output on MCO1 pin(PA8).
* @param RCC_MCOSource specifies the clock source to output.
* This parameter can be one of the following values:
* @arg @ref RCC_MCO1SOURCE_NOCLOCK MCO output disabled, no clock on MCO
* @arg @ref RCC_MCO1SOURCE_SYSCLK system clock selected as MCO source
* @arg @ref RCC_MCO1SOURCE_MSI MSI clock selected as MCO source
* @arg @ref RCC_MCO1SOURCE_HSI HSI clock selected as MCO source
* @arg @ref RCC_MCO1SOURCE_HSE HSE clock selected as MCO source
* @arg @ref RCC_MCO1SOURCE_PLLCLK main PLL clock selected as MCO source
* @arg @ref RCC_MCO1SOURCE_LSI LSI clock selected as MCO source
* @arg @ref RCC_MCO1SOURCE_LSE LSE clock selected as MCO source
@if STM32L443xx
* @arg @ref RCC_MCO1SOURCE_HSI48 HSI48 clock selected as MCO source for devices with HSI48
@endif
* @param RCC_MCODiv specifies the MCO prescaler.
* This parameter can be one of the following values:
* @arg @ref RCC_MCODIV_1 no division applied to MCO clock
* @arg @ref RCC_MCODIV_2 division by 2 applied to MCO clock
* @arg @ref RCC_MCODIV_4 division by 4 applied to MCO clock
* @arg @ref RCC_MCODIV_8 division by 8 applied to MCO clock
* @arg @ref RCC_MCODIV_16 division by 16 applied to MCO clock
* @retval None
*/
void HAL_RCC_MCOConfig( uint32_t RCC_MCOx, uint32_t RCC_MCOSource, uint32_t RCC_MCODiv)
{
GPIO_InitTypeDef GPIO_InitStruct;
/* Check the parameters */
assert_param(IS_RCC_MCO(RCC_MCOx));
assert_param(IS_RCC_MCODIV(RCC_MCODiv));
assert_param(IS_RCC_MCO1SOURCE(RCC_MCOSource));
/* MCO Clock Enable */
__MCO1_CLK_ENABLE();
/* Configure the MCO1 pin in alternate function mode */
GPIO_InitStruct.Pin = MCO1_PIN;
GPIO_InitStruct.Mode = GPIO_MODE_AF_PP;
GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_HIGH;
GPIO_InitStruct.Pull = GPIO_NOPULL;
GPIO_InitStruct.Alternate = GPIO_AF0_MCO;
HAL_GPIO_Init(MCO1_GPIO_PORT, &GPIO_InitStruct);
/* Mask MCOSEL[] and MCOPRE[] bits then set MCO1 clock source and prescaler */
MODIFY_REG(RCC->CFGR, (RCC_CFGR_MCOSEL | RCC_CFGR_MCOPRE), (RCC_MCOSource | RCC_MCODiv ));
}
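/* Usage sketch (illustration only): output SYSCLK divided by 16 on the MCO pin (PA8),
   which is convenient for checking the clock tree with a scope or frequency counter.
   GPIO and clock setup for the pin is handled inside HAL_RCC_MCOConfig() above, so no
   extra configuration is needed by the caller. Kept under #if 0. */
#if 0
static void Example_EnableMCO(void)
{
  HAL_RCC_MCOConfig(RCC_MCO1, RCC_MCO1SOURCE_SYSCLK, RCC_MCODIV_16);
}
#endif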
/**
* @brief Return the SYSCLK frequency.
*
* @note The system frequency computed by this function is not the real
* frequency in the chip. It is calculated based on the predefined
* constant and the selected clock source:
* @note If SYSCLK source is MSI, function returns values based on MSI
* Value as defined by the MSI range.
* @note If SYSCLK source is HSI, function returns values based on HSI_VALUE(*)
* @note If SYSCLK source is HSE, function returns values based on HSE_VALUE(**)
* @note If SYSCLK source is PLL, function returns values based on HSE_VALUE(**),
* HSI_VALUE(*) or MSI Value multiplied/divided by the PLL factors.
* @note (*) HSI_VALUE is a constant defined in stm32l4xx_hal_conf.h file (default value
* 16 MHz) but the real value may vary depending on the variations
* in voltage and temperature.
* @note (**) HSE_VALUE is a constant defined in stm32l4xx_hal_conf.h file (default value
* 8 MHz), the user has to ensure that HSE_VALUE is the same as the real
* frequency of the crystal used. Otherwise, this function may
* return a wrong result.
*
* @note The result of this function may not be correct when using a fractional
* value for the HSE crystal.
*
* @note This function can be used by the user application to compute the
* baudrate for the communication peripherals or configure other parameters.
*
* @note Each time SYSCLK changes, this function must be called to update the
* right SYSCLK value. Otherwise, any configuration based on this function will be incorrect.
*
*
* @retval SYSCLK frequency
*/
uint32_t HAL_RCC_GetSysClockFreq(void)
{
uint32_t msirange = 0U, pllvco = 0U, pllsource = 0U, pllr = 2U, pllm = 2U;
uint32_t sysclockfreq = 0U;
if((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_MSI) ||
((__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLL) && (__HAL_RCC_GET_PLL_OSCSOURCE() == RCC_PLLSOURCE_MSI)))
{
/* MSI or PLL with MSI source used as system clock source */
/* Get SYSCLK source */
if(READ_BIT(RCC->CR, RCC_CR_MSIRGSEL) == RESET)
{ /* MSISRANGE from RCC_CSR applies */
msirange = READ_BIT(RCC->CSR, RCC_CSR_MSISRANGE) >> RCC_CSR_MSISRANGE_Pos;
}
else
{ /* MSIRANGE from RCC_CR applies */
msirange = READ_BIT(RCC->CR, RCC_CR_MSIRANGE) >> RCC_CR_MSIRANGE_Pos;
}
/* MSI frequency range in Hz */
msirange = MSIRangeTable[msirange];
if(__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_MSI)
{
/* MSI used as system clock source */
sysclockfreq = msirange;
}
}
else if(__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_HSI)
{
/* HSI used as system clock source */
sysclockfreq = HSI_VALUE;
}
else if(__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_HSE)
{
/* HSE used as system clock source */
sysclockfreq = HSE_VALUE;
}
if(__HAL_RCC_GET_SYSCLK_SOURCE() == RCC_CFGR_SWS_PLL)
{
/* PLL used as system clock source */
/* PLL_VCO = ((HSE_VALUE or HSI_VALUE or MSI_VALUE) / PLLM) * PLLN
   SYSCLK = PLL_VCO / PLLR
*/
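/* Worked example (reset-like values, for illustration only):
   MSI = 4 MHz, PLLM = 1, PLLN = 40, PLLR = 2
   PLL_VCO = (4 MHz / 1) * 40 = 160 MHz and SYSCLK = 160 MHz / 2 = 80 MHz */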
pllsource = READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC);
pllm = (READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLM) >> RCC_PLLCFGR_PLLM_Pos) + 1U ;
switch (pllsource)
{
case RCC_PLLSOURCE_HSI: /* HSI used as PLL clock source */
pllvco = (HSI_VALUE / pllm) * (READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos);
break;
case RCC_PLLSOURCE_HSE: /* HSE used as PLL clock source */
pllvco = (HSE_VALUE / pllm) * (READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos);
break;
case RCC_PLLSOURCE_MSI: /* MSI used as PLL clock source */
default:
pllvco = (msirange / pllm) * (READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos);
break;
}
pllr = ((READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLR) >> RCC_PLLCFGR_PLLR_Pos) + 1U ) * 2U;
sysclockfreq = pllvco/pllr;
}
return sysclockfreq;
}
/**
* @brief Return the HCLK frequency.
* @note Each time HCLK changes, this function must be called to update the
* right HCLK value. Otherwise, any configuration based on this function will be incorrect.
*
* @note The SystemCoreClock CMSIS variable is used to store System Clock Frequency.
* @retval HCLK frequency in Hz
*/
uint32_t HAL_RCC_GetHCLKFreq(void)
{
return SystemCoreClock;
}
/**
* @brief Return the PCLK1 frequency.
* @note Each time PCLK1 changes, this function must be called to update the
* right PCLK1 value. Otherwise, any configuration based on this function will be incorrect.
* @retval PCLK1 frequency in Hz
*/
uint32_t HAL_RCC_GetPCLK1Freq(void)
{
/* Get HCLK source and Compute PCLK1 frequency ---------------------------*/
return (HAL_RCC_GetHCLKFreq() >> APBPrescTable[READ_BIT(RCC->CFGR, RCC_CFGR_PPRE1) >> RCC_CFGR_PPRE1_Pos]);
}
/**
* @brief Return the PCLK2 frequency.
* @note Each time PCLK2 changes, this function must be called to update the
* right PCLK2 value. Otherwise, any configuration based on this function will be incorrect.
* @retval PCLK2 frequency in Hz
*/
uint32_t HAL_RCC_GetPCLK2Freq(void)
{
/* Get HCLK source and Compute PCLK2 frequency ---------------------------*/
return (HAL_RCC_GetHCLKFreq()>> APBPrescTable[READ_BIT(RCC->CFGR, RCC_CFGR_PPRE2) >> RCC_CFGR_PPRE2_Pos]);
}
/**
* @brief Configure the RCC_OscInitStruct according to the internal
* RCC configuration registers.
* @param RCC_OscInitStruct pointer to an RCC_OscInitTypeDef structure that
* will be configured.
* @retval None
*/
void HAL_RCC_GetOscConfig(RCC_OscInitTypeDef *RCC_OscInitStruct)
{
/* Check the parameters */
assert_param(RCC_OscInitStruct != NULL);
/* Set all possible values for the Oscillator type parameter ---------------*/
#if defined(RCC_HSI48_SUPPORT)
RCC_OscInitStruct->OscillatorType = RCC_OSCILLATORTYPE_HSE | RCC_OSCILLATORTYPE_HSI | RCC_OSCILLATORTYPE_MSI | \
RCC_OSCILLATORTYPE_LSE | RCC_OSCILLATORTYPE_LSI | RCC_OSCILLATORTYPE_HSI48;
#else
RCC_OscInitStruct->OscillatorType = RCC_OSCILLATORTYPE_HSE | RCC_OSCILLATORTYPE_HSI | RCC_OSCILLATORTYPE_MSI | \
RCC_OSCILLATORTYPE_LSE | RCC_OSCILLATORTYPE_LSI;
#endif /* RCC_HSI48_SUPPORT */
/* Get the HSE configuration -----------------------------------------------*/
if(READ_BIT(RCC->CR, RCC_CR_HSEBYP) == RCC_CR_HSEBYP)
{
RCC_OscInitStruct->HSEState = RCC_HSE_BYPASS;
}
else if(READ_BIT(RCC->CR, RCC_CR_HSEON) == RCC_CR_HSEON)
{
RCC_OscInitStruct->HSEState = RCC_HSE_ON;
}
else
{
RCC_OscInitStruct->HSEState = RCC_HSE_OFF;
}
/* Get the MSI configuration -----------------------------------------------*/
if(READ_BIT(RCC->CR, RCC_CR_MSION) == RCC_CR_MSION)
{
RCC_OscInitStruct->MSIState = RCC_MSI_ON;
}
else
{
RCC_OscInitStruct->MSIState = RCC_MSI_OFF;
}
RCC_OscInitStruct->MSICalibrationValue = READ_BIT(RCC->ICSCR, RCC_ICSCR_MSITRIM) >> RCC_ICSCR_MSITRIM_Pos;
RCC_OscInitStruct->MSIClockRange = READ_BIT(RCC->CR, RCC_CR_MSIRANGE);
/* Get the HSI configuration -----------------------------------------------*/
if(READ_BIT(RCC->CR, RCC_CR_HSION) == RCC_CR_HSION)
{
RCC_OscInitStruct->HSIState = RCC_HSI_ON;
}
else
{
RCC_OscInitStruct->HSIState = RCC_HSI_OFF;
}
RCC_OscInitStruct->HSICalibrationValue = READ_BIT(RCC->ICSCR, RCC_ICSCR_HSITRIM) >> RCC_ICSCR_HSITRIM_Pos;
/* Get the LSE configuration -----------------------------------------------*/
if(READ_BIT(RCC->BDCR, RCC_BDCR_LSEBYP) == RCC_BDCR_LSEBYP)
{
RCC_OscInitStruct->LSEState = RCC_LSE_BYPASS;
}
else if(READ_BIT(RCC->BDCR, RCC_BDCR_LSEON) == RCC_BDCR_LSEON)
{
RCC_OscInitStruct->LSEState = RCC_LSE_ON;
}
else
{
RCC_OscInitStruct->LSEState = RCC_LSE_OFF;
}
/* Get the LSI configuration -----------------------------------------------*/
if(READ_BIT(RCC->CSR, RCC_CSR_LSION) == RCC_CSR_LSION)
{
RCC_OscInitStruct->LSIState = RCC_LSI_ON;
}
else
{
RCC_OscInitStruct->LSIState = RCC_LSI_OFF;
}
#if defined(RCC_HSI48_SUPPORT)
/* Get the HSI48 configuration ---------------------------------------------*/
if(READ_BIT(RCC->CRRCR, RCC_CRRCR_HSI48ON) == RCC_CRRCR_HSI48ON)
{
RCC_OscInitStruct->HSI48State = RCC_HSI48_ON;
}
else
{
RCC_OscInitStruct->HSI48State = RCC_HSI48_OFF;
}
#else
RCC_OscInitStruct->HSI48State = RCC_HSI48_OFF;
#endif /* RCC_HSI48_SUPPORT */
/* Get the PLL configuration -----------------------------------------------*/
if(READ_BIT(RCC->CR, RCC_CR_PLLON) == RCC_CR_PLLON)
{
RCC_OscInitStruct->PLL.PLLState = RCC_PLL_ON;
}
else
{
RCC_OscInitStruct->PLL.PLLState = RCC_PLL_OFF;
}
RCC_OscInitStruct->PLL.PLLSource = READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC);
RCC_OscInitStruct->PLL.PLLM = (READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLM) >> RCC_PLLCFGR_PLLM_Pos) + 1U;
RCC_OscInitStruct->PLL.PLLN = READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos;
RCC_OscInitStruct->PLL.PLLQ = (((READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLQ) >> RCC_PLLCFGR_PLLQ_Pos) + 1U) << 1U);
RCC_OscInitStruct->PLL.PLLR = (((READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLR) >> RCC_PLLCFGR_PLLR_Pos) + 1U) << 1U);
#if defined(RCC_PLLP_DIV_2_31_SUPPORT)
RCC_OscInitStruct->PLL.PLLP = READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLPDIV) >> RCC_PLLCFGR_PLLPDIV_Pos;
#else
if(READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLP) != RESET)
{
RCC_OscInitStruct->PLL.PLLP = RCC_PLLP_DIV17;
}
else
{
RCC_OscInitStruct->PLL.PLLP = RCC_PLLP_DIV7;
}
#endif /* RCC_PLLP_DIV_2_31_SUPPORT */
}
/**
* @brief Configure the RCC_ClkInitStruct according to the internal
* RCC configuration registers.
* @param RCC_ClkInitStruct pointer to an RCC_ClkInitTypeDef structure that
* will be configured.
* @param pFLatency Pointer on the Flash Latency.
* @retval None
*/
void HAL_RCC_GetClockConfig(RCC_ClkInitTypeDef *RCC_ClkInitStruct, uint32_t *pFLatency)
{
/* Check the parameters */
assert_param(RCC_ClkInitStruct != NULL);
assert_param(pFLatency != NULL);
/* Set all possible values for the Clock type parameter --------------------*/
RCC_ClkInitStruct->ClockType = RCC_CLOCKTYPE_SYSCLK | RCC_CLOCKTYPE_HCLK | RCC_CLOCKTYPE_PCLK1 | RCC_CLOCKTYPE_PCLK2;
/* Get the SYSCLK configuration --------------------------------------------*/
RCC_ClkInitStruct->SYSCLKSource = READ_BIT(RCC->CFGR, RCC_CFGR_SW);
/* Get the HCLK configuration ----------------------------------------------*/
RCC_ClkInitStruct->AHBCLKDivider = READ_BIT(RCC->CFGR, RCC_CFGR_HPRE);
/* Get the APB1 configuration ----------------------------------------------*/
RCC_ClkInitStruct->APB1CLKDivider = READ_BIT(RCC->CFGR, RCC_CFGR_PPRE1);
/* Get the APB2 configuration ----------------------------------------------*/
RCC_ClkInitStruct->APB2CLKDivider = (READ_BIT(RCC->CFGR, RCC_CFGR_PPRE2) >> 3U);
/* Get the Flash Wait State (Latency) configuration ------------------------*/
*pFLatency = READ_BIT(FLASH->ACR, FLASH_ACR_LATENCY);
}
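/* Usage sketch (illustration only): snapshot the live clock tree, e.g. before a
   temporary reconfiguration that has to be restored afterwards. All functions used
   here are defined above in this file. Kept under #if 0. */
#if 0
static void Example_SnapshotClockConfig(void)
{
  RCC_OscInitTypeDef oscinit;
  RCC_ClkInitTypeDef clkinit;
  uint32_t flash_latency;

  HAL_RCC_GetOscConfig(&oscinit);
  HAL_RCC_GetClockConfig(&clkinit, &flash_latency);

  /* The derived frequencies are available through the getter functions */
  (void)HAL_RCC_GetSysClockFreq();
  (void)HAL_RCC_GetHCLKFreq();
  (void)HAL_RCC_GetPCLK1Freq();
  (void)HAL_RCC_GetPCLK2Freq();
}
#endif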
/**
* @brief Enable the Clock Security System.
* @note If a failure is detected on the HSE oscillator clock, this oscillator
* is automatically disabled and an interrupt is generated to inform the
* software about the failure (Clock Security System Interrupt, CSSI),
* allowing the MCU to perform rescue operations. The CSSI is linked to
* the Cortex-M4 NMI (Non-Maskable Interrupt) exception vector.
* @note The Clock Security System can only be cleared by reset.
* @retval None
*/
void HAL_RCC_EnableCSS(void)
{
SET_BIT(RCC->CR, RCC_CR_CSSON);
}
/**
* @brief Handle the RCC Clock Security System interrupt request.
* @note This API should be called under the NMI_Handler().
* @retval None
*/
void HAL_RCC_NMI_IRQHandler(void)
{
/* Check RCC CSSF interrupt flag */
if(__HAL_RCC_GET_IT(RCC_IT_CSS))
{
/* RCC Clock Security System interrupt user callback */
HAL_RCC_CSSCallback();
/* Clear RCC CSS pending bit */
__HAL_RCC_CLEAR_IT(RCC_IT_CSS);
}
}
/**
* @brief RCC Clock Security System interrupt callback.
* @retval none
*/
__weak void HAL_RCC_CSSCallback(void)
{
/* NOTE : This function should not be modified, when the callback is needed,
the HAL_RCC_CSSCallback should be implemented in the user file
*/
}
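/* Usage sketch (illustration only): typical CSS wiring in user code. HAL_RCC_EnableCSS()
   arms the monitor; the NMI handler (normally placed in the user interrupt file, e.g.
   stm32l4xx_it.c, which is an assumption of this sketch) forwards the event to
   HAL_RCC_NMI_IRQHandler(), which in turn calls the user-overridden HAL_RCC_CSSCallback().
   Kept under #if 0. */
#if 0
void NMI_Handler(void)
{
  /* Forward the Clock Security System event to the RCC HAL */
  HAL_RCC_NMI_IRQHandler();
}

void HAL_RCC_CSSCallback(void)
{
  /* HSE failed: the hardware has already disabled the HSE oscillator.
     Re-run the clock configuration here or put the system in a safe state. */
}
#endif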
/**
* @}
*/
/**
* @}
*/
/* Private function prototypes -----------------------------------------------*/
/** @addtogroup RCC_Private_Functions
* @{
*/
/**
* @brief Update number of Flash wait states in line with MSI range and current
voltage range.
* @param msirange MSI range value from RCC_MSIRANGE_0 to RCC_MSIRANGE_11
* @retval HAL status
*/
static HAL_StatusTypeDef RCC_SetFlashLatencyFromMSIRange(uint32_t msirange)
{
uint32_t vos = 0;
uint32_t latency = FLASH_LATENCY_0; /* default value 0WS */
if(__HAL_RCC_PWR_IS_CLK_ENABLED())
{
vos = HAL_PWREx_GetVoltageRange();
}
else
{
__HAL_RCC_PWR_CLK_ENABLE();
vos = HAL_PWREx_GetVoltageRange();
__HAL_RCC_PWR_CLK_DISABLE();
}
if(vos == PWR_REGULATOR_VOLTAGE_SCALE1)
{
if(msirange > RCC_MSIRANGE_8)
{
/* MSI > 16Mhz */
if(msirange > RCC_MSIRANGE_10)
{
/* MSI 48Mhz */
latency = FLASH_LATENCY_2; /* 2WS */
}
else
{
/* MSI 24Mhz or 32Mhz */
latency = FLASH_LATENCY_1; /* 1WS */
}
}
/* else MSI <= 16Mhz default FLASH_LATENCY_0 0WS */
}
else
{
#if defined(STM32L4R5xx) || defined(STM32L4R7xx) || defined(STM32L4R9xx) || defined(STM32L4S5xx) || defined(STM32L4S7xx) || defined(STM32L4S9xx)
if(msirange >= RCC_MSIRANGE_8)
{
/* MSI >= 16Mhz */
latency = FLASH_LATENCY_2; /* 2WS */
}
else
{
if(msirange == RCC_MSIRANGE_7)
{
/* MSI 8Mhz */
latency = FLASH_LATENCY_1; /* 1WS */
}
/* else MSI < 8Mhz default FLASH_LATENCY_0 0WS */
}
#else
if(msirange > RCC_MSIRANGE_8)
{
/* MSI > 16Mhz */
latency = FLASH_LATENCY_3; /* 3WS */
}
else
{
if(msirange == RCC_MSIRANGE_8)
{
/* MSI 16Mhz */
latency = FLASH_LATENCY_2; /* 2WS */
}
else if(msirange == RCC_MSIRANGE_7)
{
/* MSI 8Mhz */
latency = FLASH_LATENCY_1; /* 1WS */
}
/* else MSI < 8Mhz default FLASH_LATENCY_0 0WS */
}
#endif
}
__HAL_FLASH_SET_LATENCY(latency);
/* Check that the new number of wait states is taken into account to access the Flash
memory by reading the FLASH_ACR register */
if(READ_BIT(FLASH->ACR, FLASH_ACR_LATENCY) != latency)
{
return HAL_ERROR;
}
return HAL_OK;
}
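/* Summary of the mapping implemented above (as coded, for quick reference):
   - Voltage scale 1: MSI <= 16 MHz -> 0 WS, 24/32 MHz -> 1 WS, 48 MHz -> 2 WS
   - Other scale, L4R/L4S parts: MSI < 8 MHz -> 0 WS, 8 MHz -> 1 WS, >= 16 MHz -> 2 WS
   - Other scale, remaining parts: MSI < 8 MHz -> 0 WS, 8 MHz -> 1 WS, 16 MHz -> 2 WS, > 16 MHz -> 3 WS */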
#if defined(STM32L4R5xx) || defined(STM32L4R7xx) || defined(STM32L4R9xx) || defined(STM32L4S5xx) || defined(STM32L4S7xx) || defined(STM32L4S9xx)
/**
* @brief Compute SYSCLK frequency based on PLL SYSCLK source.
* @retval SYSCLK frequency
*/
static uint32_t RCC_GetSysClockFreqFromPLLSource(void)
{
uint32_t msirange = 0U, pllvco = 0U, pllsource = 0U, pllr = 2U, pllm = 2U;
uint32_t sysclockfreq = 0U;
if(__HAL_RCC_GET_PLL_OSCSOURCE() == RCC_PLLSOURCE_MSI)
{
/* Get MSI range source */
if(READ_BIT(RCC->CR, RCC_CR_MSIRGSEL) == RESET)
{ /* MSISRANGE from RCC_CSR applies */
msirange = READ_BIT(RCC->CSR, RCC_CSR_MSISRANGE) >> RCC_CSR_MSISRANGE_Pos;
}
else
{ /* MSIRANGE from RCC_CR applies */
msirange = READ_BIT(RCC->CR, RCC_CR_MSIRANGE) >> RCC_CR_MSIRANGE_Pos;
}
/* MSI frequency range in Hz */
msirange = MSIRangeTable[msirange];
}
/* PLL_VCO = ((HSE_VALUE or HSI_VALUE or MSI_VALUE) / PLLM) * PLLN
   SYSCLK = PLL_VCO / PLLR
*/
pllsource = READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLSRC);
pllm = (READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLM) >> RCC_PLLCFGR_PLLM_Pos) + 1U ;
switch (pllsource)
{
case RCC_PLLSOURCE_HSI: /* HSI used as PLL clock source */
pllvco = (HSI_VALUE / pllm) * (READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos);
break;
case RCC_PLLSOURCE_HSE: /* HSE used as PLL clock source */
pllvco = (HSE_VALUE / pllm) * (READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos);
break;
case RCC_PLLSOURCE_MSI: /* MSI used as PLL clock source */
default:
pllvco = (msirange / pllm) * (READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLN) >> RCC_PLLCFGR_PLLN_Pos);
break;
}
pllr = ((READ_BIT(RCC->PLLCFGR, RCC_PLLCFGR_PLLR) >> RCC_PLLCFGR_PLLR_Pos) + 1U ) * 2U;
sysclockfreq = pllvco/pllr;
return sysclockfreq;
}
#endif /* STM32L4R5xx || STM32L4R7xx || STM32L4R9xx || STM32L4S5xx || STM32L4S7xx || STM32L4S9xx */
/**
* @}
*/
#endif /* HAL_RCC_MODULE_ENABLED */
/**
* @}
*/
/**
* @}
*/
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
880186.c | // RUN: %ocheck 0 %s
void abort(void) __attribute__((noreturn));
float f = 0x.1p1;          /* hexadecimal float: 0x.1 = 1/16, scaled by 2^1 -> 0.125 */
double d = 0x0.3de31P3;    /* exercises hex-float parsing; not checked below */
double g = 0xDE.488631p0;  /* exercises hex-float parsing; not checked below */
int main()
{
#include "../ocheck-init.c"
if(f != 0.125f)
abort();
return 0;
}
|
612955.c | /*++
Copyright (c) 1990 Microsoft Corporation
Copyright (c) 1993, 1994 Digital Equipment Corporation
Module Name:
aligntrk.c
Abstract:
This module implements the code necessary to dispatch exceptions to the
proper mode and invoke the exception dispatcher.
Author:
David N. Cutler (davec) 3-Apr-1990
Environment:
Kernel mode only.
Revision History:
Thomas Van Baak (tvb) 12-May-1992
Adapted for Alpha AXP.
Forrest Foltz (forrestf) 30-Dec-1999
Broke out increasingly complex and common alignment fault handling into
this file.
--*/
#include "ki.h"
//
// EXINFO_EFFECTIVE_ADDRESS: slot number [0...4] for faulting address.
//
#if defined(_IA64_)
#define EXINFO_EFFECTIVE_ADDRESS 1
#else // !_IA64_
#define EXINFO_EFFECTIVE_ADDRESS 2
#endif // !_IA64_
//
// Data misalignment exception (auto alignment fixup) control.
//
// If KiEnableAlignmentFaultExceptions is 0, then no alignment
// exceptions are raised and all misaligned user and kernel mode data
// references are emulated. This is consistent with NT/Alpha version
// 3.1 behavior.
//
// If KiEnableAlignmentFaultExceptions is 1, then the
// current thread automatic alignment fixup enable determines whether
// emulation is attempted in user mode. This is consistent with NT/Mips
// behavior.
//
// If KiEnableAlignmentFaultExceptions is 2, then the behavior depends
// on the execution mode at the time of the fault. Kernel-mode code gets
// type 1 behavior above (no fixup), user-mode code gets type 0 above
// (fixup).
//
// This last mode is temporary until we flush out the remaining user-mode
// alignment faults, at which point the option will be removed and the
// default value will be set to 1.
//
// N.B. This default value may be reset from the Registry during init.
//
ULONG KiEnableAlignmentFaultExceptions = 1;
#define IsWow64Process() (PsGetCurrentProcess()->Wow64Process != NULL)
#if DBG
//
// Globals to track the number of alignment exception fixups in both user and
// kernel.
//
ULONG KiKernelFixupCount = 0;
ULONG KiUserFixupCount = 0;
//
// Set KiBreakOnAlignmentFault to the desired combination of
// the following flags.
//
#define KE_ALIGNMENT_BREAK_USER 0x01
#define KE_ALIGNMENT_BREAK_KERNEL 0x02
ULONG KiBreakOnAlignmentFault = KE_ALIGNMENT_BREAK_USER;
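// Example (illustration only): to break on both user- and kernel-mode alignment
// faults in a checked build, the flags above can be combined, e.g. via the kernel
// debugger:
//
//      KiBreakOnAlignmentFault = KE_ALIGNMENT_BREAK_USER | KE_ALIGNMENT_BREAK_KERNEL;
//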
__inline
BOOLEAN
KI_BREAK_ON_ALIGNMENT_FAULT(
IN KPROCESSOR_MODE PreviousMode
)
/*++
Routine description:
Given that an alignment fault has been encountered, determines whether
a debug break should occur based on the execution mode of the fault and
flags in KiBreakOnAlignmentFault.
Arguments:
PreviousMode - The execution mode at the time of the fault.
Return Value:
TRUE if a debug break should occur, FALSE otherwise.
--*/
{
if ((KiBreakOnAlignmentFault & KE_ALIGNMENT_BREAK_USER) != 0 &&
PreviousMode == UserMode) {
return TRUE;
}
if ((KiBreakOnAlignmentFault & KE_ALIGNMENT_BREAK_KERNEL) != 0 &&
PreviousMode == KernelMode) {
return TRUE;
}
return FALSE;
}
//
// Structures to track alignment fault locations on a global basis. These
// are used in the checked kernel only, as an aid in finding and fixing
// alignment faults in the system.
//
#define MAX_IMAGE_NAME_CHARS 15
typedef struct _ALIGNMENT_FAULT_IMAGE *PALIGNMENT_FAULT_IMAGE;
typedef struct _ALIGNMENT_FAULT_LOCATION *PALIGNMENT_FAULT_LOCATION;
typedef struct _ALIGNMENT_FAULT_IMAGE {
//
// Head of singly-linked list of fault locations associated with this image
//
PALIGNMENT_FAULT_LOCATION LocationHead;
//
// Total number of alignment faults associated with this image.
//
ULONG Count;
//
// Number of unique alignment fault locations found in this image
//
ULONG Instances;
//
// Name of the image
//
CHAR Name[ MAX_IMAGE_NAME_CHARS + 1 ];
} ALIGNMENT_FAULT_IMAGE;
BOOLEAN
KiNewGlobalAlignmentFault(
IN PVOID ProgramCounter,
IN KPROCESSOR_MODE PreviousMode,
OUT PALIGNMENT_FAULT_IMAGE *AlignmentFaultImage
);
#endif
NTSTATUS
KipRecordAlignmentException(
IN PVOID ProgramCounter,
OUT PALIGNMENT_EXCEPTION_RECORD *ExceptionRecord
);
PALIGNMENT_EXCEPTION_RECORD
KipFindAlignmentException(
IN PVOID ProgramCounter
);
PALIGNMENT_EXCEPTION_RECORD
KipAllocateAlignmentExceptionRecord( VOID );
BOOLEAN
KiHandleAlignmentFault(
IN PEXCEPTION_RECORD ExceptionRecord,
IN PKEXCEPTION_FRAME ExceptionFrame,
IN PKTRAP_FRAME TrapFrame,
IN KPROCESSOR_MODE PreviousMode,
IN BOOLEAN FirstChance,
OUT BOOLEAN *ExceptionForwarded
)
/*++
Routine description:
This routine deals with alignment exceptions as appropriate. See comments
at the beginning of this module.
Arguments:
ExceptionRecord - Supplies a pointer to an exception record.
ExceptionFrame - Supplies a pointer to an exception frame.
TrapFrame - Supplies a pointer to a trap frame.
PreviousMode - Supplies the previous processor mode.
FirstChance - Supplies a boolean variable that specifies whether this
is the first (TRUE) or second (FALSE) time that this exception has
been processed.
ExceptionForwarded - On return, indicates whether the exception had
already been forwarded to a user-mode debugger.
Return Value:
TRUE if the alignment exception was handled, FALSE otherwise.
--*/
{
BOOLEAN AlignmentFaultHandled;
BOOLEAN EmulateAlignmentFault;
BOOLEAN ExceptionWasForwarded;
BOOLEAN AutoAlignment;
NTSTATUS Status;
PVOID ProgramCounter;
#if DBG
BOOLEAN NewAlignmentFault;
PVOID EffectiveAddress;
PALIGNMENT_FAULT_IMAGE FaultImage;
#endif
//
// Assume the fault was not handled and that the exception had not
// been forwarded to a user-mode debugger.
//
AlignmentFaultHandled = FALSE;
ExceptionWasForwarded = FALSE;
if (FirstChance != FALSE) {
//
// This is the first chance for handling an exception... we haven't yet
// searched for an exception handler.
//
EmulateAlignmentFault = FALSE;
AutoAlignment = FALSE;
ProgramCounter = (PVOID)ExceptionRecord->ExceptionAddress;
//
// Determine whether autoalignment is enabled for thread. If a DPC or
// an interrupt is being executed, then we are in an arbitrary thread
// context. Per-process and per-thread settings are ignored in this
// case.
//
if (IsWow64Process() != FALSE) {
//
// For now, autoalignment is on (both user and kernel) for Wow64
// processes.
//
AutoAlignment = TRUE;
}
if (PreviousMode == UserMode &&
(KeGetCurrentThread()->AutoAlignment != FALSE ||
KeGetCurrentThread()->ApcState.Process->AutoAlignment != FALSE)) {
//
// The fault occured in user mode, and the thread and/or process
// has autoalignment turned on.
//
#if defined(_IA64_)
//
// On IA64 platform, reset psr.ac bit to disable alignment check
//
TrapFrame->StIPSR &= ~(ULONGLONG)(1ULL << PSR_AC);
#endif // defined(_IA64_)
AutoAlignment = TRUE;
}
if (PreviousMode == UserMode &&
PsGetCurrentProcess()->DebugPort != NULL &&
AutoAlignment == FALSE) {
BOOLEAN DebuggerHandledException;
PALIGNMENT_EXCEPTION_RECORD AlignmentExceptionRecord;
//
// The alignment exception is in user mode, there is a debugger
// attached, and autoalignment is not enabled for this thread.
//
// Determine whether this exception has already been observed
// and, if so, whether we should break into the debugger.
//
Status = KipRecordAlignmentException( ProgramCounter,
&AlignmentExceptionRecord );
if (!NT_SUCCESS(Status)) {
AlignmentExceptionRecord = NULL;
}
if (AlignmentExceptionRecord != NULL &&
AlignmentExceptionRecord->AutoFixup != FALSE) {
//
// The alignment exception record for this location
// indicates that an automatic fixup should be applied
// without notifying the debugger. This is because
// the user entered 'gh' at the debug prompt the last
// time we reported this fault.
//
EmulateAlignmentFault = TRUE;
} else {
//
// Forward the exception to the debugger.
//
ExceptionWasForwarded = TRUE;
DebuggerHandledException =
DbgkForwardException( ExceptionRecord, TRUE, FALSE );
if (DebuggerHandledException != FALSE) {
//
// The user continued with "gh", so fix up this and all
// subsequent alignment exceptions at this address.
//
EmulateAlignmentFault = TRUE;
if (AlignmentExceptionRecord != NULL) {
AlignmentExceptionRecord->AutoFixup = TRUE;
}
}
}
} else if ((KiEnableAlignmentFaultExceptions == 0) ||
(AutoAlignment != FALSE) ||
(PreviousMode == UserMode &&
KiEnableAlignmentFaultExceptions == 2)) {
//
// Emulate the alignment if:
//
// KiEnableAlignmentFaultExceptions is 0, OR
// this thread has enabled alignment fixups, OR
// the current process is a WOW64 process, OR
// KiEnableAlignmentFaultExceptions is 2 and the fault occurred
// in user mode
//
EmulateAlignmentFault = TRUE;
} else {
//
// We are not fixing up the alignment fault.
//
#if defined(_IA64_)
//
// On IA64 platform, set psr.ac bit to enable h/w alignment check
//
TrapFrame->StIPSR |= (1ULL << PSR_AC);
#endif // defined(_IA64_)
}
#if DBG
//
// Count alignment faults by mode.
//
if (PreviousMode == KernelMode) {
KiKernelFixupCount += 1;
} else {
KiUserFixupCount += 1;
}
EffectiveAddress =
(PVOID)ExceptionRecord->ExceptionInformation[EXINFO_EFFECTIVE_ADDRESS];
NewAlignmentFault = KiNewGlobalAlignmentFault( ProgramCounter,
PreviousMode,
&FaultImage );
if (NewAlignmentFault != FALSE) {
//
// Attempt to determine and display the name of the offending
// image.
//
DbgPrint("KE: %s Fixup: %.16s [%.16s], Pc=%.16p, Addr=%.16p ... Total=%ld %s\n",
(PreviousMode == KernelMode) ? "Kernel" : "User",
&PsGetCurrentProcess()->ImageFileName[0],
FaultImage->Name,
ProgramCounter,
EffectiveAddress,
(PreviousMode == KernelMode) ? KiKernelFixupCount : KiUserFixupCount,
IsWow64Process() ? "(Wow64)" : "");
if (AutoAlignment == FALSE &&
KI_BREAK_ON_ALIGNMENT_FAULT( PreviousMode ) != FALSE &&
ExceptionWasForwarded == FALSE) {
if (EmulateAlignmentFault == FALSE) {
DbgPrint("KE: Misaligned access WILL NOT be emulated\n");
}
//
// This alignment fault would not normally have been fixed up,
// and KiBreakOnAlignmentFault flags indicate that we should
// break into the kernel debugger.
//
// Also, we know that we have not broken into a user-mode
// debugger as a result of this fault.
//
if (PreviousMode != KernelMode) {
RtlMakeStackTraceDataPresent();
}
DbgBreakPoint();
}
}
#endif
//
// Emulate the reference according to the decisions made above.
//
if (EmulateAlignmentFault != FALSE) {
if (KiEmulateReference(ExceptionRecord,
ExceptionFrame,
TrapFrame) != FALSE) {
KeGetCurrentPrcb()->KeAlignmentFixupCount += 1;
AlignmentFaultHandled = TRUE;
}
}
}
*ExceptionForwarded = ExceptionWasForwarded;
return AlignmentFaultHandled;
}
NTSTATUS
KipRecordAlignmentException(
IN PVOID ProgramCounter,
OUT PALIGNMENT_EXCEPTION_RECORD *ExceptionRecord
)
/*++
Routine Description:
This routine searches for an existing ALIGNMENT_EXCEPTION_RECORD on the
per-process list of alignment exceptions. If a match is not found, then
a new record is created.
Arguments:
ProgramCounter - Supplies the address of the faulting instruction.
ExceptionRecord - Supplies a pointer into which is placed the address
of the matching alignment exception record.
Return Value:
STATUS_SUCCESS if the operation was successful, or an appropriate error
code otherwise.
--*/
{
PALIGNMENT_EXCEPTION_RECORD exceptionRecord;
NTSTATUS status;
//
// Lock the alignment exception database
//
KeEnterCriticalRegion();
ExAcquireResourceExclusive( &PsLoadedModuleResource, TRUE );
exceptionRecord = KipFindAlignmentException( ProgramCounter );
if (exceptionRecord == NULL) {
//
// New exception. Allocate a new record.
//
exceptionRecord = KipAllocateAlignmentExceptionRecord();
if (exceptionRecord == NULL) {
status = STATUS_INSUFFICIENT_RESOURCES;
goto exitUnlock;
}
exceptionRecord->ProgramCounter = ProgramCounter;
}
exceptionRecord->Count += 1;
*ExceptionRecord = exceptionRecord;
status = STATUS_SUCCESS;
exitUnlock:
ExReleaseResourceLite( &PsLoadedModuleResource );
KeLeaveCriticalRegion();
return status;
}
PALIGNMENT_EXCEPTION_RECORD
KipAllocateAlignmentExceptionRecord(
VOID
)
/*++
Routine Description:
This is a support routine for KipRecordAlignmentException(). Its purpose
is to locate an available alignment exception record in the per-process
alignment exception list. If none is found, a new alignment exception
table will be allocated and linked into the per-process list.
Arguments:
None.
Return Value:
A pointer to the new alignment exception record if successful, or NULL
otherwise.
--*/
{
PKTHREAD thread;
PKPROCESS process;
PALIGNMENT_EXCEPTION_RECORD exceptionRecord;
PALIGNMENT_EXCEPTION_TABLE exceptionTable;
ULONG exceptionTableCount;
//
// Free exception records have a NULL program counter.
//
exceptionRecord = KipFindAlignmentException( NULL );
if (exceptionRecord == NULL) {
thread = KeGetCurrentThread();
process = thread->ApcState.Process;
//
// Ensure that we haven't exceeded the maximum number of alignment
// exception tables for this process. We could keep a count but we
// do not care about performance here... this code only executes when
// the process is running under a debugger and we're likely about
// to break in.
//
exceptionTableCount = 0;
exceptionTable = process->AlignmentExceptionTable;
while (exceptionTable != NULL) {
exceptionTableCount += 1;
exceptionTable = exceptionTable->Next;
}
if (exceptionTableCount == MAXIMUM_ALIGNMENT_TABLES) {
return NULL;
}
//
// Allocate a new exception table and insert it at the
// head of the per-process list.
//
exceptionTable = ExAllocatePoolWithTag( PagedPool,
sizeof(ALIGNMENT_EXCEPTION_TABLE),
'tpcX' );
if (exceptionTable == NULL) {
return NULL;
}
RtlZeroMemory( exceptionTable, sizeof(ALIGNMENT_EXCEPTION_TABLE) );
exceptionTable->Next = process->AlignmentExceptionTable;
process->AlignmentExceptionTable = exceptionTable;
//
// Allocate the first record in the array
//
exceptionRecord = &exceptionTable->RecordArray[0];
}
return exceptionRecord;
}
PALIGNMENT_EXCEPTION_RECORD
KipFindAlignmentException(
IN PVOID ProgramCounter
)
/*++
Routine Description:
This routine searches the alignment exception tables associated with
the current process for an alignment exception record that matches
the supplied program counter.
Arguments:
ProgramCounter - Supplies the address of the faulting instruction.
Return Value:
A pointer to the matching alignment exception record, or NULL if none
was found.
--*/
{
PKTHREAD thread;
PKPROCESS process;
PALIGNMENT_EXCEPTION_RECORD exceptionRecord;
PALIGNMENT_EXCEPTION_RECORD lastExceptionRecord;
PALIGNMENT_EXCEPTION_TABLE exceptionTable;
thread = KeGetCurrentThread();
process = thread->ApcState.Process;
//
// Walk the singly-linked list of exception tables dangling
// off of the process.
//
exceptionTable = process->AlignmentExceptionTable;
while (exceptionTable != NULL) {
//
// Scan this table looking for a match.
//
exceptionRecord = exceptionTable->RecordArray;
lastExceptionRecord =
&exceptionTable->RecordArray[ ALIGNMENT_RECORDS_PER_TABLE ];
while (exceptionRecord < lastExceptionRecord) {
if (exceptionRecord->ProgramCounter == ProgramCounter) {
//
// Found it.
//
return exceptionRecord;
}
exceptionRecord++;
}
if (ProgramCounter == NULL) {
//
// Caller was looking for a free exception record. If one exists
// it will be in the first table, which was just examined.
//
break;
}
//
// Go look in the next exception table.
//
exceptionTable = exceptionTable->Next;
}
return NULL;
}
#if DBG
//
// The following routines are used to maintain a global database of alignment
// faults that were found in the system. Alignment faults are stored according
// to the name of the image and the offset within that image. In this way an
// existing alignment fault record will be found if it occurs in the same image
// loaded at a different base address in a new process.
//
typedef struct _ALIGNMENT_FAULT_LOCATION {
//
// Pointer to fault image associated with this location
//
PALIGNMENT_FAULT_IMAGE Image;
//
// Linkage for singly-linked list of fault locations associated with the
// same image.
//
PALIGNMENT_FAULT_LOCATION Next;
//
// Offset of the PC address within the image.
//
ULONG_PTR OffsetFromBase;
//
// Number of alignment faults taken at this location.
//
ULONG Count;
} ALIGNMENT_FAULT_LOCATION;
//
// The maximum number of individual alignment fault locations that will be
// tracked.
//
#define MAX_FAULT_LOCATIONS 2048
#define MAX_FAULT_IMAGES 128
ALIGNMENT_FAULT_LOCATION KiAlignmentFaultLocations[ MAX_FAULT_LOCATIONS ];
ULONG KiAlignmentFaultLocationCount = 0;
ALIGNMENT_FAULT_IMAGE KiAlignmentFaultImages[ MAX_FAULT_IMAGES ];
ULONG KiAlignmentFaultImageCount = 0;
KSPIN_LOCK KipGlobalAlignmentDatabaseLock;
VOID
KiCopyLastPathElement(
IN PUNICODE_STRING Source,
IN OUT PULONG StringBufferLen,
OUT PCHAR StringBuffer,
IN KPROCESSOR_MODE PreviousMode
);
PALIGNMENT_FAULT_IMAGE
KiFindAlignmentFaultImage(
IN PCHAR ImageName
);
PLDR_DATA_TABLE_ENTRY
KiFindLoaderDataTableEntry(
IN PLIST_ENTRY ListHead,
IN PVOID ProgramCounter,
IN KPROCESSOR_MODE PreviousMode
);
BOOLEAN
KiIncrementLocationAlignmentFault(
IN PALIGNMENT_FAULT_IMAGE FaultImage,
IN ULONG_PTR OffsetFromBase
);
BOOLEAN
KiGetLdrDataTableInformation(
IN PVOID ProgramCounter,
IN KPROCESSOR_MODE PreviousMode,
IN OUT PULONG ImageNameBufferLength,
OUT PCHAR ImageNameBuffer,
OUT PVOID *ImageBase
)
/*++
Routine Description:
This routine returns the name of the image that contains the supplied
address.
Arguments:
ProgramCounter - Supplies the address for which we would like the
name of the containing image.
PreviousMode - Indicates whether the module is a user or kernel image.
ImageNameBufferLength - Supplies a pointer to a buffer length value. On
entry, this value represents the maximum length of StringBuffer. On
exit, the value is set to the actual number of characters stored.
ImageNameBuffer - Supplies a pointer to the output ANSI string into which
the module name will be placed. This string will not be null
terminated.
ImageBase - Supplies a pointer to a location into which the base address
of the located image is placed.
Return Value:
Returns TRUE if a module was located and its name copied to ImageNameBuffer,
or FALSE otherwise.
--*/
{
PLIST_ENTRY head;
PPEB peb;
PLDR_DATA_TABLE_ENTRY tableEntry;
BOOLEAN status;
//
// Since we may be poking around in user space, be sure to recover
// gracefully from any exceptions thrown.
//
try {
//
// Choose the appropriate module list based on whether the fault
// occured in user- or kernel-space.
//
if (PreviousMode == KernelMode) {
head = &PsLoadedModuleList;
} else {
peb = PsGetCurrentProcess()->Peb;
head = &peb->Ldr->InLoadOrderModuleList;
}
tableEntry = KiFindLoaderDataTableEntry( head,
ProgramCounter,
PreviousMode );
if (tableEntry != NULL) {
//
// The module of interest was located. Copy its name and
// base address to the output parameters.
//
KiCopyLastPathElement( &tableEntry->BaseDllName,
ImageNameBufferLength,
ImageNameBuffer,
PreviousMode );
*ImageBase = tableEntry->DllBase;
status = TRUE;
} else {
//
// A module containing the supplied program counter could not be
// found.
//
status = FALSE;
}
} except(ExSystemExceptionFilter()) {
status = FALSE;
}
return status;
}
PLDR_DATA_TABLE_ENTRY
KiFindLoaderDataTableEntry(
IN PLIST_ENTRY ListHead,
IN PVOID ProgramCounter,
IN KPROCESSOR_MODE PreviousMode
)
/*++
Routine Description:
This is a support routine for KiGetLdrDataTableInformation. Its purpose is
to search a LDR_DATA_TABLE_ENTRY list, looking for a module that contains
the supplied program counter.
Arguments:
ListHead - Supplies a pointer to the LIST_ENTRY that represents the head of
the LDR_DATA_TABLE_ENTRY list to search.
ProgramCounter - Supplies the code location of the faulting instruction.
Return Value:
Returns a pointer to the matching LDR_DATA_TABLE_ENTRY structure, or NULL
if no match is found.
--*/
{
ULONG nodeNumber;
PLIST_ENTRY next;
PLDR_DATA_TABLE_ENTRY ldrDataTableEntry;
ULONG_PTR imageStart;
ULONG_PTR imageEnd;
//
// Walk the user- or kernel-mode module list. It is up to the caller
// to capture any exceptions as a result of the lists being corrupt.
//
nodeNumber = 0;
next = ListHead;
if (PreviousMode != KernelMode) {
ProbeForReadSmallStructure( next,
sizeof(LIST_ENTRY),
PROBE_ALIGNMENT(LIST_ENTRY) );
}
while (TRUE) {
nodeNumber += 1;
next = next->Flink;
if (next == ListHead || nodeNumber > 10000) {
//
// The end of the module list has been reached, or the
// list has been corrupted with a cycle. Indicate that
// no matching module could be located.
//
ldrDataTableEntry = NULL;
break;
}
ldrDataTableEntry = CONTAINING_RECORD( next,
LDR_DATA_TABLE_ENTRY,
InLoadOrderLinks );
if (PreviousMode != KernelMode) {
ProbeForReadSmallStructure( ldrDataTableEntry,
sizeof(LDR_DATA_TABLE_ENTRY),
PROBE_ALIGNMENT(LDR_DATA_TABLE_ENTRY) );
}
imageStart = (ULONG_PTR)ldrDataTableEntry->DllBase;
if (imageStart > (ULONG_PTR)ProgramCounter) {
//
// The start of this module is past the program counter,
// keep looking.
//
continue;
}
imageEnd = imageStart + ldrDataTableEntry->SizeOfImage;
if (imageEnd > (ULONG_PTR)ProgramCounter) {
//
// Found a match.
//
break;
}
}
return ldrDataTableEntry;
}
VOID
KiCopyLastPathElement(
IN PUNICODE_STRING Source,
IN OUT PULONG StringBufferLen,
OUT PCHAR StringBuffer,
IN KPROCESSOR_MODE PreviousMode
)
/*++
Routine Description:
This routine locates the last path element of the path name represented by
Source and copies it to StringBuffer.
Arguments:
Source - Supplies a pointer to the source UNICODE_STRING path.
StringBufferLen - Supplies a pointer to a buffer length value. On entry,
this value represents the maximum length of StringBuffer. On exit, the
value is set to the actual number of characters stored.
StringBuffer - Supplies a pointer to the output string buffer that is to
contain the last path element. This string is not null terminated.
PreviousMode - Previous mode of the caller for use in probing
Return Value:
None.
--*/
{
PWCHAR src, srcBase;
PCHAR dst;
USHORT charCount;
ULONG srcBaseLength;
//
// The name of the module containing the specified address is at
// ldrDataTableEntry->BaseDllName. It might contain just the name,
// or it might contain the whole path.
//
// Start at the end of the module path and work back until one
// of the following is encountered:
//
// - ModuleName->MaximumLength characters
// - the beginning of the module path string
// - a path separator
//
srcBase = Source->Buffer;
srcBaseLength = Source->Length;
if (PreviousMode != KernelMode) {
ProbeForRead (srcBase, srcBaseLength, sizeof (WCHAR));
}
charCount = (USHORT)(srcBaseLength / sizeof(WCHAR));
src = &srcBase[ charCount ];
charCount = 0;
while (TRUE) {
if (charCount >= *StringBufferLen) {
break;
}
if (src == srcBase) {
break;
}
if (*(src-1) == L'\\') {
break;
}
src--;
charCount++;
}
//
// Now copy the characters into the output string. We do our own
// unicode-to-ansi conversion because the NLS routines cannot be
// called at raised IRQL.
//
dst = StringBuffer;
*StringBufferLen = charCount;
while (charCount > 0) {
*dst++ = (CHAR)(*src++);
charCount--;
}
}
BOOLEAN
KiNewGlobalAlignmentFault(
IN PVOID ProgramCounter,
IN KPROCESSOR_MODE PreviousMode,
OUT PALIGNMENT_FAULT_IMAGE *AlignmentFaultImage
)
/*++
Routine Description:
This routine looks for an existing alignment fault in the global
fault database. A new record is created if a match could not be
found. The count is incremented, and a pointer to the associated
image record is returned.
Arguments:
ProgramCounter - Supplies the code location of the faulting instruction.
PreviousMode - Supplies the execution mode at the time of the fault.
AlignmentFaultImage - Supplies a location into which the pointer to the
associated ALIGNMENT_FAULT_IMAGE structure is placed.
Return Value:
TRUE if an existing alignment fault match was not found, FALSE otherwise.
--*/
{
ULONG_PTR imageOffset;
CHAR imageNameBuffer[ MAX_IMAGE_NAME_CHARS + 1 ];
ULONG imageNameBufferLength;
PCHAR imageName;
PALIGNMENT_FAULT_IMAGE alignmentFaultImage;
BOOLEAN newFault;
BOOLEAN foundLdrDataInfo;
PVOID imageBase;
KIRQL oldIrql;
imageNameBufferLength = MAX_IMAGE_NAME_CHARS;
foundLdrDataInfo = KiGetLdrDataTableInformation( ProgramCounter,
PreviousMode,
&imageNameBufferLength,
imageNameBuffer,
&imageBase );
if (foundLdrDataInfo == FALSE) {
//
// Couldn't find an image for this program counter.
//
imageBase = NULL;
imageName = "Unavailable";
} else {
imageNameBuffer[ imageNameBufferLength ] = '\0';
imageName = imageNameBuffer;
}
//
// Acquire the spinlock at synch level so that we can handle exceptions
// from ISRs
//
imageOffset = (ULONG_PTR)ProgramCounter - (ULONG_PTR)imageBase;
oldIrql = KeAcquireSpinLockRaiseToSynch( &KipGlobalAlignmentDatabaseLock );
alignmentFaultImage = KiFindAlignmentFaultImage( imageName );
if (alignmentFaultImage == NULL) {
//
// Image table must be full
//
newFault = FALSE;
} else {
newFault = KiIncrementLocationAlignmentFault( alignmentFaultImage,
imageOffset );
}
KeReleaseSpinLock( &KipGlobalAlignmentDatabaseLock, oldIrql );
*AlignmentFaultImage = alignmentFaultImage;
return newFault;
}
BOOLEAN
KiIncrementLocationAlignmentFault(
IN PALIGNMENT_FAULT_IMAGE FaultImage,
IN ULONG_PTR OffsetFromBase
)
/*++
Routine Description:
This is a support routine for KiNewGlobalAlignmentFault. Its purpose is to
find or create an alignment fault record once the appropriate alignment
fault image has been found or created.
Arguments:
FaultImage - Supplies a pointer to the ALIGNMENT_FAULT_IMAGE associated
with this alignment fault.
OffsetFromBase - Supplies the image offset within the image of the faulting
instruction.
Return Value:
TRUE if an existing alignment fault match was not found, FALSE otherwise.
--*/
{
PALIGNMENT_FAULT_LOCATION faultLocation;
//
// Walk the location table, looking for a match.
//
faultLocation = FaultImage->LocationHead;
while (faultLocation != NULL) {
if (faultLocation->OffsetFromBase == OffsetFromBase) {
faultLocation->Count++;
return FALSE;
}
faultLocation = faultLocation->Next;
}
//
// Could not find a match. Build a new alignment fault record.
//
if (KiAlignmentFaultLocationCount >= MAX_FAULT_LOCATIONS) {
//
// Table is full. Indicate that this is not a new alignment fault.
//
return FALSE;
}
faultLocation = &KiAlignmentFaultLocations[ KiAlignmentFaultLocationCount ];
faultLocation->Image = FaultImage;
faultLocation->Next = FaultImage->LocationHead;
faultLocation->OffsetFromBase = OffsetFromBase;
faultLocation->Count = 1;
FaultImage->LocationHead = faultLocation;
FaultImage->Instances += 1;
KiAlignmentFaultLocationCount++;
return TRUE;
}
PALIGNMENT_FAULT_IMAGE
KiFindAlignmentFaultImage(
IN PCHAR ImageName
)
/*++
Routine Description:
This is a support routine for KiNewGlobalAlignmentFault. Its purpose is to
walk the global ALIGNMENT_FAULT_IMAGE list looking for an image name that
matches ImageName. If none is found, a new image record is created and
inserted into the list.
Arguments:
ImageName - Supplies a pointer to the ANSI image name.
Return Value:
Returns a pointer to the matching ALIGNMENT_FAULT_IMAGE structure.
--*/
{
PALIGNMENT_FAULT_IMAGE faultImage;
PALIGNMENT_FAULT_IMAGE lastImage;
if (ImageName == NULL || *ImageName == '\0') {
//
// No image name was supplied.
//
return NULL;
}
//
// Walk the image table, looking for a match.
//
faultImage = &KiAlignmentFaultImages[ 0 ];
lastImage = &KiAlignmentFaultImages[ KiAlignmentFaultImageCount ];
while (faultImage < lastImage) {
if (strcmp(ImageName, faultImage->Name) == 0) {
//
// Found it.
//
faultImage->Count += 1;
return faultImage;
}
faultImage += 1;
}
//
// Create a new fault image if there's room
//
if (KiAlignmentFaultImageCount >= MAX_FAULT_IMAGES) {
//
// Table is full up.
//
return NULL;
}
KiAlignmentFaultImageCount += 1;
//
// Zero the image record. The records start out zero-initialized; this
// is in case KiAlignmentFaultImageCount was manually reset to zero via
// the debugger.
//
RtlZeroMemory( faultImage, sizeof(ALIGNMENT_FAULT_IMAGE) );
faultImage->Count = 1;
strcpy( faultImage->Name, ImageName );
return faultImage;
}
#endif // DBG
|
313280.c | #include <std.h>
#include "/d/islands/tonerra/areadefs.h"
inherit I_JUNGLE;
void create(){
::create();
set_short("A trail in the jungle");
set_long(
@VETRI
%^GREEN%^You are walking along a small trail leading through the
jungle. You are quickly becoming surrounded by large trees and
plants and could very easily lose your way. From deeper in the
jungle, you hear sounds that you cannot identify and the density
of the plant life makes it difficult to see anything off the trail.
You feel yourself walking on a slight incline.
VETRI
);
set_indoors(0);
set_light(2);
set_listen("default","You hear movement in the jungle and wonder if it is the wind.");
set_exits(([
"southwest":PATH+"path80",
"northeast":PATH+"path82"
]));
}
|
174794.c | #include "ex22.h"
#include "dbg.h"
const char *MY_NAME = "Zed A. Shaw";
void scope_demo(int count)
{
log_info("count is: %d", count);
if (count > 10) {
int numbers = 100; // BAD! BUGS!
log_info("count in this scope is %d", numbers);
}
log_info("count is at exit: %d", count);
count = 3000;
log_info("count after assign: %d", count);
}
int main(int argc, char *argv[])
{
// test out THE_AGE accessors
log_info("My name: %s, age: %d", MY_NAME, get_age());
set_age(100);
log_info("My age is now: %d", get_age());
// test out THE_SIZE extern
log_info("THE_SIZE is: %d", THE_SIZE);
print_size();
THE_SIZE = 9;
log_info("THE SIZE is now: %d", THE_SIZE);
print_size();
// test the ratio function static
log_info("Ratio at first: %f", update_ratio(2.0));
log_info("Ratio again: %f", update_ratio(10.0));
log_info("Ratio once more: %f", update_ratio(300.0));
// test the scope demo
int count = 4;
scope_demo(count);
scope_demo(count * 20);
log_info("count after calling scope_demo: %d", count);
return 0;
}
|
514676.c | /*
* RELIC is an Efficient LIbrary for Cryptography
* Copyright (C) 2007-2013 RELIC Authors
*
* This file is part of RELIC. RELIC is legal property of its developers,
* whose names are not listed here. Please refer to the COPYRIGHT file
* for contact information.
*
* RELIC is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* RELIC is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with RELIC. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* @file
*
* Implementation of binary field inversion functions.
*
* @version $Id: relic_fb_inv.c 1572 2013-09-02 07:11:29Z dfaranha $
* @ingroup fb
*/
#include "relic_core.h"
#include "relic_fb.h"
#include "relic_fb_low.h"
#include "relic_bn_low.h"
#include "relic_util.h"
#include "relic_rand.h"
/*============================================================================*/
/* Public definitions */
/*============================================================================*/
#if FB_INV == BASIC || !defined(STRIP)
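/*
 * Inverts a binary field element by exponentiation (Fermat's little theorem:
 * a^{-1} = a^{2^m - 2}), computed with repeated squarings and multiplications.
 */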
void fb_inv_basic(fb_t c, const fb_t a) {
fb_t t, u, v;
int i, x;
fb_null(t);
fb_null(u);
fb_null(v);
TRY {
fb_new(t);
fb_new(u);
fb_new(v);
#if (FB_POLYN % 2) == 0
fb_sqr(v, a);
for (i = 2; i < FB_BITS; i++) {
fb_sqr(u, a);
for (int j = 1; j < i; j++) {
fb_sqr(u, u);
}
fb_mul(v, v, u);
}
fb_copy(c, v);
#else
/* u = a^2, v = 1, x = (m - 1)/2. */
fb_sqr(u, a);
fb_set_dig(v, 1);
x = (FB_BITS - 1) >> 1;
while (x != 0) {
/* u = u * a^{2x}. */
fb_copy(t, u);
for (i = 0; i < x; i++) {
fb_sqr(t, t);
}
fb_mul(u, u, t);
if ((x & 0x01) == 0) {
x = x >> 1;
} else {
/* v = v * u, u = u^2, x = (x - 1)/2. */
fb_mul(v, v, u);
fb_sqr(u, u);
x = (x - 1) >> 1;
}
}
#endif
fb_copy(c, v);
}
CATCH_ANY {
THROW(ERR_CAUGHT);
}
FINALLY {
fb_free(t);
fb_free(u);
fb_free(v);
}
}
#endif
#if FB_INV == BINAR || !defined(STRIP)
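/*
 * Inverts a binary field element with the binary extended Euclidean algorithm:
 * u and v are repeatedly divided by z while g1 and g2 are updated so that the
 * invariants g1 * a = u (mod f) and g2 * a = v (mod f) are preserved.
 */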
void fb_inv_binar(fb_t c, const fb_t a) {
int lu, lv;
dv_t u, v, g1, g2;
dv_null(u);
dv_null(v);
dv_null(g1);
dv_null(g2);
TRY {
dv_new(u);
dv_new(v);
dv_new(g1);
dv_new(g2);
/* u = a, v = f, g1 = 1, g2 = 0. */
fb_copy(u, a);
fb_copy(v, fb_poly_get());
if (FB_BITS % FB_DIGIT == 0) {
v[FB_DIGS] = 1;
}
dv_zero(g1, 2 * FB_DIGS);
g1[0] = 1;
dv_zero(g2, 2 * FB_DIGS);
lu = FB_DIGS;
lv = FB_DIGS + (FB_BITS % FB_DIGIT == 0);
/* While (u != 1 && v != 1). */
while (1) {
/* While z divides u do. */
while ((u[0] & 0x01) == 0) {
/* u = u/z. */
bn_rsh1_low(u, u, lu);
/* If z divides g1 then g1 = g1/z; else g1 = (g1 + f)/z. */
if ((g1[0] & 0x01) == 1) {
fb_poly_add(g1, g1);
if (FB_BITS % FB_DIGIT == 0) {
g1[FB_DIGS] ^= 1;
}
}
bn_rsh1_low(g1, g1, FB_DIGS + 1);
}
while (u[lu - 1] == 0)
lu--;
if (lu == 1 && u[0] == 1)
break;
/* While z divides v do. */
while ((v[0] & 0x01) == 0) {
/* v = v/z. */
bn_rsh1_low(v, v, lv);
/* If z divides g2 then g2 = g2/z; else g2 = (g2 + f)/z. */
if ((g2[0] & 0x01) == 1) {
fb_poly_add(g2, g2);
if (FB_BITS % FB_DIGIT == 0) {
g2[FB_DIGS] ^= 1;
}
}
bn_rsh1_low(g2, g2, FB_DIGS + 1);
}
while (v[lv - 1] == 0)
lv--;
if (lv == 1 && v[0] == 1)
break;
/* If deg(u) > deg(v) then u = u + v, g1 = g1 + g2. */
if (lu > lv || (lu == lv && u[lu - 1] > v[lv - 1])) {
fb_addd_low(u, u, v, lv);
fb_add(g1, g1, g2);
} else {
/* Else v = v + u, g2 = g2 + g1. */
fb_addd_low(v, v, u, lu);
fb_add(g2, g2, g1);
}
}
/* If u == 1 then return g1; else return g2. */
if (lu == 1 && u[0] == 1) {
fb_copy(c, g1);
} else {
fb_copy(c, g2);
}
}
CATCH_ANY {
THROW(ERR_CAUGHT);
}
FINALLY {
dv_free(u);
dv_free(v);
dv_free(g1);
dv_free(g2);
}
}
#endif
#if FB_INV == EXGCD || !defined(STRIP)
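/*
 * Inverts a binary field element with the extended Euclidean algorithm,
 * using the degree difference j = deg(u) - deg(v) to align and add the
 * polynomials and their accumulators.
 */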
void fb_inv_exgcd(fb_t c, const fb_t a) {
int j, d, lu, lv, lt, l1, l2, bu, bv;
dv_t _u, _v, _g1, _g2;
dig_t *t = NULL, *u = NULL, *v = NULL, *g1 = NULL, *g2 = NULL, carry;
fb_null(_u);
fb_null(_v);
fb_null(_g1);
fb_null(_g2);
TRY {
dv_new(_u);
dv_new(_v);
dv_new(_g1);
dv_new(_g2);
dv_zero(_g1, FB_DIGS + 1);
dv_zero(_g2, FB_DIGS + 1);
u = _u;
v = _v;
g1 = _g1;
g2 = _g2;
/* u = a, v = f, g1 = 1, g2 = 0. */
fb_copy(u, a);
fb_copy(v, fb_poly_get());
g1[0] = 1;
lu = lv = FB_DIGS;
l1 = l2 = 1;
bu = fb_bits(u);
bv = FB_BITS + 1;
j = bu - bv;
/* While (u != 1). */
while (1) {
/* If j < 0 then swap(u, v), swap(g1, g2), j = -j. */
if (j < 0) {
t = u;
u = v;
v = t;
lt = lu;
lu = lv;
lv = lt;
t = g1;
g1 = g2;
g2 = t;
lt = l1;
l1 = l2;
l2 = lt;
j = -j;
}
SPLIT(j, d, j, FB_DIG_LOG);
/* u = u + v * z^j. */
if (j > 0) {
carry = fb_lsha_low(u + d, v, j, lv);
u[d + lv] ^= carry;
} else {
fb_addd_low(u + d, u + d, v, lv);
}
/* g1 = g1 + g2 * z^j. */
if (j > 0) {
carry = fb_lsha_low(g1 + d, g2, j, l2);
l1 = (l2 + d >= l1 ? l2 + d : l1);
if (carry) {
g1[d + l2] ^= carry;
l1 = (l2 + d >= l1 ? l1 + 1 : l1);
}
} else {
fb_addd_low(g1 + d, g1 + d, g2, l2);
l1 = (l2 + d > l1 ? l2 + d : l1);
}
while (u[lu - 1] == 0)
lu--;
while (v[lv - 1] == 0)
lv--;
if (lu == 1 && u[0] == 1)
break;
/* j = deg(u) - deg(v). */
lt = util_bits_dig(u[lu - 1]) - util_bits_dig(v[lv - 1]);
j = ((lu - lv) << FB_DIG_LOG) + lt;
}
/* Return g1. */
fb_copy(c, g1);
}
CATCH_ANY {
THROW(ERR_CAUGHT);
}
FINALLY {
dv_free(_u);
dv_free(_v);
dv_free(_g1);
dv_free(_g2);
}
}
#endif
#if FB_INV == ALMOS || !defined(STRIP)
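/*
 * Inverts a binary field element with a variant of the almost inverse /
 * binary Euclidean algorithm; operands are swapped by pointer instead of
 * being copied.
 */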
void fb_inv_almos(fb_t c, const fb_t a) {
int lu, lv, lt;
dv_t _b, _d, _u, _v;
dig_t *t = NULL, *u = NULL, *v = NULL, *b = NULL, *d = NULL;
dv_null(_b);
dv_null(_d);
dv_null(_u);
dv_null(_v);
TRY {
dv_new(_b);
dv_new(_d);
dv_new(_u);
dv_new(_v);
b = _b;
d = _d;
u = _u;
v = _v;
/* b = 1, d = 0, u = a, v = f. */
dv_zero(b, 2 * FB_DIGS);
fb_set_dig(b, 1);
dv_zero(d, 2 * FB_DIGS);
fb_copy(u, a);
fb_copy(v, fb_poly_get());
if (FB_BITS % FB_DIGIT == 0) {
v[FB_DIGS] = 1;
}
lu = FB_DIGS;
lv = FB_DIGS + (FB_BITS % FB_DIGIT == 0);
while (1) {
/* While z divides u do. */
while ((u[0] & 0x01) == 0) {
/* u = u/z. */
bn_rsh1_low(u, u, lu);
/* If z divides b then b = b/z; else b = (b + f)/z. */
if ((b[0] & 0x01) == 1) {
fb_poly_add(b, b);
if (FB_BITS % FB_DIGIT == 0) {
b[FB_DIGS] ^= 1;
}
}
/* b often has FB_DIGS digits. */
bn_rsh1_low(b, b, FB_DIGS + 1);
}
/* If u = 1, return b. */
while (u[lu - 1] == 0)
lu--;
if (lu == 1 && u[0] == 1) {
break;
}
/* If deg(u) < deg(v) then swap(u, v), swap(b, d). */
if ((lu < lv) || ((lu == lv) && (u[lu - 1] < v[lv - 1]))) {
t = u;
u = v;
v = t;
/* Swap lu and lv too. */
lt = lu;
lu = lv;
lv = lt;
t = b;
b = d;
d = t;
}
/* u = u + v, b = b + d. */
fb_addd_low(u, u, v, lv);
fb_addn_low(b, b, d);
}
}
CATCH_ANY {
THROW(ERR_CAUGHT);
}
FINALLY {
fb_copy(c, b);
dv_free(_b);
dv_free(_d);
dv_free(_u);
dv_free(_v);
}
}
#endif
#if FB_INV == ITOHT || !defined(STRIP)
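/*
 * Inverts a binary field element with the Itoh-Tsujii algorithm, using the
 * precomputed addition chain for the field (fb_poly_get_chain) and repeated
 * squaring iterations (fb_itr).
 */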
void fb_inv_itoht(fb_t c, const fb_t a) {
int i, x, y, len;
const int *chain = fb_poly_get_chain(&len);
int u[len + 1];
fb_t table[len + 1];
for (i = 0; i <= len; i++) {
fb_null(table[i]);
}
TRY {
for (i = 0; i <= len; i++) {
fb_new(table[i]);
}
#if (FB_POLYN % 2) == 0
fb_sqr(table[0], a);
for (i = 2; i < FB_BITS; i++) {
fb_sqr(table[1], a);
for (int j = 1; j < i; j++) {
fb_sqr(table[1], table[1]);
}
fb_mul(table[0], table[0], table[1]);
}
fb_copy(c, table[0]);
#else
u[0] = 1;
u[1] = 2;
fb_copy(table[0], a);
fb_sqr(table[1], table[0]);
fb_mul(table[1], table[1], table[0]);
for (i = 2; i <= len; i++) {
x = chain[i - 1] >> 8;
y = chain[i - 1] - (x << 8);
if (x == y) {
u[i] = 2 * u[i - 1];
} else {
u[i] = u[x] + u[y];
}
fb_itr(table[i], table[x], u[y], fb_poly_tab_sqr(y));
fb_mul(table[i], table[i], table[y]);
}
fb_sqr(c, table[len]);
#endif
}
CATCH_ANY {
THROW(ERR_CAUGHT);
}
FINALLY {
for (i = 0; i <= len; i++) {
fb_free(table[i]);
}
}
}
#endif
#if FB_INV == BRUCH || !defined(STRIP)
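/*
 * Inverts a binary field element with a shift-based, hardware-oriented
 * algorithm (the RELIC "BRUCH" option) that runs a fixed 2 * FB_BITS
 * iterations over the r/s and u/v pairs.
 */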
void fb_inv_bruch(fb_t c, const fb_t a) {
fb_t _r, _s, _u, _v;
dig_t *r = NULL, *s = NULL, *t = NULL, *u = NULL, *v = NULL;
int delta = 0;
fb_null(_r);
fb_null(_s);
fb_null(_u);
fb_null(_v);
TRY {
fb_new(_r);
fb_new(_s);
fb_new(_u);
fb_new(_v);
fb_copy(_r, a);
fb_copy(_s, fb_poly_get());
fb_zero(_v);
fb_set_dig(_u, 1);
r = _r;
s = _s;
u = _u;
v = _v;
for (int i = 1; i <= 2 * FB_BITS; i++) {
if ((r[FB_DIGS - 1] & ((dig_t)1 << (FB_BITS % FB_DIGIT))) == 0) {
fb_lsh(r, r, 1);
fb_lsh(u, u, 1);
delta++;
} else {
if ((s[FB_DIGS - 1] & ((dig_t)1 << (FB_BITS % FB_DIGIT)))) {
fb_add(s, s, r);
fb_add(v, v, u);
}
fb_lsh(s, s, 1);
if (delta == 0) {
t = r;
r = s;
s = t;
t = u;
u = v;
v = t;
fb_lsh(u, u, 1);
delta++;
} else {
fb_rsh(u, u, 1);
delta--;
}
}
}
fb_copy(c, u);
} CATCH_ANY {
THROW(ERR_CAUGHT);
} FINALLY {
fb_free(_r);
fb_free(_s);
fb_free(_u);
fb_free(_v);
}
}
#endif
#if FB_INV == LOWER || !defined(STRIP)
void fb_inv_lower(fb_t c, const fb_t a) {
fb_invn_low(c, a);
}
#endif
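/*
 * Inverts n binary field elements simultaneously with Montgomery's trick:
 * the partial products a[0]*...*a[i] are accumulated, the final product is
 * inverted once, and the individual inverses are recovered by multiplying
 * back through the stored partial products, so only a single field inversion
 * is needed for the whole batch.
 *
 * Illustrative usage sketch (not part of the original sources; it assumes the
 * usual RELIC allocation pattern and that fb_rand() is available):
 *
 *   fb_t a[2], c[2];
 *   fb_new(a[0]); fb_new(a[1]); fb_new(c[0]); fb_new(c[1]);
 *   fb_rand(a[0]); fb_rand(a[1]);
 *   fb_inv_sim(c, (const fb_t *)a, 2);   // c[i] = a[i]^{-1}
 */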
void fb_inv_sim(fb_t *c, const fb_t *a, int n) {
int i;
fb_t u, t[n];
for (i = 0; i < n; i++) {
fb_null(t[i]);
}
fb_null(u);
TRY {
for (i = 0; i < n; i++) {
fb_new(t[i]);
}
fb_new(u);
fb_copy(c[0], a[0]);
fb_copy(t[0], a[0]);
for (i = 1; i < n; i++) {
fb_copy(t[i], a[i]);
fb_mul(c[i], c[i - 1], a[i]);
}
fb_inv(u, c[n - 1]);
for (i = n - 1; i > 0; i--) {
fb_mul(c[i], u, c[i - 1]);
fb_mul(u, u, t[i]);
}
fb_copy(c[0], u);
}
CATCH_ANY {
THROW(ERR_CAUGHT);
}
FINALLY {
for (i = 0; i < n; i++) {
fb_free(t[i]);
}
fb_free(u);
}
}
|
63606.c | /*
* Time of day based timer functions.
*
* S390 version
* Copyright IBM Corp. 1999, 2008
* Author(s): Hartmut Penner (hp@de.ibm.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com),
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
*
* Derived from "arch/i386/kernel/time.c"
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
*/
#define KMSG_COMPONENT "time"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/stop_machine.h>
#include <linux/time.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/profile.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/timekeeper_internal.h>
#include <linux/clockchips.h>
#include <linux/gfp.h>
#include <linux/kprobes.h>
#include <asm/uaccess.h>
#include <asm/facility.h>
#include <asm/delay.h>
#include <asm/div64.h>
#include <asm/vdso.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/vtimer.h>
#include <asm/stp.h>
#include <asm/cio.h>
#include "entry.h"
u64 sched_clock_base_cc = -1; /* Force to data section. */
EXPORT_SYMBOL_GPL(sched_clock_base_cc);
static DEFINE_PER_CPU(struct clock_event_device, comparators);
ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
EXPORT_SYMBOL(s390_epoch_delta_notifier);
unsigned char ptff_function_mask[16];
unsigned long lpar_offset;
unsigned long initial_leap_seconds;
/*
* Get time offsets with PTFF
*/
void __init ptff_init(void)
{
struct ptff_qto qto;
struct ptff_qui qui;
if (!test_facility(28))
return;
ptff(&ptff_function_mask, sizeof(ptff_function_mask), PTFF_QAF);
/* get LPAR offset */
if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
lpar_offset = qto.tod_epoch_difference;
/* get initial leap seconds */
if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
initial_leap_seconds = (unsigned long)
((long) qui.old_leap * 4096000000L);
}
/*
* Scheduler clock - returns current time in nanosec units.
*/
unsigned long long notrace sched_clock(void)
{
return tod_to_ns(get_tod_clock_monotonic());
}
NOKPROBE_SYMBOL(sched_clock);
/*
* Monotonic_clock - returns # of nanoseconds passed since time_init()
*/
unsigned long long monotonic_clock(void)
{
return sched_clock();
}
EXPORT_SYMBOL(monotonic_clock);
void tod_to_timeval(__u64 todval, struct timespec64 *xt)
{
unsigned long long sec;
sec = todval >> 12;
do_div(sec, 1000000);
xt->tv_sec = sec;
todval -= (sec * 1000000) << 12;
xt->tv_nsec = ((todval * 1000) >> 12);
}
EXPORT_SYMBOL(tod_to_timeval);
void clock_comparator_work(void)
{
struct clock_event_device *cd;
S390_lowcore.clock_comparator = -1ULL;
cd = this_cpu_ptr(&comparators);
cd->event_handler(cd);
}
/*
* Fixup the clock comparator.
*/
static void fixup_clock_comparator(unsigned long long delta)
{
/* If nobody is waiting there's nothing to fix. */
if (S390_lowcore.clock_comparator == -1ULL)
return;
S390_lowcore.clock_comparator += delta;
set_clock_comparator(S390_lowcore.clock_comparator);
}
static int s390_next_event(unsigned long delta,
struct clock_event_device *evt)
{
S390_lowcore.clock_comparator = get_tod_clock() + delta;
set_clock_comparator(S390_lowcore.clock_comparator);
return 0;
}
/*
* Set up lowcore and control register of the current cpu to
* enable TOD clock and clock comparator interrupts.
*/
void init_cpu_timer(void)
{
struct clock_event_device *cd;
int cpu;
S390_lowcore.clock_comparator = -1ULL;
set_clock_comparator(S390_lowcore.clock_comparator);
cpu = smp_processor_id();
cd = &per_cpu(comparators, cpu);
cd->name = "comparator";
cd->features = CLOCK_EVT_FEAT_ONESHOT;
cd->mult = 16777;
cd->shift = 12;
cd->min_delta_ns = 1;
cd->max_delta_ns = LONG_MAX;
cd->rating = 400;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = s390_next_event;
clockevents_register_device(cd);
/* Enable clock comparator timer interrupt. */
__ctl_set_bit(0,11);
/* Always allow the timing alert external interrupt. */
__ctl_set_bit(0, 4);
}
static void clock_comparator_interrupt(struct ext_code ext_code,
unsigned int param32,
unsigned long param64)
{
inc_irq_stat(IRQEXT_CLK);
if (S390_lowcore.clock_comparator == -1ULL)
set_clock_comparator(S390_lowcore.clock_comparator);
}
static void stp_timing_alert(struct stp_irq_parm *);
static void timing_alert_interrupt(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
inc_irq_stat(IRQEXT_TLA);
if (param32 & 0x00038000)
stp_timing_alert((struct stp_irq_parm *) &param32);
}
static void stp_reset(void);
void read_persistent_clock64(struct timespec64 *ts)
{
__u64 clock;
clock = get_tod_clock() - initial_leap_seconds;
tod_to_timeval(clock - TOD_UNIX_EPOCH, ts);
}
void read_boot_clock64(struct timespec64 *ts)
{
__u64 clock;
clock = sched_clock_base_cc - initial_leap_seconds;
tod_to_timeval(clock - TOD_UNIX_EPOCH, ts);
}
static cycle_t read_tod_clock(struct clocksource *cs)
{
return get_tod_clock();
}
static struct clocksource clocksource_tod = {
.name = "tod",
.rating = 400,
.read = read_tod_clock,
.mask = -1ULL,
.mult = 1000,
.shift = 12,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
struct clocksource * __init clocksource_default_clock(void)
{
return &clocksource_tod;
}
void update_vsyscall(struct timekeeper *tk)
{
u64 nsecps;
if (tk->tkr_mono.clock != &clocksource_tod)
return;
/* Make userspace gettimeofday spin until we're done. */
++vdso_data->tb_update_count;
smp_wmb();
vdso_data->xtime_tod_stamp = tk->tkr_mono.cycle_last;
vdso_data->xtime_clock_sec = tk->xtime_sec;
vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
vdso_data->wtom_clock_sec =
tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
vdso_data->wtom_clock_nsec = tk->tkr_mono.xtime_nsec +
+ ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
nsecps = (u64) NSEC_PER_SEC << tk->tkr_mono.shift;
while (vdso_data->wtom_clock_nsec >= nsecps) {
vdso_data->wtom_clock_nsec -= nsecps;
vdso_data->wtom_clock_sec++;
}
vdso_data->xtime_coarse_sec = tk->xtime_sec;
vdso_data->xtime_coarse_nsec =
(long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
vdso_data->wtom_coarse_sec =
vdso_data->xtime_coarse_sec + tk->wall_to_monotonic.tv_sec;
vdso_data->wtom_coarse_nsec =
vdso_data->xtime_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
while (vdso_data->wtom_coarse_nsec >= NSEC_PER_SEC) {
vdso_data->wtom_coarse_nsec -= NSEC_PER_SEC;
vdso_data->wtom_coarse_sec++;
}
vdso_data->tk_mult = tk->tkr_mono.mult;
vdso_data->tk_shift = tk->tkr_mono.shift;
smp_wmb();
++vdso_data->tb_update_count;
}
extern struct timezone sys_tz;
void update_vsyscall_tz(void)
{
vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
vdso_data->tz_dsttime = sys_tz.tz_dsttime;
}
/*
* Initialize the TOD clock and the CPU timer of
* the boot cpu.
*/
void __init time_init(void)
{
/* Reset time synchronization interfaces. */
stp_reset();
/* request the clock comparator external interrupt */
if (register_external_irq(EXT_IRQ_CLK_COMP, clock_comparator_interrupt))
panic("Couldn't request external interrupt 0x1004");
/* request the timing alert external interrupt */
if (register_external_irq(EXT_IRQ_TIMING_ALERT, timing_alert_interrupt))
panic("Couldn't request external interrupt 0x1406");
if (__clocksource_register(&clocksource_tod) != 0)
panic("Could not register TOD clock source");
/* Enable TOD clock interrupts on the boot cpu. */
init_cpu_timer();
/* Enable cpu timer interrupts on the boot cpu. */
vtime_init();
}
static DEFINE_PER_CPU(atomic_t, clock_sync_word);
static DEFINE_MUTEX(clock_sync_mutex);
static unsigned long clock_sync_flags;
#define CLOCK_SYNC_HAS_STP 0
#define CLOCK_SYNC_STP 1
/*
* The get_clock function for the physical clock. It will get the current
* TOD clock, subtract the LPAR offset and write the result to *clock.
* The function returns 0 if the clock is in sync with the external time
* source. If the clock mode is local it will return -EOPNOTSUPP and
* -EAGAIN if the clock is not in sync with the external reference.
*/
int get_phys_clock(unsigned long long *clock)
{
atomic_t *sw_ptr;
unsigned int sw0, sw1;
sw_ptr = &get_cpu_var(clock_sync_word);
sw0 = atomic_read(sw_ptr);
*clock = get_tod_clock() - lpar_offset;
sw1 = atomic_read(sw_ptr);
put_cpu_var(clock_sync_word);
if (sw0 == sw1 && (sw0 & 0x80000000U))
/* Success: time is in sync. */
return 0;
if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
return -EOPNOTSUPP;
if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
return -EACCES;
return -EAGAIN;
}
EXPORT_SYMBOL(get_phys_clock);
/*
* Make get_phys_clock() return -EAGAIN.
*/
static void disable_sync_clock(void *dummy)
{
atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
/*
* Clear the in-sync bit 2^31. All get_phys_clock calls will
* fail until the sync bit is turned back on. In addition
* increase the "sequence" counter to avoid the race of an
* stp event and the complete recovery against get_phys_clock.
*/
atomic_andnot(0x80000000, sw_ptr);
atomic_inc(sw_ptr);
}
/*
* Make get_phys_clock() return 0 again.
* Needs to be called from a context disabled for preemption.
*/
static void enable_sync_clock(void)
{
atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
atomic_or(0x80000000, sw_ptr);
}
/*
* Function to check if the clock is in sync.
*/
static inline int check_sync_clock(void)
{
atomic_t *sw_ptr;
int rc;
sw_ptr = &get_cpu_var(clock_sync_word);
rc = (atomic_read(sw_ptr) & 0x80000000U) != 0;
put_cpu_var(clock_sync_word);
return rc;
}
/* Single threaded workqueue used for stp sync events */
static struct workqueue_struct *time_sync_wq;
static void __init time_init_wq(void)
{
if (time_sync_wq)
return;
time_sync_wq = create_singlethread_workqueue("timesync");
}
struct clock_sync_data {
atomic_t cpus;
int in_sync;
unsigned long long fixup_cc;
};
static void clock_sync_cpu(struct clock_sync_data *sync)
{
atomic_dec(&sync->cpus);
enable_sync_clock();
while (sync->in_sync == 0) {
__udelay(1);
/*
* A different cpu changes *in_sync. Therefore use
* barrier() to force memory access.
*/
barrier();
}
if (sync->in_sync != 1)
/* Didn't work. Clear per-cpu in sync bit again. */
disable_sync_clock(NULL);
/*
* This round of TOD syncing is done. Set the clock comparator
* to the next tick and let the processor continue.
*/
fixup_clock_comparator(sync->fixup_cc);
}
/*
* Server Time Protocol (STP) code.
*/
static bool stp_online;
static struct stp_sstpi stp_info;
static void *stp_page;
static void stp_work_fn(struct work_struct *work);
static DEFINE_MUTEX(stp_work_mutex);
static DECLARE_WORK(stp_work, stp_work_fn);
static struct timer_list stp_timer;
static int __init early_parse_stp(char *p)
{
return kstrtobool(p, &stp_online);
}
early_param("stp", early_parse_stp);
/*
* Reset STP attachment.
*/
static void __init stp_reset(void)
{
int rc;
stp_page = (void *) get_zeroed_page(GFP_ATOMIC);
rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
if (rc == 0)
set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
else if (stp_online) {
pr_warn("The real or virtual hardware system does not provide an STP interface\n");
free_page((unsigned long) stp_page);
stp_page = NULL;
stp_online = 0;
}
}
static void stp_timeout(unsigned long dummy)
{
queue_work(time_sync_wq, &stp_work);
}
static int __init stp_init(void)
{
if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
return 0;
setup_timer(&stp_timer, stp_timeout, 0UL);
time_init_wq();
if (!stp_online)
return 0;
queue_work(time_sync_wq, &stp_work);
return 0;
}
arch_initcall(stp_init);
/*
* STP timing alert. There are three causes:
* 1) timing status change
* 2) link availability change
* 3) time control parameter change
* In all three cases we are only interested in the clock source state.
* If a STP clock source is now available use it.
*/
static void stp_timing_alert(struct stp_irq_parm *intparm)
{
if (intparm->tsc || intparm->lac || intparm->tcpc)
queue_work(time_sync_wq, &stp_work);
}
/*
* STP sync check machine check. This is called when the timing state
* changes from the synchronized state to the unsynchronized state.
* After a STP sync check the clock is not in sync. The machine check
* is broadcasted to all cpus at the same time.
*/
int stp_sync_check(void)
{
disable_sync_clock(NULL);
return 1;
}
/*
* STP island condition machine check. This is called when an attached
* server attempts to communicate over an STP link and the servers
* have matching CTN ids and have a valid stratum-1 configuration
* but the configurations do not match.
*/
int stp_island_check(void)
{
disable_sync_clock(NULL);
return 1;
}
void stp_queue_work(void)
{
queue_work(time_sync_wq, &stp_work);
}
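/*
 * Synchronize the TOD clock to the STP clock source. The first cpu to enter
 * acts as the master: it waits for all other cpus, performs the STP sync
 * operation, propagates the clock delta and updates the LPAR offset. All
 * other cpus spin in clock_sync_cpu() until the master signals the result.
 */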
static int stp_sync_clock(void *data)
{
static int first;
unsigned long long clock_delta;
struct clock_sync_data *stp_sync;
struct ptff_qto qto;
int rc;
stp_sync = data;
if (xchg(&first, 1) == 1) {
/* Slave */
clock_sync_cpu(stp_sync);
return 0;
}
/* Wait until all other cpus entered the sync function. */
while (atomic_read(&stp_sync->cpus) != 0)
cpu_relax();
enable_sync_clock();
rc = 0;
if (stp_info.todoff[0] || stp_info.todoff[1] ||
stp_info.todoff[2] || stp_info.todoff[3] ||
stp_info.tmd != 2) {
rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0, &clock_delta);
if (rc == 0) {
/* fixup the monotonic sched clock */
sched_clock_base_cc += clock_delta;
if (ptff_query(PTFF_QTO) &&
ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
/* Update LPAR offset */
lpar_offset = qto.tod_epoch_difference;
atomic_notifier_call_chain(&s390_epoch_delta_notifier,
0, &clock_delta);
stp_sync->fixup_cc = clock_delta;
fixup_clock_comparator(clock_delta);
rc = chsc_sstpi(stp_page, &stp_info,
sizeof(struct stp_sstpi));
if (rc == 0 && stp_info.tmd != 2)
rc = -EAGAIN;
}
}
if (rc) {
disable_sync_clock(NULL);
stp_sync->in_sync = -EAGAIN;
} else
stp_sync->in_sync = 1;
xchg(&first, 0);
return 0;
}
/*
* STP work. Check for the STP state and take over the clock
* synchronization if the STP clock source is usable.
*/
static void stp_work_fn(struct work_struct *work)
{
struct clock_sync_data stp_sync;
int rc;
/* prevent multiple execution. */
mutex_lock(&stp_work_mutex);
if (!stp_online) {
chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
del_timer_sync(&stp_timer);
goto out_unlock;
}
rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0, NULL);
if (rc)
goto out_unlock;
rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
if (rc || stp_info.c == 0)
goto out_unlock;
/* Skip synchronization if the clock is already in sync. */
if (check_sync_clock())
goto out_unlock;
memset(&stp_sync, 0, sizeof(stp_sync));
get_online_cpus();
atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
stop_machine(stp_sync_clock, &stp_sync, cpu_online_mask);
put_online_cpus();
if (!check_sync_clock())
/*
* There is a usable clock but the synchronization failed.
* Retry after a second.
*/
mod_timer(&stp_timer, jiffies + HZ);
out_unlock:
mutex_unlock(&stp_work_mutex);
}
/*
* STP subsys sysfs interface functions
*/
static struct bus_type stp_subsys = {
.name = "stp",
.dev_name = "stp",
};
static ssize_t stp_ctn_id_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
if (!stp_online)
return -ENODATA;
return sprintf(buf, "%016llx\n",
*(unsigned long long *) stp_info.ctnid);
}
static DEVICE_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL);
static ssize_t stp_ctn_type_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
if (!stp_online)
return -ENODATA;
return sprintf(buf, "%i\n", stp_info.ctn);
}
static DEVICE_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL);
static ssize_t stp_dst_offset_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
if (!stp_online || !(stp_info.vbits & 0x2000))
return -ENODATA;
return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
}
static DEVICE_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL);
static ssize_t stp_leap_seconds_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
if (!stp_online || !(stp_info.vbits & 0x8000))
return -ENODATA;
return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
}
static DEVICE_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL);
static ssize_t stp_stratum_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
if (!stp_online)
return -ENODATA;
return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
}
static DEVICE_ATTR(stratum, 0400, stp_stratum_show, NULL);
static ssize_t stp_time_offset_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
if (!stp_online || !(stp_info.vbits & 0x0800))
return -ENODATA;
return sprintf(buf, "%i\n", (int) stp_info.tto);
}
static DEVICE_ATTR(time_offset, 0400, stp_time_offset_show, NULL);
static ssize_t stp_time_zone_offset_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
if (!stp_online || !(stp_info.vbits & 0x4000))
return -ENODATA;
return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
}
static DEVICE_ATTR(time_zone_offset, 0400,
stp_time_zone_offset_show, NULL);
static ssize_t stp_timing_mode_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
if (!stp_online)
return -ENODATA;
return sprintf(buf, "%i\n", stp_info.tmd);
}
static DEVICE_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL);
static ssize_t stp_timing_state_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
if (!stp_online)
return -ENODATA;
return sprintf(buf, "%i\n", stp_info.tst);
}
static DEVICE_ATTR(timing_state, 0400, stp_timing_state_show, NULL);
static ssize_t stp_online_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%i\n", stp_online);
}
static ssize_t stp_online_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned int value;
value = simple_strtoul(buf, NULL, 0);
if (value != 0 && value != 1)
return -EINVAL;
if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
return -EOPNOTSUPP;
mutex_lock(&clock_sync_mutex);
stp_online = value;
if (stp_online)
set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
else
clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
queue_work(time_sync_wq, &stp_work);
mutex_unlock(&clock_sync_mutex);
return count;
}
/*
* Can't use DEVICE_ATTR because the attribute should be named
* stp/online but dev_attr_online already exists in this file ..
*/
static struct device_attribute dev_attr_stp_online = {
.attr = { .name = "online", .mode = 0600 },
.show = stp_online_show,
.store = stp_online_store,
};
static struct device_attribute *stp_attributes[] = {
&dev_attr_ctn_id,
&dev_attr_ctn_type,
&dev_attr_dst_offset,
&dev_attr_leap_seconds,
&dev_attr_stp_online,
&dev_attr_stratum,
&dev_attr_time_offset,
&dev_attr_time_zone_offset,
&dev_attr_timing_mode,
&dev_attr_timing_state,
NULL
};
static int __init stp_init_sysfs(void)
{
struct device_attribute **attr;
int rc;
rc = subsys_system_register(&stp_subsys, NULL);
if (rc)
goto out;
for (attr = stp_attributes; *attr; attr++) {
rc = device_create_file(stp_subsys.dev_root, *attr);
if (rc)
goto out_unreg;
}
return 0;
out_unreg:
for (; attr >= stp_attributes; attr--)
device_remove_file(stp_subsys.dev_root, *attr);
bus_unregister(&stp_subsys);
out:
return rc;
}
device_initcall(stp_init_sysfs);
|
44140.c | /* Copyright (c) Microsoft Corporation. All rights reserved. */
#include "sll.h"
typedef struct _SLL_ENTRY cell, *list, *listseg;
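/*
 * Reverses a singly-linked list recursively: x accumulates the already
 * reversed prefix while y walks the remainder of the original list.
 */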
list list_reverse_rec_aux(list x, list y) {
if(y == NULL) {
return x;
} else {
list tmp = y->Flink;
y->Flink = x;
return list_reverse_rec_aux(y, tmp);
}
}
void list_reverse(list *z) {
*z = list_reverse_rec_aux(NULL, *z);
}
int main() {
list x;
x = cons(1, cons(2, cons(3, NULL)));
list_reverse(&x);
print_list(x); printf_s("\n");
}
|
533130.c | /*
* Copyright (c) 2014-2017 Alibaba Group. All rights reserved.
*
* Alibaba Group retains all right, title and interest (including all
* intellectual property rights) in and to this computer program, which is
* protected by applicable intellectual property laws. Unless you have
* obtained a separate written license from Alibaba Group., you are not
* authorized to utilize all or a part of this computer program for any
* purpose (including reproduction, distribution, modification, and
* compilation into object code), and you must immediately destroy or
* return to Alibaba Group all copies of this computer program. If you
* are licensed by Alibaba Group, your rights to utilize this computer
* program are limited by the terms of that license. To obtain a license,
* please contact Alibaba Group.
*
* This computer program contains trade secrets owned by Alibaba Group.
* and, unless unauthorized by Alibaba Group in writing, you agree to
* maintain the confidentiality of this computer program and related
* information and to not disclose this computer program and related
* information to any other person or entity.
*
* THIS COMPUTER PROGRAM IS PROVIDED AS IS WITHOUT ANY WARRANTIES, AND
* Alibaba Group EXPRESSLY DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED,
* INCLUDING THE WARRANTIES OF MERCHANTIBILITY, FITNESS FOR A PARTICULAR
* PURPOSE, TITLE, AND NONINFRINGEMENT.
*/
#include <pthread.h>
#include <unistd.h>
#include <errno.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <time.h>
#include <sys/time.h>
#include <fcntl.h>
#include <sys/select.h>
#include <stdio.h>
#include <sys/stat.h>
#include <stdlib.h>
#include <string.h>
#include "iot_import.h"
#include "iot_export.h"
#include "mqtt_global.h"
#include "gwiotapi.h"
#include "sysconfig.h"
#include "abp_nodes.h"
#if defined(ENABLE_WATCHDOG)
#include "watch_dog_export.h"
#endif
#include "mqtt_ipc_local.h"
#if defined(ENABLE_MONITOR)
#include <limits.h>
#define CUSTOM_MON_MSG_UP_ID 0xA0
#define CUSTOM_MON_MSG_DOWN_ID 0xA1
#include "monitor_interface_export.h"
#endif
//#define ENABLE_MSG_CACHE
#if defined(ENABLE_MSG_CACHE)
#include "msg/utils_msg.h"
#endif
#if defined(ENABLE_ABP_NODES)
static int lora_ns_server_exist = 0;
#endif
#define ENABLE_OTA
#define ENABLE_REMOTE_CTRL_SSH
#define ENABLE_REMOTE_CTRL_UART
#define TOPIC_GWMP_UPLINK "/lora/gwmp/uplink"
#define TOPIC_GWMP_DOWNLINK "/lora/gwmp/downlink"
#define TOPIC_CUSTOM_UPLINK "/lora/custom/uplink"
#define TOPIC_CUSTOM_DOWNLINK "/lora/custom/downlink"
#define TOPIC_GWCONFIG_UPLOAD "/lora/gwconfig/upload"
#define TOPIC_GWCONFIG_DOWNLOAD "/lora/gwconfig/download"
#define TOPIC_GWCONFIG_GET "/lora/gwconfig/get"
#define TOPIC_DEVICEINFO_UPLOAD "/lora/deviceinfo/upload"
#define TOPIC_DEVICEINFO_GET "/lora/deviceinfo/get"
#define TOPIC_GW_RESET "/lora/gw/reset"
#define TOPIC_LOG_UPLOAD "/logfile/upload"
#define TOPIC_CTRL_SSH "/ctrl/ssh"
#define TOPIC_CTRL_UART "/ctrl/uart"
#define UDP_LOCALHOST_ADDR "127.0.0.1"
#define MSG_LEN_MAX (8 * 1024)
#define TOPIC_NAME_LEN_MAX 128
#define GWMP_HEAD_UP_LEN 12
#define GWMP_HEAD_DOWN_LEN 4
#define OTA_BUF_LEN (6 * 1024)
#define CUSTOM_MSG_ID 0x80
#if defined(ENABLE_WATCHDOG)
#define THRD_ID_MAIN "thrd_main"
#define THRD_ID_OTA "thrd_ota"
#define THRD_ID_UPLINK "thrd_uplink"
#define THRD_ID_NAT_DOWNLINK "thrd_nat_downlink"
#endif
#define MQTT_CONNECT_RESET_CHECK_CNT 300
static int exit_sig = 0;
#if defined(ENABLE_MSG_CACHE)
static int enable_cache = 0;
#endif
static int native_exit_sig = 0;
const char uploadDeviceinfo[] = {
"\"deviceinfo\": {\
\"gateway_eui\": \"%s\",\
\"model\": \"%s\",\
\"manufacturer\": \"%s\",\
\"hw_version\": \"%s\",\
\"sw_version\": \"%s\",\
\"ota_version\": \"%s\"\
}"
};
iotx_lorogw_t g_iotx_loragw;
extern uint8_t abp_out_enable;
#ifdef ENABLE_OTA
#ifdef ENABLE_ADVANCED_OTA
static FILE *testfp = NULL;
static int _ota_download_start(const char *path)
{
if(!path || strlen(path)== 0)
return -1;
testfp = fopen(path, "w");
if (NULL == testfp) {
printf("fopen OTA file failed\n");
return -1;
}
return 0;
}
int _ota_download_write(char *buffer, uint32_t length)
{
uint32_t written_len = 0;
if (NULL == testfp) {
printf("OTA file not fopen\n");
return -1;
}
written_len = fwrite(buffer, 1, length, testfp);
if (written_len != length) {
printf("fwrite failed, %d != %d\n", written_len, length);
return -1;
}
return 0;
}
static int _ota_download_finalize(int stat)
{
if (testfp != NULL) {
fclose(testfp);
testfp = NULL;
}
return 0;
}
#endif
#endif
void event_handle(void *pcontext, void *pclient, iotx_mqtt_event_msg_pt msg)
{
iotx_lorogw_t *ploragw = &g_iotx_loragw;
uintptr_t packet_id = (uintptr_t)msg->msg;
iotx_mqtt_topic_info_pt topic_info = (iotx_mqtt_topic_info_pt)msg->msg;
switch (msg->event_type) {
case IOTX_MQTT_EVENT_UNDEF:
log_info("undefined event occur.");
break;
case IOTX_MQTT_EVENT_DISCONNECT:
#if defined(ENABLE_MSG_CACHE)
enable_cache = 1;
#endif
log_info("MQTT disconnect.");
break;
case IOTX_MQTT_EVENT_RECONNECT:
log_info("MQTT reconnect.");
break;
case IOTX_MQTT_EVENT_SUBCRIBE_SUCCESS:
log_info("subscribe success, packet-id=%u", (unsigned int)packet_id);
break;
case IOTX_MQTT_EVENT_SUBCRIBE_TIMEOUT:
log_info("subscribe wait ack timeout, packet-id=%u", (unsigned int)packet_id);
break;
case IOTX_MQTT_EVENT_SUBCRIBE_NACK:
log_info("subscribe nack, packet-id=%u", (unsigned int)packet_id);
break;
case IOTX_MQTT_EVENT_UNSUBCRIBE_SUCCESS:
log_info("unsubscribe success, packet-id=%u", (unsigned int)packet_id);
break;
case IOTX_MQTT_EVENT_UNSUBCRIBE_TIMEOUT:
log_info("unsubscribe timeout, packet-id=%u", (unsigned int)packet_id);
break;
case IOTX_MQTT_EVENT_UNSUBCRIBE_NACK:
log_info("unsubscribe nack, packet-id=%u", (unsigned int)packet_id);
break;
case IOTX_MQTT_EVENT_PUBLISH_SUCCESS:
#if defined(ENABLE_MSG_CACHE)
enable_cache = 0;
#endif
log_info("publish success, packet-id=%u checkCnt=%d", (unsigned int)packet_id, ploragw->checkCnt);
HAL_MutexLock(ploragw->check_mutex);
ploragw->checkCnt++;
HAL_MutexUnlock(ploragw->check_mutex);
break;
case IOTX_MQTT_EVENT_PUBLISH_TIMEOUT:
log_info("publish timeout, packet-id=%u", (unsigned int)packet_id);
break;
case IOTX_MQTT_EVENT_PUBLISH_NACK:
log_info("publish nack, packet-id=%u", (unsigned int)packet_id);
break;
case IOTX_MQTT_EVENT_PUBLISH_RECEIVED:
log_info("topic message arrived but without any related handle: topic=%.*s, topic_msg=%.*s",
topic_info->topic_len,
topic_info->ptopic,
topic_info->payload_len,
topic_info->payload);
break;
case IOTX_MQTT_EVENT_BUFFER_OVERFLOW:
log_info("buffer overflow, %s", (char *)(msg->msg));
break;
default:
log_info("Should NOT arrive here.");
break;
}
}
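/*
 * Background thread: uploads the iotx/pkfwd log files when uploadlog_flag is
 * set and downloads/activates the ABP node file when abpdl_flag is set.
 */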
void thread_file_check(void)
{
iotx_lorogw_t *ploragw = &g_iotx_loragw;
int ret = 0;
while (!exit_sig) {
#if defined(ENABLE_REMOTE_LOG)
if (1 == ploragw->uploadlog_flag) {
log_file_upload(FILELOG_MOD_IOTX, ploragw->auth_info.device_name);
log_file_upload(FILELOG_MOD_PKFWD, ploragw->auth_info.device_name);
HAL_MutexLock(ploragw->log_mutex);
ploragw->uploadlog_flag = 0;
HAL_MutexUnlock(ploragw->log_mutex);
log_info("after upload_filelog, set upload log_flag: %d\n", ploragw->uploadlog_flag);
}
#endif
#if defined(ENABLE_ABP_NODES)
if (1 == ploragw->abpdl_flag) {
ret = abp_file_download();
if (0 == ret) {
abp_send_msg_ack(NULL);
abp_redis_init();
} else {
abp_send_msg_ack("abpf:download error");
}
HAL_MutexLock(ploragw->abp_mutex);
ploragw->abpdl_flag = 0;
HAL_MutexUnlock(ploragw->abp_mutex);
log_info("after abp_file_download, set abpdl_flag: %d", ploragw->abpdl_flag);
}
#endif
HAL_SleepMs(1000);
}
return;
}
#if defined(ENABLE_OTA)
#ifdef ENABLE_ADVANCED_OTA
int need_reinit_ota = 0;
void thread_ota_check(void)
{
iotx_lorogw_t *ploragw = &g_iotx_loragw;
char buf_ota[OTA_BUF_LEN] = {0};
//char cur_ver[64] = {0};
//char *ota_ver = NULL;
uint32_t firmware_valid = 0;
int fin_stat = -1;
int ret = -1;
char ota_file_path[FILENAME_MAX + 1] = { 0 };
//int reportver = 1;
#if defined(ENABLE_WATCHDOG)
struct timespec watchdog_time_keeper;
clock_gettime(CLOCK_MONOTONIC, &watchdog_time_keeper);
#endif
while (!exit_sig) {
HAL_SleepMs(1000);
HAL_MutexLock(ploragw->ota_mutex);
if(need_reinit_ota){
log_info("reinitialize OTA");
IOT_OTA_Deinit(ploragw->h_ota);
ploragw->h_ota = NULL;
HAL_MutexUnlock(ploragw->ota_mutex);
HAL_SleepMs(2000);
HAL_MutexLock(ploragw->ota_mutex);
need_reinit_ota = 0;
ploragw->h_ota = IOT_OTA_Init(ploragw->auth_info.product_key, ploragw->auth_info.device_name, ploragw->pclient);
if (NULL == ploragw->h_ota) {
log_err("initialize OTA failed");
HAL_MutexUnlock(ploragw->ota_mutex);
break;
}
}
if (IOT_OTA_IsFetching(ploragw->h_ota)) {
HAL_MutexUnlock(ploragw->ota_mutex);
uint32_t last_percent = 0, percent = 0;
uint32_t size_downloaded = 0, size_file = 0;
int len = 0;
char version[32], md5sum[33];
//get OTA information
IOT_OTA_Ioctl(ploragw->h_ota, IOT_OTAG_FILE_SIZE, &size_file, 4);
memset(md5sum, 0x0, sizeof(md5sum));
IOT_OTA_Ioctl(ploragw->h_ota, IOT_OTAG_MD5SUM, md5sum, 33);
memset(version, 0x0, sizeof(version));
IOT_OTA_Ioctl(ploragw->h_ota, IOT_OTAG_VERSION, version, 32);
log_info("OTA info, new version: %s, md5sum: %s, file size: %d", version, md5sum, size_file);
if (0 == size_file) {
log_err("file size is 0, ota failed");
HAL_SleepMs(2000);
continue;
}
ota_file_path[0] = '\0';
ret = ota_notify_update_file_info(version, md5sum, size_file, ota_file_path);
if (LORA_IPC_SUCCESS != ret) {
log_err("notify update-deamon for downloading failed ret %d !", ret);
HAL_SleepMs(2000);
continue;
}
ret = _ota_download_start(ota_file_path);
if(ret < 0) {
log_err("call ota start api failed");
HAL_SleepMs(2000);
continue;
}
do {
len = IOT_OTA_FetchYield(ploragw->h_ota, buf_ota, OTA_BUF_LEN, 1);
if (len > 0) {
ret = _ota_download_write(buf_ota, (uint32_t)len);
if (-1 == ret) {
log_err("call ota write api failed");
break;
}
log_info("write %d bytes to file %s !\n", len, ota_file_path);
}
else {
log_err("IOT_OTA_FetchYield error!\n");
}
// get downloaded size
IOT_OTA_Ioctl(ploragw->h_ota, IOT_OTAG_FETCHED_SIZE, &size_downloaded, 4);
log_info("current download size %d !", size_downloaded);
percent = (size_downloaded * 50) / size_file;
if ((percent == 50) || ((percent < 50) && (percent - last_percent >= 10))) {
IOT_OTA_ReportProgress(ploragw->h_ota, percent, "");
log_info("download percent:%d", percent);
last_percent = percent;
}
HAL_SleepMs(100);
#if defined(ENABLE_WATCHDOG)
if (thread_feeddog_periodically(MQTT_SYMBOL, THRD_ID_OTA, 60, 1200, &watchdog_time_keeper) < 0) {
log_err("OTA thread feeddog failed\n");
}
#endif
} while(!IOT_OTA_IsFetchFinish(ploragw->h_ota));
if(size_downloaded < size_file) {
log_err("download failed!");
IOT_OTA_ReportProgress(ploragw->h_ota, IOT_OTAP_CHECK_FALIED, "download failed");
fin_stat = -1;
}
else {
IOT_OTA_Ioctl(ploragw->h_ota, IOT_OTAG_CHECK_FIRMWARE, &firmware_valid, 4);
if (0 == firmware_valid) {
log_info("The firmware is invalid");
IOT_OTA_ReportProgress(ploragw->h_ota, IOT_OTAP_CHECK_FALIED, "check failed");
fin_stat = -1;
} else {
log_info("The firmware is valid");
fin_stat = 0;
HAL_SleepMs(2000);
}
}
_ota_download_finalize(fin_stat);
ret = ota_notify_update_download_result(fin_stat, (int)size_downloaded);
if(ret < 0) {
log_err("notify download result failed!!!");
IOT_OTA_ReportProgress(ploragw->h_ota, IOT_OTAP_GENERAL_FAILED, "notify update-daemon failed");
}
if(fin_stat == -1 || ret < 0) {
log_info("reinitialize OTA");
HAL_MutexLock(ploragw->ota_mutex);
IOT_OTA_Deinit(ploragw->h_ota);
ploragw->h_ota = NULL;
HAL_MutexUnlock(ploragw->ota_mutex);
HAL_SleepMs(2000);
HAL_MutexLock(ploragw->ota_mutex);
ploragw->h_ota = IOT_OTA_Init(ploragw->auth_info.product_key, ploragw->auth_info.device_name, ploragw->pclient);
if (NULL == ploragw->h_ota) {
log_err("initialize OTA failed");
HAL_MutexUnlock(ploragw->ota_mutex);
break;
}
HAL_MutexUnlock(ploragw->ota_mutex);
}
}
else {
HAL_MutexUnlock(ploragw->ota_mutex);
}
HAL_SleepMs(2000);
#if defined(ENABLE_WATCHDOG)
if (thread_feeddog_periodically(MQTT_SYMBOL, THRD_ID_OTA, 60, 600, &watchdog_time_keeper) < 0) {
log_err("OTA thread feeddog failed\n");
}
#endif
}
#if defined(ENABLE_WATCHDOG)
thread_cancel_feeddog(MQTT_SYMBOL, THRD_ID_OTA);
#endif
return;
}
#else
void thread_ota_check(void)
{
iotx_lorogw_t *ploragw = &g_iotx_loragw;
char buf_ota[OTA_BUF_LEN] = {0};
char cur_ver[64] = {0};
char *ota_ver = NULL;
uint32_t firmware_valid = 0;
int fin_stat = -1;
int ret = -1;
int reportver = 1;
#if defined(ENABLE_WATCHDOG)
struct timespec watchdog_time_keeper;
clock_gettime(CLOCK_MONOTONIC, &watchdog_time_keeper);
#endif
while (!exit_sig) {
if (1 == reportver) {
ota_ver = config_get_ota_version();
if (NULL == ota_ver) {
log_err("get OTA version failed\n");
HAL_SleepMs(2000);
continue;
}
snprintf(cur_ver, sizeof(cur_ver), "%s", ota_ver);
ret = IOT_OTA_ReportVersion(ploragw->h_ota, cur_ver);
if (0 != ret) {
log_err("report OTA version failed, ret: %d\n", ret);
HAL_SleepMs(2000);
continue;
}
reportver = 0;
}
HAL_SleepMs(1000);
if (IOT_OTA_IsFetching(ploragw->h_ota)) {
uint32_t last_percent = 0, percent = 0;
uint32_t len = 0, size_downloaded = 0, size_file = 0;
char version[32], md5sum[33];
//get OTA information
IOT_OTA_Ioctl(ploragw->h_ota, IOT_OTAG_FILE_SIZE, &size_file, 4);
memset(md5sum, 0x0, sizeof(md5sum));
IOT_OTA_Ioctl(ploragw->h_ota, IOT_OTAG_MD5SUM, md5sum, 33);
memset(version, 0x0, sizeof(version));
IOT_OTA_Ioctl(ploragw->h_ota, IOT_OTAG_VERSION, version, 32);
log_info("OTA info, new version: %s, md5sum: %s, file size: %d", version, md5sum, size_file);
if (0 == size_file) {
log_err("file size is 0, ota failed");
HAL_SleepMs(2000);
continue;
}
ret = aliot_platform_ota_start(md5sum);
if (-1 == ret) {
log_err("call ota start api failed");
HAL_SleepMs(2000);
continue;
}
do {
len = IOT_OTA_FetchYield(ploragw->h_ota, buf_ota, OTA_BUF_LEN, 1);
if (len > 0) {
ret = aliot_platform_ota_write(buf_ota, len);
if (-1 == ret) {
log_err("call ota write api failed");
break;
}
}
// get downloaded size
IOT_OTA_Ioctl(ploragw->h_ota, IOT_OTAG_FETCHED_SIZE, &size_downloaded, 4);
percent = (size_downloaded * 100) / size_file;
if ((percent == 100) || ((percent < 100) && (percent - last_percent >= 10))) {
IOT_OTA_ReportProgress(ploragw->h_ota, percent, "");
log_info("download percent:%d", percent);
last_percent = percent;
}
HAL_SleepMs(100);
#if defined(ENABLE_WATCHDOG)
if (thread_feeddog_periodically(MQTT_SYMBOL, THRD_ID_OTA, 60, 1200, &watchdog_time_keeper) < 0) {
log_err("OTA thread feeddog failed\n");
}
#endif
} while(!IOT_OTA_IsFetchFinish(ploragw->h_ota));
IOT_OTA_Ioctl(ploragw->h_ota, IOT_OTAG_CHECK_FIRMWARE, &firmware_valid, 4);
if (0 == firmware_valid) {
log_info("The firmware is invalid");
IOT_OTA_ReportProgress(ploragw->h_ota, IOT_OTAP_CHECK_FALIED, "check failed");
fin_stat = -1;
} else {
log_info("The firmware is valid");
fin_stat = 0;
HAL_SleepMs(2000);
}
ret = aliot_platform_ota_finalize(fin_stat);
if (-1 == ret) {
log_err("call ota finalize api failed");
IOT_OTA_ReportProgress(ploragw->h_ota, IOT_OTAP_BURN_FAILED, "burn failed");
} else {
log_info("report version:%s", version);
IOT_OTA_ReportVersion(ploragw->h_ota, version);
}
log_info("reinitialize OTA");
IOT_OTA_Deinit(ploragw->h_ota);
HAL_SleepMs(2000);
ploragw->h_ota = IOT_OTA_Init(ploragw->auth_info.product_key, ploragw->auth_info.device_name, ploragw->pclient);
if (NULL == ploragw->h_ota) {
log_err("initialize OTA failed");
}
reportver = 1;
}
HAL_SleepMs(2000);
#if defined(ENABLE_WATCHDOG)
if (thread_feeddog_periodically(MQTT_SYMBOL, THRD_ID_OTA, 60, 600, &watchdog_time_keeper) < 0) {
log_err("OTA thread feeddog failed\n");
}
#endif
}
#if defined(ENABLE_WATCHDOG)
thread_cancel_feeddog(MQTT_SYMBOL, THRD_ID_OTA);
#endif
return;
}
#endif
#endif
#if defined(ENABLE_MSG_CACHE)
static int set_history_msg(const char *msg, uint32_t len)
{
int ret = 0;
log_info("store msg to DB.....");
msg_set(msg, len);
return ret;
}
static int get_history_msg(char *msg_buf, uint32_t msg_buf_len, uint32_t *msg_len)
{
int ret = 0;
ret = msg_get(msg_buf, msg_buf_len, msg_len);
log_info("get msg from DB,ret=%d.....",ret);
return ret;
}
#endif
int publish_gwmp_msg_uplink(char *msg_buf, int msg_len)
{
int ret = -1;
iotx_mqtt_topic_info_t topic_msg;
iotx_lorogw_t *ploragw = &g_iotx_loragw;
char topicName[TOPIC_NAME_LEN_MAX] = {0};
memset(&topic_msg, 0x0, sizeof(iotx_mqtt_topic_info_t));
topic_msg.qos = IOTX_MQTT_QOS1;
topic_msg.retain = 0;
topic_msg.dup = 0;
topic_msg.payload = msg_buf;
topic_msg.payload_len = msg_len;
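// Route by the GWMP message type in header byte 3: values at or above
// CUSTOM_MSG_ID go to the custom uplink topic, everything else to the
// standard GWMP uplink topic.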
if ((msg_len > 3) && (msg_buf[3] >= CUSTOM_MSG_ID)) {
snprintf(topicName, sizeof(topicName), "/sys/%s/%s%s", ploragw->auth_info.product_key, ploragw->auth_info.device_name, TOPIC_CUSTOM_UPLINK);
} else {
snprintf(topicName, sizeof(topicName), "%s/%s/%s", TOPIC_GWMP_UPLINK, ploragw->auth_info.product_key, ploragw->auth_info.device_name);
}
ret = IOT_MQTT_Publish(ploragw->pclient, topicName, &topic_msg);
if (ret < 0) {
log_err("IOT_MQTT_Publish failed ret = %d", ret);
#if defined(ENABLE_MSG_CACHE)
enable_cache = 1;
set_history_msg(msg_buf, msg_len);
#endif
}
if (msg_len > GWMP_HEAD_UP_LEN) {
log_info("publish mqtt gwmp msg: %s, len: %d\n", msg_buf + GWMP_HEAD_UP_LEN, msg_len);
} else {
log_info("publish mqtt gwmp msg len: %d\n", msg_len);
}
return ret;
}
#if defined(ENABLE_MSG_CACHE)
void thread_msg_cache(void)
{
char *pdatabuf = NULL;
int byte_nb = 0;
pdatabuf = (char *)HAL_Malloc(MSG_LEN_MAX);
while (!exit_sig) {
if (1 == enable_cache) {
log_info("msg_cache working, decide not to get msg from DB");
HAL_SleepMs(60000);
continue;
}
log_info("msg_cache not working, begin to get history msg from DB.");
if(get_history_msg(pdatabuf, MSG_LEN_MAX, (uint32_t *)&byte_nb) >= 0) {
publish_gwmp_msg_uplink(pdatabuf, byte_nb);
log_info("pub history msg cached in DB, msg_lenth=%d", byte_nb);
} else {
log_info("no history msg in DB!!!");
HAL_SleepMs(60000);
}
HAL_SleepMs(500);
}
HAL_Free(pdatabuf);
return;
}
#endif
int publish_gwconfig_upload(void)
{
int ret = -1;
iotx_mqtt_topic_info_t topic_msg;
int data_len = 0;
iotx_lorogw_t *ploragw = &g_iotx_loragw;
char topicName[TOPIC_NAME_LEN_MAX] = {0};
memset(ploragw->ppub_msg, 0x0, MSG_LEN_MAX);
data_len = aliot_gw_get_global_conf((unsigned char *)ploragw->ppub_msg, MSG_LEN_MAX);
if (data_len >= MSG_LEN_MAX) {
data_len = MSG_LEN_MAX - 1;
}
if (data_len > 0) {
memset(&topic_msg, 0x0, sizeof(iotx_mqtt_topic_info_t));
topic_msg.qos = IOTX_MQTT_QOS1;
topic_msg.retain = 0;
topic_msg.dup = 0;
topic_msg.payload = ploragw->ppub_msg;
topic_msg.payload_len = data_len;
snprintf(topicName, sizeof(topicName), "%s/%s/%s", TOPIC_GWCONFIG_UPLOAD, ploragw->auth_info.product_key, ploragw->auth_info.device_name);
ret = IOT_MQTT_Publish(ploragw->pclient, topicName, &topic_msg);
if (ret < 0) {
log_err("IOT_MQTT_Publish failed ret = %d", ret);
}
log_info("publish mqtt msg: %s, len: %d\n", ploragw->ppub_msg, data_len);
}
return ret;
}
int publish_deviceinfo_upload()
{
int ret = -1;
iotx_mqtt_topic_info_t topic_msg;
aliot_gw_device_info_t devinfo;
iotx_lorogw_t *ploragw = &g_iotx_loragw;
char topicName[TOPIC_NAME_LEN_MAX] = {0};
char *ota_ver = NULL;
// call gateway get deviceinfo api
memset(&devinfo, 0x0, sizeof(devinfo));
ret = aliot_gw_get_device_info(&devinfo);
if (0 != ret) {
log_err("call gatewway get deviceinfo api error, ret: %d\n", ret);
return -1;
}
memset(ploragw->ppub_msg, 0x0, MSG_LEN_MAX);
ota_ver = config_get_ota_version();
if (NULL == ota_ver) {
log_err("get OTA version failed\n");
sprintf(ploragw->ppub_msg, uploadDeviceinfo, devinfo.gateway_eui, devinfo.model,
devinfo.manufacturer, devinfo.hw_version, devinfo.sw_version, "");
} else {
sprintf(ploragw->ppub_msg, uploadDeviceinfo, devinfo.gateway_eui, devinfo.model,
devinfo.manufacturer, devinfo.hw_version, devinfo.sw_version, ota_ver);
}
memset(&topic_msg, 0x0, sizeof(iotx_mqtt_topic_info_t));
topic_msg.qos = IOTX_MQTT_QOS1;
topic_msg.retain = 0;
topic_msg.dup = 0;
topic_msg.payload = ploragw->ppub_msg;
topic_msg.payload_len = strlen(ploragw->ppub_msg);
snprintf(topicName, sizeof(topicName), "%s/%s/%s", TOPIC_DEVICEINFO_UPLOAD, ploragw->auth_info.product_key, ploragw->auth_info.device_name);
ret = IOT_MQTT_Publish(ploragw->pclient, topicName, &topic_msg);
if (ret < 0) {
log_err("IOT_MQTT_Publish failed ret = %d", ret);
}
log_info("publish mqtt msg: %s\n", ploragw->ppub_msg);
return ret;
}
/**
* @brief This is a callback function for TOPIC_GWMP_DOWNLINK topic proc
*
* @return none
* @see none.
* @note none.
*/
static void callback_gwmp_msg_downlink(void *pcontext, void *pclient, iotx_mqtt_event_msg_pt msg)
{
uint32_t msg_len;
iotx_mqtt_topic_info_pt ptopic_info = (iotx_mqtt_topic_info_pt) msg->msg;
iotx_lorogw_t *ploragw = &g_iotx_loragw;
int ret = 0;
if (ptopic_info->payload_len < MSG_LEN_MAX - 1) {
msg_len = ptopic_info->payload_len;
} else {
log_info("message is too long, truncate it");
msg_len = MSG_LEN_MAX - 1;
}
// copy the message to mqtt msg buffer
memset(ploragw->prev_msg, 0x0, MSG_LEN_MAX);
memcpy(ploragw->prev_msg, ptopic_info->payload, msg_len);
if (msg_len > GWMP_HEAD_DOWN_LEN) {
log_info("received mqtt gwmp msg: %s, len: %d\n", ploragw->prev_msg + GWMP_HEAD_DOWN_LEN, msg_len);
} else {
log_info("received mqtt gwmp msg len: %d\n", msg_len);
}
#ifdef ENABLE_ADVANCED_OTA
HAL_MutexLock(ploragw->check_mutex);
if(ploragw->checkCnt > 0) {
//uplink is good && downlink is good too
static int ping_cycles = 0;
ping_cycles ++;
if(ping_cycles > 5) {
if( mqtt_notify_update_checkout_result(0, "mqtt connected to the server") < 0) {
log_err("failed to report mqtt running state to update-daemon!!");
}
}
}
ploragw->checkCnt = 0;
HAL_MutexUnlock(ploragw->check_mutex);
#else
HAL_MutexLock(ploragw->check_mutex);
ploragw->checkCnt = 0;
HAL_MutexUnlock(ploragw->check_mutex);
#endif
#if defined(ENABLE_ABP_NODES)
ret = abp_file_conf(ploragw->prev_msg, msg_len);
if (ret == 1) {
HAL_MutexLock(ploragw->abp_mutex);
ploragw->abpdl_flag = 1;
HAL_MutexUnlock(ploragw->abp_mutex);
log_info("received abp file config msg, set abpdl_flag: %d", ploragw->abpdl_flag);
return;
}
#endif
#if defined(ENABLE_MONITOR)
if (msg_len > GWMP_HEAD_DOWN_LEN && ploragw->prev_msg[3] == CUSTOM_MON_MSG_DOWN_ID) {
ret = mqtt_notify_monitor_gwmp_downlink_msg(ploragw->prev_msg + GWMP_HEAD_DOWN_LEN);
} else {
#endif
// send GWMP message to gateway
ret = send(ploragw->sock_down, ploragw->prev_msg, msg_len, 0);
if (ret <= 0) {
log_err("send gwmp msg error: %d\n", errno);
} else {
log_info("send gwmp msg\n");
}
#if defined(ENABLE_MONITOR)
}
#endif
return;
}
/**
* @brief This is a callback function for TOPIC_GWCONFIG_DOWNLOAD topic proc
*
* @return none
* @see none.
* @note none.
*/
static void callback_gwconfig_download(void *pcontext, void *pclient, iotx_mqtt_event_msg_pt msg)
{
uint32_t msg_len;
iotx_mqtt_topic_info_pt ptopic_info = (iotx_mqtt_topic_info_pt) msg->msg;
iotx_lorogw_t *ploragw = &g_iotx_loragw;
int ret = 0;
if (ptopic_info->payload_len < MSG_LEN_MAX - 1) {
msg_len = ptopic_info->payload_len;
} else {
log_info("message is too long, truncate it");
msg_len = MSG_LEN_MAX - 1;
}
// copy the message to mqtt msg buffer
memset(ploragw->prev_msg, 0x0, MSG_LEN_MAX);
memcpy(ploragw->prev_msg, ptopic_info->payload, msg_len);
log_info("received mqtt msg: %s, len: %d\n", ploragw->prev_msg, msg_len);
// call gateway update global api
ret = aliot_gw_update_global_conf((unsigned char *)ploragw->prev_msg, msg_len);
if (0 != ret) {
log_err("call gatewway update global config api error, ret: %d\n", ret);
}
return;
}
/**
* @brief This is a callback function for TOPIC_GWCONFIG_GET topic proc
*
* @return none
* @see none.
* @note none.
*/
static void callback_gwconfig_get(void *pcontext, void *pclient, iotx_mqtt_event_msg_pt msg)
{
uint32_t msg_len;
iotx_mqtt_topic_info_pt ptopic_info = (iotx_mqtt_topic_info_pt) msg->msg;
iotx_lorogw_t *ploragw = &g_iotx_loragw;
if (ptopic_info->payload_len < MSG_LEN_MAX - 1) {
msg_len = ptopic_info->payload_len;
} else {
log_info("message is too long, truncate it");
msg_len = MSG_LEN_MAX - 1;
}
// copy the message to mqtt msg buffer
memset(ploragw->prev_msg, 0x0, MSG_LEN_MAX);
memcpy(ploragw->prev_msg, ptopic_info->payload, msg_len);
log_info("received mqtt msg: %s\n", ploragw->prev_msg);
// publish gw config to server
publish_gwconfig_upload();
return;
}
/**
* @brief This is a callback function for TOPIC_DEVICEINFO_GET topic proc
*
* @return none
* @see none.
* @note none.
*/
static void callback_deviceinfo_get(void *pcontext, void *pclient, iotx_mqtt_event_msg_pt msg)
{
uint32_t msg_len;
iotx_mqtt_topic_info_pt ptopic_info = (iotx_mqtt_topic_info_pt) msg->msg;
iotx_lorogw_t *ploragw = &g_iotx_loragw;
if (ptopic_info->payload_len < MSG_LEN_MAX - 1) {
msg_len = ptopic_info->payload_len;
} else {
log_info("message is too long, truncate it");
msg_len = MSG_LEN_MAX - 1;
}
// copy the message to mqtt msg buffer
memset(ploragw->prev_msg, 0x0, MSG_LEN_MAX);
memcpy(ploragw->prev_msg, ptopic_info->payload, msg_len);
log_info("received mqtt msg: %s\n", ploragw->prev_msg);
// publish deviceinfo to server
publish_deviceinfo_upload();
return;
}
/**
* @brief This is a callback function for TOPIC_GW_RESET topic proc
*
* @return none
* @see none.
* @note none.
*/
static void callback_gw_reset(void *pcontext, void *pclient, iotx_mqtt_event_msg_pt msg)
{
uint32_t msg_len;
iotx_mqtt_topic_info_pt ptopic_info = (iotx_mqtt_topic_info_pt) msg->msg;
iotx_lorogw_t *ploragw = &g_iotx_loragw;
int ret = 0;
if (ptopic_info->payload_len < MSG_LEN_MAX - 1) {
msg_len = ptopic_info->payload_len;
} else {
log_info("message is too long, truncate it");
msg_len = MSG_LEN_MAX - 1;
}
// copy the message to mqtt msg buffer
memset(ploragw->prev_msg, 0x0, MSG_LEN_MAX);
memcpy(ploragw->prev_msg, ptopic_info->payload, msg_len);
log_info("received mqtt msg: %s\n", ploragw->prev_msg);
#if defined(ENABLE_MONITOR)
mqtt_send_monitor_alarm(MON_ALARM_REBOOT, "callback_gw_reset");
#endif
// call gateway reset api
ret = aliot_gw_reset();
if (0 != ret) {
log_err("call gateway reset api error, ret: %d\n", ret);
}
return;
}
#if defined(ENABLE_REMOTE_CTRL_SSH)
static int open_remote_debug(const char *path)
{
struct stat st;
int ret = 0;
char buf[512] = {0};
if(!path)
return -1;
ret = stat(path, &st);
if(ret == -1){
log_err("Failed to stat %s\n", path);
return -1;
}
if(!S_ISREG(st.st_mode)){
log_err("%s is not a file\n", path);
return -2;
}
snprintf(buf, sizeof(buf), "%s &>/dev/null &", path);
ret = system(buf);
return ret;
}
int close_remote_debug(void)
{
int ret = 0;
char buf[256] = {0};
system("systemctl stop sshd.socket");
sleep(1);
snprintf(buf, sizeof(buf), "kill -2 `cat /tmp/sshd_agent.pid`");
sleep(1);
ret = system(buf);
return ret;
}
static void callback_remote_ctrl_ssh(void *pcontext, void *pclient, iotx_mqtt_event_msg_pt msg)
{
char buff[FILENAME_MAX+1] = {0};
char abs_path[FILENAME_MAX] = {0};
uint32_t msg_len;
iotx_mqtt_topic_info_pt ptopic_info = (iotx_mqtt_topic_info_pt) msg->msg;
iotx_lorogw_t *ploragw = &g_iotx_loragw;
int read_len = 0;
char *tmp = NULL;
int ret = 0;
if (ptopic_info->payload_len < MSG_LEN_MAX - 1) {
msg_len = ptopic_info->payload_len;
} else {
log_info("message is too long, truncate it");
msg_len = MSG_LEN_MAX - 1;
}
// copy the message to mqtt msg buffer
memset(ploragw->prev_msg, 0x0, MSG_LEN_MAX);
memcpy(ploragw->prev_msg, ptopic_info->payload, msg_len);
log_info("received mqtt msg: %s\n", ploragw->prev_msg);
//switch ssh
if (0 == strcmp(ploragw->prev_msg, "on")) {
ret = aliot_platform_ssh_enable(1);
} else if (0 == strcmp(ploragw->prev_msg, "off")) {
ret = aliot_platform_ssh_enable(0);
}
if (0 != ret) {
log_err("call ssh enable api error, ret: %d\n", ret);
return;
}
//get sshd_agent path
memset(buff, 0, FILENAME_MAX+1);
read_len = readlink("/proc/self/exe", buff, FILENAME_MAX);
if(read_len <= 0){
log_err("sshd_agent(remote debug) bin file not exsist.\n");
return;
}
buff[read_len] = 0;
tmp = strrchr(buff, '/');
if(tmp){
buff[tmp - buff]='\0';
}
snprintf(abs_path, FILENAME_MAX, "%s/%s", buff, "sshd_agent");
    // switch remote debug
if (0 == strcmp(ploragw->prev_msg, "on")) {
log_info("opening remote debug...\n");
ret = open_remote_debug(abs_path);
if(ret == 0){
log_info("open remote debug process success.\n");
}else{
log_err("Failed to open remote debug process: %d:%s.\n", ret, strerror(errno));
}
} else if (0 == strcmp(ploragw->prev_msg, "off")) {
log_info("closing remote debug...\n");
ret = close_remote_debug();
if(ret == 0){
log_info("close remote debug process success.\n");
}else{
log_err("Failed to close remote debug process: %d.\n", ret);
}
}
if (0 != ret) {
log_err("call remote debug enable api error, ret: %d\n", ret);
}
return;
}
#endif
#if defined(ENABLE_REMOTE_CTRL_UART)
static void callback_remote_ctrl_uart(void *pcontext, void *pclient, iotx_mqtt_event_msg_pt msg)
{
uint32_t msg_len;
iotx_mqtt_topic_info_pt ptopic_info = (iotx_mqtt_topic_info_pt) msg->msg;
iotx_lorogw_t *ploragw = &g_iotx_loragw;
int ret = 0;
if (ptopic_info->payload_len < MSG_LEN_MAX - 1) {
msg_len = ptopic_info->payload_len;
} else {
log_info("message is too long, truncate it");
msg_len = MSG_LEN_MAX - 1;
}
// copy the message to mqtt msg buffer
memset(ploragw->prev_msg, 0x0, MSG_LEN_MAX);
memcpy(ploragw->prev_msg, ptopic_info->payload, msg_len);
log_info("received mqtt msg: %s\n", ploragw->prev_msg);
if (0 == strcmp(ploragw->prev_msg, "on")) {
ret = aliot_platform_uart_enable(1);
} else if (0 == strcmp(ploragw->prev_msg, "off")) {
ret = aliot_platform_uart_enable(0);
}
if (0 != ret) {
log_err("call uart enable api error, ret: %d\n", ret);
}
return;
}
#endif
/**
* @brief This is a callback function for TOPIC_LOG_UPLOAD topic proc
*
* @return none
* @see none.
* @note none.
*/
#if defined(ENABLE_REMOTE_LOG)
static void callback_filelog_upload(void *pcontext, void *pclient, iotx_mqtt_event_msg_pt msg)
{
uint32_t msg_len;
iotx_mqtt_topic_info_pt ptopic_info = (iotx_mqtt_topic_info_pt) msg->msg;
iotx_lorogw_t *ploragw = &g_iotx_loragw;
if (ptopic_info->payload_len < MSG_LEN_MAX - 1) {
msg_len = ptopic_info->payload_len;
} else {
log_info("message is too long, truncate it");
msg_len = MSG_LEN_MAX - 1;
}
// copy the message to mqtt msg buffer
memset(ploragw->prev_msg, 0x0, MSG_LEN_MAX);
memcpy(ploragw->prev_msg, ptopic_info->payload, msg_len);
log_info("received mqtt msg: %s, upload filelog\n", ploragw->prev_msg);
// set upload filelog flag
HAL_MutexLock(ploragw->log_mutex);
ploragw->uploadlog_flag = 1;
HAL_MutexUnlock(ploragw->log_mutex);
log_info("set uploadlog flag: %d\n", ploragw->uploadlog_flag);
return;
}
#endif
void thread_gwmp_msg_uplink(void)
{
struct sockaddr_storage dist_addr;
socklen_t addr_len = sizeof(dist_addr);
char *pdatabuf = NULL;
int byte_nb = 0;
fd_set sets;
int flags;
int ret = -1;
int rc = 0;
struct timeval timeout;
iotx_lorogw_t *ploragw = &g_iotx_loragw;
#if defined(ENABLE_WATCHDOG)
struct timespec watchdog_time_keeper;
clock_gettime(CLOCK_MONOTONIC, &watchdog_time_keeper);
#endif
pdatabuf = (char *)HAL_Malloc(MSG_LEN_MAX);
if (NULL == pdatabuf) {
log_err("malloc data buf error");
return;
}
if ((flags = fcntl(ploragw->sock_up, F_GETFL, 0)) < 0) {
log_err("fcntl F_GETFL error: %d\n", errno);
HAL_Free(pdatabuf);
return;
}
if (fcntl(ploragw->sock_up, F_SETFL, flags | O_NONBLOCK) < 0) {
log_err("fcntl F_SETFL error: %d\n", errno);
HAL_Free(pdatabuf);
return;
}
/* wait to receive a gateway UDP request packet */
log_info("waiting to receive a gw UDP request\n");
while (!native_exit_sig) {
FD_ZERO(&sets);
FD_SET(ploragw->sock_up, &sets);
timeout.tv_sec = 3;
timeout.tv_usec = 0;
ret = select(ploragw->sock_up + 1, &sets, NULL, NULL, &timeout);
if (ret > 0) {
if (FD_ISSET(ploragw->sock_up, &sets)) {
memset(pdatabuf, 0x0, MSG_LEN_MAX);
byte_nb = recvfrom(ploragw->sock_up, pdatabuf, MSG_LEN_MAX - 1, 0, (struct sockaddr *)&dist_addr, &addr_len);
if (byte_nb > 0) {
pdatabuf[byte_nb] = 0;
log_info("received gwmp msg len: %d\n", byte_nb);
// publish GWMP message to server
publish_gwmp_msg_uplink(pdatabuf, byte_nb);
#if defined(ENABLE_ABP_NODES)
// send to native lora server
if (abp_out_enable != 0) {
rc = send(ploragw->native_sock_up, pdatabuf, byte_nb, 0);
if (rc <= 0) {
log_info("send native up gwmp msg: %d\n", errno);
} else {
log_info("send native up gwmp msg\n");
}
}
#endif
} else if (byte_nb == 0) {
log_err("connection is closed");
HAL_SleepMs(100);
} else {
if (errno != EAGAIN) {
log_err("recvfrom gwmp msg error: %d\n", errno);
}
HAL_SleepMs(100);
}
}
} else if (0 == ret) {
// select timeout
} else {
log_err("select-recv gwmp socket error: %d\n", errno);
HAL_SleepMs(100);
}
#if defined(ENABLE_WATCHDOG)
if(thread_feeddog_periodically(MQTT_SYMBOL, THRD_ID_UPLINK, 30, 60, &watchdog_time_keeper) < 0) {
log_err("uplink thread feeddog failed\n");
}
#endif
}
HAL_Free(pdatabuf);
#if defined(ENABLE_WATCHDOG)
thread_cancel_feeddog(MQTT_SYMBOL, THRD_ID_UPLINK);
#endif
return;
}
#if defined(ENABLE_MONITOR)
int loragw_ipc_monitor_msg_uplink_send(const char * msg_body);
static int mon_util_strtol(char *str, uint32_t *in_val)
{
    long long val;
    errno = 0;
    val = strtoll(str, NULL, 16);
if ((errno == ERANGE && (val == LLONG_MAX || val == LLONG_MIN))
|| (errno != 0 && val == 0)) {
perror("strtoll");
return -1;
}
*in_val = val;
return 0;
}
#define PROTOCOL_VERSION 2 /* v1.3 */
#define GWMP_HEAD_UP_LEN 12
static char g_mqtt_dbus_msg[MSG_LEN_MAX];
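/*
 * GWMP uplink header layout produced by mqtt_fill_monitor_gwmp_header()
 * (GWMP_HEAD_UP_LEN = 12 bytes):
 *   byte 0      protocol version (PROTOCOL_VERSION)
 *   bytes 1-2   random token
 *   byte 3      message id (CUSTOM_MON_MSG_UP_ID)
 *   bytes 4-7   first half of the gateway EUI (parsed from hex)
 *   bytes 8-11  second half of the gateway EUI (parsed from hex)
 */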
static int mqtt_fill_monitor_gwmp_header(char *msg)
{
char eui_h[9] = {'\0'};
char eui_l[9] = {'\0'};
uint32_t eui_h_val = 0;
uint32_t eui_l_val = 0;
aliot_gw_device_info_t devinfo;
/* fill header */
msg[0] = PROTOCOL_VERSION;
msg[1] = (uint8_t)rand();
msg[2] = (uint8_t)rand();
msg[3] = CUSTOM_MON_MSG_UP_ID;
aliot_gw_get_device_info(&devinfo);
strncpy(eui_h, devinfo.gateway_eui, 8);
strncpy(eui_l, devinfo.gateway_eui + 8, 8);
mon_util_strtol(eui_h, &eui_h_val);
mon_util_strtol(eui_l, &eui_l_val);
*(uint32_t *)(msg + 4) = eui_h_val;
*(uint32_t *)(msg + 8) = eui_l_val;
return 0;
}
int loragw_ipc_monitor_msg_uplink_send(const char * msg_body)
{
if(!msg_body || strlen(msg_body)==0 )
return -1;
int byte_nb = strlen(msg_body);
log_debug("received dbus msg len: %d\n", byte_nb);
if (byte_nb > MSG_LEN_MAX - GWMP_HEAD_UP_LEN) {
log_err("dbus msg = %d too long\n");
return -1;
}
memset(g_mqtt_dbus_msg, '\0', MSG_LEN_MAX);
strncpy(g_mqtt_dbus_msg + GWMP_HEAD_UP_LEN, msg_body, byte_nb);
mqtt_fill_monitor_gwmp_header(g_mqtt_dbus_msg);
// publish GWMP full message to server
return publish_gwmp_msg_uplink(g_mqtt_dbus_msg, byte_nb + GWMP_HEAD_UP_LEN);
}
#endif
int create_gw_upd_msg(void)
{
int i;
pthread_t thrid_up;
uint16_t upd_port_up;
uint16_t upd_port_down;
char udp_port[16] = {0};
iotx_lorogw_t *ploragw = &g_iotx_loragw;
struct addrinfo hints;
/* store result of getaddrinfo */
struct addrinfo *result;
/* pointer to move into *result data */
struct addrinfo *q;
/* prepare hints to open network sockets */
memset(&hints, 0, sizeof hints);
/* should handle IP v4 or v6 automatically */
hints.ai_family = AF_UNSPEC;
hints.ai_socktype = SOCK_DGRAM;
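    /*
     * Two local UDP sockets are created towards the packet forwarder:
     * sock_up is bound to the uplink port and drained by
     * thread_gwmp_msg_uplink() via recvfrom(), while sock_down is
     * connect()ed to the downlink port so the MQTT callbacks can push
     * downlinks with a plain send().
     */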
    // call gateway get udp port up
upd_port_up = aliot_gw_get_udp_port_up();
log_info("upd_port_up: %u\n", upd_port_up);
sprintf(udp_port, "%d", upd_port_up);
    /* look for UDP local address / upstream port */
i = getaddrinfo(UDP_LOCALHOST_ADDR, udp_port, &hints, &result);
if (i != 0) {
log_err("upstream getaddrinfo returned, error: %s\n", gai_strerror(i));
return -1;
}
/* try to open socket and bind to it */
for (q = result; q != NULL; q = q->ai_next) {
ploragw->sock_up = socket(q->ai_family, q->ai_socktype, q->ai_protocol);
if (ploragw->sock_up == -1) {
/* socket failed, try next field */
continue;
} else {
i = bind(ploragw->sock_up, q->ai_addr, q->ai_addrlen);
if (i == -1) {
log_err("bind up socket, error: %s\n", gai_strerror(i));
close(ploragw->sock_up);
ploragw->sock_up = -1;
/* bind failed, try next field */
continue;
} else {
/* success, get out of loop */
break;
}
}
}
if (q == NULL) {
log_err("failed to open socket or to bind to it\n");
freeaddrinfo(result);
return -1;
}
freeaddrinfo(result);
    // call gateway get udp port down
upd_port_down = aliot_gw_get_udp_port_down();
log_info("upd_port_down: %u\n", upd_port_down);
sprintf(udp_port, "%d", upd_port_down);
    /* look for UDP local address / downstream port */
i = getaddrinfo(UDP_LOCALHOST_ADDR, udp_port, &hints, &result);
if (i != 0) {
log_err("downstream getaddrinfo returned, error: %s\n", gai_strerror(i));
close(ploragw->sock_up);
ploragw->sock_up = -1;
return -1;
}
/* try to open socket and connect to it */
for (q = result; q != NULL; q = q->ai_next) {
ploragw->sock_down = socket(q->ai_family, q->ai_socktype, q->ai_protocol);
if (ploragw->sock_down == -1) {
/* socket failed, try next field */
continue;
} else {
i = connect(ploragw->sock_down, q->ai_addr, q->ai_addrlen);
if (i == -1) {
log_err("connect down socket, error: %s\n", gai_strerror(i));
close(ploragw->sock_down);
ploragw->sock_down = -1;
/* connect failed, try next field */
continue;
} else {
/* success, get out of loop */
break;
}
}
}
if (q == NULL) {
log_err("failed to open socket or to connect to it\n");
close(ploragw->sock_up);
ploragw->sock_up = -1;
freeaddrinfo(result);
return -1;
}
freeaddrinfo(result);
i = pthread_create(&thrid_up, NULL, (void * ( *)(void *))thread_gwmp_msg_uplink, NULL);
if (i != 0) {
log_err("impossible to create uplink thread\n");
close(ploragw->sock_down);
close(ploragw->sock_up);
ploragw->sock_up = -1;
ploragw->sock_down = -1;
return -1;
}
return 0;
}
#if defined(ENABLE_ABP_NODES)
void thread_native_msg_downlink(void)
{
struct sockaddr_storage dist_addr;
socklen_t addr_len = sizeof(dist_addr);
char *pdatabuf = NULL;
int byte_nb = 0;
fd_set sets;
int flags;
int ret = -1;
int rc = 0;
struct timeval timeout;
iotx_lorogw_t *ploragw = &g_iotx_loragw;
#if defined(ENABLE_WATCHDOG)
struct timespec watchdog_time_keeper;
clock_gettime(CLOCK_MONOTONIC, &watchdog_time_keeper);
#endif
pdatabuf = (char *)HAL_Malloc(MSG_LEN_MAX);
if (NULL == pdatabuf) {
log_err("malloc data buf error");
return;
}
if ((flags = fcntl(ploragw->native_sock_down, F_GETFL, 0)) < 0) {
log_err("fcntl F_GETFL error: %d\n", errno);
HAL_Free(pdatabuf);
return;
}
if (fcntl(ploragw->native_sock_down, F_SETFL, flags | O_NONBLOCK) < 0) {
log_err("fcntl F_SETFL error: %d\n", errno);
HAL_Free(pdatabuf);
return;
}
/* wait to receive native UDP down packet */
log_info("waiting to receive native UDP down packet\n");
while (!native_exit_sig) {
FD_ZERO(&sets);
FD_SET(ploragw->native_sock_down, &sets);
timeout.tv_sec = 3;
timeout.tv_usec = 0;
ret = select(ploragw->native_sock_down + 1, &sets, NULL, NULL, &timeout);
if (ret > 0) {
if (FD_ISSET(ploragw->native_sock_down, &sets)) {
memset(pdatabuf, 0x0, MSG_LEN_MAX);
byte_nb = recvfrom(ploragw->native_sock_down, pdatabuf, MSG_LEN_MAX - 1, 0, (struct sockaddr *)&dist_addr, &addr_len);
if (byte_nb > 0) {
pdatabuf[byte_nb] = 0;
if (byte_nb > GWMP_HEAD_DOWN_LEN) {
log_info("received native down gwmp msg: %s, len: %d\n", pdatabuf + GWMP_HEAD_DOWN_LEN, byte_nb);
} else {
log_info("received native down gwmp msg len: %d\n", byte_nb);
}
lora_ns_server_exist = 1;
// send native GWMP message to gateway
rc = send(ploragw->sock_down, pdatabuf, byte_nb, 0);
if (rc <= 0) {
log_err("send native down gwmp msg error: %d\n", errno);
} else {
log_info("send native down gwmp msg\n");
}
} else if (byte_nb == 0) {
log_err("connection is closed");
HAL_SleepMs(100);
} else {
if (errno != EAGAIN) {
log_err("recvfrom native gwmp msg error: %d\n", errno);
}
HAL_SleepMs(100);
}
}
} else if (0 == ret) {
// select timeout
} else {
log_err("select-recv native down gwmp socket error: %d\n", errno);
HAL_SleepMs(100);
}
#if defined(ENABLE_WATCHDOG)
if(thread_feeddog_periodically(MQTT_SYMBOL, THRD_ID_NAT_DOWNLINK, 30, 60, &watchdog_time_keeper) < 0) {
log_err("native downlink thread feeddog failed\n");
}
#endif
}
HAL_Free(pdatabuf);
#if defined(ENABLE_WATCHDOG)
thread_cancel_feeddog(MQTT_SYMBOL, THRD_ID_NAT_DOWNLINK);
#endif
return;
}
int create_native_upd_msg(void)
{
int i;
pthread_t thrid_down;
uint16_t upd_port_up = 28888;
uint16_t upd_port_down = 29999;
char udp_port[16] = {0};
iotx_lorogw_t *ploragw = &g_iotx_loragw;
struct addrinfo hints;
/* store result of getaddrinfo */
struct addrinfo *result;
/* pointer to move into *result data */
struct addrinfo *q;
/* prepare hints to open network sockets */
memset(&hints, 0, sizeof hints);
/* should handle IP v4 or v6 automatically */
hints.ai_family = AF_UNSPEC;
hints.ai_socktype = SOCK_DGRAM;
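    /*
     * For the native (ABP) path the roles are mirrored: native_sock_up is
     * connect()ed to the local LoRa network server's uplink port (28888
     * here) so uplinks can also be forwarded to it from
     * thread_gwmp_msg_uplink(), and native_sock_down is bound to the
     * downlink port (29999) and drained by thread_native_msg_downlink().
     */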
    /* look for UDP local address / upstream port */
sprintf(udp_port, "%d", upd_port_up);
i = getaddrinfo(UDP_LOCALHOST_ADDR, udp_port, &hints, &result);
if (i != 0) {
log_err("upstream getaddrinfo returned, error: %s\n", gai_strerror(i));
return -1;
}
/* try to open socket and bind to it */
for (q = result; q != NULL; q = q->ai_next) {
ploragw->native_sock_up = socket(q->ai_family, q->ai_socktype, q->ai_protocol);
if (ploragw->native_sock_up == -1) {
/* socket failed, try next field */
continue;
} else {
i = connect(ploragw->native_sock_up, q->ai_addr, q->ai_addrlen);
if (i == -1) {
log_err("connect up socket, error: %s\n", gai_strerror(i));
close(ploragw->native_sock_up);
ploragw->native_sock_up = -1;
/* connect failed, try next field */
continue;
} else {
/* success, get out of loop */
break;
}
}
}
if (q == NULL) {
log_err("failed to open socket or to connect to it\n");
freeaddrinfo(result);
return -1;
}
freeaddrinfo(result);
    /* look for UDP local address / downstream port */
sprintf(udp_port, "%d", upd_port_down);
i = getaddrinfo(UDP_LOCALHOST_ADDR, udp_port, &hints, &result);
if (i != 0) {
log_err("downstream getaddrinfo returned, error: %s\n", gai_strerror(i));
close(ploragw->native_sock_up);
ploragw->native_sock_up = -1;
return -1;
}
/* try to open socket and connect to it */
for (q = result; q != NULL; q = q->ai_next) {
ploragw->native_sock_down = socket(q->ai_family, q->ai_socktype, q->ai_protocol);
if (ploragw->native_sock_down == -1) {
/* socket failed, try next field */
continue;
} else {
i = bind(ploragw->native_sock_down, q->ai_addr, q->ai_addrlen);
if (i == -1) {
log_err("bind down socket, error: %s\n", gai_strerror(i));
close(ploragw->native_sock_down);
ploragw->native_sock_down = -1;
/* bind failed, try next field */
continue;
} else {
/* success, get out of loop */
break;
}
}
}
if (q == NULL) {
log_err("failed to open socket or to bind to it\n");
close(ploragw->native_sock_up);
ploragw->native_sock_up = -1;
freeaddrinfo(result);
return -1;
}
freeaddrinfo(result);
i = pthread_create(&thrid_down, NULL, (void * ( *)(void *))thread_native_msg_downlink, NULL);
if (i != 0) {
log_err("impossible to create native down thread\n");
close(ploragw->native_sock_down);
close(ploragw->native_sock_up);
ploragw->native_sock_up = -1;
ploragw->native_sock_down = -1;
return -1;
}
return 0;
}
#endif
void unsubscribe_topics(void)
{
iotx_lorogw_t *ploragw = &g_iotx_loragw;
if (NULL != ploragw->ptpc_gwmp_dl) {
IOT_MQTT_Unsubscribe(ploragw->pclient, ploragw->ptpc_gwmp_dl);
HAL_Free(ploragw->ptpc_gwmp_dl);
}
if (NULL != ploragw->ptpc_custom_dl) {
IOT_MQTT_Unsubscribe(ploragw->pclient, ploragw->ptpc_custom_dl);
HAL_Free(ploragw->ptpc_custom_dl);
}
if (NULL != ploragw->ptpc_gwconf_dl) {
IOT_MQTT_Unsubscribe(ploragw->pclient, ploragw->ptpc_gwconf_dl);
HAL_Free(ploragw->ptpc_gwconf_dl);
}
if (NULL != ploragw->ptpc_gwconf_get) {
IOT_MQTT_Unsubscribe(ploragw->pclient, ploragw->ptpc_gwconf_get);
HAL_Free(ploragw->ptpc_gwconf_get);
}
if (NULL != ploragw->ptpc_devinfo_get) {
IOT_MQTT_Unsubscribe(ploragw->pclient, ploragw->ptpc_devinfo_get);
HAL_Free(ploragw->ptpc_devinfo_get);
}
if (NULL != ploragw->ptpc_gw_reset) {
IOT_MQTT_Unsubscribe(ploragw->pclient, ploragw->ptpc_gw_reset);
HAL_Free(ploragw->ptpc_gw_reset);
}
if (NULL != ploragw->ptpc_log_upload) {
IOT_MQTT_Unsubscribe(ploragw->pclient, ploragw->ptpc_log_upload);
HAL_Free(ploragw->ptpc_log_upload);
}
if (NULL != ploragw->ptpc_log_upload_user) {
IOT_MQTT_Unsubscribe(ploragw->pclient, ploragw->ptpc_log_upload_user);
HAL_Free(ploragw->ptpc_log_upload_user);
}
if (NULL != ploragw->ptpc_ctrl_ssh) {
IOT_MQTT_Unsubscribe(ploragw->pclient, ploragw->ptpc_ctrl_ssh);
HAL_Free(ploragw->ptpc_ctrl_ssh);
}
if (NULL != ploragw->ptpc_ctrl_ssh_user) {
IOT_MQTT_Unsubscribe(ploragw->pclient, ploragw->ptpc_ctrl_ssh_user);
HAL_Free(ploragw->ptpc_ctrl_ssh_user);
}
if (NULL != ploragw->ptpc_ctrl_uart) {
IOT_MQTT_Unsubscribe(ploragw->pclient, ploragw->ptpc_ctrl_uart);
HAL_Free(ploragw->ptpc_ctrl_uart);
}
if (NULL != ploragw->ptpc_ctrl_uart_user) {
IOT_MQTT_Unsubscribe(ploragw->pclient, ploragw->ptpc_ctrl_uart_user);
HAL_Free(ploragw->ptpc_ctrl_uart_user);
}
return;
}
int subscribe_topics(void)
{
iotx_lorogw_t *ploragw = &g_iotx_loragw;
int ret = -1, ret1 = -1;
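    /*
     * Topic strings mix three conventions: "<topic>/<product_key>/<device_name>"
     * for the GWMP/config/reset topics, "/sys/<pk>/<dn><suffix>" for the custom
     * downlink, and "/<pk>/<dn>[/user]<suffix>" for log upload and ssh/uart
     * control. Failures recorded in ret are fatal and abort setup; the optional
     * topics only log the error via ret1.
     */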
ploragw->ptpc_gwmp_dl = (char *)HAL_Malloc(TOPIC_NAME_LEN_MAX);
if (NULL != ploragw->ptpc_gwmp_dl) {
snprintf(ploragw->ptpc_gwmp_dl, TOPIC_NAME_LEN_MAX, "%s/%s/%s", TOPIC_GWMP_DOWNLINK, ploragw->auth_info.product_key, ploragw->auth_info.device_name);
ret = IOT_MQTT_Subscribe(ploragw->pclient, ploragw->ptpc_gwmp_dl, IOTX_MQTT_QOS1, callback_gwmp_msg_downlink, NULL);
if (ret < 0) {
log_err("subscribe topic:%s error\n", TOPIC_GWMP_DOWNLINK);
return ret;
}
}
ploragw->ptpc_custom_dl = (char *)HAL_Malloc(TOPIC_NAME_LEN_MAX);
if (NULL != ploragw->ptpc_custom_dl) {
snprintf(ploragw->ptpc_custom_dl, TOPIC_NAME_LEN_MAX, "/sys/%s/%s%s", ploragw->auth_info.product_key, ploragw->auth_info.device_name, TOPIC_CUSTOM_DOWNLINK);
ret = IOT_MQTT_Subscribe(ploragw->pclient, ploragw->ptpc_custom_dl, IOTX_MQTT_QOS1, callback_gwmp_msg_downlink, NULL);
if (ret < 0) {
log_err("subscribe topic:%s error\n", TOPIC_CUSTOM_DOWNLINK);
return ret;
}
}
ploragw->ptpc_gwconf_dl = (char *)HAL_Malloc(TOPIC_NAME_LEN_MAX);
if (NULL != ploragw->ptpc_gwconf_dl) {
snprintf(ploragw->ptpc_gwconf_dl, TOPIC_NAME_LEN_MAX, "%s/%s/%s", TOPIC_GWCONFIG_DOWNLOAD, ploragw->auth_info.product_key, ploragw->auth_info.device_name);
ret = IOT_MQTT_Subscribe(ploragw->pclient, ploragw->ptpc_gwconf_dl, IOTX_MQTT_QOS1, callback_gwconfig_download, NULL);
if (ret < 0) {
log_err("subscribe topic:%s error\n", TOPIC_GWCONFIG_DOWNLOAD);
return ret;
}
}
ploragw->ptpc_gwconf_get = (char *)HAL_Malloc(TOPIC_NAME_LEN_MAX);
if (NULL != ploragw->ptpc_gwconf_get) {
snprintf(ploragw->ptpc_gwconf_get, TOPIC_NAME_LEN_MAX, "%s/%s/%s", TOPIC_GWCONFIG_GET, ploragw->auth_info.product_key, ploragw->auth_info.device_name);
ret = IOT_MQTT_Subscribe(ploragw->pclient, ploragw->ptpc_gwconf_get, IOTX_MQTT_QOS1, callback_gwconfig_get, NULL);
if (ret < 0) {
log_err("subscribe topic:%s error\n", TOPIC_GWCONFIG_GET);
return ret;
}
}
ploragw->ptpc_devinfo_get = (char *)HAL_Malloc(TOPIC_NAME_LEN_MAX);
if (NULL != ploragw->ptpc_devinfo_get) {
snprintf(ploragw->ptpc_devinfo_get, TOPIC_NAME_LEN_MAX, "%s/%s/%s", TOPIC_DEVICEINFO_GET, ploragw->auth_info.product_key, ploragw->auth_info.device_name);
ret = IOT_MQTT_Subscribe(ploragw->pclient, ploragw->ptpc_devinfo_get, IOTX_MQTT_QOS1, callback_deviceinfo_get, NULL);
if (ret < 0) {
log_err("subscribe topic:%s error\n", TOPIC_DEVICEINFO_GET);
return ret;
}
}
ploragw->ptpc_gw_reset = HAL_Malloc(TOPIC_NAME_LEN_MAX);
if (NULL != ploragw->ptpc_gw_reset) {
snprintf(ploragw->ptpc_gw_reset, TOPIC_NAME_LEN_MAX, "%s/%s/%s", TOPIC_GW_RESET, ploragw->auth_info.product_key, ploragw->auth_info.device_name);
ret = IOT_MQTT_Subscribe(ploragw->pclient, ploragw->ptpc_gw_reset, IOTX_MQTT_QOS1, callback_gw_reset, NULL);
if (ret < 0) {
log_err("subscribe topic:%s error\n", TOPIC_GW_RESET);
return ret;
}
}
#if defined(ENABLE_REMOTE_LOG)
ploragw->ptpc_log_upload = (char *)HAL_Malloc(TOPIC_NAME_LEN_MAX);
if (NULL != ploragw->ptpc_log_upload) {
snprintf(ploragw->ptpc_log_upload, TOPIC_NAME_LEN_MAX, "/%s/%s%s", ploragw->auth_info.product_key, ploragw->auth_info.device_name, TOPIC_LOG_UPLOAD);
ret1 = IOT_MQTT_Subscribe(ploragw->pclient, ploragw->ptpc_log_upload, IOTX_MQTT_QOS1, callback_filelog_upload, NULL);
if (ret1 < 0) {
log_err("subscribe topic:%s error, no exit\n", TOPIC_LOG_UPLOAD);
}
}
ploragw->ptpc_log_upload_user = (char *)HAL_Malloc(TOPIC_NAME_LEN_MAX);
if (NULL != ploragw->ptpc_log_upload_user) {
snprintf(ploragw->ptpc_log_upload_user, TOPIC_NAME_LEN_MAX, "/%s/%s/user%s", ploragw->auth_info.product_key, ploragw->auth_info.device_name, TOPIC_LOG_UPLOAD);
ret1 = IOT_MQTT_Subscribe(ploragw->pclient, ploragw->ptpc_log_upload_user, IOTX_MQTT_QOS1, callback_filelog_upload, NULL);
if (ret1 < 0) {
log_err("subscribe topic:user/%s error, no exit\n", TOPIC_LOG_UPLOAD);
}
}
#endif
#if defined(ENABLE_REMOTE_CTRL_SSH)
ploragw->ptpc_ctrl_ssh = (char *)HAL_Malloc(TOPIC_NAME_LEN_MAX);
if (NULL != ploragw->ptpc_ctrl_ssh) {
snprintf(ploragw->ptpc_ctrl_ssh, TOPIC_NAME_LEN_MAX, "/%s/%s%s", ploragw->auth_info.product_key, ploragw->auth_info.device_name, TOPIC_CTRL_SSH);
ret1 = IOT_MQTT_Subscribe(ploragw->pclient, ploragw->ptpc_ctrl_ssh, IOTX_MQTT_QOS1, callback_remote_ctrl_ssh, NULL);
if (ret1 < 0) {
log_err("subscribe topic:%s error, no exit\n", TOPIC_CTRL_SSH);
}
}
ploragw->ptpc_ctrl_ssh_user = (char *)HAL_Malloc(TOPIC_NAME_LEN_MAX);
if (NULL != ploragw->ptpc_ctrl_ssh_user) {
snprintf(ploragw->ptpc_ctrl_ssh_user, TOPIC_NAME_LEN_MAX, "/%s/%s/user%s", ploragw->auth_info.product_key, ploragw->auth_info.device_name, TOPIC_CTRL_SSH);
ret1 = IOT_MQTT_Subscribe(ploragw->pclient, ploragw->ptpc_ctrl_ssh_user, IOTX_MQTT_QOS1, callback_remote_ctrl_ssh, NULL);
if (ret1 < 0) {
log_err("subscribe topic:user/%s error, no exit\n", TOPIC_CTRL_SSH);
}
}
#endif
#if defined(ENABLE_REMOTE_CTRL_UART)
ploragw->ptpc_ctrl_uart = (char *)HAL_Malloc(TOPIC_NAME_LEN_MAX);
if (NULL != ploragw->ptpc_ctrl_uart) {
snprintf(ploragw->ptpc_ctrl_uart, TOPIC_NAME_LEN_MAX, "/%s/%s%s", ploragw->auth_info.product_key, ploragw->auth_info.device_name, TOPIC_CTRL_UART);
ret1 = IOT_MQTT_Subscribe(ploragw->pclient, ploragw->ptpc_ctrl_uart, IOTX_MQTT_QOS1, callback_remote_ctrl_uart, NULL);
if (ret1 < 0) {
log_err("subscribe topic:%s error, no exit\n", TOPIC_CTRL_UART);
}
}
ploragw->ptpc_ctrl_uart_user = (char *)HAL_Malloc(TOPIC_NAME_LEN_MAX);
if (NULL != ploragw->ptpc_ctrl_uart_user) {
snprintf(ploragw->ptpc_ctrl_uart_user, TOPIC_NAME_LEN_MAX, "/%s/%s/user%s", ploragw->auth_info.product_key, ploragw->auth_info.device_name, TOPIC_CTRL_UART);
ret1 = IOT_MQTT_Subscribe(ploragw->pclient, ploragw->ptpc_ctrl_uart_user, IOTX_MQTT_QOS1, callback_remote_ctrl_uart, NULL);
if (ret1 < 0) {
log_err("subscribe topic:user/%s error, no exit\n", TOPIC_CTRL_UART);
}
}
#endif
return ret;
}
int mqtt_client(char *msg_buf, char *msg_readbuf)
{
int rc = 0;
iotx_conn_info_pt pconn_info;
iotx_mqtt_param_t mqtt_params;
iotx_lorogw_t *ploragw = &g_iotx_loragw;
#if defined(ENABLE_MSG_CACHE)
pthread_t thrid_msg_cache;
#endif
pthread_t thrid_file;
#if defined(ENABLE_OTA)
pthread_t thrid_ota;
#endif
#if defined(ENABLE_WATCHDOG)
struct timespec watchdog_time_keeper;
clock_gettime(CLOCK_MONOTONIC, &watchdog_time_keeper);
#endif
// call gateway get auth info api
memset(&ploragw->auth_info, 0x0, sizeof(aliot_gw_auth_info_t));
rc = aliot_gw_get_auth_info(&ploragw->auth_info);
if (0 != rc) {
log_err("call gateway get auth info api error!");
return -1;
}
// device auth
if (0 != IOT_SetupConnInfo(ploragw->auth_info.product_key,
ploragw->auth_info.device_name,
ploragw->auth_info.device_secret,
(void **)&pconn_info)) {
log_err("IOT_SetupConnInfo() error!");
return -1;
}
memset(&mqtt_params, 0x0, sizeof(mqtt_params));
mqtt_params.port = pconn_info->port;
mqtt_params.host = pconn_info->host_name;
mqtt_params.client_id = pconn_info->client_id;
mqtt_params.username = pconn_info->username;
mqtt_params.password = pconn_info->password;
mqtt_params.pub_key = pconn_info->pub_key;
mqtt_params.request_timeout_ms = 2000;
mqtt_params.clean_session = 0;
mqtt_params.keepalive_interval_ms = 60000;
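    // clean_session = 0 keeps the broker-side session (and subscriptions)
    // across reconnects; requests time out after 2s and keepalive runs every 60s.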
// mqtt_params.pread_buf = msg_readbuf;
mqtt_params.read_buf_size = MSG_LEN_MAX;
// mqtt_params.pwrite_buf = msg_buf;
mqtt_params.write_buf_size = MSG_LEN_MAX;
mqtt_params.handle_event.h_fp = event_handle;
mqtt_params.handle_event.pcontext = NULL;
ploragw->pclient = IOT_MQTT_Construct(&mqtt_params);
if (NULL == ploragw->pclient) {
log_err("MQTT construct failed");
return -1;
}
// subscribe gateway topic
rc = subscribe_topics();
if (rc < 0) {
log_err("subscribe_topics failed ret = %d", rc);
goto do_exit;
}
#if defined(ENABLE_MSG_CACHE)
msg_init();
#endif
// create check file thread
rc = pthread_create(&thrid_file, NULL, (void * ( *)(void *))thread_file_check, NULL);
if (0 != rc) {
log_err("impossible to create check file thread\n");
goto do_exit;
}
#if defined(ENABLE_MSG_CACHE)
rc = pthread_create(&thrid_msg_cache, NULL, (void * ( *)(void *))thread_msg_cache, NULL);
if (0 != rc) {
log_err("impossible to create msg cache thread\n");
goto do_exit;
}
#endif
#if defined(ENABLE_OTA)
ploragw->h_ota = IOT_OTA_Init(ploragw->auth_info.product_key, ploragw->auth_info.device_name, ploragw->pclient);
if (NULL == ploragw->h_ota) {
log_err("initialize OTA failed");
goto do_exit;
}
// create ota check thread
rc = pthread_create(&thrid_ota, NULL, (void * ( *)(void *))thread_ota_check, NULL);
if (0 != rc) {
log_err("impossible to create ota check thread\n");
goto do_exit;
}
#endif
// publish device info on boot
publish_deviceinfo_upload();
HAL_SleepMs(200);
// publish gw config to nms on boot
publish_gwconfig_upload();
HAL_MutexLock(ploragw->check_mutex);
ploragw->checkCnt = 0;
HAL_MutexUnlock(ploragw->check_mutex);
while (1) {
//handle the MQTT packet received from TCP or SSL connection
IOT_MQTT_Yield(ploragw->pclient, 500);
HAL_SleepMs(200);
if (ploragw->checkCnt > MQTT_CONNECT_RESET_CHECK_CNT) {
log_err("no downlink in %d uplink counts, need restart mqtt client\n", ploragw->checkCnt);
break;
}
#if defined(ENABLE_WATCHDOG)
if (thread_feeddog_periodically(MQTT_SYMBOL, THRD_ID_MAIN, 60, 180, &watchdog_time_keeper) < 0) {
log_err("mqtt main thread feeddog failed\n");
}
#endif
}
#if defined(ENABLE_WATCHDOG)
    /* feed watchdog 180s while the mqtt client is quitting, so the system will reboot if the mqtt client doesn't reconnect to the server within 180s */
#if !defined(ENABLE_ABP_NODES)
thread_feeddog(MQTT_SYMBOL, THRD_ID_MAIN, 180);
#else
if(!lora_ns_server_exist) {
thread_feeddog(MQTT_SYMBOL, THRD_ID_MAIN, 180);
}
#endif
#endif
do_exit:
exit_sig = 1;
HAL_SleepMs(3000);
unsubscribe_topics();
#if defined(ENABLE_OTA)
if (NULL != ploragw->h_ota) {
IOT_OTA_Deinit(ploragw->h_ota);
ploragw->h_ota = NULL;
}
#endif
if (NULL != ploragw->pclient) {
IOT_MQTT_Destroy(&ploragw->pclient);
ploragw->pclient = NULL;
}
HAL_SleepMs(200);
return 0;
}
int main(int argc, char **argv)
{
int rc = 0;
char *msg_buf = NULL;
char *msg_readbuf = NULL;
iotx_lorogw_t *ploragw = &g_iotx_loragw;
memset(ploragw, 0x0, sizeof(iotx_lorogw_t));
IOT_OpenLog("mqtt");
IOT_SetLogLevel(IOT_LOG_INFO);
if (NULL == (ploragw->check_mutex = HAL_MutexCreate())) {
log_err("create mutex failed");
goto do_exit;
}
#if defined(ENABLE_REMOTE_LOG)
log_init(FILELOG_MOD_IOTX, LOG_FILE, LOG_LEVEL_INFO, LOG_MOD_VERBOSE);
if (NULL == (ploragw->log_mutex = HAL_MutexCreate())) {
log_err("create mutex failed");
goto do_exit;
}
#endif
#if defined(ENABLE_ABP_NODES)
if (NULL == (ploragw->abp_mutex = HAL_MutexCreate())) {
log_err("create mutex failed");
goto do_exit;
}
#endif
#if defined(ENABLE_OTA) && defined(ENABLE_ADVANCED_OTA)
if (NULL == (ploragw->ota_mutex = HAL_MutexCreate())) {
log_err("create mutex failed");
goto do_exit;
}
#endif
ploragw->ppub_msg = (char *)HAL_Malloc(MSG_LEN_MAX);
ploragw->prev_msg = (char *)HAL_Malloc(MSG_LEN_MAX);
msg_buf = (char *)HAL_Malloc(MSG_LEN_MAX);
msg_readbuf = (char *)HAL_Malloc(MSG_LEN_MAX);
if ((NULL == msg_buf) || (NULL == msg_readbuf)
|| (NULL == ploragw->ppub_msg) || (NULL == ploragw->prev_msg)) {
log_err("malloc mqtt buf error");
goto do_exit;
}
native_exit_sig = 0;
// create UDP channel for gateway
ploragw->sock_up = -1;
ploragw->sock_down = -1;
rc = create_gw_upd_msg();
if (0 != rc) {
log_err("create gw upd channel fail, ret = %d", rc);
goto do_exit;
}
#if defined(ENABLE_ABP_NODES)
ploragw->native_sock_up = -1;
ploragw->native_sock_down = -1;
rc = create_native_upd_msg();
if (0 != rc) {
log_err("create native upd channel fail, ret = %d", rc);
goto do_exit;
}
abp_key_init();
abp_redis_init();
#endif
#ifdef ENABLE_DBUS_IPC
rc = mqtt_dbus_setup();
if(rc < 0) {
log_err("setup dbus ipc failed!!!\n");
goto do_exit;
}
#endif
while (1) {
exit_sig = 0;
mqtt_client(msg_buf, msg_readbuf);
log_err("mqtt client run error, sleep 3s restart");
HAL_SleepMs(3000);
}
do_exit:
native_exit_sig = 1;
HAL_SleepMs(3000);
#if defined(ENABLE_ABP_NODES)
if (-1 != ploragw->native_sock_up) {
close(ploragw->native_sock_up);
ploragw->native_sock_up = -1;
}
if (-1 != ploragw->native_sock_down) {
close(ploragw->native_sock_down);
ploragw->native_sock_down = -1;
}
#endif
if (-1 != ploragw->sock_up) {
close(ploragw->sock_up);
ploragw->sock_up = -1;
}
if (-1 != ploragw->sock_down) {
close(ploragw->sock_down);
ploragw->sock_down = -1;
}
if (NULL != msg_buf) {
HAL_Free(msg_buf);
}
if (NULL != msg_readbuf) {
HAL_Free(msg_readbuf);
}
if (NULL != ploragw->ppub_msg) {
HAL_Free(ploragw->ppub_msg);
}
if (NULL != ploragw->prev_msg) {
HAL_Free(ploragw->prev_msg);
}
#ifdef ENABLE_DBUS_IPC
rc = mqtt_dbus_exit();
if(rc != LORA_IPC_SUCCESS) {
log_err("exit from dbus ipc failed!!!");
}
#endif
#if defined(ENABLE_REMOTE_LOG)
if (NULL != ploragw->log_mutex) {
HAL_MutexDestroy(ploragw->log_mutex);
}
log_destroy();
#endif
#if defined(ENABLE_ABP_NODES)
if (NULL != ploragw->abp_mutex) {
HAL_MutexDestroy(ploragw->abp_mutex);
}
#endif
#if defined(ENABLE_OTA) && defined(ENABLE_ADVANCED_OTA)
if (NULL != ploragw->ota_mutex) {
HAL_MutexDestroy(ploragw->ota_mutex);
}
#endif
if (NULL != ploragw->check_mutex) {
HAL_MutexDestroy(ploragw->check_mutex);
}
IOT_DumpMemoryStats(IOT_LOG_DEBUG);
IOT_CloseLog();
log_info("exit");
return 0;
}
|
773362.c | #include <stdlib.h>
#include <string.h>
#include <math.h>
#include "md5.h"
|
847386.c | #include <assert.h>
#include <endian.h>
#include <errno.h>
#include <linux/virtio_mmio.h>
#include <string.h>
#include "enclave/enclave_oe.h"
#include "enclave/enclave_util.h"
#include "enclave/sgxlkl_t.h"
#include "enclave/ticketlock.h"
#include "lkl/virtio.h"
#define MAX_NET_DEVS 16
static uint8_t registered_dev_idx = 0;
struct virtio_dev* registered_devs[MAX_NET_DEVS];
/*
* Function to get netdev instance to use its attributes
*/
static inline struct virtio_dev* get_netdev_instance(uint8_t netdev_id)
{
    for (size_t i = 0; i < registered_dev_idx; i++)
        if (registered_devs[i]->vendor_id == netdev_id)
            return registered_devs[i];
    SGXLKL_ASSERT(false);
    return NULL; /* not reached; keeps the non-void function well-defined if asserts are compiled out */
}
/*
* Function to register net device & hold the reference
*/
static int dev_register(struct virtio_dev* dev)
{
int ret = 0;
if (registered_dev_idx == MAX_NET_DEVS)
{
/* This error code is a little bit of a lie */
sgxlkl_info("Too many virtio_net devices!\n");
ret = -LKL_ENOMEM;
}
else
{
/* registered_dev_idx is incremented by the caller */
registered_devs[registered_dev_idx] = dev;
}
return ret;
}
/*
* Function to generate an interrupt for LKL kernel to reap the virtQ data
*/
static void lkl_deliver_irq(uint64_t dev_id)
{
struct virtio_dev* dev = get_netdev_instance(dev_id);
dev->int_status |= VIRTIO_MMIO_INT_VRING;
lkl_trigger_irq(dev->irq);
}
/*
* Function to add a new net device to LKL and register the cb to notify
* frontend driver for the request completion.
*/
int lkl_virtio_netdev_add(struct virtio_dev* netdev)
{
int ret = -1;
int mmio_size = VIRTIO_MMIO_CONFIG + netdev->config_len;
registered_devs[registered_dev_idx] = netdev;
if (lkl_virtio_dev_setup(netdev, mmio_size, &lkl_deliver_irq) != 0)
return -1;
ret = dev_register(netdev);
if (ret < 0)
sgxlkl_info("Failed to register netdev \n");
return registered_dev_idx++;
}
/*
* Function to shutdown the network interface and remove it
*/
void lkl_virtio_netdev_remove(void)
{
uint8_t netdev_id = 0;
for (netdev_id = 0; netdev_id < registered_dev_idx; netdev_id++)
{
sgxlkl_host_netdev_remove(netdev_id);
int ret = lkl_netdev_get_ifindex(netdev_id);
if (ret < 0)
return;
ret = lkl_if_down(ret);
}
return;
}
|
754424.c | /* { dg-do run } */
/* { dg-require-effective-target arm_v8_2a_fp16_scalar_hw } */
/* { dg-add-options arm_v8_2a_fp16_scalar } */
/* { dg-skip-if "" { arm*-*-* } } */
#include <arm_fp16.h>
uint16_t expected[] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0xFFFF, 0xFFFF, 0x0, 0xFFFF,
0x0, 0x0, 0x0, 0x0, 0xFFFF, 0xFFFF, 0xFFFF, 0x0, 0x0};
#define TEST_MSG "VCAGTH_F16"
#define INSN_NAME vcagth_f16
#define EXPECTED expected
#define INPUT_TYPE float16_t
#define OUTPUT_TYPE uint16_t
#define OUTPUT_TYPE_SIZE 16
/* Include the template for binary scalar operations. */
#include "binary_scalar_op.inc"
|
312108.c | /*
* Copyright (c) 2004, Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the Institute nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* This file is part of the Contiki operating system.
*
* Author: Adam Dunkels <adam@sics.se>
*
*/
/**
* \file
* Timer library implementation.
* \author
* Adam Dunkels <adam@sics.se>
*/
/**
* \addtogroup timer
* @{
*/
#include "xcontiki/xcontiki.h"
#define XCONTIKI_OS_SYS_TIMER_PRIV_H
#include "xcontiki_os_sys_Timer_priv.h"
/*---------------------------------------------------------------------------*/
/**
* Set a timer.
*
* This function is used to set a timer for a time sometime in the
* future. The function xcontiki_os_sys_Timer__expired() will evaluate to true after
* the timer has expired.
*
 * \param t The id of the timer (0 to allocate a new one)
* \param interval The interval before the timer expires.
*
*/
xcontiki_os_sys_Timer__timer_id_t
xcontiki_os_sys_Timer__set(xcontiki_os_sys_Timer__timer_id_t t, xcontiki_arch_Clock__time_t intervl) {
assert(t < XCONTIKI_OS_SYS_TIMER__CONF_TIMERS_NUMBER && "Wrong timer id");
if (t >= XCONTIKI_OS_SYS_TIMER__CONF_TIMERS_NUMBER) {
return 0;
}
if (0 == t) {
t = allocate_new_timer();
assert(t != 0 && "No free timers. Increase number of timers");
if (0 == t) { //No free timer?
return 0;
}
}
interval[t] = intervl;
if (0 == intervl) {
timer_flags[t].expired = true;
timer_flags[t].running = false;
} else {
start[t] = xcontiki_arch_Clock__time();
previous_diff[t] = 0;
timer_flags[t].running = true;
timer_flags[t].expired = false;
}
return t;
}
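/*
 * Illustrative usage sketch (assumed typical polling pattern, not part of
 * this file): passing 0 asks set() to allocate a slot, and the returned id
 * is then polled with xcontiki_os_sys_Timer__expired() and re-armed with
 * xcontiki_os_sys_Timer__reset(), e.g.
 *
 *   static xcontiki_os_sys_Timer__timer_id_t t;
 *   t = xcontiki_os_sys_Timer__set(0, interval);
 *   if (xcontiki_os_sys_Timer__expired(t)) {
 *       xcontiki_os_sys_Timer__reset(t);
 *       // ... periodic work ...
 *   }
 */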
/*---------------------------------------------------------------------------*/
/**
* Reset the timer with the same interval.
*
* This function resets the timer with the same interval that was
* given to the timer_set() function. The start point of the interval
* is the exact time that the timer last expired. Therefore, this
* function will cause the timer to be stable over time, unlike the
* timer_restart() function. If this is executed before the
 * timer has expired, this function has no effect.
*
 * \param t The id of the timer.
* \sa timer_restart()
*/
void
xcontiki_os_sys_Timer__reset(xcontiki_os_sys_Timer__timer_id_t t) {
assert(t != 0 && t < XCONTIKI_OS_SYS_TIMER__CONF_TIMERS_NUMBER && "Wrong timer id");
if (0 == t || t >= XCONTIKI_OS_SYS_TIMER__CONF_TIMERS_NUMBER) {
return;
}
if (xcontiki_os_sys_Timer__expired(t)) {
start[t] += interval[t];
previous_diff[t] = 0;
timer_flags[t].expired = (0 == interval[t]);
if (false == timer_flags[t].expired) {
timer_flags[t].running = true;
}
}
}
/*---------------------------------------------------------------------------*/
/**
* Restart the timer from the current point in time
*
* This function restarts a timer with the same interval that was
* given to the timer_set() function. The timer will start at the
* current time.
*
* \note A periodic timer will drift if this function is used to reset
* it. For periodic timers, use the timer_reset() function instead.
*
 * \param t The id of the timer.
*
* \sa timer_reset()
*/
void
xcontiki_os_sys_Timer__restart(xcontiki_os_sys_Timer__timer_id_t t) {
assert(t != 0 && t < XCONTIKI_OS_SYS_TIMER__CONF_TIMERS_NUMBER && "Wrong timer id");
if (0 == t || t >= XCONTIKI_OS_SYS_TIMER__CONF_TIMERS_NUMBER) {
return;
}
start[t] = xcontiki_arch_Clock__time();
previous_diff[t] = 0;
timer_flags[t].expired = (0 == interval[t]);
if (false == timer_flags[t].expired) {
timer_flags[t].running = true;
}
}
/*---------------------------------------------------------------------------*/
/**
* Check if a timer has expired.
*
* This function tests if a timer has expired and returns true or
* false depending on its status.
*
 * \param t The id of the timer
*
* \return Non-zero if the timer has expired, zero otherwise.
*
*/
bool
xcontiki_os_sys_Timer__expired(xcontiki_os_sys_Timer__timer_id_t t) {
assert(t != 0 && t < XCONTIKI_OS_SYS_TIMER__CONF_TIMERS_NUMBER && "Wrong timer id");
if (0 == t || t >= XCONTIKI_OS_SYS_TIMER__CONF_TIMERS_NUMBER) {
return true;
}
if (timer_flags[t].expired) {
timer_flags[t].running = false;
return true;
}
xcontiki_arch_Clock__time_t diff = (xcontiki_arch_Clock__time() - start[t]);
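    /*
     * Expired when the elapsed time reaches the interval, or when the
     * elapsed-time delta suddenly shrinks, which indicates the clock
     * counter wrapped since the last check.
     */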
if (diff >= interval[t] || diff < previous_diff[t]) {
timer_flags[t].expired = true;
timer_flags[t].running = false;
return true;
} else {
previous_diff[t] = diff;
return false;
}
}
/*---------------------------------------------------------------------------*/
/**
* The time until the timer expires
*
* This function returns the time until the timer expires.
*
 * \param t The id of the timer
*
* \return The time until the timer expires
*
*/
xcontiki_arch_Clock__time_t
xcontiki_os_sys_Timer__remaining(xcontiki_os_sys_Timer__timer_id_t t) {
assert(t != 0 && t < XCONTIKI_OS_SYS_TIMER__CONF_TIMERS_NUMBER && "Wrong timer id");
if (0 == t || t >= XCONTIKI_OS_SYS_TIMER__CONF_TIMERS_NUMBER) {
return 0;
}
if (xcontiki_os_sys_Timer__expired(t)) {
return 0;
}
return (xcontiki_arch_Clock__time_t) (interval[t] - previous_diff[t]);
}
/**
* Remove the timer.
*
* This function removes the timer.
 * \param t The id of the timer.
* \sa timer_restart()
*/
void
xcontiki_os_sys_Timer__remove(xcontiki_os_sys_Timer__timer_id_t t) {
assert(t != 0 && t < XCONTIKI_OS_SYS_TIMER__CONF_TIMERS_NUMBER && "Wrong timer id");
if (0 == t || t >= XCONTIKI_OS_SYS_TIMER__CONF_TIMERS_NUMBER) {
return;
}
timer_flags[t].allocated = false;
timer_flags[t].expired = true;
timer_flags[t].running = false;
}
/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
xcontiki_os_sys_Protothread__state_t xcontiki_os_sys_Timer__sleepyhead_thread(void) {
static xcontiki_os_sys_Protothread__pt_t pt;
static xcontiki_os_sys_Timer__timer_id_t t;
static xcontiki_arch_Clock__time_t now;
static xcontiki_arch_Clock__time_t tdist; //time distance for the nearest expiration
static xcontiki_arch_Clock__time_t diff;
XCONTIKI_OS_SYS_PROTOTHREAD__BEGIN(pt);
now = xcontiki_arch_Clock__time();
tdist = 0;
for (t = 1; t < XCONTIKI_OS_SYS_TIMER__CONF_TIMERS_NUMBER; t++) {
if (timer_flags[t].running) {
diff = now - start[t];
if (diff >= interval[t] || diff < previous_diff[t]) {
timer_flags[t].expired = true;
timer_flags[t].running = false;
} else {
previous_diff[t] = diff;
diff = interval[t] - diff; //time distance to the next expiration
if (tdist < diff) {
tdist = diff;
}
}
}
}
XCONTIKI_OS_SYS_PROTOTHREAD__END(pt);
}
void xcontiki_os_sys_Timer__set_interval(xcontiki_os_sys_Timer__timer_id_t t, xcontiki_arch_Clock__time_t intervl) {
if (0 == t || t >= XCONTIKI_OS_SYS_TIMER__CONF_TIMERS_NUMBER) {
return;
}
interval[t] = intervl;
}
xcontiki_arch_Clock__time_t xcontiki_os_sys_Timer__get_interval(xcontiki_os_sys_Timer__timer_id_t t) {
if (0 == t || t >= XCONTIKI_OS_SYS_TIMER__CONF_TIMERS_NUMBER) {
return 0;
}
return interval[t];
}
void xcontiki_os_sys_Timer__set_start(xcontiki_os_sys_Timer__timer_id_t t, xcontiki_arch_Clock__time_t strt) {
if (0 == t || t >= XCONTIKI_OS_SYS_TIMER__CONF_TIMERS_NUMBER) {
return;
}
start[t] = strt;
}
xcontiki_arch_Clock__time_t xcontiki_os_sys_Timer__get_start(xcontiki_os_sys_Timer__timer_id_t t) {
if (0 == t || t >= XCONTIKI_OS_SYS_TIMER__CONF_TIMERS_NUMBER) {
return 0;
}
return start[t];
}
/**
* Check if timer was allocated
*
* \param t An id of the timer.
* \sa timer__set()
*/
bool
xcontiki_os_sys_Timer__is_allocated(xcontiki_os_sys_Timer__timer_id_t t) {
if (0 == t || t >= XCONTIKI_OS_SYS_TIMER__CONF_TIMERS_NUMBER) {
return false;
}
return (0!=timer_flags[t].allocated);
}
/** @} */
|
495907.c | /*
* Copyright 1995-2018 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the Apache License 2.0 (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
/*
* DSA low level APIs are deprecated for public use, but still ok for
* internal use.
*/
#include "internal/deprecated.h"
#include <stdio.h>
#include "internal/cryptlib.h"
#include "internal/refcount.h"
#include <openssl/bn.h>
#include <openssl/err.h>
#include <openssl/objects.h>
#include <openssl/evp.h>
#include <openssl/x509.h>
#include <openssl/rsa.h>
#include <openssl/dsa.h>
#include <openssl/dh.h>
#include <openssl/ec.h>
#include <openssl/cmac.h>
#include <openssl/engine.h>
#include <openssl/params.h>
#include <openssl/serializer.h>
#include <openssl/core_names.h>
#include "crypto/asn1.h"
#include "crypto/evp.h"
#include "internal/evp.h"
#include "internal/provider.h"
#include "evp_local.h"
#include "crypto/ec.h"
/* TODO remove this when the EVP_PKEY_is_a() #legacy support hack is removed */
#include "e_os.h" /* strcasecmp on Windows */
static int pkey_set_type(EVP_PKEY *pkey, ENGINE *e, int type, const char *str,
int len, EVP_KEYMGMT *keymgmt);
static void evp_pkey_free_it(EVP_PKEY *key);
#ifndef FIPS_MODE
/* The type of parameters selected in key parameter functions */
# define SELECT_PARAMETERS OSSL_KEYMGMT_SELECT_DOMAIN_PARAMETERS
int EVP_PKEY_bits(const EVP_PKEY *pkey)
{
if (pkey != NULL) {
if (pkey->ameth == NULL)
return pkey->cache.bits;
else if (pkey->ameth->pkey_bits)
return pkey->ameth->pkey_bits(pkey);
}
return 0;
}
int EVP_PKEY_security_bits(const EVP_PKEY *pkey)
{
if (pkey == NULL)
return 0;
if (pkey->ameth == NULL)
return pkey->cache.security_bits;
if (pkey->ameth->pkey_security_bits == NULL)
return -2;
return pkey->ameth->pkey_security_bits(pkey);
}
int EVP_PKEY_save_parameters(EVP_PKEY *pkey, int mode)
{
# ifndef OPENSSL_NO_DSA
if (pkey->type == EVP_PKEY_DSA) {
int ret = pkey->save_parameters;
if (mode >= 0)
pkey->save_parameters = mode;
return ret;
}
# endif
# ifndef OPENSSL_NO_EC
if (pkey->type == EVP_PKEY_EC) {
int ret = pkey->save_parameters;
if (mode >= 0)
pkey->save_parameters = mode;
return ret;
}
# endif
return 0;
}
int EVP_PKEY_set_ex_data(EVP_PKEY *key, int idx, void *arg)
{
return CRYPTO_set_ex_data(&key->ex_data, idx, arg);
}
void *EVP_PKEY_get_ex_data(const EVP_PKEY *key, int idx)
{
return CRYPTO_get_ex_data(&key->ex_data, idx);
}
int EVP_PKEY_copy_parameters(EVP_PKEY *to, const EVP_PKEY *from)
{
/*
* TODO: clean up legacy stuff from this function when legacy support
* is gone.
*/
/*
* If |to| is a legacy key and |from| isn't, we must downgrade |from|.
* If that fails, this function fails.
*/
if (to->type != EVP_PKEY_NONE && from->keymgmt != NULL)
if (!evp_pkey_downgrade((EVP_PKEY *)from))
return 0;
/*
* Make sure |to| is typed. Content is less important at this early
* stage.
*
* 1. If |to| is untyped, assign |from|'s key type to it.
* 2. If |to| contains a legacy key, compare its |type| to |from|'s.
* (|from| was already downgraded above)
*
* If |to| is a provided key, there's nothing more to do here, functions
* like evp_keymgmt_util_copy() and evp_pkey_export_to_provider() called
* further down help us find out if they are the same or not.
*/
if (to->type == EVP_PKEY_NONE && to->keymgmt == NULL) {
if (from->type != EVP_PKEY_NONE) {
if (EVP_PKEY_set_type(to, from->type) == 0)
return 0;
} else {
if (EVP_PKEY_set_type_by_keymgmt(to, from->keymgmt) == 0)
return 0;
}
} else if (to->type != EVP_PKEY_NONE) {
if (to->type != from->type) {
EVPerr(EVP_F_EVP_PKEY_COPY_PARAMETERS, EVP_R_DIFFERENT_KEY_TYPES);
goto err;
}
}
if (EVP_PKEY_missing_parameters(from)) {
EVPerr(EVP_F_EVP_PKEY_COPY_PARAMETERS, EVP_R_MISSING_PARAMETERS);
goto err;
}
if (!EVP_PKEY_missing_parameters(to)) {
if (EVP_PKEY_cmp_parameters(to, from) == 1)
return 1;
EVPerr(EVP_F_EVP_PKEY_COPY_PARAMETERS, EVP_R_DIFFERENT_PARAMETERS);
return 0;
}
/* For purely provided keys, we just call the keymgmt utility */
if (to->keymgmt != NULL && from->keymgmt != NULL)
return evp_keymgmt_util_copy(to, (EVP_PKEY *)from, SELECT_PARAMETERS);
/*
* If |to| is provided, we know that |from| is legacy at this point.
* Try exporting |from| to |to|'s keymgmt, then use evp_keymgmt_copy()
* to copy the appropriate data to |to|'s keydata.
*/
if (to->keymgmt != NULL) {
EVP_KEYMGMT *to_keymgmt = to->keymgmt;
void *from_keydata =
evp_pkey_export_to_provider((EVP_PKEY *)from, NULL, &to_keymgmt,
NULL);
/*
* If we get a NULL, it could be an internal error, or it could be
* that there's a key mismatch. We're pretending the latter...
*/
if (from_keydata == NULL) {
ERR_raise(ERR_LIB_EVP, EVP_R_DIFFERENT_KEY_TYPES);
return 0;
}
return evp_keymgmt_copy(to->keymgmt, to->keydata, from_keydata,
SELECT_PARAMETERS);
}
/* Both keys are legacy */
if (from->ameth != NULL && from->ameth->param_copy != NULL)
return from->ameth->param_copy(to, from);
err:
return 0;
}
int EVP_PKEY_missing_parameters(const EVP_PKEY *pkey)
{
if (pkey != NULL) {
if (pkey->keymgmt != NULL)
return !evp_keymgmt_util_has((EVP_PKEY *)pkey, SELECT_PARAMETERS);
else if (pkey->ameth != NULL && pkey->ameth->param_missing != NULL)
return pkey->ameth->param_missing(pkey);
}
return 0;
}
/*
* This function is called for any mixture of keys except pure legacy pair.
* TODO When legacy keys are gone, we replace a call to this functions with
* a call to evp_keymgmt_util_match().
*/
static int evp_pkey_cmp_any(const EVP_PKEY *a, const EVP_PKEY *b,
int selection)
{
EVP_KEYMGMT *keymgmt1 = NULL, *keymgmt2 = NULL;
void *keydata1 = NULL, *keydata2 = NULL, *tmp_keydata = NULL;
/* If none of them are provided, this function shouldn't have been called */
if (!ossl_assert(a->keymgmt != NULL || b->keymgmt != NULL))
return -2;
/* For purely provided keys, we just call the keymgmt utility */
if (a->keymgmt != NULL && b->keymgmt != NULL)
return evp_keymgmt_util_match((EVP_PKEY *)a, (EVP_PKEY *)b, selection);
/*
* At this point, one of them is provided, the other not. This allows
* us to compare types using legacy NIDs.
*/
if ((a->type != EVP_PKEY_NONE
&& !EVP_KEYMGMT_is_a(b->keymgmt, OBJ_nid2sn(a->type)))
|| (b->type != EVP_PKEY_NONE
&& !EVP_KEYMGMT_is_a(a->keymgmt, OBJ_nid2sn(b->type))))
return -1; /* not the same key type */
/*
* We've determined that they both are the same keytype, so the next
* step is to do a bit of cross export to ensure we have keydata for
* both keys in the same keymgmt.
*/
keymgmt1 = a->keymgmt;
keydata1 = a->keydata;
keymgmt2 = b->keymgmt;
keydata2 = b->keydata;
if (keymgmt2 != NULL && keymgmt2->match != NULL) {
tmp_keydata =
evp_pkey_export_to_provider((EVP_PKEY *)a, NULL, &keymgmt2, NULL);
if (tmp_keydata != NULL) {
keymgmt1 = keymgmt2;
keydata1 = tmp_keydata;
}
}
if (tmp_keydata == NULL && keymgmt1 != NULL && keymgmt1->match != NULL) {
tmp_keydata =
evp_pkey_export_to_provider((EVP_PKEY *)b, NULL, &keymgmt1, NULL);
if (tmp_keydata != NULL) {
keymgmt2 = keymgmt1;
keydata2 = tmp_keydata;
}
}
/* If we still don't have matching keymgmt implementations, we give up */
if (keymgmt1 != keymgmt2)
return -2;
return evp_keymgmt_match(keymgmt1, keydata1, keydata2, selection);
}
int EVP_PKEY_cmp_parameters(const EVP_PKEY *a, const EVP_PKEY *b)
{
/*
* TODO: clean up legacy stuff from this function when legacy support
* is gone.
*/
if (a->keymgmt != NULL || b->keymgmt != NULL)
return evp_pkey_cmp_any(a, b, SELECT_PARAMETERS);
/* All legacy keys */
if (a->type != b->type)
return -1;
if (a->ameth != NULL && a->ameth->param_cmp != NULL)
return a->ameth->param_cmp(a, b);
return -2;
}
int EVP_PKEY_cmp(const EVP_PKEY *a, const EVP_PKEY *b)
{
/*
* TODO: clean up legacy stuff from this function when legacy support
* is gone.
*/
if (a->keymgmt != NULL || b->keymgmt != NULL)
return evp_pkey_cmp_any(a, b, (SELECT_PARAMETERS
| OSSL_KEYMGMT_SELECT_PUBLIC_KEY));
/* All legacy keys */
if (a->type != b->type)
return -1;
if (a->ameth != NULL) {
int ret;
/* Compare parameters if the algorithm has them */
if (a->ameth->param_cmp != NULL) {
ret = a->ameth->param_cmp(a, b);
if (ret <= 0)
return ret;
}
if (a->ameth->pub_cmp != NULL)
return a->ameth->pub_cmp(a, b);
}
return -2;
}
EVP_PKEY *EVP_PKEY_new_raw_private_key(int type, ENGINE *e,
const unsigned char *priv,
size_t len)
{
EVP_PKEY *ret = EVP_PKEY_new();
if (ret == NULL
|| !pkey_set_type(ret, e, type, NULL, -1, NULL)) {
/* EVPerr already called */
goto err;
}
if (ret->ameth->set_priv_key == NULL) {
EVPerr(EVP_F_EVP_PKEY_NEW_RAW_PRIVATE_KEY,
EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE);
goto err;
}
if (!ret->ameth->set_priv_key(ret, priv, len)) {
EVPerr(EVP_F_EVP_PKEY_NEW_RAW_PRIVATE_KEY, EVP_R_KEY_SETUP_FAILED);
goto err;
}
return ret;
err:
EVP_PKEY_free(ret);
return NULL;
}
EVP_PKEY *EVP_PKEY_new_raw_public_key(int type, ENGINE *e,
const unsigned char *pub,
size_t len)
{
EVP_PKEY *ret = EVP_PKEY_new();
if (ret == NULL
|| !pkey_set_type(ret, e, type, NULL, -1, NULL)) {
/* EVPerr already called */
goto err;
}
if (ret->ameth->set_pub_key == NULL) {
EVPerr(EVP_F_EVP_PKEY_NEW_RAW_PUBLIC_KEY,
EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE);
goto err;
}
if (!ret->ameth->set_pub_key(ret, pub, len)) {
EVPerr(EVP_F_EVP_PKEY_NEW_RAW_PUBLIC_KEY, EVP_R_KEY_SETUP_FAILED);
goto err;
}
return ret;
err:
EVP_PKEY_free(ret);
return NULL;
}
int EVP_PKEY_get_raw_private_key(const EVP_PKEY *pkey, unsigned char *priv,
size_t *len)
{
/* TODO(3.0) Do we need to do anything about provider side keys? */
if (pkey->ameth->get_priv_key == NULL) {
EVPerr(EVP_F_EVP_PKEY_GET_RAW_PRIVATE_KEY,
EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE);
return 0;
}
if (!pkey->ameth->get_priv_key(pkey, priv, len)) {
EVPerr(EVP_F_EVP_PKEY_GET_RAW_PRIVATE_KEY, EVP_R_GET_RAW_KEY_FAILED);
return 0;
}
return 1;
}
int EVP_PKEY_get_raw_public_key(const EVP_PKEY *pkey, unsigned char *pub,
size_t *len)
{
/* TODO(3.0) Do we need to do anything about provider side keys? */
if (pkey->ameth->get_pub_key == NULL) {
EVPerr(EVP_F_EVP_PKEY_GET_RAW_PUBLIC_KEY,
EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE);
return 0;
}
if (!pkey->ameth->get_pub_key(pkey, pub, len)) {
EVPerr(EVP_F_EVP_PKEY_GET_RAW_PUBLIC_KEY, EVP_R_GET_RAW_KEY_FAILED);
return 0;
}
return 1;
}
EVP_PKEY *EVP_PKEY_new_CMAC_key(ENGINE *e, const unsigned char *priv,
size_t len, const EVP_CIPHER *cipher)
{
# ifndef OPENSSL_NO_CMAC
# ifndef OPENSSL_NO_ENGINE
const char *engine_id = e != NULL ? ENGINE_get_id(e) : NULL;
# endif
const char *cipher_name = EVP_CIPHER_name(cipher);
const OSSL_PROVIDER *prov = EVP_CIPHER_provider(cipher);
OPENSSL_CTX *libctx =
prov == NULL ? NULL : ossl_provider_library_context(prov);
EVP_PKEY *ret = EVP_PKEY_new();
EVP_MAC *cmac = EVP_MAC_fetch(libctx, OSSL_MAC_NAME_CMAC, NULL);
EVP_MAC_CTX *cmctx = cmac != NULL ? EVP_MAC_CTX_new(cmac) : NULL;
OSSL_PARAM params[4];
size_t paramsn = 0;
if (ret == NULL
|| cmctx == NULL
|| !pkey_set_type(ret, e, EVP_PKEY_CMAC, NULL, -1, NULL)) {
/* EVPerr already called */
goto err;
}
# ifndef OPENSSL_NO_ENGINE
if (engine_id != NULL)
params[paramsn++] =
OSSL_PARAM_construct_utf8_string("engine", (char *)engine_id, 0);
# endif
params[paramsn++] =
OSSL_PARAM_construct_utf8_string(OSSL_MAC_PARAM_CIPHER,
(char *)cipher_name, 0);
params[paramsn++] =
OSSL_PARAM_construct_octet_string(OSSL_MAC_PARAM_KEY,
(char *)priv, len);
params[paramsn] = OSSL_PARAM_construct_end();
if (!EVP_MAC_CTX_set_params(cmctx, params)) {
EVPerr(EVP_F_EVP_PKEY_NEW_CMAC_KEY, EVP_R_KEY_SETUP_FAILED);
goto err;
}
ret->pkey.ptr = cmctx;
return ret;
err:
EVP_PKEY_free(ret);
EVP_MAC_CTX_free(cmctx);
EVP_MAC_free(cmac);
return NULL;
# else
EVPerr(EVP_F_EVP_PKEY_NEW_CMAC_KEY,
EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE);
return NULL;
# endif
}
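/*
 * Illustrative usage sketch, not part of the original file: constructing a
 * CMAC key over AES-128-CBC.  The all-zero key is a placeholder; the
 * resulting pkey is intended for use with the EVP_DigestSign*() family.
 *
 *     static const unsigned char k[16] = { 0 };
 *     EVP_PKEY *mackey = EVP_PKEY_new_CMAC_key(NULL, k, sizeof(k),
 *                                              EVP_aes_128_cbc());
 *
 *     if (mackey != NULL) {
 *         // ... sign with EVP_DigestSignInit()/EVP_DigestSign() ...
 *         EVP_PKEY_free(mackey);
 *     }
 */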
int EVP_PKEY_set_type(EVP_PKEY *pkey, int type)
{
return pkey_set_type(pkey, NULL, type, NULL, -1, NULL);
}
int EVP_PKEY_set_type_str(EVP_PKEY *pkey, const char *str, int len)
{
return pkey_set_type(pkey, NULL, EVP_PKEY_NONE, str, len, NULL);
}
int EVP_PKEY_set_alias_type(EVP_PKEY *pkey, int type)
{
if (pkey->type == type) {
return 1; /* it already is that type */
}
/*
* The application is requesting to alias this to a different pkey type,
* but not one that resolves to the base type.
*/
if (EVP_PKEY_type(type) != EVP_PKEY_base_id(pkey)) {
EVPerr(EVP_F_EVP_PKEY_SET_ALIAS_TYPE, EVP_R_UNSUPPORTED_ALGORITHM);
return 0;
}
pkey->type = type;
return 1;
}
# ifndef OPENSSL_NO_ENGINE
int EVP_PKEY_set1_engine(EVP_PKEY *pkey, ENGINE *e)
{
if (e != NULL) {
if (!ENGINE_init(e)) {
EVPerr(EVP_F_EVP_PKEY_SET1_ENGINE, ERR_R_ENGINE_LIB);
return 0;
}
if (ENGINE_get_pkey_meth(e, pkey->type) == NULL) {
ENGINE_finish(e);
EVPerr(EVP_F_EVP_PKEY_SET1_ENGINE, EVP_R_UNSUPPORTED_ALGORITHM);
return 0;
}
}
ENGINE_finish(pkey->pmeth_engine);
pkey->pmeth_engine = e;
return 1;
}
ENGINE *EVP_PKEY_get0_engine(const EVP_PKEY *pkey)
{
return pkey->engine;
}
# endif
int EVP_PKEY_assign(EVP_PKEY *pkey, int type, void *key)
{
int alias = type;
#ifndef OPENSSL_NO_EC
if (EVP_PKEY_type(type) == EVP_PKEY_EC) {
const EC_GROUP *group = EC_KEY_get0_group(key);
if (group != NULL && EC_GROUP_get_curve_name(group) == NID_sm2)
alias = EVP_PKEY_SM2;
}
#endif
if (pkey == NULL || !EVP_PKEY_set_type(pkey, type))
return 0;
if (!EVP_PKEY_set_alias_type(pkey, alias))
return 0;
pkey->pkey.ptr = key;
return (key != NULL);
}
void *EVP_PKEY_get0(const EVP_PKEY *pkey)
{
if (!evp_pkey_downgrade((EVP_PKEY *)pkey)) {
ERR_raise(ERR_LIB_EVP, EVP_R_INACCESSIBLE_KEY);
return NULL;
}
return pkey->pkey.ptr;
}
const unsigned char *EVP_PKEY_get0_hmac(const EVP_PKEY *pkey, size_t *len)
{
ASN1_OCTET_STRING *os = NULL;
if (pkey->type != EVP_PKEY_HMAC) {
EVPerr(EVP_F_EVP_PKEY_GET0_HMAC, EVP_R_EXPECTING_AN_HMAC_KEY);
return NULL;
}
os = EVP_PKEY_get0(pkey);
*len = os->length;
return os->data;
}
# ifndef OPENSSL_NO_POLY1305
const unsigned char *EVP_PKEY_get0_poly1305(const EVP_PKEY *pkey, size_t *len)
{
ASN1_OCTET_STRING *os = NULL;
if (pkey->type != EVP_PKEY_POLY1305) {
EVPerr(EVP_F_EVP_PKEY_GET0_POLY1305, EVP_R_EXPECTING_A_POLY1305_KEY);
return NULL;
}
os = EVP_PKEY_get0(pkey);
*len = os->length;
return os->data;
}
# endif
# ifndef OPENSSL_NO_SIPHASH
const unsigned char *EVP_PKEY_get0_siphash(const EVP_PKEY *pkey, size_t *len)
{
ASN1_OCTET_STRING *os = NULL;
if (pkey->type != EVP_PKEY_SIPHASH) {
EVPerr(EVP_F_EVP_PKEY_GET0_SIPHASH, EVP_R_EXPECTING_A_SIPHASH_KEY);
return NULL;
}
os = EVP_PKEY_get0(pkey);
*len = os->length;
return os->data;
}
# endif
# ifndef OPENSSL_NO_RSA
int EVP_PKEY_set1_RSA(EVP_PKEY *pkey, RSA *key)
{
int ret = EVP_PKEY_assign_RSA(pkey, key);
if (ret)
RSA_up_ref(key);
return ret;
}
RSA *EVP_PKEY_get0_RSA(const EVP_PKEY *pkey)
{
if (!evp_pkey_downgrade((EVP_PKEY *)pkey)) {
ERR_raise(ERR_LIB_EVP, EVP_R_INACCESSIBLE_KEY);
return NULL;
}
if (pkey->type != EVP_PKEY_RSA && pkey->type != EVP_PKEY_RSA_PSS) {
EVPerr(EVP_F_EVP_PKEY_GET0_RSA, EVP_R_EXPECTING_AN_RSA_KEY);
return NULL;
}
return pkey->pkey.rsa;
}
RSA *EVP_PKEY_get1_RSA(EVP_PKEY *pkey)
{
RSA *ret = EVP_PKEY_get0_RSA(pkey);
if (ret != NULL)
RSA_up_ref(ret);
return ret;
}
# endif
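/*
 * Illustrative note, not part of the original file: EVP_PKEY_set1_RSA()
 * takes an extra reference on the RSA key, so the caller keeps its own
 * reference, while the EVP_PKEY_assign_RSA() macro transfers ownership to
 * the EVP_PKEY.  A minimal sketch of the set1 pattern:
 *
 *     RSA *rsa = RSA_new();            // placeholder key material
 *     EVP_PKEY *pkey = EVP_PKEY_new();
 *
 *     if (rsa != NULL && pkey != NULL)
 *         EVP_PKEY_set1_RSA(pkey, rsa);
 *     RSA_free(rsa);                   // drop the caller's reference
 *     EVP_PKEY_free(pkey);             // releases the underlying RSA too
 */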
# ifndef OPENSSL_NO_DSA
DSA *EVP_PKEY_get0_DSA(const EVP_PKEY *pkey)
{
if (!evp_pkey_downgrade((EVP_PKEY *)pkey)) {
ERR_raise(ERR_LIB_EVP, EVP_R_INACCESSIBLE_KEY);
return NULL;
}
if (pkey->type != EVP_PKEY_DSA) {
EVPerr(EVP_F_EVP_PKEY_GET0_DSA, EVP_R_EXPECTING_A_DSA_KEY);
return NULL;
}
return pkey->pkey.dsa;
}
int EVP_PKEY_set1_DSA(EVP_PKEY *pkey, DSA *key)
{
int ret = EVP_PKEY_assign_DSA(pkey, key);
if (ret)
DSA_up_ref(key);
return ret;
}
DSA *EVP_PKEY_get1_DSA(EVP_PKEY *pkey)
{
DSA *ret = EVP_PKEY_get0_DSA(pkey);
if (ret != NULL)
DSA_up_ref(ret);
return ret;
}
# endif /* OPENSSL_NO_DSA */
#endif /* FIPS_MODE */
#ifndef FIPS_MODE
# ifndef OPENSSL_NO_EC
int EVP_PKEY_set1_EC_KEY(EVP_PKEY *pkey, EC_KEY *key)
{
int ret = EVP_PKEY_assign_EC_KEY(pkey, key);
if (ret)
EC_KEY_up_ref(key);
return ret;
}
EC_KEY *EVP_PKEY_get0_EC_KEY(const EVP_PKEY *pkey)
{
if (!evp_pkey_downgrade((EVP_PKEY *)pkey)) {
ERR_raise(ERR_LIB_EVP, EVP_R_INACCESSIBLE_KEY);
return NULL;
}
if (EVP_PKEY_base_id(pkey) != EVP_PKEY_EC) {
EVPerr(EVP_F_EVP_PKEY_GET0_EC_KEY, EVP_R_EXPECTING_A_EC_KEY);
return NULL;
}
return pkey->pkey.ec;
}
EC_KEY *EVP_PKEY_get1_EC_KEY(EVP_PKEY *pkey)
{
EC_KEY *ret = EVP_PKEY_get0_EC_KEY(pkey);
if (ret != NULL)
EC_KEY_up_ref(ret);
return ret;
}
# endif
# ifndef OPENSSL_NO_DH
int EVP_PKEY_set1_DH(EVP_PKEY *pkey, DH *key)
{
int type = DH_get0_q(key) == NULL ? EVP_PKEY_DH : EVP_PKEY_DHX;
int ret = EVP_PKEY_assign(pkey, type, key);
if (ret)
DH_up_ref(key);
return ret;
}
DH *EVP_PKEY_get0_DH(const EVP_PKEY *pkey)
{
if (!evp_pkey_downgrade((EVP_PKEY *)pkey)) {
ERR_raise(ERR_LIB_EVP, EVP_R_INACCESSIBLE_KEY);
return NULL;
}
if (pkey->type != EVP_PKEY_DH && pkey->type != EVP_PKEY_DHX) {
EVPerr(EVP_F_EVP_PKEY_GET0_DH, EVP_R_EXPECTING_A_DH_KEY);
return NULL;
}
return pkey->pkey.dh;
}
DH *EVP_PKEY_get1_DH(EVP_PKEY *pkey)
{
DH *ret = EVP_PKEY_get0_DH(pkey);
if (ret != NULL)
DH_up_ref(ret);
return ret;
}
# endif
int EVP_PKEY_type(int type)
{
int ret;
const EVP_PKEY_ASN1_METHOD *ameth;
ENGINE *e;
ameth = EVP_PKEY_asn1_find(&e, type);
if (ameth)
ret = ameth->pkey_id;
else
ret = NID_undef;
# ifndef OPENSSL_NO_ENGINE
ENGINE_finish(e);
# endif
return ret;
}
int EVP_PKEY_id(const EVP_PKEY *pkey)
{
return pkey->type;
}
int EVP_PKEY_base_id(const EVP_PKEY *pkey)
{
return EVP_PKEY_type(pkey->type);
}
int EVP_PKEY_is_a(const EVP_PKEY *pkey, const char *name)
{
#ifndef FIPS_MODE
if (pkey->keymgmt == NULL) {
/*
* These hard coded cases are pure hackery to get around the fact
* that names in crypto/objects/objects.txt are a mess. There is
* no "EC", and "RSA" leads to the NID for 2.5.8.1.1, an OID that's
* fallen out of use in favor of { pkcs-1 1 }, i.e. 1.2.840.113549.1.1.1,
* the NID of which is used for EVP_PKEY_RSA. Strangely enough,
* "DSA" is accurate... but still, better be safe and hard-code
* names that we know.
* TODO Clean this away along with all other #legacy support.
*/
int type;
if (strcasecmp(name, "RSA") == 0)
type = EVP_PKEY_RSA;
#ifndef OPENSSL_NO_EC
else if (strcasecmp(name, "EC") == 0)
type = EVP_PKEY_EC;
#endif
#ifndef OPENSSL_NO_DSA
else if (strcasecmp(name, "DSA") == 0)
type = EVP_PKEY_DSA;
#endif
else
type = EVP_PKEY_type(OBJ_sn2nid(name));
return EVP_PKEY_type(pkey->type) == type;
}
#endif
return EVP_KEYMGMT_is_a(pkey->keymgmt, name);
}
int EVP_PKEY_can_sign(const EVP_PKEY *pkey)
{
if (pkey->keymgmt == NULL) {
switch (EVP_PKEY_base_id(pkey)) {
case EVP_PKEY_RSA:
return 1;
#ifndef OPENSSL_NO_DSA
case EVP_PKEY_DSA:
return 1;
#endif
#ifndef OPENSSL_NO_EC
case EVP_PKEY_ED25519:
case EVP_PKEY_ED448:
return 1;
case EVP_PKEY_EC: /* Including SM2 */
return EC_KEY_can_sign(pkey->pkey.ec);
#endif
default:
break;
}
} else {
const OSSL_PROVIDER *prov = EVP_KEYMGMT_provider(pkey->keymgmt);
OPENSSL_CTX *libctx = ossl_provider_library_context(prov);
const char *supported_sig =
pkey->keymgmt->query_operation_name != NULL
? pkey->keymgmt->query_operation_name(OSSL_OP_SIGNATURE)
: evp_first_name(prov, pkey->keymgmt->name_id);
EVP_SIGNATURE *signature = NULL;
signature = EVP_SIGNATURE_fetch(libctx, supported_sig, NULL);
if (signature != NULL) {
EVP_SIGNATURE_free(signature);
return 1;
}
}
return 0;
}
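/*
 * Illustrative usage sketch, not part of the original file: callers can use
 * EVP_PKEY_can_sign() as a cheap capability check before setting up a
 * signing operation; |pkey| is a hypothetical key supplied by the caller.
 *
 *     if (!EVP_PKEY_can_sign(pkey)) {
 *         // report "key cannot be used for signing" and bail out
 *     }
 */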
#ifndef OPENSSL_NO_EC
/*
* TODO rewrite when we have proper data extraction functions
* Note: an octet pointer would be desirable!
*/
static OSSL_CALLBACK get_ec_curve_name_cb;
static int get_ec_curve_name_cb(const OSSL_PARAM params[], void *arg)
{
const OSSL_PARAM *p = NULL;
if ((p = OSSL_PARAM_locate_const(params, OSSL_PKEY_PARAM_EC_NAME)) != NULL)
return OSSL_PARAM_get_utf8_string(p, arg, 0);
/* If there is no curve name, this is not an EC key */
return 0;
}
int evp_pkey_get_EC_KEY_curve_nid(const EVP_PKEY *pkey)
{
int ret = NID_undef;
if (pkey->keymgmt == NULL) {
if (EVP_PKEY_base_id(pkey) == EVP_PKEY_EC) {
EC_KEY *ec = EVP_PKEY_get0_EC_KEY(pkey);
ret = EC_GROUP_get_curve_name(EC_KEY_get0_group(ec));
}
} else if (EVP_PKEY_is_a(pkey, "EC") || EVP_PKEY_is_a(pkey, "SM2")) {
char *curve_name = NULL;
ret = evp_keymgmt_export(pkey->keymgmt, pkey->keydata,
OSSL_KEYMGMT_SELECT_DOMAIN_PARAMETERS,
get_ec_curve_name_cb, &curve_name);
if (ret)
ret = ec_curve_name2nid(curve_name);
OPENSSL_free(curve_name);
}
return ret;
}
#endif
static int print_reset_indent(BIO **out, int pop_f_prefix, long saved_indent)
{
BIO_set_indent(*out, saved_indent);
if (pop_f_prefix) {
BIO *next = BIO_pop(*out);
BIO_free(*out);
*out = next;
}
return 1;
}
static int print_set_indent(BIO **out, int *pop_f_prefix, long *saved_indent,
long indent)
{
*pop_f_prefix = 0;
*saved_indent = 0;
if (indent > 0) {
long i = BIO_get_indent(*out);
*saved_indent = (i < 0 ? 0 : i);
if (BIO_set_indent(*out, indent) <= 0) {
if ((*out = BIO_push(BIO_new(BIO_f_prefix()), *out)) == NULL)
return 0;
*pop_f_prefix = 1;
}
if (BIO_set_indent(*out, indent) <= 0) {
print_reset_indent(out, *pop_f_prefix, *saved_indent);
return 0;
}
}
return 1;
}
static int unsup_alg(BIO *out, const EVP_PKEY *pkey, int indent,
const char *kstr)
{
return BIO_indent(out, indent, 128)
&& BIO_printf(out, "%s algorithm \"%s\" unsupported\n",
kstr, OBJ_nid2ln(pkey->type)) > 0;
}
static int print_pkey(const EVP_PKEY *pkey, BIO *out, int indent,
const char *propquery /* For provided serialization */,
int (*legacy_print)(BIO *out, const EVP_PKEY *pkey,
int indent, ASN1_PCTX *pctx),
ASN1_PCTX *legacy_pctx /* For legacy print */)
{
int pop_f_prefix;
long saved_indent;
OSSL_SERIALIZER_CTX *ctx = NULL;
int ret = -2; /* default to unsupported */
if (!print_set_indent(&out, &pop_f_prefix, &saved_indent, indent))
return 0;
ctx = OSSL_SERIALIZER_CTX_new_by_EVP_PKEY(pkey, propquery);
if (OSSL_SERIALIZER_CTX_get_serializer(ctx) != NULL)
ret = OSSL_SERIALIZER_to_bio(ctx, out);
OSSL_SERIALIZER_CTX_free(ctx);
if (ret != -2)
goto end;
/* legacy fallback */
if (legacy_print != NULL)
ret = legacy_print(out, pkey, 0, legacy_pctx);
else
ret = unsup_alg(out, pkey, 0, "Public Key");
end:
print_reset_indent(&out, pop_f_prefix, saved_indent);
return ret;
}
int EVP_PKEY_print_public(BIO *out, const EVP_PKEY *pkey,
int indent, ASN1_PCTX *pctx)
{
return print_pkey(pkey, out, indent, OSSL_SERIALIZER_PUBKEY_TO_TEXT_PQ,
(pkey->ameth != NULL ? pkey->ameth->pub_print : NULL),
pctx);
}
int EVP_PKEY_print_private(BIO *out, const EVP_PKEY *pkey,
int indent, ASN1_PCTX *pctx)
{
return print_pkey(pkey, out, indent, OSSL_SERIALIZER_PrivateKey_TO_TEXT_PQ,
(pkey->ameth != NULL ? pkey->ameth->priv_print : NULL),
pctx);
}
int EVP_PKEY_print_params(BIO *out, const EVP_PKEY *pkey,
int indent, ASN1_PCTX *pctx)
{
return print_pkey(pkey, out, indent, OSSL_SERIALIZER_Parameters_TO_TEXT_PQ,
(pkey->ameth != NULL ? pkey->ameth->param_print : NULL),
pctx);
}
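/*
 * Illustrative usage sketch, not part of the original file: dumping a key in
 * human-readable form to stdout with a 4-column indent.  |pkey| is a
 * hypothetical key supplied by the caller.
 *
 *     BIO *bio = BIO_new_fp(stdout, BIO_NOCLOSE);
 *
 *     if (bio != NULL) {
 *         EVP_PKEY_print_public(bio, pkey, 4, NULL);
 *         EVP_PKEY_print_private(bio, pkey, 4, NULL);
 *         BIO_free(bio);
 *     }
 */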
static int legacy_asn1_ctrl_to_param(EVP_PKEY *pkey, int op,
int arg1, void *arg2)
{
if (pkey->keymgmt == NULL)
return 0;
switch (op) {
case ASN1_PKEY_CTRL_DEFAULT_MD_NID:
{
char mdname[80] = "";
int nid;
int rv = EVP_PKEY_get_default_digest_name(pkey, mdname,
sizeof(mdname));
if (rv <= 0)
return rv;
nid = OBJ_sn2nid(mdname);
if (nid == NID_undef)
nid = OBJ_ln2nid(mdname);
if (nid == NID_undef)
return 0;
*(int *)arg2 = nid;
return 1;
}
default:
return -2;
}
}
static int evp_pkey_asn1_ctrl(EVP_PKEY *pkey, int op, int arg1, void *arg2)
{
if (pkey->ameth == NULL)
return legacy_asn1_ctrl_to_param(pkey, op, arg1, arg2);
if (pkey->ameth->pkey_ctrl == NULL)
return -2;
return pkey->ameth->pkey_ctrl(pkey, op, arg1, arg2);
}
int EVP_PKEY_get_default_digest_nid(EVP_PKEY *pkey, int *pnid)
{
return evp_pkey_asn1_ctrl(pkey, ASN1_PKEY_CTRL_DEFAULT_MD_NID, 0, pnid);
}
int EVP_PKEY_get_default_digest_name(EVP_PKEY *pkey,
char *mdname, size_t mdname_sz)
{
if (pkey->ameth == NULL) {
OSSL_PARAM params[3];
char mddefault[100] = "";
char mdmandatory[100] = "";
params[0] =
OSSL_PARAM_construct_utf8_string(OSSL_PKEY_PARAM_DEFAULT_DIGEST,
mddefault, sizeof(mddefault));
params[1] =
OSSL_PARAM_construct_utf8_string(OSSL_PKEY_PARAM_MANDATORY_DIGEST,
mdmandatory,
sizeof(mdmandatory));
params[2] = OSSL_PARAM_construct_end();
if (!evp_keymgmt_get_params(pkey->keymgmt, pkey->keydata, params))
return 0;
if (mdmandatory[0] != '\0') {
OPENSSL_strlcpy(mdname, mdmandatory, mdname_sz);
return 2;
}
OPENSSL_strlcpy(mdname, mddefault, mdname_sz);
return 1;
}
{
int nid = NID_undef;
int rv = EVP_PKEY_get_default_digest_nid(pkey, &nid);
const char *name = rv > 0 ? OBJ_nid2sn(nid) : NULL;
if (rv > 0)
OPENSSL_strlcpy(mdname, name, mdname_sz);
return rv;
}
}
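/*
 * Illustrative usage sketch, not part of the original file: querying the
 * digest a key prefers.  A return value of 2 means the digest is mandatory,
 * 1 means it is only a default; |pkey| is a hypothetical key.
 *
 *     char mdname[80];
 *     int rv = EVP_PKEY_get_default_digest_name(pkey, mdname, sizeof(mdname));
 *
 *     if (rv > 0)
 *         printf("digest: %s%s\n", mdname, rv == 2 ? " (mandatory)" : "");
 */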
int EVP_PKEY_supports_digest_nid(EVP_PKEY *pkey, int nid)
{
int rv, default_nid;
rv = evp_pkey_asn1_ctrl(pkey, ASN1_PKEY_CTRL_SUPPORTS_MD_NID, nid, NULL);
if (rv == -2) {
/*
* If there is a mandatory default digest and this isn't it, then
* the answer is 'no'.
*/
rv = EVP_PKEY_get_default_digest_nid(pkey, &default_nid);
if (rv == 2)
return (nid == default_nid);
/* zero is an error from EVP_PKEY_get_default_digest_nid() */
if (rv == 0)
return -1;
}
return rv;
}
int EVP_PKEY_set1_tls_encodedpoint(EVP_PKEY *pkey,
const unsigned char *pt, size_t ptlen)
{
if (ptlen > INT_MAX)
return 0;
if (evp_pkey_asn1_ctrl(pkey, ASN1_PKEY_CTRL_SET1_TLS_ENCPT, ptlen,
(void *)pt) <= 0)
return 0;
return 1;
}
size_t EVP_PKEY_get1_tls_encodedpoint(EVP_PKEY *pkey, unsigned char **ppt)
{
int rv;
rv = evp_pkey_asn1_ctrl(pkey, ASN1_PKEY_CTRL_GET1_TLS_ENCPT, 0, ppt);
if (rv <= 0)
return 0;
return rv;
}
#endif /* FIPS_MODE */
/*- All methods below can also be used in FIPS_MODE */
EVP_PKEY *EVP_PKEY_new(void)
{
EVP_PKEY *ret = OPENSSL_zalloc(sizeof(*ret));
if (ret == NULL) {
EVPerr(EVP_F_EVP_PKEY_NEW, ERR_R_MALLOC_FAILURE);
return NULL;
}
ret->type = EVP_PKEY_NONE;
ret->save_type = EVP_PKEY_NONE;
ret->references = 1;
ret->save_parameters = 1;
ret->lock = CRYPTO_THREAD_lock_new();
if (ret->lock == NULL) {
EVPerr(EVP_F_EVP_PKEY_NEW, ERR_R_MALLOC_FAILURE);
goto err;
}
#ifndef FIPS_MODE
if (!CRYPTO_new_ex_data(CRYPTO_EX_INDEX_EVP_PKEY, ret, &ret->ex_data)) {
EVPerr(EVP_F_EVP_PKEY_NEW, ERR_R_MALLOC_FAILURE);
goto err;
}
#endif
return ret;
err:
CRYPTO_THREAD_lock_free(ret->lock);
OPENSSL_free(ret);
return NULL;
}
/*
* Setup a public key management method.
*
* For legacy keys, either |type| or |str| is expected to have the type
* information. In this case, the setup consists of finding an ASN1 method
* and potentially an ENGINE, and setting those fields in |pkey|.
*
* For provider side keys, |keymgmt| is expected to be non-NULL. In this
* case, the setup consists of setting the |keymgmt| field in |pkey|.
*
* If |pkey| is NULL, just return 1 if the key management method exists,
* and 0 if it doesn't.
*/
static int pkey_set_type(EVP_PKEY *pkey, ENGINE *e, int type, const char *str,
int len, EVP_KEYMGMT *keymgmt)
{
#ifndef FIPS_MODE
const EVP_PKEY_ASN1_METHOD *ameth = NULL;
ENGINE **eptr = (e == NULL) ? &e : NULL;
#endif
/*
* A setup can't install both legacy and provider side methods;
* that is forbidden.
*/
if (!ossl_assert(type == EVP_PKEY_NONE || keymgmt == NULL)
|| !ossl_assert(e == NULL || keymgmt == NULL)) {
ERR_raise(ERR_LIB_EVP, ERR_R_INTERNAL_ERROR);
return 0;
}
if (pkey != NULL) {
int free_it = 0;
#ifndef FIPS_MODE
free_it = free_it || pkey->pkey.ptr != NULL;
#endif
free_it = free_it || pkey->keydata != NULL;
if (free_it)
evp_pkey_free_it(pkey);
#ifndef FIPS_MODE
/*
* If key type matches and a method exists then this lookup has
* succeeded once so just indicate success.
*/
if (pkey->type != EVP_PKEY_NONE
&& type == pkey->save_type
&& pkey->ameth != NULL)
return 1;
# ifndef OPENSSL_NO_ENGINE
/* If we have ENGINEs release them */
ENGINE_finish(pkey->engine);
pkey->engine = NULL;
ENGINE_finish(pkey->pmeth_engine);
pkey->pmeth_engine = NULL;
# endif
#endif
}
#ifndef FIPS_MODE
if (str != NULL)
ameth = EVP_PKEY_asn1_find_str(eptr, str, len);
else if (type != EVP_PKEY_NONE)
ameth = EVP_PKEY_asn1_find(eptr, type);
# ifndef OPENSSL_NO_ENGINE
if (pkey == NULL && eptr != NULL)
ENGINE_finish(e);
# endif
#endif
{
int check = 1;
#ifndef FIPS_MODE
check = check && ameth == NULL;
#endif
check = check && keymgmt == NULL;
if (check) {
EVPerr(EVP_F_PKEY_SET_TYPE, EVP_R_UNSUPPORTED_ALGORITHM);
return 0;
}
}
if (pkey != NULL) {
if (keymgmt != NULL && !EVP_KEYMGMT_up_ref(keymgmt)) {
ERR_raise(ERR_LIB_EVP, ERR_R_INTERNAL_ERROR);
return 0;
}
pkey->keymgmt = keymgmt;
pkey->save_type = type;
pkey->type = type;
#ifndef FIPS_MODE
/*
* If the internal "origin" key is provider side, don't save |ameth|.
* The main reason is that |ameth| is one factor to detect that the
* internal "origin" key is a legacy one.
*/
if (keymgmt == NULL)
pkey->ameth = ameth;
pkey->engine = e;
/*
* The EVP_PKEY_ASN1_METHOD |pkey_id| serves different purposes,
* depending on if we're setting this key to contain a legacy or
* a provider side "origin" key. For a legacy key, we assign it
* to the |type| field, but for a provider side key, we assign it
* to the |save_type| field, because |type| is supposed to be set
* to EVP_PKEY_NONE in that case.
*/
if (keymgmt != NULL)
pkey->save_type = ameth->pkey_id;
else if (pkey->ameth != NULL)
pkey->type = ameth->pkey_id;
#endif
}
return 1;
}
#ifndef FIPS_MODE
static void find_ameth(const char *name, void *data)
{
const char **str = data;
/*
* The error messages from pkey_set_type() are uninteresting here,
* and misleading.
*/
ERR_set_mark();
if (pkey_set_type(NULL, NULL, EVP_PKEY_NONE, name, strlen(name),
NULL)) {
if (str[0] == NULL)
str[0] = name;
else if (str[1] == NULL)
str[1] = name;
}
ERR_pop_to_mark();
}
#endif
int EVP_PKEY_set_type_by_keymgmt(EVP_PKEY *pkey, EVP_KEYMGMT *keymgmt)
{
#ifndef FIPS_MODE
# define EVP_PKEY_TYPE_STR str[0]
# define EVP_PKEY_TYPE_STRLEN (str[0] == NULL ? -1 : (int)strlen(str[0]))
/*
* Find at most two strings that have an associated EVP_PKEY_ASN1_METHOD.
* Ideally, only one should be found. If two (or more) are found, the
* match is ambiguous. This should never happen, but...
*/
const char *str[2] = { NULL, NULL };
EVP_KEYMGMT_names_do_all(keymgmt, find_ameth, &str);
if (str[1] != NULL) {
ERR_raise(ERR_LIB_EVP, ERR_R_INTERNAL_ERROR);
return 0;
}
#else
# define EVP_PKEY_TYPE_STR NULL
# define EVP_PKEY_TYPE_STRLEN -1
#endif
return pkey_set_type(pkey, NULL, EVP_PKEY_NONE,
EVP_PKEY_TYPE_STR, EVP_PKEY_TYPE_STRLEN,
keymgmt);
#undef EVP_PKEY_TYPE_STR
#undef EVP_PKEY_TYPE_STRLEN
}
int EVP_PKEY_up_ref(EVP_PKEY *pkey)
{
int i;
if (CRYPTO_UP_REF(&pkey->references, &i, pkey->lock) <= 0)
return 0;
REF_PRINT_COUNT("EVP_PKEY", pkey);
REF_ASSERT_ISNT(i < 2);
return ((i > 1) ? 1 : 0);
}
#ifndef FIPS_MODE
void evp_pkey_free_legacy(EVP_PKEY *x)
{
if (x->ameth != NULL) {
if (x->ameth->pkey_free != NULL)
x->ameth->pkey_free(x);
x->pkey.ptr = NULL;
}
# ifndef OPENSSL_NO_ENGINE
ENGINE_finish(x->engine);
x->engine = NULL;
ENGINE_finish(x->pmeth_engine);
x->pmeth_engine = NULL;
# endif
x->type = EVP_PKEY_NONE;
}
#endif /* FIPS_MODE */
static void evp_pkey_free_it(EVP_PKEY *x)
{
/* internal function; x is never NULL */
evp_keymgmt_util_clear_operation_cache(x);
#ifndef FIPS_MODE
evp_pkey_free_legacy(x);
#endif
if (x->keymgmt != NULL) {
evp_keymgmt_freedata(x->keymgmt, x->keydata);
EVP_KEYMGMT_free(x->keymgmt);
x->keymgmt = NULL;
x->keydata = NULL;
}
}
void EVP_PKEY_free(EVP_PKEY *x)
{
int i;
if (x == NULL)
return;
CRYPTO_DOWN_REF(&x->references, &i, x->lock);
REF_PRINT_COUNT("EVP_PKEY", x);
if (i > 0)
return;
REF_ASSERT_ISNT(i < 0);
evp_pkey_free_it(x);
#ifndef FIPS_MODE
CRYPTO_free_ex_data(CRYPTO_EX_INDEX_EVP_PKEY, x, &x->ex_data);
#endif
CRYPTO_THREAD_lock_free(x->lock);
#ifndef FIPS_MODE
sk_X509_ATTRIBUTE_pop_free(x->attributes, X509_ATTRIBUTE_free);
#endif
OPENSSL_free(x);
}
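/*
 * Illustrative note, not part of the original file: EVP_PKEY objects are
 * reference counted, so code that stores a key it did not create takes its
 * own reference and later releases it with EVP_PKEY_free().  The structure
 * "struct app_ctx_st" is a hypothetical caller-side type.
 *
 *     int app_store_key(struct app_ctx_st *actx, EVP_PKEY *pkey)
 *     {
 *         if (!EVP_PKEY_up_ref(pkey))
 *             return 0;
 *         EVP_PKEY_free(actx->pkey);   // drop any previously stored key
 *         actx->pkey = pkey;
 *         return 1;
 *     }
 */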
int EVP_PKEY_size(const EVP_PKEY *pkey)
{
int size = 0;
if (pkey != NULL) {
size = pkey->cache.size;
#ifndef FIPS_MODE
if (pkey->ameth != NULL && pkey->ameth->pkey_size != NULL)
size = pkey->ameth->pkey_size(pkey);
#endif
}
return size;
}
void *evp_pkey_export_to_provider(EVP_PKEY *pk, OPENSSL_CTX *libctx,
EVP_KEYMGMT **keymgmt,
const char *propquery)
{
EVP_KEYMGMT *allocated_keymgmt = NULL;
EVP_KEYMGMT *tmp_keymgmt = NULL;
void *keydata = NULL;
int check;
if (pk == NULL)
return NULL;
/* No key data => nothing to export */
check = 1;
#ifndef FIPS_MODE
check = check && pk->pkey.ptr == NULL;
#endif
check = check && pk->keydata == NULL;
if (check)
return NULL;
#ifndef FIPS_MODE
if (pk->pkey.ptr != NULL) {
/*
* If the legacy key doesn't have a dirty counter or an export function,
* give up.
*/
if (pk->ameth->dirty_cnt == NULL || pk->ameth->export_to == NULL)
return NULL;
}
#endif
if (keymgmt != NULL) {
tmp_keymgmt = *keymgmt;
*keymgmt = NULL;
}
/*
* If no keymgmt was given or found, get a default keymgmt. We do so by
* letting EVP_PKEY_CTX_new_from_pkey() do it for us, then we steal it.
*/
if (tmp_keymgmt == NULL) {
EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new_from_pkey(libctx, pk, propquery);
tmp_keymgmt = ctx->keymgmt;
ctx->keymgmt = NULL;
EVP_PKEY_CTX_free(ctx);
}
/* If there's still no keymgmt to be had, give up */
if (tmp_keymgmt == NULL)
goto end;
#ifndef FIPS_MODE
if (pk->pkey.ptr != NULL) {
size_t i = 0;
/*
* If the legacy "origin" hasn't changed since last time, we try
* to find our keymgmt in the operation cache. If it has changed,
* |i| remains zero, and we will clear the cache further down.
*/
if (pk->ameth->dirty_cnt(pk) == pk->dirty_cnt_copy) {
i = evp_keymgmt_util_find_operation_cache_index(pk, tmp_keymgmt);
/*
* If |tmp_keymgmt| is present in the operation cache, it means
* that export doesn't need to be redone. In that case, we take
* token copies of the cached pointers, to have token success
* values to return.
*/
if (i < OSSL_NELEM(pk->operation_cache)
&& pk->operation_cache[i].keymgmt != NULL) {
keydata = pk->operation_cache[i].keydata;
goto end;
}
}
/*
* TODO(3.0) Right now, we assume we have ample space. We will have
* to think about a cache aging scheme, though, if |i| indexes outside
* the array.
*/
if (!ossl_assert(i < OSSL_NELEM(pk->operation_cache)))
goto end;
/* Make sure that the keymgmt key type matches the legacy NID */
if (!ossl_assert(EVP_KEYMGMT_is_a(tmp_keymgmt, OBJ_nid2sn(pk->type))))
goto end;
if ((keydata = evp_keymgmt_newdata(tmp_keymgmt)) == NULL)
goto end;
if (!pk->ameth->export_to(pk, keydata, tmp_keymgmt, libctx, propquery)) {
evp_keymgmt_freedata(tmp_keymgmt, keydata);
keydata = NULL;
goto end;
}
/*
* If the dirty counter changed since last time, then clear the
* operation cache. In that case, we know that |i| is zero. Just
* in case this is a re-export, we increment then decrement the
* keymgmt reference counter.
*/
if (!EVP_KEYMGMT_up_ref(tmp_keymgmt)) { /* refcnt++ */
evp_keymgmt_freedata(tmp_keymgmt, keydata);
keydata = NULL;
goto end;
}
if (pk->ameth->dirty_cnt(pk) != pk->dirty_cnt_copy)
evp_keymgmt_util_clear_operation_cache(pk);
EVP_KEYMGMT_free(tmp_keymgmt); /* refcnt-- */
/* Add the new export to the operation cache */
if (!evp_keymgmt_util_cache_keydata(pk, i, tmp_keymgmt, keydata)) {
evp_keymgmt_freedata(tmp_keymgmt, keydata);
keydata = NULL;
goto end;
}
/* Synchronize the dirty count */
pk->dirty_cnt_copy = pk->ameth->dirty_cnt(pk);
goto end;
}
#endif /* FIPS_MODE */
keydata = evp_keymgmt_util_export_to_provider(pk, tmp_keymgmt);
end:
/*
* If nothing was exported, |tmp_keymgmt| might point at a freed
* EVP_KEYMGMT, so we clear it to be safe. It shouldn't be useful for
* the caller either way in that case.
*/
if (keydata == NULL)
tmp_keymgmt = NULL;
if (keymgmt != NULL)
*keymgmt = tmp_keymgmt;
EVP_KEYMGMT_free(allocated_keymgmt);
return keydata;
}
#ifndef FIPS_MODE
int evp_pkey_downgrade(EVP_PKEY *pk)
{
EVP_KEYMGMT *keymgmt = pk->keymgmt;
void *keydata = pk->keydata;
int type = pk->save_type;
const char *keytype = NULL;
/* If this isn't a provider side key, we're done */
if (keymgmt == NULL)
return 1;
/* Get the key type name for error reporting */
if (type != EVP_PKEY_NONE)
keytype = OBJ_nid2sn(type);
else
keytype =
evp_first_name(EVP_KEYMGMT_provider(keymgmt), keymgmt->name_id);
/*
* |save_type| was set when any of the EVP_PKEY_set_type functions
* was called. It was set to EVP_PKEY_NONE if the key type wasn't
* recognised as any of the legacy key types, in which case the
* downgrade isn't possible.
*/
if (type == EVP_PKEY_NONE) {
ERR_raise_data(ERR_LIB_EVP, EVP_R_UNKNOWN_KEY_TYPE,
"key type = %s, can't downgrade", keytype);
return 0;
}
/*
* To be able to downgrade, we steal the provider side "origin" keymgmt
* and keydata. We've already grabbed the pointers, so all we need to
* do is clear those pointers in |pk| and then call evp_pkey_free_it().
* That way, we can restore |pk| if we need to.
*/
pk->keymgmt = NULL;
pk->keydata = NULL;
evp_pkey_free_it(pk);
if (EVP_PKEY_set_type(pk, type)) {
/* If the key is typed but empty, we're done */
if (keydata == NULL) {
/* We're dropping the EVP_KEYMGMT */
EVP_KEYMGMT_free(keymgmt);
return 1;
}
if (pk->ameth->import_from == NULL) {
ERR_raise_data(ERR_LIB_EVP, EVP_R_NO_IMPORT_FUNCTION,
"key type = %s", keytype);
} else {
/*
* We perform the export in the same libctx as the keymgmt that we
* are using.
*/
OPENSSL_CTX *libctx = ossl_provider_library_context(keymgmt->prov);
EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new_from_pkey(libctx, pk, NULL);
if (pctx == NULL)
ERR_raise(ERR_LIB_EVP, ERR_R_MALLOC_FAILURE);
if (pctx != NULL
&& evp_keymgmt_export(keymgmt, keydata,
OSSL_KEYMGMT_SELECT_ALL,
pk->ameth->import_from, pctx)) {
/*
* Save the provider side data in the operation cache, so they'll
* find it again. evp_pkey_free_it() cleared the cache, so it's
* safe to assume slot zero is free.
* Note that evp_keymgmt_util_cache_keydata() increments keymgmt's
* reference count.
*/
evp_keymgmt_util_cache_keydata(pk, 0, keymgmt, keydata);
EVP_PKEY_CTX_free(pctx);
/* Synchronize the dirty count */
pk->dirty_cnt_copy = pk->ameth->dirty_cnt(pk);
/* evp_keymgmt_export() increased the refcount... */
EVP_KEYMGMT_free(keymgmt);
return 1;
}
EVP_PKEY_CTX_free(pctx);
}
ERR_raise_data(ERR_LIB_EVP, EVP_R_KEYMGMT_EXPORT_FAILURE,
"key type = %s", keytype);
}
/*
* Something went wrong. This could for example happen if the keymgmt
* turns out to be an HSM implementation that refuses to let go of some
* of the key data, typically the private bits. In this case, we restore
* the provider side internal "origin" and leave it at that.
*/
if (!ossl_assert(EVP_PKEY_set_type_by_keymgmt(pk, keymgmt))) {
/* This should not be possible */
ERR_raise(ERR_LIB_EVP, ERR_R_INTERNAL_ERROR);
return 0;
}
/* EVP_PKEY_set_type_by_keymgmt() increased the refcount... */
EVP_KEYMGMT_free(keymgmt);
pk->keydata = keydata;
evp_keymgmt_util_cache_keyinfo(pk);
return 0; /* No downgrade, but at least the key is restored */
}
#endif /* FIPS_MODE */
const OSSL_PARAM *EVP_PKEY_gettable_params(EVP_PKEY *pkey)
{
if (pkey == NULL
|| pkey->keymgmt == NULL
|| pkey->keydata == NULL)
return 0;
return evp_keymgmt_gettable_params(pkey->keymgmt);
}
/*
* For the following methods param->return_size is set to a value
* larger than can be returned by the call to evp_keymgmt_get_params().
* If it is still this value after the call, the parameter was ignored,
* and in that case an error is returned.
*/
int EVP_PKEY_get_bn_param(EVP_PKEY *pkey, const char *key_name, BIGNUM **bn)
{
int ret = 0;
OSSL_PARAM params[2];
unsigned char buffer[2048];
/*
* Use -1 as the "not set" sentinel here instead of sizeof(buffer) + 1,
* since -1 is less likely to be a valid return size.
*/
const size_t not_set = (size_t)-1;
unsigned char *buf = NULL;
size_t buf_sz = 0;
if (pkey == NULL
|| pkey->keymgmt == NULL
|| pkey->keydata == NULL
|| key_name == NULL
|| bn == NULL)
return 0;
memset(buffer, 0, sizeof(buffer));
params[0] = OSSL_PARAM_construct_BN(key_name, buffer, sizeof(buffer));
/* If the return_size is still not_set then we know it was not found */
params[0].return_size = not_set;
params[1] = OSSL_PARAM_construct_end();
if (!evp_keymgmt_get_params(pkey->keymgmt, pkey->keydata, params)) {
if (params[0].return_size == not_set
|| params[0].return_size == 0)
return 0;
buf_sz = params[0].return_size;
/*
* If it failed because the buffer was too small then allocate the
* required buffer size and retry.
*/
buf = OPENSSL_zalloc(buf_sz);
if (buf == NULL)
return 0;
params[0].data = buf;
params[0].data_size = buf_sz;
if (!evp_keymgmt_get_params(pkey->keymgmt, pkey->keydata, params))
goto err;
}
/* Fail if the param was not found */
if (params[0].return_size == not_set)
goto err;
ret = OSSL_PARAM_get_BN(params, bn);
err:
OPENSSL_free(buf);
return ret;
}
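/*
 * Illustrative usage sketch, not part of the original file: fetching the RSA
 * modulus of a provider-side key as a BIGNUM, assuming the standard
 * OSSL_PKEY_PARAM_RSA_N parameter name from <openssl/core_names.h>.
 *
 *     BIGNUM *n = NULL;
 *
 *     if (EVP_PKEY_get_bn_param(pkey, OSSL_PKEY_PARAM_RSA_N, &n)) {
 *         // ... use |n| ...
 *         BN_free(n);
 *     }
 */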
int EVP_PKEY_get_octet_string_param(EVP_PKEY *pkey, const char *key_name,
unsigned char *buf, size_t max_buf_sz,
size_t *out_sz)
{
OSSL_PARAM params[2];
const size_t not_set = max_buf_sz + 1;
if (pkey == NULL
|| pkey->keymgmt == NULL
|| pkey->keydata == NULL
|| key_name == NULL)
return 0;
params[0] = OSSL_PARAM_construct_octet_string(key_name, buf, max_buf_sz);
params[0].return_size = not_set;
params[1] = OSSL_PARAM_construct_end();
if (!evp_keymgmt_get_params(pkey->keymgmt, pkey->keydata, params))
return 0;
if (params[0].return_size == not_set)
return 0;
if (out_sz != NULL)
*out_sz = params[0].return_size;
return 1;
}
int EVP_PKEY_get_utf8_string_param(EVP_PKEY *pkey, const char *key_name,
char *str, size_t max_buf_sz,
size_t *out_sz)
{
OSSL_PARAM params[2];
const size_t not_set = max_buf_sz + 1;
if (pkey == NULL
|| pkey->keymgmt == NULL
|| pkey->keydata == NULL
|| key_name == NULL)
return 0;
params[0] = OSSL_PARAM_construct_utf8_string(key_name, str, max_buf_sz);
params[0].return_size = not_set;
params[1] = OSSL_PARAM_construct_end();
if (!evp_keymgmt_get_params(pkey->keymgmt, pkey->keydata, params))
return 0;
if (params[0].return_size == not_set)
return 0;
if (out_sz != NULL)
*out_sz = params[0].return_size;
return 1;
}
int EVP_PKEY_get_int_param(EVP_PKEY *pkey, const char *key_name, int *out)
{
OSSL_PARAM params[2];
const size_t not_set = sizeof(int) + 1;
if (pkey == NULL
|| pkey->keymgmt == NULL
|| pkey->keydata == NULL
|| key_name == NULL)
return 0;
params[0] = OSSL_PARAM_construct_int(key_name, out);
params[0].return_size = not_set;
params[1] = OSSL_PARAM_construct_end();
if (!evp_keymgmt_get_params(pkey->keymgmt, pkey->keydata, params))
return 0;
if (params[0].return_size == not_set)
return 0;
return 1;
}
int EVP_PKEY_get_size_t_param(EVP_PKEY *pkey, const char *key_name, size_t *out)
{
OSSL_PARAM params[2];
const size_t not_set = sizeof(size_t) + 1;
if (pkey == NULL
|| pkey->keymgmt == NULL
|| pkey->keydata == NULL
|| key_name == NULL)
return 0;
params[0] = OSSL_PARAM_construct_size_t(key_name, out);
params[0].return_size = not_set;
params[1] = OSSL_PARAM_construct_end();
if (!evp_keymgmt_get_params(pkey->keymgmt, pkey->keydata, params))
return 0;
if (params[0].return_size == not_set)
return 0;
return 1;
}
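/*
 * Illustrative usage sketch, not part of the original file: reading the curve
 * name of a provider-side EC key with the UTF8-string getter, using the same
 * OSSL_PKEY_PARAM_EC_NAME parameter referenced earlier in this file.
 *
 *     char curve[64];
 *     size_t curve_len = 0;
 *
 *     if (EVP_PKEY_get_utf8_string_param(pkey, OSSL_PKEY_PARAM_EC_NAME,
 *                                        curve, sizeof(curve), &curve_len))
 *         printf("curve: %s\n", curve);
 */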
859142.c
/*
* Active Template Library ActiveX functions (atl.dll)
*
* Copyright 2006 Andrey Turkin
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
*/
#include <stdarg.h>
#include <stdio.h>
#define COBJMACROS
#include "windef.h"
#include "winbase.h"
#include "winerror.h"
#include "winuser.h"
#include "wine/debug.h"
#include "objbase.h"
#include "objidl.h"
#include "ole2.h"
#include "exdisp.h"
#include "atlbase.h"
#include "atliface.h"
#include "atlwin.h"
#include "wine/unicode.h"
WINE_DEFAULT_DEBUG_CHANNEL(atl);
typedef struct IOCS {
const IOleClientSiteVtbl *lpOleClientSiteVtbl;
const IOleContainerVtbl *lpOleContainerVtbl;
const IOleInPlaceSiteWindowlessVtbl *lpOleInPlaceSiteWindowlessVtbl;
const IOleInPlaceFrameVtbl *lpOleInPlaceFrameVtbl;
const IOleControlSiteVtbl *lpOleControlSiteVtbl;
LONG ref;
HWND hWnd;
IOleObject *control;
RECT size;
WNDPROC OrigWndProc;
BOOL fActive, fInPlace, fWindowless;
} IOCS;
/**********************************************************************
* AtlAxWin class window procedure
*/
static LRESULT CALLBACK AtlAxWin_wndproc( HWND hWnd, UINT wMsg, WPARAM wParam, LPARAM lParam )
{
if ( wMsg == WM_CREATE )
{
DWORD len = GetWindowTextLengthW( hWnd ) + 1;
WCHAR *ptr = HeapAlloc( GetProcessHeap(), 0, len*sizeof(WCHAR) );
if (!ptr)
return 1;
GetWindowTextW( hWnd, ptr, len );
AtlAxCreateControlEx( ptr, hWnd, NULL, NULL, NULL, NULL, NULL );
HeapFree( GetProcessHeap(), 0, ptr );
return 0;
}
return DefWindowProcW( hWnd, wMsg, wParam, lParam );
}
/***********************************************************************
* AtlAxWinInit [ATL.@]
* Initializes the control-hosting code: registering the AtlAxWin,
* AtlAxWin7 and AtlAxWinLic7 window classes and some messages.
*
* RETURNS
* TRUE or FALSE
*/
BOOL WINAPI AtlAxWinInit(void)
{
WNDCLASSEXW wcex;
const WCHAR AtlAxWin[] = {'A','t','l','A','x','W','i','n',0};
FIXME("semi-stub\n");
if ( FAILED( OleInitialize(NULL) ) )
return FALSE;
wcex.cbSize = sizeof(wcex);
wcex.style = 0;
wcex.cbClsExtra = 0;
wcex.cbWndExtra = 0;
wcex.hInstance = GetModuleHandleW( NULL );
wcex.hIcon = NULL;
wcex.hCursor = NULL;
wcex.hbrBackground = NULL;
wcex.lpszMenuName = NULL;
wcex.hIconSm = 0;
wcex.lpfnWndProc = AtlAxWin_wndproc;
wcex.lpszClassName = AtlAxWin;
if ( !RegisterClassExW( &wcex ) )
return FALSE;
return TRUE;
}
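/*
 * Illustrative usage sketch, not part of this file: once AtlAxWinInit() has
 * registered the class, creating an "AtlAxWin" window whose window text names
 * a control (CLSID string, ProgID or URL) makes the WM_CREATE handler above
 * instantiate and host that control.  |hwndParent| is a hypothetical parent
 * window owned by the caller.
 *
 *     static const WCHAR atlaxwinW[] = {'A','t','l','A','x','W','i','n',0};
 *     static const WCHAR progidW[] = {'S','h','e','l','l','.',
 *                                     'E','x','p','l','o','r','e','r',0};
 *
 *     AtlAxWinInit();
 *     CreateWindowW( atlaxwinW, progidW, WS_CHILD | WS_VISIBLE, 0, 0, 640, 480,
 *                    hwndParent, NULL, GetModuleHandleW( NULL ), NULL );
 */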
/***********************************************************************
* Atl container component implementation
*/
static ULONG IOCS_AddRef(IOCS *This)
{
ULONG ref = InterlockedIncrement(&This->ref);
TRACE( "(%p) : AddRef from %d\n", This, ref - 1 );
return ref;
}
#define THIS2IOLECLIENTSITE(This) ((IOleClientSite*)&(This)->lpOleClientSiteVtbl)
#define THIS2IOLECONTAINER(This) (&(This)->lpOleContainerVtbl)
#define THIS2IOLEINPLACESITEWINDOWLESS(This) (&(This)->lpOleInPlaceSiteWindowlessVtbl)
#define THIS2IOLEINPLACEFRAME(This) (&(This)->lpOleInPlaceFrameVtbl)
#define THIS2IOLECONTROLSITE(This) (&(This)->lpOleControlSiteVtbl)
static HRESULT IOCS_QueryInterface(IOCS *This, REFIID riid, void **ppv)
{
*ppv = NULL;
if ( IsEqualIID( &IID_IUnknown, riid )
|| IsEqualIID( &IID_IOleClientSite, riid ) )
{
*ppv = THIS2IOLECLIENTSITE(This);
} else if ( IsEqualIID( &IID_IOleContainer, riid ) )
{
*ppv = THIS2IOLECONTAINER(This);
} else if ( IsEqualIID( &IID_IOleInPlaceSite, riid ) || IsEqualIID( &IID_IOleInPlaceSiteEx, riid ) || IsEqualIID( &IID_IOleInPlaceSiteWindowless, riid ) )
{
*ppv = THIS2IOLEINPLACESITEWINDOWLESS(This);
} else if ( IsEqualIID( &IID_IOleInPlaceFrame, riid ) )
{
*ppv = THIS2IOLEINPLACEFRAME(This);
} else if ( IsEqualIID( &IID_IOleControlSite, riid ) )
{
*ppv = THIS2IOLECONTROLSITE(This);
}
if (*ppv)
{
IOCS_AddRef( This );
return S_OK;
}
WARN("unsupported interface %s\n", debugstr_guid( riid ) );
*ppv = NULL;
return E_NOINTERFACE;
}
static HRESULT IOCS_Detach( IOCS *This );
static ULONG IOCS_Release(IOCS *This)
{
ULONG ref = InterlockedDecrement(&This->ref);
TRACE( "(%p) : ReleaseRef to %d\n", This, ref );
if (!ref)
{
IOCS_Detach( This );
HeapFree( GetProcessHeap(), 0, This );
}
return ref;
}
#define DEFINE_THIS(cls,ifc,iface) ((cls*)((BYTE*)(iface)-offsetof(cls,lp ## ifc ## Vtbl)))
/****** IOleClientSite *****/
#undef IFACE2THIS
#define IFACE2THIS(iface) DEFINE_THIS(IOCS,OleClientSite, iface)
static HRESULT WINAPI OleClientSite_QueryInterface(IOleClientSite *iface, REFIID riid, void **ppv)
{
IOCS *This = IFACE2THIS(iface);
return IOCS_QueryInterface(This, riid, ppv);
}
static ULONG WINAPI OleClientSite_AddRef(IOleClientSite *iface)
{
IOCS *This = IFACE2THIS(iface);
return IOCS_AddRef(This);
}
static ULONG WINAPI OleClientSite_Release(IOleClientSite *iface)
{
IOCS *This = IFACE2THIS(iface);
return IOCS_Release(This);
}
static HRESULT WINAPI OleClientSite_SaveObject(IOleClientSite *iface)
{
IOCS *This = IFACE2THIS(iface);
FIXME( "(%p) - stub\n", This );
return E_NOTIMPL;
}
static HRESULT WINAPI OleClientSite_GetMoniker(IOleClientSite *iface, DWORD dwAssign, DWORD dwWhichMoniker, IMoniker **ppmk)
{
IOCS *This = IFACE2THIS(iface);
FIXME( "(%p, 0x%x, 0x%x, %p)\n", This, dwAssign, dwWhichMoniker, ppmk );
return E_NOTIMPL;
}
static HRESULT WINAPI OleClientSite_GetContainer(IOleClientSite *iface, IOleContainer **ppContainer)
{
IOCS *This = IFACE2THIS(iface);
TRACE( "(%p, %p)\n", This, ppContainer );
return OleClientSite_QueryInterface( iface, &IID_IOleContainer, (void**)ppContainer );
}
static HRESULT WINAPI OleClientSite_ShowObject(IOleClientSite *iface)
{
IOCS *This = IFACE2THIS(iface);
FIXME( "(%p) - stub\n", This );
return S_OK;
}
static HRESULT WINAPI OleClientSite_OnShowWindow(IOleClientSite *iface, BOOL fShow)
{
IOCS *This = IFACE2THIS(iface);
FIXME( "(%p, %s) - stub\n", This, fShow ? "TRUE" : "FALSE" );
return E_NOTIMPL;
}
static HRESULT WINAPI OleClientSite_RequestNewObjectLayout(IOleClientSite *iface)
{
IOCS *This = IFACE2THIS(iface);
FIXME( "(%p) - stub\n", This );
return E_NOTIMPL;
}
#undef IFACE2THIS
/****** IOleContainer *****/
#define IFACE2THIS(iface) DEFINE_THIS(IOCS, OleContainer, iface)
static HRESULT WINAPI OleContainer_QueryInterface( IOleContainer* iface, REFIID riid, void** ppv)
{
IOCS *This = IFACE2THIS(iface);
return IOCS_QueryInterface( This, riid, ppv );
}
static ULONG WINAPI OleContainer_AddRef(IOleContainer* iface)
{
IOCS *This = IFACE2THIS(iface);
return IOCS_AddRef(This);
}
static ULONG WINAPI OleContainer_Release(IOleContainer* iface)
{
IOCS *This = IFACE2THIS(iface);
return IOCS_Release(This);
}
static HRESULT WINAPI OleContainer_ParseDisplayName(IOleContainer* iface, IBindCtx* pbc,
LPOLESTR pszDisplayName, ULONG* pchEaten, IMoniker** ppmkOut)
{
IOCS *This = IFACE2THIS(iface);
FIXME( "(%p,%p,%s,%p,%p) - stub\n", This, pbc, debugstr_w(pszDisplayName), pchEaten, ppmkOut );
return E_NOTIMPL;
}
static HRESULT WINAPI OleContainer_EnumObjects(IOleContainer* iface, DWORD grfFlags, IEnumUnknown** ppenum)
{
IOCS *This = IFACE2THIS(iface);
FIXME( "(%p, %u, %p) - stub\n", This, grfFlags, ppenum );
return E_NOTIMPL;
}
static HRESULT WINAPI OleContainer_LockContainer(IOleContainer* iface, BOOL fLock)
{
IOCS *This = IFACE2THIS(iface);
FIXME( "(%p, %s) - stub\n", This, fLock?"TRUE":"FALSE" );
return E_NOTIMPL;
}
#undef IFACE2THIS
/****** IOleInPlaceSiteWindowless *******/
#define IFACE2THIS(iface) DEFINE_THIS(IOCS, OleInPlaceSiteWindowless, iface)
static HRESULT WINAPI OleInPlaceSiteWindowless_QueryInterface(IOleInPlaceSiteWindowless *iface, REFIID riid, void **ppv)
{
IOCS *This = IFACE2THIS(iface);
return IOCS_QueryInterface(This, riid, ppv);
}
static ULONG WINAPI OleInPlaceSiteWindowless_AddRef(IOleInPlaceSiteWindowless *iface)
{
IOCS *This = IFACE2THIS(iface);
return IOCS_AddRef(This);
}
static ULONG WINAPI OleInPlaceSiteWindowless_Release(IOleInPlaceSiteWindowless *iface)
{
IOCS *This = IFACE2THIS(iface);
return IOCS_Release(This);
}
static HRESULT WINAPI OleInPlaceSiteWindowless_GetWindow(IOleInPlaceSiteWindowless* iface, HWND* phwnd)
{
IOCS *This = IFACE2THIS(iface);
TRACE("(%p,%p)\n", This, phwnd);
*phwnd = This->hWnd;
return S_OK;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_ContextSensitiveHelp(IOleInPlaceSiteWindowless* iface, BOOL fEnterMode)
{
IOCS *This = IFACE2THIS(iface);
FIXME("(%p,%d) - stub\n", This, fEnterMode);
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_CanInPlaceActivate(IOleInPlaceSiteWindowless *iface)
{
IOCS *This = IFACE2THIS(iface);
TRACE("(%p)\n", This);
return S_OK;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_OnInPlaceActivate(IOleInPlaceSiteWindowless *iface)
{
IOCS *This = IFACE2THIS(iface);
TRACE("(%p)\n", This);
This->fInPlace = TRUE;
return S_OK;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_OnUIActivate(IOleInPlaceSiteWindowless *iface)
{
IOCS *This = IFACE2THIS(iface);
TRACE("(%p)\n", This);
return S_OK;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_GetWindowContext(IOleInPlaceSiteWindowless *iface,
IOleInPlaceFrame **ppFrame, IOleInPlaceUIWindow **ppDoc, LPRECT lprcPosRect,
LPRECT lprcClipRect, LPOLEINPLACEFRAMEINFO lpFrameInfo)
{
IOCS *This = IFACE2THIS(iface);
TRACE("(%p,%p,%p,%p,%p,%p)\n", This, ppFrame, ppDoc, lprcPosRect, lprcClipRect, lpFrameInfo);
if ( lprcClipRect )
*lprcClipRect = This->size;
if ( lprcPosRect )
*lprcPosRect = This->size;
if ( ppFrame )
{
IOCS_QueryInterface( This, &IID_IOleInPlaceFrame, (void**) ppFrame );
}
if ( ppDoc )
*ppDoc = NULL;
if ( lpFrameInfo )
{
lpFrameInfo->fMDIApp = FALSE;
lpFrameInfo->hwndFrame = This->hWnd;
lpFrameInfo->haccel = NULL;
lpFrameInfo->cAccelEntries = 0;
}
return S_OK;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_Scroll(IOleInPlaceSiteWindowless *iface, SIZE scrollExtent)
{
IOCS *This = IFACE2THIS(iface);
FIXME("(%p) - stub\n", This);
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_OnUIDeactivate(IOleInPlaceSiteWindowless *iface, BOOL fUndoable)
{
IOCS *This = IFACE2THIS(iface);
FIXME("(%p,%d) - stub\n", This, fUndoable);
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_OnInPlaceDeactivate(IOleInPlaceSiteWindowless *iface)
{
IOCS *This = IFACE2THIS(iface);
TRACE("(%p)\n", This);
This->fInPlace = This->fWindowless = FALSE;
return S_OK;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_DiscardUndoState(IOleInPlaceSiteWindowless *iface)
{
IOCS *This = IFACE2THIS(iface);
FIXME("(%p) - stub\n", This);
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_DeactivateAndUndo(IOleInPlaceSiteWindowless *iface)
{
IOCS *This = IFACE2THIS(iface);
FIXME("(%p) - stub\n", This);
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_OnPosRectChange(IOleInPlaceSiteWindowless *iface, LPCRECT lprcPosRect)
{
IOCS *This = IFACE2THIS(iface);
FIXME("(%p,%p) - stub\n", This, lprcPosRect);
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_OnInPlaceActivateEx( IOleInPlaceSiteWindowless *iface, BOOL* pfNoRedraw, DWORD dwFlags)
{
IOCS *This = IFACE2THIS(iface);
TRACE("\n");
This->fActive = This->fInPlace = TRUE;
if ( dwFlags & ACTIVATE_WINDOWLESS )
This->fWindowless = TRUE;
return S_OK;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_OnInPlaceDeactivateEx( IOleInPlaceSiteWindowless *iface, BOOL fNoRedraw)
{
IOCS *This = IFACE2THIS(iface);
TRACE("\n");
This->fActive = This->fInPlace = This->fWindowless = FALSE;
return S_OK;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_RequestUIActivate( IOleInPlaceSiteWindowless *iface)
{
FIXME("\n");
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_CanWindowlessActivate( IOleInPlaceSiteWindowless *iface)
{
FIXME("\n");
return S_OK;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_GetCapture( IOleInPlaceSiteWindowless *iface)
{
FIXME("\n");
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_SetCapture( IOleInPlaceSiteWindowless *iface, BOOL fCapture)
{
FIXME("\n");
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_GetFocus( IOleInPlaceSiteWindowless *iface)
{
FIXME("\n");
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_SetFocus( IOleInPlaceSiteWindowless *iface, BOOL fFocus)
{
FIXME("\n");
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_GetDC( IOleInPlaceSiteWindowless *iface, LPCRECT pRect, DWORD grfFlags, HDC* phDC)
{
FIXME("\n");
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_ReleaseDC( IOleInPlaceSiteWindowless *iface, HDC hDC)
{
FIXME("\n");
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_InvalidateRect( IOleInPlaceSiteWindowless *iface, LPCRECT pRect, BOOL fErase)
{
FIXME("\n");
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_InvalidateRgn( IOleInPlaceSiteWindowless *iface, HRGN hRGN, BOOL fErase)
{
FIXME("\n");
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_ScrollRect( IOleInPlaceSiteWindowless *iface, INT dx, INT dy, LPCRECT pRectScroll, LPCRECT pRectClip)
{
FIXME("\n");
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_AdjustRect( IOleInPlaceSiteWindowless *iface, LPRECT prc)
{
FIXME("\n");
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceSiteWindowless_OnDefWindowMessage( IOleInPlaceSiteWindowless *iface, UINT msg, WPARAM wParam, LPARAM lParam, LRESULT* plResult)
{
FIXME("\n");
return E_NOTIMPL;
}
#undef IFACE2THIS
/****** IOleInPlaceFrame *******/
#define IFACE2THIS(iface) DEFINE_THIS(IOCS, OleInPlaceFrame, iface)
static HRESULT WINAPI OleInPlaceFrame_QueryInterface(IOleInPlaceFrame *iface, REFIID riid, void **ppv)
{
IOCS *This = IFACE2THIS(iface);
return IOCS_QueryInterface(This, riid, ppv);
}
static ULONG WINAPI OleInPlaceFrame_AddRef(IOleInPlaceFrame *iface)
{
IOCS *This = IFACE2THIS(iface);
return IOCS_AddRef(This);
}
static ULONG WINAPI OleInPlaceFrame_Release(IOleInPlaceFrame *iface)
{
IOCS *This = IFACE2THIS(iface);
return IOCS_Release(This);
}
static HRESULT WINAPI OleInPlaceFrame_GetWindow(IOleInPlaceFrame *iface, HWND *phWnd)
{
IOCS *This = IFACE2THIS(iface);
TRACE( "(%p,%p)\n", This, phWnd );
*phWnd = This->hWnd;
return S_OK;
}
static HRESULT WINAPI OleInPlaceFrame_ContextSensitiveHelp(IOleInPlaceFrame *iface, BOOL fEnterMode)
{
IOCS *This = IFACE2THIS(iface);
FIXME( "(%p,%d) - stub\n", This, fEnterMode );
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceFrame_GetBorder(IOleInPlaceFrame *iface, LPRECT lprectBorder)
{
IOCS *This = IFACE2THIS(iface);
FIXME( "(%p,%p) - stub\n", This, lprectBorder );
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceFrame_RequestBorderSpace(IOleInPlaceFrame *iface, LPCBORDERWIDTHS pborderwidths)
{
IOCS *This = IFACE2THIS(iface);
FIXME( "(%p,%p) - stub\n", This, pborderwidths );
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceFrame_SetBorderSpace(IOleInPlaceFrame *iface, LPCBORDERWIDTHS pborderwidths)
{
IOCS *This = IFACE2THIS(iface);
FIXME( "(%p,%p) - stub\n", This, pborderwidths );
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceFrame_SetActiveObject(IOleInPlaceFrame *iface, IOleInPlaceActiveObject *pActiveObject, LPCOLESTR pszObjName)
{
IOCS *This = IFACE2THIS(iface);
FIXME( "(%p,%p,%s) - stub\n", This, pActiveObject, debugstr_w(pszObjName) );
return S_OK;
}
static HRESULT WINAPI OleInPlaceFrame_InsertMenus(IOleInPlaceFrame *iface, HMENU hmenuShared, LPOLEMENUGROUPWIDTHS lpMenuWidths)
{
IOCS *This = IFACE2THIS(iface);
FIXME( "(%p,%p,%p) - stub\n", This, hmenuShared, lpMenuWidths );
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceFrame_SetMenu(IOleInPlaceFrame *iface, HMENU hmenuShared, HOLEMENU holemenu, HWND hwndActiveObject)
{
IOCS *This = IFACE2THIS(iface);
FIXME( "(%p,%p,%p,%p) - stub\n", This, hmenuShared, holemenu, hwndActiveObject );
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceFrame_RemoveMenus(IOleInPlaceFrame *iface, HMENU hmenuShared)
{
IOCS *This = IFACE2THIS(iface);
FIXME( "(%p, %p) - stub\n", This, hmenuShared );
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceFrame_SetStatusText(IOleInPlaceFrame *iface, LPCOLESTR pszStatusText)
{
IOCS *This = IFACE2THIS(iface);
FIXME( "(%p, %s) - stub\n", This, debugstr_w( pszStatusText ) );
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceFrame_EnableModeless(IOleInPlaceFrame *iface, BOOL fEnable)
{
IOCS *This = IFACE2THIS(iface);
FIXME( "(%p, %d) - stub\n", This, fEnable );
return E_NOTIMPL;
}
static HRESULT WINAPI OleInPlaceFrame_TranslateAccelerator(IOleInPlaceFrame *iface, LPMSG lpmsg, WORD wID)
{
IOCS *This = IFACE2THIS(iface);
FIXME( "(%p, %p, %x) - stub\n", This, lpmsg, wID );
return E_NOTIMPL;
}
#undef IFACE2THIS
/****** IOleControlSite *******/
#define IFACE2THIS(iface) DEFINE_THIS(IOCS, OleControlSite, iface)
static HRESULT WINAPI OleControlSite_QueryInterface(IOleControlSite *iface, REFIID riid, void **ppv)
{
IOCS *This = IFACE2THIS(iface);
return IOCS_QueryInterface(This, riid, ppv);
}
static ULONG WINAPI OleControlSite_AddRef(IOleControlSite *iface)
{
IOCS *This = IFACE2THIS(iface);
return IOCS_AddRef(This);
}
static ULONG WINAPI OleControlSite_Release(IOleControlSite *iface)
{
IOCS *This = IFACE2THIS(iface);
return IOCS_Release(This);
}
static HRESULT WINAPI OleControlSite_OnControlInfoChanged( IOleControlSite* This)
{
FIXME( "\n" );
return E_NOTIMPL;
}
static HRESULT WINAPI OleControlSite_LockInPlaceActive( IOleControlSite* This, BOOL fLock)
{
FIXME( "\n" );
return E_NOTIMPL;
}
static HRESULT WINAPI OleControlSite_GetExtendedControl( IOleControlSite* This, IDispatch** ppDisp)
{
FIXME( "\n" );
return E_NOTIMPL;
}
static HRESULT WINAPI OleControlSite_TransformCoords( IOleControlSite* This, POINTL* pPtlHimetric, POINTF* pPtfContainer, DWORD dwFlags)
{
FIXME( "\n" );
return E_NOTIMPL;
}
static HRESULT WINAPI OleControlSite_TranslateAccelerator( IOleControlSite* This, MSG* pMsg, DWORD grfModifiers)
{
FIXME( "\n" );
return E_NOTIMPL;
}
static HRESULT WINAPI OleControlSite_OnFocus( IOleControlSite* This, BOOL fGotFocus)
{
FIXME( "\n" );
return E_NOTIMPL;
}
static HRESULT WINAPI OleControlSite_ShowPropertyFrame( IOleControlSite* This)
{
FIXME( "\n" );
return E_NOTIMPL;
}
#undef IFACE2THIS
static const IOleClientSiteVtbl OleClientSite_vtbl = {
OleClientSite_QueryInterface,
OleClientSite_AddRef,
OleClientSite_Release,
OleClientSite_SaveObject,
OleClientSite_GetMoniker,
OleClientSite_GetContainer,
OleClientSite_ShowObject,
OleClientSite_OnShowWindow,
OleClientSite_RequestNewObjectLayout
};
static const IOleContainerVtbl OleContainer_vtbl = {
OleContainer_QueryInterface,
OleContainer_AddRef,
OleContainer_Release,
OleContainer_ParseDisplayName,
OleContainer_EnumObjects,
OleContainer_LockContainer
};
static const IOleInPlaceSiteWindowlessVtbl OleInPlaceSiteWindowless_vtbl = {
OleInPlaceSiteWindowless_QueryInterface,
OleInPlaceSiteWindowless_AddRef,
OleInPlaceSiteWindowless_Release,
OleInPlaceSiteWindowless_GetWindow,
OleInPlaceSiteWindowless_ContextSensitiveHelp,
OleInPlaceSiteWindowless_CanInPlaceActivate,
OleInPlaceSiteWindowless_OnInPlaceActivate,
OleInPlaceSiteWindowless_OnUIActivate,
OleInPlaceSiteWindowless_GetWindowContext,
OleInPlaceSiteWindowless_Scroll,
OleInPlaceSiteWindowless_OnUIDeactivate,
OleInPlaceSiteWindowless_OnInPlaceDeactivate,
OleInPlaceSiteWindowless_DiscardUndoState,
OleInPlaceSiteWindowless_DeactivateAndUndo,
OleInPlaceSiteWindowless_OnPosRectChange,
OleInPlaceSiteWindowless_OnInPlaceActivateEx,
OleInPlaceSiteWindowless_OnInPlaceDeactivateEx,
OleInPlaceSiteWindowless_RequestUIActivate,
OleInPlaceSiteWindowless_CanWindowlessActivate,
OleInPlaceSiteWindowless_GetCapture,
OleInPlaceSiteWindowless_SetCapture,
OleInPlaceSiteWindowless_GetFocus,
OleInPlaceSiteWindowless_SetFocus,
OleInPlaceSiteWindowless_GetDC,
OleInPlaceSiteWindowless_ReleaseDC,
OleInPlaceSiteWindowless_InvalidateRect,
OleInPlaceSiteWindowless_InvalidateRgn,
OleInPlaceSiteWindowless_ScrollRect,
OleInPlaceSiteWindowless_AdjustRect,
OleInPlaceSiteWindowless_OnDefWindowMessage
};
static const IOleInPlaceFrameVtbl OleInPlaceFrame_vtbl =
{
OleInPlaceFrame_QueryInterface,
OleInPlaceFrame_AddRef,
OleInPlaceFrame_Release,
OleInPlaceFrame_GetWindow,
OleInPlaceFrame_ContextSensitiveHelp,
OleInPlaceFrame_GetBorder,
OleInPlaceFrame_RequestBorderSpace,
OleInPlaceFrame_SetBorderSpace,
OleInPlaceFrame_SetActiveObject,
OleInPlaceFrame_InsertMenus,
OleInPlaceFrame_SetMenu,
OleInPlaceFrame_RemoveMenus,
OleInPlaceFrame_SetStatusText,
OleInPlaceFrame_EnableModeless,
OleInPlaceFrame_TranslateAccelerator
};
static const IOleControlSiteVtbl OleControlSite_vtbl =
{
OleControlSite_QueryInterface,
OleControlSite_AddRef,
OleControlSite_Release,
OleControlSite_OnControlInfoChanged,
OleControlSite_LockInPlaceActive,
OleControlSite_GetExtendedControl,
OleControlSite_TransformCoords,
OleControlSite_TranslateAccelerator,
OleControlSite_OnFocus,
OleControlSite_ShowPropertyFrame
};
static HRESULT IOCS_Detach( IOCS *This ) /* remove subclassing */
{
if ( This->hWnd )
{
SetWindowLongPtrW( This->hWnd, GWLP_WNDPROC, (ULONG_PTR) This->OrigWndProc );
SetWindowLongPtrW( This->hWnd, GWLP_USERDATA, 0 );
This->hWnd = NULL;
}
if ( This->control )
{
IOleObject *control = This->control;
This->control = NULL;
IOleObject_SetClientSite( control, NULL );
IOleObject_Release( control );
}
return S_OK;
}
static void IOCS_OnSize( IOCS* This, LPCRECT rect )
{
SIZEL inPix, inHi;
This->size.left = rect->left; This->size.right = rect->right; This->size.top = rect->top; This->size.bottom = rect->bottom;
if ( !This->control )
return;
inPix.cx = rect->right - rect->left;
inPix.cy = rect->bottom - rect->top;
AtlPixelToHiMetric( &inPix, &inHi );
IOleObject_SetExtent( This->control, DVASPECT_CONTENT, &inHi );
if ( This->fInPlace )
{
IOleInPlaceObject *wl;
if ( SUCCEEDED( IOleObject_QueryInterface( This->control, &IID_IOleInPlaceObject, (void**)&wl ) ) )
{
IOleInPlaceObject_SetObjectRects( wl, rect, rect );
IOleInPlaceObject_Release( wl );
}
}
}
static void IOCS_OnShow( IOCS *This, BOOL fShow )
{
if (!This->control || This->fActive || !fShow )
return;
This->fActive = TRUE;
}
static void IOCS_OnDraw( IOCS *This )
{
IViewObject *view;
if ( !This->control || !This->fWindowless )
return;
if ( SUCCEEDED( IOleObject_QueryInterface( This->control, &IID_IViewObject, (void**)&view ) ) )
{
HDC dc = GetDC( This->hWnd );
RECTL rect;
rect.left = This->size.left; rect.top = This->size.top;
rect.bottom = This->size.bottom; rect.right = This->size.right;
IViewObject_Draw( view, DVASPECT_CONTENT, ~0, NULL, NULL, 0, dc, &rect, &rect, NULL, 0 );
IViewObject_Release( view );
ReleaseDC( This->hWnd, dc );
}
}
static LRESULT IOCS_OnWndProc( IOCS *This, HWND hWnd, UINT uMsg, WPARAM wParam, LPARAM lParam )
{
WNDPROC OrigWndProc = This->OrigWndProc;
switch( uMsg )
{
case WM_DESTROY:
IOCS_Detach( This );
break;
case WM_SIZE:
{
RECT r;
r.left = r.top = 0;
r.right = LOWORD( lParam );
r.bottom = HIWORD( lParam );
IOCS_OnSize( This, &r );
}
break;
case WM_SHOWWINDOW:
IOCS_OnShow( This, (BOOL) wParam );
break;
case WM_PAINT:
IOCS_OnDraw( This );
break;
}
return CallWindowProcW( OrigWndProc, hWnd, uMsg, wParam, lParam );
}
static LRESULT CALLBACK AtlHost_wndproc( HWND hWnd, UINT wMsg, WPARAM wParam, LPARAM lParam )
{
IOCS *This = (IOCS*) GetWindowLongPtrW( hWnd, GWLP_USERDATA );
return IOCS_OnWndProc( This, hWnd, wMsg, wParam, lParam );
}
static HRESULT IOCS_Attach( IOCS *This, HWND hWnd, IUnknown *pUnkControl ) /* subclass hWnd */
{
This->hWnd = hWnd;
IUnknown_QueryInterface( pUnkControl, &IID_IOleObject, (void**)&This->control );
IOleObject_SetClientSite( This->control, THIS2IOLECLIENTSITE( This ) );
SetWindowLongPtrW( hWnd, GWLP_USERDATA, (ULONG_PTR) This );
This->OrigWndProc = (WNDPROC)SetWindowLongPtrW( hWnd, GWLP_WNDPROC, (ULONG_PTR) AtlHost_wndproc );
return S_OK;
}
static HRESULT IOCS_Init( IOCS *This )
{
RECT rect;
static const WCHAR AXWIN[] = {'A','X','W','I','N',0};
IOleObject_SetHostNames( This->control, AXWIN, AXWIN );
GetClientRect( This->hWnd, &rect );
IOCS_OnSize( This, &rect );
IOleObject_DoVerb( This->control, OLEIVERB_INPLACEACTIVATE, NULL, THIS2IOLECLIENTSITE( This ), 0, This->hWnd, &rect );
return S_OK;
}
/**********************************************************************
* Create new instance of Atl host component and attach it to window *
*/
static HRESULT IOCS_Create( HWND hWnd, IUnknown *pUnkControl, IOCS **ppSite )
{
HRESULT hr;
IOCS *This;
*ppSite = NULL;
This = HeapAlloc(GetProcessHeap(), 0, sizeof(IOCS));
if (!This)
return E_OUTOFMEMORY;
This->lpOleClientSiteVtbl = &OleClientSite_vtbl;
This->lpOleContainerVtbl = &OleContainer_vtbl;
This->lpOleInPlaceSiteWindowlessVtbl = &OleInPlaceSiteWindowless_vtbl;
This->lpOleInPlaceFrameVtbl = &OleInPlaceFrame_vtbl;
This->lpOleControlSiteVtbl = &OleControlSite_vtbl;
This->ref = 1;
This->OrigWndProc = NULL;
This->hWnd = NULL;
This->fWindowless = This->fActive = This->fInPlace = FALSE;
hr = IOCS_Attach( This, hWnd, pUnkControl );
if ( SUCCEEDED( hr ) )
hr = IOCS_Init( This );
if ( SUCCEEDED( hr ) )
*ppSite = This;
else
IOCS_Release( This );
return hr;
}
/***********************************************************************
* AtlAxCreateControl [ATL.@]
*/
HRESULT WINAPI AtlAxCreateControl(LPCOLESTR lpszName, HWND hWnd,
IStream *pStream, IUnknown **ppUnkContainer)
{
return AtlAxCreateControlEx( lpszName, hWnd, pStream, ppUnkContainer,
NULL, NULL, NULL );
}
/***********************************************************************
* AtlAxCreateControlEx [ATL.@]
*
* REMARKS
* See http://www.codeproject.com/com/cwebpage.asp for some background
*
*/
HRESULT WINAPI AtlAxCreateControlEx(LPCOLESTR lpszName, HWND hWnd,
IStream *pStream, IUnknown **ppUnkContainer, IUnknown **ppUnkControl,
REFIID iidSink, IUnknown *punkSink)
{
CLSID controlId;
HRESULT hRes;
IOleObject *pControl;
IUnknown *pUnkControl;
IPersistStreamInit *pPSInit;
IUnknown *pContainer = NULL;
enum {IsGUID=0,IsHTML=1,IsURL=2} content;
TRACE("(%s %p %p %p %p %p %p)\n", debugstr_w(lpszName), hWnd, pStream,
ppUnkContainer, ppUnkControl, iidSink, punkSink);
hRes = CLSIDFromString( lpszName, &controlId );
if ( FAILED(hRes) )
hRes = CLSIDFromProgID( lpszName, &controlId );
if ( SUCCEEDED( hRes ) )
content = IsGUID;
else {
/* FIXME - check for MSHTML: prefix! */
content = IsURL;
controlId = CLSID_WebBrowser;
}
hRes = CoCreateInstance( &controlId, 0, CLSCTX_ALL, &IID_IOleObject,
(void**) &pControl );
if ( FAILED( hRes ) )
{
WARN( "cannot create ActiveX control %s instance - error 0x%08x\n",
debugstr_guid( &controlId ), hRes );
return hRes;
}
hRes = IOleObject_QueryInterface( pControl, &IID_IPersistStreamInit, (void**) &pPSInit );
if ( SUCCEEDED( hRes ) )
{
if (!pStream)
IPersistStreamInit_InitNew( pPSInit );
else
IPersistStreamInit_Load( pPSInit, pStream );
IPersistStreamInit_Release( pPSInit );
} else
WARN("cannot get IID_IPersistStreamInit out of control\n");
IOleObject_QueryInterface( pControl, &IID_IUnknown, (void**) &pUnkControl );
IOleObject_Release( pControl );
hRes = AtlAxAttachControl( pUnkControl, hWnd, &pContainer );
if ( FAILED( hRes ) )
WARN("cannot attach control to window\n");
if ( content == IsURL )
{
IWebBrowser2 *browser;
hRes = IOleObject_QueryInterface( pControl, &IID_IWebBrowser2, (void**) &browser );
if ( !browser )
WARN( "Cannot query IWebBrowser2 interface: %08x\n", hRes );
else {
VARIANT url;
IWebBrowser2_put_Visible( browser, VARIANT_TRUE ); /* it seems that native does this on URL (but not on MSHTML:) - why? */
V_VT(&url) = VT_BSTR;
V_BSTR(&url) = SysAllocString( lpszName );
hRes = IWebBrowser2_Navigate2( browser, &url, NULL, NULL, NULL, NULL );
if ( FAILED( hRes ) )
WARN( "IWebBrowser2::Navigate2 failed: %08x\n", hRes );
SysFreeString( V_BSTR(&url) );
IWebBrowser2_Release( browser );
}
}
if (ppUnkContainer)
{
*ppUnkContainer = pContainer;
if ( pContainer )
IUnknown_AddRef( pContainer );
}
if (ppUnkControl)
{
*ppUnkControl = pUnkControl;
if ( pUnkControl )
IUnknown_AddRef( pUnkControl );
}
if ( pUnkControl )
IUnknown_Release( pUnkControl );
if ( pContainer )
IUnknown_Release( pContainer );
return S_OK;
}
/***********************************************************************
* AtlAxAttachControl [ATL.@]
*/
HRESULT WINAPI AtlAxAttachControl(IUnknown* pControl, HWND hWnd, IUnknown** ppUnkContainer)
{
IOCS *pUnkContainer;
HRESULT hr;
TRACE( "%p %p %p\n", pControl, hWnd, ppUnkContainer );
if (!pControl)
return E_INVALIDARG;
hr = IOCS_Create( hWnd, pControl, &pUnkContainer );
if ( SUCCEEDED( hr ) && ppUnkContainer)
{
*ppUnkContainer = (IUnknown*) pUnkContainer;
}
if(!hWnd)
return S_FALSE;
return hr;
}
/**********************************************************************
* Helper function for AX_ConvertDialogTemplate
*/
static inline BOOL advance_array(WORD **pptr, DWORD *palloc, DWORD *pfilled, const WORD *data, DWORD size)
{
if ( (*pfilled + size) > *palloc )
{
*palloc = ((*pfilled+size) + 0xFF) & ~0xFF;
*pptr = HeapReAlloc( GetProcessHeap(), 0, *pptr, *palloc * sizeof(WORD) );
if (!*pptr)
return FALSE;
}
RtlMoveMemory( *pptr+*pfilled, data, size * sizeof(WORD) );
*pfilled += size;
return TRUE;
}
/**********************************************************************
* Convert ActiveX control templates to AtlAxWin class instances
*/
static LPDLGTEMPLATEW AX_ConvertDialogTemplate(LPCDLGTEMPLATEW src_tmpl)
{
#define GET_WORD(x) (*(const WORD *)(x))
#define GET_DWORD(x) (*(const DWORD *)(x))
#define PUT_BLOCK(x,y) do {if (!advance_array(&output, &allocated, &filled, (x), (y))) return NULL;} while (0)
#define PUT_WORD(x) do {WORD w = (x);PUT_BLOCK(&w, 1);} while(0)
#define PUT_DWORD(x) do {DWORD w = (x);PUT_BLOCK(&w, 2);} while(0)
const WORD *tmp, *src = (const WORD *)src_tmpl;
WORD *output;
DWORD allocated, filled; /* in WORDs */
BOOL ext;
WORD signature, dlgver, rescount;
DWORD style;
filled = 0; allocated = 256;
output = HeapAlloc( GetProcessHeap(), 0, allocated * sizeof(WORD) );
if (!output)
return NULL;
/* header */
tmp = src;
signature = GET_WORD(src);
dlgver = GET_WORD(src + 1);
if (signature == 1 && dlgver == 0xFFFF)
{
ext = TRUE;
src += 6;
style = GET_DWORD(src);
src += 2;
rescount = GET_WORD(src++);
src += 4;
if ( GET_WORD(src) == 0xFFFF ) /* menu */
src += 2;
else
src += strlenW(src) + 1;
if ( GET_WORD(src) == 0xFFFF ) /* class */
src += 2;
else
src += strlenW(src) + 1;
src += strlenW(src) + 1; /* title */
if ( style & (DS_SETFONT | DS_SHELLFONT) )
{
src += 3;
src += strlenW(src) + 1;
}
} else {
ext = FALSE;
style = GET_DWORD(src);
src += 4;
rescount = GET_WORD(src++);
src += 4;
if ( GET_WORD(src) == 0xFFFF ) /* menu */
src += 2;
else
src += strlenW(src) + 1;
if ( GET_WORD(src) == 0xFFFF ) /* class */
src += 2;
else
src += strlenW(src) + 1;
src += strlenW(src) + 1; /* title */
if ( style & DS_SETFONT )
{
src++;
src += strlenW(src) + 1;
}
}
PUT_BLOCK(tmp, src-tmp);
while(rescount--)
{
src = (const WORD *)( ( ((ULONG_PTR)src) + 3) & ~3); /* align on DWORD boundary */
filled = (filled + 1) & ~1; /* depends on DWORD-aligned allocation unit */
tmp = src;
if (ext)
src += 12;
else
src += 9;
PUT_BLOCK(tmp, src-tmp);
tmp = src;
if ( GET_WORD(src) == 0xFFFF ) /* class */
{
src += 2;
} else
{
src += strlenW(src) + 1;
}
src += strlenW(src) + 1; /* title */
if ( GET_WORD(tmp) == '{' ) /* all this mess created because of this line */
{
static const WCHAR AtlAxWin[9]={'A','t','l','A','x','W','i','n',0};
PUT_BLOCK(AtlAxWin, sizeof(AtlAxWin)/sizeof(WCHAR));
PUT_BLOCK(tmp, strlenW(tmp)+1);
} else
PUT_BLOCK(tmp, src-tmp);
if ( GET_WORD(src) )
{
WORD size = (GET_WORD(src)+sizeof(WORD)-1) / sizeof(WORD); /* quite ugly :( Maybe use BYTE* instead of WORD* everywhere ? */
PUT_BLOCK(src, size);
src+=size;
}
else
{
PUT_WORD(0);
src++;
}
}
return (LPDLGTEMPLATEW) output;
}
/***********************************************************************
* AtlAxCreateDialogA [ATL.@]
*
* Creates a dialog window
*
* PARAMS
* hInst [I] Application instance
* name [I] Dialog box template name
* owner [I] Dialog box parent HWND
* dlgProc [I] Dialog box procedure
* param [I] This value will be passed to dlgProc as WM_INITDIALOG's message lParam
*
* RETURNS
* Window handle of dialog window.
*/
HWND WINAPI AtlAxCreateDialogA(HINSTANCE hInst, LPCSTR name, HWND owner, DLGPROC dlgProc ,LPARAM param)
{
HWND res = NULL;
int length;
WCHAR *nameW;
if (IS_INTRESOURCE(name))
return AtlAxCreateDialogW( hInst, (LPCWSTR) name, owner, dlgProc, param );
length = MultiByteToWideChar( CP_ACP, 0, name, -1, NULL, 0 );
nameW = HeapAlloc( GetProcessHeap(), 0, length * sizeof(WCHAR) );
if (nameW)
{
MultiByteToWideChar( CP_ACP, 0, name, -1, nameW, length );
res = AtlAxCreateDialogW( hInst, nameW, owner, dlgProc, param );
HeapFree( GetProcessHeap(), 0, nameW );
}
return res;
}
/***********************************************************************
* AtlAxCreateDialogW [ATL.@]
*
* See AtlAxCreateDialogA
*
*/
HWND WINAPI AtlAxCreateDialogW(HINSTANCE hInst, LPCWSTR name, HWND owner, DLGPROC dlgProc ,LPARAM param)
{
HRSRC hrsrc;
HGLOBAL hgl;
LPCDLGTEMPLATEW ptr;
LPDLGTEMPLATEW newptr;
HWND res;
TRACE("(%p %s %p %p %lx)\n", hInst, debugstr_w(name), owner, dlgProc, param);
hrsrc = FindResourceW( hInst, name, (LPWSTR)RT_DIALOG );
if ( !hrsrc )
return NULL;
hgl = LoadResource (hInst, hrsrc);
if ( !hgl )
return NULL;
ptr = LockResource ( hgl );
if (!ptr)
{
FreeResource( hgl );
return NULL;
}
newptr = AX_ConvertDialogTemplate( ptr );
if ( newptr )
{
res = CreateDialogIndirectParamW( hInst, newptr, owner, dlgProc, param );
HeapFree( GetProcessHeap(), 0, newptr );
} else
res = NULL;
FreeResource ( hgl );
return res;
}
/***********************************************************************
* AtlAxGetHost [ATL.@]
*
*/
HRESULT WINAPI AtlAxGetHost(HWND hWnd, IUnknown **pUnk)
{
IOCS *This;
TRACE( "(%p, %p)\n", hWnd, pUnk );
*pUnk = NULL;
This = (IOCS*) GetWindowLongPtrW( hWnd, GWLP_USERDATA );
if ( !This )
{
WARN("No container attached to %p\n", hWnd );
return E_FAIL;
}
return IOCS_QueryInterface( This, &IID_IUnknown, (void**) pUnk );
}
/***********************************************************************
* AtlAxGetControl [ATL.@]
*
*/
HRESULT WINAPI AtlAxGetControl(HWND hWnd, IUnknown **pUnk)
{
IOCS *This;
TRACE( "(%p, %p)\n", hWnd, pUnk );
*pUnk = NULL;
This = (IOCS*) GetWindowLongPtrW( hWnd, GWLP_USERDATA );
if ( !This || !This->control )
{
WARN("No control attached to %p\n", hWnd );
return E_FAIL;
}
return IOleObject_QueryInterface( This->control, &IID_IUnknown, (void**) pUnk );
}
|
830888.c | /* monitor.c
This file written 2017 by akv
Latest update 2017-11-18
*/
#include <stdint.h> /* Declarations of uint_32 and the like */
#include <pic32mx.h> /* Declarations of system-specific addresses etc */
#include <math.h>
#include <string.h>
#include "projecthead.h" /* function declarations! */
int rawVal = 0;
int prevRaw = 0;
int ewma = 0;
int prevEwma = 0;
double alpha = 0.05;
float bpm = 0;
int beats = 0;
int timerCount = 0;
int totalBeats = 0;
double alphaBpm = 0.1;
float avgBpm = 0;
float prevBpm = 0;
float prevAvgBpm = -1;
int mode = 0;
int drawEwma = 0;
int bpmInSig = 0;
//Interrupt services
void user_isr( void ) {
//handle interrupt from ADC -- get new value from ADC buffer, clear flag
if ( (IFS(1) & 0x2) > 0 ){
rawVal = ADC1BUF0;
IFSCLR(1) = (0x2);
}
//handle interrupt from timer 2
if ( (IFS(0) & 0x100) > 0 ){
timerCount += 1;
IFSCLR(0) = (1 << 8);
}
//handle interrupt from switches (now only sw4)
if ( (IFS(0) & 0x80000) > 0 ){
drawEwma = 1 - drawEwma;
IFSCLR(0) = (1 << 19);
}
}
//should really be on a very short timer to blink properly via one call
void blinkLED(char leds){
PORTESET = leds;
PORTECLR = leds;
}
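//seed the EWMA filter state with the current raw ADC value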
void initRate(void){
ewma = rawVal;
prevEwma = rawVal;
prevRaw = rawVal;
}
void setupTimer(void){
//clear TMR2
TMR2 = 0;
//set scaling to 256:1 (bits 4 to 6 on).
T2CON = 0x70;
//set period to 6250 for 20 ms (with 80 MHz clock)
PR2 = 6250;
//clear interrupt flag (bit 8 in IFS0)
IFSCLR(0) = (1 << 8);
//set interrupt prios to highest (bits 2 3 4 in IPC2) (and IPC2 bits 0 and 1 for subprio)
IPC(2) |= 0x1f;
//enable timer 2 interrupt (bit 8 in IEC0)
IEC(0) |= 0x100;
}
//calculate and update the EWMA
void updateEWMA(void){
ewma = alpha*rawVal + (1-alpha)*prevEwma;
ewma += 5;
}
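//update the EWMA and detect beats as upward crossings of the raw signal through it; every two beats, compute BPM from the elapsed timer ticks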
void calcRate(void){
updateEWMA();
if (rawVal > ewma && prevRaw < ewma){
blinkLED(0xff);
beats += 1;
totalBeats += 1;
//start a timer
//turn T2 on (bit 15)
T2CONSET = (1 << 15);
}
if (beats == 2){
beats = 0;
//end the timer and check value
T2CONCLR = (1 << 15);
TMR2 = 0;
bpm = 60 / (0.02 * (float)timerCount);
bpm = round(bpm);
//initialize the average bpm to first bpm if not yet set
if (prevAvgBpm < 0){
prevAvgBpm = bpm;
}
if (bpm > 30 && bpm < 150 ){
avgBpm = alphaBpm*bpm + (1-alphaBpm)*prevAvgBpm;
}
timerCount = 0;
}
prevEwma = ewma;
prevRaw = rawVal;
prevAvgBpm = avgBpm;
}
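//run one manual ADC conversion on the given analog pin and return the converted value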
int readPin(char pin){
AD1CHS = pin << 16; //select ADC pin in AD1CHS<16:19>
AD1CON1SET = 0x0002; //sample by setting SAMP bit
while( AD1CON1 & 0x0002 ); //wait until internal counter ends sampling
while( !(AD1CON1 & 0x0001) ); //wait for auto convert to complete
return ADC1BUF0; //get result
}
//draw the raw signal curve on screen
void drawSignal(void){
draw_voltage(0,rawVal);
if (drawEwma){
//draw_voltage(0,ewma);
draw_line(ewma);
}
if (bpmInSig){
char * bpmConv = itoaconv(bpm);
display_string(3, bpmConv);
}
}
//write the heart rate in BPM on screen
void drawRate(void){
char * rawConv = itoaconv(rawVal);
char rawStr[50];
strcpy(rawStr, "Voltage: ");
strcat(rawStr, rawConv);
display_string(0, rawStr);
char * ewmaConv = itoaconv(ewma);
char ewmaStr[50];
strcpy(ewmaStr, "EWMA: ");
strcat(ewmaStr, ewmaConv);
display_string(1, ewmaStr);
char * bpmConv = itoaconv(bpm);
char bpmStr[50];
strcpy(bpmStr, "iBPM: ");
strcat(bpmStr, bpmConv);
display_string(2, bpmStr);
char avgTotStr[100];
strcpy(avgTotStr, "avg/b: ");
strcat(avgTotStr, itoaconv(avgBpm));
strcat(avgTotStr, " / ");
strcat(avgTotStr, itoaconv(totalBeats));
display_string(3, avgTotStr);
display_update();
}
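//configure the ADC for manually triggered sampling on pin A1, with auto conversion once sampling ends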
void setupADCManual(void){
AD1PCFGCLR = (1 << 4); //set up A1 as analog, not digital
TRISBSET = (1 << 4); //initialize Port B so that bit 4 is set (use A1 as analog input)
AD1CON1CLR = 0x8000; // make sure ADC is off before messing with it
//AD1CON1<2> 0 - ASAM (sample bit manually set to start sampling)
//AD1CON1<7:5> 111 - SSRC internal counter ends sampling and starts conversion (auto convert)
AD1CON1 = 0x00e0;
AD1CON2 = 0; // AD1CON2<15:13> set voltage reference to pins AVSS/AVDD
AD1CON3 = 0x0f01; // TAD = 4*TPB, acquisition time = 15*TAD
AD1CON1SET = 0x8000; //turn ADC back on
}
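//configure the ADC for automatic sampling on pin A1 and enable its conversion-complete interrupt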
void setupADCAuto(void){
AD1PCFGCLR = (1 << 4); //set up A1 as analog, not digital
TRISBSET = (1 << 4); //initialize Port B so that bit 4 is set (use A1 as analog input)
AD1CON1CLR = 0x8000; // make sure ADC is off before messing with it
IPCSET(6) = (3 << 26); //AD1IP
IPCSET(6) = (1 << 24); //AD1IS
IFSCLR(1) = (0x2); //AD1IF
IECSET(1) = (0x2); //AD1IE
//AD1CON1<2> 1 - ASAM (sample bit automatically set)
//AD1CON1<7:5> 111 - SSRC internal counter ends sampling and starts conversion (auto convert)
AD1CON1SET = 0x00e4;
//AD1CON2<1>, 0 - BUFM Buffer configured as 16 bit buffer (only one)
//AD1CON2<10>, 0 - CSCNA - Scan inputs
AD1CON2 = 0x0000;
//AD1CON2<5:2> x - SMPI - interrupt flag set after x completed conversions
AD1CON2SET = (1 << 2);
//AD1CON3<7:0> ADCS TAD = TPB * 2 * (ADCS<7:0> + 1) = 4 * TPB if ADCS<7:0> set to 1
//AD1CON3<12:8> SAMC AD1CON3<12:8> * TAD = 15 * TAD no of TAD clock cycles btw acquisition & start of conversion.
AD1CON3 = 0x0f01;
AD1CHS = 4 << 16; //ADC1CHS
AD1CON1SET = 0x8000; //turn ADC back on
}
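//configure the board LEDs as outputs, the switches and buttons as inputs, and enable the SW4 (INT4) interrupt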
void setupIO(void){
//initialize Port E so that bits 7 through 0 of Port E are set as outputs (i.e., the 8 least significant bits)
TRISECLR = 0xff;
//initialize the value to 0 on port E LEDs
PORTECLR = 0xff;
//initialize port D so that bits 11 through 5 of Port D are set as inputs (i.e switches + buttons)
TRISDSET = 0xfe0;
//initialize button 1 so we can reset total count (byte 1 in Port F)
TRISFSET = 0x2;
//clear interrupt flag for SW4, bit 19 in IFS(0)
IFSCLR(0) = (1 << 19);
//set up IPC4, bits 28 27 26 (main), 25 24 (sub)
IPCSET(4) = (28 << 24);
//also enable interrupts from SW4, which is INT4, and INT4IE is bit 19 in IEC0
IECSET(0) = (1 << 19);
}
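//return the state of the push buttons on PORT D bits 7:5 in the three least significant bits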
int getbtns(void){
int btnvals = PORTD;
btnvals &= 0xe0;
btnvals >>= 5;
return btnvals;
}
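//return the state of the toggle switches on PORT D bits 11:8 in the four least significant bits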
int getsw( void ){
int switchvals = PORTD;
switchvals &= 0xf00;
switchvals >>= 8;
return switchvals;
}
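//one pass of the main loop: update the rate estimate, draw the active view, and poll the buttons that reset the average/total or switch display modes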
void monitorLoop(void){
calcRate();
switch(mode) {
case 0 :
drawSignal();
break;
case 1:
drawRate();
break;
default :
drawSignal();
}
//poll buttons
int btnvals = getbtns();
if (btnvals > 0){
//go through bit by bit in btnvals
int bit1 = btnvals & 0x1;
int bit2 = btnvals & 0x2;
int bit3 = btnvals & 0x4;
if (bit1 > 0){
avgBpm = 0;
}
else if (bit2 > 0){
clearScreen();
mode = 1;
}
else if (bit3 > 0){
clearScreen();
mode = 0;
}
}
//check BTN1 too which is on another port
int btn1 = PORTF;
btn1 &= 0x2;
btn1 >>= 1;
if (btn1 > 0){
totalBeats = 0;
}
}
|
424381.c | /*
* main.c
*/
// system includes
#include <stdio.h>
// Driverlib includes
#include "hw_types.h"
#include "hw_ints.h"
#include "hw_memmap.h"
#include "hw_common_reg.h"
#include "interrupt.h"
#include "hw_apps_rcm.h"
#include "prcm.h"
#include "rom.h"
#include "rom_map.h"
#include "prcm.h"
#include "gpio.h"
#include "utils.h"
// user-defined includes
#include "pin_mux_config.h"
#include "ht_gpio_if.h"
#include "uart_if.h"
#include "i2c_if.h"
extern void (* const g_pfnVectors[])(void);
void LEDBlinkyRoutine();
static void BoardInit(void);
void LEDBlinkyRoutine()
{
//
// Toggle the lines initially to turn off the LEDs.
// The values driven are as required by the LEDs on the LP.
//
GPIO_IF_LedOff(MCU_RED_LED_GPIO);
unsigned char ucBtn1,ucBtn2;
while(1)
{
ucBtn1=GPIO_IF_ButtonRead(BTN1);
if(ucBtn1) GPIO_IF_LedOn(MCU_RED_LED_GPIO);
ucBtn2=GPIO_IF_ButtonRead(BTN2);
if(ucBtn2) GPIO_IF_LedOff(MCU_RED_LED_GPIO);
}
}
static void
BoardInit(void)
{
MAP_IntVTableBaseSet((unsigned long)&g_pfnVectors[0]);
//
// Enable Processor
//
MAP_IntMasterEnable();
MAP_IntEnable(FAULT_SYSTICK);
PRCMCC3200MCUInit();
}
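//
// Application entry point: initialise the board, pin muxing, UART terminal,
// LEDs, buttons and I2C, run a short UART echo test, then idle.
//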
int main(void) {
char cmd[50];
int cnt;
BoardInit();
PinMuxConfig();
InitTerm();
GPIO_IF_LedConfigure(LED1);
GPIO_IF_ButtonConfigure(BTN1|BTN2);
GPIO_IF_LedOff(MCU_RED_LED_GPIO);
Message("Hello world! This is a test of UART on CC3200.\n\r");
Message("Please input something to test UART:");
cnt=GetCmd(cmd,50);
if(cnt>0) Report("Success, recevied %d chars.\n\r",cnt);
else Report("Failed.\n\r");
Report("Send Charactor 't' to get temperature.\n\r");
I2C_IF_Open(I2C_MASTER_MODE_STD);
InitTermInt();
while(1){
MAP_UtilsDelay(8000000);
}
return 0;
}
|
125854.c | /*
* Copyright (c) 2004, Bull SA. All rights reserved.
* Created by: Laurent.Vivier@bull.net
* This file is licensed under the GPL license. For the full content
* of this license, see the COPYING file at the top level of this
* source tree.
*/
/*
* assertion:
*
* The aio_read() function shall return the value -1 and set errno to
 * indicate error if the operation is not successfully queued.
*
* method:
*
* - fill in an aiocb with a NULL aio_buf
* - call aio_read
* - check aio_read return value
*/
#define _XOPEN_SOURCE 600
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <stdlib.h>
#include <aio.h>
#include "posixtest.h"
#define TNAME "aio_read/8-1.c"
int main()
{
struct aiocb aiocb;
#if _POSIX_ASYNCHRONOUS_IO != 200112L
exit(PTS_UNSUPPORTED);
#endif
/* submit a request with a NULL buffer */
memset(&aiocb, 0, sizeof(aiocb));
aiocb.aio_fildes = 0;
aiocb.aio_buf = NULL;
aiocb.aio_nbytes = 0;
aiocb.aio_offset = 0;
if (aio_read(&aiocb) != -1)
{
printf(TNAME " aio_read() should fail!\n");
exit(PTS_FAIL);
}
printf ("Test PASSED\n");
return PTS_PASS;
}
|
643695.c | /* crypto/dsa/dsa_lib.c */
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.]
*/
/* Original version from Steven Schoch <schoch@sheba.arc.nasa.gov> */
#include <stdio.h>
#include "cryptlib.h"
#include <openssl/bn.h>
#include <openssl/dsa.h>
#include <openssl/asn1.h>
#ifndef OPENSSL_NO_ENGINE
#include <openssl/engine.h>
#endif
#ifndef OPENSSL_NO_DH
#include <openssl/dh.h>
#endif
const char DSA_version[]="DSA" OPENSSL_VERSION_PTEXT;
static const DSA_METHOD *default_DSA_method = NULL;
void DSA_set_default_method(const DSA_METHOD *meth)
{
#ifdef OPENSSL_FIPS
if (FIPS_mode() && !(meth->flags & DSA_FLAG_FIPS_METHOD))
{
DSAerr(DSA_F_DSA_SET_DEFAULT_METHOD, DSA_R_NON_FIPS_METHOD);
return;
}
#endif
default_DSA_method = meth;
}
const DSA_METHOD *DSA_get_default_method(void)
{
if(!default_DSA_method)
default_DSA_method = DSA_OpenSSL();
return default_DSA_method;
}
DSA *DSA_new(void)
{
return DSA_new_method(NULL);
}
int DSA_set_method(DSA *dsa, const DSA_METHOD *meth)
{
/* NB: The caller is specifically setting a method, so it's not up to us
* to deal with which ENGINE it comes from. */
const DSA_METHOD *mtmp;
#ifdef OPENSSL_FIPS
if (FIPS_mode() && !(meth->flags & DSA_FLAG_FIPS_METHOD))
{
DSAerr(DSA_F_DSA_SET_METHOD, DSA_R_NON_FIPS_METHOD);
return 0;
}
#endif
mtmp = dsa->meth;
if (mtmp->finish) mtmp->finish(dsa);
#ifndef OPENSSL_NO_ENGINE
if (dsa->engine)
{
ENGINE_finish(dsa->engine);
dsa->engine = NULL;
}
#endif
dsa->meth = meth;
if (meth->init) meth->init(dsa);
return 1;
}
DSA *DSA_new_method(ENGINE *engine)
{
DSA *ret;
ret=(DSA *)OPENSSL_malloc(sizeof(DSA));
if (ret == NULL)
{
DSAerr(DSA_F_DSA_NEW_METHOD,ERR_R_MALLOC_FAILURE);
return(NULL);
}
ret->meth = DSA_get_default_method();
#ifndef OPENSSL_NO_ENGINE
if (engine)
{
if (!ENGINE_init(engine))
{
DSAerr(DSA_F_DSA_NEW_METHOD, ERR_R_ENGINE_LIB);
OPENSSL_free(ret);
return NULL;
}
ret->engine = engine;
}
else
ret->engine = ENGINE_get_default_DSA();
if(ret->engine)
{
ret->meth = ENGINE_get_DSA(ret->engine);
if(!ret->meth)
{
DSAerr(DSA_F_DSA_NEW_METHOD,
ERR_R_ENGINE_LIB);
ENGINE_finish(ret->engine);
OPENSSL_free(ret);
return NULL;
}
}
#endif
#ifdef OPENSSL_FIPS
if (FIPS_mode() && !(ret->meth->flags & DSA_FLAG_FIPS_METHOD))
{
DSAerr(DSA_F_DSA_NEW_METHOD, DSA_R_NON_FIPS_METHOD);
#ifndef OPENSSL_NO_ENGINE
if (ret->engine)
ENGINE_finish(ret->engine);
#endif
OPENSSL_free(ret);
return NULL;
}
#endif
ret->pad=0;
ret->version=0;
ret->write_params=1;
ret->p=NULL;
ret->q=NULL;
ret->g=NULL;
ret->pub_key=NULL;
ret->priv_key=NULL;
ret->kinv=NULL;
ret->r=NULL;
ret->method_mont_p=NULL;
ret->references=1;
ret->flags=ret->meth->flags & ~DSA_FLAG_NON_FIPS_ALLOW;
CRYPTO_new_ex_data(CRYPTO_EX_INDEX_DSA, ret, &ret->ex_data);
if ((ret->meth->init != NULL) && !ret->meth->init(ret))
{
#ifndef OPENSSL_NO_ENGINE
if (ret->engine)
ENGINE_finish(ret->engine);
#endif
CRYPTO_free_ex_data(CRYPTO_EX_INDEX_DSA, ret, &ret->ex_data);
OPENSSL_free(ret);
ret=NULL;
}
return(ret);
}
void DSA_free(DSA *r)
{
int i;
if (r == NULL) return;
i=CRYPTO_add(&r->references,-1,CRYPTO_LOCK_DSA);
#ifdef REF_PRINT
REF_PRINT("DSA",r);
#endif
if (i > 0) return;
#ifdef REF_CHECK
if (i < 0)
{
fprintf(stderr,"DSA_free, bad reference count\n");
abort();
}
#endif
if(r->meth->finish)
r->meth->finish(r);
#ifndef OPENSSL_NO_ENGINE
if(r->engine)
ENGINE_finish(r->engine);
#endif
CRYPTO_free_ex_data(CRYPTO_EX_INDEX_DSA, r, &r->ex_data);
if (r->p != NULL) BN_clear_free(r->p);
if (r->q != NULL) BN_clear_free(r->q);
if (r->g != NULL) BN_clear_free(r->g);
if (r->pub_key != NULL) BN_clear_free(r->pub_key);
if (r->priv_key != NULL) BN_clear_free(r->priv_key);
if (r->kinv != NULL) BN_clear_free(r->kinv);
if (r->r != NULL) BN_clear_free(r->r);
OPENSSL_free(r);
}
int DSA_up_ref(DSA *r)
{
int i = CRYPTO_add(&r->references, 1, CRYPTO_LOCK_DSA);
#ifdef REF_PRINT
REF_PRINT("DSA",r);
#endif
#ifdef REF_CHECK
if (i < 2)
{
fprintf(stderr, "DSA_up_ref, bad reference count\n");
abort();
}
#endif
return ((i > 1) ? 1 : 0);
}
int DSA_get_ex_new_index(long argl, void *argp, CRYPTO_EX_new *new_func,
CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func)
{
return CRYPTO_get_ex_new_index(CRYPTO_EX_INDEX_DSA, argl, argp,
new_func, dup_func, free_func);
}
int DSA_set_ex_data(DSA *d, int idx, void *arg)
{
return(CRYPTO_set_ex_data(&d->ex_data,idx,arg));
}
void *DSA_get_ex_data(DSA *d, int idx)
{
return(CRYPTO_get_ex_data(&d->ex_data,idx));
}
#ifndef OPENSSL_NO_DH
DH *DSA_dup_DH(const DSA *r)
{
/* DSA has p, q, g, optional pub_key, optional priv_key.
* DH has p, optional length, g, optional pub_key, optional priv_key.
*/
DH *ret = NULL;
if (r == NULL)
goto err;
ret = DH_new();
if (ret == NULL)
goto err;
if (r->p != NULL)
if ((ret->p = BN_dup(r->p)) == NULL)
goto err;
if (r->q != NULL)
ret->length = BN_num_bits(r->q);
if (r->g != NULL)
if ((ret->g = BN_dup(r->g)) == NULL)
goto err;
if (r->pub_key != NULL)
if ((ret->pub_key = BN_dup(r->pub_key)) == NULL)
goto err;
if (r->priv_key != NULL)
if ((ret->priv_key = BN_dup(r->priv_key)) == NULL)
goto err;
return ret;
err:
if (ret != NULL)
DH_free(ret);
return NULL;
}
#endif
|
977671.c | /* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2016-2020 Intel Corporation
*/
#include <string.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>
#include "aesni_gcm_pmd_private.h"
static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
{ /* AES GMAC (AUTH) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
.algo = RTE_CRYPTO_AUTH_AES_GMAC,
.block_size = 16,
.key_size = {
.min = 16,
.max = 32,
.increment = 8
},
.digest_size = {
.min = 1,
.max = 16,
.increment = 1
},
.iv_size = {
.min = 12,
.max = 12,
.increment = 0
}
}, }
}, }
},
{ /* AES GCM */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
{.aead = {
.algo = RTE_CRYPTO_AEAD_AES_GCM,
.block_size = 16,
.key_size = {
.min = 16,
.max = 32,
.increment = 8
},
.digest_size = {
.min = 1,
.max = 16,
.increment = 1
},
.aad_size = {
.min = 0,
.max = 65535,
.increment = 1
},
.iv_size = {
.min = 12,
.max = 12,
.increment = 0
}
}, }
}, }
},
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
/** Configure device */
static int
aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev,
__rte_unused struct rte_cryptodev_config *config)
{
return 0;
}
/** Start device */
static int
aesni_gcm_pmd_start(__rte_unused struct rte_cryptodev *dev)
{
return 0;
}
/** Stop device */
static void
aesni_gcm_pmd_stop(__rte_unused struct rte_cryptodev *dev)
{
}
/** Close device */
static int
aesni_gcm_pmd_close(__rte_unused struct rte_cryptodev *dev)
{
return 0;
}
/** Get device statistics */
static void
aesni_gcm_pmd_stats_get(struct rte_cryptodev *dev,
struct rte_cryptodev_stats *stats)
{
int qp_id;
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
stats->enqueued_count += qp->qp_stats.enqueued_count;
stats->dequeued_count += qp->qp_stats.dequeued_count;
stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
}
}
/** Reset device statistics */
static void
aesni_gcm_pmd_stats_reset(struct rte_cryptodev *dev)
{
int qp_id;
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
}
}
/** Get device info */
static void
aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
struct rte_cryptodev_info *dev_info)
{
struct aesni_gcm_private *internals = dev->data->dev_private;
if (dev_info != NULL) {
dev_info->driver_id = dev->driver_id;
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = aesni_gcm_pmd_capabilities;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
/* No limit of number of sessions */
dev_info->sym.max_nb_sessions = 0;
}
}
/** Release queue pair */
static int
aesni_gcm_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
if (dev->data->queue_pairs[qp_id] != NULL) {
struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
if (qp->processed_pkts)
rte_ring_free(qp->processed_pkts);
rte_free(dev->data->queue_pairs[qp_id]);
dev->data->queue_pairs[qp_id] = NULL;
}
return 0;
}
/** set a unique name for the queue pair based on its name, dev_id and qp_id */
static int
aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
struct aesni_gcm_qp *qp)
{
unsigned n = snprintf(qp->name, sizeof(qp->name),
"aesni_gcm_pmd_%u_qp_%u",
dev->data->dev_id, qp->id);
if (n >= sizeof(qp->name))
return -1;
return 0;
}
/** Create a ring to place process packets on */
static struct rte_ring *
aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
unsigned ring_size, int socket_id)
{
struct rte_ring *r;
r = rte_ring_lookup(qp->name);
if (r) {
if (rte_ring_get_size(r) >= ring_size) {
AESNI_GCM_LOG(INFO, "Reusing existing ring %s for processed"
" packets", qp->name);
return r;
}
AESNI_GCM_LOG(ERR, "Unable to reuse existing ring %s for processed"
" packets", qp->name);
return NULL;
}
return rte_ring_create(qp->name, ring_size, socket_id,
RING_F_SP_ENQ | RING_F_SC_DEQ);
}
/** Setup a queue pair */
static int
aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
const struct rte_cryptodev_qp_conf *qp_conf,
int socket_id)
{
struct aesni_gcm_qp *qp = NULL;
struct aesni_gcm_private *internals = dev->data->dev_private;
/* Free memory prior to re-allocation if needed. */
if (dev->data->queue_pairs[qp_id] != NULL)
aesni_gcm_pmd_qp_release(dev, qp_id);
/* Allocate the queue pair data structure. */
qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
RTE_CACHE_LINE_SIZE, socket_id);
if (qp == NULL)
return (-ENOMEM);
qp->id = qp_id;
dev->data->queue_pairs[qp_id] = qp;
if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
goto qp_setup_cleanup;
qp->ops = (const struct aesni_gcm_ops *)internals->ops;
qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
qp_conf->nb_descriptors, socket_id);
if (qp->processed_pkts == NULL)
goto qp_setup_cleanup;
qp->sess_mp = qp_conf->mp_session;
qp->sess_mp_priv = qp_conf->mp_session_private;
memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
return 0;
qp_setup_cleanup:
if (qp)
rte_free(qp);
return -1;
}
/** Returns the size of the aesni gcm session structure */
static unsigned
aesni_gcm_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
return sizeof(struct aesni_gcm_session);
}
/** Configure a aesni gcm session from a crypto xform chain */
static int
aesni_gcm_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
{
void *sess_private_data;
int ret;
struct aesni_gcm_private *internals = dev->data->dev_private;
if (unlikely(sess == NULL)) {
AESNI_GCM_LOG(ERR, "invalid session struct");
return -EINVAL;
}
if (rte_mempool_get(mempool, &sess_private_data)) {
AESNI_GCM_LOG(ERR,
"Couldn't get object from session mempool");
return -ENOMEM;
}
ret = aesni_gcm_set_session_parameters(internals->ops,
sess_private_data, xform);
if (ret != 0) {
AESNI_GCM_LOG(ERR, "failed configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
}
set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
aesni_gcm_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
uint8_t index = dev->driver_id;
void *sess_priv = get_sym_session_private_data(sess, index);
/* Zero out the whole structure */
if (sess_priv) {
memset(sess_priv, 0, sizeof(struct aesni_gcm_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
.dev_configure = aesni_gcm_pmd_config,
.dev_start = aesni_gcm_pmd_start,
.dev_stop = aesni_gcm_pmd_stop,
.dev_close = aesni_gcm_pmd_close,
.stats_get = aesni_gcm_pmd_stats_get,
.stats_reset = aesni_gcm_pmd_stats_reset,
.dev_infos_get = aesni_gcm_pmd_info_get,
.queue_pair_setup = aesni_gcm_pmd_qp_setup,
.queue_pair_release = aesni_gcm_pmd_qp_release,
.sym_cpu_process = aesni_gcm_pmd_cpu_crypto_process,
.sym_session_get_size = aesni_gcm_pmd_sym_session_get_size,
.sym_session_configure = aesni_gcm_pmd_sym_session_configure,
.sym_session_clear = aesni_gcm_pmd_sym_session_clear
};
struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;
|
950137.c | /*
* An implementation of the Acorn Econet and AUN protocols.
* Philip Blundell <philb@gnu.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#define pr_fmt(fmt) fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/route.h>
#include <linux/inet.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/wireless.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/if_ec.h>
#include <net/udp.h>
#include <net/ip.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <asm/system.h>
static const struct proto_ops econet_ops;
static struct hlist_head econet_sklist;
static DEFINE_SPINLOCK(econet_lock);
static DEFINE_MUTEX(econet_mutex);
/* Since there are only 256 possible network numbers (or fewer, depends
how you count) it makes sense to use a simple lookup table. */
static struct net_device *net2dev_map[256];
#define EC_PORT_IP 0xd2
#ifdef CONFIG_ECONET_AUNUDP
static DEFINE_SPINLOCK(aun_queue_lock);
static struct socket *udpsock;
#define AUN_PORT 0x8000
struct aunhdr {
unsigned char code; /* AUN magic protocol byte */
unsigned char port;
unsigned char cb;
unsigned char pad;
unsigned long handle;
};
static unsigned long aun_seq;
/* Queue of packets waiting to be transmitted. */
static struct sk_buff_head aun_queue;
static struct timer_list ab_cleanup_timer;
#endif /* CONFIG_ECONET_AUNUDP */
/* Per-packet information */
struct ec_cb {
struct sockaddr_ec sec;
unsigned long cookie; /* Supplied by user. */
#ifdef CONFIG_ECONET_AUNUDP
int done;
unsigned long seq; /* Sequencing */
unsigned long timeout; /* Timeout */
unsigned long start; /* jiffies */
#endif
#ifdef CONFIG_ECONET_NATIVE
void (*sent)(struct sk_buff *, int result);
#endif
};
static void econet_remove_socket(struct hlist_head *list, struct sock *sk)
{
spin_lock_bh(&econet_lock);
sk_del_node_init(sk);
spin_unlock_bh(&econet_lock);
}
static void econet_insert_socket(struct hlist_head *list, struct sock *sk)
{
spin_lock_bh(&econet_lock);
sk_add_node(sk, list);
spin_unlock_bh(&econet_lock);
}
/*
* Pull a packet from our receive queue and hand it to the user.
* If necessary we block.
*/
static int econet_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
size_t copied;
int err;
msg->msg_namelen = sizeof(struct sockaddr_ec);
mutex_lock(&econet_mutex);
/*
* Call the generic datagram receiver. This handles all sorts
* of horrible races and re-entrancy so we can forget about it
* in the protocol layers.
*
* Now it will return ENETDOWN, if device have just gone down,
* but then it will block.
*/
skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
/*
* An error occurred so return it. Because skb_recv_datagram()
* handles the blocking we don't see and worry about blocking
* retries.
*/
if (skb == NULL)
goto out;
/*
* You lose any data beyond the buffer you gave. If it worries a
* user program they can ask the device for its MTU anyway.
*/
copied = skb->len;
if (copied > len) {
copied = len;
msg->msg_flags |= MSG_TRUNC;
}
/* We can't use skb_copy_datagram here */
err = memcpy_toiovec(msg->msg_iov, skb->data, copied);
if (err)
goto out_free;
sk->sk_stamp = skb->tstamp;
if (msg->msg_name)
memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
/*
* Free or return the buffer as appropriate. Again this
* hides all the races and re-entrancy issues from us.
*/
err = copied;
out_free:
skb_free_datagram(sk, skb);
out:
mutex_unlock(&econet_mutex);
return err;
}
/*
* Bind an Econet socket.
*/
static int econet_bind(struct socket *sock, struct sockaddr *uaddr,
int addr_len)
{
struct sockaddr_ec *sec = (struct sockaddr_ec *)uaddr;
struct sock *sk;
struct econet_sock *eo;
/*
* Check legality
*/
if (addr_len < sizeof(struct sockaddr_ec) ||
sec->sec_family != AF_ECONET)
return -EINVAL;
mutex_lock(&econet_mutex);
sk = sock->sk;
eo = ec_sk(sk);
eo->cb = sec->cb;
eo->port = sec->port;
eo->station = sec->addr.station;
eo->net = sec->addr.net;
mutex_unlock(&econet_mutex);
return 0;
}
#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
/*
* Queue a transmit result for the user to be told about.
*/
static void tx_result(struct sock *sk, unsigned long cookie, int result)
{
struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
struct ec_cb *eb;
struct sockaddr_ec *sec;
if (skb == NULL) {
pr_debug("econet: memory squeeze, transmit result dropped\n");
return;
}
eb = (struct ec_cb *)&skb->cb;
sec = (struct sockaddr_ec *)&eb->sec;
memset(sec, 0, sizeof(struct sockaddr_ec));
sec->cookie = cookie;
sec->type = ECTYPE_TRANSMIT_STATUS | result;
sec->sec_family = AF_ECONET;
if (sock_queue_rcv_skb(sk, skb) < 0)
kfree_skb(skb);
}
#endif
#ifdef CONFIG_ECONET_NATIVE
/*
* Called by the Econet hardware driver when a packet transmit
* has completed. Tell the user.
*/
static void ec_tx_done(struct sk_buff *skb, int result)
{
struct ec_cb *eb = (struct ec_cb *)&skb->cb;
tx_result(skb->sk, eb->cookie, result);
}
#endif
/*
* Send a packet. We have to work out which device it's going out on
* and hence whether to use real Econet or the UDP emulation.
*/
static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sockaddr_ec *saddr = (struct sockaddr_ec *)msg->msg_name;
struct net_device *dev;
struct ec_addr addr;
int err;
unsigned char port, cb;
#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
struct sock *sk = sock->sk;
struct sk_buff *skb;
struct ec_cb *eb;
#endif
#ifdef CONFIG_ECONET_AUNUDP
struct msghdr udpmsg;
struct iovec iov[2];
struct aunhdr ah;
struct sockaddr_in udpdest;
__kernel_size_t size;
mm_segment_t oldfs;
char *userbuf;
#endif
/*
* Check the flags.
*/
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
return -EINVAL;
/*
* Get and verify the address.
*/
mutex_lock(&econet_mutex);
if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) {
mutex_unlock(&econet_mutex);
return -EINVAL;
}
addr.station = saddr->addr.station;
addr.net = saddr->addr.net;
port = saddr->port;
cb = saddr->cb;
/* Look for a device with the right network number. */
dev = net2dev_map[addr.net];
/* If not directly reachable, use some default */
if (dev == NULL) {
dev = net2dev_map[0];
/* No interfaces at all? */
if (dev == NULL) {
mutex_unlock(&econet_mutex);
return -ENETDOWN;
}
}
if (dev->type == ARPHRD_ECONET) {
/* Real hardware Econet. We're not worthy etc. */
#ifdef CONFIG_ECONET_NATIVE
unsigned short proto = 0;
int hlen, tlen;
int res;
if (len + 15 > dev->mtu) {
mutex_unlock(&econet_mutex);
return -EMSGSIZE;
}
dev_hold(dev);
hlen = LL_RESERVED_SPACE(dev);
tlen = dev->needed_tailroom;
skb = sock_alloc_send_skb(sk, len + hlen + tlen,
msg->msg_flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto out_unlock;
skb_reserve(skb, hlen);
skb_reset_network_header(skb);
eb = (struct ec_cb *)&skb->cb;
eb->cookie = saddr->cookie;
eb->sec = *saddr;
eb->sent = ec_tx_done;
err = -EINVAL;
res = dev_hard_header(skb, dev, ntohs(proto), &addr, NULL, len);
if (res < 0)
goto out_free;
if (res > 0) {
struct ec_framehdr *fh;
/* Poke in our control byte and
port number. Hack, hack. */
fh = (struct ec_framehdr *)skb->data;
fh->cb = cb;
fh->port = port;
if (sock->type != SOCK_DGRAM) {
skb_reset_tail_pointer(skb);
skb->len = 0;
}
}
/* Copy the data. Returns -EFAULT on error */
err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
skb->protocol = proto;
skb->dev = dev;
skb->priority = sk->sk_priority;
if (err)
goto out_free;
err = -ENETDOWN;
if (!(dev->flags & IFF_UP))
goto out_free;
/*
* Now send it
*/
dev_queue_xmit(skb);
dev_put(dev);
mutex_unlock(&econet_mutex);
return len;
out_free:
kfree_skb(skb);
out_unlock:
if (dev)
dev_put(dev);
#else
err = -EPROTOTYPE;
#endif
mutex_unlock(&econet_mutex);
return err;
}
#ifdef CONFIG_ECONET_AUNUDP
/* AUN virtual Econet. */
if (udpsock == NULL) {
mutex_unlock(&econet_mutex);
return -ENETDOWN; /* No socket - can't send */
}
if (len > 32768) {
err = -E2BIG;
goto error;
}
/* Make up a UDP datagram and hand it off to some higher intellect. */
memset(&udpdest, 0, sizeof(udpdest));
udpdest.sin_family = AF_INET;
udpdest.sin_port = htons(AUN_PORT);
/* At the moment we use the stupid Acorn scheme of Econet address
y.x maps to IP a.b.c.x. This should be replaced with something
more flexible and more aware of subnet masks. */
{
struct in_device *idev;
unsigned long network = 0;
rcu_read_lock();
idev = __in_dev_get_rcu(dev);
if (idev) {
if (idev->ifa_list)
network = ntohl(idev->ifa_list->ifa_address) &
0xffffff00; /* !!! */
}
rcu_read_unlock();
udpdest.sin_addr.s_addr = htonl(network | addr.station);
}
memset(&ah, 0, sizeof(ah));
ah.port = port;
ah.cb = cb & 0x7f;
ah.code = 2; /* magic */
/* tack our header on the front of the iovec */
size = sizeof(struct aunhdr);
iov[0].iov_base = (void *)&ah;
iov[0].iov_len = size;
userbuf = vmalloc(len);
if (userbuf == NULL) {
err = -ENOMEM;
goto error;
}
iov[1].iov_base = userbuf;
iov[1].iov_len = len;
err = memcpy_fromiovec(userbuf, msg->msg_iov, len);
if (err)
goto error_free_buf;
/* Get a skbuff (no data, just holds our cb information) */
skb = sock_alloc_send_skb(sk, 0, msg->msg_flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto error_free_buf;
eb = (struct ec_cb *)&skb->cb;
eb->cookie = saddr->cookie;
eb->timeout = 5 * HZ;
eb->start = jiffies;
ah.handle = aun_seq;
eb->seq = (aun_seq++);
eb->sec = *saddr;
skb_queue_tail(&aun_queue, skb);
udpmsg.msg_name = (void *)&udpdest;
udpmsg.msg_namelen = sizeof(udpdest);
udpmsg.msg_iov = &iov[0];
udpmsg.msg_iovlen = 2;
udpmsg.msg_control = NULL;
udpmsg.msg_controllen = 0;
udpmsg.msg_flags = 0;
oldfs = get_fs();
set_fs(KERNEL_DS); /* More privs :-) */
err = sock_sendmsg(udpsock, &udpmsg, size);
set_fs(oldfs);
error_free_buf:
vfree(userbuf);
error:
#else
err = -EPROTOTYPE;
#endif
mutex_unlock(&econet_mutex);
return err;
}
/*
* Look up the address of a socket.
*/
static int econet_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct sock *sk;
struct econet_sock *eo;
struct sockaddr_ec *sec = (struct sockaddr_ec *)uaddr;
if (peer)
return -EOPNOTSUPP;
memset(sec, 0, sizeof(*sec));
mutex_lock(&econet_mutex);
sk = sock->sk;
eo = ec_sk(sk);
sec->sec_family = AF_ECONET;
sec->port = eo->port;
sec->addr.station = eo->station;
sec->addr.net = eo->net;
mutex_unlock(&econet_mutex);
*uaddr_len = sizeof(*sec);
return 0;
}
static void econet_destroy_timer(unsigned long data)
{
struct sock *sk = (struct sock *)data;
if (!sk_has_allocations(sk)) {
sk_free(sk);
return;
}
sk->sk_timer.expires = jiffies + 10 * HZ;
add_timer(&sk->sk_timer);
pr_debug("econet: socket destroy delayed\n");
}
/*
* Close an econet socket.
*/
static int econet_release(struct socket *sock)
{
struct sock *sk;
mutex_lock(&econet_mutex);
sk = sock->sk;
if (!sk)
goto out_unlock;
econet_remove_socket(&econet_sklist, sk);
/*
* Now the socket is dead. No more input will appear.
*/
sk->sk_state_change(sk); /* It is useless. Just for sanity. */
sock_orphan(sk);
/* Purge queues */
skb_queue_purge(&sk->sk_receive_queue);
if (sk_has_allocations(sk)) {
sk->sk_timer.data = (unsigned long)sk;
sk->sk_timer.expires = jiffies + HZ;
sk->sk_timer.function = econet_destroy_timer;
add_timer(&sk->sk_timer);
goto out_unlock;
}
sk_free(sk);
out_unlock:
mutex_unlock(&econet_mutex);
return 0;
}
static struct proto econet_proto = {
.name = "ECONET",
.owner = THIS_MODULE,
.obj_size = sizeof(struct econet_sock),
};
/*
* Create an Econet socket
*/
static int econet_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
struct econet_sock *eo;
int err;
if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
/* Econet only provides datagram services. */
if (sock->type != SOCK_DGRAM)
return -ESOCKTNOSUPPORT;
sock->state = SS_UNCONNECTED;
err = -ENOBUFS;
sk = sk_alloc(net, PF_ECONET, GFP_KERNEL, &econet_proto);
if (sk == NULL)
goto out;
sk->sk_reuse = 1;
sock->ops = &econet_ops;
sock_init_data(sock, sk);
eo = ec_sk(sk);
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_family = PF_ECONET;
eo->num = protocol;
econet_insert_socket(&econet_sklist, sk);
return 0;
out:
return err;
}
/*
* Handle Econet specific ioctls
*/
static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg)
{
struct ifreq ifr;
struct ec_device *edev;
struct net_device *dev;
struct sockaddr_ec *sec;
int err;
/*
* Fetch the caller's info block into kernel space
*/
if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
return -EFAULT;
dev = dev_get_by_name(&init_net, ifr.ifr_name);
if (dev == NULL)
return -ENODEV;
sec = (struct sockaddr_ec *)&ifr.ifr_addr;
mutex_lock(&econet_mutex);
err = 0;
switch (cmd) {
case SIOCSIFADDR:
if (!capable(CAP_NET_ADMIN)) {
err = -EPERM;
break;
}
edev = dev->ec_ptr;
if (edev == NULL) {
/* Magic up a new one. */
edev = kzalloc(sizeof(struct ec_device), GFP_KERNEL);
if (edev == NULL) {
err = -ENOMEM;
break;
}
dev->ec_ptr = edev;
} else
net2dev_map[edev->net] = NULL;
edev->station = sec->addr.station;
edev->net = sec->addr.net;
net2dev_map[sec->addr.net] = dev;
if (!net2dev_map[0])
net2dev_map[0] = dev;
break;
case SIOCGIFADDR:
edev = dev->ec_ptr;
if (edev == NULL) {
err = -ENODEV;
break;
}
memset(sec, 0, sizeof(struct sockaddr_ec));
sec->addr.station = edev->station;
sec->addr.net = edev->net;
sec->sec_family = AF_ECONET;
if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
err = -EFAULT;
break;
default:
err = -EINVAL;
break;
}
mutex_unlock(&econet_mutex);
dev_put(dev);
return err;
}
/*
* Handle generic ioctls
*/
static int econet_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
struct sock *sk = sock->sk;
void __user *argp = (void __user *)arg;
switch (cmd) {
case SIOCGSTAMP:
return sock_get_timestamp(sk, argp);
case SIOCGSTAMPNS:
return sock_get_timestampns(sk, argp);
case SIOCSIFADDR:
case SIOCGIFADDR:
return ec_dev_ioctl(sock, cmd, argp);
}
return -ENOIOCTLCMD;
}
static const struct net_proto_family econet_family_ops = {
.family = PF_ECONET,
.create = econet_create,
.owner = THIS_MODULE,
};
static const struct proto_ops econet_ops = {
.family = PF_ECONET,
.owner = THIS_MODULE,
.release = econet_release,
.bind = econet_bind,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = econet_getname,
.poll = datagram_poll,
.ioctl = econet_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
.sendmsg = econet_sendmsg,
.recvmsg = econet_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
/*
* Find the listening socket, if any, for the given data.
*/
static struct sock *ec_listening_socket(unsigned char port, unsigned char
station, unsigned char net)
{
struct sock *sk;
struct hlist_node *node;
spin_lock(&econet_lock);
sk_for_each(sk, node, &econet_sklist) {
struct econet_sock *opt = ec_sk(sk);
if ((opt->port == port || opt->port == 0) &&
(opt->station == station || opt->station == 0) &&
(opt->net == net || opt->net == 0)) {
sock_hold(sk);
goto found;
}
}
sk = NULL;
found:
spin_unlock(&econet_lock);
return sk;
}
/*
* Queue a received packet for a socket.
*/
static int ec_queue_packet(struct sock *sk, struct sk_buff *skb,
unsigned char stn, unsigned char net,
unsigned char cb, unsigned char port)
{
struct ec_cb *eb = (struct ec_cb *)&skb->cb;
struct sockaddr_ec *sec = (struct sockaddr_ec *)&eb->sec;
memset(sec, 0, sizeof(struct sockaddr_ec));
sec->sec_family = AF_ECONET;
sec->type = ECTYPE_PACKET_RECEIVED;
sec->port = port;
sec->cb = cb;
sec->addr.net = net;
sec->addr.station = stn;
return sock_queue_rcv_skb(sk, skb);
}
#endif
#ifdef CONFIG_ECONET_AUNUDP
/*
* Send an AUN protocol response.
*/
static void aun_send_response(__u32 addr, unsigned long seq, int code, int cb)
{
struct sockaddr_in sin = {
.sin_family = AF_INET,
.sin_port = htons(AUN_PORT),
.sin_addr = {.s_addr = addr}
};
struct aunhdr ah = {.code = code, .cb = cb, .handle = seq};
struct kvec iov = {.iov_base = (void *)&ah, .iov_len = sizeof(ah)};
struct msghdr udpmsg;
udpmsg.msg_name = (void *)&sin;
udpmsg.msg_namelen = sizeof(sin);
udpmsg.msg_control = NULL;
udpmsg.msg_controllen = 0;
udpmsg.msg_flags = 0;
kernel_sendmsg(udpsock, &udpmsg, &iov, 1, sizeof(ah));
}
/*
* Handle incoming AUN packets. Work out if anybody wants them,
* and send positive or negative acknowledgements as appropriate.
*/
static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len)
{
struct iphdr *ip = ip_hdr(skb);
unsigned char stn = ntohl(ip->saddr) & 0xff;
struct dst_entry *dst = skb_dst(skb);
struct ec_device *edev = NULL;
struct sock *sk = NULL;
struct sk_buff *newskb;
if (dst)
edev = dst->dev->ec_ptr;
if (!edev)
goto bad;
sk = ec_listening_socket(ah->port, stn, edev->net);
if (sk == NULL)
goto bad; /* Nobody wants it */
newskb = alloc_skb((len - sizeof(struct aunhdr) + 15) & ~15,
GFP_ATOMIC);
if (newskb == NULL) {
pr_debug("AUN: memory squeeze, dropping packet\n");
/* Send nack and hope sender tries again */
goto bad;
}
memcpy(skb_put(newskb, len - sizeof(struct aunhdr)), (void *)(ah + 1),
len - sizeof(struct aunhdr));
if (ec_queue_packet(sk, newskb, stn, edev->net, ah->cb, ah->port)) {
/* Socket is bankrupt. */
kfree_skb(newskb);
goto bad;
}
aun_send_response(ip->saddr, ah->handle, 3, 0);
sock_put(sk);
return;
bad:
aun_send_response(ip->saddr, ah->handle, 4, 0);
if (sk)
sock_put(sk);
}
/*
* Handle incoming AUN transmit acknowledgements. If the sequence
* number matches something in our backlog then kill it and tell
* the user. If the remote took too long to reply then we may have
* dropped the packet already.
*/
static void aun_tx_ack(unsigned long seq, int result)
{
struct sk_buff *skb;
unsigned long flags;
struct ec_cb *eb;
spin_lock_irqsave(&aun_queue_lock, flags);
skb_queue_walk(&aun_queue, skb) {
eb = (struct ec_cb *)&skb->cb;
if (eb->seq == seq)
goto foundit;
}
spin_unlock_irqrestore(&aun_queue_lock, flags);
pr_debug("AUN: unknown sequence %ld\n", seq);
return;
foundit:
tx_result(skb->sk, eb->cookie, result);
skb_unlink(skb, &aun_queue);
spin_unlock_irqrestore(&aun_queue_lock, flags);
kfree_skb(skb);
}
/*
* Deal with received AUN frames - sort out what type of thing it is
* and hand it to the right function.
*/
static void aun_data_available(struct sock *sk, int slen)
{
int err;
struct sk_buff *skb;
unsigned char *data;
struct aunhdr *ah;
size_t len;
while ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) {
if (err == -EAGAIN) {
pr_err("AUN: no data available?!\n");
return;
}
pr_debug("AUN: recvfrom() error %d\n", -err);
}
data = skb_transport_header(skb) + sizeof(struct udphdr);
ah = (struct aunhdr *)data;
len = skb->len - sizeof(struct udphdr);
switch (ah->code) {
case 2:
aun_incoming(skb, ah, len);
break;
case 3:
aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_OK);
break;
case 4:
aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_NOT_LISTENING);
break;
default:
pr_debug("AUN: unknown packet type: %d\n", data[0]);
}
skb_free_datagram(sk, skb);
}
/*
* Called by the timer to manage the AUN transmit queue. If a packet
* was sent to a dead or nonexistent host then we will never get an
* acknowledgement back. After a few seconds we need to spot this and
* drop the packet.
*/
static void ab_cleanup(unsigned long h)
{
struct sk_buff *skb, *n;
unsigned long flags;
spin_lock_irqsave(&aun_queue_lock, flags);
skb_queue_walk_safe(&aun_queue, skb, n) {
struct ec_cb *eb = (struct ec_cb *)&skb->cb;
if ((jiffies - eb->start) > eb->timeout) {
tx_result(skb->sk, eb->cookie,
ECTYPE_TRANSMIT_NOT_PRESENT);
skb_unlink(skb, &aun_queue);
kfree_skb(skb);
}
}
spin_unlock_irqrestore(&aun_queue_lock, flags);
mod_timer(&ab_cleanup_timer, jiffies + (HZ * 2));
}
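/*
 * Create and bind the kernel UDP socket used for AUN emulation, and start
 * the periodic timer that expires unacknowledged transmits.
 */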
static int __init aun_udp_initialise(void)
{
int error;
struct sockaddr_in sin;
skb_queue_head_init(&aun_queue);
setup_timer(&ab_cleanup_timer, ab_cleanup, 0);
ab_cleanup_timer.expires = jiffies + (HZ * 2);
add_timer(&ab_cleanup_timer);
memset(&sin, 0, sizeof(sin));
sin.sin_port = htons(AUN_PORT);
/* We can count ourselves lucky Acorn machines are too dim to
speak IPv6. :-) */
error = sock_create_kern(PF_INET, SOCK_DGRAM, 0, &udpsock);
if (error < 0) {
pr_err("AUN: socket error %d\n", -error);
return error;
}
udpsock->sk->sk_reuse = 1;
udpsock->sk->sk_allocation = GFP_ATOMIC; /* we're going to call it
from interrupts */
error = udpsock->ops->bind(udpsock, (struct sockaddr *)&sin,
sizeof(sin));
if (error < 0) {
pr_err("AUN: bind error %d\n", -error);
goto release;
}
udpsock->sk->sk_data_ready = aun_data_available;
return 0;
release:
sock_release(udpsock);
udpsock = NULL;
return error;
}
#endif
#ifdef CONFIG_ECONET_NATIVE
/*
* Receive an Econet frame from a device.
*/
static int econet_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct ec_framehdr *hdr;
struct sock *sk = NULL;
struct ec_device *edev = dev->ec_ptr;
if (!net_eq(dev_net(dev), &init_net))
goto drop;
if (skb->pkt_type == PACKET_OTHERHOST)
goto drop;
if (!edev)
goto drop;
skb = skb_share_check(skb, GFP_ATOMIC);
if (skb == NULL)
return NET_RX_DROP;
if (!pskb_may_pull(skb, sizeof(struct ec_framehdr)))
goto drop;
hdr = (struct ec_framehdr *)skb->data;
/* First check for encapsulated IP */
if (hdr->port == EC_PORT_IP) {
skb->protocol = htons(ETH_P_IP);
skb_pull(skb, sizeof(struct ec_framehdr));
netif_rx(skb);
return NET_RX_SUCCESS;
}
sk = ec_listening_socket(hdr->port, hdr->src_stn, hdr->src_net);
if (!sk)
goto drop;
if (ec_queue_packet(sk, skb, edev->net, hdr->src_stn, hdr->cb,
hdr->port))
goto drop;
sock_put(sk);
return NET_RX_SUCCESS;
drop:
if (sk)
sock_put(sk);
kfree_skb(skb);
return NET_RX_DROP;
}
static struct packet_type econet_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_ECONET),
.func = econet_rcv,
};
static void econet_hw_initialise(void)
{
dev_add_pack(&econet_packet_type);
}
#endif
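/*
 * Network device notifier: when a device is unregistered, drop the
 * per-device Econet state we were holding for it.
 */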
static int econet_notifier(struct notifier_block *this, unsigned long msg,
void *data)
{
struct net_device *dev = data;
struct ec_device *edev;
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
switch (msg) {
case NETDEV_UNREGISTER:
/* A device has gone down - kill any data we hold for it. */
edev = dev->ec_ptr;
if (edev) {
if (net2dev_map[0] == dev)
net2dev_map[0] = NULL;
net2dev_map[edev->net] = NULL;
kfree(edev);
dev->ec_ptr = NULL;
}
break;
}
return NOTIFY_DONE;
}
static struct notifier_block econet_netdev_notifier = {
.notifier_call = econet_notifier,
};
static void __exit econet_proto_exit(void)
{
#ifdef CONFIG_ECONET_AUNUDP
del_timer(&ab_cleanup_timer);
if (udpsock)
sock_release(udpsock);
#endif
unregister_netdevice_notifier(&econet_netdev_notifier);
#ifdef CONFIG_ECONET_NATIVE
dev_remove_pack(&econet_packet_type);
#endif
sock_unregister(econet_family_ops.family);
proto_unregister(&econet_proto);
}
static int __init econet_proto_init(void)
{
int err = proto_register(&econet_proto, 0);
if (err != 0)
goto out;
sock_register(&econet_family_ops);
#ifdef CONFIG_ECONET_AUNUDP
aun_udp_initialise();
#endif
#ifdef CONFIG_ECONET_NATIVE
econet_hw_initialise();
#endif
register_netdevice_notifier(&econet_netdev_notifier);
out:
return err;
}
module_init(econet_proto_init);
module_exit(econet_proto_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_ECONET);
|
204031.c | /* otb_main6S_program_call_function.f -- translated by f2c (version 19970805).
You must link the resulting object file with the libraries:
-lf2c -lm (in that order)
*/
#ifdef __cplusplus
extern "C" {
#endif
/* OTB patches: replace "f2c.h" by "otb_6S.h" */
/*#include "f2c.h"*/
#include "otb_6S.h"
/* Table of constant values */
static integer c__9 = 9;
static integer c__1 = 1;
static integer c__5 = 5;
static integer c__3 = 3;
/*< program call6Sv1_0b >*/
/* Main program */ int OTB_6S_MAIN__()
{
/* Format strings */
static char fmt_100[] = "(10x,\002 -----> atmospheric reflectance : \
\002,f6.5)";
static char fmt_200[] = "(10x,\002 -----> atmospheric spherical albedo \
: \002,f6.5)";
static char fmt_300[] = "(10x,\002 -----> total gaseous transmission : \
\002,f6.5)";
static char fmt_400[] = "(10x,\002 -----> downward transmittance : \
\002,f6.5)";
static char fmt_500[] = "(10x,\002 -----> upward transmittance : \
\002,f6.5)";
static char fmt_600[] = "(10x,\002 -----> upward diffuse transmittance \
: \002,f6.5)";
static char fmt_700[] = "(10x,\002 -----> upward direct transmittance :\
\002,f6.5)";
static char fmt_710[] = "(10x,\002 -----> upward diffuse transm. ray: \
\002,f6.5)";
static char fmt_720[] = "(10x,\002 -----> upward diffuse transm. aer: \
\002,f6.5)";
/* System generated locals */
integer i__1;
/* Builtin functions */
integer s_wsle(cilist *), do_lio(integer *, integer *, char *, ftnlen),
e_wsle(), s_rsle(cilist *), e_rsle(), s_wsfe(cilist *), do_fio(
integer *, char *, ftnlen), e_wsfe();
/* Local variables */
doublereal otb_ratm__, pressure;
integer i__, l;
doublereal s[1501];
integer ik;
doublereal uw, uo3, otb_tdif_up__, phi0, otb_tdir_up__;
integer iaer, iinf, jday;
doublereal asol, avis, phiv, sast;
integer isup, iread;
doublereal taer55, tgasm, wlinf;
integer month;
doublereal wlsup;
extern /* Subroutine */ int otb_6s_ssssss_otb_main_function(doublereal *, doublereal *,
doublereal *, doublereal *, integer *, integer *, doublereal *,
doublereal *, doublereal *, integer *, doublereal *, doublereal *,
doublereal *, doublereal *, doublereal *, doublereal *,
doublereal *, doublereal *, doublereal *, doublereal *,
doublereal *, doublereal *, doublereal *);
doublereal sdtott, sutott, otb_tdif_up_aer__, otb_tdif_up_ray__;
/* Fortran I/O blocks */
static cilist io___4 = { 0, 6, 0, 0, 0 };
static cilist io___5 = { 0, 0, 0, 0, 0 };
static cilist io___12 = { 0, 6, 0, 0, 0 };
static cilist io___13 = { 0, 6, 0, 0, 0 };
static cilist io___14 = { 0, 6, 0, 0, 0 };
static cilist io___15 = { 0, 0, 0, 0, 0 };
static cilist io___19 = { 0, 6, 0, 0, 0 };
static cilist io___20 = { 0, 6, 0, 0, 0 };
static cilist io___21 = { 0, 6, 0, 0, 0 };
static cilist io___22 = { 0, 0, 0, 0, 0 };
static cilist io___24 = { 0, 6, 0, 0, 0 };
static cilist io___25 = { 0, 6, 0, 0, 0 };
static cilist io___26 = { 0, 6, 0, 0, 0 };
static cilist io___27 = { 0, 6, 0, 0, 0 };
static cilist io___28 = { 0, 0, 0, 0, 0 };
static cilist io___30 = { 0, 6, 0, 0, 0 };
static cilist io___31 = { 0, 6, 0, 0, 0 };
static cilist io___34 = { 0, 0, 0, 0, 0 };
static cilist io___37 = { 0, 6, 0, 0, 0 };
static cilist io___38 = { 0, 6, 0, 0, 0 };
static cilist io___40 = { 0, 0, 0, 0, 0 };
static cilist io___51 = { 0, 6, 0, 0, 0 };
static cilist io___52 = { 0, 6, 0, fmt_100, 0 };
static cilist io___53 = { 0, 6, 0, fmt_200, 0 };
static cilist io___54 = { 0, 6, 0, fmt_300, 0 };
static cilist io___55 = { 0, 6, 0, fmt_400, 0 };
static cilist io___56 = { 0, 6, 0, fmt_500, 0 };
static cilist io___57 = { 0, 6, 0, fmt_600, 0 };
static cilist io___58 = { 0, 6, 0, fmt_700, 0 };
static cilist io___59 = { 0, 6, 0, fmt_710, 0 };
static cilist io___60 = { 0, 6, 0, fmt_720, 0 };
/* _otb MOD V2 : New outputs : */
/* _otb otb_tdif_up = Upward diffuse transmittance */
/* _otb otb_tdir_up = Upward direct transmittance */
/* _otb otb_tdif_up_ray = Upward diffuse transmittance for rayleigh */
/* _otb otb_tdif_up_aer = Upward diffuse transmittance for aerosols */
/*< IMPLICIT NONE >*/
/*< real wlinf,wlsup,s(1501) >*/
/*< real asol,phi0,avis,phiv >*/
/*< integer month,jday >*/
/*< real uw,uo3,taer55 >*/
/*< real tgasm,sdtott,sutott,sast >*/
/*< real sutotr,sutota >*/
/*< integer iwave,iinf,isup,inhomo,idirec,iaer >*/
/*< integer iread,i,ik,l >*/
/* _otb Atmospheric parameters */
/*< real otb_ratm !Atmospheric reflectance >*/
/*< real pressure !Atmospheric pressure >*/
/*< real otb_tdif_up !Upward diffuse transmittance >*/
/*< real otb_tdir_up !Upward direct transmittance >*/
/*< real otb_tdif_up_ray !Upward diffuse transmittance for rayleigh >*/
/*< real otb_tdif_up_aer !Upward diffuse transmittance for aerosols >*/
/* ***********************************************************************
*/
/* Parameters initialization */
/* ***********************************************************************
*/
/*< iread=5 >*/
iread = 5;
/*< iinf=1 >*/
iinf = 1;
/*< isup=1501 >*/
isup = 1501;
/*< write(6,*) "------> Execution de l'interface d'appel de 6S " >*/
s_wsle(&io___4);
do_lio(&c__9, &c__1, "------> Execution de l'interface d'appel de 6S ",
47L);
e_wsle();
/* **********************************************************************c
*/
/* c
*/
/* * sun c
*/
/* \ * / c
*/
/* * * * * * c
*/
/* z / * \ c
*/
/* + /+ c
*/
/* satellite / + / c
*/
/* o/ + / c
*/
/* /.\ + /. c
*/
/* / . \ _avis-_+_-asol_/ . c
*/
/* . \- -+ / . north c
*/
/* . \ + / . + c
*/
/* . \ + / .+ c
*/
/* . \ + / +. c
*/
/* . \ + / + . c
*/
/* . \ + / + . c
*/
/* . \ +/ + . c
*/
/* west + + + + + + + . + + + + +\+ + + + + . + + + + + + + + east c
*/
/* . +.. . c
*/
/* . + . . . c
*/
/* . + . . . c
*/
/* . + . .'. . c
*/
/* . + .. . , ' .. c
*/
/* .+ . \ . c
*/
/* +. . \ . c
*/
/* + . . \ . c
*/
/* south . . (phiv-phi0) c
*/
/* c
*/
/* c
*/
/* c
*/
/* **********************************************************************c
*/
/* **********************************************************************c
*/
/* igeom geometrical conditions c
*/
/* -------------------------------------- c
*/
/* c
*/
/* you choose your own conditions; igeom=0 c
*/
/* 0 enter solar zenith angle (in degrees ) c
*/
/* solar azimuth angle " c
*/
/* satellite zenith angle " c
*/
/* satellite azimuth angle " c
*/
/* month c
*/
/* day of the month c
*/
/* c
*/
/* **********************************************************************c
*/
/*< read(iread,*) asol,phi0,avis,phiv,month,jday >*/
io___5.ciunit = iread;
s_rsle(&io___5);
do_lio(&c__5, &c__1, (char *)&asol, (ftnlen)sizeof(doublereal));
do_lio(&c__5, &c__1, (char *)&phi0, (ftnlen)sizeof(doublereal));
do_lio(&c__5, &c__1, (char *)&avis, (ftnlen)sizeof(doublereal));
do_lio(&c__5, &c__1, (char *)&phiv, (ftnlen)sizeof(doublereal));
do_lio(&c__3, &c__1, (char *)&month, (ftnlen)sizeof(integer));
do_lio(&c__3, &c__1, (char *)&jday, (ftnlen)sizeof(integer));
e_rsle();
/*< write(6,*) "asol,phi0,avis,phiv,month,jday : " >*/
s_wsle(&io___12);
do_lio(&c__9, &c__1, "asol,phi0,avis,phiv,month,jday : ", 33L);
e_wsle();
/*< write(6,*) asol,phi0,avis,phiv,month,jday >*/
s_wsle(&io___13);
do_lio(&c__5, &c__1, (char *)&asol, (ftnlen)sizeof(doublereal));
do_lio(&c__5, &c__1, (char *)&phi0, (ftnlen)sizeof(doublereal));
do_lio(&c__5, &c__1, (char *)&avis, (ftnlen)sizeof(doublereal));
do_lio(&c__5, &c__1, (char *)&phiv, (ftnlen)sizeof(doublereal));
do_lio(&c__3, &c__1, (char *)&month, (ftnlen)sizeof(integer));
do_lio(&c__3, &c__1, (char *)&jday, (ftnlen)sizeof(integer));
e_wsle();
/*< write(6,*) >*/
s_wsle(&io___14);
e_wsle();
/* **********************************************************************c
*/
/* idatm atmospheric model c
*/
/* -------------------- c
*/
/* pressure ( in mb ) c
*/
/* uw (in g/cm2 ) c
*/
/* uo3 (in cm-atm) c
*/
/* profil is taken from us62 c
*/
/* c
*/
/* **********************************************************************c
*/
/*< read(iread,*) pressure, uw, uo3 >*/
io___15.ciunit = iread;
s_rsle(&io___15);
do_lio(&c__5, &c__1, (char *)&pressure, (ftnlen)sizeof(doublereal));
do_lio(&c__5, &c__1, (char *)&uw, (ftnlen)sizeof(doublereal));
do_lio(&c__5, &c__1, (char *)&uo3, (ftnlen)sizeof(doublereal));
e_rsle();
/*< write(6,*) "pressure, uw, uo3" >*/
s_wsle(&io___19);
do_lio(&c__9, &c__1, "pressure, uw, uo3", 17L);
e_wsle();
/*< write(6,*) pressure, uw, uo3 >*/
s_wsle(&io___20);
do_lio(&c__5, &c__1, (char *)&pressure, (ftnlen)sizeof(doublereal));
do_lio(&c__5, &c__1, (char *)&uw, (ftnlen)sizeof(doublereal));
do_lio(&c__5, &c__1, (char *)&uo3, (ftnlen)sizeof(doublereal));
e_wsle();
/*< write(6,*) >*/
s_wsle(&io___21);
e_wsle();
/* **********************************************************************c
*/
/* c
*/
/* iaer aerosol model(type) c
*/
/* -------------- c
*/
/* c
*/
/* c
*/
/* you select one of the following standard aerosol models: c
*/
/* 0 no aerosols c
*/
/* 1 continental model ) c
*/
/* 2 maritime model ) according to sra models c
*/
/* 3 urban model ) c
*/
/* 5 shettle model for background desert aerosol c
*/
/* c
*/
/* **********************************************************************c
*/
/*< read(iread,*) iaer >*/
io___22.ciunit = iread;
s_rsle(&io___22);
do_lio(&c__3, &c__1, (char *)&iaer, (ftnlen)sizeof(integer));
e_rsle();
/*< write(6,*) "iaer = ", iaer >*/
s_wsle(&io___24);
do_lio(&c__9, &c__1, "iaer = ", 7L);
do_lio(&c__3, &c__1, (char *)&iaer, (ftnlen)sizeof(integer));
e_wsle();
/*< write(6,*) >*/
s_wsle(&io___25);
e_wsle();
/*< >*/
if (iaer != 0 && iaer != 1 && iaer != 2 && iaer != 3 && iaer != 5) {
/*< write(6,*) "Choix iaer non compatible : ", iaer >*/
s_wsle(&io___26);
do_lio(&c__9, &c__1, "Choix iaer non compatible : ", 28L);
do_lio(&c__3, &c__1, (char *)&iaer, (ftnlen)sizeof(integer));
e_wsle();
/*< write(6,*) "Choisir 0, 1, 2, 3 ou 5 uniquement" >*/
s_wsle(&io___27);
do_lio(&c__9, &c__1, "Choisir 0, 1, 2, 3 ou 5 uniquement", 34L);
e_wsle();
/*< goto 999 >*/
goto L999;
/*< endif >*/
}
/*< read(iread,*) taer55 >*/
io___28.ciunit = iread;
s_rsle(&io___28);
do_lio(&c__5, &c__1, (char *)&taer55, (ftnlen)sizeof(doublereal));
e_rsle();
/*< write(6,*) "taer55 = ", taer55 >*/
s_wsle(&io___30);
do_lio(&c__9, &c__1, "taer55 = ", 9L);
do_lio(&c__5, &c__1, (char *)&taer55, (ftnlen)sizeof(doublereal));
e_wsle();
/*< write(6,*) >*/
s_wsle(&io___31);
e_wsle();
/* **********************************************************************c
*/
/* iwave input of the spectral conditions c
*/
/* -------------------------------- c
*/
/* c
*/
/* 1 enter wlinf, wlsup and user's filter function s(lambda) c
*/
/* ( by step of 0.0025 micrometer). c
*/
/* c
*/
/* **********************************************************************c
*/
/*< do 38 l=iinf,isup >*/
i__1 = isup;
for (l = iinf; l <= i__1; ++l) {
/*< s(l)=0. >*/
s[l - 1] = 0.;
/*< 38 continue >*/
/* L38: */
}
/*< 110 read(iread,*) wlinf,wlsup >*/
/* L110: */
io___34.ciunit = iread;
s_rsle(&io___34);
do_lio(&c__5, &c__1, (char *)&wlinf, (ftnlen)sizeof(doublereal));
do_lio(&c__5, &c__1, (char *)&wlsup, (ftnlen)sizeof(doublereal));
e_rsle();
/*< write(6,*) " wlinf,wlsup : ", wlinf,wlsup >*/
s_wsle(&io___37);
do_lio(&c__9, &c__1, " wlinf,wlsup : ", 15L);
do_lio(&c__5, &c__1, (char *)&wlinf, (ftnlen)sizeof(doublereal));
do_lio(&c__5, &c__1, (char *)&wlsup, (ftnlen)sizeof(doublereal));
e_wsle();
/*< write(6,*) >*/
s_wsle(&io___38);
e_wsle();
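/* The user filter is sampled on the 6S spectral grid: 1501 points starting at
   0.25 um in steps of 0.0025 um. The +1.5 below turns a wavelength offset
   into a 1-based, rounded index on that grid. */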
/*< iinf=(wlinf-.25)/0.0025+1.5 >*/
iinf = (integer) ((wlinf - .25) / .0025 + 1.5);
/*< isup=(wlsup-.25)/0.0025+1.5 >*/
isup = (integer) ((wlsup - .25) / .0025 + 1.5);
/*< do 1113 ik=iinf,isup >*/
i__1 = isup;
for (ik = iinf; ik <= i__1; ++ik) {
/*< s(ik)=0. >*/
s[ik - 1] = 0.;
/*< 1113 continue >*/
/* L1113: */
}
/*< read(iread,*) (s(i),i=iinf,isup) >*/
io___40.ciunit = iread;
s_rsle(&io___40);
i__1 = isup;
for (i__ = iinf; i__ <= i__1; ++i__) {
do_lio(&c__5, &c__1, (char *)&s[i__ - 1], (ftnlen)sizeof(doublereal));
}
e_rsle();
/* **********************************************************************c
*/
/* c
*/
/* c
*/
/* start of computations c
*/
/* c
*/
/* c
*/
/* c
*/
/* **********************************************************************c
*/
/* goto 800 */
/*< >*/
otb_6s_ssssss_otb_main_function(&asol, &phi0, &avis, &phiv, &month, &jday, &pressure, &uw, &uo3,
&iaer, &taer55, &wlinf, &wlsup, s, &otb_ratm__, &sast, &tgasm, &
sdtott, &sutott, &otb_tdif_up__, &otb_tdir_up__, &
otb_tdif_up_ray__, &otb_tdif_up_aer__);
/*< write(6,*) >*/
s_wsle(&io___51);
e_wsle();
/*< 800 write(6,100) otb_ratm >*/
/* L800: */
s_wsfe(&io___52);
do_fio(&c__1, (char *)&otb_ratm__, (ftnlen)sizeof(doublereal));
e_wsfe();
/*< write(6,200) sast >*/
s_wsfe(&io___53);
do_fio(&c__1, (char *)&sast, (ftnlen)sizeof(doublereal));
e_wsfe();
/*< write(6,300) tgasm >*/
s_wsfe(&io___54);
do_fio(&c__1, (char *)&tgasm, (ftnlen)sizeof(doublereal));
e_wsfe();
/*< write(6,400) sdtott >*/
s_wsfe(&io___55);
do_fio(&c__1, (char *)&sdtott, (ftnlen)sizeof(doublereal));
e_wsfe();
/*< write(6,500) sutott >*/
s_wsfe(&io___56);
do_fio(&c__1, (char *)&sutott, (ftnlen)sizeof(doublereal));
e_wsfe();
/*< write(6,600) otb_tdif_up >*/
s_wsfe(&io___57);
do_fio(&c__1, (char *)&otb_tdif_up__, (ftnlen)sizeof(doublereal));
e_wsfe();
/*< write(6,700) otb_tdir_up >*/
s_wsfe(&io___58);
do_fio(&c__1, (char *)&otb_tdir_up__, (ftnlen)sizeof(doublereal));
e_wsfe();
/*< write(6,710) otb_tdif_up_ray >*/
s_wsfe(&io___59);
do_fio(&c__1, (char *)&otb_tdif_up_ray__, (ftnlen)sizeof(doublereal));
e_wsfe();
/*< write(6,720) otb_tdif_up_aer >*/
s_wsfe(&io___60);
do_fio(&c__1, (char *)&otb_tdif_up_aer__, (ftnlen)sizeof(doublereal));
e_wsfe();
/*< 100 format(10x,40h -----> atmospheric reflectance : , f6.5) >*/
/*< 200 format(10x,40h -----> atmospheric spherical albedo : , f6.5) >*/
/*< 300 format(10x,40h -----> total gaseous transmission : , f6.5) >*/
/*< 400 format(10x,40h -----> downward transmittance : , f6.5) >*/
/*< 500 format(10x,40h -----> upward transmittance : , f6.5) >*/
/*< 600 format(10x,40h -----> upward diffuse transmittance : , f6.5) >*/
/*< 700 format(10x,40h -----> upward direct transmittance : , f6.5) >*/
/*< 710 format(10x,40h -----> upward diffuse transm. ray: , f6.5) >*/
/*< 720 format(10x,40h -----> upward diffuse transm. aer: , f6.5) >*/
/*< 999 continue >*/
L999:
/*< end >*/
return 0;
} /* MAIN__ */
/* Main program alias */ int call6sv1_0b__ () { OTB_6S_MAIN__ (); return 0; }
#ifdef __cplusplus
}
#endif
|
223777.c | /******************************************************************************
* Automatically generated file. Please don't change anything. *
*****************************************************************************/
#include <stdlib.h>
#include <lua.h>
#include <lauxlib.h>
#include "iup.h"
#include "iuplua.h"
#include "iupglcontrols.h"
#include "il.h"
static int glsubcanvas_gl_action(Ihandle *self)
{
lua_State *L = iuplua_call_start(self, "gl_action");
return iuplua_call(L, 0);
}
static int glsubcanvas_gl_wheel_cb(Ihandle *self, float p0, int p1, int p2, char * p3)
{
lua_State *L = iuplua_call_start(self, "gl_wheel_cb");
lua_pushnumber(L, p0);
lua_pushinteger(L, p1);
lua_pushinteger(L, p2);
lua_pushstring(L, p3);
return iuplua_call(L, 4);
}
static int glsubcanvas_gl_button_cb(Ihandle *self, int p0, int p1, int p2, int p3, char * p4)
{
lua_State *L = iuplua_call_start(self, "gl_button_cb");
lua_pushinteger(L, p0);
lua_pushinteger(L, p1);
lua_pushinteger(L, p2);
lua_pushinteger(L, p3);
lua_pushstring(L, p4);
return iuplua_call(L, 5);
}
static int glsubcanvas_gl_enterwindow_cb(Ihandle *self)
{
lua_State *L = iuplua_call_start(self, "gl_enterwindow_cb");
return iuplua_call(L, 0);
}
static int glsubcanvas_gl_leavewindow_cb(Ihandle *self)
{
lua_State *L = iuplua_call_start(self, "gl_leavewindow_cb");
return iuplua_call(L, 0);
}
static int glsubcanvas_gl_motion_cb(Ihandle *self, int p0, int p1, char * p2)
{
lua_State *L = iuplua_call_start(self, "gl_motion_cb");
lua_pushinteger(L, p0);
lua_pushinteger(L, p1);
lua_pushstring(L, p2);
return iuplua_call(L, 3);
}
static int GLSubCanvas(lua_State *L)
{
Ihandle *ih = IupGLSubCanvas();
iuplua_plugstate(L, ih);
iuplua_pushihandle_raw(L, ih);
return 1;
}
int iupglsubcanvaslua_open(lua_State * L)
{
iuplua_register(L, GLSubCanvas, "GLSubCanvas");
iuplua_register_cb(L, "GL_ACTION", (lua_CFunction)glsubcanvas_gl_action, NULL);
iuplua_register_cb(L, "GL_WHEEL_CB", (lua_CFunction)glsubcanvas_gl_wheel_cb, NULL);
iuplua_register_cb(L, "GL_BUTTON_CB", (lua_CFunction)glsubcanvas_gl_button_cb, NULL);
iuplua_register_cb(L, "GL_ENTERWINDOW_CB", (lua_CFunction)glsubcanvas_gl_enterwindow_cb, NULL);
iuplua_register_cb(L, "GL_LEAVEWINDOW_CB", (lua_CFunction)glsubcanvas_gl_leavewindow_cb, NULL);
iuplua_register_cb(L, "GL_MOTION_CB", (lua_CFunction)glsubcanvas_gl_motion_cb, NULL);
#ifdef IUPLUA_USELOH
#include "glsubcanvas.loh"
#else
#ifdef IUPLUA_USELH
#include "glsubcanvas.lh"
#else
iuplua_dofile(L, "glsubcanvas.lua");
#endif
#endif
return 0;
}
|
3656.c | #include "frida-gumjs.h"
#include "debug.h"
#include "asan.h"
#include "util.h"
#if defined(__arm__)
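/*
 * 32-bit ARM has no ASAN support in this code path: these stubs keep the
 * build working and only abort at run time if ASAN was actually requested.
 */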
void asan_instrument(const cs_insn *instr, GumStalkerIterator *iterator) {
UNUSED_PARAMETER(instr);
UNUSED_PARAMETER(iterator);
if (asan_initialized) {
FATAL("ASAN mode not supported on this architecture");
}
}
void asan_arch_init(void) {
FATAL("ASAN mode not supported on this architecture");
}
#endif
|
997337.c | /*
* Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the OpenSSL license (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
#include <openssl/crypto.h>
#include "internal/cryptlib.h"
#if !defined(OPENSSL_THREADS) || defined(CRYPTO_TDEBUG)
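/*
 * Single-threaded fallback implementation: locks are reduced to a "valid"
 * marker, thread-local storage becomes a process-global array, and the
 * atomic helpers are plain reads/writes, since only one thread ever runs.
 */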
CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
{
CRYPTO_RWLOCK *lock;
if ((lock = OPENSSL_zalloc(sizeof(unsigned int))) == NULL) {
/* Don't set error, to avoid recursion blowup. */
return NULL;
}
*(unsigned int *)lock = 1;
return lock;
}
int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
{
if (!ossl_assert(*(unsigned int *)lock == 1))
return 0;
return 1;
}
int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
{
if (!ossl_assert(*(unsigned int *)lock == 1))
return 0;
return 1;
}
int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
{
if (!ossl_assert(*(unsigned int *)lock == 1))
return 0;
return 1;
}
void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
{
if (lock == NULL)
return;
*(unsigned int *)lock = 0;
OPENSSL_free(lock);
return;
}
int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
{
if (*once != 0)
return 1;
init();
*once = 1;
return 1;
}
#define OPENSSL_CRYPTO_THREAD_LOCAL_KEY_MAX 256
static void *thread_local_storage[OPENSSL_CRYPTO_THREAD_LOCAL_KEY_MAX];
int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
{
static unsigned int thread_local_key = 0;
if (thread_local_key >= OPENSSL_CRYPTO_THREAD_LOCAL_KEY_MAX)
return 0;
*key = thread_local_key++;
thread_local_storage[*key] = NULL;
return 1;
}
void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
{
if (*key >= OPENSSL_CRYPTO_THREAD_LOCAL_KEY_MAX)
return NULL;
return thread_local_storage[*key];
}
int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
{
if (*key >= OPENSSL_CRYPTO_THREAD_LOCAL_KEY_MAX)
return 0;
thread_local_storage[*key] = val;
return 1;
}
int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
{
*key = OPENSSL_CRYPTO_THREAD_LOCAL_KEY_MAX + 1;
return 1;
}
CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
{
return 0;
}
int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
{
return (a == b);
}
int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
{
*val += amount;
*ret = *val;
return 1;
}
int CRYPTO_atomic_read(int *val, int *ret, CRYPTO_RWLOCK *lock)
{
*ret = *val;
return 1;
}
int CRYPTO_atomic_write(int *val, int n, CRYPTO_RWLOCK *lock)
{
*val = n;
return 1;
}
int openssl_init_fork_handlers(void)
{
return 0;
}
#endif
|
425009.c | /*
+----------------------------------------------------------------------+
| Swoole |
+----------------------------------------------------------------------+
| This source file is subject to version 2.0 of the Apache license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.apache.org/licenses/LICENSE-2.0.html |
| If you did not receive a copy of the Apache2.0 license and are unable|
| to obtain it through the world-wide-web, please send a note to |
| license@swoole.com so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Tianfeng Han <mikan.tenny@gmail.com> |
+----------------------------------------------------------------------+
*/
#include "php_swoole.h"
#include "swoole_http.h"
#ifdef SW_USE_HTTP2
#ifdef SW_COROUTINE
#include "swoole_coroutine.h"
#include "swoole_http_v2_client.h"
static zend_class_entry swoole_http2_client_coro_ce;
static zend_class_entry *swoole_http2_client_coro_class_entry_ptr;
static zend_class_entry swoole_http2_request_coro_ce;
static zend_class_entry *swoole_http2_request_class_entry_ptr;
static zend_class_entry swoole_http2_response_ce;
zend_class_entry *swoole_http2_response_class_entry_ptr;
ZEND_BEGIN_ARG_INFO_EX(arginfo_swoole_void, 0, 0, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_swoole_http2_client_coro_construct, 0, 0, 1)
ZEND_ARG_INFO(0, host)
ZEND_ARG_INFO(0, port)
ZEND_ARG_INFO(0, ssl)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_swoole_http2_client_coro_set, 0, 0, 1)
ZEND_ARG_ARRAY_INFO(0, settings, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_swoole_http2_client_coro_stats, 0, 0, 0)
ZEND_ARG_INFO(0, key)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_swoole_http2_client_coro_isStreamExist, 0, 0, 1)
ZEND_ARG_INFO(0, stream_id)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_swoole_http2_client_coro_send, 0, 0, 1)
ZEND_ARG_INFO(0, request)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_swoole_http2_client_coro_write, 0, 0, 2)
ZEND_ARG_INFO(0, stream_id)
ZEND_ARG_INFO(0, data)
ZEND_ARG_INFO(0, end_stream)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_swoole_http2_client_coro_recv, 0, 0, 0)
ZEND_ARG_INFO(0, timeout)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_swoole_http2_client_coro_goaway, 0, 0, 0)
ZEND_ARG_INFO(0, error_code)
ZEND_ARG_INFO(0, debug_data)
ZEND_END_ARG_INFO()
enum
{
HTTP2_CLIENT_CORO_CONTEXT = 0,
HTTP2_CLIENT_CORO_PROPERTY = 1,
};
static PHP_METHOD(swoole_http2_client_coro, __construct);
static PHP_METHOD(swoole_http2_client_coro, __destruct);
static PHP_METHOD(swoole_http2_client_coro, set);
static PHP_METHOD(swoole_http2_client_coro, connect);
static PHP_METHOD(swoole_http2_client_coro, stats);
static PHP_METHOD(swoole_http2_client_coro, isStreamExist);
static PHP_METHOD(swoole_http2_client_coro, send);
static PHP_METHOD(swoole_http2_client_coro, write);
static PHP_METHOD(swoole_http2_client_coro, recv);
static PHP_METHOD(swoole_http2_client_coro, goaway);
static PHP_METHOD(swoole_http2_client_coro, close);
static uint32_t http2_client_send_request(zval *zobject, zval *request);
static void http2_client_stream_free(void *ptr);
static void http2_client_onConnect(swClient *cli);
static void http2_client_onClose(swClient *cli);
static void http2_client_onTimeout(swTimer *timer, swTimer_node *tnode);
static void http2_client_onReceive(swClient *cli, char *buf, uint32_t _length);
static const zend_function_entry swoole_http2_client_methods[] =
{
PHP_ME(swoole_http2_client_coro, __construct, arginfo_swoole_http2_client_coro_construct, ZEND_ACC_PUBLIC)
PHP_ME(swoole_http2_client_coro, __destruct, arginfo_swoole_void, ZEND_ACC_PUBLIC)
PHP_ME(swoole_http2_client_coro, set, arginfo_swoole_http2_client_coro_set, ZEND_ACC_PUBLIC)
PHP_ME(swoole_http2_client_coro, connect, arginfo_swoole_void, ZEND_ACC_PUBLIC)
PHP_ME(swoole_http2_client_coro, stats, arginfo_swoole_http2_client_coro_stats, ZEND_ACC_PUBLIC)
PHP_ME(swoole_http2_client_coro, isStreamExist, arginfo_swoole_http2_client_coro_isStreamExist, ZEND_ACC_PUBLIC)
PHP_ME(swoole_http2_client_coro, send, arginfo_swoole_http2_client_coro_send, ZEND_ACC_PUBLIC)
PHP_ME(swoole_http2_client_coro, write, arginfo_swoole_http2_client_coro_write, ZEND_ACC_PUBLIC)
PHP_ME(swoole_http2_client_coro, recv, arginfo_swoole_http2_client_coro_recv, ZEND_ACC_PUBLIC)
PHP_ME(swoole_http2_client_coro, goaway, arginfo_swoole_http2_client_coro_goaway, ZEND_ACC_PUBLIC)
PHP_ME(swoole_http2_client_coro, close, arginfo_swoole_void, ZEND_ACC_PUBLIC)
PHP_FE_END
};
void swoole_http2_client_coro_init(int module_number)
{
INIT_CLASS_ENTRY(swoole_http2_client_coro_ce, "Swoole\\Coroutine\\Http2\\Client", swoole_http2_client_methods);
swoole_http2_client_coro_class_entry_ptr = zend_register_internal_class(&swoole_http2_client_coro_ce);
INIT_CLASS_ENTRY(swoole_http2_request_coro_ce, "Swoole\\Http2\\Request", NULL);
swoole_http2_request_class_entry_ptr = zend_register_internal_class(&swoole_http2_request_coro_ce);
INIT_CLASS_ENTRY(swoole_http2_response_ce, "Swoole\\Http2\\Response", NULL);
swoole_http2_response_class_entry_ptr = zend_register_internal_class(&swoole_http2_response_ce);
if (SWOOLE_G(use_namespace))
{
sw_zend_register_class_alias("swoole_http2_request", swoole_http2_request_class_entry_ptr);
sw_zend_register_class_alias("swoole_http2_response", swoole_http2_response_class_entry_ptr);
}
if (SWOOLE_G(use_shortname))
{
sw_zend_register_class_alias("Co\\Http2\\Client", swoole_http2_client_coro_class_entry_ptr);
}
zend_declare_property_long(swoole_http2_client_coro_class_entry_ptr, ZEND_STRL("errCode"), 0, ZEND_ACC_PUBLIC);
zend_declare_property_long(swoole_http2_client_coro_class_entry_ptr, ZEND_STRL("errMsg"), 0, ZEND_ACC_PUBLIC);
zend_declare_property_long(swoole_http2_client_coro_class_entry_ptr, ZEND_STRL("sock"), 0, ZEND_ACC_PUBLIC);
zend_declare_property_long(swoole_http2_client_coro_class_entry_ptr, ZEND_STRL("type"), 0, ZEND_ACC_PUBLIC);
zend_declare_property_null(swoole_http2_client_coro_class_entry_ptr, ZEND_STRL("setting"), ZEND_ACC_PUBLIC);
zend_declare_property_bool(swoole_http2_client_coro_class_entry_ptr, ZEND_STRL("connected"), 0, ZEND_ACC_PUBLIC);
zend_declare_property_null(swoole_http2_client_coro_class_entry_ptr, ZEND_STRL("host"), ZEND_ACC_PUBLIC);
zend_declare_property_long(swoole_http2_client_coro_class_entry_ptr, ZEND_STRL("port"), 0, ZEND_ACC_PUBLIC);
zend_declare_property_null(swoole_http2_request_class_entry_ptr, ZEND_STRL("path"), ZEND_ACC_PUBLIC);
zend_declare_property_null(swoole_http2_request_class_entry_ptr, ZEND_STRL("method"), ZEND_ACC_PUBLIC);
zend_declare_property_null(swoole_http2_request_class_entry_ptr, ZEND_STRL("headers"), ZEND_ACC_PUBLIC);
zend_declare_property_null(swoole_http2_request_class_entry_ptr, ZEND_STRL("cookies"), ZEND_ACC_PUBLIC);
zend_declare_property_null(swoole_http2_request_class_entry_ptr, ZEND_STRL("data"), ZEND_ACC_PUBLIC);
zend_declare_property_bool(swoole_http2_request_class_entry_ptr, ZEND_STRL("pipeline"), 0, ZEND_ACC_PUBLIC);
zend_declare_property_null(swoole_http2_request_class_entry_ptr, ZEND_STRL("files"), ZEND_ACC_PUBLIC);
zend_declare_property_long(swoole_http2_response_class_entry_ptr, ZEND_STRL("streamId"), 0, ZEND_ACC_PUBLIC);
zend_declare_property_long(swoole_http2_response_class_entry_ptr, ZEND_STRL("errCode"), 0, ZEND_ACC_PUBLIC);
zend_declare_property_long(swoole_http2_response_class_entry_ptr, ZEND_STRL("statusCode"), 0, ZEND_ACC_PUBLIC);
zend_declare_property_bool(swoole_http2_response_class_entry_ptr, ZEND_STRL("pipeline"), 0, ZEND_ACC_PUBLIC);
zend_declare_property_null(swoole_http2_response_class_entry_ptr, ZEND_STRL("headers"), ZEND_ACC_PUBLIC);
zend_declare_property_null(swoole_http2_response_class_entry_ptr, ZEND_STRL("set_cookie_headers"), ZEND_ACC_PUBLIC);
zend_declare_property_null(swoole_http2_response_class_entry_ptr, ZEND_STRL("cookies"), ZEND_ACC_PUBLIC);
zend_declare_property_null(swoole_http2_response_class_entry_ptr, ZEND_STRL("data"), ZEND_ACC_PUBLIC);
SWOOLE_DEFINE(HTTP2_TYPE_DATA);
SWOOLE_DEFINE(HTTP2_TYPE_HEADERS);
SWOOLE_DEFINE(HTTP2_TYPE_PRIORITY);
SWOOLE_DEFINE(HTTP2_TYPE_RST_STREAM);
SWOOLE_DEFINE(HTTP2_TYPE_SETTINGS);
SWOOLE_DEFINE(HTTP2_TYPE_PUSH_PROMISE);
SWOOLE_DEFINE(HTTP2_TYPE_PING);
SWOOLE_DEFINE(HTTP2_TYPE_GOAWAY);
SWOOLE_DEFINE(HTTP2_TYPE_WINDOW_UPDATE);
SWOOLE_DEFINE(HTTP2_TYPE_CONTINUATION);
SWOOLE_DEFINE(HTTP2_ERROR_NO_ERROR);
SWOOLE_DEFINE(HTTP2_ERROR_PROTOCOL_ERROR);
SWOOLE_DEFINE(HTTP2_ERROR_INTERNAL_ERROR);
SWOOLE_DEFINE(HTTP2_ERROR_FLOW_CONTROL_ERROR);
SWOOLE_DEFINE(HTTP2_ERROR_SETTINGS_TIMEOUT);
SWOOLE_DEFINE(HTTP2_ERROR_STREAM_CLOSED);
SWOOLE_DEFINE(HTTP2_ERROR_FRAME_SIZE_ERROR);
SWOOLE_DEFINE(HTTP2_ERROR_REFUSED_STREAM);
SWOOLE_DEFINE(HTTP2_ERROR_CANCEL);
SWOOLE_DEFINE(HTTP2_ERROR_COMPRESSION_ERROR);
SWOOLE_DEFINE(HTTP2_ERROR_CONNECT_ERROR);
SWOOLE_DEFINE(HTTP2_ERROR_ENHANCE_YOUR_CALM);
SWOOLE_DEFINE(HTTP2_ERROR_INADEQUATE_SECURITY);
}
static PHP_METHOD(swoole_http2_client_coro, __construct)
{
char *host;
size_t host_len;
long port = 80;
zend_bool ssl = SW_FALSE;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s|lb", &host, &host_len, &port, &ssl) == FAILURE)
{
return;
}
if (host_len <= 0)
{
zend_throw_exception(swoole_exception_class_entry_ptr, "host is empty.", SW_ERROR_INVALID_PARAMS);
RETURN_FALSE;
}
http2_client_property *hcc;
hcc = (http2_client_property*) emalloc(sizeof(http2_client_property));
bzero(hcc, sizeof(http2_client_property));
long type = SW_FLAG_ASYNC | SW_SOCK_TCP;
if (ssl)
{
#ifdef SW_USE_OPENSSL
type |= SW_SOCK_SSL;
hcc->ssl = 1;
#else
swoole_php_fatal_error(E_ERROR, "require openssl library.");
#endif
}
hcc->host = estrndup(host, host_len);
hcc->host_len = host_len;
hcc->port = port;
swoole_set_property(getThis(), HTTP2_CLIENT_CORO_PROPERTY, hcc);
php_context *context = emalloc(sizeof(php_context));
context->coro_params = *getThis();
swoole_set_property(getThis(), HTTP2_CLIENT_CORO_CONTEXT, context);
zend_update_property_long(swoole_http2_client_coro_class_entry_ptr, getThis(), ZEND_STRL("type"), type);
zend_update_property_stringl(swoole_http2_client_coro_class_entry_ptr, getThis(), ZEND_STRL("host"), host, host_len);
zend_update_property_long(swoole_http2_client_coro_class_entry_ptr, getThis(), ZEND_STRL("port"), port);
}
static PHP_METHOD(swoole_http2_client_coro, set)
{
zval *zset;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "z", &zset) == FAILURE)
{
return;
}
if (Z_TYPE_P(zset) != IS_ARRAY)
{
RETURN_FALSE;
}
zval *zsetting = sw_zend_read_property_array(swoole_http2_client_coro_class_entry_ptr, getThis(), ZEND_STRL("setting"), 1);
php_array_merge(Z_ARRVAL_P(zsetting), Z_ARRVAL_P(zset));
RETURN_TRUE;
}
static sw_inline int http2_client_close(swClient *cli)
{
if (!cli || !cli->socket || cli->socket->closed)
{
return SW_ERR;
}
return cli->close(cli);
}
static ssize_t http2_client_build_header(zval *zobject, zval *req, char *buffer)
{
char *date_str = NULL;
int index = 0;
int find_host = 0;
zval *zmethod = sw_zend_read_property(swoole_http2_request_class_entry_ptr, req, ZEND_STRL("method"), 1);
zval *zpath = sw_zend_read_property(swoole_http2_request_class_entry_ptr, req, ZEND_STRL("path"), 1);
zval *zheaders = sw_zend_read_property(swoole_http2_request_class_entry_ptr, req, ZEND_STRL("headers"), 1);
zval *zcookies = sw_zend_read_property(swoole_http2_request_class_entry_ptr, req, ZEND_STRL("cookies"), 1);
nghttp2_nv nv[1024];
http2_client_property *hcc = swoole_get_property(zobject, HTTP2_CLIENT_CORO_PROPERTY);
if (ZVAL_IS_NULL(zmethod) || Z_TYPE_P(zmethod) != IS_STRING || Z_STRLEN_P(zmethod) == 0)
{
http2_add_header(&nv[index++], ZEND_STRL(":method"), ZEND_STRL("GET"));
}
else
{
http2_add_header(&nv[index++], ZEND_STRL(":method"), Z_STRVAL_P(zmethod), Z_STRLEN_P(zmethod));
}
if (ZVAL_IS_NULL(zpath) || Z_TYPE_P(zpath) != IS_STRING || Z_STRLEN_P(zpath) == 0)
{
http2_add_header(&nv[index++], ZEND_STRL(":path"), "/", 1);
}
else
{
http2_add_header(&nv[index++], ZEND_STRL(":path"), Z_STRVAL_P(zpath), Z_STRLEN_P(zpath));
}
if (hcc->ssl)
{
http2_add_header(&nv[index++], ZEND_STRL(":scheme"), ZEND_STRL("https"));
}
else
{
http2_add_header(&nv[index++], ZEND_STRL(":scheme"), ZEND_STRL("http"));
}
/* Reserve the nv slot for the :authority (Host) pseudo-header; it is filled
   in at HTTP2_CLIENT_HOST_HEADER_INDEX once the user headers have been scanned. */
index++;
if (zheaders && Z_TYPE_P(zheaders) == IS_ARRAY)
{
HashTable *ht = Z_ARRVAL_P(zheaders);
zval *value = NULL;
char *key = NULL;
uint32_t keylen = 0;
int type;
SW_HASHTABLE_FOREACH_START2(ht, key, keylen, type, value)
{
if (!key)
{
break;
}
if (*key == ':')
{
continue;
}
if (strncasecmp("host", key, keylen) == 0)
{
http2_add_header(&nv[HTTP2_CLIENT_HOST_HEADER_INDEX], ZEND_STRL(":authority"), Z_STRVAL_P(value), Z_STRLEN_P(value));
find_host = 1;
}
else
{
http2_add_header(&nv[index++], key, keylen, Z_STRVAL_P(value), Z_STRLEN_P(value));
}
}
SW_HASHTABLE_FOREACH_END();
(void)type;
}
if (!find_host)
{
http2_add_header(&nv[HTTP2_CLIENT_HOST_HEADER_INDEX], ZEND_STRL(":authority"), hcc->host, hcc->host_len);
}
//http cookies
if (zcookies && Z_TYPE_P(zcookies) == IS_ARRAY)
{
http2_add_cookie(nv, &index, zcookies);
}
ssize_t rv;
size_t buflen;
size_t i;
size_t sum = 0;
for (i = 0; i < index; ++i)
{
sum += nv[i].namelen + nv[i].valuelen;
}
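/* nghttp2_hd_deflate_bound() returns an upper bound on the HPACK-encoded size of this header set. */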
buflen = nghttp2_hd_deflate_bound(hcc->deflater, nv, index);
if (buflen > hcc->remote_settings.max_header_list_size)
{
swoole_php_error(E_WARNING, "header cannot bigger than remote max_header_list_size %u.", hcc->remote_settings.header_table_size);
return SW_ERR;
}
rv = nghttp2_hd_deflate_hd(hcc->deflater, (uchar *) buffer, buflen, nv, index);
if (rv < 0)
{
swoole_php_error(E_WARNING, "nghttp2_hd_deflate_hd() failed with error: %s\n", nghttp2_strerror((int ) rv));
return SW_ERR;
}
for (i = 0; i < index; ++i)
{
efree(nv[i].name); // free lower header name copy
}
if (date_str)
{
efree(date_str);
}
return rv;
}
static void http2_client_onReceive(swClient *cli, char *buf, uint32_t _length)
{
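/*
 * Every HTTP/2 frame starts with a 9-octet header:
 *   length (24 bits) | type (8 bits) | flags (8 bits) | R (1 bit) + stream id (31 bits)
 * The fields below are pulled straight out of that header before the buffer
 * is advanced past SW_HTTP2_FRAME_HEADER_SIZE to the payload.
 */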
uint8_t type = buf[3];
uint8_t flags = buf[4];
uint32_t stream_id = ntohl((*(int *) (buf + 5))) & 0x7fffffff;
ssize_t length = swHttp2_get_length(buf);
buf += SW_HTTP2_FRAME_HEADER_SIZE;
zval *zobject = cli->object;
char frame[SW_HTTP2_FRAME_HEADER_SIZE + SW_HTTP2_FRAME_PING_PAYLOAD_SIZE];
http2_client_property *hcc = swoole_get_property(zobject, HTTP2_CLIENT_CORO_PROPERTY);
if (stream_id > hcc->last_stream_id)
{
hcc->last_stream_id = stream_id;
}
uint16_t id = 0;
uint32_t value = 0;
switch (type)
{
case SW_HTTP2_TYPE_SETTINGS:
{
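/* A SETTINGS frame carries a sequence of 6-octet (16-bit id, 32-bit value)
   pairs; after applying them the client answers with an empty SETTINGS
   frame that has the ACK flag set. */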
if (flags & SW_HTTP2_FLAG_ACK)
{
swHttp2FrameTraceLog(recv, "ACK");
return;
}
while (length > 0)
{
id = ntohs(*(uint16_t *) (buf));
value = ntohl(*(uint32_t *) (buf + sizeof(uint16_t)));
swHttp2FrameTraceLog(recv, "id=%d, value=%d", id, value);
switch (id)
{
case SW_HTTP2_SETTING_HEADER_TABLE_SIZE:
if (value != hcc->remote_settings.header_table_size)
{
hcc->remote_settings.header_table_size = value;
int ret = nghttp2_hd_deflate_change_table_size(hcc->deflater, value);
if (ret != 0)
{
swoole_php_error(E_WARNING, "nghttp2_hd_inflate_change_table_size failed with error: %s[%d]\n", nghttp2_strerror(ret), ret);
http2_client_close(cli);
return;
}
}
swTraceLog(SW_TRACE_HTTP2, "setting: header_compression_table_max=%u.", value);
break;
case SW_HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS:
hcc->remote_settings.max_concurrent_streams = value;
swTraceLog(SW_TRACE_HTTP2, "setting: max_concurrent_streams=%u.", value);
break;
case SW_HTTP2_SETTINGS_INIT_WINDOW_SIZE:
hcc->remote_settings.window_size = value;
swTraceLog(SW_TRACE_HTTP2, "setting: init_send_window=%u.", value);
break;
case SW_HTTP2_SETTINGS_MAX_FRAME_SIZE:
hcc->remote_settings.max_frame_size = value;
swTraceLog(SW_TRACE_HTTP2, "setting: max_frame_size=%u.", value);
break;
case SW_HTTP2_SETTINGS_MAX_HEADER_LIST_SIZE:
hcc->remote_settings.max_header_list_size = value;
swTraceLog(SW_TRACE_HTTP2, "setting: max_header_list_size=%u.", value);
break;
default:
// disable warning and ignore it because some websites are not following http2 protocol totally
// swWarn("unknown option[%d]: %d.", id, value);
break;
}
buf += sizeof(id) + sizeof(value);
length -= sizeof(id) + sizeof(value);
}
swHttp2_set_frame_header(frame, SW_HTTP2_TYPE_SETTINGS, 0, SW_HTTP2_FLAG_ACK, stream_id);
cli->send(cli, frame, SW_HTTP2_FRAME_HEADER_SIZE, 0);
return;
}
case SW_HTTP2_TYPE_WINDOW_UPDATE:
{
value = ntohl(*(uint32_t *) buf);
swHttp2FrameTraceLog(recv, "window_size_increment=%d", value);
if (stream_id == 0)
{
hcc->remote_settings.window_size += value;
}
else
{
http2_client_stream *stream = swHashMap_find_int(hcc->streams, stream_id);
if (stream)
{
stream->remote_window_size += value;
}
}
return;
}
case SW_HTTP2_TYPE_PING:
{
swHttp2FrameTraceLog(recv, "ping");
swHttp2_set_frame_header(frame, SW_HTTP2_TYPE_PING, SW_HTTP2_FRAME_PING_PAYLOAD_SIZE, SW_HTTP2_FLAG_ACK, stream_id);
memcpy(frame + SW_HTTP2_FRAME_HEADER_SIZE, buf + SW_HTTP2_FRAME_HEADER_SIZE, SW_HTTP2_FRAME_PING_PAYLOAD_SIZE);
cli->send(cli, frame, SW_HTTP2_FRAME_HEADER_SIZE + SW_HTTP2_FRAME_PING_PAYLOAD_SIZE, 0);
return;
}
case SW_HTTP2_TYPE_GOAWAY:
{
uint32_t server_last_stream_id = ntohl(*(uint32_t *) (buf));
buf += 4;
value = ntohl(*(uint32_t *) (buf));
buf += 4;
swHttp2FrameTraceLog(recv, "last_stream_id=%d, error_code=%d, opaque_data=[%.*s]", server_last_stream_id, value, (int) (length - SW_HTTP2_GOAWAY_SIZE), buf);
// update goaway error code and error msg
zend_update_property_long(swoole_http2_client_coro_class_entry_ptr, zobject, ZEND_STRL("errCode"), value);
zend_update_property_stringl(swoole_http2_client_coro_class_entry_ptr, zobject, ZEND_STRL("errMsg"), buf, length - SW_HTTP2_GOAWAY_SIZE);
zend_update_property_long(swoole_http2_client_coro_class_entry_ptr, zobject, ZEND_STRL("serverLastStreamId"), server_last_stream_id);
http2_client_close(cli); // will trigger onClose and resume
return;
}
case SW_HTTP2_TYPE_RST_STREAM:
{
value = ntohl(*(uint32_t *) (buf));
swHttp2FrameTraceLog(recv, "error_code=%d", value);
if (hcc->iowait == 0)
{
// delete and free quietly
swHashMap_del_int(hcc->streams, stream_id);
return;
}
break;
}
default:
{
swHttp2FrameTraceLog(recv, "");
}
}
http2_client_stream *stream = swHashMap_find_int(hcc->streams, stream_id);
// stream has closed
if (stream == NULL)
{
return;
}
if (type == SW_HTTP2_TYPE_HEADERS)
{
http2_client_parse_header(hcc, stream, flags, buf, length);
}
else if (type == SW_HTTP2_TYPE_DATA)
{
if (length > 0)
{
if (!stream->buffer)
{
stream->buffer = swString_new(SW_HTTP2_DATA_BUFFER_SIZE);
}
#ifdef SW_HAVE_ZLIB
if (stream->gzip)
{
if (http_response_uncompress(&stream->gzip_stream, stream->gzip_buffer, buf, length) == SW_ERR)
{
return;
}
swString_append_ptr(stream->buffer, stream->gzip_buffer->str, stream->gzip_buffer->length);
}
else
#endif
{
swString_append_ptr(stream->buffer, buf, length);
}
// now we control the connection flow only (not stream)
// our window size is unlimited, so we don't worry about subtraction overflow
hcc->local_settings.window_size -= length;
stream->local_window_size -= length;
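/* Top the receive windows back up once they fall below a quarter of the
   maximum, for the connection (stream 0) and for this stream respectively. */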
if (hcc->local_settings.window_size < (SW_HTTP2_MAX_WINDOW_SIZE / 4))
{
http2_client_send_window_update(cli, 0, SW_HTTP2_MAX_WINDOW_SIZE - hcc->local_settings.window_size);
hcc->local_settings.window_size = SW_HTTP2_MAX_WINDOW_SIZE;
}
if (stream->local_window_size < (SW_HTTP2_MAX_WINDOW_SIZE / 4))
{
http2_client_send_window_update(cli, stream_id, SW_HTTP2_MAX_WINDOW_SIZE - stream->local_window_size);
stream->local_window_size = SW_HTTP2_MAX_WINDOW_SIZE;
}
}
}
uint8_t stream_type = stream->type;
if (
(type == SW_HTTP2_TYPE_DATA && stream_type == SW_HTTP2_STREAM_PIPELINE) // pipeline data frame
|| (stream_type == SW_HTTP2_STREAM_NORMAL && (flags & SW_HTTP2_FLAG_END_STREAM)) // normal end frame
|| type == SW_HTTP2_TYPE_RST_STREAM || type == SW_HTTP2_TYPE_GOAWAY // rst and goaway frame
)
{
zval _zresponse = stream->_response_object;
zval *zresponse = &_zresponse;
zval *retval = NULL;
if (type == SW_HTTP2_TYPE_RST_STREAM)
{
zend_update_property_long(swoole_http2_response_class_entry_ptr, zresponse, ZEND_STRL("statusCode"), -3);
zend_update_property_long(swoole_http2_response_class_entry_ptr, zresponse, ZEND_STRL("errCode"), value);
}
else if (stream_type == SW_HTTP2_STREAM_PIPELINE && !(flags & SW_HTTP2_FLAG_END_STREAM))
{
zend_update_property_bool(swoole_http2_response_class_entry_ptr, zresponse, ZEND_STRL("pipeline"), 1);
}
if (stream->buffer)
{
zend_update_property_stringl(swoole_http2_response_class_entry_ptr, stream->response_object, ZEND_STRL("data"), stream->buffer->str, stream->buffer->length);
}
if (stream_type == SW_HTTP2_STREAM_NORMAL)
{
Z_ADDREF_P(zresponse); // keep the response object alive: the stream free callback drops its reference when the stream is deleted below
swHashMap_del_int(hcc->streams, stream_id);
}
else if (stream->buffer)
{
swString_clear(stream->buffer);
}
if (cli->timer)
{
swTimer_del(&SwooleG.timer, cli->timer);
cli->timer = NULL;
}
if (hcc->iowait != 0)
{
hcc->iowait = 0;
hcc->read_cid = 0;
php_context *context = swoole_get_property(zobject, HTTP2_CLIENT_CORO_CONTEXT);
int ret = coro_resume(context, zresponse, &retval);
if (ret == CORO_END && retval)
{
zval_ptr_dtor(retval);
}
}
// if not pipeline or pipeline end, response refcount--
if (stream_type != SW_HTTP2_STREAM_PIPELINE || (flags & SW_HTTP2_FLAG_END_STREAM))
{
zval_ptr_dtor(zresponse);
}
}
}
static void http2_client_stream_free(void *ptr)
{
http2_client_stream *stream = ptr;
if (stream->buffer)
{
swString_free(stream->buffer);
}
#ifdef SW_HAVE_ZLIB
if (stream->gzip)
{
inflateEnd(&stream->gzip_stream);
swString_free(stream->gzip_buffer);
}
#endif
if (stream->response_object)
{
zval_ptr_dtor(stream->response_object);
stream->response_object = NULL;
}
efree(stream);
}
static uint32_t http2_client_send_request(zval *zobject, zval *req)
{
http2_client_property *hcc = swoole_get_property(zobject, HTTP2_CLIENT_CORO_PROPERTY);
swClient *cli = hcc->client;
zval *zheaders = sw_zend_read_property_array(swoole_http2_request_class_entry_ptr, req, ZEND_STRL("headers"), 1);
zval *zpost_data = sw_zend_read_property(swoole_http2_request_class_entry_ptr, req, ZEND_STRL("data"), 1);
zval *zpipeline = sw_zend_read_property(swoole_http2_request_class_entry_ptr, req, ZEND_STRL("pipeline"), 1);
if (!ZVAL_IS_NULL(zpost_data))
{
if (Z_TYPE_P(zpost_data) == IS_ARRAY)
{
add_assoc_stringl_ex(zheaders, ZEND_STRL("content-type"), ZEND_STRL("application/x-www-form-urlencoded"));
}
}
/**
* send header
*/
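/* HPACK-encode the header block into the thread-local scratch buffer
   (SwooleTG.buffer_stack), leaving the first SW_HTTP2_FRAME_HEADER_SIZE
   bytes free for the frame header written below. */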
char* buffer = SwooleTG.buffer_stack->str;
ssize_t n = http2_client_build_header(zobject, req, buffer + SW_HTTP2_FRAME_HEADER_SIZE);
if (n <= 0)
{
swWarn("http2_client_build_header() failed.");
return 0;
}
// malloc
http2_client_stream *stream = emalloc(sizeof(http2_client_stream));
memset(stream, 0, sizeof(http2_client_stream));
// init
stream->response_object = &stream->_response_object;
object_init_ex(stream->response_object, swoole_http2_response_class_entry_ptr);
stream->stream_id = hcc->stream_id;
stream->type = Z_BVAL_P(zpipeline) ? SW_HTTP2_STREAM_PIPELINE : SW_HTTP2_STREAM_NORMAL;
stream->remote_window_size = SW_HTTP2_DEFAULT_WINDOW_SIZE;
stream->local_window_size = SW_HTTP2_DEFAULT_WINDOW_SIZE;
// add to map
swHashMap_add_int(hcc->streams, stream->stream_id, stream);
if (ZVAL_IS_NULL(zpost_data))
{
//pipeline
if (stream->type == SW_HTTP2_STREAM_PIPELINE)
{
swHttp2_set_frame_header(buffer, SW_HTTP2_TYPE_HEADERS, n, SW_HTTP2_FLAG_END_HEADERS, stream->stream_id);
}
else
{
swHttp2_set_frame_header(buffer, SW_HTTP2_TYPE_HEADERS, n, SW_HTTP2_FLAG_END_STREAM | SW_HTTP2_FLAG_END_HEADERS, stream->stream_id);
}
}
else
{
swHttp2_set_frame_header(buffer, SW_HTTP2_TYPE_HEADERS, n, SW_HTTP2_FLAG_END_HEADERS, stream->stream_id);
}
zend_update_property_long(swoole_http2_response_class_entry_ptr, stream->response_object, ZEND_STRL("streamId"), stream->stream_id);
swTraceLog(SW_TRACE_HTTP2, "["SW_ECHO_GREEN", STREAM#%d] length=%zd", swHttp2_get_type(SW_HTTP2_TYPE_HEADERS), stream->stream_id, n);
cli->send(cli, buffer, n + SW_HTTP2_FRAME_HEADER_SIZE, 0);
/**
* send body
*/
if (!ZVAL_IS_NULL(zpost_data))
{
char *p;
size_t len;
smart_str formstr_s = { NULL, 0 };
uint8_t send_flag;
uint32_t send_len;
int flag = stream->type == SW_HTTP2_STREAM_PIPELINE ? 0 : SW_HTTP2_FLAG_END_STREAM;
if (Z_TYPE_P(zpost_data) == IS_ARRAY)
{
p = sw_http_build_query(zpost_data, &len, &formstr_s);
if (p == NULL)
{
swoole_php_error(E_WARNING, "http_build_query failed.");
return 0;
}
}
else
{
convert_to_string(zpost_data);
p = Z_STRVAL_P(zpost_data);
len = Z_STRLEN_P(zpost_data);
}
swTraceLog(SW_TRACE_HTTP2, "["SW_ECHO_GREEN", END, STREAM#%d] length=%zu", swHttp2_get_type(SW_HTTP2_TYPE_DATA), stream->stream_id, len);
while (len > 0)
{
if (len > hcc->remote_settings.max_frame_size)
{
send_len = hcc->remote_settings.max_frame_size;
send_flag = 0;
}
else
{
send_len = len;
send_flag = flag;
}
swHttp2_set_frame_header(buffer, SW_HTTP2_TYPE_DATA, send_len, send_flag, stream->stream_id);
if (cli->send(cli, buffer, SW_HTTP2_FRAME_HEADER_SIZE, 0) < 0)
{
return 0;
}
if (cli->send(cli, p, send_len, 0) < 0)
{
return 0;
}
len -= send_len;
p += send_len;
}
if (formstr_s.s)
{
smart_str_free(&formstr_s);
}
}
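/* Client-initiated stream ids are odd (1, 3, 5, ...), hence the step of 2. */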
hcc->stream_id += 2;
return stream->stream_id;
}
static int http2_client_send_data(http2_client_property *hcc, uint32_t stream_id, zval *data, zend_bool end)
{
swClient *cli = hcc->client;
char buffer[8192];
http2_client_stream *stream = swHashMap_find_int(hcc->streams, stream_id);
if (stream == NULL || stream->type != SW_HTTP2_STREAM_PIPELINE)
{
return -1;
}
int flag = end ? SW_HTTP2_FLAG_END_STREAM : 0;
if (Z_TYPE_P(data) == IS_ARRAY)
{
size_t len;
smart_str formstr_s = { 0 };
char *formstr = sw_http_build_query(data, &len, &formstr_s);
if (formstr == NULL)
{
swoole_php_error(E_WARNING, "http_build_query failed.");
return -1;
}
memset(buffer, 0, SW_HTTP2_FRAME_HEADER_SIZE);
swHttp2_set_frame_header(buffer, SW_HTTP2_TYPE_DATA, len, flag, stream_id);
swTraceLog(SW_TRACE_HTTP2, "["SW_ECHO_GREEN", END, STREAM#%d] length=%zu", swHttp2_get_type(SW_HTTP2_TYPE_DATA), stream_id, len);
cli->send(cli, buffer, SW_HTTP2_FRAME_HEADER_SIZE, 0);
cli->send(cli, formstr, len, 0);
smart_str_free(&formstr_s);
}
else if (Z_TYPE_P(data) == IS_STRING)
{
swHttp2_set_frame_header(buffer, SW_HTTP2_TYPE_DATA, Z_STRLEN_P(data), flag, stream_id);
swTraceLog(SW_TRACE_HTTP2, "["SW_ECHO_GREEN", END, STREAM#%d] length=%zu", swHttp2_get_type(SW_HTTP2_TYPE_DATA), stream_id, Z_STRLEN_P(data));
cli->send(cli, buffer, SW_HTTP2_FRAME_HEADER_SIZE, 0);
cli->send(cli, Z_STRVAL_P(data), Z_STRLEN_P(data), 0);
}
else
{
swoole_php_error(E_WARNING, "unknown data type[%d].", Z_TYPE_P(data) );
return -1;
}
return SW_OK;
}
static PHP_METHOD(swoole_http2_client_coro, send)
{
zval *request;
http2_client_property *hcc = swoole_get_property(getThis(), HTTP2_CLIENT_CORO_PROPERTY);
if (!hcc->streams)
{
zend_update_property_long(swoole_http2_client_coro_class_entry_ptr, getThis(), ZEND_STRL("errCode"), (SwooleG.error = SW_ERROR_CLIENT_NO_CONNECTION));
zend_update_property_string(swoole_http2_client_coro_class_entry_ptr, getThis(), ZEND_STRL("errMsg"), "client is not connected to server.");
swoole_php_error(E_WARNING, "client is not connected to server.");
RETURN_FALSE;
}
if (zend_parse_parameters(ZEND_NUM_ARGS(), "z", &request) == FAILURE)
{
return;
}
if (Z_TYPE_P(request) != IS_OBJECT || !instanceof_function(Z_OBJCE_P(request), swoole_http2_request_class_entry_ptr))
{
swoole_php_fatal_error(E_ERROR, "object is not instanceof swoole_http2_request.");
RETURN_FALSE;
}
uint32_t stream_id = http2_client_send_request(getThis(), request);
if (stream_id == 0)
{
RETURN_FALSE;
}
else
{
RETURN_LONG(stream_id);
}
}
static PHP_METHOD(swoole_http2_client_coro, recv)
{
http2_client_property *hcc = swoole_get_property(getThis(), HTTP2_CLIENT_CORO_PROPERTY);
swClient *cli = hcc->client;
if (!hcc->streams)
{
zend_update_property_long(swoole_http2_client_coro_class_entry_ptr, getThis(), ZEND_STRL("errCode"), (SwooleG.error = SW_ERROR_CLIENT_NO_CONNECTION));
zend_update_property_string(swoole_http2_client_coro_class_entry_ptr, getThis(), ZEND_STRL("errMsg"), "client is not connected to server.");
swoole_php_error(E_WARNING, "client is not connected to server.");
RETURN_FALSE;
}
swoole_php_check_coro_bind("http2 client", hcc->read_cid, RETURN_FALSE);
double timeout = hcc->timeout;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "|d", &timeout) == FAILURE)
{
RETURN_FALSE;
}
php_context *context = swoole_get_property(getThis(), HTTP2_CLIENT_CORO_CONTEXT);
if (timeout > 0)
{
cli->timer = swTimer_add(&SwooleG.timer, (int) (timeout * 1000), 0, context, http2_client_onTimeout);
}
hcc->iowait = 1;
hcc->read_cid = sw_get_current_cid();
coro_save(context);
coro_yield();
}
static void http2_client_onConnect(swClient *cli)
{
int ret;
zval *zobject = cli->object;
http2_client_property *hcc = swoole_get_property(zobject, HTTP2_CLIENT_CORO_PROPERTY);
zend_update_property_bool(swoole_http2_client_coro_class_entry_ptr, zobject, ZEND_STRL("connected"), 1);
zend_update_property_long(swoole_http2_client_coro_class_entry_ptr, zobject, ZEND_STRL("errCode"), 0);
zend_update_property_string(swoole_http2_client_coro_class_entry_ptr, zobject, ZEND_STRL("errMsg"), "");
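/* The HTTP/2 client connection preface must be sent before any frame is exchanged. */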
cli->send(cli, ZEND_STRL(SW_HTTP2_PRI_STRING), 0);
cli->open_length_check = 1;
cli->protocol.get_package_length = swHttp2_get_frame_length;
cli->protocol.package_length_size = SW_HTTP2_FRAME_HEADER_SIZE;
hcc->stream_id = 1;
hcc->streams = swHashMap_new(8, http2_client_stream_free);
// [init]: we must set default values; the server does not always send all the settings
swHttp2_init_settings(&hcc->local_settings);
swHttp2_init_settings(&hcc->remote_settings);
ret = nghttp2_hd_inflate_new(&hcc->inflater);
if (ret != 0)
{
swoole_php_error(E_WARNING, "nghttp2_hd_inflate_init() failed with error: %s[%d].", nghttp2_strerror(ret), ret);
cli->close(cli);
return;
}
ret = nghttp2_hd_deflate_new(&hcc->deflater, hcc->local_settings.header_table_size);
if (ret != 0)
{
swoole_php_error(E_WARNING, "nghttp2_hd_deflate_init() failed with error: %s[%d].", nghttp2_strerror(ret), ret);
cli->close(cli);
return;
}
http2_client_send_setting(cli, &hcc->local_settings);
hcc->iowait = 0;
hcc->read_cid = 0;
// hcc->write_cid = 0;
zval *result;
SW_MAKE_STD_ZVAL(result);
ZVAL_BOOL(result, 1);
zval *retval = NULL;
php_context *context = swoole_get_property(zobject, HTTP2_CLIENT_CORO_CONTEXT);
ret = coro_resume(context, result, &retval);
if (ret == CORO_END && retval)
{
zval_ptr_dtor(retval);
}
}
static void http2_client_onClose(swClient *cli)
{
zval *zobject = cli->object;
zend_update_property_bool(swoole_http2_client_coro_class_entry_ptr, zobject, ZEND_STRL("connected"), 0);
php_swoole_client_free(zobject, cli);
http2_client_property *hcc = swoole_get_property(zobject, HTTP2_CLIENT_CORO_PROPERTY);
if (!hcc)
{
return;
}
hcc->client = NULL;
hcc->read_cid = 0;
// hcc->write_cid = 0;
if (hcc->streams)
{
swHashMap_free(hcc->streams);
hcc->streams = NULL;
}
if (hcc->inflater)
{
nghttp2_hd_inflate_del(hcc->inflater);
hcc->inflater = NULL;
}
if (hcc->deflater)
{
nghttp2_hd_deflate_del(hcc->deflater);
hcc->deflater = NULL;
}
if (hcc->iowait != 0)
{
hcc->iowait = 0;
}
else
{
return;
}
zval _result;
zval *result = &_result;
zval *retval = NULL;
ZVAL_FALSE(result);
php_context *context = swoole_get_property(zobject, HTTP2_CLIENT_CORO_CONTEXT);
int ret = coro_resume(context, result, &retval);
if (ret == CORO_END && retval)
{
zval_ptr_dtor(retval);
}
}
static void http2_client_onTimeout(swTimer *timer, swTimer_node *tnode)
{
php_context *ctx = tnode->data;
zval _zobject = ctx->coro_params;
zval *zobject = &_zobject;
zend_update_property_long(swoole_http2_client_coro_class_entry_ptr, zobject, ZEND_STRL("errCode"), ETIMEDOUT);
zend_update_property_string(swoole_http2_client_coro_class_entry_ptr, zobject, ZEND_STRL("errMsg"), strerror(ETIMEDOUT));
swClient *cli = swoole_get_object(zobject);
cli->timer = NULL;
http2_client_property *hcc = swoole_get_property(zobject, HTTP2_CLIENT_CORO_PROPERTY);
hcc->iowait = 0;
hcc->read_cid = 0;
zval *result;
SW_MAKE_STD_ZVAL(result);
ZVAL_BOOL(result, 0);
zval *retval = NULL;
int ret = coro_resume(ctx, result, &retval);
if (ret == CORO_END && retval)
{
zval_ptr_dtor(retval);
}
}
static PHP_METHOD(swoole_http2_client_coro, __destruct)
{
SW_PREVENT_USER_DESTRUCT;
zval *zobject = getThis();
swClient *cli = swoole_get_object(zobject);
http2_client_close(cli); // closing frees the inflater, deflater and stream map via the onClose callback registered in connect()
http2_client_property *hcc = swoole_get_property(zobject, HTTP2_CLIENT_CORO_PROPERTY);
if (hcc)
{
if (hcc->host)
{
efree(hcc->host);
}
efree(hcc);
swoole_set_property(zobject, HTTP2_CLIENT_CORO_PROPERTY, NULL);
}
php_context *context = swoole_get_property(zobject, HTTP2_CLIENT_CORO_CONTEXT);
swoole_set_property(zobject, HTTP2_CLIENT_CORO_CONTEXT, NULL);
efree(context);
}
static PHP_METHOD(swoole_http2_client_coro, close)
{
swClient *cli = swoole_get_object(getThis());
SW_CHECK_RETURN(http2_client_close(cli));
}
static PHP_METHOD(swoole_http2_client_coro, connect)
{
http2_client_property *hcc = swoole_get_property(getThis(), HTTP2_CLIENT_CORO_PROPERTY);
if (hcc->client)
{
swoole_php_fatal_error(E_WARNING, "The HTTP2 connection has already been established.");
RETURN_FALSE;
}
php_swoole_check_reactor();
swClient *cli = php_swoole_client_new(getThis(), hcc->host, hcc->host_len, hcc->port);
if (cli == NULL)
{
RETURN_FALSE;
}
hcc->client = cli;
zval *ztmp;
HashTable *vht;
zval *zset = sw_zend_read_property(swoole_http2_client_coro_class_entry_ptr, getThis(), ZEND_STRL("setting"), 1);
if (zset && ZVAL_IS_ARRAY(zset))
{
vht = Z_ARRVAL_P(zset);
/**
* timeout
*/
if (php_swoole_array_get_value(vht, "timeout", ztmp))
{
convert_to_double(ztmp);
hcc->timeout = (double) Z_DVAL_P(ztmp);
}
//client settings
php_swoole_client_check_setting(hcc->client, zset);
}
swoole_set_object(getThis(), cli);
cli->onConnect = http2_client_onConnect;
cli->onClose = http2_client_onClose;
cli->onError = http2_client_onClose; // same as close
cli->onReceive = http2_client_onReceive;
cli->http2 = 1;
cli->open_eof_check = 0;
cli->open_length_check = 0;
cli->reactor_fdtype = PHP_SWOOLE_FD_STREAM_CLIENT;
if (cli->connect(cli, hcc->host, hcc->port, hcc->timeout, 0) < 0)
{
RETURN_FALSE;
}
php_context *context = swoole_get_property(getThis(), HTTP2_CLIENT_CORO_CONTEXT);
cli->object = &context->coro_params;
coro_save(context);
hcc->iowait = 1;
coro_yield();
}
static sw_inline void http2_settings_to_array(swHttp2_settings *settings, zval* zarray)
{
array_init(zarray);
add_assoc_long_ex(zarray, ZEND_STRL("header_table_size"), settings->header_table_size);
add_assoc_long_ex(zarray, ZEND_STRL("window_size"), settings->window_size);
add_assoc_long_ex(zarray, ZEND_STRL("max_concurrent_streams"), settings->max_concurrent_streams);
add_assoc_long_ex(zarray, ZEND_STRL("max_frame_size"), settings->max_frame_size);
add_assoc_long_ex(zarray, ZEND_STRL("max_header_list_size"), settings->max_header_list_size);
}
static PHP_METHOD(swoole_http2_client_coro, stats)
{
http2_client_property *hcc = swoole_get_property(getThis(), HTTP2_CLIENT_CORO_PROPERTY);
zval _zarray, *zarray = &_zarray;
swString key;
bzero(&key, sizeof(key));
if (zend_parse_parameters(ZEND_NUM_ARGS(), "|s", &key.str, &key.length) == FAILURE)
{
return;
}
if (key.length > 0)
{
if (strcmp(key.str, "current_stream_id") == 0)
{
RETURN_LONG(hcc->stream_id);
}
else if (strcmp(key.str, "last_stream_id") == 0)
{
RETURN_LONG(hcc->last_stream_id);
}
else if (strcmp(key.str, "local_settings") == 0)
{
http2_settings_to_array(&hcc->local_settings, zarray);
RETURN_ZVAL(zarray, 0, 0);
}
else if (strcmp(key.str, "remote_settings") == 0)
{
http2_settings_to_array(&hcc->remote_settings, zarray);
RETURN_ZVAL(zarray, 0, 0);
}
else if (strcmp(key.str, "active_stream_num") == 0)
{
RETURN_LONG(hcc->streams ? swHashMap_count(hcc->streams) : 0);
}
}
else
{
array_init(return_value);
add_assoc_long_ex(return_value, ZEND_STRL("current_stream_id"), hcc->stream_id);
add_assoc_long_ex(return_value, ZEND_STRL("last_stream_id"), hcc->last_stream_id);
http2_settings_to_array(&hcc->local_settings, zarray);
add_assoc_zval_ex(return_value, ZEND_STRL("local_settings"), zarray);
http2_settings_to_array(&hcc->remote_settings, zarray);
add_assoc_zval_ex(return_value, ZEND_STRL("remote_settings"), zarray);
add_assoc_long_ex(return_value, ZEND_STRL("active_stream_num"), hcc->streams ? swHashMap_count(hcc->streams) : 0);
}
}
static PHP_METHOD(swoole_http2_client_coro, isStreamExist)
{
zend_long stream_id = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &stream_id) == FAILURE)
{
RETURN_FALSE;
}
if (stream_id < 0)
{
RETURN_FALSE;
}
http2_client_property *hcc = swoole_get_property(getThis(), HTTP2_CLIENT_CORO_PROPERTY);
if (!hcc->client)
{
RETURN_FALSE;
}
else
{
if (stream_id == 0)
{
RETURN_TRUE;
}
if (!hcc->streams)
{
RETURN_FALSE;
}
}
http2_client_stream *stream = swHashMap_find_int(hcc->streams, stream_id);
RETURN_BOOL(stream ? 1 : 0);
}
static PHP_METHOD(swoole_http2_client_coro, write)
{
http2_client_property *hcc = swoole_get_property(getThis(), HTTP2_CLIENT_CORO_PROPERTY);
if (!hcc->streams)
{
zend_update_property_long(swoole_http2_client_coro_class_entry_ptr, getThis(), ZEND_STRL("errCode"), (SwooleG.error = SW_ERROR_CLIENT_NO_CONNECTION));
zend_update_property_string(swoole_http2_client_coro_class_entry_ptr, getThis(), ZEND_STRL("errMsg"), "client is not connected to server.");
swoole_php_error(E_WARNING, "client is not connected to server.");
RETURN_FALSE;
}
long stream_id;
zval *data;
zend_bool end = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "lz|b", &stream_id, &data, &end) == FAILURE)
{
return;
}
SW_CHECK_RETURN(http2_client_send_data(hcc, stream_id, data, end));
}
/**
* +-+-------------------------------------------------------------+
* |R| Last-Stream-ID (31) |
* +-+-------------------------------------------------------------+
* | Error Code (32) |
* +---------------------------------------------------------------+
* | Additional Debug Data (*) |
* +---------------------------------------------------------------+
*/
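/*
* Illustrative sketch (assumptions: `payload` points just past the 9-byte frame
* header of a received GOAWAY frame and `payload_len` is its payload length):
* a peer would unpack the fields documented above roughly like this:
*
*     uint32_t last_stream_id = ntohl(*(uint32_t *) payload) & 0x7fffffff;
*     uint32_t error_code     = ntohl(*(uint32_t *) (payload + 4));
*     const char *debug_data  = payload + SW_HTTP2_GOAWAY_SIZE;
*     size_t debug_data_len   = payload_len - SW_HTTP2_GOAWAY_SIZE;
*/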
static PHP_METHOD(swoole_http2_client_coro, goaway)
{
http2_client_property *hcc = swoole_get_property(getThis(), HTTP2_CLIENT_CORO_PROPERTY);
swClient *cli = hcc->client;
int ret;
char* frame;
zend_long error_code = SW_HTTP2_ERROR_NO_ERROR;
char* debug_data = NULL;
size_t debug_data_len = 0;
if (!hcc->streams)
{
zend_update_property_long(swoole_http2_client_coro_class_entry_ptr, getThis(), ZEND_STRL("errCode"), (SwooleG.error = SW_ERROR_CLIENT_NO_CONNECTION));
zend_update_property_string(swoole_http2_client_coro_class_entry_ptr, getThis(), ZEND_STRL("errMsg"), "client is not connected to server.");
swoole_php_error(E_WARNING, "client is not connected to server.");
RETURN_FALSE;
}
if (zend_parse_parameters(ZEND_NUM_ARGS(), "|ls", &error_code, &debug_data, &debug_data_len) == FAILURE)
{
return;
}
size_t length = SW_HTTP2_FRAME_HEADER_SIZE + SW_HTTP2_GOAWAY_SIZE + debug_data_len;
frame = emalloc(length);
bzero(frame, length);
swHttp2_set_frame_header(frame, SW_HTTP2_TYPE_GOAWAY, SW_HTTP2_GOAWAY_SIZE + debug_data_len, error_code, 0);
*(uint32_t*) (frame + SW_HTTP2_FRAME_HEADER_SIZE) = htonl(hcc->last_stream_id);
*(uint32_t*) (frame + SW_HTTP2_FRAME_HEADER_SIZE + 4) = htonl(error_code);
memcpy(frame + SW_HTTP2_FRAME_HEADER_SIZE + SW_HTTP2_GOAWAY_SIZE, debug_data, debug_data_len);
swTraceLog(SW_TRACE_HTTP2, "["SW_ECHO_GREEN"] Send: last-sid=%d, error-code=%d", swHttp2_get_type(SW_HTTP2_TYPE_GOAWAY), hcc->last_stream_id, error_code);
ret = cli->send(cli, frame, length, 0);
efree(frame);
SW_CHECK_RETURN(ret);
}
#endif
#endif
|
684495.c | /*
* Copyright (c) 2020 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "strategy_dpo_ctx.h"
#include "strategy_dpo_manager.h"
hicn_dpo_ctx_t *hicn_strategy_dpo_ctx_pool;
void
hicn_strategy_init_dpo_ctx_pool ()
{
pool_init_fixed (hicn_strategy_dpo_ctx_pool, 256);
}
void
hicn_strategy_dpo_ctx_lock (dpo_id_t * dpo)
{
hicn_dpo_ctx_t *dpo_ctx = hicn_strategy_dpo_ctx_get (dpo->dpoi_index);
if (dpo_ctx != NULL)
{
dpo_ctx->locks++;
}
}
void
hicn_strategy_dpo_ctx_unlock (dpo_id_t * dpo)
{
hicn_dpo_ctx_t *hicn_strategy_dpo_ctx =
(hicn_dpo_ctx_t *) hicn_strategy_dpo_ctx_get (dpo->dpoi_index);
if (hicn_strategy_dpo_ctx != NULL)
{
hicn_strategy_dpo_ctx->locks--;
if (0 == hicn_strategy_dpo_ctx->locks)
{
pool_put (hicn_strategy_dpo_ctx_pool, hicn_strategy_dpo_ctx);
}
}
}
u8 *
hicn_strategy_dpo_format_ctx (u8 * s, va_list * ap)
{
index_t index = va_arg (*ap, index_t);
hicn_dpo_ctx_t *dpo = NULL;
u32 indent = va_arg (*ap, u32);
dpo = (hicn_dpo_ctx_t *) hicn_strategy_dpo_ctx_get (index);
const hicn_dpo_vft_t *dpo_vft = hicn_dpo_get_vft (dpo->dpo_type);
s = dpo_vft->hicn_dpo_format (s, 2, index, indent);
return (s);
}
index_t
hicn_strategy_dpo_ctx_get_index (hicn_dpo_ctx_t * cd)
{
return (cd - hicn_strategy_dpo_ctx_pool);
}
hicn_dpo_ctx_t *
hicn_strategy_dpo_ctx_get (index_t index)
{
hicn_dpo_ctx_t *hicn_strategy_dpo_ctx = NULL;
if (!pool_is_free_index (hicn_strategy_dpo_ctx_pool, index))
{
hicn_strategy_dpo_ctx =
(pool_elt_at_index (hicn_strategy_dpo_ctx_pool, index));
}
return hicn_strategy_dpo_ctx;
}
hicn_dpo_ctx_t *
hicn_strategy_dpo_ctx_alloc ()
{
hicn_dpo_ctx_t *dpo_ctx;
pool_get (hicn_strategy_dpo_ctx_pool, dpo_ctx);
return dpo_ctx;
}
int
hicn_strategy_dpo_ctx_add_nh (hicn_face_id_t nh, hicn_dpo_ctx_t * dpo_ctx,
u8 * pos)
{
int empty = dpo_ctx->entry_count;
/* Iterate through the list of faces to find if the face is already a next hop */
for (int i = 0; i < dpo_ctx->entry_count; i++)
{
if (nh == dpo_ctx->next_hops[i])
{
/* If face is marked as deleted, ignore it */
hicn_face_t *face =
hicn_dpoi_get_from_idx (dpo_ctx->next_hops[i]);
if (face->flags & HICN_FACE_FLAGS_DELETED)
{
continue;
}
return HICN_ERROR_DPO_CTX_NHOPS_EXISTS;
}
}
/* Get an empty place */
if (empty >= HICN_PARAM_FIB_ENTRY_NHOPS_MAX)
{
return HICN_ERROR_DPO_CTX_NHOPS_NS;
}
dpo_ctx->next_hops[empty] = nh;
hicn_face_lock_with_id (nh);
dpo_ctx->entry_count++;
*pos = empty;
return HICN_ERROR_NONE;
}
int
hicn_strategy_dpo_ctx_del_nh (hicn_face_id_t face_id,
hicn_dpo_ctx_t * dpo_ctx)
{
int ret = HICN_ERROR_DPO_CTX_NOT_FOUND;
hicn_face_id_t invalid = NEXT_HOP_INVALID;
for (int i = 0; i < dpo_ctx->entry_count; i++)
{
if (dpo_ctx->next_hops[i] == face_id)
{
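/* swap-remove: unlock the face, move the last next hop into this slot and invalidate the vacated tail entry */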
hicn_face_unlock_with_id (dpo_ctx->next_hops[i]);
dpo_ctx->entry_count--;
dpo_ctx->next_hops[i] = dpo_ctx->next_hops[dpo_ctx->entry_count];
dpo_ctx->next_hops[dpo_ctx->entry_count] = invalid;
ret = HICN_ERROR_NONE;
break;
}
}
return ret;
}
/*
* fd.io coding-style-patch-verification: ON
*
* Local Variables:
* eval: (c-set-style "gnu")
* End:
*/
|
340013.c | /**
******************************************************************************
* @file ADC/ADC_RegularConversion_Interrupt/Src/main.c
* @author MCD Application Team
* @brief This example describes how to use an interrupt to continuously
* convert data
******************************************************************************
* @attention
*
* <h2><center>&copy; COPYRIGHT(c) 2017 STMicroelectronics</center></h2>
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Includes ------------------------------------------------------------------*/
#include "main.h"
/** @addtogroup STM32F4xx_HAL_Examples
* @{
*/
/** @addtogroup ADC_RegularConversion_Interrupt
* @{
*/
/* Private typedef -----------------------------------------------------------*/
/* Private define ------------------------------------------------------------*/
/* Private macro -------------------------------------------------------------*/
/* Private variables ---------------------------------------------------------*/
/* ADC handler declaration */
ADC_HandleTypeDef AdcHandle;
/* Variable used to get converted value */
__IO uint16_t uhADCxConvertedValue = 0;
/* Private function prototypes -----------------------------------------------*/
static void SystemClock_Config(void);
static void Error_Handler(void);
/* Private functions ---------------------------------------------------------*/
/**
* @brief Main program
* @param None
* @retval None
*/
int main(void)
{
ADC_ChannelConfTypeDef sConfig;
/* STM32F4xx HAL library initialization:
- Configure the Flash prefetch, instruction and Data caches
- Configure the Systick to generate an interrupt each 1 msec
- Set NVIC Group Priority to 4
- Global MSP (MCU Support Package) initialization
*/
HAL_Init();
/* Configure the system clock to 144 MHz */
SystemClock_Config();
/* Configure LED3 */
BSP_LED_Init(LED3);
/*##-1- Configure the ADC peripheral #######################################*/
AdcHandle.Instance = ADCx;
AdcHandle.Init.ClockPrescaler = ADC_CLOCKPRESCALER_PCLK_DIV2;
AdcHandle.Init.Resolution = ADC_RESOLUTION_12B;
AdcHandle.Init.ScanConvMode = DISABLE;
AdcHandle.Init.ContinuousConvMode = ENABLE;
AdcHandle.Init.DiscontinuousConvMode = DISABLE;
AdcHandle.Init.NbrOfDiscConversion = 0;
AdcHandle.Init.ExternalTrigConvEdge = ADC_EXTERNALTRIGCONVEDGE_NONE;
AdcHandle.Init.ExternalTrigConv = ADC_EXTERNALTRIGCONV_T1_CC1;
AdcHandle.Init.DataAlign = ADC_DATAALIGN_RIGHT;
AdcHandle.Init.NbrOfConversion = 1;
AdcHandle.Init.DMAContinuousRequests = DISABLE;
AdcHandle.Init.EOCSelection = DISABLE;
if(HAL_ADC_Init(&AdcHandle) != HAL_OK)
{
/* Initialization Error */
Error_Handler();
}
/*##-2- Configure ADC regular channel ######################################*/
sConfig.Channel = ADCx_CHANNEL;
sConfig.Rank = 1;
sConfig.SamplingTime = ADC_SAMPLETIME_3CYCLES;
sConfig.Offset = 0;
if(HAL_ADC_ConfigChannel(&AdcHandle, &sConfig) != HAL_OK)
{
/* Channel Configuration Error */
Error_Handler();
}
/*##-3- Start the conversion process and enable interrupt ##################*/
if(HAL_ADC_Start_IT(&AdcHandle) != HAL_OK)
{
/* Start Conversation Error */
Error_Handler();
}
/* Infinite loop */
while (1)
{
}
}
/**
* @brief System Clock Configuration
* The system Clock is configured as follow :
* System Clock source = PLL (HSE)
* SYSCLK(Hz) = 144000000
* HCLK(Hz) = 144000000
* AHB Prescaler = 1
* APB1 Prescaler = 4
* APB2 Prescaler = 2
* HSE Frequency(Hz) = 25000000
* PLL_M = 25
* PLL_N = 288
* PLL_P = 2
* PLL_Q = 6
* VDD(V) = 3.3
* Main regulator output voltage = Scale2 mode
* Flash Latency(WS) = 4
* @param None
* @retval None
*/
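/*
* Worked derivation of the figures above (standard STM32F4 PLL formula):
*   f_VCO    = f_HSE / PLL_M * PLL_N = 25 MHz / 25 * 288 = 288 MHz
*   f_SYSCLK = f_VCO / PLL_P         = 288 MHz / 2       = 144 MHz
*   f_48MHz  = f_VCO / PLL_Q         = 288 MHz / 6       = 48 MHz (USB OTG FS/SDIO clock)
*   HCLK = SYSCLK / 1 = 144 MHz, PCLK1 = HCLK / 4 = 36 MHz, PCLK2 = HCLK / 2 = 72 MHz
*/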
static void SystemClock_Config(void)
{
RCC_ClkInitTypeDef RCC_ClkInitStruct;
RCC_OscInitTypeDef RCC_OscInitStruct;
/* Enable Power Control clock */
__HAL_RCC_PWR_CLK_ENABLE();
/* The voltage scaling allows optimizing the power consumption when the device is
clocked below the maximum system frequency, to update the voltage scaling value
regarding system frequency refer to product datasheet. */
__HAL_PWR_VOLTAGESCALING_CONFIG(PWR_REGULATOR_VOLTAGE_SCALE2);
/* Enable HSE Oscillator and activate PLL with HSE as source */
RCC_OscInitStruct.OscillatorType = RCC_OSCILLATORTYPE_HSE;
RCC_OscInitStruct.HSEState = RCC_HSE_ON;
RCC_OscInitStruct.PLL.PLLState = RCC_PLL_ON;
RCC_OscInitStruct.PLL.PLLSource = RCC_PLLSOURCE_HSE;
RCC_OscInitStruct.PLL.PLLM = 25;
RCC_OscInitStruct.PLL.PLLN = 288;
RCC_OscInitStruct.PLL.PLLP = RCC_PLLP_DIV2;
RCC_OscInitStruct.PLL.PLLQ = 6;
HAL_RCC_OscConfig(&RCC_OscInitStruct);
/* Select PLL as system clock source and configure the HCLK, PCLK1 and PCLK2
clocks dividers */
RCC_ClkInitStruct.ClockType = (RCC_CLOCKTYPE_SYSCLK | RCC_CLOCKTYPE_HCLK | RCC_CLOCKTYPE_PCLK1 | RCC_CLOCKTYPE_PCLK2);
RCC_ClkInitStruct.SYSCLKSource = RCC_SYSCLKSOURCE_PLLCLK;
RCC_ClkInitStruct.AHBCLKDivider = RCC_SYSCLK_DIV1;
RCC_ClkInitStruct.APB1CLKDivider = RCC_HCLK_DIV4;
RCC_ClkInitStruct.APB2CLKDivider = RCC_HCLK_DIV2;
HAL_RCC_ClockConfig(&RCC_ClkInitStruct, FLASH_LATENCY_4);
/* STM32F405x/407x/415x/417x Revision Z devices: prefetch is supported */
if (HAL_GetREVID() == 0x1001)
{
/* Enable the Flash prefetch */
__HAL_FLASH_PREFETCH_BUFFER_ENABLE();
}
}
/**
* @brief This function is executed in case of error occurrence.
* @param None
* @retval None
*/
static void Error_Handler(void)
{
/* Turn LED3 on */
BSP_LED_On(LED3);
while(1)
{
}
}
/**
* @brief Conversion complete callback in non blocking mode
* @param AdcHandle : AdcHandle handle
* @note This example shows a simple way to report end of conversion, and
* you can add your own implementation.
* @retval None
*/
void HAL_ADC_ConvCpltCallback(ADC_HandleTypeDef* AdcHandle)
{
/* Get the converted value of regular channel */
uhADCxConvertedValue = HAL_ADC_GetValue(AdcHandle);
}
#ifdef USE_FULL_ASSERT
/**
* @brief Reports the name of the source file and the source line number
* where the assert_param error has occurred.
* @param file: pointer to the source file name
* @param line: assert_param error line source number
* @retval None
*/
void assert_failed(uint8_t* file, uint32_t line)
{
/* User can add his own implementation to report the file name and line number,
ex: printf("Wrong parameters value: file %s on line %d\r\n", file, line) */
/* Infinite loop */
while (1)
{
}
}
#endif
/**
* @}
*/
/**
* @}
*/
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
367755.c | #include <exec_once.h>
#include <voba/value.h>
#include "ast.h"
#include "syn.h"
#include "syn2ast_quote.h"
#include "syn2ast_report.h"
voba_value_t compile_quote(voba_value_t syn_form, voba_value_t env,voba_value_t toplevel_env)
{
voba_value_t ret = VOBA_NIL;
voba_value_t form = SYNTAX(syn_form)->v;
int64_t len = voba_array_len(form);
if(len == 2){
ret = make_ast_constant(voba_array_at(form,1));
}else{
report_error(
VOBA_CONST_CHAR("illegal form for quote"),
syn_form, toplevel_env
);
}
return ret;
}
|
127507.c | /*
* Copyright (c) 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <net/mqtt.h>
#include "mqtt_pkt.h"
#include <net/net_ip.h>
#include <net/nbuf.h>
#include <net/buf.h>
#include <errno.h>
#define MSG_SIZE CONFIG_MQTT_MSG_MAX_SIZE
#define MQTT_BUF_CTR (1 + CONFIG_MQTT_ADDITIONAL_BUFFER_CTR)
/* Memory pool internally used to handle messages that may exceed the size of
* system defined network buffer. By using this memory pool, routines don't deal
* with fragmentation, so algorithms are easier to implement.
*/
NET_BUF_POOL_DEFINE(mqtt_msg_pool, MQTT_BUF_CTR, MSG_SIZE, 0, NULL);
#define MQTT_PUBLISHER_MIN_MSG_SIZE 2
int mqtt_tx_connect(struct mqtt_ctx *ctx, struct mqtt_connect_msg *msg)
{
struct net_buf *data = NULL;
struct net_buf *tx = NULL;
int rc;
data = net_buf_alloc(&mqtt_msg_pool, ctx->net_timeout);
if (data == NULL) {
rc = -ENOMEM;
goto exit_connect;
}
ctx->clean_session = msg->clean_session ? 1 : 0;
rc = mqtt_pack_connect(data->data, &data->len, MSG_SIZE, msg);
if (rc != 0) {
rc = -EINVAL;
goto exit_connect;
}
tx = net_nbuf_get_tx(ctx->net_ctx);
if (tx == NULL) {
rc = -ENOMEM;
goto exit_connect;
}
net_buf_frag_add(tx, data);
data = NULL;
rc = net_context_send(tx, NULL, ctx->net_timeout, NULL, NULL);
if (rc < 0) {
rc = -EIO;
goto exit_connect;
}
tx = NULL;
exit_connect:
net_nbuf_unref(data);
net_nbuf_unref(tx);
return rc;
}
int mqtt_tx_disconnect(struct mqtt_ctx *ctx)
{
struct net_buf *tx = NULL;
/* DISCONNECT is a zero length message: 2 bytes required, no payload */
uint8_t msg[2];
uint16_t len;
int rc;
rc = mqtt_pack_disconnect(msg, &len, sizeof(msg));
if (rc != 0) {
rc = -EINVAL;
goto exit_disconnect;
}
tx = net_nbuf_get_tx(ctx->net_ctx);
if (tx == NULL) {
rc = -ENOMEM;
goto exit_disconnect;
}
rc = net_nbuf_append(tx, len, msg);
if (rc != true) {
rc = -ENOMEM;
goto exit_disconnect;
}
rc = net_context_send(tx, NULL, ctx->net_timeout, NULL, NULL);
if (rc < 0) {
rc = -EIO;
goto exit_disconnect;
}
ctx->connected = 0;
tx = NULL;
if (ctx->disconnect) {
ctx->disconnect(ctx->disconnect_data);
}
exit_disconnect:
net_nbuf_unref(tx);
return rc;
}
/**
* @brief mqtt_tx_pub_msgs Writes the MQTT PUBxxx msg indicated by pkt_type
* with identifier 'id'
* @param [in] ctx MQTT context
* @param [in] id MQTT packet identifier
* @param [in] pkt_type MQTT packet type
* @return 0 on success
* @return -EINVAL if an invalid parameter was passed to
* this routine
* @return -ENOMEM if a tx buffer is not available
* @return -EIO on network error
*/
static
int mqtt_tx_pub_msgs(struct mqtt_ctx *ctx, uint16_t id,
enum mqtt_packet pkt_type)
{
struct net_buf *tx = NULL;
uint8_t msg[4];
uint16_t len;
int rc;
switch (pkt_type) {
case MQTT_PUBACK:
rc = mqtt_pack_puback(msg, &len, sizeof(msg), id);
break;
case MQTT_PUBCOMP:
rc = mqtt_pack_pubcomp(msg, &len, sizeof(msg), id);
break;
case MQTT_PUBREC:
rc = mqtt_pack_pubrec(msg, &len, sizeof(msg), id);
break;
case MQTT_PUBREL:
rc = mqtt_pack_pubrel(msg, &len, sizeof(msg), id);
break;
default:
return -EINVAL;
}
if (rc != 0) {
return -EINVAL;
}
tx = net_nbuf_get_tx(ctx->net_ctx);
if (tx == NULL) {
rc = -ENOMEM;
goto exit_send;
}
rc = net_nbuf_append(tx, len, msg);
if (rc != true) {
rc = -ENOMEM;
goto exit_send;
}
rc = net_context_send(tx, NULL, ctx->net_timeout, NULL, NULL);
if (rc < 0) {
rc = -EIO;
goto exit_send;
}
tx = NULL;
exit_send:
net_nbuf_unref(tx);
return rc;
}
int mqtt_tx_puback(struct mqtt_ctx *ctx, uint16_t id)
{
return mqtt_tx_pub_msgs(ctx, id, MQTT_PUBACK);
}
int mqtt_tx_pubcomp(struct mqtt_ctx *ctx, uint16_t id)
{
return mqtt_tx_pub_msgs(ctx, id, MQTT_PUBCOMP);
}
int mqtt_tx_pubrec(struct mqtt_ctx *ctx, uint16_t id)
{
return mqtt_tx_pub_msgs(ctx, id, MQTT_PUBREC);
}
int mqtt_tx_pubrel(struct mqtt_ctx *ctx, uint16_t id)
{
return mqtt_tx_pub_msgs(ctx, id, MQTT_PUBREL);
}
int mqtt_tx_publish(struct mqtt_ctx *ctx, struct mqtt_publish_msg *msg)
{
struct net_buf *data = NULL;
struct net_buf *tx = NULL;
int rc;
data = net_buf_alloc(&mqtt_msg_pool, ctx->net_timeout);
if (data == NULL) {
rc = -ENOMEM;
goto exit_publish;
}
rc = mqtt_pack_publish(data->data, &data->len, data->size, msg);
if (rc != 0) {
net_nbuf_unref(data);
rc = -EINVAL;
goto exit_publish;
}
tx = net_nbuf_get_tx(ctx->net_ctx);
if (tx == NULL) {
rc = -ENOMEM;
goto exit_publish;
}
net_buf_frag_add(tx, data);
data = NULL;
rc = net_context_send(tx, NULL, ctx->net_timeout, NULL, NULL);
if (rc < 0) {
rc = -EIO;
goto exit_publish;
}
tx = NULL;
exit_publish:
net_nbuf_unref(data);
net_nbuf_unref(tx);
return rc;
}
int mqtt_tx_pingreq(struct mqtt_ctx *ctx)
{
struct net_buf *tx = NULL;
uint8_t msg[2];
uint16_t len;
int rc;
rc = mqtt_pack_pingreq(msg, &len, sizeof(msg));
if (rc != 0) {
rc = -EINVAL;
goto exit_pingreq;
}
tx = net_nbuf_get_tx(ctx->net_ctx);
if (tx == NULL) {
rc = -ENOMEM;
goto exit_pingreq;
}
net_nbuf_append(tx, len, msg);
rc = net_context_send(tx, NULL, ctx->net_timeout, NULL, NULL);
if (rc < 0) {
rc = -EIO;
goto exit_pingreq;
}
tx = NULL;
exit_pingreq:
net_nbuf_unref(tx);
return rc;
}
int mqtt_tx_subscribe(struct mqtt_ctx *ctx, uint16_t pkt_id, uint8_t items,
const char *topics[], const enum mqtt_qos qos[])
{
struct net_buf *data = NULL;
struct net_buf *tx = NULL;
int rc;
data = net_buf_alloc(&mqtt_msg_pool, ctx->net_timeout);
if (data == NULL) {
rc = -ENOMEM;
goto exit_subs;
}
rc = mqtt_pack_subscribe(data->data, &data->len, data->size,
pkt_id, items, topics, qos);
if (rc != 0) {
net_nbuf_unref(data);
rc = -EINVAL;
goto exit_subs;
}
tx = net_nbuf_get_tx(ctx->net_ctx);
if (tx == NULL) {
rc = -ENOMEM;
goto exit_subs;
}
net_buf_frag_add(tx, data);
data = NULL;
rc = net_context_send(tx, NULL, ctx->net_timeout, NULL, NULL);
if (rc < 0) {
rc = -EIO;
goto exit_subs;
}
tx = NULL;
exit_subs:
net_nbuf_unref(data);
net_nbuf_unref(tx);
return rc;
}
int mqtt_tx_unsubscribe(struct mqtt_ctx *ctx, uint16_t pkt_id, uint8_t items,
const char *topics[])
{
struct net_buf *data = NULL;
struct net_buf *tx = NULL;
int rc;
data = net_buf_alloc(&mqtt_msg_pool, ctx->net_timeout);
if (data == NULL) {
rc = -ENOMEM;
goto exit_unsub;
}
rc = mqtt_pack_unsubscribe(data->data, &data->len, data->size, pkt_id,
items, topics);
if (rc != 0) {
rc = -EINVAL;
goto exit_unsub;
}
tx = net_nbuf_get_tx(ctx->net_ctx);
if (tx == NULL) {
rc = -ENOMEM;
goto exit_unsub;
}
net_buf_frag_add(tx, data);
data = NULL;
rc = net_context_send(tx, NULL, ctx->net_timeout, NULL, NULL);
if (rc < 0) {
rc = -EIO;
goto exit_unsub;
}
tx = NULL;
exit_unsub:
net_buf_unref(data);
net_buf_unref(tx);
return rc;
}
int mqtt_rx_connack(struct mqtt_ctx *ctx, struct net_buf *rx, int clean_session)
{
uint16_t len;
uint8_t connect_rc;
uint8_t session;
uint8_t *data;
int rc;
data = rx->data;
len = rx->len;
/* CONNACK is 4 bytes len */
rc = mqtt_unpack_connack(data, len, &session, &connect_rc);
if (rc != 0) {
rc = -EINVAL;
goto exit_connect;
}
switch (clean_session) {
/* new session */
case 1:
/* server acks there is no previous session
* and server connection return code is OK
*/
if (session == 0 && connect_rc == 0) {
rc = 0;
} else {
rc = -EINVAL;
goto exit_connect;
}
break;
/* previous session */
case 0:
/* TODO */
/* FALLTHROUGH */
default:
rc = -EINVAL;
goto exit_connect;
}
ctx->connected = 1;
if (ctx->connect) {
ctx->connect(ctx->connect_data);
}
exit_connect:
return rc;
}
/**
* @brief mqtt_rx_pub_msgs Parses and validates the MQTT PUBxxxx message
* contained in the rx buffer. It validates against
* message structure and Packet Identifier.
* @details For the MQTT PUBREC and PUBREL messages, this
* function writes the corresponding MQTT PUB msg.
* @param ctx MQTT context
* @param rx RX buffer
* @param type MQTT Packet type
* @return 0 on success
* @return -EINVAL on error
*/
static
int mqtt_rx_pub_msgs(struct mqtt_ctx *ctx, struct net_buf *rx,
enum mqtt_packet type)
{
int (*unpack)(uint8_t *, uint16_t, uint16_t *) = NULL;
int (*response)(struct mqtt_ctx *, uint16_t) = NULL;
uint16_t pkt_id;
uint16_t len;
uint8_t *data;
int rc;
switch (type) {
case MQTT_PUBACK:
unpack = mqtt_unpack_puback;
break;
case MQTT_PUBCOMP:
unpack = mqtt_unpack_pubcomp;
break;
case MQTT_PUBREC:
unpack = mqtt_unpack_pubrec;
response = mqtt_tx_pubrel;
break;
case MQTT_PUBREL:
unpack = mqtt_unpack_pubrel;
response = mqtt_tx_pubcomp;
break;
default:
return -EINVAL;
}
data = rx->data;
len = rx->len;
/* 4 bytes message */
rc = unpack(data, len, &pkt_id);
if (rc != 0) {
return -EINVAL;
}
/* Only MQTT_APP_SUBSCRIBER, MQTT_APP_PUBLISHER_SUBSCRIBER and
* MQTT_APP_SERVER apps must receive the MQTT_PUBREL msg.
*/
if (type == MQTT_PUBREL) {
if (ctx->app_type != MQTT_APP_PUBLISHER) {
rc = ctx->publish_rx(ctx->publish_rx_data, NULL, pkt_id,
MQTT_PUBREL);
} else {
rc = -EINVAL;
}
} else {
rc = ctx->publish_tx(ctx->publish_tx_data, pkt_id, type);
}
if (rc != 0) {
return -EINVAL;
}
if (!response) {
return 0;
}
rc = response(ctx, pkt_id);
if (rc != 0) {
return -EINVAL;
}
return 0;
}
int mqtt_rx_puback(struct mqtt_ctx *ctx, struct net_buf *rx)
{
return mqtt_rx_pub_msgs(ctx, rx, MQTT_PUBACK);
}
int mqtt_rx_pubcomp(struct mqtt_ctx *ctx, struct net_buf *rx)
{
return mqtt_rx_pub_msgs(ctx, rx, MQTT_PUBCOMP);
}
int mqtt_rx_pubrec(struct mqtt_ctx *ctx, struct net_buf *rx)
{
return mqtt_rx_pub_msgs(ctx, rx, MQTT_PUBREC);
}
int mqtt_rx_pubrel(struct mqtt_ctx *ctx, struct net_buf *rx)
{
return mqtt_rx_pub_msgs(ctx, rx, MQTT_PUBREL);
}
int mqtt_rx_pingresp(struct mqtt_ctx *ctx, struct net_buf *rx)
{
int rc;
ARG_UNUSED(ctx);
/* 2 bytes message */
rc = mqtt_unpack_pingresp(rx->data, rx->len);
if (rc != 0) {
return -EINVAL;
}
return 0;
}
int mqtt_rx_suback(struct mqtt_ctx *ctx, struct net_buf *rx)
{
enum mqtt_qos suback_qos[CONFIG_MQTT_SUBSCRIBE_MAX_TOPICS];
uint16_t pkt_id;
uint16_t len;
uint8_t items;
uint8_t *data;
int rc;
data = rx->data;
len = rx->len;
rc = mqtt_unpack_suback(data, len, &pkt_id, &items,
CONFIG_MQTT_SUBSCRIBE_MAX_TOPICS, suback_qos);
if (rc != 0) {
return -EINVAL;
}
if (!ctx->subscribe) {
return -EINVAL;
}
rc = ctx->subscribe(ctx->subscribe_data, pkt_id, items, suback_qos);
if (rc != 0) {
return -EINVAL;
}
return 0;
}
int mqtt_rx_unsuback(struct mqtt_ctx *ctx, struct net_buf *rx)
{
uint16_t pkt_id;
uint16_t len;
uint8_t *data;
int rc;
data = rx->data;
len = rx->len;
/* 4 bytes message */
rc = mqtt_unpack_unsuback(data, len, &pkt_id);
if (rc != 0) {
return -EINVAL;
}
if (!ctx->unsubscribe) {
return -EINVAL;
}
rc = ctx->unsubscribe(ctx->subscribe_data, pkt_id);
if (rc != 0) {
return -EINVAL;
}
return 0;
}
int mqtt_rx_publish(struct mqtt_ctx *ctx, struct net_buf *rx)
{
struct mqtt_publish_msg msg;
int rc;
rc = mqtt_unpack_publish(rx->data, rx->len, &msg);
if (rc != 0) {
return -EINVAL;
}
rc = ctx->publish_rx(ctx->publish_rx_data, &msg, msg.pkt_id,
MQTT_PUBLISH);
if (rc != 0) {
return -EINVAL;
}
switch (msg.qos) {
case MQTT_QoS2:
rc = mqtt_tx_pubrec(ctx, msg.pkt_id);
break;
case MQTT_QoS1:
rc = mqtt_tx_puback(ctx, msg.pkt_id);
break;
case MQTT_QoS0:
break;
default:
rc = -EINVAL;
}
return rc;
}
/**
* @brief mqtt_linearize_buffer Linearize an IP fragmented buffer
* @param [in] ctx MQTT context structure
* @param [in] rx RX IP stack buffer
* @param [in] min_size Min message size allowed. This allows us
* to exit if the rx buffer is shorter
* than the expected msg size
* @return Data buffer
* @return NULL on error
*/
static
struct net_buf *mqtt_linearize_buffer(struct mqtt_ctx *ctx, struct net_buf *rx,
uint16_t min_size)
{
struct net_buf *data = NULL;
uint16_t data_len;
uint16_t offset;
int rc;
data = net_buf_alloc(&mqtt_msg_pool, ctx->net_timeout);
if (data == NULL) {
return NULL;
}
/* CONFIG_MQTT_MSG_MAX_SIZE is defined via Kconfig. So here it's
* determined if the input buffer could fit our data buffer or if
* it has the expected size.
*/
data_len = net_nbuf_appdatalen(rx);
if (data_len < min_size || data_len > CONFIG_MQTT_MSG_MAX_SIZE) {
goto exit_error;
}
offset = net_buf_frags_len(rx) - data_len;
rc = net_nbuf_linear_copy(data, rx, offset, data_len);
if (rc != 0) {
goto exit_error;
}
return data;
exit_error:
net_nbuf_unref(data);
return NULL;
}
/**
* @brief mqtt_publisher_parser Calls the appropriate rx routine for the MQTT
* message contained in rx
* @param ctx MQTT context
* @param rx RX buffer
* @return 0 on success
* @return -EINVAL if an unknown message is received
* @return -ENOMEM if no data buffer is available
* @return mqtt_rx_connack, mqtt_rx_puback, mqtt_rx_pubrec,
* mqtt_rx_pubcomp, and mqtt_rx_pingresp
* return codes
*/
static
int mqtt_publisher_parser(struct mqtt_ctx *ctx, struct net_buf *rx)
{
uint16_t pkt_type = MQTT_INVALID;
struct net_buf *data = NULL;
int rc = -EINVAL;
data = mqtt_linearize_buffer(ctx, rx, MQTT_PUBLISHER_MIN_MSG_SIZE);
if (!data) {
rc = -ENOMEM;
goto exit_parser;
}
pkt_type = MQTT_PACKET_TYPE(data->data[0]);
switch (pkt_type) {
case MQTT_CONNACK:
if (!ctx->connected) {
rc = mqtt_rx_connack(ctx, data, ctx->clean_session);
} else {
rc = -EINVAL;
}
break;
case MQTT_PUBACK:
rc = mqtt_rx_puback(ctx, data);
break;
case MQTT_PUBREC:
rc = mqtt_rx_pubrec(ctx, data);
break;
case MQTT_PUBCOMP:
rc = mqtt_rx_pubcomp(ctx, data);
break;
case MQTT_PINGRESP:
rc = mqtt_rx_pingresp(ctx, data);
break;
default:
rc = -EINVAL;
break;
}
exit_parser:
/* TODO: add error handling via a user provided callback */
net_nbuf_unref(data);
return rc;
}
/**
* @brief mqtt_subscriber_parser Calls the appropriate rx routine for the MQTT
* message contained in rx
* @details On error, this routine will execute the
* 'ctx->malformed' callback (if defined)
* @param ctx MQTT context
* @param rx RX buffer
* @return 0 on success
* @return -EINVAL if an unknown message is received
* @return -ENOMEM if no data buffer is available
* @return mqtt_rx_publish, mqtt_rx_pubrel, mqtt_rx_pingresp,
* mqtt_rx_suback
* return codes
*/
static
int mqtt_subscriber_parser(struct mqtt_ctx *ctx, struct net_buf *rx)
{
uint16_t pkt_type = MQTT_INVALID;
struct net_buf *data = NULL;
int rc = 0;
data = mqtt_linearize_buffer(ctx, rx, MQTT_PUBLISHER_MIN_MSG_SIZE);
if (!data) {
rc = -ENOMEM;
goto exit_parser;
}
pkt_type = MQTT_PACKET_TYPE(data->data[0]);
switch (pkt_type) {
case MQTT_CONNACK:
if (!ctx->connected) {
rc = mqtt_rx_connack(ctx, data, ctx->clean_session);
} else {
rc = -EINVAL;
}
break;
case MQTT_PUBLISH:
rc = mqtt_rx_publish(ctx, data);
break;
case MQTT_PUBREL:
rc = mqtt_rx_pubrel(ctx, data);
break;
case MQTT_PINGRESP:
rc = mqtt_rx_pingresp(ctx, data);
break;
case MQTT_SUBACK:
rc = mqtt_rx_suback(ctx, data);
break;
default:
rc = -EINVAL;
break;
}
exit_parser:
/* TODO: add error handling via a user provided callback */
net_nbuf_unref(data);
return rc;
}
static
void mqtt_recv(struct net_context *net_ctx, struct net_buf *buf, int status,
void *data)
{
struct mqtt_ctx *mqtt = (struct mqtt_ctx *)data;
/* net_ctx is already referenced by the mqtt_ctx struct */
ARG_UNUSED(net_ctx);
if (status != 0) {
return;
}
mqtt->rcv(mqtt, buf);
net_nbuf_unref(buf);
}
int mqtt_init(struct mqtt_ctx *ctx, enum mqtt_app app_type)
{
/* So far, only clean session = 1 is supported */
ctx->clean_session = 1;
ctx->connected = 0;
/* Install the receiver callback, timeout is set to K_NO_WAIT.
* In this case, no return code is evaluated.
*/
(void)net_context_recv(ctx->net_ctx, mqtt_recv, K_NO_WAIT, ctx);
ctx->app_type = app_type;
switch (ctx->app_type) {
case MQTT_APP_PUBLISHER:
ctx->rcv = mqtt_publisher_parser;
break;
case MQTT_APP_SUBSCRIBER:
ctx->rcv = mqtt_subscriber_parser;
break;
default:
return -EINVAL;
}
return 0;
}
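/*
* Minimal usage sketch for an MQTT publisher built on the API above, assuming
* ctx->net_ctx is already connected to the broker; the initializer fields of
* struct mqtt_ctx, struct mqtt_connect_msg and struct mqtt_publish_msg are
* indicative only and must follow the definitions in <net/mqtt.h>:
*
*     struct mqtt_ctx ctx = { ... };               // must at least set net_ctx and net_timeout
*     struct mqtt_connect_msg conn = { .clean_session = 1, ... };
*     struct mqtt_publish_msg pub = { ... };
*
*     mqtt_init(&ctx, MQTT_APP_PUBLISHER);
*     mqtt_tx_connect(&ctx, &conn);     // CONNACK is handled by mqtt_publisher_parser()
*     mqtt_tx_publish(&ctx, &pub);      // PUBACK/PUBREC/PUBCOMP handled by the parser
*     mqtt_tx_pingreq(&ctx);            // keep-alive
*     mqtt_tx_disconnect(&ctx);
*/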
|
489690.c | /*
* Phoenix-RTOS
*
* Phoenix-RTOS SHell
*
* Copyright 2017, 2018, 2020 Phoenix Systems
* Author: Pawel Pisarczyk, Jan Sikorski, Lukasz Kosinski
*
* This file is part of Phoenix-RTOS.
*
* %LICENSE%
*/
#include <dirent.h>
#include <errno.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <termios.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/pwman.h>
#include <posix/utils.h>
#include "psh.h"
/* Shell definitions */
#define PROMPT "(psh)% " /* Shell prompt */
#define SCRIPT_MAGIC ":{}:" /* Every psh script should start with this line */
#define HISTSZ 512 /* Command history size */
/* Special key codes */
#define UP "^[[A" /* Up */
#define DOWN "^[[B" /* Down */
#define RIGHT "^[[C" /* Right */
#define LEFT "^[[D" /* Left */
#define DELETE "^[[3~" /* Delete */
/* Misc definitions */
#define BP_OFFS 0 /* Offset of 0 exponent entry in binary prefix table */
#define BP_EXP_OFFS 10 /* Offset between consecutive entries exponents in binary prefix table */
#define SI_OFFS 8 /* Offset of 0 exponent entry in SI prefix table */
#define SI_EXP_OFFS 3 /* Offset between consecutive entries exponents in SI prefix table */
typedef struct {
int n; /* Command length (each word is followed by '\0') */
char *cmd; /* Command pointer */
} psh_histent_t;
typedef struct {
int hb; /* History begin index (oldest command) */
int he; /* History end index (newest command) */
psh_histent_t entries[HISTSZ]; /* Command history entries */
} psh_hist_t;
/* Shell commands */
extern int psh_bind(int argc, char **argv);
extern int psh_cat(int argc, char **argv);
extern int psh_kill(int argc, char **argv);
extern int psh_ls(int argc, char **argv);
extern int psh_mem(int argc, char **argv);
extern int psh_mkdir(int argc, char **argv);
extern int psh_mount(int argc, char **argv);
extern int psh_perf(int argc, char **argv);
extern int psh_ps(int argc, char **argv);
extern int psh_reboot(int argc, char **argv);
extern int psh_sync(int argc, char **argv);
extern int psh_top(int argc, char **argv);
extern int psh_touch(int argc, char **argv);
/* Binary (base 2) prefixes */
static const char *bp[] = {
"", /* 2^0 */
"K", /* 2^10 kibi */
"M", /* 2^20 mebi */
"G", /* 2^30 gibi */
"T", /* 2^40 tebi */
"P", /* 2^50 pebi */
"E", /* 2^60 exbi */
"Z", /* 2^70 zebi */
"Y" /* 2^80 yobi */
};
/* SI (base 10) prefixes */
static const char* si[] = {
"y", /* 10^-24 yocto */
"z", /* 10^-21 zepto */
"a", /* 10^-18 atto */
"f", /* 10^-15 femto */
"p", /* 10^-12 pico */
"n", /* 10^-9 nano */
"u", /* 10^-6 micro */
"m", /* 10^-3 milli */
"", /* 10^0 */
"k", /* 10^3 kilo */
"M", /* 10^6 mega */
"G", /* 10^9 giga */
"T", /* 10^12 tera */
"P", /* 10^15 peta */
"E", /* 10^18 exa */
"Z", /* 10^21 zetta */
"Y", /* 10^24 yotta */
};
psh_common_t psh_common;
static void psh_exit(int code)
{
keepidle(0);
exit(code);
}
static void _psh_exit(int code)
{
keepidle(0);
_exit(code);
}
static int psh_mod(int x, int y)
{
int ret = x % y;
if (ret < 0)
ret += abs(y);
return ret;
}
static int psh_div(int x, int y)
{
return (x - psh_mod(x, y)) / y;
}
static int psh_log(unsigned int base, unsigned int x)
{
int ret = 0;
while (x /= base)
ret++;
return ret;
}
static int psh_pow(int x, unsigned int y)
{
int ret = 1;
while (y) {
if (y & 1)
ret *= x;
y >>= 1;
if (!y)
break;
x *= x;
}
return ret;
}
static const char *psh_bp(int exp)
{
exp = psh_div(exp, BP_EXP_OFFS) + BP_OFFS;
if ((exp < 0) || (exp >= sizeof(bp) / sizeof(bp[0])))
return NULL;
return bp[exp];
}
static const char *psh_si(int exp)
{
exp = psh_div(exp, SI_EXP_OFFS) + SI_OFFS;
if ((exp < 0) || (exp >= sizeof(si) / sizeof(si[0])))
return NULL;
return si[exp];
}
int psh_prefix(unsigned int base, int x, int y, unsigned int prec, char *buff)
{
int div = psh_log(base, abs(x)), exp = div + y;
int offs, ipart, fpart;
const char *(*fp)(int);
const char *prefix;
/* Support precision for up to 8 decimal places */
if (prec > 8)
return -EINVAL;
switch (base) {
/* Binary prefix */
case 2:
fp = psh_bp;
offs = BP_EXP_OFFS;
break;
/* SI prefix */
case 10:
fp = psh_si;
offs = SI_EXP_OFFS;
break;
default:
return -EINVAL;
}
/* div < 0 => accumulate extra exponents in x */
if ((div -= psh_mod(exp, offs)) < 0) {
x *= psh_pow(base, -div);
div = 0;
}
div = psh_pow(base, div);
/* Save integer part and fractional part as percentage */
ipart = abs(x) / div;
fpart = (int)((uint64_t)psh_pow(10, prec + 1) * (abs(x) % div) / div);
/* Round the result */
if ((fpart = (fpart + 5) / 10) == psh_pow(10, prec)) {
ipart++;
fpart = 0;
if (ipart == psh_pow(base, offs)) {
ipart = 1;
exp += offs;
}
}
/* Remove trailing zeros */
while (fpart && !(fpart % 10)) {
fpart /= 10;
prec--;
}
/* Get the prefix */
if ((prefix = fp((!ipart && !fpart) ? y : exp)) == NULL)
return -EINVAL;
if (x < 0)
*buff++ = '-';
if (fpart)
sprintf(buff, "%d.%0*d%s", ipart, prec, fpart, prefix);
else
sprintf(buff, "%d%s", ipart, prefix);
return EOK;
}
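/*
* Example use of psh_prefix() (illustrative only):
*
*     char buff[16];
*     psh_prefix(10, 1500, 0, 1, buff);   // buff == "1.5k" (SI, base 10)
*     psh_prefix(2, 2560, 0, 1, buff);    // buff == "2.5K" (binary, base 2)
*
* The y argument is an extra exponent added to x, so a value already expressed
* in kibibytes can be passed with base 2 and y = 10.
*/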
static int psh_extendcmd(char **cmd, int *cmdsz, int n)
{
char *rcmd;
if ((rcmd = realloc(*cmd, n)) == NULL) {
fprintf(stderr, "\r\npsh: out of memory\r\n");
free(*cmd);
return -ENOMEM;
}
*cmd = rcmd;
*cmdsz = n;
return EOK;
}
static int psh_histentcmd(char **cmd, int *cmdsz, psh_histent_t *entry)
{
int err, i;
if ((entry->n > *cmdsz) && ((err = psh_extendcmd(cmd, cmdsz, entry->n + 1)) < 0))
return err;
for (i = 0; i < entry->n; i++)
(*cmd)[i] = (entry->cmd[i] == '\0') ? ' ' : entry->cmd[i];
return entry->n;
}
static void psh_printhistent(psh_histent_t *entry)
{
int i;
for (i = 0; i < entry->n; i++)
write(STDOUT_FILENO, (entry->cmd[i] == '\0') ? " " : entry->cmd + i, 1);
}
static int psh_completepath(char *dir, char *base, char ***files)
{
size_t i, size = 32, dlen = strlen(dir), blen = strlen(base);
int nfiles = 0, err = EOK;
char *path, **rfiles;
struct stat stat;
DIR *stream;
*files = NULL;
do {
if ((stream = opendir(dir)) == NULL)
break;
if (dir[dlen - 1] != '/')
dir[dlen++] = '/';
if ((*files = malloc(size * sizeof(char *))) == NULL) {
fprintf(stderr, "\r\npsh: out of memory\r\n");
err = -ENOMEM;
break;
}
while (readdir(stream) != NULL) {
if ((stream->dirent->d_namlen < blen) || strncmp(stream->dirent->d_name, base, blen))
continue;
if (!blen && (!strcmp(stream->dirent->d_name, ".") || !strcmp(stream->dirent->d_name, "..")))
continue;
i = dlen + stream->dirent->d_namlen;
if ((path = malloc(i + 1)) == NULL) {
fprintf(stderr, "\r\npsh: out of memory\r\n");
err = -ENOMEM;
break;
}
memcpy(path, dir, dlen);
strcpy(path + dlen, stream->dirent->d_name);
if ((err = lstat(path, &stat)) < 0) {
fprintf(stderr, "\r\npsh: can't stat file %s\r\n", path);
free(path);
break;
}
free(path);
if (nfiles == size) {
if ((rfiles = realloc(*files, 2 * size * sizeof(char *))) == NULL) {
fprintf(stderr, "\r\npsh: out of memory\r\n");
err = -ENOMEM;
break;
}
*files = rfiles;
size *= 2;
}
if (((*files)[nfiles] = malloc(stream->dirent->d_namlen + 2)) == NULL) {
fprintf(stderr, "\r\npsh: out of memory\r\n");
err = -ENOMEM;
break;
}
memcpy((*files)[nfiles], stream->dirent->d_name, stream->dirent->d_namlen);
(*files)[nfiles][stream->dirent->d_namlen] = (S_ISDIR(stat.st_mode)) ? '/' : ' ';
(*files)[nfiles][stream->dirent->d_namlen + 1] = '\0';
nfiles++;
}
} while (0);
if (err < 0) {
for (i = 0; i < nfiles; i++)
free((*files)[i]);
free(*files);
*files = NULL;
return err;
}
return nfiles;
}
static int psh_printfiles(char **files, int nfiles)
{
int i, row, col, rows, cols, *colsz, len = 0;
struct winsize ws;
if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) < 0) {
ws.ws_row = 25;
ws.ws_col = 80;
}
for (i = 0; i < nfiles; i++)
len += strlen(files[i]);
rows = len / ws.ws_col + 1;
cols = nfiles / rows + 1;
if ((colsz = malloc(cols * sizeof(int))) == NULL) {
fprintf(stderr, "\r\npsh: out of memory\r\n");
return -ENOMEM;
}
for (; rows <= nfiles; rows++) {
cols = nfiles / rows + !!(nfiles % rows);
for (i = 0; i < cols; i++)
colsz[i] = 0;
for (i = 0; i < nfiles; i++) {
col = i / rows;
if ((len = strlen(files[i])) + 2 > colsz[col])
colsz[col] = len + 2;
}
colsz[cols - 1] -= 2;
for (len = 0, col = 0; col < cols; col++)
len += colsz[col];
if (len < ws.ws_col)
break;
}
printf("\r\n");
for (row = 0; row < rows; row++) {
for (col = 0; col < cols; col++) {
if ((i = col * rows + row) >= nfiles)
continue;
if ((len = colsz[col]) > ws.ws_col)
len = ws.ws_col;
printf("%-*s", len, files[i]);
}
printf("\r\n");
}
fflush(stdout);
free(colsz);
return EOK;
}
static void psh_movecursor(int col, int n)
{
struct winsize ws;
int p;
if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) < 0) {
ws.ws_row = 25;
ws.ws_col = 80;
}
col %= ws.ws_col;
if (col + n < 0) {
p = (-(col + n) + ws.ws_col - 1) / ws.ws_col;
n += p * ws.ws_col;
printf("\033[%dA", p);
}
else if (col + n > ws.ws_col - 1) {
p = (col + n) / ws.ws_col;
n -= p * ws.ws_col;
printf("\033[%dB", p);
}
if (n > 0)
printf("\033[%dC", n);
else if (n < 0)
printf("\033[%dD", -n);
fflush(stdout);
}
static int psh_cmpname(const void *n1, const void *n2)
{
return strcasecmp(*(char **)n1, *(char **)n2);
}
extern void cfmakeraw(struct termios *termios);
static int psh_readcmd(struct termios *orig, psh_hist_t *cmdhist, char **cmd)
{
int i, nfiles, err = EOK, esc = 0, n = 0, m = 0, ln = 0, hp = cmdhist->he, cmdsz = 128;
char c, *path, *fpath, *dir, *base, **files, buff[8];
struct termios raw = *orig;
if ((*cmd = malloc(cmdsz)) == NULL) {
fprintf(stderr, "\npsh: out of memory\n");
return -ENOMEM;
}
/* Enable raw mode for command processing */
cfmakeraw(&raw);
if ((err = tcsetattr(STDIN_FILENO, TCSAFLUSH, &raw)) < 0) {
fprintf(stderr, "\npsh: failed to enable raw mode\n");
free(*cmd);
return err;
}
for (;;) {
read(STDIN_FILENO, &c, 1);
/* Process control characters */
if ((c < 0x20) || (c == 0x7f)) {
/* Print not recognized escape codes */
if (esc) {
if (hp != cmdhist->he) {
if ((err = psh_histentcmd(cmd, &cmdsz, cmdhist->entries + hp)) < 0)
break;
hp = cmdhist->he;
}
if ((n + m + esc + 1 > cmdsz) && ((err = psh_extendcmd(cmd, &cmdsz, 2 * (n + m + esc) + 1)) < 0))
break;
memmove(*cmd + n + esc, *cmd + n, m);
memcpy(*cmd + n, buff, esc);
write(STDOUT_FILENO, *cmd + n, esc + m);
n += esc;
psh_movecursor(n + m + sizeof(PROMPT) - 1, -m);
esc = 0;
}
/* ETX => cancel command */
if (c == '\003') {
printf("^C");
if (m > 2)
psh_movecursor(n + sizeof(PROMPT) + 1, m - 2);
printf("\r\n");
n = m = 0;
break;
}
/* EOT => delete next character/exit */
else if (c == '\004') {
if (m) {
if (hp != cmdhist->he) {
if ((err = psh_histentcmd(cmd, &cmdsz, cmdhist->entries + hp)) < 0)
break;
hp = cmdhist->he;
}
memmove(*cmd + n, *cmd + n + 1, --m);
write(STDOUT_FILENO, "\033[0J", 4);
write(STDOUT_FILENO, *cmd + n, m);
psh_movecursor(n + m + sizeof(PROMPT) - 1, -m);
}
else if (!(n + m)) {
printf("exit\r\n");
free(*cmd);
*cmd = NULL;
break;
}
}
/* BS => remove last character */
else if ((c == '\b') || (c == '\177')) {
if (n) {
if (hp != cmdhist->he) {
if ((err = psh_histentcmd(cmd, &cmdsz, cmdhist->entries + hp)) < 0)
break;
hp = cmdhist->he;
}
write(STDOUT_FILENO, "\b", 1);
n--;
memmove(*cmd + n, *cmd + n + 1, m);
write(STDOUT_FILENO, "\033[0J", 4);
write(STDOUT_FILENO, *cmd + n, m);
psh_movecursor(n + m + sizeof(PROMPT) - 1, -m);
}
}
/* TAB => autocomplete paths */
else if (c == '\t') {
nfiles = err = 0;
path = (hp != cmdhist->he) ? cmdhist->entries[hp].cmd : *cmd;
for (i = n; i && (path[i - 1] != ' ') && (path[i - 1] != '\0'); i--);
if (i < n) {
path += i;
i = n - i;
c = path[i];
path[i] = '\0';
if ((fpath = canonicalize_file_name(path)) == NULL) {
fprintf(stderr, "\r\npsh: out of memory\r\n");
path[i] = c;
err = -ENOMEM;
break;
}
path[i] = c;
if (i && (path[i - 1] == '/') && (fpath[strlen(fpath) - 1] == '.'))
fpath[strlen(fpath) - 1] = '\0';
splitname(fpath, &base, &dir);
do {
if ((nfiles = psh_completepath(dir, base, &files)) <= 0) {
err = nfiles;
break;
}
/* Print hints */
if (nfiles > 1) {
psh_movecursor(n + sizeof(PROMPT) - 1, m);
qsort(files, nfiles, sizeof(char *), psh_cmpname);
if ((err = psh_printfiles(files, nfiles)) < 0)
break;
write(STDOUT_FILENO, "\r\033[0J", 5);
write(STDOUT_FILENO, PROMPT, sizeof(PROMPT) - 1);
if (hp == cmdhist->he)
write(STDOUT_FILENO, *cmd, n + m);
else
psh_printhistent(cmdhist->entries + hp);
psh_movecursor(n + m + sizeof(PROMPT) - 1, -m);
}
/* Complete path */
else {
if (hp != cmdhist->he) {
if ((err = psh_histentcmd(cmd, &cmdsz, cmdhist->entries + hp)) < 0)
break;
hp = cmdhist->he;
}
i = strlen(files[0]) - strlen(base);
if ((n + m + i + 1 > cmdsz) && ((err = psh_extendcmd(cmd, &cmdsz, 2 * (n + m + i) + 1)) < 0)) {
free(dir);
free(base);
break;
}
memmove(*cmd + n + i, *cmd + n, m);
memcpy(*cmd + n, files[0] + strlen(base), i);
write(STDOUT_FILENO, *cmd + n, i + m);
n += i;
psh_movecursor(n + m + sizeof(PROMPT) - 1, -m);
}
} while (0);
for (i = 0; i < nfiles; i++)
free(files[i]);
free(files);
free(fpath);
if (err < 0)
break;
}
}
/* FF => clear screen */
else if (c == '\014') {
write(STDOUT_FILENO, "\033[f", 3);
write(STDOUT_FILENO, "\r\033[0J", 5);
write(STDOUT_FILENO, PROMPT, sizeof(PROMPT) - 1);
if (hp != cmdhist->he)
psh_printhistent(cmdhist->entries + hp);
else
write(STDOUT_FILENO, *cmd, n + m);
psh_movecursor(n + m + sizeof(PROMPT) - 1, -m);
}
/* LF or CR => go to new line and break (finished reading command) */
else if ((c == '\r') || (c == '\n')) {
if (hp != cmdhist->he) {
if ((err = psh_histentcmd(cmd, &cmdsz, cmdhist->entries + hp)) < 0)
break;
hp = cmdhist->he;
}
psh_movecursor(n + sizeof(PROMPT) - 1, m);
write(STDOUT_FILENO, "\r\n", 2);
break;
}
/* ESC => process escape code keys */
else if (c == '\033') {
buff[esc++] = '^';
buff[esc++] = '[';
}
}
/* Process regular characters */
else {
if (!esc) {
if (hp != cmdhist->he) {
if ((err = psh_histentcmd(cmd, &cmdsz, cmdhist->entries + hp)) < 0)
break;
hp = cmdhist->he;
}
if ((n + m + 2 > cmdsz) && ((err = psh_extendcmd(cmd, &cmdsz, 2 * (n + m + 1) + 1)) < 0))
break;
memmove(*cmd + n + 1, *cmd + n, m);
(*cmd)[n++] = c;
write(STDOUT_FILENO, *cmd + n - 1, m + 1);
psh_movecursor(n + m + sizeof(PROMPT) - 1, -m);
continue;
}
buff[esc++] = c;
if (!strncmp(buff, UP, esc)) {
if (esc == sizeof(UP) - 1) {
if (hp != cmdhist->hb) {
if (hp == cmdhist->he)
ln = n + m;
psh_movecursor(n + sizeof(PROMPT) - 1, -(n + sizeof(PROMPT) - 1));
write(STDOUT_FILENO, "\r\033[0J", 5);
write(STDOUT_FILENO, PROMPT, sizeof(PROMPT) - 1);
psh_printhistent(cmdhist->entries + (hp = (hp) ? hp - 1 : HISTSZ - 1));
n = cmdhist->entries[hp].n;
m = 0;
}
esc = 0;
}
}
else if (!strncmp(buff, DOWN, esc)) {
if (esc == sizeof(DOWN) - 1) {
if (hp != cmdhist->he) {
psh_movecursor(n + sizeof(PROMPT) - 1, -(n + sizeof(PROMPT) - 1));
write(STDOUT_FILENO, "\r\033[0J", 5);
write(STDOUT_FILENO, PROMPT, sizeof(PROMPT) - 1);
if ((hp = (hp + 1) % HISTSZ) == cmdhist->he) {
n = ln;
write(STDOUT_FILENO, *cmd, n);
}
else {
n = cmdhist->entries[hp].n;
psh_printhistent(cmdhist->entries + hp);
}
m = 0;
}
esc = 0;
}
}
else if (!strncmp(buff, RIGHT, esc)) {
if (esc == sizeof(RIGHT) - 1) {
if (m) {
psh_movecursor(n + sizeof(PROMPT) - 1, 1);
n++;
m--;
}
esc = 0;
}
}
else if (!strncmp(buff, LEFT, esc)) {
if (esc == sizeof(LEFT) - 1) {
if (n) {
psh_movecursor(n + sizeof(PROMPT) - 1, -1);
n--;
m++;
}
esc = 0;
}
}
else if (!strncmp(buff, DELETE, esc)) {
if (esc == sizeof(DELETE) - 1) {
if (m) {
if (hp != cmdhist->he) {
if ((err = psh_histentcmd(cmd, &cmdsz, cmdhist->entries + hp)) < 0)
break;
hp = cmdhist->he;
}
memmove(*cmd + n, *cmd + n + 1, --m);
write(STDOUT_FILENO, "\033[0J", 4);
write(STDOUT_FILENO, *cmd + n, m);
psh_movecursor(n + m + sizeof(PROMPT) - 1, -m);
}
esc = 0;
}
}
else {
if (hp != cmdhist->he) {
if ((err = psh_histentcmd(cmd, &cmdsz, cmdhist->entries + hp)) < 0)
break;
hp = cmdhist->he;
}
if ((n + m + esc + 1 > cmdsz) && ((err = psh_extendcmd(cmd, &cmdsz, 2 * (n + m + esc) + 1)) < 0))
break;
memmove(*cmd + n + esc, *cmd + n, m);
memcpy(*cmd + n, buff, esc);
write(STDOUT_FILENO, *cmd + n, esc + m);
n += esc;
psh_movecursor(n + m + sizeof(PROMPT) - 1, -m);
esc = 0;
}
}
}
/* Restore original terminal settings */
if ((esc = tcsetattr(STDIN_FILENO, TCSAFLUSH, orig)) < 0) {
fprintf(stderr, "\r\npsh: failed to restore terminal settings\r\n");
if (err >= 0)
err = esc;
}
if (err < 0) {
free(*cmd);
return err;
}
if (*cmd == NULL)
psh_exit(EXIT_SUCCESS);
(*cmd)[n + m] = '\0';
return n + m;
}
static int psh_parsecmd(char *line, int *argc, char ***argv)
{
char *cmd, *arg, **rargv;
if ((cmd = strtok(line, "\t ")) == NULL)
return -EINVAL;
if ((*argv = malloc(2 * sizeof(char *))) == NULL)
return -ENOMEM;
*argc = 0;
(*argv)[(*argc)++] = cmd;
while ((arg = strtok(NULL, "\t ")) != NULL) {
if ((rargv = realloc(*argv, (*argc + 2) * sizeof(char *))) == NULL) {
free(*argv);
return -ENOMEM;
}
*argv = rargv;
(*argv)[(*argc)++] = arg;
}
(*argv)[*argc] = NULL;
return EOK;
}
static int psh_runfile(char **argv)
{
pid_t pid;
if ((pid = vfork()) < 0) {
fprintf(stderr, "psh: vfork failed with code %d\n", pid);
return pid;
}
else if (!pid) {
/* Put process in its own process group */
pid = getpid();
if (setpgid(pid, pid) < 0) {
fprintf(stderr, "psh: failed to put %s process in its own process group\n", argv[0]);
_psh_exit(EXIT_FAILURE);
}
/* Take terminal control */
tcsetpgrp(STDIN_FILENO, pid);
/* Execute the file */
execve(argv[0], argv, NULL);
switch (errno) {
case EIO:
fprintf(stderr, "psh: failed to load %s executable\n", argv[0]);
break;
case ENOMEM:
fprintf(stderr, "psh: out of memory\n");
break;
case EINVAL:
case ENOENT:
fprintf(stderr, "psh: invalid executable\n");
break;
default:
fprintf(stderr, "psh: exec failed with code %d\n", -errno);
}
_psh_exit(EXIT_FAILURE);
}
waitpid(pid, NULL, 0);
/* Take back terminal control */
tcsetpgrp(STDIN_FILENO, getpgid(getpid()));
return EOK;
}
static int psh_runscript(char *path)
{
char **argv = NULL, *line = NULL;
int i, err = EOK, argc = 0;
size_t n = 0;
ssize_t ret;
FILE *stream;
pid_t pid;
if ((stream = fopen(path, "r")) == NULL) {
fprintf(stderr, "psh: failed to open file %s\n", path);
return -EINVAL;
}
if ((getline(&line, &n, stream) < (ssize_t)sizeof(SCRIPT_MAGIC)) || strncmp(line, SCRIPT_MAGIC, sizeof(SCRIPT_MAGIC) - 1)) {
fprintf(stderr, "psh: %s is not a psh script\n", path);
free(line);
fclose(stream);
return -EINVAL;
}
free(line);
line = NULL;
n = 0;
for (i = 2; (ret = getline(&line, &n, stream)) > 0; i++) {
if (line[0] == 'X' || line[0] == 'W') {
if (line[ret - 1] == '\n')
line[ret - 1] = '\0';
do {
if ((err = psh_parsecmd(line + 1, &argc, &argv)) < 0) {
fprintf(stderr, "psh: failed to parse line %d\n", i);
break;
}
if ((pid = vfork()) < 0) {
fprintf(stderr, "psh: vfork failed in line %d\n", i);
err = pid;
break;
}
else if (!pid) {
execve(argv[0], argv, NULL);
fprintf(stderr, "psh: exec failed in line %d\n", i);
_psh_exit(EXIT_FAILURE);
}
if ((line[0] == 'W') && ((err = waitpid(pid, NULL, 0)) < 0)) {
fprintf(stderr, "psh: waitpid failed in line %d\n", i);
break;
}
} while (0);
free(argv);
argv = NULL;
}
if (err < 0)
break;
free(line);
line = NULL;
n = 0;
}
free(line);
fclose(stream);
return EOK;
}
static int psh_exec(int argc, char **argv)
{
int err;
if (argc < 2) {
fprintf(stderr, "usage: %s command [args]...\n", argv[0]);
return -EINVAL;
}
switch (err = execve(argv[1], argv + 1, NULL)) {
case EOK:
break;
case -ENOMEM:
fprintf(stderr, "psh: out of memory\n");
break;
case -EINVAL:
fprintf(stderr, "psh: invalid executable\n");
break;
default:
fprintf(stderr, "psh: exec failed with code %d\n", err);
}
return err;
}
static void psh_help(void)
{
printf("Available commands:\n");
printf(" bind - binds device to directory\n");
printf(" cat - concatenate file(s) to standard output\n");
printf(" exec - replace shell with the given command\n");
printf(" exit - exits the shell\n");
printf(" help - prints this help message\n");
printf(" history - prints command history\n");
printf(" kill - terminates process\n");
printf(" ls - lists files in the namespace\n");
printf(" mem - prints memory map\n");
printf(" mkdir - creates directory\n");
printf(" mount - mounts a filesystem\n");
printf(" perf - tracks kernel performance\n");
printf(" ps - prints processes and threads\n");
printf(" reboot - restarts the machine\n");
printf(" sync - synchronizes device\n");
printf(" top - top utility\n");
printf(" touch - changes file timestamp\n");
}
static int psh_history(int argc, char **argv, psh_hist_t *cmdhist)
{
unsigned char clear = 0;
int c, i, size;
while ((c = getopt(argc, argv, "ch")) != -1) {
switch (c) {
case 'c':
clear = 1;
break;
case 'h':
default:
printf("usage: %s [options] or no args to print command history\n", argv[0]);
printf(" -c: clears command history\n");
printf(" -h: shows this help message\n");
return EOK;
}
}
if (clear) {
for (i = cmdhist->hb; i != cmdhist->he; i = (i + 1) % HISTSZ)
free(cmdhist->entries[i].cmd);
cmdhist->hb = cmdhist->he = 0;
}
else {
size = (cmdhist->hb < cmdhist->he) ? cmdhist->he - cmdhist->hb : HISTSZ - cmdhist->hb + cmdhist->he;
c = psh_log(10, size) + 1;
for (i = 0; i < size; i++) {
printf(" %*u ", c, i + 1);
fflush(stdout);
psh_printhistent(cmdhist->entries + (cmdhist->hb + i) % HISTSZ);
putchar('\n');
}
}
return EOK;
}
static void psh_signalint(int sig)
{
psh_common.sigint = 1;
}
static void psh_signalquit(int sig)
{
psh_common.sigquit = 1;
}
static void psh_signalstop(int sig)
{
psh_common.sigstop = 1;
}
static int psh_run(void)
{
psh_hist_t *cmdhist;
psh_histent_t *entry;
struct termios orig;
char *cmd, **argv;
int err, n, argc;
pid_t pgrp;
/* Check if we run interactively */
if (!isatty(STDIN_FILENO))
return -ENOTTY;
/* Wait till we run in foreground */
if (tcgetpgrp(STDIN_FILENO) != -1) {
while ((pgrp = getpgrp()) != tcgetpgrp(STDIN_FILENO))
kill(-pgrp, SIGTTIN);
}
/* Set signal handlers */
signal(SIGINT, psh_signalint);
signal(SIGQUIT, psh_signalquit);
signal(SIGTSTP, psh_signalstop);
signal(SIGTTIN, SIG_IGN);
signal(SIGTTOU, SIG_IGN);
signal(SIGCHLD, SIG_IGN);
/* Put ourselves in our own process group */
pgrp = getpid();
if ((err = setpgid(pgrp, pgrp)) < 0) {
fprintf(stderr, "psh: failed to put shell in its own process group\n");
return err;
}
/* Save original terminal settings */
if ((err = tcgetattr(STDIN_FILENO, &orig)) < 0) {
fprintf(stderr, "psh: failed to save terminal settings\n");
return err;
}
/* Take terminal control */
if ((err = tcsetpgrp(STDIN_FILENO, pgrp)) < 0) {
fprintf(stderr, "psh: failed to take terminal control\n");
return err;
}
if ((cmdhist = calloc(1, sizeof(*cmdhist))) == NULL) {
fprintf(stderr, "psh: failed to allocated command history storage\n");
return -ENOMEM;
}
for (;;) {
write(STDOUT_FILENO, "\r\033[0J", 5);
write(STDOUT_FILENO, PROMPT, sizeof(PROMPT) - 1);
if ((n = psh_readcmd(&orig, cmdhist, &cmd)) < 0) {
err = n;
break;
}
if ((err = psh_parsecmd(cmd, &argc, &argv)) < 0) {
free(cmd);
if (err == -EINVAL)
continue;
break;
}
/* Select command history entry */
if (cmdhist->he != cmdhist->hb) {
entry = &cmdhist->entries[(cmdhist->he) ? cmdhist->he - 1 : HISTSZ - 1];
if ((n == entry->n) && !memcmp(cmd, entry->cmd, n)) {
cmdhist->he = (cmdhist->he) ? cmdhist->he - 1 : HISTSZ - 1;
free(entry->cmd);
}
else {
entry = cmdhist->entries + cmdhist->he;
}
}
else {
entry = cmdhist->entries + cmdhist->he;
}
/* Update command history */
entry->cmd = cmd;
entry->n = n;
if ((cmdhist->he = (cmdhist->he + 1) % HISTSZ) == cmdhist->hb) {
free(cmdhist->entries[cmdhist->hb].cmd);
cmdhist->entries[cmdhist->hb].cmd = NULL;
cmdhist->hb = (cmdhist->hb + 1) % HISTSZ;
}
/* Clear signals */
psh_common.sigint = 0;
psh_common.sigquit = 0;
psh_common.sigstop = 0;
/* Reset getopt */
optind = 1;
if (!strcmp(argv[0], "bind"))
psh_bind(argc, argv);
else if (!strcmp(argv[0], "cat"))
psh_cat(argc, argv);
else if (!strcmp(argv[0], "exec"))
psh_exec(argc, argv);
else if (!strcmp(argv[0], "exit"))
psh_exit(EXIT_SUCCESS);
else if (!strcmp(argv[0], "help"))
psh_help();
else if (!strcmp(argv[0], "history"))
psh_history(argc, argv, cmdhist);
else if (!strcmp(argv[0], "kill"))
psh_kill(argc, argv);
else if (!strcmp(argv[0], "ls"))
psh_ls(argc, argv);
else if (!strcmp(argv[0], "mem"))
psh_mem(argc, argv);
else if (!strcmp(argv[0], "mkdir"))
psh_mkdir(argc, argv);
else if (!strcmp(argv[0], "mount"))
psh_mount(argc, argv);
else if (!strcmp(argv[0], "perf"))
psh_perf(argc, argv);
else if (!strcmp(argv[0], "ps"))
psh_ps(argc, argv);
else if (!strcmp(argv[0], "reboot"))
psh_reboot(argc, argv);
else if (!strcmp(argv[0], "sync"))
psh_sync(argc, argv);
else if (!strcmp(argv[0], "top"))
psh_top(argc, argv);
else if (!strcmp(argv[0], "touch"))
psh_touch(argc, argv);
else if (argv[0][0] == '/')
psh_runfile(argv);
else
printf("Unknown command!\n");
free(argv);
fflush(NULL);
}
/* Free command history */
for (; cmdhist->hb != cmdhist->he; cmdhist->hb = (cmdhist->hb + 1) % HISTSZ)
free(cmdhist->entries[cmdhist->hb].cmd);
free(cmdhist);
return err;
}
int main(int argc, char **argv)
{
char *base, *dir, *path = NULL;
oid_t oid;
int c;
keepidle(1);
splitname(argv[0], &base, &dir);
if (!strcmp(base, "psh")) {
/* Wait for root filesystem */
while (lookup("/", NULL, &oid) < 0)
usleep(10000);
/* Wait for console */
while (write(1, "", 0) < 0)
usleep(50000);
/* Run shell script */
if (argc > 1) {
while ((c = getopt(argc, argv, "i:h")) != -1) {
switch (c) {
case 'i':
path = optarg;
break;
case 'h':
default:
printf("usage: %s [options] [script path] or no args to run shell interactively\n", argv[0]);
printf(" -i <script path>: selects psh script to execute\n");
printf(" -h: shows this help message\n");
return EOK;
}
}
if (optind < argc)
path = argv[optind];
if (path != NULL)
psh_runscript(path);
}
/* Run shell interactively */
else {
psh_run();
}
}
else if (!strcmp(base, "bind")) {
psh_bind(argc, argv);
}
else if (!strcmp(base, "mem")) {
psh_mem(argc, argv);
}
else if (!strcmp(base, "mount")) {
psh_mount(argc, argv);
}
else if (!strcmp(base, "perf")) {
psh_perf(argc, argv);
}
else if (!strcmp(base, "ps")) {
psh_ps(argc, argv);
}
else if (!strcmp(base, "reboot")) {
psh_reboot(argc, argv);
}
else if (!strcmp(base, "sync")) {
psh_sync(argc, argv);
}
else if(!strcmp(base, "top")) {
psh_top(argc, argv);
}
else {
fprintf(stderr, "psh: %s: unknown command\n", argv[0]);
}
keepidle(0);
return EOK;
}
|
100241.c | /*********************************************************
* Copyright (C) 2005-2015 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation version 2.1 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the Lesser GNU General Public
* License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
*********************************************************/
/*********************************************************
* The contents of this file are subject to the terms of the Common
* Development and Distribution License (the "License") version 1.0
* and no later version. You may not use this file except in
* compliance with the License.
*
* You can obtain a copy of the License at
* http://www.opensource.org/licenses/cddl1.php
*
* See the License for the specific language governing permissions
* and limitations under the License.
*
*********************************************************/
/*
* backdoorGcc64.c --
*
* Implements the real work for guest-side backdoor for GCC, 64-bit
* target (supports inline ASM, GAS syntax). The asm sections are marked
 * volatile since vmware can change the registers' contents without the
 * compiler knowing it.
*
* See backdoorGCC32.c (from which this code was mostly copied) for
* details on why the ASM is written this way. Also note that it might be
* possible to write the asm blocks using the symbolic operand specifiers
* in such a way that the same asm would generate correct code for both
* 32-bit and 64-bit targets, but I'm too lazy to figure it all out.
* --rrdharan
*/
#ifdef __cplusplus
extern "C" {
#endif
#include "backdoor.h"
#include "backdoorInt.h"
/*
*----------------------------------------------------------------------------
*
* Backdoor_InOut --
*
* Send a low-bandwidth basic request (16 bytes) to vmware, and return its
* reply (24 bytes).
*
* Results:
* Host-side response returned in bp IN/OUT parameter.
*
* Side effects:
* Pokes the backdoor.
*
*----------------------------------------------------------------------------
*/
void
Backdoor_InOut(Backdoor_proto *myBp) // IN/OUT
{
uint64 dummy;
__asm__ __volatile__(
#ifdef __APPLE__
/*
* Save %rbx on the stack because the Mac OS GCC doesn't want us to
* clobber it - it erroneously thinks %rbx is the PIC register.
* (Radar bug 7304232)
*/
"pushq %%rbx" "\n\t"
#endif
"pushq %%rax" "\n\t"
"movq 40(%%rax), %%rdi" "\n\t"
"movq 32(%%rax), %%rsi" "\n\t"
"movq 24(%%rax), %%rdx" "\n\t"
"movq 16(%%rax), %%rcx" "\n\t"
"movq 8(%%rax), %%rbx" "\n\t"
"movq (%%rax), %%rax" "\n\t"
"inl %%dx, %%eax" "\n\t" /* NB: There is no inq instruction */
"xchgq %%rax, (%%rsp)" "\n\t"
"movq %%rdi, 40(%%rax)" "\n\t"
"movq %%rsi, 32(%%rax)" "\n\t"
"movq %%rdx, 24(%%rax)" "\n\t"
"movq %%rcx, 16(%%rax)" "\n\t"
"movq %%rbx, 8(%%rax)" "\n\t"
"popq (%%rax)" "\n\t"
#ifdef __APPLE__
"popq %%rbx" "\n\t"
#endif
: "=a" (dummy)
: "0" (myBp)
/*
* vmware can modify the whole VM state without the compiler knowing
* it. So far it does not modify EFLAGS. --hpreg
*/
:
#ifndef __APPLE__
/* %rbx is unchanged at the end of the function on Mac OS. */
"rbx",
#endif
"rcx", "rdx", "rsi", "rdi", "memory"
);
}
/*
*-----------------------------------------------------------------------------
*
* BackdoorHbIn --
* BackdoorHbOut --
*
* Send a high-bandwidth basic request to vmware, and return its
* reply.
*
* Results:
* Host-side response returned in bp IN/OUT parameter.
*
* Side-effects:
* Pokes the high-bandwidth backdoor port.
*
*-----------------------------------------------------------------------------
*/
void
BackdoorHbIn(Backdoor_proto_hb *myBp) // IN/OUT
{
uint64 dummy;
__asm__ __volatile__(
"pushq %%rbp" "\n\t"
#ifdef __APPLE__
/*
* Save %rbx on the stack because the Mac OS GCC doesn't want us to
* clobber it - it erroneously thinks %rbx is the PIC register.
* (Radar bug 7304232)
*/
"pushq %%rbx" "\n\t"
#endif
"pushq %%rax" "\n\t"
"movq 48(%%rax), %%rbp" "\n\t"
"movq 40(%%rax), %%rdi" "\n\t"
"movq 32(%%rax), %%rsi" "\n\t"
"movq 24(%%rax), %%rdx" "\n\t"
"movq 16(%%rax), %%rcx" "\n\t"
"movq 8(%%rax), %%rbx" "\n\t"
"movq (%%rax), %%rax" "\n\t"
"cld" "\n\t"
"rep; insb" "\n\t"
"xchgq %%rax, (%%rsp)" "\n\t"
"movq %%rbp, 48(%%rax)" "\n\t"
"movq %%rdi, 40(%%rax)" "\n\t"
"movq %%rsi, 32(%%rax)" "\n\t"
"movq %%rdx, 24(%%rax)" "\n\t"
"movq %%rcx, 16(%%rax)" "\n\t"
"movq %%rbx, 8(%%rax)" "\n\t"
"popq (%%rax)" "\n\t"
#ifdef __APPLE__
"popq %%rbx" "\n\t"
#endif
"popq %%rbp"
: "=a" (dummy)
: "0" (myBp)
/*
* vmware can modify the whole VM state without the compiler knowing
* it. --hpreg
*/
:
#ifndef __APPLE__
/* %rbx is unchanged at the end of the function on Mac OS. */
"rbx",
#endif
"rcx", "rdx", "rsi", "rdi", "memory", "cc"
);
}
void
BackdoorHbOut(Backdoor_proto_hb *myBp) // IN/OUT
{
uint64 dummy;
__asm__ __volatile__(
"pushq %%rbp" "\n\t"
#ifdef __APPLE__
/*
* Save %rbx on the stack because the Mac OS GCC doesn't want us to
* clobber it - it erroneously thinks %rbx is the PIC register.
* (Radar bug 7304232)
*/
"pushq %%rbx" "\n\t"
#endif
"pushq %%rax" "\n\t"
"movq 48(%%rax), %%rbp" "\n\t"
"movq 40(%%rax), %%rdi" "\n\t"
"movq 32(%%rax), %%rsi" "\n\t"
"movq 24(%%rax), %%rdx" "\n\t"
"movq 16(%%rax), %%rcx" "\n\t"
"movq 8(%%rax), %%rbx" "\n\t"
"movq (%%rax), %%rax" "\n\t"
"cld" "\n\t"
"rep; outsb" "\n\t"
"xchgq %%rax, (%%rsp)" "\n\t"
"movq %%rbp, 48(%%rax)" "\n\t"
"movq %%rdi, 40(%%rax)" "\n\t"
"movq %%rsi, 32(%%rax)" "\n\t"
"movq %%rdx, 24(%%rax)" "\n\t"
"movq %%rcx, 16(%%rax)" "\n\t"
"movq %%rbx, 8(%%rax)" "\n\t"
"popq (%%rax)" "\n\t"
#ifdef __APPLE__
"popq %%rbx" "\n\t"
#endif
"popq %%rbp"
: "=a" (dummy)
: "0" (myBp)
:
#ifndef __APPLE__
/* %rbx is unchanged at the end of the function on Mac OS. */
"rbx",
#endif
"rcx", "rdx", "rsi", "rdi", "memory", "cc"
);
}
#ifdef __cplusplus
}
#endif
|
401994.c | /* Driver for routine laguer */
#include <stdio.h>
#include <math.h>
#define NRANSI
#include "nr.h"
#include "complex.h"
#define M 4 /* degree of polynomial */
#define MP1 (M+1) /* no. of polynomial coefficients */
#define NTRY 21
#define NTRY1 NTRY+1
#define EPS 1.e-6
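/* Driver strategy: call laguer() from NTRY different complex starting points
   and record every converged root that is not within EPS (relative) of a
   root already found, so each distinct root of the degree-4 polynomial is
   printed exactly once. */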
int main(void)
{
fcomplex y[NTRY1],x;
static fcomplex a[MP1]={{0.0,2.0},
{0.0,0.0},
{-1.0,-2.0},
{0.0,0.0},
{1.0,0.0} };
int i,iflag,its,j,n=0;
printf("\nRoots of polynomial x^4-(1+2i)*x^2+2i\n");
printf("\n%15s %15s %7s\n","Real","Complex","#iter");
for (i=1;i<=NTRY;i++) {
x=Complex((i-11.0)/10.0,(i-11.0)/10.0);
laguer(a,M,&x,&its);
if (n == 0) {
n=1;
y[1]=x;
printf("%5d %12.6f %12.6f %5d\n",n,x.r,x.i,its);
} else {
iflag=0;
for (j=1;j<=n;j++)
if (Cabs(Csub(x,y[j])) <= EPS*Cabs(x)) iflag=1;
if (iflag == 0) {
y[++n]=x;
printf("%5d %12.6f %12.6f %5d\n",n,x.r,x.i,its);
}
}
}
return 0;
}
#undef NRANSI
/* (C) Copr. 1986-92 Numerical Recipes Software 7&X*. */
|
721279.c |
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include "mrsocket.h"
struct User{
int id;
int type;
int fd;
int snd_id;
int rcv_id;
struct mr_buffer* buffer;
};
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
//60 connections
#define TEST_CLIENT_NUM 60
#else
//1000 connections
#define TEST_CLIENT_NUM 1000
//Yes, 1000 sockets connect to the server
#endif
#define TEST_SERVER_IP "127.0.0.1"
// #define TEST_SERVER_IP "192.168.188.224"
#define TEST_SERVER_PORT 8765
struct User* clientUsers[TEST_CLIENT_NUM] = {0};
struct User* create_user(){
struct User* user = (struct User*)malloc(sizeof(struct User));
user->buffer = mr_buffer_create(4);
return user;
}
void destroy_user(struct User* user){
mr_buffer_free(user->buffer);
free(user);
}
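/* Data callback: reassemble one length-framed packet, check the expected
 * receive sequence number, print the measured round-trip time, then echo the
 * payload back with the id incremented and the next send sequence number. */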
static void client_handle_data(uintptr_t uid, int fd, char* data, int size)
{
printf("client_handle_data uid = %d, fd = %d, size = %d \n", (int)uid, fd, size);
struct User* user = (struct User*)uid;
struct mr_buffer* buffer = user->buffer;
mr_buffer_read_push(buffer, data, size);
int ret = mr_buffer_read_pack(buffer);
if (ret > 0){
const char* ptr = buffer->read_data;
uint32_t id = 0;
ptr = mr_decode32u(ptr, &id);
uint32_t send_time = 0;
ptr = mr_decode32u(ptr, &send_time);
uint32_t rcv_id = 0;
ptr = mr_decode32u(ptr, &rcv_id);
assert(user->rcv_id == rcv_id);
user->rcv_id++;
uint32_t cur_time = mr_clock();
printf("[client]id = %d, rcv_id=%d, costtime = %d \n", id, rcv_id, cur_time-send_time);
assert(id%2 == 1);
char* enptr = buffer->read_data;
enptr = mr_encode32u(enptr, ++id);
enptr = mr_encode32u(enptr, cur_time);
enptr = mr_encode32u(enptr, (uint32_t)user->snd_id);
user->snd_id++;
mr_buffer_write_push(buffer, buffer->read_data, buffer->read_len);
mr_buffer_write_pack(buffer);
int ret = mr_socket_send(fd, buffer->write_data, buffer->write_len);
if (ret < 0)
{
printf("[client]mr_socket_send faild ret = %d\n", ret);
}
}
}
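/* Connect callback: reset both sequence counters and send the first packet,
 * a ~100 KB payload carrying id 0, the current clock value and send
 * sequence number 0. */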
static void client_handle_connect(uintptr_t uid, int fd, char* data, int size)
{
printf("client_handle_connect uid = %d, fd = %d, data =%s, size = %d \n", (int)uid, fd, data, size);
struct User* user = (struct User*)uid;
user->snd_id = 0;
user->rcv_id = 0;
//100KB data
char tmp[1024*100] = {0};
// snprintf(tmp, 2048, "send data hello world");
memset(tmp, 97, sizeof(tmp)-1);
char* ptr = tmp;
uint32_t id = 0;
ptr = mr_encode32u(ptr, id);
uint32_t time = mr_clock();
ptr = mr_encode32u(ptr, time);
ptr = mr_encode32u(ptr, (uint32_t)user->snd_id);
user->snd_id++;
struct mr_buffer* buffer = user->buffer;
mr_buffer_write_push(buffer, tmp, sizeof(tmp));
mr_buffer_write_pack(buffer);
int ret = mr_socket_send(fd, buffer->write_data, buffer->write_len);
if (ret < 0)
{
printf("mr_socket_send faild ret = %d\n", ret);
}
}
static void client_handle_close(uintptr_t uid, int fd, char* data, int size)
{
printf("client_handle_close uid = %d, fd = %d, data=%s, size = %d \n", (int)uid, fd, data, size);
}
static void client_handle_error(uintptr_t uid, int fd, char* data, int size)
{
printf("client_handle_error uid = %d, fd = %d, data=%s, size = %d \n", (int)uid, fd, data, size);
}
static void client_handle_warning(uintptr_t uid, int fd, char* data, int size)
{
printf("client_handle_warning uid = %d, fd = %d, data=%s, size = %d \n", (int)uid, fd, data, size);
}
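/* Entry point: register the callbacks, open TEST_CLIENT_NUM connections to
 * the echo server and then pump mr_socket_update() in an endless loop. */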
int main(int argc, char* argv[])
{
mr_socket_init();
mr_socket_run();
mr_set_handle_data(client_handle_data);
mr_set_handle_connect(client_handle_connect);
mr_set_handle_close(client_handle_close);
mr_set_handle_error(client_handle_error);
mr_set_handle_warning(client_handle_warning);
int i = 0;
for (i = 0; i < TEST_CLIENT_NUM; ++i)
{
struct User* user = create_user();
user->id = i;
uintptr_t uid = (uintptr_t)user;
int fd = mr_socket_connect(uid, TEST_SERVER_IP, TEST_SERVER_PORT);
if (fd < 0)
{
printf("mr_socket_connect faild fd = %d\n", fd);
assert(0);
}
printf("mr_socket_connect id=%d, uid=%ld, fd =%d \n", user->id, uid, fd);
user->fd = fd;
clientUsers[i] = user;
}
printf("start success\n");
while(1)
{
mr_socket_update();
mr_sleep(1);
}
i = 0;
for (; i < TEST_CLIENT_NUM; ++i){
if (clientUsers[i]){
destroy_user(clientUsers[i]);
clientUsers[i] = NULL;
}
}
mr_socket_free();
return 0;
}
|
249066.c | /* This wrapper performs the following forwarding
* The lowercase names with underscore on the left are
* void functions with a status variable as last argument,
* all arguments are passed by reference
* so they can be called from Fortran like subroutines.
* The right hand side are C-interfaced functions
* that return a status (int) as defined in "tfqmrgpu.h".
*
* tfqmrgpucreatehandle_ --> tfqmrgpuCreateHandle
* tfqmrgpusetstream_ --> tfqmrgpuSetStream
* tfqmrgpugetstream_ --> tfqmrgpuGetStream
* tfqmrgpuprinterror_ --> tfqmrgpuPrintError
* tfqmrgpu_bsrsv_createplan_ --> tfqmrgpu_bsrsv_createPlan
* tfqmrgpu_bsrsv_buffersize_ --> tfqmrgpu_bsrsv_bufferSize
* tfqmrgpucreateworkspace_ --> tfqmrgpuCreateWorkspace
* tfqmrgpu_bsrsv_setbuffer_ --> tfqmrgpu_bsrsv_setBuffer
* tfqmrgpu_bsrsv_getbuffer_ --> tfqmrgpu_bsrsv_getBuffer
* tfqmrgpu_bsrsv_setmatrix_c_ --> tfqmrgpu_bsrsv_setMatrix
* tfqmrgpu_bsrsv_setmatrix_z_ --> tfqmrgpu_bsrsv_setMatrix
* tfqmrgpu_bsrsv_solve_ --> tfqmrgpu_bsrsv_solve
* tfqmrgpu_bsrsv_getinfo_ --> tfqmrgpu_bsrsv_getInfo
* tfqmrgpu_bsrsv_getmatrix_c_ --> tfqmrgpu_bsrsv_getMatrix
* tfqmrgpu_bsrsv_getmatrix_z_ --> tfqmrgpu_bsrsv_getMatrix
* tfqmrgpu_bsrsv_destroyplan_ --> tfqmrgpu_bsrsv_destroyPlan
* tfqmrgpudestroyworkspace_ --> tfqmrgpuDestroyWorkspace
* tfqmrgpudestroyhandle_ --> tfqmrgpuDestroyHandle
*
* The order of listing roughly resembles the default workflow
* of this library (except for GetStream and getBuffer).
* The _c_ and _z_ suffixes are for 32bit and 64bit complex arrays, respectively.
*/
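/*
 * Calling convention shared by every wrapper below: all arguments arrive as
 * pointers (Fortran passes by reference), the trailing "stat" argument
 * receives the tfqmrgpuStatus_t return value, and the lowercase names with a
 * trailing underscore match the default external symbol naming of common
 * Fortran compilers.
 */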
// #include <assert.h>
#include <stddef.h> // for size_t
#ifdef DEBUG
#include <stdio.h>
#include <stdlib.h>
#endif
typedef int cudaStream_t; // workaround to test without CUDA headers
#include "tfqmrgpu.h" // the full C-API of the tfqmrgpu library
// type abbreviations
typedef tfqmrgpuBsrsvPlan_t plan_t; //
typedef tfqmrgpuHandle_t handle_t; //
typedef tfqmrgpuStatus_t stat_t; //
typedef tfqmrgpuDataLayout_t layout_t; //
// For the Fortran interface, we generate a set of wrapper void functions,
// which can be called like subroutines in Fortran
void tfqmrgpuprinterror_(stat_t const *status, stat_t *stat) {
*stat = tfqmrgpuPrintError(*status);
}
void tfqmrgpucreatehandle_(handle_t *handle, stat_t *stat) {
*handle = NULL;
*stat = tfqmrgpuCreateHandle(handle); // here, handle is passed by reference
}
void tfqmrgpudestroyhandle_(handle_t *handle, stat_t *stat) {
*stat = tfqmrgpuDestroyHandle(*handle);
*handle = NULL;
}
void tfqmrgpusetstream_(handle_t const *handle, cudaStream_t const *streamId, stat_t *stat) {
*stat = tfqmrgpuSetStream(*handle, *streamId);
}
void tfqmrgpugetstream_(handle_t const *handle, cudaStream_t *streamId, stat_t *stat) {
*stat = tfqmrgpuGetStream(*handle, streamId);
}
void tfqmrgpu_bsrsv_createplan_(handle_t const *handle, plan_t *plan, int const *mb,
int const* bsrRowPtrA, int const *nnzbA, int const* bsrColIndA,
int const* bsrRowPtrX, int const *nnzbX, int const* bsrColIndX,
int const* bsrRowPtrB, int const *nnzbB, int const* bsrColIndB,
stat_t *stat) {
int const FortranIndexOffset = 1;
*plan = NULL;
#ifdef DEBUG
printf("tfqmrgpu_bsrsv_createplan_(handle=%p, *plan=%p, mb=%d, \n"
" bsrRowPtrA=%p, nnzbA=%d, bsrColIndA=%p, \n"
" bsrRowPtrX=%p, nnzbX=%d, bsrColIndX=%p, \n"
" bsrRowPtrB=%p, nnzbB=%d, bsrColIndB=%p, indexOffset=%d)\n",
*handle, *plan, *mb, bsrRowPtrA, *nnzbA, bsrColIndA,
bsrRowPtrX, *nnzbX, bsrColIndX, bsrRowPtrB, *nnzbB, bsrColIndB, FortranIndexOffset);
#endif
*stat = tfqmrgpu_bsrsv_createPlan(*handle, plan, *mb, // here, plan is passed by reference
bsrRowPtrA, *nnzbA, bsrColIndA,
bsrRowPtrX, *nnzbX, bsrColIndX,
bsrRowPtrB, *nnzbB, bsrColIndB,
FortranIndexOffset); // passed by value
if (TFQMRGPU_STATUS_SUCCESS != *stat) tfqmrgpuPrintError(*stat);
#ifdef DEBUG
printf("done tfqmrgpu_bsrsv_createplan_(handle=%p, *plan=%p, ...)\n", *handle, *plan);
#endif
}
void tfqmrgpu_bsrsv_destroyplan_(handle_t const *handle, plan_t *plan, stat_t *stat) {
*stat = tfqmrgpu_bsrsv_destroyPlan(*handle, *plan);
*plan = NULL;
}
void tfqmrgpu_bsrsv_buffersize_(handle_t const *handle, plan_t const *plan,
int const *ldA, int const *blockDim, int const *ldB, int const *RhsBlockDim,
char const *doublePrecision, size_t *pBufferSizeInBytes, stat_t *stat) {
*stat = tfqmrgpu_bsrsv_bufferSize(*handle, *plan,
*ldA, *blockDim, *ldB, *RhsBlockDim, *doublePrecision,
pBufferSizeInBytes); // here, pBufferSizeInBytes is passed by reference
}
void tfqmrgpucreateworkspace_(void* *pBuffer, size_t const *pBufferSizeInBytes, stat_t *stat) {
*stat = tfqmrgpuCreateWorkspace(pBuffer, *pBufferSizeInBytes, 'd'); // 'd':use device memory, 'm': use managed memory
#ifdef DEBUGGPU
printf("# allocate %.6f MByte at %p @device\n", 1e-6*(*pBufferSizeInBytes), *pBuffer);
#endif
}
void tfqmrgpudestroyworkspace_(void* *pBuffer, stat_t *stat) {
*stat = tfqmrgpuDestroyWorkspace(*pBuffer);
}
void tfqmrgpu_bsrsv_setbuffer_(handle_t const *handle, plan_t const *plan,
void* const *pBuffer, stat_t *stat) {
#ifdef DEBUG
printf("# register device pointer %p @device in plan\n", *pBuffer);
#endif
*stat = tfqmrgpu_bsrsv_setBuffer(*handle, *plan, *pBuffer);
#ifdef DEBUG
if (TFQMRGPU_STATUS_SUCCESS != *stat) tfqmrgpuPrintError(*stat);
#endif
}
void tfqmrgpu_bsrsv_getbuffer_(handle_t const *handle, plan_t const *plan,
void* *pBuffer, stat_t *stat) {
#ifdef DEBUG
printf("# query device pointer registered in plan\n");
#endif
*stat = tfqmrgpu_bsrsv_getBuffer(*handle, *plan, pBuffer); // here, pBuffer is passed by reference
#ifdef DEBUG
if (TFQMRGPU_STATUS_SUCCESS != *stat) tfqmrgpuPrintError(*stat);
printf("# device pointer %p @device registered in plan\n", *pBuffer);
#endif
}
void tfqmrgpu_bsrsv_setmatrix_c_(handle_t const *handle, plan_t const *plan, char const *var,
float const* val, int const *ld, char const *trans, layout_t const *layout, stat_t *stat) {
*stat = tfqmrgpu_bsrsv_setMatrix(*handle, *plan, *var, (void*) val, 'C', *ld, *trans, *layout);
}
void tfqmrgpu_bsrsv_setmatrix_z_(handle_t const *handle, plan_t const *plan, char const *var,
double const* val, int const *ld, char const *trans, layout_t const *layout, stat_t *stat) {
*stat = tfqmrgpu_bsrsv_setMatrix(*handle, *plan, *var, (void*) val, 'Z', *ld, *trans, *layout);
}
void tfqmrgpu_bsrsv_getmatrix_c_(handle_t const *handle, plan_t const *plan, char const *var,
float* val, int const *ld, char const *trans, layout_t const *layout, stat_t *stat) {
*stat = tfqmrgpu_bsrsv_getMatrix(*handle, *plan, *var, (void*) val, 'C', *ld, *trans, *layout);
}
void tfqmrgpu_bsrsv_getmatrix_z_(handle_t const *handle, plan_t const *plan, char const *var,
double* val, int const *ld, char const *trans, layout_t const *layout, stat_t *stat) {
*stat = tfqmrgpu_bsrsv_getMatrix(*handle, *plan, *var, (void*) val, 'Z', *ld, *trans, *layout);
}
void tfqmrgpu_bsrsv_solve_(handle_t const *handle, plan_t const *plan,
double const *threshold, int const *maxIterations, stat_t *stat) {
*stat = tfqmrgpu_bsrsv_solve(*handle, *plan, *threshold, *maxIterations);
}
void tfqmrgpu_bsrsv_getinfo_(handle_t const *handle, plan_t const *plan, double *residuum_reached,
int *iterations_needed, double *flops_performed, double *flops_performed_all, stat_t *stat) {
*stat = tfqmrgpu_bsrsv_getInfo(*handle, *plan, residuum_reached, iterations_needed, flops_performed, flops_performed_all); // last 4 args by reference
}
|
488518.c | /*
* Copyright (c) 2007, 2008 University of Tsukuba
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Tsukuba nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <core.h>
#include "ata.h"
#include "ata_cmd.h"
static const ata_cmd_type_t ata_cmd_type_table[256] = {
// Non-data Mandatory
[0x40] = { ATA_CMD_NONDATA, 0, 0 }, // READ VERIFY SECTOR
[0xE0] = { ATA_CMD_NONDATA, 0, 0 }, // STANDBY IMMEDIATE
[0xE1] = { ATA_CMD_NONDATA, 0, 0 }, // IDLE IMMEDIATE
[0xE2] = { ATA_CMD_NONDATA, 0, 0 }, // STANDBY
[0xE3] = { ATA_CMD_NONDATA, 0, 0 }, // IDLE
[0xE5] = { ATA_CMD_NONDATA, 0, 0 }, // CHECK POWER MODE
[0xE6] = { ATA_CMD_NONDATA, 0, 0 }, // SLEEP
[0xE7] = { ATA_CMD_NONDATA, 0, 0 }, // FLUSH CACHE
[0xEA] = { ATA_CMD_NONDATA, 0, 0 }, // FLUSH CACHE EXT
[0xC6] = { ATA_CMD_NONDATA, 0, 0 }, // SET MULTIPLE MODE
[0xEF] = { ATA_CMD_NONDATA, 0, 0 }, // SET FEATURES
// Non-data Optional
[0x00] = { ATA_CMD_NONDATA, 0, 0 }, // NOP
[0x03] = { ATA_CMD_NONDATA, 0, 0 }, // CFA REQUEST EXTENDED ERROR
[0x27] = { ATA_CMD_NONDATA, 0, 0 }, // READ NATIVE MAX ADDRESS EXT
[0x37] = { ATA_CMD_NONDATA, 0, 0 }, // SET MAX ADDRESS EXT
[0x42] = { ATA_CMD_NONDATA, 0, 0 }, // READ VERIFY SECTOR EXT
[0x51] = { ATA_CMD_NONDATA, 0, 0 }, // CONFIGURE STREAM
[0xC0] = { ATA_CMD_NONDATA, 0, 0 }, // CFA ERASE SECTORS
[0xD1] = { ATA_CMD_NONDATA, 0, 0 }, // CHECK MEDIA CARD TYPE
[0xDA] = { ATA_CMD_NONDATA, 0, 0 }, // GET MEDIA STATUS
[0xDE] = { ATA_CMD_NONDATA, 0, 0 }, // MEDIA LOCK
[0xDF] = { ATA_CMD_NONDATA, 0, 0 }, // MEDIA UNLOCK
[0xED] = { ATA_CMD_NONDATA, 0, 0 }, // MEDIA EJECT
[0xF3] = { ATA_CMD_NONDATA, 0, 0 }, // SECURITY ERASE PREPARE
[0xF5] = { ATA_CMD_NONDATA, 0, 0 }, // SECURITY FREEZE LOCK
[0xF8] = { ATA_CMD_NONDATA, 0, 0 }, // READ NATIVE MAX ADDRESS
[0xF9] = { ATA_CMD_NONDATA, 0, 0 }, // SET MAX ADDRESS
// Obsoleted
[0x10] = { ATA_CMD_NONDATA, 0, 0 }, // RECALIBRATE (until ATA-3)
// PIO IN
[0x20] = { ATA_CMD_PIO, STORAGE_READ, 0 }, // READ SECTOR
[0x21] = { ATA_CMD_PIO, STORAGE_READ, 0 }, // READ SECTOR NORETRY
[0xC4] = { ATA_CMD_PIO, STORAGE_READ, 0 }, // READ SECTOR MULTIPLE
[0x24] = { ATA_CMD_PIO, STORAGE_READ, 1 }, // READ SECTOR EXT
[0x29] = { ATA_CMD_PIO, STORAGE_READ, 1 }, // READ SECTOR MULTIPLE EXT
// PIO OUT
[0x30] = { ATA_CMD_PIO, STORAGE_WRITE, 0 }, // WRITE SECTOR
[0xC5] = { ATA_CMD_PIO, STORAGE_WRITE, 0 }, // WRITE SECTOR MULTIPLE
[0x34] = { ATA_CMD_PIO, STORAGE_WRITE, 1 }, // WRITE SECTOR EXT
[0x39] = { ATA_CMD_PIO, STORAGE_WRITE, 1 }, // WRITE SECTOR MULTIPLE EXT
// DMA
[0xC8] = { ATA_CMD_DMA, STORAGE_READ, 0 }, // READ DMA
[0x25] = { ATA_CMD_DMA, STORAGE_READ, 1 }, // READ DMA EXT
[0xCA] = { ATA_CMD_DMA, STORAGE_WRITE, 0 }, // WRITE DMA
[0x35] = { ATA_CMD_DMA, STORAGE_WRITE, 1 }, // WRITE DMA EXT
[0x3D] = { ATA_CMD_DMA, STORAGE_WRITE, 1 }, // WRITE DMA FUA EXT
// QUEUED DMA
[0xC7] = { ATA_CMD_DMQ, STORAGE_READ, 0 }, // READ DMA QUEUED
[0xCC] = { ATA_CMD_DMQ, STORAGE_WRITE, 0 }, // WRITE DMA QUEUED
[0x26] = { ATA_CMD_DMQ, STORAGE_READ, 1 }, // READ DMA QUEUED EXT
[0x36] = { ATA_CMD_DMQ, STORAGE_WRITE, 1 }, // WRITE DMA QUEUED EXT
[0x3E] = { ATA_CMD_DMQ, STORAGE_WRITE, 1 }, // WRITE DMA QUEUED FUA EXT
// ATAPI
[0xA0] = { ATA_CMD_PACKET, 0, 0 }, // PACKET
[0xA1] = { ATA_CMD_IDENTIFY, 0, 1 }, // IDENTIFY PACKET DEVICE
[0xA2] = { ATA_CMD_SERVICE, 0, 0 }, // SERVICE
[0x08] = { ATA_CMD_NONDATA, 0, 0 }, // DEVICE RESET
// Command Specific
[0x91] = { ATA_CMD_DEVPARAM, 0, 0 }, // INITIALIZE DEVICE PARAMETERS (until ATA/ATAPI-5)
[0xEC] = { ATA_CMD_IDENTIFY, 0, 0 }, // IDENTIFY DEVICE
// should implement PIO read
[0xB0] = { ATA_CMD_THROUGH, 0, 0 }, // SMART
[0x2F] = { ATA_CMD_THROUGH, 0, 0 }, /* READ LOG EXT */
[0x06] = { ATA_CMD_THROUGH, 0, 0 }, /* DATA SET MANAGEMENT (TRIM) */
/* Native Command Queuing */
[0x60] = { ATA_CMD_NCQ, STORAGE_READ, 0 }, /* READ FPDMA QUEUED */
[0x61] = { ATA_CMD_NCQ, STORAGE_WRITE, 0 }, /* WRITE FPDMA QUEUED */
};
ata_cmd_type_t
ata_get_cmd_type (u8 command)
{
return ata_cmd_type_table[command];
}
|
629184.c | /*
* Monkey's Audio lossless audio decoder
* Copyright (c) 2007 Benjamin Zores <ben@geexbox.org>
* based upon libdemac from Dave Chapman.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <inttypes.h>
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "lossless_audiodsp.h"
#include "avcodec.h"
#include "bswapdsp.h"
#include "bytestream.h"
#include "internal.h"
#include "get_bits.h"
#include "unary.h"
/**
* @file
* Monkey's Audio lossless audio decoder
*/
#define MAX_CHANNELS 2
#define MAX_BYTESPERSAMPLE 3
#define APE_FRAMECODE_MONO_SILENCE 1
#define APE_FRAMECODE_STEREO_SILENCE 3
#define APE_FRAMECODE_PSEUDO_STEREO 4
#define HISTORY_SIZE 512
#define PREDICTOR_ORDER 8
/** Total size of all predictor histories */
#define PREDICTOR_SIZE 50
#define YDELAYA (18 + PREDICTOR_ORDER*4)
#define YDELAYB (18 + PREDICTOR_ORDER*3)
#define XDELAYA (18 + PREDICTOR_ORDER*2)
#define XDELAYB (18 + PREDICTOR_ORDER)
#define YADAPTCOEFFSA 18
#define XADAPTCOEFFSA 14
#define YADAPTCOEFFSB 10
#define XADAPTCOEFFSB 5
/**
* Possible compression levels
* @{
*/
enum APECompressionLevel {
COMPRESSION_LEVEL_FAST = 1000,
COMPRESSION_LEVEL_NORMAL = 2000,
COMPRESSION_LEVEL_HIGH = 3000,
COMPRESSION_LEVEL_EXTRA_HIGH = 4000,
COMPRESSION_LEVEL_INSANE = 5000
};
/** @} */
#define APE_FILTER_LEVELS 3
/** Filter orders depending on compression level */
static const uint16_t ape_filter_orders[5][APE_FILTER_LEVELS] = {
{ 0, 0, 0 },
{ 16, 0, 0 },
{ 64, 0, 0 },
{ 32, 256, 0 },
{ 16, 256, 1280 }
};
/** Filter fraction bits depending on compression level */
static const uint8_t ape_filter_fracbits[5][APE_FILTER_LEVELS] = {
{ 0, 0, 0 },
{ 11, 0, 0 },
{ 11, 0, 0 },
{ 10, 13, 0 },
{ 11, 13, 15 }
};
/** Filters applied to the decoded data */
typedef struct APEFilter {
int16_t *coeffs; ///< actual coefficients used in filtering
int16_t *adaptcoeffs; ///< adaptive filter coefficients used for correcting of actual filter coefficients
int16_t *historybuffer; ///< filter memory
int16_t *delay; ///< filtered values
int avg;
} APEFilter;
typedef struct APERice {
uint32_t k;
uint32_t ksum;
} APERice;
typedef struct APERangecoder {
uint32_t low; ///< low end of interval
uint32_t range; ///< length of interval
uint32_t help; ///< bytes_to_follow resp. intermediate value
unsigned int buffer; ///< buffer for input/output
} APERangecoder;
/** Filter histories */
typedef struct APEPredictor {
int32_t *buf;
int32_t lastA[2];
int32_t filterA[2];
int32_t filterB[2];
    int32_t coeffsA[2][4];              ///< adaptation coefficients
    int32_t coeffsB[2][5];              ///< adaptation coefficients
int32_t historybuffer[HISTORY_SIZE + PREDICTOR_SIZE];
unsigned int sample_pos;
} APEPredictor;
/** Decoder context */
typedef struct APEContext {
AVClass *class; ///< class for AVOptions
AVCodecContext *avctx;
BswapDSPContext bdsp;
LLAudDSPContext adsp;
int channels;
int samples; ///< samples left to decode in current frame
int bps;
int fileversion; ///< codec version, very important in decoding process
int compression_level; ///< compression levels
int fset; ///< which filter set to use (calculated from compression level)
int flags; ///< global decoder flags
uint32_t CRC; ///< frame CRC
int frameflags; ///< frame flags
APEPredictor predictor; ///< predictor used for final reconstruction
int32_t *decoded_buffer;
int decoded_size;
int32_t *decoded[MAX_CHANNELS]; ///< decoded data for each channel
int blocks_per_loop; ///< maximum number of samples to decode for each call
int16_t* filterbuf[APE_FILTER_LEVELS]; ///< filter memory
APERangecoder rc; ///< rangecoder used to decode actual values
APERice riceX; ///< rice code parameters for the second channel
APERice riceY; ///< rice code parameters for the first channel
APEFilter filters[APE_FILTER_LEVELS][2]; ///< filters used for reconstruction
GetBitContext gb;
uint8_t *data; ///< current frame data
uint8_t *data_end; ///< frame data end
int data_size; ///< frame data allocated size
const uint8_t *ptr; ///< current position in frame data
int error;
void (*entropy_decode_mono)(struct APEContext *ctx, int blockstodecode);
void (*entropy_decode_stereo)(struct APEContext *ctx, int blockstodecode);
void (*predictor_decode_mono)(struct APEContext *ctx, int count);
void (*predictor_decode_stereo)(struct APEContext *ctx, int count);
} APEContext;
static void ape_apply_filters(APEContext *ctx, int32_t *decoded0,
int32_t *decoded1, int count);
static void entropy_decode_mono_0000(APEContext *ctx, int blockstodecode);
static void entropy_decode_stereo_0000(APEContext *ctx, int blockstodecode);
static void entropy_decode_mono_3860(APEContext *ctx, int blockstodecode);
static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode);
static void entropy_decode_mono_3900(APEContext *ctx, int blockstodecode);
static void entropy_decode_stereo_3900(APEContext *ctx, int blockstodecode);
static void entropy_decode_stereo_3930(APEContext *ctx, int blockstodecode);
static void entropy_decode_mono_3990(APEContext *ctx, int blockstodecode);
static void entropy_decode_stereo_3990(APEContext *ctx, int blockstodecode);
static void predictor_decode_mono_3800(APEContext *ctx, int count);
static void predictor_decode_stereo_3800(APEContext *ctx, int count);
static void predictor_decode_mono_3930(APEContext *ctx, int count);
static void predictor_decode_stereo_3930(APEContext *ctx, int count);
static void predictor_decode_mono_3950(APEContext *ctx, int count);
static void predictor_decode_stereo_3950(APEContext *ctx, int count);
static av_cold int ape_decode_close(AVCodecContext *avctx)
{
APEContext *s = avctx->priv_data;
int i;
for (i = 0; i < APE_FILTER_LEVELS; i++)
av_freep(&s->filterbuf[i]);
av_freep(&s->decoded_buffer);
av_freep(&s->data);
s->decoded_size = s->data_size = 0;
return 0;
}
static av_cold int ape_decode_init(AVCodecContext *avctx)
{
APEContext *s = avctx->priv_data;
int i;
if (avctx->extradata_size != 6) {
av_log(avctx, AV_LOG_ERROR, "Incorrect extradata\n");
return AVERROR(EINVAL);
}
if (avctx->channels > 2) {
av_log(avctx, AV_LOG_ERROR, "Only mono and stereo is supported\n");
return AVERROR(EINVAL);
}
s->bps = avctx->bits_per_coded_sample;
switch (s->bps) {
case 8:
avctx->sample_fmt = AV_SAMPLE_FMT_U8P;
break;
case 16:
avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
break;
case 24:
avctx->sample_fmt = AV_SAMPLE_FMT_S32P;
break;
default:
avpriv_request_sample(avctx,
"%d bits per coded sample", s->bps);
return AVERROR_PATCHWELCOME;
}
s->avctx = avctx;
s->channels = avctx->channels;
s->fileversion = AV_RL16(avctx->extradata);
s->compression_level = AV_RL16(avctx->extradata + 2);
s->flags = AV_RL16(avctx->extradata + 4);
av_log(avctx, AV_LOG_VERBOSE, "Compression Level: %d - Flags: %d\n",
s->compression_level, s->flags);
if (s->compression_level % 1000 || s->compression_level > COMPRESSION_LEVEL_INSANE ||
!s->compression_level ||
(s->fileversion < 3930 && s->compression_level == COMPRESSION_LEVEL_INSANE)) {
av_log(avctx, AV_LOG_ERROR, "Incorrect compression level %d\n",
s->compression_level);
return AVERROR_INVALIDDATA;
}
s->fset = s->compression_level / 1000 - 1;
for (i = 0; i < APE_FILTER_LEVELS; i++) {
if (!ape_filter_orders[s->fset][i])
break;
FF_ALLOC_OR_GOTO(avctx, s->filterbuf[i],
(ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4,
filter_alloc_fail);
}
if (s->fileversion < 3860) {
s->entropy_decode_mono = entropy_decode_mono_0000;
s->entropy_decode_stereo = entropy_decode_stereo_0000;
} else if (s->fileversion < 3900) {
s->entropy_decode_mono = entropy_decode_mono_3860;
s->entropy_decode_stereo = entropy_decode_stereo_3860;
} else if (s->fileversion < 3930) {
s->entropy_decode_mono = entropy_decode_mono_3900;
s->entropy_decode_stereo = entropy_decode_stereo_3900;
} else if (s->fileversion < 3990) {
s->entropy_decode_mono = entropy_decode_mono_3900;
s->entropy_decode_stereo = entropy_decode_stereo_3930;
} else {
s->entropy_decode_mono = entropy_decode_mono_3990;
s->entropy_decode_stereo = entropy_decode_stereo_3990;
}
if (s->fileversion < 3930) {
s->predictor_decode_mono = predictor_decode_mono_3800;
s->predictor_decode_stereo = predictor_decode_stereo_3800;
} else if (s->fileversion < 3950) {
s->predictor_decode_mono = predictor_decode_mono_3930;
s->predictor_decode_stereo = predictor_decode_stereo_3930;
} else {
s->predictor_decode_mono = predictor_decode_mono_3950;
s->predictor_decode_stereo = predictor_decode_stereo_3950;
}
ff_bswapdsp_init(&s->bdsp);
ff_llauddsp_init(&s->adsp);
avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
return 0;
filter_alloc_fail:
ape_decode_close(avctx);
return AVERROR(ENOMEM);
}
/**
* @name APE range decoding functions
* @{
*/
#define CODE_BITS 32
#define TOP_VALUE ((unsigned int)1 << (CODE_BITS-1))
#define SHIFT_BITS (CODE_BITS - 9)
#define EXTRA_BITS ((CODE_BITS-2) % 8 + 1)
#define BOTTOM_VALUE (TOP_VALUE >> 8)
/** Start the decoder */
static inline void range_start_decoding(APEContext *ctx)
{
ctx->rc.buffer = bytestream_get_byte(&ctx->ptr);
ctx->rc.low = ctx->rc.buffer >> (8 - EXTRA_BITS);
ctx->rc.range = (uint32_t) 1 << EXTRA_BITS;
}
/** Perform normalization */
static inline void range_dec_normalize(APEContext *ctx)
{
while (ctx->rc.range <= BOTTOM_VALUE) {
ctx->rc.buffer <<= 8;
if(ctx->ptr < ctx->data_end) {
ctx->rc.buffer += *ctx->ptr;
ctx->ptr++;
} else {
ctx->error = 1;
}
ctx->rc.low = (ctx->rc.low << 8) | ((ctx->rc.buffer >> 1) & 0xFF);
ctx->rc.range <<= 8;
}
}
/**
 * Calculate the cumulative frequency for the next symbol. Does NO update!
 * @param ctx decoder context
 * @param tot_f is the total frequency or (code_value)1<<shift
 * @return the cumulative frequency
*/
static inline int range_decode_culfreq(APEContext *ctx, int tot_f)
{
range_dec_normalize(ctx);
ctx->rc.help = ctx->rc.range / tot_f;
return ctx->rc.low / ctx->rc.help;
}
/**
* Decode value with given size in bits
* @param ctx decoder context
* @param shift number of bits to decode
*/
static inline int range_decode_culshift(APEContext *ctx, int shift)
{
range_dec_normalize(ctx);
ctx->rc.help = ctx->rc.range >> shift;
return ctx->rc.low / ctx->rc.help;
}
/**
* Update decoding state
* @param ctx decoder context
* @param sy_f the interval length (frequency of the symbol)
* @param lt_f the lower end (frequency sum of < symbols)
*/
static inline void range_decode_update(APEContext *ctx, int sy_f, int lt_f)
{
ctx->rc.low -= ctx->rc.help * lt_f;
ctx->rc.range = ctx->rc.help * sy_f;
}
/** Decode n bits (n <= 16) without modelling */
static inline int range_decode_bits(APEContext *ctx, int n)
{
int sym = range_decode_culshift(ctx, n);
range_decode_update(ctx, 1, sym);
return sym;
}
#define MODEL_ELEMENTS 64
/**
* Fixed probabilities for symbols in Monkey Audio version 3.97
*/
static const uint16_t counts_3970[22] = {
0, 14824, 28224, 39348, 47855, 53994, 58171, 60926,
62682, 63786, 64463, 64878, 65126, 65276, 65365, 65419,
65450, 65469, 65480, 65487, 65491, 65493,
};
/**
* Probability ranges for symbols in Monkey Audio version 3.97
*/
static const uint16_t counts_diff_3970[21] = {
14824, 13400, 11124, 8507, 6139, 4177, 2755, 1756,
1104, 677, 415, 248, 150, 89, 54, 31,
19, 11, 7, 4, 2,
};
/**
* Fixed probabilities for symbols in Monkey Audio version 3.98
*/
static const uint16_t counts_3980[22] = {
0, 19578, 36160, 48417, 56323, 60899, 63265, 64435,
64971, 65232, 65351, 65416, 65447, 65466, 65476, 65482,
65485, 65488, 65490, 65491, 65492, 65493,
};
/**
* Probability ranges for symbols in Monkey Audio version 3.98
*/
static const uint16_t counts_diff_3980[21] = {
19578, 16582, 12257, 7906, 4576, 2366, 1170, 536,
261, 119, 65, 31, 19, 10, 6, 3,
3, 2, 1, 1, 1,
};
/**
* Decode symbol
* @param ctx decoder context
* @param counts probability range start position
* @param counts_diff probability range widths
*/
static inline int range_get_symbol(APEContext *ctx,
const uint16_t counts[],
const uint16_t counts_diff[])
{
int symbol, cf;
cf = range_decode_culshift(ctx, 16);
if(cf > 65492){
symbol= cf - 65535 + 63;
range_decode_update(ctx, 1, cf);
if(cf > 65535)
ctx->error=1;
return symbol;
}
/* figure out the symbol inefficiently; a binary search would be much better */
for (symbol = 0; counts[symbol + 1] <= cf; symbol++);
range_decode_update(ctx, counts_diff[symbol], counts[symbol]);
return symbol;
}
/** @} */ // group rangecoder
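/** Adapt the Rice parameter k using a running, exponentially decaying sum of
 *  the decoded magnitudes. */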
static inline void update_rice(APERice *rice, unsigned int x)
{
int lim = rice->k ? (1 << (rice->k + 4)) : 0;
rice->ksum += ((x + 1) / 2) - ((rice->ksum + 16) >> 5);
if (rice->ksum < lim)
rice->k--;
else if (rice->ksum >= (1 << (rice->k + 5)))
rice->k++;
}
static inline int get_rice_ook(GetBitContext *gb, int k)
{
unsigned int x;
x = get_unary(gb, 1, get_bits_left(gb));
if (k)
x = (x << k) | get_bits(gb, k);
return x;
}
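/** Decode one value for file versions 3860-3899: a unary overflow prefix
 *  followed by rice->k raw bits, with rice->k adapted from a running sum. */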
static inline int ape_decode_value_3860(APEContext *ctx, GetBitContext *gb,
APERice *rice)
{
unsigned int x, overflow;
overflow = get_unary(gb, 1, get_bits_left(gb));
if (ctx->fileversion > 3880) {
while (overflow >= 16) {
overflow -= 16;
rice->k += 4;
}
}
if (!rice->k)
x = overflow;
else if(rice->k <= MIN_CACHE_BITS) {
x = (overflow << rice->k) + get_bits(gb, rice->k);
} else {
av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", rice->k);
return AVERROR_INVALIDDATA;
}
rice->ksum += x - (rice->ksum + 8 >> 4);
if (rice->ksum < (rice->k ? 1 << (rice->k + 4) : 0))
rice->k--;
else if (rice->ksum >= (1 << (rice->k + 5)) && rice->k < 24)
rice->k++;
/* Convert to signed */
return ((x >> 1) ^ ((x & 1) - 1)) + 1;
}
static inline int ape_decode_value_3900(APEContext *ctx, APERice *rice)
{
unsigned int x, overflow;
int tmpk;
overflow = range_get_symbol(ctx, counts_3970, counts_diff_3970);
if (overflow == (MODEL_ELEMENTS - 1)) {
tmpk = range_decode_bits(ctx, 5);
overflow = 0;
} else
tmpk = (rice->k < 1) ? 0 : rice->k - 1;
if (tmpk <= 16 || ctx->fileversion < 3910) {
if (tmpk > 23) {
av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk);
return AVERROR_INVALIDDATA;
}
x = range_decode_bits(ctx, tmpk);
} else if (tmpk <= 31) {
x = range_decode_bits(ctx, 16);
x |= (range_decode_bits(ctx, tmpk - 16) << 16);
} else {
av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk);
return AVERROR_INVALIDDATA;
}
x += overflow << tmpk;
update_rice(rice, x);
/* Convert to signed */
return ((x >> 1) ^ ((x & 1) - 1)) + 1;
}
static inline int ape_decode_value_3990(APEContext *ctx, APERice *rice)
{
unsigned int x, overflow;
int base, pivot;
pivot = rice->ksum >> 5;
if (pivot == 0)
pivot = 1;
overflow = range_get_symbol(ctx, counts_3980, counts_diff_3980);
if (overflow == (MODEL_ELEMENTS - 1)) {
overflow = range_decode_bits(ctx, 16) << 16;
overflow |= range_decode_bits(ctx, 16);
}
if (pivot < 0x10000) {
base = range_decode_culfreq(ctx, pivot);
range_decode_update(ctx, 1, base);
} else {
int base_hi = pivot, base_lo;
int bbits = 0;
while (base_hi & ~0xFFFF) {
base_hi >>= 1;
bbits++;
}
base_hi = range_decode_culfreq(ctx, base_hi + 1);
range_decode_update(ctx, 1, base_hi);
base_lo = range_decode_culfreq(ctx, 1 << bbits);
range_decode_update(ctx, 1, base_lo);
base = (base_hi << bbits) + base_lo;
}
x = base + overflow * pivot;
update_rice(rice, x);
/* Convert to signed */
return ((x >> 1) ^ ((x & 1) - 1)) + 1;
}
static void decode_array_0000(APEContext *ctx, GetBitContext *gb,
int32_t *out, APERice *rice, int blockstodecode)
{
int i;
int ksummax, ksummin;
rice->ksum = 0;
for (i = 0; i < FFMIN(blockstodecode, 5); i++) {
out[i] = get_rice_ook(&ctx->gb, 10);
rice->ksum += out[i];
}
rice->k = av_log2(rice->ksum / 10) + 1;
if (rice->k >= 24)
return;
for (; i < FFMIN(blockstodecode, 64); i++) {
out[i] = get_rice_ook(&ctx->gb, rice->k);
rice->ksum += out[i];
rice->k = av_log2(rice->ksum / ((i + 1) * 2)) + 1;
if (rice->k >= 24)
return;
}
ksummax = 1 << rice->k + 7;
ksummin = rice->k ? (1 << rice->k + 6) : 0;
for (; i < blockstodecode; i++) {
out[i] = get_rice_ook(&ctx->gb, rice->k);
rice->ksum += out[i] - out[i - 64];
while (rice->ksum < ksummin) {
rice->k--;
ksummin = rice->k ? ksummin >> 1 : 0;
ksummax >>= 1;
}
while (rice->ksum >= ksummax) {
rice->k++;
if (rice->k > 24)
return;
ksummax <<= 1;
ksummin = ksummin ? ksummin << 1 : 128;
}
}
for (i = 0; i < blockstodecode; i++)
out[i] = ((out[i] >> 1) ^ ((out[i] & 1) - 1)) + 1;
}
static void entropy_decode_mono_0000(APEContext *ctx, int blockstodecode)
{
decode_array_0000(ctx, &ctx->gb, ctx->decoded[0], &ctx->riceY,
blockstodecode);
}
static void entropy_decode_stereo_0000(APEContext *ctx, int blockstodecode)
{
decode_array_0000(ctx, &ctx->gb, ctx->decoded[0], &ctx->riceY,
blockstodecode);
decode_array_0000(ctx, &ctx->gb, ctx->decoded[1], &ctx->riceX,
blockstodecode);
}
static void entropy_decode_mono_3860(APEContext *ctx, int blockstodecode)
{
int32_t *decoded0 = ctx->decoded[0];
while (blockstodecode--)
*decoded0++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceY);
}
static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode)
{
int32_t *decoded0 = ctx->decoded[0];
int32_t *decoded1 = ctx->decoded[1];
int blocks = blockstodecode;
while (blockstodecode--)
*decoded0++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceY);
while (blocks--)
*decoded1++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceX);
}
static void entropy_decode_mono_3900(APEContext *ctx, int blockstodecode)
{
int32_t *decoded0 = ctx->decoded[0];
while (blockstodecode--)
*decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY);
}
static void entropy_decode_stereo_3900(APEContext *ctx, int blockstodecode)
{
int32_t *decoded0 = ctx->decoded[0];
int32_t *decoded1 = ctx->decoded[1];
int blocks = blockstodecode;
while (blockstodecode--)
*decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY);
range_dec_normalize(ctx);
// because of some implementation peculiarities we need to backpedal here
ctx->ptr -= 1;
range_start_decoding(ctx);
while (blocks--)
*decoded1++ = ape_decode_value_3900(ctx, &ctx->riceX);
}
static void entropy_decode_stereo_3930(APEContext *ctx, int blockstodecode)
{
int32_t *decoded0 = ctx->decoded[0];
int32_t *decoded1 = ctx->decoded[1];
while (blockstodecode--) {
*decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY);
*decoded1++ = ape_decode_value_3900(ctx, &ctx->riceX);
}
}
static void entropy_decode_mono_3990(APEContext *ctx, int blockstodecode)
{
int32_t *decoded0 = ctx->decoded[0];
while (blockstodecode--)
*decoded0++ = ape_decode_value_3990(ctx, &ctx->riceY);
}
static void entropy_decode_stereo_3990(APEContext *ctx, int blockstodecode)
{
int32_t *decoded0 = ctx->decoded[0];
int32_t *decoded1 = ctx->decoded[1];
while (blockstodecode--) {
*decoded0++ = ape_decode_value_3990(ctx, &ctx->riceY);
*decoded1++ = ape_decode_value_3990(ctx, &ctx->riceX);
}
}
static int init_entropy_decoder(APEContext *ctx)
{
/* Read the CRC */
if (ctx->fileversion >= 3900) {
if (ctx->data_end - ctx->ptr < 6)
return AVERROR_INVALIDDATA;
ctx->CRC = bytestream_get_be32(&ctx->ptr);
} else {
ctx->CRC = get_bits_long(&ctx->gb, 32);
}
/* Read the frame flags if they exist */
ctx->frameflags = 0;
if ((ctx->fileversion > 3820) && (ctx->CRC & 0x80000000)) {
ctx->CRC &= ~0x80000000;
if (ctx->data_end - ctx->ptr < 6)
return AVERROR_INVALIDDATA;
ctx->frameflags = bytestream_get_be32(&ctx->ptr);
}
/* Initialize the rice structs */
ctx->riceX.k = 10;
ctx->riceX.ksum = (1 << ctx->riceX.k) * 16;
ctx->riceY.k = 10;
ctx->riceY.ksum = (1 << ctx->riceY.k) * 16;
if (ctx->fileversion >= 3900) {
/* The first 8 bits of input are ignored. */
ctx->ptr++;
range_start_decoding(ctx);
}
return 0;
}
static const int32_t initial_coeffs_fast_3320[1] = {
375,
};
static const int32_t initial_coeffs_a_3800[3] = {
64, 115, 64,
};
static const int32_t initial_coeffs_b_3800[2] = {
740, 0
};
static const int32_t initial_coeffs_3930[4] = {
360, 317, -109, 98
};
static void init_predictor_decoder(APEContext *ctx)
{
APEPredictor *p = &ctx->predictor;
/* Zero the history buffers */
memset(p->historybuffer, 0, PREDICTOR_SIZE * sizeof(*p->historybuffer));
p->buf = p->historybuffer;
/* Initialize and zero the coefficients */
if (ctx->fileversion < 3930) {
if (ctx->compression_level == COMPRESSION_LEVEL_FAST) {
memcpy(p->coeffsA[0], initial_coeffs_fast_3320,
sizeof(initial_coeffs_fast_3320));
memcpy(p->coeffsA[1], initial_coeffs_fast_3320,
sizeof(initial_coeffs_fast_3320));
} else {
memcpy(p->coeffsA[0], initial_coeffs_a_3800,
sizeof(initial_coeffs_a_3800));
memcpy(p->coeffsA[1], initial_coeffs_a_3800,
sizeof(initial_coeffs_a_3800));
}
} else {
memcpy(p->coeffsA[0], initial_coeffs_3930, sizeof(initial_coeffs_3930));
memcpy(p->coeffsA[1], initial_coeffs_3930, sizeof(initial_coeffs_3930));
}
memset(p->coeffsB, 0, sizeof(p->coeffsB));
if (ctx->fileversion < 3930) {
memcpy(p->coeffsB[0], initial_coeffs_b_3800,
sizeof(initial_coeffs_b_3800));
memcpy(p->coeffsB[1], initial_coeffs_b_3800,
sizeof(initial_coeffs_b_3800));
}
p->filterA[0] = p->filterA[1] = 0;
p->filterB[0] = p->filterB[1] = 0;
p->lastA[0] = p->lastA[1] = 0;
p->sample_pos = 0;
}
/** Get inverse sign of integer (-1 for positive, 1 for negative and 0 for zero) */
static inline int APESIGN(int32_t x) {
return (x < 0) - (x > 0);
}
static av_always_inline int filter_fast_3320(APEPredictor *p,
const int decoded, const int filter,
const int delayA)
{
int32_t predictionA;
p->buf[delayA] = p->lastA[filter];
if (p->sample_pos < 3) {
p->lastA[filter] = decoded;
p->filterA[filter] = decoded;
return decoded;
}
predictionA = p->buf[delayA] * 2 - p->buf[delayA - 1];
p->lastA[filter] = decoded + (predictionA * p->coeffsA[filter][0] >> 9);
if ((decoded ^ predictionA) > 0)
p->coeffsA[filter][0]++;
else
p->coeffsA[filter][0]--;
p->filterA[filter] += p->lastA[filter];
return p->filterA[filter];
}
static av_always_inline int filter_3800(APEPredictor *p,
const int decoded, const int filter,
const int delayA, const int delayB,
const int start, const int shift)
{
int32_t predictionA, predictionB, sign;
int32_t d0, d1, d2, d3, d4;
p->buf[delayA] = p->lastA[filter];
p->buf[delayB] = p->filterB[filter];
if (p->sample_pos < start) {
predictionA = decoded + p->filterA[filter];
p->lastA[filter] = decoded;
p->filterB[filter] = decoded;
p->filterA[filter] = predictionA;
return predictionA;
}
d2 = p->buf[delayA];
d1 = (p->buf[delayA] - p->buf[delayA - 1]) << 1;
d0 = p->buf[delayA] + ((p->buf[delayA - 2] - p->buf[delayA - 1]) << 3);
d3 = p->buf[delayB] * 2 - p->buf[delayB - 1];
d4 = p->buf[delayB];
predictionA = d0 * p->coeffsA[filter][0] +
d1 * p->coeffsA[filter][1] +
d2 * p->coeffsA[filter][2];
sign = APESIGN(decoded);
p->coeffsA[filter][0] += (((d0 >> 30) & 2) - 1) * sign;
p->coeffsA[filter][1] += (((d1 >> 28) & 8) - 4) * sign;
p->coeffsA[filter][2] += (((d2 >> 28) & 8) - 4) * sign;
predictionB = d3 * p->coeffsB[filter][0] -
d4 * p->coeffsB[filter][1];
p->lastA[filter] = decoded + (predictionA >> 11);
sign = APESIGN(p->lastA[filter]);
p->coeffsB[filter][0] += (((d3 >> 29) & 4) - 2) * sign;
p->coeffsB[filter][1] -= (((d4 >> 30) & 2) - 1) * sign;
p->filterB[filter] = p->lastA[filter] + (predictionB >> shift);
p->filterA[filter] = p->filterB[filter] + ((p->filterA[filter] * 31) >> 5);
return p->filterA[filter];
}
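/* In-place adaptive FIR ("long") filters used by the old HIGH and EXTRA HIGH
 * profiles: the prediction is a dot product of the last `order` outputs with
 * coefficients that are adapted by the sign-sign LMS rule. */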
static void long_filter_high_3800(int32_t *buffer, int order, int shift, int length)
{
int i, j;
int32_t dotprod, sign;
int32_t coeffs[256], delay[256];
if (order >= length)
return;
memset(coeffs, 0, order * sizeof(*coeffs));
for (i = 0; i < order; i++)
delay[i] = buffer[i];
for (i = order; i < length; i++) {
dotprod = 0;
sign = APESIGN(buffer[i]);
for (j = 0; j < order; j++) {
dotprod += delay[j] * coeffs[j];
coeffs[j] += ((delay[j] >> 31) | 1) * sign;
}
buffer[i] -= dotprod >> shift;
for (j = 0; j < order - 1; j++)
delay[j] = delay[j + 1];
delay[order - 1] = buffer[i];
}
}
static void long_filter_ehigh_3830(int32_t *buffer, int length)
{
int i, j;
int32_t dotprod, sign;
int32_t coeffs[8] = { 0 }, delay[8] = { 0 };
for (i = 0; i < length; i++) {
dotprod = 0;
sign = APESIGN(buffer[i]);
for (j = 7; j >= 0; j--) {
dotprod += delay[j] * coeffs[j];
coeffs[j] += ((delay[j] >> 31) | 1) * sign;
}
for (j = 7; j > 0; j--)
delay[j] = delay[j - 1];
delay[0] = buffer[i];
buffer[i] -= dotprod >> 9;
}
}
static void predictor_decode_stereo_3800(APEContext *ctx, int count)
{
APEPredictor *p = &ctx->predictor;
int32_t *decoded0 = ctx->decoded[0];
int32_t *decoded1 = ctx->decoded[1];
int start = 4, shift = 10;
if (ctx->compression_level == COMPRESSION_LEVEL_HIGH) {
start = 16;
long_filter_high_3800(decoded0, 16, 9, count);
long_filter_high_3800(decoded1, 16, 9, count);
} else if (ctx->compression_level == COMPRESSION_LEVEL_EXTRA_HIGH) {
int order = 128, shift2 = 11;
if (ctx->fileversion >= 3830) {
order <<= 1;
shift++;
shift2++;
long_filter_ehigh_3830(decoded0 + order, count - order);
long_filter_ehigh_3830(decoded1 + order, count - order);
}
start = order;
long_filter_high_3800(decoded0, order, shift2, count);
long_filter_high_3800(decoded1, order, shift2, count);
}
while (count--) {
int X = *decoded0, Y = *decoded1;
if (ctx->compression_level == COMPRESSION_LEVEL_FAST) {
*decoded0 = filter_fast_3320(p, Y, 0, YDELAYA);
decoded0++;
*decoded1 = filter_fast_3320(p, X, 1, XDELAYA);
decoded1++;
} else {
*decoded0 = filter_3800(p, Y, 0, YDELAYA, YDELAYB,
start, shift);
decoded0++;
*decoded1 = filter_3800(p, X, 1, XDELAYA, XDELAYB,
start, shift);
decoded1++;
}
/* Combined */
p->buf++;
p->sample_pos++;
/* Have we filled the history buffer? */
if (p->buf == p->historybuffer + HISTORY_SIZE) {
memmove(p->historybuffer, p->buf,
PREDICTOR_SIZE * sizeof(*p->historybuffer));
p->buf = p->historybuffer;
}
}
}
static void predictor_decode_mono_3800(APEContext *ctx, int count)
{
APEPredictor *p = &ctx->predictor;
int32_t *decoded0 = ctx->decoded[0];
int start = 4, shift = 10;
if (ctx->compression_level == COMPRESSION_LEVEL_HIGH) {
start = 16;
long_filter_high_3800(decoded0, 16, 9, count);
} else if (ctx->compression_level == COMPRESSION_LEVEL_EXTRA_HIGH) {
int order = 128, shift2 = 11;
if (ctx->fileversion >= 3830) {
order <<= 1;
shift++;
shift2++;
long_filter_ehigh_3830(decoded0 + order, count - order);
}
start = order;
long_filter_high_3800(decoded0, order, shift2, count);
}
while (count--) {
if (ctx->compression_level == COMPRESSION_LEVEL_FAST) {
*decoded0 = filter_fast_3320(p, *decoded0, 0, YDELAYA);
decoded0++;
} else {
*decoded0 = filter_3800(p, *decoded0, 0, YDELAYA, YDELAYB,
start, shift);
decoded0++;
}
/* Combined */
p->buf++;
p->sample_pos++;
/* Have we filled the history buffer? */
if (p->buf == p->historybuffer + HISTORY_SIZE) {
memmove(p->historybuffer, p->buf,
PREDICTOR_SIZE * sizeof(*p->historybuffer));
p->buf = p->historybuffer;
}
}
}
static av_always_inline int predictor_update_3930(APEPredictor *p,
const int decoded, const int filter,
const int delayA)
{
int32_t predictionA, sign;
int32_t d0, d1, d2, d3;
p->buf[delayA] = p->lastA[filter];
d0 = p->buf[delayA ];
d1 = p->buf[delayA ] - p->buf[delayA - 1];
d2 = p->buf[delayA - 1] - p->buf[delayA - 2];
d3 = p->buf[delayA - 2] - p->buf[delayA - 3];
predictionA = d0 * p->coeffsA[filter][0] +
d1 * p->coeffsA[filter][1] +
d2 * p->coeffsA[filter][2] +
d3 * p->coeffsA[filter][3];
p->lastA[filter] = decoded + (predictionA >> 9);
p->filterA[filter] = p->lastA[filter] + ((p->filterA[filter] * 31) >> 5);
sign = APESIGN(decoded);
p->coeffsA[filter][0] += ((d0 < 0) * 2 - 1) * sign;
p->coeffsA[filter][1] += ((d1 < 0) * 2 - 1) * sign;
p->coeffsA[filter][2] += ((d2 < 0) * 2 - 1) * sign;
p->coeffsA[filter][3] += ((d3 < 0) * 2 - 1) * sign;
return p->filterA[filter];
}
static void predictor_decode_stereo_3930(APEContext *ctx, int count)
{
APEPredictor *p = &ctx->predictor;
int32_t *decoded0 = ctx->decoded[0];
int32_t *decoded1 = ctx->decoded[1];
ape_apply_filters(ctx, ctx->decoded[0], ctx->decoded[1], count);
while (count--) {
/* Predictor Y */
int Y = *decoded1, X = *decoded0;
*decoded0 = predictor_update_3930(p, Y, 0, YDELAYA);
decoded0++;
*decoded1 = predictor_update_3930(p, X, 1, XDELAYA);
decoded1++;
/* Combined */
p->buf++;
/* Have we filled the history buffer? */
if (p->buf == p->historybuffer + HISTORY_SIZE) {
memmove(p->historybuffer, p->buf,
PREDICTOR_SIZE * sizeof(*p->historybuffer));
p->buf = p->historybuffer;
}
}
}
static void predictor_decode_mono_3930(APEContext *ctx, int count)
{
APEPredictor *p = &ctx->predictor;
int32_t *decoded0 = ctx->decoded[0];
ape_apply_filters(ctx, ctx->decoded[0], NULL, count);
while (count--) {
*decoded0 = predictor_update_3930(p, *decoded0, 0, YDELAYA);
decoded0++;
p->buf++;
/* Have we filled the history buffer? */
if (p->buf == p->historybuffer + HISTORY_SIZE) {
memmove(p->historybuffer, p->buf,
PREDICTOR_SIZE * sizeof(*p->historybuffer));
p->buf = p->historybuffer;
}
}
}
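/** Version >= 3950 predictor update: two cascaded adaptive predictors (order 4
 *  on this channel's previous outputs, order 5 on the other channel's filtered
 *  output), both adapted using the sign of the decoded residual. */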
static av_always_inline int predictor_update_filter(APEPredictor *p,
const int decoded, const int filter,
const int delayA, const int delayB,
const int adaptA, const int adaptB)
{
int32_t predictionA, predictionB, sign;
p->buf[delayA] = p->lastA[filter];
p->buf[adaptA] = APESIGN(p->buf[delayA]);
p->buf[delayA - 1] = p->buf[delayA] - p->buf[delayA - 1];
p->buf[adaptA - 1] = APESIGN(p->buf[delayA - 1]);
predictionA = p->buf[delayA ] * p->coeffsA[filter][0] +
p->buf[delayA - 1] * p->coeffsA[filter][1] +
p->buf[delayA - 2] * p->coeffsA[filter][2] +
p->buf[delayA - 3] * p->coeffsA[filter][3];
/* Apply a scaled first-order filter compression */
p->buf[delayB] = p->filterA[filter ^ 1] - ((p->filterB[filter] * 31) >> 5);
p->buf[adaptB] = APESIGN(p->buf[delayB]);
p->buf[delayB - 1] = p->buf[delayB] - p->buf[delayB - 1];
p->buf[adaptB - 1] = APESIGN(p->buf[delayB - 1]);
p->filterB[filter] = p->filterA[filter ^ 1];
predictionB = p->buf[delayB ] * p->coeffsB[filter][0] +
p->buf[delayB - 1] * p->coeffsB[filter][1] +
p->buf[delayB - 2] * p->coeffsB[filter][2] +
p->buf[delayB - 3] * p->coeffsB[filter][3] +
p->buf[delayB - 4] * p->coeffsB[filter][4];
p->lastA[filter] = decoded + ((predictionA + (predictionB >> 1)) >> 10);
p->filterA[filter] = p->lastA[filter] + ((p->filterA[filter] * 31) >> 5);
sign = APESIGN(decoded);
p->coeffsA[filter][0] += p->buf[adaptA ] * sign;
p->coeffsA[filter][1] += p->buf[adaptA - 1] * sign;
p->coeffsA[filter][2] += p->buf[adaptA - 2] * sign;
p->coeffsA[filter][3] += p->buf[adaptA - 3] * sign;
p->coeffsB[filter][0] += p->buf[adaptB ] * sign;
p->coeffsB[filter][1] += p->buf[adaptB - 1] * sign;
p->coeffsB[filter][2] += p->buf[adaptB - 2] * sign;
p->coeffsB[filter][3] += p->buf[adaptB - 3] * sign;
p->coeffsB[filter][4] += p->buf[adaptB - 4] * sign;
return p->filterA[filter];
}
static void predictor_decode_stereo_3950(APEContext *ctx, int count)
{
APEPredictor *p = &ctx->predictor;
int32_t *decoded0 = ctx->decoded[0];
int32_t *decoded1 = ctx->decoded[1];
ape_apply_filters(ctx, ctx->decoded[0], ctx->decoded[1], count);
while (count--) {
/* Predictor Y */
*decoded0 = predictor_update_filter(p, *decoded0, 0, YDELAYA, YDELAYB,
YADAPTCOEFFSA, YADAPTCOEFFSB);
decoded0++;
*decoded1 = predictor_update_filter(p, *decoded1, 1, XDELAYA, XDELAYB,
XADAPTCOEFFSA, XADAPTCOEFFSB);
decoded1++;
/* Combined */
p->buf++;
/* Have we filled the history buffer? */
if (p->buf == p->historybuffer + HISTORY_SIZE) {
memmove(p->historybuffer, p->buf,
PREDICTOR_SIZE * sizeof(*p->historybuffer));
p->buf = p->historybuffer;
}
}
}
static void predictor_decode_mono_3950(APEContext *ctx, int count)
{
APEPredictor *p = &ctx->predictor;
int32_t *decoded0 = ctx->decoded[0];
int32_t predictionA, currentA, A, sign;
ape_apply_filters(ctx, ctx->decoded[0], NULL, count);
currentA = p->lastA[0];
while (count--) {
A = *decoded0;
p->buf[YDELAYA] = currentA;
p->buf[YDELAYA - 1] = p->buf[YDELAYA] - p->buf[YDELAYA - 1];
predictionA = p->buf[YDELAYA ] * p->coeffsA[0][0] +
p->buf[YDELAYA - 1] * p->coeffsA[0][1] +
p->buf[YDELAYA - 2] * p->coeffsA[0][2] +
p->buf[YDELAYA - 3] * p->coeffsA[0][3];
currentA = A + (predictionA >> 10);
p->buf[YADAPTCOEFFSA] = APESIGN(p->buf[YDELAYA ]);
p->buf[YADAPTCOEFFSA - 1] = APESIGN(p->buf[YDELAYA - 1]);
sign = APESIGN(A);
p->coeffsA[0][0] += p->buf[YADAPTCOEFFSA ] * sign;
p->coeffsA[0][1] += p->buf[YADAPTCOEFFSA - 1] * sign;
p->coeffsA[0][2] += p->buf[YADAPTCOEFFSA - 2] * sign;
p->coeffsA[0][3] += p->buf[YADAPTCOEFFSA - 3] * sign;
p->buf++;
/* Have we filled the history buffer? */
if (p->buf == p->historybuffer + HISTORY_SIZE) {
memmove(p->historybuffer, p->buf,
PREDICTOR_SIZE * sizeof(*p->historybuffer));
p->buf = p->historybuffer;
}
p->filterA[0] = currentA + ((p->filterA[0] * 31) >> 5);
*(decoded0++) = p->filterA[0];
}
p->lastA[0] = currentA;
}
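/*
 * Lay out one adaptive filter inside the caller-provided buffer: 'order'
 * coefficients first, followed by the history area; 'delay' and 'adaptcoeffs'
 * point 2 * order and order entries into that history, respectively.
 */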
static void do_init_filter(APEFilter *f, int16_t *buf, int order)
{
f->coeffs = buf;
f->historybuffer = buf + order;
f->delay = f->historybuffer + order * 2;
f->adaptcoeffs = f->historybuffer + order;
memset(f->historybuffer, 0, (order * 2) * sizeof(*f->historybuffer));
memset(f->coeffs, 0, order * sizeof(*f->coeffs));
f->avg = 0;
}
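/* Each filter level holds a pair of filters (one per possible channel) carved
 * out of a single buffer; the second starts order * 3 + HISTORY_SIZE entries in. */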
static void init_filter(APEContext *ctx, APEFilter *f, int16_t *buf, int order)
{
do_init_filter(&f[0], buf, order);
do_init_filter(&f[1], buf + order * 3 + HISTORY_SIZE, order);
}
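/*
 * Run one adaptive FIR filter over 'count' residuals. The DSP call returns the
 * fixed-point dot product of the coefficients with the delay line and, at the
 * same time, adapts the coefficients by the stored adaptation signs scaled with
 * the sign of the input. The rounded prediction is added back to the residual,
 * the clipped result is pushed into the delay line, and the history buffer is
 * compacted when the delay pointer reaches its end.
 */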
static void do_apply_filter(APEContext *ctx, int version, APEFilter *f,
int32_t *data, int count, int order, int fracbits)
{
int res;
int absres;
while (count--) {
/* round fixedpoint scalar product */
res = ctx->adsp.scalarproduct_and_madd_int16(f->coeffs,
f->delay - order,
f->adaptcoeffs - order,
order, APESIGN(*data));
res = (res + (1 << (fracbits - 1))) >> fracbits;
res += *data;
*data++ = res;
/* Update the output history */
*f->delay++ = av_clip_int16(res);
if (version < 3980) {
/* Version ??? to < 3.98 files (untested) */
f->adaptcoeffs[0] = (res == 0) ? 0 : ((res >> 28) & 8) - 4;
f->adaptcoeffs[-4] >>= 1;
f->adaptcoeffs[-8] >>= 1;
} else {
/* Version 3.98 and later files */
/* Update the adaption coefficients */
absres = FFABS(res);
if (absres)
*f->adaptcoeffs = APESIGN(res) *
(8 << ((absres > f->avg * 3) + (absres > f->avg * 4 / 3)));
/* equivalent to the following code
if (absres <= f->avg * 4 / 3)
*f->adaptcoeffs = APESIGN(res) * 8;
else if (absres <= f->avg * 3)
*f->adaptcoeffs = APESIGN(res) * 16;
else
*f->adaptcoeffs = APESIGN(res) * 32;
*/
else
*f->adaptcoeffs = 0;
f->avg += (absres - f->avg) / 16;
f->adaptcoeffs[-1] >>= 1;
f->adaptcoeffs[-2] >>= 1;
f->adaptcoeffs[-8] >>= 1;
}
f->adaptcoeffs++;
/* Have we filled the history buffer? */
if (f->delay == f->historybuffer + HISTORY_SIZE + (order * 2)) {
memmove(f->historybuffer, f->delay - (order * 2),
(order * 2) * sizeof(*f->historybuffer));
f->delay = f->historybuffer + order * 2;
f->adaptcoeffs = f->historybuffer + order;
}
}
}
static void apply_filter(APEContext *ctx, APEFilter *f,
int32_t *data0, int32_t *data1,
int count, int order, int fracbits)
{
do_apply_filter(ctx, ctx->fileversion, &f[0], data0, count, order, fracbits);
if (data1)
do_apply_filter(ctx, ctx->fileversion, &f[1], data1, count, order, fracbits);
}
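/* Apply every filter level configured for the current compression preset to
 * one or both channels. */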
static void ape_apply_filters(APEContext *ctx, int32_t *decoded0,
int32_t *decoded1, int count)
{
int i;
for (i = 0; i < APE_FILTER_LEVELS; i++) {
if (!ape_filter_orders[ctx->fset][i])
break;
apply_filter(ctx, ctx->filters[i], decoded0, decoded1, count,
ape_filter_orders[ctx->fset][i],
ape_filter_fracbits[ctx->fset][i]);
}
}
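/* Reset entropy, predictor and filter state at the start of each frame. */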
static int init_frame_decoder(APEContext *ctx)
{
int i, ret;
if ((ret = init_entropy_decoder(ctx)) < 0)
return ret;
init_predictor_decoder(ctx);
for (i = 0; i < APE_FILTER_LEVELS; i++) {
if (!ape_filter_orders[ctx->fset][i])
break;
init_filter(ctx, ctx->filters[i], ctx->filterbuf[i],
ape_filter_orders[ctx->fset][i]);
}
return 0;
}
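/*
 * Decode one run of mono samples: entropy-decode the residuals, run the
 * predictor, and duplicate the channel when the stream is pseudo-stereo.
 */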
static void ape_unpack_mono(APEContext *ctx, int count)
{
if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) {
/* We are pure silence, so we're done. */
av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence mono\n");
return;
}
ctx->entropy_decode_mono(ctx, count);
/* Now apply the predictor decoding */
ctx->predictor_decode_mono(ctx, count);
/* Pseudo-stereo - just copy left channel to right channel */
if (ctx->channels == 2) {
memcpy(ctx->decoded[1], ctx->decoded[0], count * sizeof(*ctx->decoded[1]));
}
}
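/*
 * Decode one run of stereo samples: entropy and predictor decoding produce a
 * mid/side-style pair (difference in decoded0, pseudo-average in decoded1)
 * that is converted back to left/right below.
 */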
static void ape_unpack_stereo(APEContext *ctx, int count)
{
int32_t left, right;
int32_t *decoded0 = ctx->decoded[0];
int32_t *decoded1 = ctx->decoded[1];
if ((ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) == APE_FRAMECODE_STEREO_SILENCE) {
/* We are pure silence, so we're done. */
av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence stereo\n");
return;
}
ctx->entropy_decode_stereo(ctx, count);
/* Now apply the predictor decoding */
ctx->predictor_decode_stereo(ctx, count);
/* Decorrelate and scale to output depth */
while (count--) {
left = *decoded1 - (*decoded0 / 2);
right = left + *decoded0;
*(decoded0++) = left;
*(decoded1++) = right;
}
}
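/*
 * Decode one packet. A packet carries a whole APE frame, but at most
 * blocks_per_loop samples are decoded per call; the caller keeps feeding the
 * same packet back (0 bytes are consumed) until s->samples drops to zero.
 */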
static int ape_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame_ptr, AVPacket *avpkt)
{
AVFrame *frame = data;
const uint8_t *buf = avpkt->data;
APEContext *s = avctx->priv_data;
uint8_t *sample8;
int16_t *sample16;
int32_t *sample24;
int i, ch, ret;
int blockstodecode;
/* this should never be negative, but bad things will happen if it is, so
check it just to make sure. */
av_assert0(s->samples >= 0);
    if (!s->samples) {
uint32_t nblocks, offset;
int buf_size;
if (!avpkt->size) {
*got_frame_ptr = 0;
return 0;
}
if (avpkt->size < 8) {
av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
return AVERROR_INVALIDDATA;
}
buf_size = avpkt->size & ~3;
if (buf_size != avpkt->size) {
av_log(avctx, AV_LOG_WARNING, "packet size is not a multiple of 4. "
"extra bytes at the end will be skipped.\n");
}
if (s->fileversion < 3950) // previous versions overread two bytes
buf_size += 2;
av_fast_padded_malloc(&s->data, &s->data_size, buf_size);
if (!s->data)
return AVERROR(ENOMEM);
s->bdsp.bswap_buf((uint32_t *) s->data, (const uint32_t *) buf,
buf_size >> 2);
memset(s->data + (buf_size & ~3), 0, buf_size & 3);
s->ptr = s->data;
s->data_end = s->data + buf_size;
nblocks = bytestream_get_be32(&s->ptr);
offset = bytestream_get_be32(&s->ptr);
if (s->fileversion >= 3900) {
if (offset > 3) {
av_log(avctx, AV_LOG_ERROR, "Incorrect offset passed\n");
s->data = NULL;
return AVERROR_INVALIDDATA;
}
if (s->data_end - s->ptr < offset) {
av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
return AVERROR_INVALIDDATA;
}
s->ptr += offset;
} else {
if ((ret = init_get_bits8(&s->gb, s->ptr, s->data_end - s->ptr)) < 0)
return ret;
if (s->fileversion > 3800)
skip_bits_long(&s->gb, offset * 8);
else
skip_bits_long(&s->gb, offset);
}
if (!nblocks || nblocks > INT_MAX) {
av_log(avctx, AV_LOG_ERROR, "Invalid sample count: %"PRIu32".\n",
nblocks);
return AVERROR_INVALIDDATA;
}
/* Initialize the frame decoder */
if (init_frame_decoder(s) < 0) {
av_log(avctx, AV_LOG_ERROR, "Error reading frame header\n");
return AVERROR_INVALIDDATA;
}
s->samples = nblocks;
}
if (!s->data) {
*got_frame_ptr = 0;
return avpkt->size;
}
blockstodecode = FFMIN(s->blocks_per_loop, s->samples);
// for old files coefficients were not interleaved,
// so we need to decode all of them at once
if (s->fileversion < 3930)
blockstodecode = s->samples;
/* reallocate decoded sample buffer if needed */
av_fast_malloc(&s->decoded_buffer, &s->decoded_size,
2 * FFALIGN(blockstodecode, 8) * sizeof(*s->decoded_buffer));
if (!s->decoded_buffer)
return AVERROR(ENOMEM);
memset(s->decoded_buffer, 0, s->decoded_size);
s->decoded[0] = s->decoded_buffer;
s->decoded[1] = s->decoded_buffer + FFALIGN(blockstodecode, 8);
/* get output buffer */
frame->nb_samples = blockstodecode;
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
return ret;
    s->error = 0;
if ((s->channels == 1) || (s->frameflags & APE_FRAMECODE_PSEUDO_STEREO))
ape_unpack_mono(s, blockstodecode);
else
ape_unpack_stereo(s, blockstodecode);
emms_c();
if (s->error) {
        s->samples = 0;
av_log(avctx, AV_LOG_ERROR, "Error decoding frame\n");
return AVERROR_INVALIDDATA;
}
switch (s->bps) {
case 8:
for (ch = 0; ch < s->channels; ch++) {
sample8 = (uint8_t *)frame->data[ch];
for (i = 0; i < blockstodecode; i++)
*sample8++ = (s->decoded[ch][i] + 0x80) & 0xff;
}
break;
case 16:
for (ch = 0; ch < s->channels; ch++) {
sample16 = (int16_t *)frame->data[ch];
for (i = 0; i < blockstodecode; i++)
*sample16++ = s->decoded[ch][i];
}
break;
case 24:
for (ch = 0; ch < s->channels; ch++) {
sample24 = (int32_t *)frame->data[ch];
for (i = 0; i < blockstodecode; i++)
                *sample24++ = s->decoded[ch][i] * 256;  /* scale 24-bit samples up to 32 bits; '* 256' avoids shifting a negative value */
}
break;
}
s->samples -= blockstodecode;
*got_frame_ptr = 1;
return !s->samples ? avpkt->size : 0;
}
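/* Discard any samples still pending from the current frame (e.g. on seek). */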
static void ape_flush(AVCodecContext *avctx)
{
APEContext *s = avctx->priv_data;
    s->samples = 0;
}
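/* Decoder-private options exposed through the AVClass below. */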
#define OFFSET(x) offsetof(APEContext, x)
#define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM)
static const AVOption options[] = {
{ "max_samples", "maximum number of samples decoded per call", OFFSET(blocks_per_loop), AV_OPT_TYPE_INT, { .i64 = 4608 }, 1, INT_MAX, PAR, "max_samples" },
{ "all", "no maximum. decode all samples for each packet at once", 0, AV_OPT_TYPE_CONST, { .i64 = INT_MAX }, INT_MIN, INT_MAX, PAR, "max_samples" },
{ NULL},
};
static const AVClass ape_decoder_class = {
.class_name = "APE decoder",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
AVCodec ff_ape_decoder = {
.name = "ape",
.long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"),
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_APE,
.priv_data_size = sizeof(APEContext),
.init = ape_decode_init,
.close = ape_decode_close,
.decode = ape_decode_frame,
.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DELAY |
AV_CODEC_CAP_DR1,
.flush = ape_flush,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_S32P,
AV_SAMPLE_FMT_NONE },
.priv_class = &ape_decoder_class,
};
|
387213.c | /*****************************************************************************/
/* */
/* model.c */
/* */
/* o65 model definitions for the co65 object file converter */
/* */
/* */
/* */
/* (C) 2003 Ullrich von Bassewitz */
/* Roemerstrasse 52 */
/* D-70794 Filderstadt */
/* EMail: uz@cc65.org */
/* */
/* */
/* This software is provided 'as-is', without any expressed or implied */
/* warranty. In no event will the authors be held liable for any damages */
/* arising from the use of this software. */
/* */
/* Permission is granted to anyone to use this software for any purpose, */
/* including commercial applications, and to alter it and redistribute it */
/* freely, subject to the following restrictions: */
/* */
/* 1. The origin of this software must not be misrepresented; you must not */
/* claim that you wrote the original software. If you use this software */
/* in a product, an acknowledgment in the product documentation would be */
/* appreciated but is not required. */
/* 2. Altered source versions must be plainly marked as such, and must not */
/* be misrepresented as being the original software. */
/* 3. This notice may not be removed or altered from any source */
/* distribution. */
/* */
/*****************************************************************************/
/* common */
#include "strutil.h"
/* co65 */
#include "error.h"
#include "model.h"
/*****************************************************************************/
/* Data */
/*****************************************************************************/
/* Current model */
O65Model Model = O65_MODEL_NONE;
/* Name table; must stay in the same order as the O65Model enumeration */
static const char* const NameTable[O65_MODEL_COUNT] = {
"none",
"os/a65",
"lunix",
"cc65-module"
};
/*****************************************************************************/
/* Code */
/*****************************************************************************/
const char* GetModelName (O65Model M)
/* Map the model to its name. */
{
if (M < 0 || M >= O65_MODEL_COUNT) {
Internal ("O65 Model %d not found", M);
}
return NameTable[M];
}
O65Model FindModel (const char* ModelName)
/* Map a model name to its identifier. Return O65_MODEL_INVALID if the name
** could not be found. Case is ignored when comparing names.
*/
{
O65Model M;
for (M = O65_MODEL_NONE; M < O65_MODEL_COUNT; ++M) {
if (StrCaseCmp (ModelName, NameTable[M]) == 0) {
return M;
}
}
return O65_MODEL_INVALID;
}
|